path: root/drivers/gpu/drm/clients/Makefile
blob: c16addbc327f09572aa3142cbf0d1d13f172a9e9
# SPDX-License-Identifier: GPL-2.0

subdir-ccflags-y += -I$(src)/..

drm_client_lib-y := drm_client_setup.o
drm_client_lib-$(CONFIG_DRM_CLIENT_LOG) += drm_log.o
drm_client_lib-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_client.o
obj-$(CONFIG_DRM_CLIENT_LIB) += drm_client_lib.o
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/Kconfig25
-rw-r--r--drivers/net/Makefile7
-rw-r--r--drivers/net/Space.c5
-rw-r--r--drivers/net/amt.c316
-rw-r--r--drivers/net/appletalk/Kconfig11
-rw-r--r--drivers/net/appletalk/Makefile1
-rw-r--r--drivers/net/appletalk/ipddp.c1
-rw-r--r--drivers/net/appletalk/ltpc.c1277
-rw-r--r--drivers/net/appletalk/ltpc.h74
-rw-r--r--drivers/net/arcnet/com20020-pci.c3
-rw-r--r--drivers/net/arcnet/com20020_cs.c11
-rw-r--r--drivers/net/bareudp.c73
-rw-r--r--drivers/net/bonding/bond_3ad.c92
-rw-r--r--drivers/net/bonding/bond_alb.c43
-rw-r--r--drivers/net/bonding/bond_debugfs.c2
-rw-r--r--drivers/net/bonding/bond_main.c840
-rw-r--r--drivers/net/bonding/bond_netlink.c188
-rw-r--r--drivers/net/bonding/bond_options.c199
-rw-r--r--drivers/net/bonding/bond_procfs.c26
-rw-r--r--drivers/net/bonding/bond_sysfs.c135
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c36
-rw-r--r--drivers/net/caif/caif_serial.c2
-rw-r--r--drivers/net/caif/caif_virtio.c15
-rw-r--r--drivers/net/can/Kconfig147
-rw-r--r--drivers/net/can/Makefile8
-rw-r--r--drivers/net/can/at91_can.c38
-rw-r--r--drivers/net/can/bxcan.c1098
-rw-r--r--drivers/net/can/c_can/Kconfig3
-rw-r--r--drivers/net/can/c_can/c_can.h20
-rw-r--r--drivers/net/can/c_can/c_can_ethtool.c21
-rw-r--r--drivers/net/can/c_can/c_can_main.c57
-rw-r--r--drivers/net/can/c_can/c_can_pci.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c3
-rw-r--r--drivers/net/can/can327.c1149
-rw-r--r--drivers/net/can/cc770/cc770.c25
-rw-r--r--drivers/net/can/cc770/cc770_isa.c10
-rw-r--r--drivers/net/can/cc770/cc770_platform.c12
-rw-r--r--drivers/net/can/ctucanfd/Kconfig34
-rw-r--r--drivers/net/can/ctucanfd/Makefile10
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd.h82
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c1458
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_kframe.h77
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_kregs.h349
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_pci.c290
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_platform.c128
-rw-r--r--drivers/net/can/dev/Makefile17
-rw-r--r--drivers/net/can/dev/bittiming.c302
-rw-r--r--drivers/net/can/dev/calc_bittiming.c198
-rw-r--r--drivers/net/can/dev/dev.c96
-rw-r--r--drivers/net/can/dev/netlink.c91
-rw-r--r--drivers/net/can/dev/rx-offload.c22
-rw-r--r--drivers/net/can/dev/skb.c167
-rw-r--r--drivers/net/can/flexcan/Makefile7
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c (renamed from drivers/net/can/flexcan.c)322
-rw-r--r--drivers/net/can/flexcan/flexcan-ethtool.c110
-rw-r--r--drivers/net/can/flexcan/flexcan.h165
-rw-r--r--drivers/net/can/grcan.c79
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c34
-rw-r--r--drivers/net/can/janz-ican3.c24
-rw-r--r--drivers/net/can/kvaser_pciefd.c35
-rw-r--r--drivers/net/can/led.c140
-rw-r--r--drivers/net/can/m_can/Kconfig1
-rw-r--r--drivers/net/can/m_can/m_can.c292
-rw-r--r--drivers/net/can/m_can/m_can.h22
-rw-r--r--drivers/net/can/m_can/m_can_pci.c57
-rw-r--r--drivers/net/can/m_can/m_can_platform.c6
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c22
-rw-r--r--drivers/net/can/m_can/tcan4x5x-regmap.c49
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c12
-rw-r--r--drivers/net/can/mscan/mscan.c23
-rw-r--r--drivers/net/can/pch_can.c1247
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c70
-rw-r--r--drivers/net/can/rcar/rcar_can.c51
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c668
-rw-r--r--drivers/net/can/sja1000/Kconfig2
-rw-r--r--drivers/net/can/sja1000/ems_pci.c154
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c2
-rw-r--r--drivers/net/can/sja1000/sja1000.c52
-rw-r--r--drivers/net/can/sja1000/sja1000.h3
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c10
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c71
-rw-r--r--drivers/net/can/sja1000/tscan1.c7
-rw-r--r--drivers/net/can/slcan.c790
-rw-r--r--drivers/net/can/slcan/Makefile7
-rw-r--r--drivers/net/can/slcan/slcan-core.c941
-rw-r--r--drivers/net/can/slcan/slcan-ethtool.c61
-rw-r--r--drivers/net/can/slcan/slcan.h19
-rw-r--r--drivers/net/can/softing/softing_cs.c2
-rw-r--r--drivers/net/can/softing/softing_fw.c11
-rw-r--r--drivers/net/can/softing/softing_main.c25
-rw-r--r--drivers/net/can/spi/hi311x.c115
-rw-r--r--drivers/net/can/spi/mcp251x.c83
-rw-r--r--drivers/net/can/spi/mcp251xfd/Kconfig1
-rw-r--r--drivers/net/can/spi/mcp251xfd/Makefile7
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c119
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c1483
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c6
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c145
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c153
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h62
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c47
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c522
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c276
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c266
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c205
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h168
-rw-r--r--drivers/net/can/sun4i_can.c109
-rw-r--r--drivers/net/can/ti_hecc.c40
-rw-r--r--drivers/net/can/usb/Kconfig25
-rw-r--r--drivers/net/can/usb/Makefile2
-rw-r--r--drivers/net/can/usb/ems_usb.c29
-rw-r--r--drivers/net/can/usb/esd_usb.c (renamed from drivers/net/can/usb/esd_usb2.c)471
-rw-r--r--drivers/net/can/usb/etas_es58x/Makefile2
-rw-r--r--drivers/net/can/usb/etas_es58x/es581_4.c9
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c157
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.h74
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_devlink.c235
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.c15
-rw-r--r--drivers/net/can/usb/gs_usb.c1016
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb.h63
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c536
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c313
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c698
-rw-r--r--drivers/net/can/usb/mcba_usb.c74
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c58
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c189
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h18
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c144
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c43
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h3
-rw-r--r--drivers/net/can/usb/ucan.c41
-rw-r--r--drivers/net/can/usb/usb_8dev.c80
-rw-r--r--drivers/net/can/vcan.c23
-rw-r--r--drivers/net/can/vxcan.c37
-rw-r--r--drivers/net/can/xilinx_can.c136
-rw-r--r--drivers/net/dsa/Kconfig67
-rw-r--r--drivers/net/dsa/Makefile7
-rw-r--r--drivers/net/dsa/b53/Kconfig1
-rw-r--r--drivers/net/dsa/b53/b53_common.c216
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c7
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c47
-rw-r--r--drivers/net/dsa/b53/b53_priv.h69
-rw-r--r--drivers/net/dsa/b53/b53_regs.h1
-rw-r--r--drivers/net/dsa/b53/b53_serdes.c93
-rw-r--r--drivers/net/dsa/b53/b53_serdes.h14
-rw-r--r--drivers/net/dsa/b53/b53_spi.c6
-rw-r--r--drivers/net/dsa/b53/b53_srab.c41
-rw-r--r--drivers/net/dsa/bcm_sf2.c249
-rw-r--r--drivers/net/dsa/bcm_sf2.h10
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c12
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h65
-rw-r--r--drivers/net/dsa/dsa_loop.c37
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c234
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.h7
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c6
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_ptp.c46
-rw-r--r--drivers/net/dsa/lan9303-core.c239
-rw-r--r--drivers/net/dsa/lan9303_i2c.c15
-rw-r--r--drivers/net/dsa/lan9303_mdio.c5
-rw-r--r--drivers/net/dsa/lantiq_gswip.c229
-rw-r--r--drivers/net/dsa/microchip/Kconfig49
-rw-r--r--drivers/net/dsa/microchip/Makefile16
-rw-r--r--drivers/net/dsa/microchip/ksz8.h105
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c1233
-rw-r--r--drivers/net/dsa/microchip/ksz8795_reg.h45
-rw-r--r--drivers/net/dsa/microchip/ksz8795_spi.c142
-rw-r--r--drivers/net/dsa/microchip/ksz8863_smi.c49
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c893
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h62
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c62
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h88
-rw-r--r--drivers/net/dsa/microchip/ksz9477_spi.c122
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c3431
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h546
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c1201
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.h86
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp_reg.h142
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c244
-rw-r--r--drivers/net/dsa/microchip/lan937x.h24
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c404
-rw-r--r--drivers/net/dsa/microchip/lan937x_reg.h205
-rw-r--r--drivers/net/dsa/mt7530-mdio.c271
-rw-r--r--drivers/net/dsa/mt7530-mmio.c101
-rw-r--r--drivers/net/dsa/mt7530.c1367
-rw-r--r--drivers/net/dsa/mt7530.h130
-rw-r--r--drivers/net/dsa/mv88e6060.c5
-rw-r--r--drivers/net/dsa/mv88e6xxx/Kconfig4
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile5
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c1815
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h77
-rw-r--r--drivers/net/dsa/mv88e6xxx/devlink.c99
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c12
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h13
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c107
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_vtu.c319
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c106
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h24
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2_scratch.c28
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c32
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.h4
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c105
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h20
-rw-r--r--drivers/net/dsa/mv88e6xxx/port_hidden.c5
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c46
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c124
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h5
-rw-r--r--drivers/net/dsa/mv88e6xxx/smi.c35
-rw-r--r--drivers/net/dsa/mv88e6xxx/switchdev.c83
-rw-r--r--drivers/net/dsa/mv88e6xxx/switchdev.h19
-rw-r--r--drivers/net/dsa/mv88e6xxx/trace.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/trace.h96
-rw-r--r--drivers/net/dsa/ocelot/Kconfig34
-rw-r--r--drivers/net/dsa/ocelot/Makefile13
-rw-r--r--drivers/net/dsa/ocelot/felix.c1504
-rw-r--r--drivers/net/dsa/ocelot/felix.h63
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c1875
-rw-r--r--drivers/net/dsa/ocelot/ocelot_ext.c164
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c549
-rw-r--r--drivers/net/dsa/qca/Kconfig16
-rw-r--r--drivers/net/dsa/qca/Makefile5
-rw-r--r--drivers/net/dsa/qca/ar9331.c84
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c (renamed from drivers/net/dsa/qca8k.c)2292
-rw-r--r--drivers/net/dsa/qca/qca8k-common.c1184
-rw-r--r--drivers/net/dsa/qca/qca8k-leds.c277
-rw-r--r--drivers/net/dsa/qca/qca8k.h588
-rw-r--r--drivers/net/dsa/qca/qca8k_leds.h16
-rw-r--r--drivers/net/dsa/qca8k.h311
-rw-r--r--drivers/net/dsa/realtek-smi-core.c523
-rw-r--r--drivers/net/dsa/realtek/Kconfig54
-rw-r--r--drivers/net/dsa/realtek/Makefile6
-rw-r--r--drivers/net/dsa/realtek/realtek-mdio.c290
-rw-r--r--drivers/net/dsa/realtek/realtek-smi.c570
-rw-r--r--drivers/net/dsa/realtek/realtek.h (renamed from drivers/net/dsa/realtek-smi-core.h)91
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c (renamed from drivers/net/dsa/rtl8365mb.c)937
-rw-r--r--drivers/net/dsa/realtek/rtl8366-core.c (renamed from drivers/net/dsa/rtl8366.c)164
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c (renamed from drivers/net/dsa/rtl8366rb.c)494
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c1056
-rw-r--r--drivers/net/dsa/rzn1_a5psw.h259
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h22
-rw-r--r--drivers/net/dsa/sja1105/sja1105_devlink.c16
-rw-r--r--drivers/net/dsa/sja1105/sja1105_flower.c49
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c386
-rw-r--r--drivers/net/dsa/sja1105/sja1105_mdio.c131
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c88
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.h24
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c24
-rw-r--r--drivers/net/dsa/sja1105/sja1105_vl.c16
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c7
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-platform.c2
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-spi.c18
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx.h2
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c46
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_i2c.c13
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_mdio.c2
-rw-r--r--drivers/net/dummy.c7
-rw-r--r--drivers/net/eql.c7
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c515.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c6
-rw-r--r--drivers/net/ethernet/3com/typhoon.c44
-rw-r--r--drivers/net/ethernet/8390/ax88796.c6
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c3
-rw-r--r--drivers/net/ethernet/8390/etherh.c12
-rw-r--r--drivers/net/ethernet/8390/hydra.c4
-rw-r--r--drivers/net/ethernet/8390/mac8390.c4
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c15
-rw-r--r--drivers/net/ethernet/8390/smc-ultra.c4
-rw-r--r--drivers/net/ethernet/8390/wd.c4
-rw-r--r--drivers/net/ethernet/Kconfig32
-rw-r--r--drivers/net/ethernet/Makefile6
-rw-r--r--drivers/net/ethernet/actions/owl-emac.c8
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c6
-rw-r--r--drivers/net/ethernet/adi/Kconfig28
-rw-r--r--drivers/net/ethernet/adi/Makefile6
-rw-r--r--drivers/net/ethernet/adi/adin1110.c1735
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c7
-rw-r--r--drivers/net/ethernet/agere/et131x.c27
-rw-r--r--drivers/net/ethernet/alacritech/slic.h14
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c6
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c249
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.h18
-rw-r--r--drivers/net/ethernet/alteon/acenic.c16
-rw-r--r--drivers/net/ethernet/alteon/acenic.h1
-rw-r--r--drivers/net/ethernet/altera/Kconfig2
-rw-r--r--drivers/net/ethernet/altera/altera_sgdma.c2
-rw-r--r--drivers/net/ethernet/altera/altera_tse.h19
-rw-r--r--drivers/net/ethernet/altera/altera_tse_ethtool.c23
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c470
-rw-r--r--drivers/net/ethernet/altera/altera_utils.h5
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_admin_defs.h10
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c37
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h13
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.h4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c118
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c513
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h57
-rw-r--r--drivers/net/ethernet/amd/Kconfig24
-rw-r--r--drivers/net/ethernet/amd/Makefile2
-rw-r--r--drivers/net/ethernet/amd/a2065.c20
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c54
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h2
-rw-r--r--drivers/net/ethernet/amd/ariadne.c24
-rw-r--r--drivers/net/ethernet/amd/atarilance.c19
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c30
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.h4
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/hplance.c4
-rw-r--r--drivers/net/ethernet/amd/lance.c10
-rw-r--r--drivers/net/ethernet/amd/mvme147.c14
-rw-r--r--drivers/net/ethernet/amd/ni65.c1249
-rw-r--r--drivers/net/ethernet/amd/ni65.h121
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c22
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c27
-rw-r--r--drivers/net/ethernet/amd/pds_core/Makefile13
-rw-r--r--drivers/net/ethernet/amd/pds_core/adminq.c290
-rw-r--r--drivers/net/ethernet/amd/pds_core/auxbus.c264
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c597
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h312
-rw-r--r--drivers/net/ethernet/amd/pds_core/debugfs.c170
-rw-r--r--drivers/net/ethernet/amd/pds_core/dev.c351
-rw-r--r--drivers/net/ethernet/amd/pds_core/devlink.c183
-rw-r--r--drivers/net/ethernet/amd/pds_core/fw.c194
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c480
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c4
-rw-r--r--drivers/net/ethernet/amd/sunlance.c6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h55
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dcb.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c117
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c32
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c15
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c52
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c19
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c488
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-platform.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c20
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h47
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c24
-rw-r--r--drivers/net/ethernet/apple/bmac.c8
-rw-r--r--drivers/net/ethernet/apple/mace.c19
-rw-r--r--drivers/net/ethernet/apple/macmace.c14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c24
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_macsec.c155
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c98
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.h4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c170
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c25
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ptp.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c459
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h21
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c50
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c17
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c18
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h4
-rw-r--r--drivers/net/ethernet/arc/emac_main.c5
-rw-r--r--drivers/net/ethernet/arc/emac_mdio.c5
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c34
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c155
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c26
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c31
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c6
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c14
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c21
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c6
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig6
-rw-r--r--drivers/net/ethernet/broadcom/b44.c46
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c86
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c71
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c46
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h13
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c1
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c7
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c23
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c19
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c81
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c76
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c128
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h10
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c25
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c935
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h127
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c159
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c378
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h1139
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c114
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c296
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c477
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h51
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c43
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c207
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h20
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c15
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c95
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c15
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c33
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c19
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c63
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cs.h60
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c10
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h8
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.h8
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c6
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c6
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h27
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c43
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c38
-rw-r--r--drivers/net/ethernet/cadence/macb.h55
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c845
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c102
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c11
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c56
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c7
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c9
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c11
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c16
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c29
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c20
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c5
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cphy.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c33
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/elmer0.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/espi.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/espi.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/gmac.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/mv88x201x.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/regs.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.h11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/subr.c11
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c28
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c31
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c60
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c59
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c41
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c38
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c36
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c42
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c17
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c26
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c3
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c2
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c4
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c7
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_desc.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_enet_desc.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h40
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_api.c19
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_api.h19
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.c19
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.h19
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c33
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c49
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.c19
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.h19
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.c15
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/rq_enet_desc.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.c15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_cq.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_devcmd.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_enet.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_intr.c15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_intr.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_nic.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_resource.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c16
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rss.h14
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_stats.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_vic.c19
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_vic.h19
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c15
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.h15
-rw-r--r--drivers/net/ethernet/cisco/enic/wq_enet_desc.h15
-rw-r--r--drivers/net/ethernet/cortina/gemini.c29
-rw-r--r--drivers/net/ethernet/davicom/Kconfig31
-rw-r--r--drivers/net/ethernet/davicom/Makefile1
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c36
-rw-r--r--drivers/net/ethernet/davicom/dm9051.c1262
-rw-r--r--drivers/net/ethernet/davicom/dm9051.h162
-rw-r--r--drivers/net/ethernet/dec/tulip/Kconfig15
-rw-r--r--drivers/net/ethernet/dec/tulip/Makefile1
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c5591
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.h1017
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/eeprom.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/pnic.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c65
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c4
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c6
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c6
-rw-r--r--drivers/net/ethernet/dlink/sundance.c66
-rw-r--r--drivers/net/ethernet/dnet.c10
-rw-r--r--drivers/net/ethernet/ec_bhf.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c49
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c53
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c57
-rw-r--r--drivers/net/ethernet/engleder/Kconfig40
-rw-r--r--drivers/net/ethernet/engleder/Makefile10
-rw-r--r--drivers/net/ethernet/engleder/tsnep.h259
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ethtool.c486
-rw-r--r--drivers/net/ethernet/engleder/tsnep_hw.h246
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c2617
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ptp.c246
-rw-r--r--drivers/net/ethernet/engleder/tsnep_rxnfc.c307
-rw-r--r--drivers/net/ethernet/engleder/tsnep_selftests.c811
-rw-r--r--drivers/net/ethernet/engleder/tsnep_tc.c464
-rw-r--r--drivers/net/ethernet/engleder/tsnep_xdp.c85
-rw-r--r--drivers/net/ethernet/ethoc.c19
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c5
-rw-r--r--drivers/net/ethernet/faraday/Kconfig12
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c317
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c81
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.h12
-rw-r--r--drivers/net/ethernet/fealnx.c4
-rw-r--r--drivers/net/ethernet/freescale/Kconfig9
-rw-r--r--drivers/net/ethernet/freescale/dpaa/Kconfig4
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c194
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h31
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c32
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h32
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c134
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/Makefile2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c57
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c22
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h142
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c1060
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h142
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c130
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c285
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h21
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c9
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c45
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c8
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c78
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h9
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c454
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h12
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpmac.c54
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpmac.h5
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h25
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.c8
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.h15
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig15
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile8
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c893
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h111
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_cbdr.c49
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c571
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h161
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ierb.c6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_mdio.c121
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c10
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c278
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.h4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c9
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c300
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c17
-rw-r--r--drivers/net/ethernet/freescale/fec.h54
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c957
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c18
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx_phy.c4
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c190
-rw-r--r--drivers/net/ethernet/freescale/fman/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c63
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.h31
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c676
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.h58
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_keygen.c29
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_keygen.h29
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_mac.h34
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c848
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.h57
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.c31
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.h32
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c41
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.h29
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_sp.c29
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_sp.h28
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c264
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.h54
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c651
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h64
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c8
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet.h2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c15
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c17
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c12
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c269
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c4
-rw-r--r--drivers/net/ethernet/fungible/Kconfig28
-rw-r--r--drivers/net/ethernet/fungible/Makefile7
-rw-r--r--drivers/net/ethernet/fungible/funcore/Makefile5
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_dev.c836
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_dev.h150
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_hci.h1242
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_queue.c601
-rw-r--r--drivers/net/ethernet/fungible/funcore/fun_queue.h175
-rw-r--r--drivers/net/ethernet/fungible/funeth/Kconfig17
-rw-r--r--drivers/net/ethernet/fungible/funeth/Makefile10
-rw-r--r--drivers/net/ethernet/fungible/funeth/fun_port.h97
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth.h171
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_devlink.c27
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_devlink.h13
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ethtool.c1198
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ktls.c155
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ktls.h30
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_main.c2081
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_rx.c829
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_trace.h117
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_tx.c801
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_txrx.h265
-rw-r--r--drivers/net/ethernet/google/gve/gve.h160
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c41
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h55
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc.h20
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc_dqo.h5
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h24
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c184
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c915
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c689
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c379
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c26
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c34
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/Makefile19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h87
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h80
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c621
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h468
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c505
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h134
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c115
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h39
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c28
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c117
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c1325
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h33
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c391
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_trace.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c591
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h463
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c91
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c183
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c29
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c2249
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h158
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c703
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c24
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c253
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c556
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h218
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c922
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h100
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c111
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c194
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_debugfs.c18
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_debugfs.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_dev.h7
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c41
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c24
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h175
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.c35
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.h9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c17
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c11
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c32
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h25
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c112
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c50
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.h12
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c11
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.c40
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_sriov.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.h2
-rw-r--r--drivers/net/ethernet/i825xx/82596.c3
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c4
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c6
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c17
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.h4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea.h1
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_ethtool.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c7
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c27
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c325
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h25
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c1244
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h76
-rw-r--r--drivers/net/ethernet/intel/Kconfig28
-rw-r--r--drivers/net/ethernet/intel/Makefile1
-rw-r--r--drivers/net/ethernet/intel/e100.c20
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c29
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_param.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/Makefile3
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h8
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000e_trace.h42
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c24
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h9
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c55
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c183
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c37
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c31
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c13
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_mbx.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c8
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_tlv.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h71
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c163
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_alloc.h22
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c19
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c1224
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c60
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ddp.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c24
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c23
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c349
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c56
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h46
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c94
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c1192
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c257
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h661
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c49
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_trace.h69
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c651
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx_common.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c470
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h13
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c148
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h180
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adminq.c19
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.c32
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c10
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c94
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c1756
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_status.h4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c155
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.h30
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c895
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile16
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h156
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h424
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c75
-rw-r--r--drivers/net/ethernet/intel/ice/ice_cgu_regs.h116
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c1072
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h128
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c132
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c139
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h30
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c113
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c1897
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.h445
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c1333
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.h5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c367
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c614
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c308
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c2012
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h53
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h376
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c215
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c286
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.h47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c479
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c471
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.h59
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c109
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc_int.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c56
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h15
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c1912
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h88
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c2973
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c217
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.h36
-rw-r--r--drivers/net/ethernet/intel/ice/ice_osdep.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h50
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c1649
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h126
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_consts.h374
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c2907
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h363
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c160
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c513
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h68
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c2073
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h160
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c2619
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h109
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c905
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h80
-rw-r--r--drivers/net/ethernet/intel/ice/ice_trace.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c615
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h128
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c284
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h105
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h92
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c1312
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h295
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.c380
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.h51
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c225
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c4068
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.h94
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c425
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c5343
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h344
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vlan.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vlan_mode.c438
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vlan_mode.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c707
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c103
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c642
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h33
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c36
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c603
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c232
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c65
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c13
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c31
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h13
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h25
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c27
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h13
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c32
-rw-r--r--drivers/net/ethernet/intel/igc/igc_mac.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c641
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c24
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c59
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h10
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c111
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c6
-rw-r--r--drivers/net/ethernet/intel/ixgb/Makefile9
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb.h179
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.c580
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ee.h79
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c638
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c1229
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.h767
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ids.h23
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c2295
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_osdep.h39
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_param.c444
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h54
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c59
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c125
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c38
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c395
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c251
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c141
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c223
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h20
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c60
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c88
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c41
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h7
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c101
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c323
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h21
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c106
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h6
-rw-r--r--drivers/net/ethernet/jme.c11
-rw-r--r--drivers/net/ethernet/jme.h2
-rw-r--r--drivers/net/ethernet/korina.c11
-rw-r--r--drivers/net/ethernet/lantiq_etop.c62
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c143
-rw-r--r--drivers/net/ethernet/litex/Kconfig2
-rw-r--r--drivers/net/ethernet/litex/litex_liteeth.c3
-rw-r--r--drivers/net/ethernet/marvell/Kconfig3
-rw-r--r--drivers/net/ethernet/marvell/Makefile1
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c110
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c30
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c813
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h4
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c30
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c10
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c311
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c86
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/Kconfig20
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/Makefile9
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c755
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_config.h210
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c269
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h174
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c335
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h337
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c467
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c1267
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.h375
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h373
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.c506
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.h199
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.c334
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.h284
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/Kconfig2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c400
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h29
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h602
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs.c1605
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs.h244
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c277
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h1106
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c926
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h34
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h75
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c239
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c586
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h78
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c106
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h91
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c239
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c31
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c317
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c558
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c103
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c168
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c58
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c127
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c357
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c2017
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h239
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h23
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c1693
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c175
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h183
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c470
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c59
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c136
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c144
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c358
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c111
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h15
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h68
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c186
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c215
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c68
-rw-r--r--drivers/net/ethernet/marvell/prestera/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/prestera/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera.h117
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.c907
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_acl.h251
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.c475
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_counter.h30
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.c22
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.h5
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.c32
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.h3
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.c173
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flow.h23
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.c504
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_flower.h9
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c1177
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.h126
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c629
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_matchall.c127
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_matchall.h17
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c118
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router.c1645
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.c688
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.h155
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_rxtx.c11
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_span.c72
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_span.h12
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.c714
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c24
-rw-r--r--drivers/net/ethernet/marvell/skge.c17
-rw-r--r--drivers/net/ethernet/marvell/sky2.c115
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig11
-rw-r--r--drivers/net/ethernet/mediatek/Makefile7
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_path.c14
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c2962
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h643
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c763
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.h183
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c59
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c289
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_regs.h28
-rw-r--r--drivers/net/ethernet/mediatek/mtk_sgmii.c126
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c541
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c1965
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h169
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_debugfs.c263
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_mcu.c395
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_ops.c8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_regs.h468
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.c501
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.h282
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/crdump.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c100
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c126
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c179
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h96
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c385
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c125
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h156
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c589
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c95
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c503
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c125
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h351
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/channels.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/channels.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h198
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c188
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/htb.c722
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/htb.h46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c765
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c229
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c224
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c811
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c409
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c127
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c192
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/selq.c266
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/selq.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/accept.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c134
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h124
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/csum.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/drop.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c125
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mark.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c335
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred_nic.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mpls.c99
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c212
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ptype.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/sample.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/trap.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/tun.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c201
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c202
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_dmfs.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c380
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c595
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c460
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c100
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c733
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c143
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h109
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c510
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c276
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h45
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c1056
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h257
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c1781
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c603
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c284
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c128
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c)53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c613
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c1876
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.h73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c1393
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.h47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c247
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h132
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c390
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h91
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c143
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c168
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c849
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c1573
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c473
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c1257
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c289
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h64
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c3350
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h103
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c317
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c349
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c589
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c1126
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h181
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c226
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c247
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c338
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h120
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c1536
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c1582
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c622
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.h74
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c513
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c216
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c222
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c182
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c907
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c136
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c210
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c131
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c432
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c219
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c755
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/dm.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c371
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c519
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mcg.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c104
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c516
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c240
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rl.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c93
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c577
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c273
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c195
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c720
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c151
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c164
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c347
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c307
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c241
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c216
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c444
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c299
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c544
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c234
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h476
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c154
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/thermal.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/thermal.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c96
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_intr.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c117
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c284
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf2.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf3.h54
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile7
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h121
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c839
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h214
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c106
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c772
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h48
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c358
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c183
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c1601
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c732
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/emad.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c98
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c413
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c150
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h83
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h2540
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c1192
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h180
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c114
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c351
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c249
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c98
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c96
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c1084
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c189
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c165
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c346
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c765
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h78
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c1698
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h87
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router_xm.c812
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c1080
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h12
-rw-r--r--drivers/net/ethernet/micrel/Kconfig2
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c46
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c11
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c327
-rw-r--r--drivers/net/ethernet/microchip/Kconfig2
-rw-r--r--drivers/net/ethernet/microchip/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c12
-rw-r--r--drivers/net/ethernet/microchip/encx24j600-regmap.c4
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c10
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c588
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.h95
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c794
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h341
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c631
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.h10
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Kconfig12
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile21
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_cbs.c70
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c727
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ets.c96
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdb.c289
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c1072
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_goto.c50
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h174
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_lag.c363
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mac.c592
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c1300
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h727
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mdb.c551
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mirror.c138
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mqprio.c28
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_phylink.c138
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_police.c226
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_port.c421
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c1119
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_regs.h1741
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c664
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_taprio.c528
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tbf.c85
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc.c136
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c559
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c91
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.c3008
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_ag_api.h11
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c219
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c702
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c323
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_xdp.c136
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig14
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile13
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c407
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c39
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c9
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c35
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c92
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h253
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h3452
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c64
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c70
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_pgid.c46
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c76
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_police.c53
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_pool.c81
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c203
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.h83
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c332
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c684
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.c576
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.h82
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c335
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c379
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.c174
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.h108
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c1483
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c97
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c3874
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.h18
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c471
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.h33
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c2111
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h207
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c38
-rw-r--r--drivers/net/ethernet/microchip/vcap/Kconfig53
-rw-r--r--drivers/net/ethernet/microchip/vcap/Makefile10
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_ag_api.h897
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.c3585
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.h283
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_client.h285
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c468
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.h41
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c554
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c2308
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_private.h124
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c4062
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h18
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_tc.c412
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_tc.h32
-rw-r--r--drivers/net/ethernet/microsoft/Kconfig1
-rw-r--r--drivers/net/ethernet/microsoft/mana/Makefile2
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma.h675
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c120
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c24
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.h190
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana.h540
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c226
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c962
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c97
-rw-r--r--drivers/net/ethernet/microsoft/mana/shm_channel.c2
-rw-r--r--drivers/net/ethernet/microsoft/mana/shm_channel.h21
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c52
-rw-r--r--drivers/net/ethernet/mscc/Kconfig1
-rw-r--r--drivers/net/ethernet/mscc/Makefile14
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c1888
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h63
-rw-r--r--drivers/net/ethernet/mscc/ocelot_devlink.c31
-rw-r--r--drivers/net/ethernet/mscc/ocelot_fdma.c893
-rw-r--r--drivers/net/ethernet/mscc/ocelot_fdma.h166
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c225
-rw-r--r--drivers/net/ethernet/mscc/ocelot_io.c53
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mm.c300
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mrp.c64
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c559
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.c67
-rw-r--r--drivers/net/ethernet/mscc/ocelot_police.h7
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c489
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c989
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c200
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c822
-rw-r--r--drivers/net/ethernet/mscc/vsc7514_regs.c685
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c43
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c6
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c27
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c12
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c6
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c4
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c6
-rw-r--r--drivers/net/ethernet/neterion/Kconfig24
-rw-r--r--drivers/net/ethernet/neterion/Makefile1
-rw-r--r--drivers/net/ethernet/neterion/s2io.c79
-rw-r--r--drivers/net/ethernet/neterion/s2io.h1
-rw-r--r--drivers/net/ethernet/neterion/vxge/Makefile8
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c5099
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2086
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.c1154
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-ethtool.h48
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c4832
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h518
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-reg.h4636
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c2428
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.h2290
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-version.h49
-rw-r--r--drivers/net/ethernet/netronome/Kconfig13
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile12
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/ccm_mbox.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/crypto.h23
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c613
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c86
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h11
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c776
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.h38
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c56
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c13
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h176
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c67
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c91
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c122
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c486
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c596
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c1418
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/ipsec.c39
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/nfd3.h114
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/rings.c279
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/xsk.c409
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c1580
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c21
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h136
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/rings.c198
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h14
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c87
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_hwmon.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c145
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h14
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h262
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c2444
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h147
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c66
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_dp.c466
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_dp.h219
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c725
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c127
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c26
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c111
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c174
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_xsk.h41
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c40
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h21
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c45
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c49
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h42
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c19
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h64
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c68
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/dcb.c571
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.c43
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.h46
-rw-r--r--drivers/net/ethernet/ni/nixge.c184
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c30
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c13
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c18
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c22
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c6
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c8
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c4
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c39
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c254
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h34
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.c8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c133
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h54
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c641
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h44
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c170
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c41
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c212
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c14
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c17
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c24
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c33
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c104
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c26
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c110
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c136
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h60
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c238
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c42
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c54
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c26
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c21
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c117
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c28
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c23
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h20
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c18
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c37
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c53
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c23
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c1
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig1
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-ethtool.c8
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c6
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c9
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c18
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c5
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h20
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c20
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h8
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c195
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c58
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h1
-rw-r--r--drivers/net/ethernet/rdc/r6040.c13
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c12
-rw-r--r--drivers/net/ethernet/realtek/8139too.c16
-rw-r--r--drivers/net/ethernet/realtek/atp.h4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h18
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c672
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c205
-rw-r--r--drivers/net/ethernet/renesas/Kconfig12
-rw-r--r--drivers/net/ethernet/renesas/Makefile4
-rw-r--r--drivers/net/ethernet/renesas/ravb.h14
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c193
-rw-r--r--drivers/net/ethernet/renesas/ravb_ptp.c23
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c181
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.h72
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c1962
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h1010
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c78
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c47
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c20
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c12
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c105
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/seeq/ether3.c4
-rw-r--r--drivers/net/ethernet/sfc/Kconfig16
-rw-r--r--drivers/net/ethernet/sfc/Makefile9
-rw-r--r--drivers/net/ethernet/sfc/ef10.c164
-rw-r--r--drivers/net/ethernet/sfc/ef100.c88
-rw-r--r--drivers/net/ethernet/sfc/ef100_ethtool.c9
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c183
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.h9
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c596
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.h20
-rw-r--r--drivers/net/ethernet/sfc/ef100_regs.h83
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.c499
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.h80
-rw-r--r--drivers/net/ethernet/sfc/ef100_rx.c69
-rw-r--r--drivers/net/ethernet/sfc/ef100_sriov.c72
-rw-r--r--drivers/net/ethernet/sfc/ef100_sriov.h14
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.c87
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.h3
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c26
-rw-r--r--drivers/net/ethernet/sfc/efx.c137
-rw-r--r--drivers/net/ethernet/sfc/efx.h10
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c300
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.h3
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c126
-rw-r--r--drivers/net/ethernet/sfc/efx_common.h19
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.c731
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.h47
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c36
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c57
-rw-r--r--drivers/net/ethernet/sfc/falcon/bitfield.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c23
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c22
-rw-r--r--drivers/net/ethernet/sfc/falcon/falcon.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/farch.c6
-rw-r--r--drivers/net/ethernet/sfc/falcon/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/falcon/nic.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/rx.c14
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c3
-rw-r--r--drivers/net/ethernet/sfc/filter.h42
-rw-r--r--drivers/net/ethernet/sfc/mae.c1258
-rw-r--r--drivers/net/ethernet/sfc/mae.h114
-rw-r--r--drivers/net/ethernet/sfc/mae_counter_format.h73
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c137
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h45
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.c6
-rw-r--r--drivers/net/ethernet/sfc/mcdi_filters.h1
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c2
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h8140
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol_mae.h24
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c4
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c15
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h113
-rw-r--r--drivers/net/ethernet/sfc/nic.c2
-rw-r--r--drivers/net/ethernet/sfc/nic.h4
-rw-r--r--drivers/net/ethernet/sfc/nic_common.h5
-rw-r--r--drivers/net/ethernet/sfc/ptp.c366
-rw-r--r--drivers/net/ethernet/sfc/ptp.h1
-rw-r--r--drivers/net/ethernet/sfc/rx.c2
-rw-r--r--drivers/net/ethernet/sfc/rx_common.c52
-rw-r--r--drivers/net/ethernet/sfc/rx_common.h6
-rw-r--r--drivers/net/ethernet/sfc/siena/Kconfig46
-rw-r--r--drivers/net/ethernet/sfc/siena/Makefile11
-rw-r--r--drivers/net/ethernet/sfc/siena/bitfield.h614
-rw-r--r--drivers/net/ethernet/sfc/siena/efx.c1336
-rw-r--r--drivers/net/ethernet/sfc/siena/efx.h218
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c1368
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.h45
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c1408
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.h118
-rw-r--r--drivers/net/ethernet/sfc/siena/enum.h176
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c282
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.c1340
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool_common.h60
-rw-r--r--drivers/net/ethernet/sfc/siena/farch.c (renamed from drivers/net/ethernet/sfc/farch.c)83
-rw-r--r--drivers/net/ethernet/sfc/siena/farch_regs.h2929
-rw-r--r--drivers/net/ethernet/sfc/siena/filter.h309
-rw-r--r--drivers/net/ethernet/sfc/siena/io.h310
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi.c2260
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi.h386
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_mon.c531
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_pcol.h17204
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_port.c110
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_port.h17
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_port_common.c1282
-rw-r--r--drivers/net/ethernet/sfc/siena/mcdi_port_common.h58
-rw-r--r--drivers/net/ethernet/sfc/siena/mtd.c124
-rw-r--r--drivers/net/ethernet/sfc/siena/net_driver.h1715
-rw-r--r--drivers/net/ethernet/sfc/siena/nic.c530
-rw-r--r--drivers/net/ethernet/sfc/siena/nic.h206
-rw-r--r--drivers/net/ethernet/sfc/siena/nic_common.h251
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.c2202
-rw-r--r--drivers/net/ethernet/sfc/siena/ptp.h45
-rw-r--r--drivers/net/ethernet/sfc/siena/rx.c400
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.c1094
-rw-r--r--drivers/net/ethernet/sfc/siena/rx_common.h110
-rw-r--r--drivers/net/ethernet/sfc/siena/selftest.c807
-rw-r--r--drivers/net/ethernet/sfc/siena/selftest.h52
-rw-r--r--drivers/net/ethernet/sfc/siena/siena.c (renamed from drivers/net/ethernet/sfc/siena.c)176
-rw-r--r--drivers/net/ethernet/sfc/siena/siena_sriov.c (renamed from drivers/net/ethernet/sfc/siena_sriov.c)35
-rw-r--r--drivers/net/ethernet/sfc/siena/siena_sriov.h (renamed from drivers/net/ethernet/sfc/siena_sriov.h)9
-rw-r--r--drivers/net/ethernet/sfc/siena/sriov.h83
-rw-r--r--drivers/net/ethernet/sfc/siena/tx.c392
-rw-r--r--drivers/net/ethernet/sfc/siena/tx.h40
-rw-r--r--drivers/net/ethernet/sfc/siena/tx_common.c448
-rw-r--r--drivers/net/ethernet/sfc/siena/tx_common.h39
-rw-r--r--drivers/net/ethernet/sfc/siena/vfdi.h252
-rw-r--r--drivers/net/ethernet/sfc/siena/workarounds.h28
-rw-r--r--drivers/net/ethernet/sfc/sriov.c10
-rw-r--r--drivers/net/ethernet/sfc/tc.c1526
-rw-r--r--drivers/net/ethernet/sfc/tc.h176
-rw-r--r--drivers/net/ethernet/sfc/tc_bindings.c228
-rw-r--r--drivers/net/ethernet/sfc/tc_bindings.h29
-rw-r--r--drivers/net/ethernet/sfc/tc_counters.c503
-rw-r--r--drivers/net/ethernet/sfc/tc_counters.h59
-rw-r--r--drivers/net/ethernet/sfc/tx.c22
-rw-r--r--drivers/net/ethernet/sfc/tx_common.c40
-rw-r--r--drivers/net/ethernet/sfc/tx_common.h3
-rw-r--r--drivers/net/ethernet/sfc/tx_tso.c2
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c6
-rw-r--r--drivers/net/ethernet/sis/sis190.c6
-rw-r--r--drivers/net/ethernet/sis/sis900.c6
-rw-r--r--drivers/net/ethernet/smsc/Kconfig18
-rw-r--r--drivers/net/ethernet/smsc/Makefile1
-rw-r--r--drivers/net/ethernet/smsc/epic100.c12
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c2193
-rw-r--r--drivers/net/ethernet/smsc/smc911x.h901
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c8
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h11
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c24
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c8
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.h1
-rw-r--r--drivers/net/ethernet/socionext/netsec.c28
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c85
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c95
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c157
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c380
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c219
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c478
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c171
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c65
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c69
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c388
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c53
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h109
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c141
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c201
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h92
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c105
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c71
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h188
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h44
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c65
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c1463
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c367
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c34
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c72
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c150
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c4
-rw-r--r--drivers/net/ethernet/sun/cassini.c111
-rw-r--r--drivers/net/ethernet/sun/cassini.h2
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c12
-rw-r--r--drivers/net/ethernet/sun/niu.c55
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c4
-rw-r--r--drivers/net/ethernet/sun/sungem.c11
-rw-r--r--drivers/net/ethernet/sun/sunhme.c1710
-rw-r--r--drivers/net/ethernet/sun/sunhme.h6
-rw-r--r--drivers/net/ethernet/sun/sunqe.c4
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c12
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c4
-rw-r--r--drivers/net/ethernet/sunplus/Kconfig32
-rw-r--r--drivers/net/ethernet/sunplus/Makefile6
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_define.h270
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_desc.c228
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_desc.h19
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c565
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_int.c273
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_int.h13
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mac.c274
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mac.h18
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mdio.c125
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mdio.h12
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_phy.c90
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_phy.h12
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_register.h86
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c4
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c6
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c7
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac.h3
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c50
-rw-r--r--drivers/net/ethernet/ti/Kconfig2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c73
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c756
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h17
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c328
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.h12
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.c4
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c278
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.h15
-rw-r--r--drivers/net/ethernet/ti/cpmac.c18
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c3
-rw-r--r--drivers/net/ethernet/ti/cpsw.c69
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c76
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h3
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c14
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c77
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c268
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h20
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.c4
-rw-r--r--drivers/net/ethernet/ti/cpts.c24
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c112
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c296
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c19
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c12
-rw-r--r--drivers/net/ethernet/ti/tlan.c6
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c47
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h5
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c16
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.h1
-rw-r--r--drivers/net/ethernet/toshiba/spider_net_ethtool.c12
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c8
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c65
-rw-r--r--drivers/net/ethernet/vertexcom/Kconfig25
-rw-r--r--drivers/net/ethernet/vertexcom/Makefile6
-rw-r--r--drivers/net/ethernet/vertexcom/mse102x.c775
-rw-r--r--drivers/net/ethernet/via/via-rhine.c14
-rw-r--r--drivers/net/ethernet/via/via-velocity.c14
-rw-r--r--drivers/net/ethernet/via/via-velocity.h5
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig54
-rw-r--r--drivers/net/ethernet/wangxun/Makefile8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile7
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c18
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c1730
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h35
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c2007
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h32
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h726
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/Makefile9
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c22
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h9
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c97
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h13
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c718
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c286
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h12
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h154
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile11
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c19
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h9
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c304
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h11
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c749
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h102
-rw-r--r--drivers/net/ethernet/wiznet/w5100-spi.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c10
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c8
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h185
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c115
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_mdio.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h135
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c1059
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c83
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c106
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c7
-rw-r--r--drivers/net/ethernet/xscale/Kconfig4
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c109
-rw-r--r--drivers/net/ethernet/xscale/ptp_ixp46x.c22
-rw-r--r--drivers/net/fddi/defxx.c22
-rw-r--r--drivers/net/fddi/skfp/fplustm.c2
-rw-r--r--drivers/net/fddi/skfp/h/hwmtm.h2
-rw-r--r--drivers/net/fddi/skfp/hwmtm.c6
-rw-r--r--drivers/net/fddi/skfp/rmt.c6
-rw-r--r--drivers/net/fddi/skfp/smt.c16
-rw-r--r--drivers/net/fjes/fjes_ethtool.c6
-rw-r--r--drivers/net/fjes/fjes_main.c1154
-rw-r--r--drivers/net/geneve.c138
-rw-r--r--drivers/net/gtp.c568
-rw-r--r--drivers/net/hamradio/6pack.c17
-rw-r--r--drivers/net/hamradio/Kconfig36
-rw-r--r--drivers/net/hamradio/Makefile1
-rw-r--r--drivers/net/hamradio/baycom_epp.c16
-rw-r--r--drivers/net/hamradio/bpqether.c2
-rw-r--r--drivers/net/hamradio/dmascc.c1449
-rw-r--r--drivers/net/hamradio/hdlcdrv.c5
-rw-r--r--drivers/net/hamradio/mkiss.c8
-rw-r--r--drivers/net/hamradio/scc.c7
-rw-r--r--drivers/net/hamradio/yam.c6
-rw-r--r--drivers/net/hippi/rrunner.c3
-rw-r--r--drivers/net/hyperv/hyperv_net.h75
-rw-r--r--drivers/net/hyperv/netvsc.c143
-rw-r--r--drivers/net/hyperv/netvsc_bpf.c103
-rw-r--r--drivers/net/hyperv/netvsc_drv.c206
-rw-r--r--drivers/net/hyperv/rndis_filter.c14
-rw-r--r--drivers/net/ieee802154/Kconfig7
-rw-r--r--drivers/net/ieee802154/adf7242.c10
-rw-r--r--drivers/net/ieee802154/at86rf230.c268
-rw-r--r--drivers/net/ieee802154/atusb.c258
-rw-r--r--drivers/net/ieee802154/ca8210.c207
-rw-r--r--drivers/net/ieee802154/cc2520.c143
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c179
-rw-r--r--drivers/net/ieee802154/mcr20a.c20
-rw-r--r--drivers/net/ieee802154/mrf24j40.c4
-rw-r--r--drivers/net/ifb.c146
-rw-r--r--drivers/net/ipa/Kconfig2
-rw-r--r--drivers/net/ipa/Makefile23
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.1.c (renamed from drivers/net/ipa/ipa_data-v3.1.c)31
-rw-r--r--drivers/net/ipa/data/ipa_data-v3.5.1.c (renamed from drivers/net/ipa/ipa_data-v3.5.1.c)43
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.11.c (renamed from drivers/net/ipa/ipa_data-v4.11.c)29
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.2.c (renamed from drivers/net/ipa/ipa_data-v4.2.c)29
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.5.c (renamed from drivers/net/ipa/ipa_data-v4.5.c)36
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.7.c405
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.9.c (renamed from drivers/net/ipa/ipa_data-v4.9.c)29
-rw-r--r--drivers/net/ipa/data/ipa_data-v5.0.c481
-rw-r--r--drivers/net/ipa/gsi.c934
-rw-r--r--drivers/net/ipa/gsi.h79
-rw-r--r--drivers/net/ipa/gsi_private.h38
-rw-r--r--drivers/net/ipa/gsi_reg.c161
-rw-r--r--drivers/net/ipa/gsi_reg.h629
-rw-r--r--drivers/net/ipa/gsi_trans.c442
-rw-r--r--drivers/net/ipa/gsi_trans.h56
-rw-r--r--drivers/net/ipa/ipa.h40
-rw-r--r--drivers/net/ipa/ipa_cmd.c191
-rw-r--r--drivers/net/ipa/ipa_cmd.h29
-rw-r--r--drivers/net/ipa/ipa_data.h76
-rw-r--r--drivers/net/ipa/ipa_endpoint.c1487
-rw-r--r--drivers/net/ipa/ipa_endpoint.h120
-rw-r--r--drivers/net/ipa/ipa_interrupt.c178
-rw-r--r--drivers/net/ipa/ipa_interrupt.h66
-rw-r--r--drivers/net/ipa/ipa_main.c414
-rw-r--r--drivers/net/ipa/ipa_mem.c55
-rw-r--r--drivers/net/ipa/ipa_mem.h8
-rw-r--r--drivers/net/ipa/ipa_modem.c25
-rw-r--r--drivers/net/ipa/ipa_modem.h5
-rw-r--r--drivers/net/ipa/ipa_power.c268
-rw-r--r--drivers/net/ipa/ipa_power.h21
-rw-r--r--drivers/net/ipa/ipa_qmi.c13
-rw-r--r--drivers/net/ipa/ipa_qmi.h2
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.c30
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.h61
-rw-r--r--drivers/net/ipa/ipa_reg.c133
-rw-r--r--drivers/net/ipa/ipa_reg.h1152
-rw-r--r--drivers/net/ipa/ipa_resource.c65
-rw-r--r--drivers/net/ipa/ipa_smp2p.c2
-rw-r--r--drivers/net/ipa/ipa_smp2p.h2
-rw-r--r--drivers/net/ipa/ipa_sysfs.c79
-rw-r--r--drivers/net/ipa/ipa_sysfs.h3
-rw-r--r--drivers/net/ipa/ipa_table.c461
-rw-r--r--drivers/net/ipa/ipa_table.h29
-rw-r--r--drivers/net/ipa/ipa_uc.c37
-rw-r--r--drivers/net/ipa/ipa_uc.h10
-rw-r--r--drivers/net/ipa/ipa_version.h40
-rw-r--r--drivers/net/ipa/reg.h134
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v3.1.c291
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v3.5.1.c303
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.0.c308
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.11.c313
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.5.c311
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.9.c312
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v5.0.c317
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.1.c448
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.5.1.c458
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.11.c514
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.2.c458
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.5.c535
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.7.c506
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.9.c511
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v5.0.c564
-rw-r--r--drivers/net/ipvlan/ipvlan.h11
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c25
-rw-r--r--drivers/net/ipvlan/ipvlan_l3s.c1
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c37
-rw-r--r--drivers/net/ipvlan/ipvtap.c9
-rw-r--r--drivers/net/loopback.c15
-rw-r--r--drivers/net/macsec.c459
-rw-r--r--drivers/net/macvlan.c178
-rw-r--r--drivers/net/macvtap.c15
-rw-r--r--drivers/net/mctp/Kconfig30
-rw-r--r--drivers/net/mctp/Makefile2
-rw-r--r--drivers/net/mctp/mctp-i2c.c1097
-rw-r--r--drivers/net/mctp/mctp-serial.c524
-rw-r--r--drivers/net/mdio/Kconfig16
-rw-r--r--drivers/net/mdio/Makefile1
-rw-r--r--drivers/net/mdio/acpi_mdio.c10
-rw-r--r--drivers/net/mdio/fwnode_mdio.c77
-rw-r--r--drivers/net/mdio/mdio-aspeed.c121
-rw-r--r--drivers/net/mdio/mdio-bitbang.c77
-rw-r--r--drivers/net/mdio/mdio-cavium.c111
-rw-r--r--drivers/net/mdio/mdio-cavium.h9
-rw-r--r--drivers/net/mdio/mdio-i2c.c326
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c160
-rw-r--r--drivers/net/mdio/mdio-ipq8064.c10
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c278
-rw-r--r--drivers/net/mdio/mdio-mux-bcm-iproc.c54
-rw-r--r--drivers/net/mdio/mdio-mux-bcm6368.c2
-rw-r--r--drivers/net/mdio/mdio-mux-meson-g12a.c77
-rw-r--r--drivers/net/mdio/mdio-mux-meson-gxl.c164
-rw-r--r--drivers/net/mdio/mdio-mux-mmioreg.c9
-rw-r--r--drivers/net/mdio/mdio-mux-multiplexer.c9
-rw-r--r--drivers/net/mdio/mdio-mux.c4
-rw-r--r--drivers/net/mdio/mdio-mvusb.c17
-rw-r--r--drivers/net/mdio/mdio-octeon.c6
-rw-r--r--drivers/net/mdio/mdio-thunder.c7
-rw-r--r--drivers/net/mdio/mdio-xgene.c3
-rw-r--r--drivers/net/mdio/of_mdio.c20
-rw-r--r--drivers/net/mhi_net.c12
-rw-r--r--drivers/net/net_failover.c14
-rw-r--r--drivers/net/netconsole.c33
-rw-r--r--drivers/net/netdevsim/Makefile2
-rw-r--r--drivers/net/netdevsim/bpf.c12
-rw-r--r--drivers/net/netdevsim/bus.c32
-rw-r--r--drivers/net/netdevsim/dev.c315
-rw-r--r--drivers/net/netdevsim/ethtool.c8
-rw-r--r--drivers/net/netdevsim/fib.c116
-rw-r--r--drivers/net/netdevsim/health.c20
-rw-r--r--drivers/net/netdevsim/hwstats.c486
-rw-r--r--drivers/net/netdevsim/ipsec.c19
-rw-r--r--drivers/net/netdevsim/netdev.c11
-rw-r--r--drivers/net/netdevsim/netdevsim.h28
-rw-r--r--drivers/net/ntb_netdev.c21
-rw-r--r--drivers/net/pcs/Kconfig25
-rw-r--r--drivers/net/pcs/Makefile3
-rw-r--r--drivers/net/pcs/pcs-altera-tse.c160
-rw-r--r--drivers/net/pcs/pcs-lynx.c118
-rw-r--r--drivers/net/pcs/pcs-mtk-lynxi.c305
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c527
-rw-r--r--drivers/net/pcs/pcs-xpcs.c242
-rw-r--r--drivers/net/pcs/pcs-xpcs.h1
-rw-r--r--drivers/net/phy/Kconfig51
-rw-r--r--drivers/net/phy/Makefile6
-rw-r--r--drivers/net/phy/adin.c42
-rw-r--r--drivers/net/phy/adin1100.c297
-rw-r--r--drivers/net/phy/aquantia_hwmon.c2
-rw-r--r--drivers/net/phy/aquantia_main.c200
-rw-r--r--drivers/net/phy/at803x.c242
-rw-r--r--drivers/net/phy/ax88796b.c6
-rw-r--r--drivers/net/phy/bcm-phy-lib.c2
-rw-r--r--drivers/net/phy/bcm-phy-lib.h24
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c944
-rw-r--r--drivers/net/phy/bcm54140.c2
-rw-r--r--drivers/net/phy/bcm7xxx.c24
-rw-r--r--drivers/net/phy/bcm87xx.c36
-rw-r--r--drivers/net/phy/broadcom.c107
-rw-r--r--drivers/net/phy/dp83640.c22
-rw-r--r--drivers/net/phy/dp83822.c26
-rw-r--r--drivers/net/phy/dp83867.c168
-rw-r--r--drivers/net/phy/dp83869.c48
-rw-r--r--drivers/net/phy/dp83td510.c258
-rw-r--r--drivers/net/phy/fixed_phy.c7
-rw-r--r--drivers/net/phy/marvell-88x2222.c9
-rw-r--r--drivers/net/phy/marvell.c246
-rw-r--r--drivers/net/phy/marvell10g.c137
-rw-r--r--drivers/net/phy/mdio-open-alliance.h46
-rw-r--r--drivers/net/phy/mdio_bus.c494
-rw-r--r--drivers/net/phy/mdio_device.c2
-rw-r--r--drivers/net/phy/mdio_devres.c11
-rw-r--r--drivers/net/phy/mediatek-ge.c3
-rw-r--r--drivers/net/phy/meson-gxl.c82
-rw-r--r--drivers/net/phy/micrel.c3049
-rw-r--r--drivers/net/phy/microchip.c42
-rw-r--r--drivers/net/phy/microchip_t1.c511
-rw-r--r--drivers/net/phy/microchip_t1s.c138
-rw-r--r--drivers/net/phy/motorcomm.c2078
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.c157
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.h2
-rw-r--r--drivers/net/phy/mscc/mscc_main.c29
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c5
-rw-r--r--drivers/net/phy/mxl-gpy.c381
-rw-r--r--drivers/net/phy/ncn26000.c171
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c238
-rw-r--r--drivers/net/phy/nxp-cbtx.c227
-rw-r--r--drivers/net/phy/nxp-tja11xx.c96
-rw-r--r--drivers/net/phy/phy-c45.c839
-rw-r--r--drivers/net/phy/phy-core.c113
-rw-r--r--drivers/net/phy/phy.c550
-rw-r--r--drivers/net/phy/phy_device.c309
-rw-r--r--drivers/net/phy/phylink.c1239
-rw-r--r--drivers/net/phy/realtek.c44
-rw-r--r--drivers/net/phy/sfp-bus.c191
-rw-r--r--drivers/net/phy/sfp.c735
-rw-r--r--drivers/net/phy/sfp.h11
-rw-r--r--drivers/net/phy/smsc.c261
-rw-r--r--drivers/net/phy/spi_ks8995.c75
-rw-r--r--drivers/net/phy/xilinx_gmii2rgmii.c1
-rw-r--r--drivers/net/plip/plip.c8
-rw-r--r--drivers/net/ppp/ppp_async.c3
-rw-r--r--drivers/net/ppp/ppp_generic.c20
-rw-r--r--drivers/net/ppp/ppp_synctty.c3
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/pse-pd/Kconfig23
-rw-r--r--drivers/net/pse-pd/Makefile6
-rw-r--r--drivers/net/pse-pd/pse_core.c314
-rw-r--r--drivers/net/pse-pd/pse_regulator.c147
-rw-r--r--drivers/net/rionet.c13
-rw-r--r--drivers/net/sb1000.c2
-rw-r--r--drivers/net/slip/slip.c8
-rw-r--r--drivers/net/sungem_phy.c7
-rw-r--r--drivers/net/tap.c89
-rw-r--r--drivers/net/team/team.c64
-rw-r--r--drivers/net/team/team_mode_loadbalance.c4
-rw-r--r--drivers/net/team/team_mode_random.c2
-rw-r--r--drivers/net/thunderbolt/Kconfig12
-rw-r--r--drivers/net/thunderbolt/Makefile6
-rw-r--r--drivers/net/thunderbolt/main.c (renamed from drivers/net/thunderbolt.c)183
-rw-r--r--drivers/net/thunderbolt/trace.c10
-rw-r--r--drivers/net/thunderbolt/trace.h141
-rw-r--r--drivers/net/tun.c370
-rw-r--r--drivers/net/usb/Kconfig6
-rw-r--r--drivers/net/usb/aqc111.c13
-rw-r--r--drivers/net/usb/asix.h19
-rw-r--r--drivers/net/usb/asix_common.c130
-rw-r--r--drivers/net/usb/asix_devices.c291
-rw-r--r--drivers/net/usb/ax88179_178a.c518
-rw-r--r--drivers/net/usb/catc.c52
-rw-r--r--drivers/net/usb/cdc_eem.c2
-rw-r--r--drivers/net/usb/cdc_ether.c134
-rw-r--r--drivers/net/usb/cdc_mbim.c11
-rw-r--r--drivers/net/usb/cdc_ncm.c49
-rw-r--r--drivers/net/usb/cdc_subset.c10
-rw-r--r--drivers/net/usb/gl620a.c2
-rw-r--r--drivers/net/usb/hso.c5
-rw-r--r--drivers/net/usb/ipheth.c6
-rw-r--r--drivers/net/usb/kalmia.c8
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/lan78xx.c1262
-rw-r--r--drivers/net/usb/mcs7830.c12
-rw-r--r--drivers/net/usb/pegasus.c6
-rw-r--r--drivers/net/usb/plusb.c12
-rw-r--r--drivers/net/usb/qmi_wwan.c21
-rw-r--r--drivers/net/usb/r8152.c438
-rw-r--r--drivers/net/usb/rndis_host.c57
-rw-r--r--drivers/net/usb/rtl8150.c4
-rw-r--r--drivers/net/usb/sierra_net.c6
-rw-r--r--drivers/net/usb/smsc75xx.c7
-rw-r--r--drivers/net/usb/smsc95xx.c455
-rw-r--r--drivers/net/usb/sr9700.c2
-rw-r--r--drivers/net/usb/sr9800.h2
-rw-r--r--drivers/net/usb/usbnet.c92
-rw-r--r--drivers/net/usb/zaurus.c12
-rw-r--r--drivers/net/veth.c464
-rw-r--r--drivers/net/virtio_net.c1432
-rw-r--r--drivers/net/vmxnet3/Makefile2
-rw-r--r--drivers/net/vmxnet3/upt1_defs.h2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h80
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c413
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c167
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h24
-rw-r--r--drivers/net/vrf.c49
-rw-r--r--drivers/net/vxlan/Makefile7
-rw-r--r--drivers/net/vxlan/vxlan_core.c (renamed from drivers/net/vxlan.c)714
-rw-r--r--drivers/net/vxlan/vxlan_mdb.c1462
-rw-r--r--drivers/net/vxlan/vxlan_multicast.c272
-rw-r--r--drivers/net/vxlan/vxlan_private.h246
-rw-r--r--drivers/net/vxlan/vxlan_vnifilter.c1005
-rw-r--r--drivers/net/wan/Kconfig75
-rw-r--r--drivers/net/wan/Makefile5
-rw-r--r--drivers/net/wan/cosa.c2052
-rw-r--r--drivers/net/wan/cosa.h104
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/farsync.h2
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c81
-rw-r--r--drivers/net/wan/hd64572.c3
-rw-r--r--drivers/net/wan/hostess_sv11.c336
-rw-r--r--drivers/net/wan/ixp4xx_hss.c300
-rw-r--r--drivers/net/wan/lapbether.c5
-rw-r--r--drivers/net/wan/lmc/Makefile18
-rw-r--r--drivers/net/wan/lmc/lmc.h33
-rw-r--r--drivers/net/wan/lmc/lmc_debug.c65
-rw-r--r--drivers/net/wan/lmc/lmc_debug.h52
-rw-r--r--drivers/net/wan/lmc/lmc_ioctl.h255
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2008
-rw-r--r--drivers/net/wan/lmc/lmc_media.c1206
-rw-r--r--drivers/net/wan/lmc/lmc_proto.c106
-rw-r--r--drivers/net/wan/lmc/lmc_proto.h18
-rw-r--r--drivers/net/wan/lmc/lmc_var.h468
-rw-r--r--drivers/net/wan/sealevel.c352
-rw-r--r--drivers/net/wan/slic_ds26522.c5
-rw-r--r--drivers/net/wan/z85230.c1641
-rw-r--r--drivers/net/wan/z85230.h407
-rw-r--r--drivers/net/wireguard/allowedips.c9
-rw-r--r--drivers/net/wireguard/device.c44
-rw-r--r--drivers/net/wireguard/netlink.c14
-rw-r--r--drivers/net/wireguard/noise.c45
-rw-r--r--drivers/net/wireguard/peer.c3
-rw-r--r--drivers/net/wireguard/queueing.c3
-rw-r--r--drivers/net/wireguard/queueing.h6
-rw-r--r--drivers/net/wireguard/receive.c9
-rw-r--r--drivers/net/wireguard/selftest/allowedips.c42
-rw-r--r--drivers/net/wireguard/selftest/ratelimiter.c4
-rw-r--r--drivers/net/wireguard/socket.c5
-rw-r--r--drivers/net/wireguard/timers.c12
-rw-r--r--drivers/net/wireless/Kconfig77
-rw-r--r--drivers/net/wireless/Makefile15
-rw-r--r--drivers/net/wireless/admtek/adm8211.c3
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c32
-rw-r--r--drivers/net/wireless/ath/ath.h12
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c69
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c182
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c20
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c153
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h412
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c372
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c102
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c21
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h36
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c300
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c31
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c49
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c126
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h102
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h42
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c28
-rw-r--r--drivers/net/wireless/ath/ath10k/swap.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c19
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c26
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h18
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c7
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c654
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.h25
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c63
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h21
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c994
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h274
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c65
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.c12
-rw-r--r--drivers/net/wireless/ath/ath11k/debug.h7
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c1016
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h193
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c88
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h116
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c185
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c73
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h93
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c1188
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c338
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c95
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h48
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h28
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c559
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.h219
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h11
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.c79
-rw-r--r--drivers/net/wireless/ath/ath11k/htc.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c766
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h112
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c2458
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h20
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c348
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h17
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c1041
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h25
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c820
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.h54
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c412
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h12
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c634
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h58
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c233
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/rx_desc.h8
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c55
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h225
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c2649
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h1116
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.c804
-rw-r--r--drivers/net/wireless/ath/ath11k/wow.h45
-rw-r--r--drivers/net/wireless/ath/ath12k/Kconfig34
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile27
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c964
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.h184
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c939
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h823
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.c357
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.c102
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h67
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c1577
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h1816
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c2597
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h106
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c4242
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h145
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c1215
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h41
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c2222
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h1142
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h2961
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c850
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h704
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.c145
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.h194
-rw-r--r--drivers/net/wireless/ath/ath12k/hif.h144
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.c789
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.h316
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c1041
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h312
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c7097
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h76
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c616
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.h46
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c1401
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h141
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c342
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h67
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c3089
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h569
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c732
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h95
-rw-r--r--drivers/net/wireless/ath/ath12k/rx_desc.h1441
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.c10
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.h152
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c6606
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h4803
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c10
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c54
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c23
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c5
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c16
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h19
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h14
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c17
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c22
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h42
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c30
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c90
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c149
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c66
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c53
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h84
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_wow.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c15
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/channel.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug_sta.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c132
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h34
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c42
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c52
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c128
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c42
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h158
-rw-r--r--drivers/net/wireless/ath/ath9k/rng.c79
-rw-r--r--drivers/net/wireless/ath/ath9k/tx99.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c122
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h1
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h8
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c82
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c22
-rw-r--r--drivers/net/wireless/ath/carl9170/wlan.h4
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c6
-rw-r--r--drivers/net/wireless/ath/hw.c2
-rw-r--r--drivers/net/wireless/ath/key.c2
-rw-r--r--drivers/net/wireless/ath/regd.c10
-rw-r--r--drivers/net/wireless/ath/regd.h1
-rw-r--r--drivers/net/wireless/ath/regd_common.h3
-rw-r--r--drivers/net/wireless/ath/spectral_common.h4
-rw-r--r--drivers/net/wireless/ath/trace.h7
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Makefile3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.c39
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c119
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/firmware.c125
-rw-r--r--drivers/net/wireless/ath/wcn36xx/firmware.h84
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h112
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c323
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c286
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h10
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c110
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h16
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c24
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c68
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c18
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h1
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c8
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c3
-rw-r--r--drivers/net/wireless/atmel/atmel.c166
-rw-r--r--drivers/net/wireless/broadcom/b43/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c22
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_common.h2
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c8
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/dma.c8
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/leds.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c10
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/phy.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/radio.c17
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/acpi.c51
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/Makefile12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/module.c27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/vops.h11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c182
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h58
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c1049
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c37
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c206
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c61
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/Makefile12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/module.c27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/vops.h11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c56
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c152
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c34
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h182
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c199
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h47
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c35
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c43
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c34
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c797
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pno.c20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c76
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c50
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/Makefile12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/module.c27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/vops.h11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/types.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h13
-rw-r--r--drivers/net/wireless/cisco/Kconfig2
-rw-r--r--drivers/net/wireless/cisco/airo.c246
-rw-r--r--drivers/net/wireless/intel/Makefile1
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c85
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c176
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h5
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h13
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_rx.c10
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c31
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c17
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c26
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c6
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c41
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c370
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c302
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h65
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h175
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h479
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h76
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h119
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/filter.h90
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h452
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h133
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h98
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h215
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h120
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h92
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/system.h (renamed from drivers/net/wireless/intel/iwlwifi/fw/api/soc.h)16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c395
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c223
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c215
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c165
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c310
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c363
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h117
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/Makefile8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/internal.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h529
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c2187
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/net.c412
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/sap.h786
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/trace-data.h82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/trace.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/trace.h76
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/binding.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c112
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c961
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c525
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c92
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c781
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c294
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c642
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c2965
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c303
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c309
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c1101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c1167
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h702
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c399
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c151
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ptp.c326
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rfi.c61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c431
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c146
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c332
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c1170
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c507
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c63
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c1024
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h141
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-sync.c173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-sync.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c323
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c186
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c153
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c789
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c324
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c119
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.h21
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c18
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_download.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c5
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ioctl.c246
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c24
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_wlan.h14
-rw-r--r--drivers/net/wireless/intersil/orinoco/airport.c1
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes.c1
-rw-r--r--drivers/net/wireless/intersil/orinoco/hw.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/wext.c131
-rw-r--r--drivers/net/wireless/intersil/p54/eeprom.h4
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c6
-rw-r--r--drivers/net/wireless/intersil/p54/main.c18
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c7
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c6
-rw-r--r--drivers/net/wireless/legacy/Kconfig55
-rw-r--r--drivers/net/wireless/legacy/Makefile6
-rw-r--r--drivers/net/wireless/legacy/ray_cs.c (renamed from drivers/net/wireless/ray_cs.c)28
-rw-r--r--drivers/net/wireless/legacy/ray_cs.h (renamed from drivers/net/wireless/ray_cs.h)0
-rw-r--r--drivers/net/wireless/legacy/rayctl.h (renamed from drivers/net/wireless/rayctl.h)0
-rw-r--r--drivers/net/wireless/legacy/rndis_wlan.c (renamed from drivers/net/wireless/rndis_wlan.c)57
-rw-r--r--drivers/net/wireless/legacy/wl3501.h (renamed from drivers/net/wireless/wl3501.h)0
-rw-r--r--drivers/net/wireless/legacy/wl3501_cs.c (renamed from drivers/net/wireless/wl3501_cs.c)10
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c91
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/ethtool.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/host.h16
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c6
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c6
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c10
-rw-r--r--drivers/net/wireless/marvell/libertas/rx.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/tx.c5
-rw-r--r--drivers/net/wireless/marvell/libertas/types.h21
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/libertas_tf.h36
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11ac.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11ac.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c22
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c20
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_aggr.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_aggr.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/Kconfig5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/Makefile13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c46
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c19
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/decl.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ethtool.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h45
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c25
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c34
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie_quirks.c18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie_quirks.h18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c69
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h21
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c38
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c42
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c33
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_rx.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/txrx.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c29
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c16
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.h14
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c80
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile1
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c529
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h10
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c395
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h358
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c50
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c93
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c505
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h83
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c113
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c519
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h141
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h62
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c72
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c109
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c93
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h241
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h (renamed from drivers/net/wireless/mediatek/mt76/mt7921/mac.h)452
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c999
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c1250
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h915
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c49
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c72
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Kconfig14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.c410
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.h136
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c911
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c692
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c279
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h49
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c721
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c1958
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h359
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c481
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c2960
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h1035
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c1068
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h283
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c276
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h1031
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c1257
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c169
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Kconfig11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c340
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h105
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c80
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/dma.c190
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/eeprom.h27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c250
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c1036
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c721
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c1333
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h254
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h272
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c265
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c230
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c45
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h79
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c118
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c91
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c66
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/testmode.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c393
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb_mac.c255
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Kconfig14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Makefile8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.c268
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.h97
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c971
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c424
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c258
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h74
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c913
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c2724
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.h358
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c1390
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c3684
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h683
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c394
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h567
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/pci.c222
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h589
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c47
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c60
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c58
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c150
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c195
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h6
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c3
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c3
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mt7601u.h3
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c9
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/tx.c7
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c317
-rw-r--r--drivers/net/wireless/microchip/wilc1000/fw.h21
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c260
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h18
-rw-r--r--drivers/net/wireless/microchip/wilc1000/mon.c6
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c57
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h18
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c57
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c97
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c73
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.c6
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_if.h20
-rw-r--r--drivers/net/wireless/purelifi/Kconfig17
-rw-r--r--drivers/net/wireless/purelifi/Makefile2
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/Kconfig14
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/Makefile3
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/chip.c98
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/chip.h70
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/firmware.c276
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/intf.h52
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c755
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.h184
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c892
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.h198
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c32
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c23
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c16
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c14
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c9
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.c9
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800.h3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c1800
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h21
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h12
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00config.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c19
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c9
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c6
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.h2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c16
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c8
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Kconfig8
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Makefile3
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h575
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c1899
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c1761
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c93
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c260
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c1887
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c149
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c116
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c1661
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h120
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c44
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c56
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c51
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c31
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c27
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c37
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c26
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c36
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c52
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig83
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile28
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c33
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.h7
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c383
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.h19
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c138
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c572
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h185
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h9
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c115
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c203
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c647
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h206
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c159
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c136
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c12
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h15
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c36
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.h15
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723de.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723du.c36
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c82
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.h31
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c_table.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821ce.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cs.c36
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c50
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c44
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822be.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bs.c36
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bu.c90
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c101
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822ce.h10
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cs.c36
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cu.c44
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/sar.c114
-rw-r--r--drivers/net/wireless/realtek/rtw88/sar.h22
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c1394
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.h178
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c118
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h38
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c924
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.h107
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c107
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.h12
-rw-r--r--drivers/net/wireless/realtek/rtw88/wow.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig36
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile34
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c190
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h498
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c302
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h67
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c4181
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h19
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c1936
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h2022
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c1352
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h9
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.c160
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c2947
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h4148
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c2526
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h337
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c490
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c1724
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h557
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c2067
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h300
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c120
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h21
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h2759
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c514
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.c534
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.h38
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.c14824
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_table.h21
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c546
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c185
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c2744
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h49
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_table.c13717
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_table.h11
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c88
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c2607
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.h137
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c4174
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h25
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk_table.c794
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk_table.h62
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_table.c22892
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_table.h21
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c90
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c2906
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.h87
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c4368
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h29
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c781
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h67
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.c37495
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.h22
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c93
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c142
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c268
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h205
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h41
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c842
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h21
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_coex.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c7
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c21
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c53
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c18
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c9
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h2
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h2
-rw-r--r--drivers/net/wireless/silabs/Kconfig18
-rw-r--r--drivers/net/wireless/silabs/Makefile3
-rw-r--r--drivers/net/wireless/silabs/wfx/Kconfig13
-rw-r--r--drivers/net/wireless/silabs/wfx/Makefile25
-rw-r--r--drivers/net/wireless/silabs/wfx/bh.c324
-rw-r--r--drivers/net/wireless/silabs/wfx/bh.h34
-rw-r--r--drivers/net/wireless/silabs/wfx/bus.h36
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_sdio.c273
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_spi.c284
-rw-r--r--drivers/net/wireless/silabs/wfx/data_rx.c93
-rw-r--r--drivers/net/wireless/silabs/wfx/data_rx.h17
-rw-r--r--drivers/net/wireless/silabs/wfx/data_tx.c569
-rw-r--r--drivers/net/wireless/silabs/wfx/data_tx.h66
-rw-r--r--drivers/net/wireless/silabs/wfx/debug.c331
-rw-r--r--drivers/net/wireless/silabs/wfx/debug.h19
-rw-r--r--drivers/net/wireless/silabs/wfx/fwio.c388
-rw-r--r--drivers/net/wireless/silabs/wfx/fwio.h15
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_api_cmd.h553
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_api_general.h252
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_api_mib.h346
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_rx.c391
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_rx.h17
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx.c494
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx.h61
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx_mib.c307
-rw-r--r--drivers/net/wireless/silabs/wfx/hif_tx_mib.h48
-rw-r--r--drivers/net/wireless/silabs/wfx/hwio.c332
-rw-r--r--drivers/net/wireless/silabs/wfx/hwio.h78
-rw-r--r--drivers/net/wireless/silabs/wfx/key.c227
-rw-r--r--drivers/net/wireless/silabs/wfx/key.h19
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c494
-rw-r--r--drivers/net/wireless/silabs/wfx/main.h41
-rw-r--r--drivers/net/wireless/silabs/wfx/queue.c298
-rw-r--r--drivers/net/wireless/silabs/wfx/queue.h44
-rw-r--r--drivers/net/wireless/silabs/wfx/scan.c147
-rw-r--r--drivers/net/wireless/silabs/wfx/scan.h22
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c817
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.h71
-rw-r--r--drivers/net/wireless/silabs/wfx/traces.h496
-rw-r--r--drivers/net/wireless/silabs/wfx/wfx.h167
-rw-r--r--drivers/net/wireless/st/cw1200/bh.c10
-rw-r--r--drivers/net/wireless/st/cw1200/cw1200_spi.c6
-rw-r--r--drivers/net/wireless/st/cw1200/main.c1
-rw-r--r--drivers/net/wireless/st/cw1200/queue.c19
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c55
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h5
-rw-r--r--drivers/net/wireless/st/cw1200/txrx.c12
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.c4
-rw-r--r--drivers/net/wireless/ti/Kconfig8
-rw-r--r--drivers/net/wireless/ti/Makefile3
-rw-r--r--drivers/net/wireless/ti/wilink_platform_data.c35
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h2
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c22
-rw-r--r--drivers/net/wireless/ti/wl1251/init.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/io.c20
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c24
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c8
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c80
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.c15
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h1
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c3
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c18
-rw-r--r--drivers/net/wireless/ti/wl18xx/event.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c22
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c52
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c298
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c12
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c18
-rw-r--r--drivers/net/wireless/virtual/Kconfig20
-rw-r--r--drivers/net/wireless/virtual/Makefile3
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c (renamed from drivers/net/wireless/mac80211_hwsim.c)2364
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h (renamed from drivers/net/wireless/mac80211_hwsim.h)63
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c (renamed from drivers/net/wireless/virt_wifi.c)2
-rw-r--r--drivers/net/wireless/zydas/zd1201.c177
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c14
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_rf.h3
-rw-r--r--drivers/net/wwan/Kconfig42
-rw-r--r--drivers/net/wwan/Makefile2
-rw-r--r--drivers/net/wwan/iosm/Makefile4
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_coredump.c1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_coredump.h5
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_debugfs.c30
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_debugfs.h17
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c72
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.h15
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.c23
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.h9
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.c8
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mmio.h6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux.c50
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux.h135
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux_codec.c772
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_mux_codec.h142
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c69
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.h1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_port.c5
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_protocol.h2
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c10
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.c182
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_trace.h74
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_wwan.c49
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_wwan.h10
-rw-r--r--drivers/net/wwan/mhi_wwan_ctrl.c3
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c13
-rw-r--r--drivers/net/wwan/qcom_bam_dmux.c907
-rw-r--r--drivers/net/wwan/rpmsg_wwan_ctrl.c3
-rw-r--r--drivers/net/wwan/t7xx/Makefile21
-rw-r--r--drivers/net/wwan/t7xx/t7xx_cldma.c281
-rw-r--r--drivers/net/wwan/t7xx/t7xx_cldma.h180
-rw-r--r--drivers/net/wwan/t7xx/t7xx_dpmaif.c1281
-rw-r--r--drivers/net/wwan/t7xx/t7xx_dpmaif.h179
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c1341
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.h127
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c583
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif.h202
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c1168
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.h117
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c683
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.h78
-rw-r--r--drivers/net/wwan/t7xx/t7xx_mhccif.c122
-rw-r--r--drivers/net/wwan/t7xx/t7xx_mhccif.h37
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.c729
-rw-r--r--drivers/net/wwan/t7xx/t7xx_modem_ops.h88
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c516
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.h60
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c763
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.h123
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pcie_mac.c262
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pcie_mac.h31
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port.h142
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_ctrl_msg.c273
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.c521
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_proxy.h102
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_trace.c116
-rw-r--r--drivers/net/wwan/t7xx/t7xx_port_wwan.c178
-rw-r--r--drivers/net/wwan/t7xx/t7xx_reg.h350
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.c550
-rw-r--r--drivers/net/wwan/t7xx/t7xx_state_monitor.h135
-rw-r--r--drivers/net/wwan/wwan_core.c145
-rw-r--r--drivers/net/wwan/wwan_hwsim.c34
-rw-r--r--drivers/net/xen-netback/common.h19
-rw-r--r--drivers/net/xen-netback/interface.c30
-rw-r--r--drivers/net/xen-netback/netback.c276
-rw-r--r--drivers/net/xen-netback/rx.c84
-rw-r--r--drivers/net/xen-netback/xenbus.c22
-rw-r--r--drivers/net/xen-netfront.c376
3617 files changed, 697766 insertions, 193627 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6cccc3dc00bc..d0a1ed216d15 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -76,18 +76,16 @@ config WIREGUARD
tristate "WireGuard secure network tunnel"
depends on NET && INET
depends on IPV6 || !IPV6
+ depends on !KMSAN # KMSAN doesn't support the crypto configs below
select NET_UDP_TUNNEL
select DST_CACHE
select CRYPTO
select CRYPTO_LIB_CURVE25519
select CRYPTO_LIB_CHACHA20POLY1305
- select CRYPTO_LIB_BLAKE2S
select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
select CRYPTO_CURVE25519_X86 if X86 && 64BIT
- select ARM_CRYPTO if ARM
- select ARM64_CRYPTO if ARM64
select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
select CRYPTO_POLY1305_ARM if ARM
@@ -95,6 +93,7 @@ config WIREGUARD
select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
select CRYPTO_POLY1305_MIPS if MIPS
+ select CRYPTO_CHACHA_S390 if S390
help
WireGuard is a secure, fast, and easy to use replacement for IPSec
that uses modern cryptography and clever networking tricks. It's
@@ -335,7 +334,6 @@ config NETCONSOLE_DYNAMIC
config NETPOLL
def_bool NETCONSOLE
- select SRCU
config NET_POLL_CONTROLLER
def_bool NETPOLL
@@ -404,6 +402,8 @@ config TUN_VNET_CROSS_LE
config VETH
tristate "Virtual ethernet pair device"
+ select PAGE_POOL
+ select PAGE_POOL_STATS
help
This device is a local ethernet tunnel. Devices are created in pairs.
When one end receives the packet it appears on its pair and vice
@@ -500,6 +500,10 @@ config NET_SB1000
source "drivers/net/phy/Kconfig"
+source "drivers/net/pse-pd/Kconfig"
+
+source "drivers/net/can/Kconfig"
+
source "drivers/net/mctp/Kconfig"
source "drivers/net/mdio/Kconfig"
@@ -580,18 +584,7 @@ config FUJITSU_ES
This driver provides support for Extended Socket network device
on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series.
-config USB4_NET
- tristate "Networking over USB4 and Thunderbolt cables"
- depends on USB4 && INET
- help
- Select this if you want to create network between two computers
- over a USB4 and Thunderbolt cables. The driver supports Apple
- ThunderboltIP protocol and allows communication with any host
- supporting the same protocol including Windows and macOS.
-
- To compile this driver a module, choose M here. The module will be
- called thunderbolt-net.
-
+source "drivers/net/thunderbolt/Kconfig"
source "drivers/net/hyperv/Kconfig"
config NETDEVSIM
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 50b23e71065f..e26f98f897c5 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_NET) += loopback.o
obj-$(CONFIG_NETDEV_LEGACY_INIT) += Space.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-y += phy/
+obj-y += pse-pd/
obj-y += mdio/
obj-y += pcs/
obj-$(CONFIG_RIONET) += rionet.o
@@ -31,7 +32,7 @@ obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_TAP) += tap.o
obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
-obj-$(CONFIG_VXLAN) += vxlan.o
+obj-$(CONFIG_VXLAN) += vxlan/
obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_BAREUDP) += bareudp.o
obj-$(CONFIG_GTP) += gtp.o
@@ -83,8 +84,6 @@ obj-$(CONFIG_HYPERV_NET) += hyperv/
obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
obj-$(CONFIG_FUJITSU_ES) += fjes/
-
-thunderbolt-net-y += thunderbolt.o
-obj-$(CONFIG_USB4_NET) += thunderbolt-net.o
+obj-$(CONFIG_USB4_NET) += thunderbolt/
obj-$(CONFIG_NETDEVSIM) += netdevsim/
obj-$(CONFIG_NET_FAILOVER) += net_failover.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 49e67c9fb5a4..83214e2e70ab 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -68,7 +68,7 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map)
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
memset(s[i].name, 0, sizeof(s[i].name));
- strlcpy(s[i].name, name, IFNAMSIZ);
+ strscpy(s[i].name, name, IFNAMSIZ);
memcpy(&s[i].map, map, sizeof(s[i].map));
break;
}
@@ -222,9 +222,6 @@ static struct devprobe2 isa_probes[] __initdata = {
#ifdef CONFIG_CS89x0_ISA
{cs89x0_probe, 0},
#endif
-#ifdef CONFIG_NI65
- {ni65_probe, 0},
-#endif
{NULL, 0},
};
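
Editorial aside (not part of the patch): one Space.c hunk above is the tree-wide strlcpy() -> strscpy() conversion. The functional difference is the return value: strscpy() always NUL-terminates and returns -E2BIG on truncation, whereas strlcpy() returns the length of the source string, which makes truncation easy to miss. A minimal sketch of the truncation-aware pattern — the names below are illustrative, not taken from Space.c:

	/* Illustrative only; copy_name() and NAME_MAX_LEN are hypothetical. */
	#include <linux/errno.h>
	#include <linux/string.h>
	#include <linux/types.h>

	#define NAME_MAX_LEN 16			/* stand-in for IFNAMSIZ */

	static int copy_name(char *dst, const char *src)
	{
		ssize_t n;

		/* strscpy() NUL-terminates dst and reports truncation,
		 * unlike strlcpy(), which returns strlen(src).
		 */
		n = strscpy(dst, src, NAME_MAX_LEN);
		if (n == -E2BIG)
			return -EINVAL;		/* src did not fit */

		return 0;
	}
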
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index b732ee9a50ef..2d20be6ffb7e 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -11,6 +11,7 @@
#include <linux/net.h>
#include <linux/igmp.h>
#include <linux/workqueue.h>
+#include <net/sch_generic.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/udp.h>
@@ -50,13 +51,14 @@ static char *status_str[] = {
};
static char *type_str[] = {
+ "", /* Type 0 is not defined */
"AMT_MSG_DISCOVERY",
"AMT_MSG_ADVERTISEMENT",
"AMT_MSG_REQUEST",
"AMT_MSG_MEMBERSHIP_QUERY",
"AMT_MSG_MEMBERSHIP_UPDATE",
"AMT_MSG_MULTICAST_DATA",
- "AMT_MSG_TEARDOWM",
+ "AMT_MSG_TEARDOWN",
};
static char *action_str[] = {
@@ -447,7 +449,7 @@ out:
dev_put(amt->dev);
}
-/* Non-existant group is created as INCLUDE {empty}:
+/* Non-existent group is created as INCLUDE {empty}:
*
* RFC 3376 - 5.1. Action on Change of Interface State
*
@@ -561,7 +563,7 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
ihv3->nsrcs = 0;
ihv3->resv = 0;
ihv3->suppress = false;
- ihv3->qrv = amt->net->ipv4.sysctl_igmp_qrv;
+ ihv3->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
ihv3->csum = 0;
csum = &ihv3->csum;
csum_start = (void *)ihv3;
@@ -575,14 +577,14 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
return skb;
}
-static void __amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
- bool validate)
+static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
+ bool validate)
{
if (validate && amt->status >= status)
return;
netdev_dbg(amt->dev, "Update GW status %s -> %s",
status_str[amt->status], status_str[status]);
- amt->status = status;
+ WRITE_ONCE(amt->status, status);
}
static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
@@ -598,14 +600,6 @@ static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
tunnel->status = status;
}
-static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
- bool validate)
-{
- spin_lock_bh(&amt->lock);
- __amt_update_gw_status(amt, status, validate);
- spin_unlock_bh(&amt->lock);
-}
-
static void amt_update_relay_status(struct amt_tunnel_list *tunnel,
enum amt_status status, bool validate)
{
@@ -698,9 +692,7 @@ static void amt_send_discovery(struct amt_dev *amt)
if (unlikely(net_xmit_eval(err)))
amt->dev->stats.tx_errors++;
- spin_lock_bh(&amt->lock);
- __amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
- spin_unlock_bh(&amt->lock);
+ amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
out:
rcu_read_unlock();
}
@@ -898,6 +890,28 @@ static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
}
#endif
+static bool amt_queue_event(struct amt_dev *amt, enum amt_event event,
+ struct sk_buff *skb)
+{
+ int index;
+
+ spin_lock_bh(&amt->lock);
+ if (amt->nr_events >= AMT_MAX_EVENTS) {
+ spin_unlock_bh(&amt->lock);
+ return 1;
+ }
+
+ index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS;
+ amt->events[index].event = event;
+ amt->events[index].skb = skb;
+ amt->nr_events++;
+ amt->event_idx %= AMT_MAX_EVENTS;
+ queue_work(amt_wq, &amt->event_wq);
+ spin_unlock_bh(&amt->lock);
+
+ return 0;
+}
+
static void amt_secret_work(struct work_struct *work)
{
struct amt_dev *amt = container_of(to_delayed_work(work),
@@ -911,56 +925,72 @@ static void amt_secret_work(struct work_struct *work)
msecs_to_jiffies(AMT_SECRET_TIMEOUT));
}
-static void amt_discovery_work(struct work_struct *work)
+static void amt_event_send_discovery(struct amt_dev *amt)
{
- struct amt_dev *amt = container_of(to_delayed_work(work),
- struct amt_dev,
- discovery_wq);
-
- spin_lock_bh(&amt->lock);
if (amt->status > AMT_STATUS_SENT_DISCOVERY)
goto out;
get_random_bytes(&amt->nonce, sizeof(__be32));
- spin_unlock_bh(&amt->lock);
amt_send_discovery(amt);
- spin_lock_bh(&amt->lock);
out:
mod_delayed_work(amt_wq, &amt->discovery_wq,
msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
- spin_unlock_bh(&amt->lock);
}
-static void amt_req_work(struct work_struct *work)
+static void amt_discovery_work(struct work_struct *work)
{
struct amt_dev *amt = container_of(to_delayed_work(work),
struct amt_dev,
- req_wq);
+ discovery_wq);
+
+ if (amt_queue_event(amt, AMT_EVENT_SEND_DISCOVERY, NULL))
+ mod_delayed_work(amt_wq, &amt->discovery_wq,
+ msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
+}
+
+static void amt_event_send_request(struct amt_dev *amt)
+{
u32 exp;
- spin_lock_bh(&amt->lock);
if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
goto out;
- if (amt->req_cnt++ > AMT_MAX_REQ_COUNT) {
+ if (amt->req_cnt > AMT_MAX_REQ_COUNT) {
netdev_dbg(amt->dev, "Gateway is not ready");
amt->qi = AMT_INIT_REQ_TIMEOUT;
- amt->ready4 = false;
- amt->ready6 = false;
+ WRITE_ONCE(amt->ready4, false);
+ WRITE_ONCE(amt->ready6, false);
amt->remote_ip = 0;
- __amt_update_gw_status(amt, AMT_STATUS_INIT, false);
+ amt_update_gw_status(amt, AMT_STATUS_INIT, false);
amt->req_cnt = 0;
+ amt->nonce = 0;
+ goto out;
+ }
+
+ if (!amt->req_cnt) {
+ WRITE_ONCE(amt->ready4, false);
+ WRITE_ONCE(amt->ready6, false);
+ get_random_bytes(&amt->nonce, sizeof(__be32));
}
- spin_unlock_bh(&amt->lock);
amt_send_request(amt, false);
amt_send_request(amt, true);
amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
- spin_lock_bh(&amt->lock);
+ amt->req_cnt++;
out:
exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT);
mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
- spin_unlock_bh(&amt->lock);
+}
+
+static void amt_req_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(to_delayed_work(work),
+ struct amt_dev,
+ req_wq);
+
+ if (amt_queue_event(amt, AMT_EVENT_SEND_REQUEST, NULL))
+ mod_delayed_work(amt_wq, &amt->req_wq,
+ msecs_to_jiffies(100));
}
static bool amt_send_membership_update(struct amt_dev *amt,
@@ -1106,7 +1136,7 @@ static bool amt_send_membership_query(struct amt_dev *amt,
rt = ip_route_output_key(amt->net, &fl4);
if (IS_ERR(rt)) {
netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
- return -1;
+ return true;
}
amtmq = skb_push(skb, sizeof(*amtmq));
@@ -1216,7 +1246,8 @@ static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
/* Gateway only passes IGMP/MLD packets */
if (!report)
goto free;
- if ((!v6 && !amt->ready4) || (v6 && !amt->ready6))
+ if ((!v6 && !READ_ONCE(amt->ready4)) ||
+ (v6 && !READ_ONCE(amt->ready6)))
goto free;
if (amt_send_membership_update(amt, skb, v6))
goto free;
@@ -1369,11 +1400,11 @@ static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel,
int i;
if (!v6) {
- igmp_grec = (struct igmpv3_grec *)grec;
+ igmp_grec = grec;
nsrcs = ntohs(igmp_grec->grec_nsrcs);
} else {
#if IS_ENABLED(CONFIG_IPV6)
- mld_grec = (struct mld2_grec *)grec;
+ mld_grec = grec;
nsrcs = ntohs(mld_grec->grec_nsrcs);
#else
return;
@@ -1454,11 +1485,11 @@ static void amt_lookup_act_srcs(struct amt_tunnel_list *tunnel,
int i, j;
if (!v6) {
- igmp_grec = (struct igmpv3_grec *)grec;
+ igmp_grec = grec;
nsrcs = ntohs(igmp_grec->grec_nsrcs);
} else {
#if IS_ENABLED(CONFIG_IPV6)
- mld_grec = (struct mld2_grec *)grec;
+ mld_grec = grec;
nsrcs = ntohs(mld_grec->grec_nsrcs);
#else
return;
@@ -2217,8 +2248,7 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
struct amt_header_advertisement *amta;
int hdr_size;
- hdr_size = sizeof(*amta) - sizeof(struct amt_header);
-
+ hdr_size = sizeof(*amta) + sizeof(struct udphdr);
if (!pskb_may_pull(skb, hdr_size))
return true;
@@ -2233,6 +2263,10 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
ipv4_is_zeronet(amta->ip4))
return true;
+ if (amt->status != AMT_STATUS_SENT_DISCOVERY ||
+ amt->nonce != amta->nonce)
+ return true;
+
amt->remote_ip = amta->ip4;
netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
mod_delayed_work(amt_wq, &amt->req_wq, 0);
@@ -2248,19 +2282,30 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
struct ethhdr *eth;
struct iphdr *iph;
+ if (READ_ONCE(amt->status) != AMT_STATUS_SENT_UPDATE)
+ return true;
+
+ hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
+ if (!pskb_may_pull(skb, hdr_size))
+ return true;
+
amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1);
if (amtmd->reserved || amtmd->version)
return true;
- hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false))
return true;
+
skb_reset_network_header(skb);
skb_push(skb, sizeof(*eth));
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(*eth));
eth = eth_hdr(skb);
+
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return true;
iph = ip_hdr(skb);
+
if (iph->version == 4) {
if (!ipv4_is_multicast(iph->daddr))
return true;
@@ -2271,6 +2316,9 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
} else if (iph->version == 6) {
struct ipv6hdr *ip6h;
+ if (!pskb_may_pull(skb, sizeof(*ip6h)))
+ return true;
+
ip6h = ipv6_hdr(skb);
if (!ipv6_addr_is_multicast(&ip6h->daddr))
return true;
@@ -2303,8 +2351,7 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
struct iphdr *iph;
int hdr_size, len;
- hdr_size = sizeof(*amtmq) - sizeof(struct amt_header);
-
+ hdr_size = sizeof(*amtmq) + sizeof(struct udphdr);
if (!pskb_may_pull(skb, hdr_size))
return true;
@@ -2312,54 +2359,66 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
if (amtmq->reserved || amtmq->version)
return true;
- hdr_size = sizeof(*amtmq) + sizeof(struct udphdr) - sizeof(*eth);
+ if (amtmq->nonce != amt->nonce)
+ return true;
+
+ hdr_size -= sizeof(*eth);
if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
return true;
+
oeth = eth_hdr(skb);
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(*eth));
skb_reset_network_header(skb);
eth = eth_hdr(skb);
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return true;
+
iph = ip_hdr(skb);
if (iph->version == 4) {
- if (!ipv4_is_multicast(iph->daddr))
+ if (READ_ONCE(amt->ready4))
return true;
+
if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
sizeof(*ihv3)))
return true;
+ if (!ipv4_is_multicast(iph->daddr))
+ return true;
+
ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
skb_reset_transport_header(skb);
skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
- spin_lock_bh(&amt->lock);
- amt->ready4 = true;
+ WRITE_ONCE(amt->ready4, true);
amt->mac = amtmq->response_mac;
amt->req_cnt = 0;
amt->qi = ihv3->qqic;
- spin_unlock_bh(&amt->lock);
skb->protocol = htons(ETH_P_IP);
eth->h_proto = htons(ETH_P_IP);
ip_eth_mc_map(iph->daddr, eth->h_dest);
#if IS_ENABLED(CONFIG_IPV6)
} else if (iph->version == 6) {
- struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct mld2_query *mld2q;
+ struct ipv6hdr *ip6h;
- if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ if (READ_ONCE(amt->ready6))
return true;
+
if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
sizeof(*mld2q)))
return true;
+ ip6h = ipv6_hdr(skb);
+ if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ return true;
+
mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
skb_reset_transport_header(skb);
skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
- spin_lock_bh(&amt->lock);
- amt->ready6 = true;
+ WRITE_ONCE(amt->ready6, true);
amt->mac = amtmq->response_mac;
amt->req_cnt = 0;
amt->qi = mld2q->mld2q_qqic;
- spin_unlock_bh(&amt->lock);
skb->protocol = htons(ETH_P_IPV6);
eth->h_proto = htons(ETH_P_IPV6);
ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
@@ -2372,12 +2431,14 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
skb->pkt_type = PACKET_MULTICAST;
skb->ip_summed = CHECKSUM_NONE;
len = skb->len;
- if (netif_rx(skb) == NET_RX_SUCCESS) {
+ local_bh_disable();
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
dev_sw_netstats_rx_add(amt->dev, len);
} else {
amt->dev->stats.rx_dropped++;
}
+ local_bh_enable();
return false;
}
@@ -2386,23 +2447,23 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
{
struct amt_header_membership_update *amtmu;
struct amt_tunnel_list *tunnel;
- struct udphdr *udph;
struct ethhdr *eth;
struct iphdr *iph;
- int len;
+ int len, hdr_size;
iph = ip_hdr(skb);
- udph = udp_hdr(skb);
- if (__iptunnel_pull_header(skb, sizeof(*udph), skb->protocol,
- false, false))
+ hdr_size = sizeof(*amtmu) + sizeof(struct udphdr);
+ if (!pskb_may_pull(skb, hdr_size))
return true;
- amtmu = (struct amt_header_membership_update *)skb->data;
+ amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1);
if (amtmu->reserved || amtmu->version)
return true;
- skb_pull(skb, sizeof(*amtmu));
+ if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false))
+ return true;
+
skb_reset_network_header(skb);
list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
@@ -2420,9 +2481,12 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
}
}
- return false;
+ return true;
report:
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return true;
+
iph = ip_hdr(skb);
if (iph->version == 4) {
if (ip_mc_check_igmp(skb)) {
@@ -2469,7 +2533,7 @@ report:
skb->pkt_type = PACKET_MULTICAST;
skb->ip_summed = CHECKSUM_NONE;
len = skb->len;
- if (netif_rx(skb) == NET_RX_SUCCESS) {
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
true);
dev_sw_netstats_rx_add(amt->dev, len);
@@ -2615,7 +2679,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
if (tunnel->ip4 == iph->saddr)
goto send;
+ spin_lock_bh(&amt->lock);
if (amt->nr_tunnels >= amt->max_tunnels) {
+ spin_unlock_bh(&amt->lock);
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
return true;
}
@@ -2623,8 +2689,10 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
tunnel = kzalloc(sizeof(*tunnel) +
(sizeof(struct hlist_head) * amt->hash_buckets),
GFP_ATOMIC);
- if (!tunnel)
+ if (!tunnel) {
+ spin_unlock_bh(&amt->lock);
return true;
+ }
tunnel->source_port = udph->source;
tunnel->ip4 = iph->saddr;
@@ -2637,10 +2705,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);
- spin_lock_bh(&amt->lock);
list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
tunnel->key = amt->key;
- amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
+ __amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
amt->nr_tunnels++;
mod_delayed_work(amt_wq, &tunnel->gc_wq,
msecs_to_jiffies(amt_gmi(amt)));
@@ -2665,6 +2732,38 @@ send:
return false;
}
+static void amt_gw_rcv(struct amt_dev *amt, struct sk_buff *skb)
+{
+ int type = amt_parse_type(skb);
+ int err = 1;
+
+ if (type == -1)
+ goto drop;
+
+ if (amt->mode == AMT_MODE_GATEWAY) {
+ switch (type) {
+ case AMT_MSG_ADVERTISEMENT:
+ err = amt_advertisement_handler(amt, skb);
+ break;
+ case AMT_MSG_MEMBERSHIP_QUERY:
+ err = amt_membership_query_handler(amt, skb);
+ if (!err)
+ return;
+ break;
+ default:
+ netdev_dbg(amt->dev, "Invalid type of Gateway\n");
+ break;
+ }
+ }
+drop:
+ if (err) {
+ amt->dev->stats.rx_dropped++;
+ kfree_skb(skb);
+ } else {
+ consume_skb(skb);
+ }
+}
+
static int amt_rcv(struct sock *sk, struct sk_buff *skb)
{
struct amt_dev *amt;
@@ -2676,6 +2775,7 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
amt = rcu_dereference_sk_user_data(sk);
if (!amt) {
err = true;
+ kfree_skb(skb);
goto out;
}
@@ -2695,8 +2795,11 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
err = true;
goto drop;
}
- if (amt_advertisement_handler(amt, skb))
- amt->dev->stats.rx_dropped++;
+ if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
+ netdev_dbg(amt->dev, "AMT Event queue full\n");
+ err = true;
+ goto drop;
+ }
goto out;
case AMT_MSG_MULTICAST_DATA:
if (iph->saddr != amt->remote_ip) {
@@ -2715,11 +2818,12 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb)
err = true;
goto drop;
}
- err = amt_membership_query_handler(amt, skb);
- if (err)
+ if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
+ netdev_dbg(amt->dev, "AMT Event queue full\n");
+ err = true;
goto drop;
- else
- goto out;
+ }
+ goto out;
default:
err = true;
netdev_dbg(amt->dev, "Invalid type of Gateway\n");
@@ -2757,6 +2861,45 @@ out:
return 0;
}
+static void amt_event_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(work, struct amt_dev, event_wq);
+ struct sk_buff *skb;
+ u8 event;
+ int i;
+
+ for (i = 0; i < AMT_MAX_EVENTS; i++) {
+ spin_lock_bh(&amt->lock);
+ if (amt->nr_events == 0) {
+ spin_unlock_bh(&amt->lock);
+ return;
+ }
+ event = amt->events[amt->event_idx].event;
+ skb = amt->events[amt->event_idx].skb;
+ amt->events[amt->event_idx].event = AMT_EVENT_NONE;
+ amt->events[amt->event_idx].skb = NULL;
+ amt->nr_events--;
+ amt->event_idx++;
+ amt->event_idx %= AMT_MAX_EVENTS;
+ spin_unlock_bh(&amt->lock);
+
+ switch (event) {
+ case AMT_EVENT_RECEIVE:
+ amt_gw_rcv(amt, skb);
+ break;
+ case AMT_EVENT_SEND_DISCOVERY:
+ amt_event_send_discovery(amt);
+ break;
+ case AMT_EVENT_SEND_REQUEST:
+ amt_event_send_request(amt);
+ break;
+ default:
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
+
static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
{
struct amt_dev *amt;
@@ -2781,7 +2924,7 @@ static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
break;
case AMT_MSG_REQUEST:
case AMT_MSG_MEMBERSHIP_UPDATE:
- if (amt->status >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
+ if (READ_ONCE(amt->status) >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
mod_delayed_work(amt_wq, &amt->req_wq, 0);
break;
default:
@@ -2844,6 +2987,8 @@ static int amt_dev_open(struct net_device *dev)
amt->ready4 = false;
amt->ready6 = false;
+ amt->event_idx = 0;
+ amt->nr_events = 0;
err = amt_socket_create(amt);
if (err)
@@ -2851,6 +2996,7 @@ static int amt_dev_open(struct net_device *dev)
amt->req_cnt = 0;
amt->remote_ip = 0;
+ amt->nonce = 0;
get_random_bytes(&amt->key, sizeof(siphash_key_t));
amt->status = AMT_STATUS_INIT;
@@ -2869,6 +3015,8 @@ static int amt_dev_stop(struct net_device *dev)
struct amt_dev *amt = netdev_priv(dev);
struct amt_tunnel_list *tunnel, *tmp;
struct socket *sock;
+ struct sk_buff *skb;
+ int i;
cancel_delayed_work_sync(&amt->req_wq);
cancel_delayed_work_sync(&amt->discovery_wq);
@@ -2881,6 +3029,14 @@ static int amt_dev_stop(struct net_device *dev)
if (sock)
udp_tunnel_sock_release(sock);
+ cancel_work_sync(&amt->event_wq);
+ for (i = 0; i < AMT_MAX_EVENTS; i++) {
+ skb = amt->events[i].skb;
+ kfree_skb(skb);
+ amt->events[i].event = AMT_EVENT_NONE;
+ amt->events[i].skb = NULL;
+ }
+
amt->ready4 = false;
amt->ready6 = false;
amt->req_cnt = 0;
@@ -3072,7 +3228,7 @@ static int amt_newlink(struct net *net, struct net_device *dev,
goto err;
}
if (amt->mode == AMT_MODE_RELAY) {
- amt->qrv = amt->net->ipv4.sysctl_igmp_qrv;
+ amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
amt->qri = 10;
dev->needed_headroom = amt->stream_dev->needed_headroom +
AMT_RELAY_HLEN;
@@ -3123,8 +3279,8 @@ static int amt_newlink(struct net *net, struct net_device *dev,
INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
+ INIT_WORK(&amt->event_wq, amt_event_work);
INIT_LIST_HEAD(&amt->tunnel_list);
-
return 0;
err:
dev_put(amt->stream_dev);
@@ -3257,7 +3413,7 @@ static int __init amt_init(void)
if (err < 0)
goto unregister_notifier;
- amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 1);
+ amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 0);
if (!amt_wq) {
err = -ENOMEM;
goto rtnl_unregister;
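
Note on the amt.c hunks above: gateway-bound messages (advertisements, membership queries) are no longer handled directly in amt_rcv(); they are queued with amt_queue_event() and processed later by the new amt_event_work() item, which drains a fixed ring of AMT_MAX_EVENTS entries under amt->lock. The producer, amt_queue_event(), is not part of the hunks shown here; the sketch below is inferred from the fields the consumer touches (event_idx as read index, nr_events as fill level) and is illustrative only, not copied from the patch.

/* Sketch only: the producer side implied by amt_event_work(). Write slot =
 * read index + number of queued events, modulo the ring size; a non-zero
 * return tells the caller (amt_rcv()) to free the skb and drop the packet.
 * Field and helper names other than those visible in the hunks are assumed.
 */
static int amt_queue_event_sketch(struct amt_dev *amt, u8 event,
				  struct sk_buff *skb)
{
	int index;

	spin_lock_bh(&amt->lock);
	if (amt->nr_events >= AMT_MAX_EVENTS) {
		spin_unlock_bh(&amt->lock);
		return 1;			/* ring full, caller drops */
	}
	index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS;
	amt->events[index].event = event;
	amt->events[index].skb = skb;
	amt->nr_events++;
	spin_unlock_bh(&amt->lock);

	queue_work(amt_wq, &amt->event_wq);
	return 0;
}
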
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index 90b9f1d6eda9..b38ed52b82bc 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -39,17 +39,6 @@ config DEV_APPLETALK
connect to the AppleTalk network, say Y.
-config LTPC
- tristate "Apple/Farallon LocalTalk PC support"
- depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS
- help
- This allows you to use the AppleTalk PC card to connect to LocalTalk
- networks. The card is also known as the Farallon PhoneNet PC card.
- If you are in doubt, this card is the one with the 65C02 chip on it.
- You also need version 1.3.3 or later of the netatalk package.
- This driver is experimental, which means that it may not work.
- See the file <file:Documentation/networking/device_drivers/appletalk/ltpc.rst>.
-
config COPS
tristate "COPS LocalTalk PC support"
depends on DEV_APPLETALK && ISA
diff --git a/drivers/net/appletalk/Makefile b/drivers/net/appletalk/Makefile
index 903da3303f41..6db2943ce5d6 100644
--- a/drivers/net/appletalk/Makefile
+++ b/drivers/net/appletalk/Makefile
@@ -5,4 +5,3 @@
obj-$(CONFIG_IPDDP) += ipddp.o
obj-$(CONFIG_COPS) += cops.o
-obj-$(CONFIG_LTPC) += ltpc.o
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 5566daefbff4..d558535390f9 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -23,6 +23,7 @@
* of the GNU General Public License, incorporated herein by reference.
*/
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
deleted file mode 100644
index 388d7b3bd4c2..000000000000
--- a/drivers/net/appletalk/ltpc.c
+++ /dev/null
@@ -1,1277 +0,0 @@
-/*** ltpc.c -- a driver for the LocalTalk PC card.
- *
- * Copyright (c) 1995,1996 Bradford W. Johnson <johns393@maroon.tc.umn.edu>
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * This is ALPHA code at best. It may not work for you. It may
- * damage your equipment. It may damage your relations with other
- * users of your network. Use it at your own risk!
- *
- * Based in part on:
- * skeleton.c by Donald Becker
- * dummy.c by Nick Holloway and Alan Cox
- * loopback.c by Ross Biro, Fred van Kampen, Donald Becker
- * the netatalk source code (UMICH)
- * lots of work on the card...
- *
- * I do not have access to the (proprietary) SDK that goes with the card.
- * If you do, I don't want to know about it, and you can probably write
- * a better driver yourself anyway. This does mean that the pieces that
- * talk to the card are guesswork on my part, so use at your own risk!
- *
- * This is my first try at writing Linux networking code, and is also
- * guesswork. Again, use at your own risk! (Although on this part, I'd
- * welcome suggestions)
- *
- * This is a loadable kernel module which seems to work at my site
- * consisting of a 1.2.13 linux box running netatalk 1.3.3, and with
- * the kernel support from 1.3.3b2 including patches routing.patch
- * and ddp.disappears.from.chooser. In order to run it, you will need
- * to patch ddp.c and aarp.c in the kernel, but only a little...
- *
- * I'm fairly confident that while this is arguably badly written, the
- * problems that people experience will be "higher level", that is, with
- * complications in the netatalk code. The driver itself doesn't do
- * anything terribly complicated -- it pretends to be an ether device
- * as far as netatalk is concerned, strips the DDP data out of the ether
- * frame and builds a LLAP packet to send out the card. In the other
- * direction, it receives LLAP frames from the card and builds a fake
- * ether packet that it then tosses up to the networking code. You can
- * argue (correctly) that this is an ugly way to do things, but it
- * requires a minimal amount of fooling with the code in ddp.c and aarp.c.
- *
- * The card will do a lot more than is used here -- I *think* it has the
- * layers up through ATP. Even if you knew how that part works (which I
- * don't) it would be a big job to carve up the kernel ddp code to insert
- * things at a higher level, and probably a bad idea...
- *
- * There are a number of other cards that do LocalTalk on the PC. If
- * nobody finds any insurmountable (at the netatalk level) problems
- * here, this driver should encourage people to put some work into the
- * other cards (some of which I gather are still commercially available)
- * and also to put hooks for LocalTalk into the official ddp code.
- *
- * I welcome comments and suggestions. This is my first try at Linux
- * networking stuff, and there are probably lots of things that I did
- * suboptimally.
- *
- ***/
-
-/***
- *
- * $Log: ltpc.c,v $
- * Revision 1.1.2.1 2000/03/01 05:35:07 jgarzik
- * at and tr cleanup
- *
- * Revision 1.8 1997/01/28 05:44:54 bradford
- * Clean up for non-module a little.
- * Hacked about a bit to clean things up - Alan Cox
- * Probably broken it from the origina 1.8
- *
-
- * 1998/11/09: David Huggins-Daines <dhd@debian.org>
- * Cleaned up the initialization code to use the standard autoirq methods,
- and to probe for things in the standard order of i/o, irq, dma. This
- removes the "reset the reset" hack, because I couldn't figure out an
- easy way to get the card to trigger an interrupt after it.
- * Added support for passing configuration parameters on the kernel command
- line and through insmod
- * Changed the device name from "ltalk0" to "lt0", both to conform with the
- other localtalk driver, and to clear up the inconsistency between the
- module and the non-module versions of the driver :-)
- * Added a bunch of comments (I was going to make some enums for the state
- codes and the register offsets, but I'm still not sure exactly what their
- semantics are)
- * Don't poll anymore in interrupt-driven mode
- * It seems to work as a module now (as of 2.1.127), but I don't think
- I'm responsible for that...
-
- *
- * Revision 1.7 1996/12/12 03:42:33 bradford
- * DMA alloc cribbed from 3c505.c.
- *
- * Revision 1.6 1996/12/12 03:18:58 bradford
- * Added virt_to_bus; works in 2.1.13.
- *
- * Revision 1.5 1996/12/12 03:13:22 root
- * xmitQel initialization -- think through better though.
- *
- * Revision 1.4 1996/06/18 14:55:55 root
- * Change names to ltpc. Tabs. Took a shot at dma alloc,
- * although more needs to be done eventually.
- *
- * Revision 1.3 1996/05/22 14:59:39 root
- * Change dev->open, dev->close to track dummy.c in 1.99.(around 7)
- *
- * Revision 1.2 1996/05/22 14:58:24 root
- * Change tabs mostly.
- *
- * Revision 1.1 1996/04/23 04:45:09 root
- * Initial revision
- *
- * Revision 0.16 1996/03/05 15:59:56 root
- * Change ARPHRD_LOCALTLK definition to the "real" one.
- *
- * Revision 0.15 1996/03/05 06:28:30 root
- * Changes for kernel 1.3.70. Still need a few patches to kernel, but
- * it's getting closer.
- *
- * Revision 0.14 1996/02/25 17:38:32 root
- * More cleanups. Removed query to card on get_stats.
- *
- * Revision 0.13 1996/02/21 16:27:40 root
- * Refix debug_print_skb. Fix mac.raw gotcha that appeared in 1.3.65.
- * Clean up receive code a little.
- *
- * Revision 0.12 1996/02/19 16:34:53 root
- * Fix debug_print_skb. Kludge outgoing snet to 0 when using startup
- * range. Change debug to mask: 1 for verbose, 2 for higher level stuff
- * including packet printing, 4 for lower level (card i/o) stuff.
- *
- * Revision 0.11 1996/02/12 15:53:38 root
- * Added router sends (requires new aarp.c patch)
- *
- * Revision 0.10 1996/02/11 00:19:35 root
- * Change source LTALK_LOGGING debug switch to insmod ... debug=2.
- *
- * Revision 0.9 1996/02/10 23:59:35 root
- * Fixed those fixes for 1.2 -- DANGER! The at.h that comes with netatalk
- * has a *different* definition of struct sockaddr_at than the Linux kernel
- * does. This is an "insidious and invidious" bug...
- * (Actually the preceding comment is false -- it's the atalk.h in the
- * ancient atalk-0.06 that's the problem)
- *
- * Revision 0.8 1996/02/10 19:09:00 root
- * Merge 1.3 changes. Tested OK under 1.3.60.
- *
- * Revision 0.7 1996/02/10 17:56:56 root
- * Added debug=1 parameter on insmod for debugging prints. Tried
- * to fix timer unload on rmmod, but I don't think that's the problem.
- *
- * Revision 0.6 1995/12/31 19:01:09 root
- * Clean up rmmod, irq comments per feedback from Corin Anderson (Thanks Corey!)
- * Clean up initial probing -- sometimes the card wakes up latched in reset.
- *
- * Revision 0.5 1995/12/22 06:03:44 root
- * Added comments in front and cleaned up a bit.
- * This version sent out to people.
- *
- * Revision 0.4 1995/12/18 03:46:44 root
- * Return shortDDP to longDDP fake to 0/0. Added command structs.
- *
- ***/
-
-/* ltpc jumpers are:
-*
-* Interrupts -- set at most one. If none are set, the driver uses
-* polled mode. Because the card was developed in the XT era, the
-* original documentation refers to IRQ2. Since you'll be running
-* this on an AT (or later) class machine, that really means IRQ9.
-*
-* SW1 IRQ 4
-* SW2 IRQ 3
-* SW3 IRQ 9 (2 in original card documentation only applies to XT)
-*
-*
-* DMA -- choose DMA 1 or 3, and set both corresponding switches.
-*
-* SW4 DMA 3
-* SW5 DMA 1
-* SW6 DMA 3
-* SW7 DMA 1
-*
-*
-* I/O address -- choose one.
-*
-* SW8 220 / 240
-*/
-
-/* To have some stuff logged, do
-* insmod ltpc.o debug=1
-*
-* For a whole bunch of stuff, use higher numbers.
-*
-* The default is 0, i.e. no messages except for the probe results.
-*/
-
-/* insmod-tweakable variables */
-static int debug;
-#define DEBUG_VERBOSE 1
-#define DEBUG_UPPER 2
-#define DEBUG_LOWER 4
-
-static int io;
-static int irq;
-static int dma;
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/spinlock.h>
-#include <linux/in.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/if_ltalk.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/atalk.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-
-#include <net/Space.h>
-
-#include <asm/dma.h>
-#include <asm/io.h>
-
-/* our stuff */
-#include "ltpc.h"
-
-static DEFINE_SPINLOCK(txqueue_lock);
-static DEFINE_SPINLOCK(mbox_lock);
-
-/* function prototypes */
-static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
- void *dbuf, int dbuflen);
-static int sendup_buffer (struct net_device *dev);
-
-/* Dma Memory related stuff, cribbed directly from 3c505.c */
-
-static unsigned long dma_mem_alloc(int size)
-{
- int order = get_order(size);
-
- return __get_dma_pages(GFP_KERNEL, order);
-}
-
-/* DMA data buffer, DMA command buffer */
-static unsigned char *ltdmabuf;
-static unsigned char *ltdmacbuf;
-
-/* private struct, holds our appletalk address */
-
-struct ltpc_private
-{
- struct atalk_addr my_addr;
-};
-
-/* transmit queue element struct */
-
-struct xmitQel {
- struct xmitQel *next;
- /* command buffer */
- unsigned char *cbuf;
- short cbuflen;
- /* data buffer */
- unsigned char *dbuf;
- short dbuflen;
- unsigned char QWrite; /* read or write data */
- unsigned char mailbox;
-};
-
-/* the transmit queue itself */
-
-static struct xmitQel *xmQhd, *xmQtl;
-
-static void enQ(struct xmitQel *qel)
-{
- unsigned long flags;
- qel->next = NULL;
-
- spin_lock_irqsave(&txqueue_lock, flags);
- if (xmQtl) {
- xmQtl->next = qel;
- } else {
- xmQhd = qel;
- }
- xmQtl = qel;
- spin_unlock_irqrestore(&txqueue_lock, flags);
-
- if (debug & DEBUG_LOWER)
- printk("enqueued a 0x%02x command\n",qel->cbuf[0]);
-}
-
-static struct xmitQel *deQ(void)
-{
- unsigned long flags;
- int i;
- struct xmitQel *qel=NULL;
-
- spin_lock_irqsave(&txqueue_lock, flags);
- if (xmQhd) {
- qel = xmQhd;
- xmQhd = qel->next;
- if(!xmQhd) xmQtl = NULL;
- }
- spin_unlock_irqrestore(&txqueue_lock, flags);
-
- if ((debug & DEBUG_LOWER) && qel) {
- int n;
- printk(KERN_DEBUG "ltpc: dequeued command ");
- n = qel->cbuflen;
- if (n>100) n=100;
- for(i=0;i<n;i++) printk("%02x ",qel->cbuf[i]);
- printk("\n");
- }
-
- return qel;
-}
-
-/* and... the queue elements we'll be using */
-static struct xmitQel qels[16];
-
-/* and their corresponding mailboxes */
-static unsigned char mailbox[16];
-static unsigned char mboxinuse[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-
-static int wait_timeout(struct net_device *dev, int c)
-{
- /* returns true if it stayed c */
- /* this uses base+6, but it's ok */
- int i;
-
- /* twenty second or so total */
-
- for(i=0;i<200000;i++) {
- if ( c != inb_p(dev->base_addr+6) ) return 0;
- udelay(100);
- }
- return 1; /* timed out */
-}
-
-/* get the first free mailbox */
-
-static int getmbox(void)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&mbox_lock, flags);
- for(i=1;i<16;i++) if(!mboxinuse[i]) {
- mboxinuse[i]=1;
- spin_unlock_irqrestore(&mbox_lock, flags);
- return i;
- }
- spin_unlock_irqrestore(&mbox_lock, flags);
- return 0;
-}
-
-/* read a command from the card */
-static void handlefc(struct net_device *dev)
-{
- /* called *only* from idle, non-reentrant */
- int dma = dev->dma;
- int base = dev->base_addr;
- unsigned long flags;
-
-
- flags=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_READ);
- set_dma_addr(dma,virt_to_bus(ltdmacbuf));
- set_dma_count(dma,50);
- enable_dma(dma);
- release_dma_lock(flags);
-
- inb_p(base+3);
- inb_p(base+2);
-
- if ( wait_timeout(dev,0xfc) ) printk("timed out in handlefc\n");
-}
-
-/* read data from the card */
-static void handlefd(struct net_device *dev)
-{
- int dma = dev->dma;
- int base = dev->base_addr;
- unsigned long flags;
-
- flags=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_READ);
- set_dma_addr(dma,virt_to_bus(ltdmabuf));
- set_dma_count(dma,800);
- enable_dma(dma);
- release_dma_lock(flags);
-
- inb_p(base+3);
- inb_p(base+2);
-
- if ( wait_timeout(dev,0xfd) ) printk("timed out in handlefd\n");
- sendup_buffer(dev);
-}
-
-static void handlewrite(struct net_device *dev)
-{
- /* called *only* from idle, non-reentrant */
- /* on entry, 0xfb and ltdmabuf holds data */
- int dma = dev->dma;
- int base = dev->base_addr;
- unsigned long flags;
-
- flags=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_WRITE);
- set_dma_addr(dma,virt_to_bus(ltdmabuf));
- set_dma_count(dma,800);
- enable_dma(dma);
- release_dma_lock(flags);
-
- inb_p(base+3);
- inb_p(base+2);
-
- if ( wait_timeout(dev,0xfb) ) {
- flags=claim_dma_lock();
- printk("timed out in handlewrite, dma res %d\n",
- get_dma_residue(dev->dma) );
- release_dma_lock(flags);
- }
-}
-
-static void handleread(struct net_device *dev)
-{
- /* on entry, 0xfb */
- /* on exit, ltdmabuf holds data */
- int dma = dev->dma;
- int base = dev->base_addr;
- unsigned long flags;
-
-
- flags=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_READ);
- set_dma_addr(dma,virt_to_bus(ltdmabuf));
- set_dma_count(dma,800);
- enable_dma(dma);
- release_dma_lock(flags);
-
- inb_p(base+3);
- inb_p(base+2);
- if ( wait_timeout(dev,0xfb) ) printk("timed out in handleread\n");
-}
-
-static void handlecommand(struct net_device *dev)
-{
- /* on entry, 0xfa and ltdmacbuf holds command */
- int dma = dev->dma;
- int base = dev->base_addr;
- unsigned long flags;
-
- flags=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_WRITE);
- set_dma_addr(dma,virt_to_bus(ltdmacbuf));
- set_dma_count(dma,50);
- enable_dma(dma);
- release_dma_lock(flags);
- inb_p(base+3);
- inb_p(base+2);
- if ( wait_timeout(dev,0xfa) ) printk("timed out in handlecommand\n");
-}
-
-/* ready made command for getting the result from the card */
-static unsigned char rescbuf[2] = {LT_GETRESULT,0};
-static unsigned char resdbuf[2];
-
-static int QInIdle;
-
-/* idle expects to be called with the IRQ line high -- either because of
- * an interrupt, or because the line is tri-stated
- */
-
-static void idle(struct net_device *dev)
-{
- unsigned long flags;
- int state;
- /* FIXME This is initialized to shut the warning up, but I need to
- * think this through again.
- */
- struct xmitQel *q = NULL;
- int oops;
- int i;
- int base = dev->base_addr;
-
- spin_lock_irqsave(&txqueue_lock, flags);
- if(QInIdle) {
- spin_unlock_irqrestore(&txqueue_lock, flags);
- return;
- }
- QInIdle = 1;
- spin_unlock_irqrestore(&txqueue_lock, flags);
-
- /* this tri-states the IRQ line */
- (void) inb_p(base+6);
-
- oops = 100;
-
-loop:
- if (0>oops--) {
- printk("idle: looped too many times\n");
- goto done;
- }
-
- state = inb_p(base+6);
- if (state != inb_p(base+6)) goto loop;
-
- switch(state) {
- case 0xfc:
- /* incoming command */
- if (debug & DEBUG_LOWER) printk("idle: fc\n");
- handlefc(dev);
- break;
- case 0xfd:
- /* incoming data */
- if(debug & DEBUG_LOWER) printk("idle: fd\n");
- handlefd(dev);
- break;
- case 0xf9:
- /* result ready */
- if (debug & DEBUG_LOWER) printk("idle: f9\n");
- if(!mboxinuse[0]) {
- mboxinuse[0] = 1;
- qels[0].cbuf = rescbuf;
- qels[0].cbuflen = 2;
- qels[0].dbuf = resdbuf;
- qels[0].dbuflen = 2;
- qels[0].QWrite = 0;
- qels[0].mailbox = 0;
- enQ(&qels[0]);
- }
- inb_p(dev->base_addr+1);
- inb_p(dev->base_addr+0);
- if( wait_timeout(dev,0xf9) )
- printk("timed out idle f9\n");
- break;
- case 0xf8:
- /* ?? */
- if (xmQhd) {
- inb_p(dev->base_addr+1);
- inb_p(dev->base_addr+0);
- if(wait_timeout(dev,0xf8) )
- printk("timed out idle f8\n");
- } else {
- goto done;
- }
- break;
- case 0xfa:
- /* waiting for command */
- if(debug & DEBUG_LOWER) printk("idle: fa\n");
- if (xmQhd) {
- q=deQ();
- memcpy(ltdmacbuf,q->cbuf,q->cbuflen);
- ltdmacbuf[1] = q->mailbox;
- if (debug>1) {
- int n;
- printk("ltpc: sent command ");
- n = q->cbuflen;
- if (n>100) n=100;
- for(i=0;i<n;i++)
- printk("%02x ",ltdmacbuf[i]);
- printk("\n");
- }
-
- handlecommand(dev);
-
- if (0xfa == inb_p(base + 6)) {
- /* we timed out, so return */
- goto done;
- }
- } else {
- /* we don't seem to have a command */
- if (!mboxinuse[0]) {
- mboxinuse[0] = 1;
- qels[0].cbuf = rescbuf;
- qels[0].cbuflen = 2;
- qels[0].dbuf = resdbuf;
- qels[0].dbuflen = 2;
- qels[0].QWrite = 0;
- qels[0].mailbox = 0;
- enQ(&qels[0]);
- } else {
- printk("trouble: response command already queued\n");
- goto done;
- }
- }
- break;
- case 0Xfb:
- /* data transfer ready */
- if(debug & DEBUG_LOWER) printk("idle: fb\n");
- if(q->QWrite) {
- memcpy(ltdmabuf,q->dbuf,q->dbuflen);
- handlewrite(dev);
- } else {
- handleread(dev);
- /* non-zero mailbox numbers are for
- commmands, 0 is for GETRESULT
- requests */
- if(q->mailbox) {
- memcpy(q->dbuf,ltdmabuf,q->dbuflen);
- } else {
- /* this was a result */
- mailbox[ 0x0f & ltdmabuf[0] ] = ltdmabuf[1];
- mboxinuse[0]=0;
- }
- }
- break;
- }
- goto loop;
-
-done:
- QInIdle=0;
-
- /* now set the interrupts back as appropriate */
- /* the first read takes it out of tri-state (but still high) */
- /* the second resets it */
- /* note that after this point, any read of base+6 will
- trigger an interrupt */
-
- if (dev->irq) {
- inb_p(base+7);
- inb_p(base+7);
- }
-}
-
-
-static int do_write(struct net_device *dev, void *cbuf, int cbuflen,
- void *dbuf, int dbuflen)
-{
-
- int i = getmbox();
- int ret;
-
- if(i) {
- qels[i].cbuf = cbuf;
- qels[i].cbuflen = cbuflen;
- qels[i].dbuf = dbuf;
- qels[i].dbuflen = dbuflen;
- qels[i].QWrite = 1;
- qels[i].mailbox = i; /* this should be initted rather */
- enQ(&qels[i]);
- idle(dev);
- ret = mailbox[i];
- mboxinuse[i]=0;
- return ret;
- }
- printk("ltpc: could not allocate mbox\n");
- return -1;
-}
-
-static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
- void *dbuf, int dbuflen)
-{
-
- int i = getmbox();
- int ret;
-
- if(i) {
- qels[i].cbuf = cbuf;
- qels[i].cbuflen = cbuflen;
- qels[i].dbuf = dbuf;
- qels[i].dbuflen = dbuflen;
- qels[i].QWrite = 0;
- qels[i].mailbox = i; /* this should be initted rather */
- enQ(&qels[i]);
- idle(dev);
- ret = mailbox[i];
- mboxinuse[i]=0;
- return ret;
- }
- printk("ltpc: could not allocate mbox\n");
- return -1;
-}
-
-/* end of idle handlers -- what should be seen is do_read, do_write */
-
-static struct timer_list ltpc_timer;
-static struct net_device *ltpc_timer_dev;
-
-static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev);
-
-static int read_30 ( struct net_device *dev)
-{
- lt_command c;
- c.getflags.command = LT_GETFLAGS;
- return do_read(dev, &c, sizeof(c.getflags),&c,0);
-}
-
-static int set_30 (struct net_device *dev,int x)
-{
- lt_command c;
- c.setflags.command = LT_SETFLAGS;
- c.setflags.flags = x;
- return do_write(dev, &c, sizeof(c.setflags),&c,0);
-}
-
-/* LLAP to DDP translation */
-
-static int sendup_buffer (struct net_device *dev)
-{
- /* on entry, command is in ltdmacbuf, data in ltdmabuf */
- /* called from idle, non-reentrant */
-
- int dnode, snode, llaptype, len;
- int sklen;
- struct sk_buff *skb;
- struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;
-
- if (ltc->command != LT_RCVLAP) {
- printk("unknown command 0x%02x from ltpc card\n",ltc->command);
- return -1;
- }
- dnode = ltc->dnode;
- snode = ltc->snode;
- llaptype = ltc->laptype;
- len = ltc->length;
-
- sklen = len;
- if (llaptype == 1)
- sklen += 8; /* correct for short ddp */
- if(sklen > 800) {
- printk(KERN_INFO "%s: nonsense length in ltpc command 0x14: 0x%08x\n",
- dev->name,sklen);
- return -1;
- }
-
- if ( (llaptype==0) || (llaptype>2) ) {
- printk(KERN_INFO "%s: unknown LLAP type: %d\n",dev->name,llaptype);
- return -1;
- }
-
-
- skb = dev_alloc_skb(3+sklen);
- if (skb == NULL)
- {
- printk("%s: dropping packet due to memory squeeze.\n",
- dev->name);
- return -1;
- }
- skb->dev = dev;
-
- if (sklen > len)
- skb_reserve(skb,8);
- skb_put(skb,len+3);
- skb->protocol = htons(ETH_P_LOCALTALK);
- /* add LLAP header */
- skb->data[0] = dnode;
- skb->data[1] = snode;
- skb->data[2] = llaptype;
- skb_reset_mac_header(skb); /* save pointer to llap header */
- skb_pull(skb,3);
-
- /* copy ddp(s,e)hdr + contents */
- skb_copy_to_linear_data(skb, ltdmabuf, len);
-
- skb_reset_transport_header(skb);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
-
- /* toss it onwards */
- netif_rx(skb);
- return 0;
-}
-
-/* the handler for the board interrupt */
-
-static irqreturn_t
-ltpc_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
-
- if (dev==NULL) {
- printk("ltpc_interrupt: unknown device.\n");
- return IRQ_NONE;
- }
-
- inb_p(dev->base_addr+6); /* disable further interrupts from board */
-
- idle(dev); /* handle whatever is coming in */
-
- /* idle re-enables interrupts from board */
-
- return IRQ_HANDLED;
-}
-
-/***
- *
- * The ioctls that the driver responds to are:
- *
- * SIOCSIFADDR -- do probe using the passed node hint.
- * SIOCGIFADDR -- return net, node.
- *
- * some of this stuff should be done elsewhere.
- *
- ***/
-
-static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
- struct sockaddr_at *sa = (struct sockaddr_at *) &ifr->ifr_addr;
- /* we'll keep the localtalk node address in dev->pa_addr */
- struct ltpc_private *ltpc_priv = netdev_priv(dev);
- struct atalk_addr *aa = &ltpc_priv->my_addr;
- struct lt_init c;
- int ltflags;
-
- if(debug & DEBUG_VERBOSE) printk("ltpc_ioctl called\n");
-
- switch(cmd) {
- case SIOCSIFADDR:
-
- aa->s_net = sa->sat_addr.s_net;
-
- /* this does the probe and returns the node addr */
- c.command = LT_INIT;
- c.hint = sa->sat_addr.s_node;
-
- aa->s_node = do_read(dev,&c,sizeof(c),&c,0);
-
- /* get all llap frames raw */
- ltflags = read_30(dev);
- ltflags |= LT_FLAG_ALLLAP;
- set_30 (dev,ltflags);
-
- dev->broadcast[0] = 0xFF;
- dev->addr_len=1;
- dev_addr_set(dev, &aa->s_node);
-
- return 0;
-
- case SIOCGIFADDR:
-
- sa->sat_addr.s_net = aa->s_net;
- sa->sat_addr.s_node = aa->s_node;
-
- return 0;
-
- default:
- return -EINVAL;
- }
-}
-
-static void set_multicast_list(struct net_device *dev)
-{
- /* This needs to be present to keep netatalk happy. */
- /* Actually netatalk needs fixing! */
-}
-
-static int ltpc_poll_counter;
-
-static void ltpc_poll(struct timer_list *unused)
-{
- del_timer(&ltpc_timer);
-
- if(debug & DEBUG_VERBOSE) {
- if (!ltpc_poll_counter) {
- ltpc_poll_counter = 50;
- printk("ltpc poll is alive\n");
- }
- ltpc_poll_counter--;
- }
-
- /* poll 20 times per second */
- idle(ltpc_timer_dev);
- ltpc_timer.expires = jiffies + HZ/20;
- add_timer(&ltpc_timer);
-}
-
-/* DDP to LLAP translation */
-
-static netdev_tx_t ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- /* in kernel 1.3.xx, on entry skb->data points to ddp header,
- * and skb->len is the length of the ddp data + ddp header
- */
- int i;
- struct lt_sendlap cbuf;
- unsigned char *hdr;
-
- cbuf.command = LT_SENDLAP;
- cbuf.dnode = skb->data[0];
- cbuf.laptype = skb->data[2];
- skb_pull(skb,3); /* skip past LLAP header */
- cbuf.length = skb->len; /* this is host order */
- skb_reset_transport_header(skb);
-
- if(debug & DEBUG_UPPER) {
- printk("command ");
- for(i=0;i<6;i++)
- printk("%02x ",((unsigned char *)&cbuf)[i]);
- printk("\n");
- }
-
- hdr = skb_transport_header(skb);
- do_write(dev, &cbuf, sizeof(cbuf), hdr, skb->len);
-
- if(debug & DEBUG_UPPER) {
- printk("sent %d ddp bytes\n",skb->len);
- for (i = 0; i < skb->len; i++)
- printk("%02x ", hdr[i]);
- printk("\n");
- }
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
-
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-/* initialization stuff */
-
-static int __init ltpc_probe_dma(int base, int dma)
-{
- int want = (dma == 3) ? 2 : (dma == 1) ? 1 : 3;
- unsigned long timeout;
- unsigned long f;
-
- if (want & 1) {
- if (request_dma(1,"ltpc")) {
- want &= ~1;
- } else {
- f=claim_dma_lock();
- disable_dma(1);
- clear_dma_ff(1);
- set_dma_mode(1,DMA_MODE_WRITE);
- set_dma_addr(1,virt_to_bus(ltdmabuf));
- set_dma_count(1,sizeof(struct lt_mem));
- enable_dma(1);
- release_dma_lock(f);
- }
- }
- if (want & 2) {
- if (request_dma(3,"ltpc")) {
- want &= ~2;
- } else {
- f=claim_dma_lock();
- disable_dma(3);
- clear_dma_ff(3);
- set_dma_mode(3,DMA_MODE_WRITE);
- set_dma_addr(3,virt_to_bus(ltdmabuf));
- set_dma_count(3,sizeof(struct lt_mem));
- enable_dma(3);
- release_dma_lock(f);
- }
- }
- /* set up request */
-
- /* FIXME -- do timings better! */
-
- ltdmabuf[0] = LT_READMEM;
- ltdmabuf[1] = 1; /* mailbox */
- ltdmabuf[2] = 0; ltdmabuf[3] = 0; /* address */
- ltdmabuf[4] = 0; ltdmabuf[5] = 1; /* read 0x0100 bytes */
- ltdmabuf[6] = 0; /* dunno if this is necessary */
-
- inb_p(io+1);
- inb_p(io+0);
- timeout = jiffies+100*HZ/100;
- while(time_before(jiffies, timeout)) {
- if ( 0xfa == inb_p(io+6) ) break;
- }
-
- inb_p(io+3);
- inb_p(io+2);
- while(time_before(jiffies, timeout)) {
- if ( 0xfb == inb_p(io+6) ) break;
- }
-
- /* release the other dma channel (if we opened both of them) */
-
- if ((want & 2) && (get_dma_residue(3)==sizeof(struct lt_mem))) {
- want &= ~2;
- free_dma(3);
- }
-
- if ((want & 1) && (get_dma_residue(1)==sizeof(struct lt_mem))) {
- want &= ~1;
- free_dma(1);
- }
-
- if (!want)
- return 0;
-
- return (want & 2) ? 3 : 1;
-}
-
-static const struct net_device_ops ltpc_netdev = {
- .ndo_start_xmit = ltpc_xmit,
- .ndo_do_ioctl = ltpc_ioctl,
- .ndo_set_rx_mode = set_multicast_list,
-};
-
-static struct net_device * __init ltpc_probe(void)
-{
- struct net_device *dev;
- int err = -ENOMEM;
- int x=0,y=0;
- int autoirq;
- unsigned long f;
- unsigned long timeout;
-
- dev = alloc_ltalkdev(sizeof(struct ltpc_private));
- if (!dev)
- goto out;
-
- /* probe for the I/O port address */
-
- if (io != 0x240 && request_region(0x220,8,"ltpc")) {
- x = inb_p(0x220+6);
- if ( (x!=0xff) && (x>=0xf0) ) {
- io = 0x220;
- goto got_port;
- }
- release_region(0x220,8);
- }
- if (io != 0x220 && request_region(0x240,8,"ltpc")) {
- y = inb_p(0x240+6);
- if ( (y!=0xff) && (y>=0xf0) ){
- io = 0x240;
- goto got_port;
- }
- release_region(0x240,8);
- }
-
- /* give up in despair */
- printk(KERN_ERR "LocalTalk card not found; 220 = %02x, 240 = %02x.\n", x,y);
- err = -ENODEV;
- goto out1;
-
- got_port:
- /* probe for the IRQ line */
- if (irq < 2) {
- unsigned long irq_mask;
-
- irq_mask = probe_irq_on();
- /* reset the interrupt line */
- inb_p(io+7);
- inb_p(io+7);
- /* trigger an interrupt (I hope) */
- inb_p(io+6);
- mdelay(2);
- autoirq = probe_irq_off(irq_mask);
-
- if (autoirq == 0) {
- printk(KERN_ERR "ltpc: probe at %#x failed to detect IRQ line.\n", io);
- } else {
- irq = autoirq;
- }
- }
-
- /* allocate a DMA buffer */
- ltdmabuf = (unsigned char *) dma_mem_alloc(1000);
- if (!ltdmabuf) {
- printk(KERN_ERR "ltpc: mem alloc failed\n");
- err = -ENOMEM;
- goto out2;
- }
-
- ltdmacbuf = &ltdmabuf[800];
-
- if(debug & DEBUG_VERBOSE) {
- printk("ltdmabuf pointer %08lx\n",(unsigned long) ltdmabuf);
- }
-
- /* reset the card */
-
- inb_p(io+1);
- inb_p(io+3);
-
- msleep(20);
-
- inb_p(io+0);
- inb_p(io+2);
- inb_p(io+7); /* clear reset */
- inb_p(io+4);
- inb_p(io+5);
- inb_p(io+5); /* enable dma */
- inb_p(io+6); /* tri-state interrupt line */
-
- ssleep(1);
-
- /* now, figure out which dma channel we're using, unless it's
- already been specified */
- /* well, 0 is a legal DMA channel, but the LTPC card doesn't
- use it... */
- dma = ltpc_probe_dma(io, dma);
- if (!dma) { /* no dma channel */
- printk(KERN_ERR "No DMA channel found on ltpc card.\n");
- err = -ENODEV;
- goto out3;
- }
-
- /* print out friendly message */
- if(irq)
- printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, IR%d, DMA%d.\n",io,irq,dma);
- else
- printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma);
-
- dev->netdev_ops = &ltpc_netdev;
- dev->base_addr = io;
- dev->irq = irq;
- dev->dma = dma;
-
- /* the card will want to send a result at this point */
- /* (I think... leaving out this part makes the kernel crash,
- so I put it back in...) */
-
- f=claim_dma_lock();
- disable_dma(dma);
- clear_dma_ff(dma);
- set_dma_mode(dma,DMA_MODE_READ);
- set_dma_addr(dma,virt_to_bus(ltdmabuf));
- set_dma_count(dma,0x100);
- enable_dma(dma);
- release_dma_lock(f);
-
- (void) inb_p(io+3);
- (void) inb_p(io+2);
- timeout = jiffies+100*HZ/100;
-
- while(time_before(jiffies, timeout)) {
- if( 0xf9 == inb_p(io+6))
- break;
- schedule();
- }
-
- if(debug & DEBUG_VERBOSE) {
- printk("setting up timer and irq\n");
- }
-
- /* grab it and don't let go :-) */
- if (irq && request_irq( irq, ltpc_interrupt, 0, "ltpc", dev) >= 0)
- {
- (void) inb_p(io+7); /* enable interrupts from board */
- (void) inb_p(io+7); /* and reset irq line */
- } else {
- if( irq )
- printk(KERN_ERR "ltpc: IRQ already in use, using polled mode.\n");
- dev->irq = 0;
- /* polled mode -- 20 times per second */
- /* this is really, really slow... should it poll more often? */
- ltpc_timer_dev = dev;
- timer_setup(&ltpc_timer, ltpc_poll, 0);
-
- ltpc_timer.expires = jiffies + HZ/20;
- add_timer(&ltpc_timer);
- }
- err = register_netdev(dev);
- if (err)
- goto out4;
-
- return NULL;
-out4:
- del_timer_sync(&ltpc_timer);
- if (dev->irq)
- free_irq(dev->irq, dev);
-out3:
- free_pages((unsigned long)ltdmabuf, get_order(1000));
-out2:
- release_region(io, 8);
-out1:
- free_netdev(dev);
-out:
- return ERR_PTR(err);
-}
-
-#ifndef MODULE
-/* handles "ltpc=io,irq,dma" kernel command lines */
-static int __init ltpc_setup(char *str)
-{
- int ints[5];
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- if (ints[0] == 0) {
- if (str && !strncmp(str, "auto", 4)) {
- /* do nothing :-) */
- }
- else {
- /* usage message */
- printk (KERN_ERR
- "ltpc: usage: ltpc=auto|iobase[,irq[,dma]]\n");
- return 0;
- }
- } else {
- io = ints[1];
- if (ints[0] > 1) {
- irq = ints[2];
- }
- if (ints[0] > 2) {
- dma = ints[3];
- }
- /* ignore any other parameters */
- }
- return 1;
-}
-
-__setup("ltpc=", ltpc_setup);
-#endif
-
-static struct net_device *dev_ltpc;
-
-MODULE_LICENSE("GPL");
-module_param(debug, int, 0);
-module_param_hw(io, int, ioport, 0);
-module_param_hw(irq, int, irq, 0);
-module_param_hw(dma, int, dma, 0);
-
-
-static int __init ltpc_module_init(void)
-{
- if(io == 0)
- printk(KERN_NOTICE
- "ltpc: Autoprobing is not recommended for modules\n");
-
- dev_ltpc = ltpc_probe();
- return PTR_ERR_OR_ZERO(dev_ltpc);
-}
-module_init(ltpc_module_init);
-
-static void __exit ltpc_cleanup(void)
-{
-
- if(debug & DEBUG_VERBOSE) printk("unregister_netdev\n");
- unregister_netdev(dev_ltpc);
-
- del_timer_sync(&ltpc_timer);
-
- if(debug & DEBUG_VERBOSE) printk("freeing irq\n");
-
- if (dev_ltpc->irq)
- free_irq(dev_ltpc->irq, dev_ltpc);
-
- if(debug & DEBUG_VERBOSE) printk("freeing dma\n");
-
- if (dev_ltpc->dma)
- free_dma(dev_ltpc->dma);
-
- if(debug & DEBUG_VERBOSE) printk("freeing ioaddr\n");
-
- if (dev_ltpc->base_addr)
- release_region(dev_ltpc->base_addr,8);
-
- free_netdev(dev_ltpc);
-
- if(debug & DEBUG_VERBOSE) printk("free_pages\n");
-
- free_pages( (unsigned long) ltdmabuf, get_order(1000));
-
- if(debug & DEBUG_VERBOSE) printk("returning from cleanup_module\n");
-}
-
-module_exit(ltpc_cleanup);
diff --git a/drivers/net/appletalk/ltpc.h b/drivers/net/appletalk/ltpc.h
deleted file mode 100644
index 58cf945732a4..000000000000
--- a/drivers/net/appletalk/ltpc.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*** ltpc.h
- *
- *
- ***/
-
-#define LT_GETRESULT 0x00
-#define LT_WRITEMEM 0x01
-#define LT_READMEM 0x02
-#define LT_GETFLAGS 0x04
-#define LT_SETFLAGS 0x05
-#define LT_INIT 0x10
-#define LT_SENDLAP 0x13
-#define LT_RCVLAP 0x14
-
-/* the flag that we care about */
-#define LT_FLAG_ALLLAP 0x04
-
-struct lt_getresult {
- unsigned char command;
- unsigned char mailbox;
-};
-
-struct lt_mem {
- unsigned char command;
- unsigned char mailbox;
- unsigned short addr; /* host order */
- unsigned short length; /* host order */
-};
-
-struct lt_setflags {
- unsigned char command;
- unsigned char mailbox;
- unsigned char flags;
-};
-
-struct lt_getflags {
- unsigned char command;
- unsigned char mailbox;
-};
-
-struct lt_init {
- unsigned char command;
- unsigned char mailbox;
- unsigned char hint;
-};
-
-struct lt_sendlap {
- unsigned char command;
- unsigned char mailbox;
- unsigned char dnode;
- unsigned char laptype;
- unsigned short length; /* host order */
-};
-
-struct lt_rcvlap {
- unsigned char command;
- unsigned char dnode;
- unsigned char snode;
- unsigned char laptype;
- unsigned short length; /* host order */
-};
-
-union lt_command {
- struct lt_getresult getresult;
- struct lt_mem mem;
- struct lt_setflags setflags;
- struct lt_getflags getflags;
- struct lt_init init;
- struct lt_sendlap sendlap;
- struct lt_rcvlap rcvlap;
-};
-typedef union lt_command lt_command;
-
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 6382e1937cca..c580acb8b1d3 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -138,6 +138,9 @@ static int com20020pci_probe(struct pci_dev *pdev,
return -ENOMEM;
ci = (struct com20020_pci_card_info *)id->driver_data;
+ if (!ci)
+ return -EINVAL;
+
priv->ci = ci;
mm = &ci->misc_map;
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index 24150c933fcb..dc3253b318da 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -113,6 +113,7 @@ static int com20020_probe(struct pcmcia_device *p_dev)
struct com20020_dev *info;
struct net_device *dev;
struct arcnet_local *lp;
+ int ret = -ENOMEM;
dev_dbg(&p_dev->dev, "com20020_attach()\n");
@@ -142,12 +143,18 @@ static int com20020_probe(struct pcmcia_device *p_dev)
info->dev = dev;
p_dev->priv = info;
- return com20020_config(p_dev);
+ ret = com20020_config(p_dev);
+ if (ret)
+ goto fail_config;
+
+ return 0;
+fail_config:
+ free_arcdev(dev);
fail_alloc_dev:
kfree(info);
fail_alloc_info:
- return -ENOMEM;
+ return ret;
} /* com20020_attach */
static void com20020_detach(struct pcmcia_device *link)
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index edffc3489a12..683203f87ae2 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -38,6 +38,13 @@ struct bareudp_net {
struct list_head bareudp_list;
};
+struct bareudp_conf {
+ __be16 ethertype;
+ __be16 port;
+ u16 sport_min;
+ bool multi_proto_mode;
+};
+
/* Pseudo network device */
struct bareudp_dev {
struct net *net; /* netns for packet i/o */
@@ -141,14 +148,14 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
- if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
+ if (!ipv6_mod_enabled() || family == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
else
err = IP6_ECN_decapsulate(oiph, skb);
if (unlikely(err)) {
if (log_ecn_error) {
- if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
+ if (!ipv6_mod_enabled() || family == AF_INET)
net_info_ratelimited("non-ECT from %pI4 "
"with TOS=%#x\n",
&((struct iphdr *)oiph)->saddr,
@@ -214,11 +221,12 @@ static struct socket *bareudp_create_sock(struct net *net, __be16 port)
int err;
memset(&udp_conf, 0, sizeof(udp_conf));
-#if IS_ENABLED(CONFIG_IPV6)
- udp_conf.family = AF_INET6;
-#else
- udp_conf.family = AF_INET;
-#endif
+
+ if (ipv6_mod_enabled())
+ udp_conf.family = AF_INET6;
+ else
+ udp_conf.family = AF_INET;
+
udp_conf.local_udp_port = port;
/* Open UDP socket */
err = udp_sock_create(net, &udp_conf, &sock);
@@ -441,7 +449,7 @@ static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
}
rcu_read_lock();
- if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
+ if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6)
err = bareudp6_xmit_skb(skb, dev, bareudp, info);
else
err = bareudp_xmit_skb(skb, dev, bareudp, info);
@@ -471,7 +479,7 @@ static int bareudp_fill_metadata_dst(struct net_device *dev,
use_cache = ip_tunnel_dst_cache_usable(skb, info);
- if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
+ if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) {
struct rtable *rt;
__be32 saddr;
@@ -602,7 +610,8 @@ static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
}
static int bareudp_configure(struct net *net, struct net_device *dev,
- struct bareudp_conf *conf)
+ struct bareudp_conf *conf,
+ struct netlink_ext_ack *extack)
{
struct bareudp_net *bn = net_generic(net, bareudp_net_id);
struct bareudp_dev *t, *bareudp = netdev_priv(dev);
@@ -611,13 +620,17 @@ static int bareudp_configure(struct net *net, struct net_device *dev,
bareudp->net = net;
bareudp->dev = dev;
t = bareudp_find_dev(bn, conf);
- if (t)
+ if (t) {
+ NL_SET_ERR_MSG(extack, "Another bareudp device using the same port already exists");
return -EBUSY;
+ }
if (conf->multi_proto_mode &&
(conf->ethertype != htons(ETH_P_MPLS_UC) &&
- conf->ethertype != htons(ETH_P_IP)))
+ conf->ethertype != htons(ETH_P_IP))) {
+ NL_SET_ERR_MSG(extack, "Cannot set multiproto mode for this ethertype (only IPv4 and unicast MPLS are supported)");
return -EINVAL;
+ }
bareudp->port = conf->port;
bareudp->ethertype = conf->ethertype;
@@ -664,7 +677,7 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
if (err)
return err;
- err = bareudp_configure(net, dev, &conf);
+ err = bareudp_configure(net, dev, &conf, extack);
if (err)
return err;
@@ -721,40 +734,6 @@ static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
.fill_info = bareudp_fill_info,
};
-struct net_device *bareudp_dev_create(struct net *net, const char *name,
- u8 name_assign_type,
- struct bareudp_conf *conf)
-{
- struct nlattr *tb[IFLA_MAX + 1];
- struct net_device *dev;
- int err;
-
- memset(tb, 0, sizeof(tb));
- dev = rtnl_create_link(net, name, name_assign_type,
- &bareudp_link_ops, tb, NULL);
- if (IS_ERR(dev))
- return dev;
-
- err = bareudp_configure(net, dev, conf);
- if (err) {
- free_netdev(dev);
- return ERR_PTR(err);
- }
- err = dev_set_mtu(dev, IP_MAX_MTU - BAREUDP_BASE_HLEN);
- if (err)
- goto err;
-
- err = rtnl_configure_link(dev, NULL);
- if (err < 0)
- goto err;
-
- return dev;
-err:
- bareudp_dellink(dev, NULL);
- return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(bareudp_dev_create);
-
static __net_init int bareudp_init_net(struct net *net)
{
struct bareudp_net *bn = net_generic(net, bareudp_net_id);
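
Two things happen in the bareudp changes: configuration errors are now reported through netlink extack, and the compile-time IS_ENABLED(CONFIG_IPV6) tests are replaced by ipv6_mod_enabled(). The distinction matters because IS_ENABLED() is a build-time constant and stays true even when IPv6 is unusable at runtime (for example when the kernel was booted with ipv6.disable=1), in which case opening an AF_INET6 tunnel socket fails. A minimal sketch of the runtime selection, with a hypothetical helper name:

/* Sketch only: choose the tunnel socket family at runtime rather than at
 * build time. ipv6_mod_enabled() reports whether IPv6 is actually usable
 * right now; IS_ENABLED(CONFIG_IPV6) only says it was enabled in .config.
 */
static void pick_tunnel_family(struct udp_port_cfg *udp_conf)
{
	if (ipv6_mod_enabled())
		udp_conf->family = AF_INET6;
	else
		udp_conf->family = AF_INET;
}
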
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 6006c2e8fa2b..c99ffe6c683a 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -75,6 +75,7 @@ enum ad_link_speed_type {
AD_LINK_SPEED_100000MBPS,
AD_LINK_SPEED_200000MBPS,
AD_LINK_SPEED_400000MBPS,
+ AD_LINK_SPEED_800000MBPS,
};
/* compare MAC addresses */
@@ -84,11 +85,13 @@ enum ad_link_speed_type {
static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
0, 0, 0, 0, 0, 0
};
-static u16 ad_ticks_per_sec;
+
+static const u16 ad_ticks_per_sec = 1000 / AD_TIMER_INTERVAL;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
-static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
- MULTICAST_LACPDU_ADDR;
+const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned = {
+ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02
+};
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
@@ -225,7 +228,7 @@ static inline int __check_agg_selection_timer(struct port *port)
if (bond == NULL)
return 0;
- return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
+ return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0;
}
/**
@@ -249,6 +252,7 @@ static inline int __check_agg_selection_timer(struct port *port)
* %AD_LINK_SPEED_100000MBPS
* %AD_LINK_SPEED_200000MBPS
* %AD_LINK_SPEED_400000MBPS
+ * %AD_LINK_SPEED_800000MBPS
*/
static u16 __get_link_speed(struct port *port)
{
@@ -324,6 +328,10 @@ static u16 __get_link_speed(struct port *port)
speed = AD_LINK_SPEED_400000MBPS;
break;
+ case SPEED_800000:
+ speed = AD_LINK_SPEED_800000MBPS;
+ break;
+
default:
/* unknown speed value from ethtool. shouldn't happen */
if (slave->speed != SPEED_UNKNOWN)
@@ -751,6 +759,9 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
case AD_LINK_SPEED_400000MBPS:
bandwidth = nports * 400000;
break;
+ case AD_LINK_SPEED_800000MBPS:
+ bandwidth = nports * 800000;
+ break;
default:
bandwidth = 0; /* to silence the compiler */
}
@@ -1021,8 +1032,8 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
if (port->aggregator &&
port->aggregator->is_active &&
!__port_is_enabled(port)) {
-
__enable_port(port);
+ *update_slave_arr = true;
}
}
break;
@@ -1538,6 +1549,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
slave_err(bond->dev, port->slave->dev,
"Port %d did not find a suitable aggregator\n",
port->actor_port_number);
+ return;
}
}
/* if all aggregator's ports are READY_N == TRUE, set ready=TRUE
@@ -1779,6 +1791,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
port = port->next_port_in_aggregator) {
__enable_port(port);
}
+ *update_slave_arr = true;
}
}
@@ -1994,42 +2007,30 @@ static void ad_marker_response_received(struct bond_marker *marker,
*/
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
{
- BOND_AD_INFO(bond).agg_select_timer = timeout;
+ atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout);
}
/**
* bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
* @bond: bonding struct to work on
- * @tick_resolution: tick duration (millisecond resolution)
*
* Can be called only after the mac address of the bond is set.
*/
-void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
+void bond_3ad_initialize(struct bonding *bond)
{
- /* check that the bond is not initialized yet */
- if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
- bond->dev->dev_addr)) {
-
- BOND_AD_INFO(bond).aggregator_identifier = 0;
-
- BOND_AD_INFO(bond).system.sys_priority =
- bond->params.ad_actor_sys_prio;
- if (is_zero_ether_addr(bond->params.ad_actor_system))
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->dev->dev_addr);
- else
- BOND_AD_INFO(bond).system.sys_mac_addr =
- *((struct mac_addr *)bond->params.ad_actor_system);
-
- /* initialize how many times this module is called in one
- * second (should be about every 100ms)
- */
- ad_ticks_per_sec = tick_resolution;
+ BOND_AD_INFO(bond).aggregator_identifier = 0;
+ BOND_AD_INFO(bond).system.sys_priority =
+ bond->params.ad_actor_sys_prio;
+ if (is_zero_ether_addr(bond->params.ad_actor_system))
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->dev->dev_addr);
+ else
+ BOND_AD_INFO(bond).system.sys_mac_addr =
+ *((struct mac_addr *)bond->params.ad_actor_system);
- bond_3ad_initiate_agg_selection(bond,
- AD_AGGREGATOR_SELECTION_TIMER *
- ad_ticks_per_sec);
- }
+ bond_3ad_initiate_agg_selection(bond,
+ AD_AGGREGATOR_SELECTION_TIMER *
+ ad_ticks_per_sec);
}
/**
@@ -2227,7 +2228,8 @@ void bond_3ad_unbind_slave(struct slave *slave)
temp_aggregator->num_of_ports--;
if (__agg_active_ports(temp_aggregator) == 0) {
select_new_active_agg = temp_aggregator->is_active;
- ad_clear_agg(temp_aggregator);
+ if (temp_aggregator->num_of_ports == 0)
+ ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
slave_info(bond->dev, slave->dev, "Removing an active aggregator\n");
/* select new active aggregator */
@@ -2278,6 +2280,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
}
/**
+ * bond_agg_timer_advance - advance agg_select_timer
+ * @bond: bonding structure
+ *
+ * Return true when agg_select_timer reaches 0.
+ */
+static bool bond_agg_timer_advance(struct bonding *bond)
+{
+ int val, nval;
+
+ while (1) {
+ val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer);
+ if (!val)
+ return false;
+ nval = val - 1;
+ if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer,
+ val, nval) == val)
+ break;
+ }
+ return nval == 0;
+}
+
+/**
* bond_3ad_state_machine_handler - handle state machines timeout
* @work: work context to fetch bonding struct to work on from
*
@@ -2312,9 +2336,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
if (!bond_has_slaves(bond))
goto re_arm;
- /* check if agg_select_timer timer after initialize is timed out */
- if (BOND_AD_INFO(bond).agg_select_timer &&
- !(--BOND_AD_INFO(bond).agg_select_timer)) {
+ if (bond_agg_timer_advance(bond)) {
slave = bond_first_slave_rcu(bond);
port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
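
In the bond_3ad.c hunks, agg_select_timer becomes an atomic_t so the periodic state machine work can count it down without a lock while bond_3ad_initiate_agg_selection() may re-arm it concurrently with atomic_set(). bond_agg_timer_advance() uses the usual compare-and-swap retry loop for "decrement only if non-zero"; the same idiom in isolation, as a minimal sketch (the generic atomic_dec_if_positive() helper expresses a similar idea):

#include <linux/atomic.h>

/* Minimal sketch of the decrement-if-non-zero idiom used by
 * bond_agg_timer_advance(): retry the cmpxchg until the value read is the
 * value replaced, so a concurrent atomic_set() is not clobbered by a stale
 * decrement. Returns true exactly once, when the count reaches zero.
 */
static bool tick_advance(atomic_t *counter)
{
	int val, nval;

	do {
		val = atomic_read(counter);
		if (!val)
			return false;	/* not armed or already expired */
		nval = val - 1;
	} while (atomic_cmpxchg(counter, val, nval) != val);

	return nval == 0;
}
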
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 533e476988f2..b9dbad3a8af8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -19,6 +19,7 @@
#include <linux/in.h>
#include <net/arp.h>
#include <net/ipv6.h>
+#include <net/ndisc.h>
#include <asm/byteorder.h>
#include <net/bonding.h>
#include <net/bond_alb.h>
@@ -652,6 +653,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb,
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
struct slave *tx_slave = NULL;
+ struct net_device *dev;
struct arp_pkt *arp;
if (!pskb_network_may_pull(skb, sizeof(*arp)))
@@ -664,6 +666,15 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
if (!bond_slave_has_mac_rx(bond, arp->mac_src))
return NULL;
+ dev = ip_dev_find(dev_net(bond->dev), arp->ip_src);
+ if (dev) {
+ if (netif_is_bridge_master(dev)) {
+ dev_put(dev);
+ return NULL;
+ }
+ dev_put(dev);
+ }
+
if (arp->op_code == htons(ARPOP_REPLY)) {
/* the arp must be sent on the selected rx channel */
tx_slave = rlb_choose_channel(skb, bond, arp);
@@ -1269,6 +1280,27 @@ unwind:
return res;
}
+/* determine if the packet is NA or NS */
+static bool alb_determine_nd(struct sk_buff *skb, struct bonding *bond)
+{
+ struct ipv6hdr *ip6hdr;
+ struct icmp6hdr *hdr;
+
+ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr)))
+ return true;
+
+ ip6hdr = ipv6_hdr(skb);
+ if (ip6hdr->nexthdr != IPPROTO_ICMPV6)
+ return false;
+
+ if (!pskb_network_may_pull(skb, sizeof(*ip6hdr) + sizeof(*hdr)))
+ return true;
+
+ hdr = icmp6_hdr(skb);
+ return hdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT ||
+ hdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION;
+}
+
/************************ exported alb functions ************************/
int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
@@ -1280,12 +1312,12 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
return res;
if (rlb_enabled) {
- bond->alb_info.rlb_enabled = 1;
res = rlb_initialize(bond);
if (res) {
tlb_deinitialize(bond);
return res;
}
+ bond->alb_info.rlb_enabled = 1;
} else {
bond->alb_info.rlb_enabled = 0;
}
@@ -1348,8 +1380,11 @@ struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,
/* Do not TX balance any multicast or broadcast */
if (!is_multicast_ether_addr(eth_data->h_dest)) {
switch (skb->protocol) {
- case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
+ if (alb_determine_nd(skb, bond))
+ break;
+ fallthrough;
+ case htons(ETH_P_IP):
hash_index = bond_xmit_hash(bond, skb);
if (bond->params.tlb_dynamic_lb) {
tx_slave = tlb_choose_channel(bond,
@@ -1432,10 +1467,12 @@ struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
break;
}
- if (!pskb_network_may_pull(skb, sizeof(*ip6hdr))) {
+ if (alb_determine_nd(skb, bond)) {
do_tx_balance = false;
break;
}
+
+ /* The IPv6 header is pulled by alb_determine_nd */
/* Additionally, DAD probes should not be tx-balanced as that
* will lead to false positives for duplicate addresses and
* prevent address configuration from working.
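
The bond_alb.c changes keep two classes of traffic off the load-balancing paths: ARP whose source IP belongs to a local bridge master, and IPv6 neighbour discovery (alb_determine_nd() also returns true when the headers cannot be pulled, so unparseable frames are conservatively left on the current active slave). The bridge check in rlb_arp_xmit() uses ip_dev_find(), which takes a device reference that must be dropped with dev_put(); a variant of the same test that avoids the reference round trip by staying inside an RCU read section could look like the sketch below. This is an alternative illustration, not the patch's code.

#include <linux/inetdevice.h>
#include <linux/netdevice.h>

/* Sketch of an alternative to the ip_dev_find()/dev_put() pair: with
 * devref == false, __ip_dev_find() takes no reference, so both the lookup
 * and the bridge-master test must stay inside rcu_read_lock().
 */
static bool arp_src_is_local_bridge(struct net *net, __be32 ip_src)
{
	struct net_device *dev;
	bool ret = false;

	rcu_read_lock();
	dev = __ip_dev_find(net, ip_src, false);
	if (dev && netif_is_bridge_master(dev))
		ret = true;
	rcu_read_unlock();

	return ret;
}
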
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 4f9b4a18c74c..594094526648 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -76,7 +76,7 @@ void bond_debug_reregister(struct bonding *bond)
d = debugfs_rename(bonding_debug_root, bond->debug_dir,
bonding_debug_root, bond->dev->name);
- if (d) {
+ if (!IS_ERR(d)) {
bond->debug_dir = d;
} else {
netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
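
The bond_debugfs.c fix: debugfs_rename() reports failure with an error pointer (hence the IS_ERR() test in the hunk), so the old non-NULL check mistook values like ERR_PTR(-EINVAL) for a valid dentry. The general pattern, as a small sketch with made-up names:

#include <linux/err.h>
#include <linux/debugfs.h>

/* Sketch only: APIs that report failure via ERR_PTR() must be tested with
 * IS_ERR(); a plain NULL check treats every error pointer as success.
 */
static struct dentry *rename_debug_dir(struct dentry *root,
				       struct dentry *old, const char *name)
{
	struct dentry *d;

	d = debugfs_rename(root, old, root, name);
	if (IS_ERR(d))
		return NULL;	/* caller falls back to unregistering old */
	return d;
}
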
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ff8da720a33a..3fed888629f7 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -35,6 +35,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
+#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
@@ -71,6 +72,7 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
+#include <linux/phy.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
@@ -86,6 +88,7 @@
#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include <net/tls.h>
#endif
+#include <net/ip6_route.h>
#include "bonding_priv.h"
@@ -304,7 +307,7 @@ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
return dev_queue_xmit(skb);
}
-bool bond_sk_check(struct bonding *bond)
+static bool bond_sk_check(struct bonding *bond)
{
switch (BOND_MODE(bond)) {
case BOND_MODE_8023AD:
@@ -416,8 +419,10 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
/**
* bond_ipsec_add_sa - program device with a security association
* @xs: pointer to transformer state struct
+ * @extack: extack point to fill failure reason
**/
-static int bond_ipsec_add_sa(struct xfrm_state *xs)
+static int bond_ipsec_add_sa(struct xfrm_state *xs,
+ struct netlink_ext_ack *extack)
{
struct net_device *bond_dev = xs->xso.dev;
struct bond_ipsec *ipsec;
@@ -439,7 +444,7 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs)
if (!slave->dev->xfrmdev_ops ||
!slave->dev->xfrmdev_ops->xdo_dev_state_add ||
netif_is_bond_master(slave->dev)) {
- slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload");
rcu_read_unlock();
return -EINVAL;
}
@@ -451,7 +456,7 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs)
}
xs->xso.real_dev = slave->dev;
- err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
+ err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
if (!err) {
ipsec->xs = xs;
INIT_LIST_HEAD(&ipsec->list);
@@ -491,7 +496,7 @@ static void bond_ipsec_add_sa_all(struct bonding *bond)
spin_lock_bh(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
ipsec->xs->xso.real_dev = slave->dev;
- if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
+ if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
ipsec->xs->xso.real_dev = NULL;
}
@@ -862,12 +867,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
dev_uc_unsync(slave_dev, bond_dev);
dev_mc_unsync(slave_dev, bond_dev);
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* del lacpdu mc addr from mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_del(slave_dev, lacpdu_multicast);
- }
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_del(slave_dev, lacpdu_mcast_addr);
}
/*--------------------------- Active slave change ---------------------------*/
@@ -887,7 +888,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
- bond_hw_addr_flush(bond->dev, old_active->dev);
+ if (bond->dev->flags & IFF_UP)
+ bond_hw_addr_flush(bond->dev, old_active->dev);
}
if (new_active) {
@@ -898,10 +900,12 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
- netif_addr_lock_bh(bond->dev);
- dev_uc_sync(new_active->dev, bond->dev);
- dev_mc_sync(new_active->dev, bond->dev);
- netif_addr_unlock_bh(bond->dev);
+ if (bond->dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond->dev);
+ dev_uc_sync(new_active->dev, bond->dev);
+ dev_mc_sync(new_active->dev, bond->dev);
+ netif_addr_unlock_bh(bond->dev);
+ }
}
}
@@ -1023,12 +1027,38 @@ out:
}
+/**
+ * bond_choose_primary_or_current - select the primary or high priority slave
+ * @bond: our bonding struct
+ *
+ * - Check if there is a primary link. If the primary link was set and is up,
+ * go on and do link reselection.
+ *
+ * - If primary link is not set or down, find the highest priority link.
+ * If the highest priority link is not current slave, set it as primary
+ * link and do link reselection.
+ */
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
struct slave *prim = rtnl_dereference(bond->primary_slave);
struct slave *curr = rtnl_dereference(bond->curr_active_slave);
+ struct slave *slave, *hprio = NULL;
+ struct list_head *iter;
if (!prim || prim->link != BOND_LINK_UP) {
+ bond_for_each_slave(bond, slave, iter) {
+ if (slave->link == BOND_LINK_UP) {
+ hprio = hprio ?: slave;
+ if (slave->prio > hprio->prio)
+ hprio = slave;
+ }
+ }
+
+ if (hprio && hprio != curr) {
+ prim = hprio;
+ goto link_reselect;
+ }
+
if (!curr || curr->link != BOND_LINK_UP)
return NULL;
return curr;
@@ -1039,6 +1069,7 @@ static struct slave *bond_choose_primary_or_current(struct bonding *bond)
return prim;
}
+link_reselect:
if (!curr || curr->link != BOND_LINK_UP)
return prim;
@@ -1096,9 +1127,6 @@ static bool bond_should_notify_peers(struct bonding *bond)
slave = rcu_dereference(bond->curr_active_slave);
rcu_read_unlock();
- netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
- slave ? slave->dev->name : "NULL");
-
if (!slave || !bond->send_peer_notif ||
bond->send_peer_notif %
max(1, bond->params.peer_notif_delay) != 0 ||
@@ -1106,6 +1134,9 @@ static bool bond_should_notify_peers(struct bonding *bond)
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
return false;
+ netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
+ slave ? slave->dev->name : "NULL");
+
return true;
}
@@ -1369,13 +1400,6 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
netdev_features_t mask;
struct slave *slave;
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
- if (bond_sk_check(bond))
- features |= BOND_TLS_FEATURES;
- else
- features &= ~BOND_TLS_FEATURES;
-#endif
-
mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
@@ -1416,8 +1440,8 @@ static void bond_compute_features(struct bonding *bond)
struct list_head *iter;
struct slave *slave;
unsigned short max_hard_header_len = ETH_HLEN;
- unsigned int gso_max_size = GSO_MAX_SIZE;
- u16 gso_max_segs = GSO_MAX_SEGS;
+ unsigned int tso_max_size = TSO_MAX_SIZE;
+ u16 tso_max_segs = TSO_MAX_SEGS;
if (!bond_has_slaves(bond))
goto done;
@@ -1446,8 +1470,8 @@ static void bond_compute_features(struct bonding *bond)
if (slave->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = slave->dev->hard_header_len;
- gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
- gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
+ tso_max_size = min(tso_max_size, slave->dev->tso_max_size);
+ tso_max_segs = min(tso_max_segs, slave->dev->tso_max_segs);
}
bond_dev->hard_header_len = max_hard_header_len;
@@ -1460,8 +1484,8 @@ done:
bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
bond_dev->mpls_features = mpls_features;
- bond_dev->gso_max_segs = gso_max_segs;
- netif_set_gso_max_size(bond_dev, gso_max_size);
+ netif_set_tso_max_segs(bond_dev, tso_max_segs);
+ netif_set_tso_max_size(bond_dev, tso_max_size);
bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
@@ -1610,13 +1634,19 @@ static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
{
struct netdev_lag_upper_info lag_upper_info;
enum netdev_lag_tx_type type;
+ int err;
type = bond_lag_tx_type(bond);
lag_upper_info.tx_type = type;
lag_upper_info.hash_type = bond_lag_hash_type(bond, type);
- return netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
- &lag_upper_info, extack);
+ err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
+ &lag_upper_info, extack);
+ if (err)
+ return err;
+
+ slave->dev->flags |= IFF_SLAVE;
+ return 0;
}
static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
@@ -1745,6 +1775,40 @@ void bond_lower_state_changed(struct slave *slave)
slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg); \
} while (0)
+/* The bonding driver uses ether_setup() to convert a master bond device
+ * to ARPHRD_ETHER, that resets the target netdevice's flags so we always
+ * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE and IFF_UP
+ * if they were set
+ */
+static void bond_ether_setup(struct net_device *bond_dev)
+{
+ unsigned int flags = bond_dev->flags & (IFF_SLAVE | IFF_UP);
+
+ ether_setup(bond_dev);
+ bond_dev->flags |= IFF_MASTER | flags;
+ bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+}
+
+void bond_xdp_set_features(struct net_device *bond_dev)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ xdp_features_t val = NETDEV_XDP_ACT_MASK;
+ struct list_head *iter;
+ struct slave *slave;
+
+ ASSERT_RTNL();
+
+ if (!bond_xdp_check(bond)) {
+ xdp_clear_features_flag(bond_dev);
+ return;
+ }
+
+ bond_for_each_slave(bond, slave, iter)
+ val &= slave->dev->xdp_features;
+
+ xdp_set_features_flag(bond_dev, val);
+}
+
/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
struct netlink_ext_ack *extack)
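
The feature intersection done by bond_xdp_set_features() in the hunk above follows a common stacking rule: the bond may only advertise what every lower device supports. A small userspace sketch of that idea, with made-up flag names standing in for xdp_features_t bits:

	#include <stdio.h>
	#include <stdint.h>

	#define FAKE_XDP_BASIC    (1u << 0)
	#define FAKE_XDP_REDIRECT (1u << 1)
	#define FAKE_XDP_XSK      (1u << 2)

	static uint32_t bond_feature_mask(const uint32_t *slave_feats, int n)
	{
		uint32_t val = FAKE_XDP_BASIC | FAKE_XDP_REDIRECT | FAKE_XDP_XSK;
		int i;

		for (i = 0; i < n; i++)
			val &= slave_feats[i];	/* keep only what all slaves offer */
		return val;
	}

	int main(void)
	{
		uint32_t slaves[] = {
			FAKE_XDP_BASIC | FAKE_XDP_REDIRECT | FAKE_XDP_XSK,
			FAKE_XDP_BASIC | FAKE_XDP_REDIRECT,
		};

		printf("bond xdp features: %#x\n", bond_feature_mask(slaves, 2));
		return 0;
	}
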
@@ -1836,10 +1900,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
if (slave_dev->type != ARPHRD_ETHER)
bond_setup_by_slave(bond_dev, slave_dev);
- else {
- ether_setup(bond_dev);
- bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- }
+ else
+ bond_ether_setup(bond_dev);
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
bond_dev);
@@ -1928,8 +1990,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
}
}
- /* set slave flag before open to prevent IPv6 addrconf */
- slave_dev->flags |= IFF_SLAVE;
+ /* set no_addrconf flag before open to prevent IPv6 addrconf */
+ slave_dev->priv_flags |= IFF_NO_ADDRCONF;
/* open the slave since the application closed it */
res = dev_open(slave_dev, extack);
@@ -1971,6 +2033,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
new_slave->target_last_arp_rx[i] = new_slave->last_rx;
+ new_slave->last_tx = new_slave->last_rx;
+
if (bond->params.miimon && !bond->params.use_carrier) {
link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -2049,7 +2113,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
/* Initialize AD with the number of times that the AD timer is called in 1 second
* can be called only after the mac address of the bond is set
*/
- bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
+ bond_3ad_initialize(bond);
} else {
SLAVE_AD_INFO(new_slave)->id =
SLAVE_AD_INFO(prev_slave)->id + 1;
@@ -2134,16 +2198,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
}
}
- netif_addr_lock_bh(bond_dev);
- dev_mc_sync_multiple(slave_dev, bond_dev);
- dev_uc_sync_multiple(slave_dev, bond_dev);
- netif_addr_unlock_bh(bond_dev);
-
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* add lacpdu mc addr to mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+ if (bond_dev->flags & IFF_UP) {
+ netif_addr_lock_bh(bond_dev);
+ dev_mc_sync_multiple(slave_dev, bond_dev);
+ dev_uc_sync_multiple(slave_dev, bond_dev);
+ netif_addr_unlock_bh(bond_dev);
- dev_mc_add(slave_dev, lacpdu_multicast);
+ if (BOND_MODE(bond) == BOND_MODE_8023AD)
+ dev_mc_add(slave_dev, lacpdu_mcast_addr);
}
}
@@ -2194,6 +2256,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
bpf_prog_inc(bond->xdp_prog);
}
+ bond_xdp_set_features(bond_dev);
+
slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
bond_is_active_slave(new_slave) ? "an active" : "a backup",
new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
@@ -2232,7 +2296,7 @@ err_close:
dev_close(slave_dev);
err_restore_mac:
- slave_dev->flags &= ~IFF_SLAVE;
+ slave_dev->priv_flags &= ~IFF_NO_ADDRCONF;
if (!bond->params.fail_over_mac ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* XXX TODO - fom follow mode needs to change master's
@@ -2259,9 +2323,7 @@ err_undo_flags:
eth_hw_addr_random(bond_dev);
if (bond_dev->type != ARPHRD_ETHER) {
dev_close(bond_dev);
- ether_setup(bond_dev);
- bond_dev->flags |= IFF_MASTER;
- bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ bond_ether_setup(bond_dev);
}
}
@@ -2377,10 +2439,9 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_select_active_slave(bond);
}
- if (!bond_has_slaves(bond)) {
- bond_set_carrier(bond);
+ bond_set_carrier(bond);
+ if (!bond_has_slaves(bond))
eth_hw_addr_random(bond_dev);
- }
unblock_netpoll_tx();
synchronize_rcu();
@@ -2416,7 +2477,8 @@ static int __bond_release_one(struct net_device *bond_dev,
if (old_flags & IFF_ALLMULTI)
dev_set_allmulti(slave_dev, -1);
- bond_hw_addr_flush(bond_dev, slave_dev);
+ if (old_flags & IFF_UP)
+ bond_hw_addr_flush(bond_dev, slave_dev);
}
slave_disable_netpoll(slave);
@@ -2424,6 +2486,8 @@ static int __bond_release_one(struct net_device *bond_dev,
/* close slave before restoring its mac address */
dev_close(slave_dev);
+ slave_dev->priv_flags &= ~IFF_NO_ADDRCONF;
+
if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* restore original ("permanent") mac address */
@@ -2441,6 +2505,7 @@ static int __bond_release_one(struct net_device *bond_dev,
if (!netif_is_bond_master(slave_dev))
slave_dev->priv_flags &= ~IFF_BONDING;
+ bond_xdp_set_features(bond_dev);
kobject_put(&slave->kobj);
return 0;
@@ -2502,12 +2567,21 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
/* called with rcu_read_lock() */
static int bond_miimon_inspect(struct bonding *bond)
{
+ bool ignore_updelay = false;
int link_state, commit = 0;
struct list_head *iter;
struct slave *slave;
- bool ignore_updelay;
- ignore_updelay = !rcu_dereference(bond->curr_active_slave);
+ if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
+ ignore_updelay = !rcu_dereference(bond->curr_active_slave);
+ } else {
+ struct bond_up_slave *usable_slaves;
+
+ usable_slaves = rcu_dereference(bond->usable_slaves);
+
+ if (usable_slaves && usable_slaves->count == 0)
+ ignore_updelay = true;
+ }
bond_for_each_slave_rcu(bond, slave, iter) {
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
@@ -2615,8 +2689,11 @@ static void bond_miimon_link_change(struct bonding *bond,
static void bond_miimon_commit(struct bonding *bond)
{
+ struct slave *slave, *primary, *active;
+ bool do_failover = false;
struct list_head *iter;
- struct slave *slave, *primary;
+
+ ASSERT_RTNL();
bond_for_each_slave(bond, slave, iter) {
switch (slave->link_new_state) {
@@ -2660,8 +2737,9 @@ static void bond_miimon_commit(struct bonding *bond)
bond_miimon_link_change(bond, slave, BOND_LINK_UP);
- if (!bond->curr_active_slave || slave == primary)
- goto do_failover;
+ active = rtnl_dereference(bond->curr_active_slave);
+ if (!active || slave == primary || slave->prio > active->prio)
+ do_failover = true;
continue;
@@ -2682,7 +2760,7 @@ static void bond_miimon_commit(struct bonding *bond)
bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
if (slave == rcu_access_pointer(bond->curr_active_slave))
- goto do_failover;
+ do_failover = true;
continue;
@@ -2693,8 +2771,9 @@ static void bond_miimon_commit(struct bonding *bond)
continue;
}
+ }
-do_failover:
+ if (do_failover) {
block_netpoll_tx();
bond_select_active_slave(bond);
unblock_netpoll_tx();
@@ -2792,31 +2871,15 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
return ret;
}
-/* We go to the (large) trouble of VLAN tagging ARP frames because
- * switches in VLAN mode (especially if ports are configured as
- * "native" to a VLAN) might not pass non-tagged frames.
- */
-static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
- __be32 src_ip, struct bond_vlan_tag *tags)
+static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags,
+ struct sk_buff *skb)
{
- struct sk_buff *skb;
- struct bond_vlan_tag *outer_tag = tags;
- struct net_device *slave_dev = slave->dev;
struct net_device *bond_dev = slave->bond->dev;
-
- slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
- arp_op, &dest_ip, &src_ip);
-
- skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
- NULL, slave_dev->dev_addr, NULL);
-
- if (!skb) {
- net_err_ratelimited("ARP packet allocation failed\n");
- return;
- }
+ struct net_device *slave_dev = slave->dev;
+ struct bond_vlan_tag *outer_tag = tags;
if (!tags || tags->vlan_proto == VLAN_N_VID)
- goto xmit;
+ return true;
tags++;
@@ -2833,7 +2896,7 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
tags->vlan_id);
if (!skb) {
net_err_ratelimited("failed to insert inner VLAN tag\n");
- return;
+ return false;
}
tags++;
@@ -2846,8 +2909,37 @@ static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
outer_tag->vlan_id);
}
-xmit:
- arp_xmit(skb);
+ return true;
+}
+
+/* We go to the (large) trouble of VLAN tagging ARP frames because
+ * switches in VLAN mode (especially if ports are configured as
+ * "native" to a VLAN) might not pass non-tagged frames.
+ */
+static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
+ __be32 src_ip, struct bond_vlan_tag *tags)
+{
+ struct net_device *bond_dev = slave->bond->dev;
+ struct net_device *slave_dev = slave->dev;
+ struct sk_buff *skb;
+
+ slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
+ arp_op, &dest_ip, &src_ip);
+
+ skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
+ NULL, slave_dev->dev_addr, NULL);
+
+ if (!skb) {
+ net_err_ratelimited("ARP packet allocation failed\n");
+ return;
+ }
+
+ if (bond_handle_vlan(slave, tags, skb)) {
+ slave_update_last_tx(slave);
+ arp_xmit(skb);
+ }
+
+ return;
}
/* Validate the device path between the @start_dev and the @end_dev.
@@ -2964,30 +3056,17 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
slave->target_last_arp_rx[i] = jiffies;
}
-int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
struct arphdr *arp = (struct arphdr *)skb->data;
struct slave *curr_active_slave, *curr_arp_slave;
unsigned char *arp_ptr;
__be32 sip, tip;
- int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
unsigned int alen;
- if (!slave_do_arp_validate(bond, slave)) {
- if ((slave_do_arp_validate_only(bond) && is_arp) ||
- !slave_do_arp_validate_only(bond))
- slave->last_rx = jiffies;
- return RX_HANDLER_ANOTHER;
- } else if (!is_arp) {
- return RX_HANDLER_ANOTHER;
- }
-
alen = arp_hdr_len(bond->dev);
- slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
- __func__, skb->dev->name);
-
if (alen > skb_headlen(skb)) {
arp = kmalloc(alen, GFP_ATOMIC);
if (!arp)
@@ -3048,8 +3127,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
curr_active_slave->last_link_up))
bond_validate_arp(bond, slave, tip, sip);
else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
- bond_time_in_interval(bond,
- dev_trans_start(curr_arp_slave->dev), 1))
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
bond_validate_arp(bond, slave, sip, tip);
out_unlock:
@@ -3058,6 +3136,235 @@ out_unlock:
return RX_HANDLER_ANOTHER;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
+ const struct in6_addr *saddr, struct bond_vlan_tag *tags)
+{
+ struct net_device *bond_dev = slave->bond->dev;
+ struct net_device *slave_dev = slave->dev;
+ struct in6_addr mcaddr;
+ struct sk_buff *skb;
+
+ slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n",
+ daddr, saddr);
+
+ skb = ndisc_ns_create(slave_dev, daddr, saddr, 0);
+ if (!skb) {
+ net_err_ratelimited("NS packet allocation failed\n");
+ return;
+ }
+
+ addrconf_addr_solict_mult(daddr, &mcaddr);
+ if (bond_handle_vlan(slave, tags, skb)) {
+ slave_update_last_tx(slave);
+ ndisc_send_skb(skb, &mcaddr, saddr);
+ }
+}
+
+static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
+{
+ struct in6_addr *targets = bond->params.ns_targets;
+ struct bond_vlan_tag *tags;
+ struct dst_entry *dst;
+ struct in6_addr saddr;
+ struct flowi6 fl6;
+ int i;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) {
+ slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n",
+ __func__, &targets[i]);
+ tags = NULL;
+
+ /* Find out through which dev should the packet go */
+ memset(&fl6, 0, sizeof(struct flowi6));
+ fl6.daddr = targets[i];
+ fl6.flowi6_oif = bond->dev->ifindex;
+
+ dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
+ if (dst->error) {
+ dst_release(dst);
+ /* there's no route to target - try to send arp
+ * probe to generate any traffic (arp_validate=0)
+ */
+ if (bond->params.arp_validate)
+ pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n",
+ bond->dev->name,
+ &targets[i]);
+ bond_ns_send(slave, &targets[i], &in6addr_any, tags);
+ continue;
+ }
+
+ /* bond device itself */
+ if (dst->dev == bond->dev)
+ goto found;
+
+ rcu_read_lock();
+ tags = bond_verify_device_path(bond->dev, dst->dev, 0);
+ rcu_read_unlock();
+
+ if (!IS_ERR_OR_NULL(tags))
+ goto found;
+
+ /* Not our device - skip */
+ slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n",
+ &targets[i], dst->dev ? dst->dev->name : "NULL");
+
+ dst_release(dst);
+ continue;
+
+found:
+ if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
+ bond_ns_send(slave, &targets[i], &saddr, tags);
+ else
+ bond_ns_send(slave, &targets[i], &in6addr_any, tags);
+
+ dst_release(dst);
+ kfree(tags);
+ }
+}
+
+static int bond_confirm_addr6(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct in6_addr *addr = (struct in6_addr *)priv->data;
+
+ return ipv6_chk_addr(dev_net(dev), addr, dev, 0);
+}
+
+static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
+{
+ struct netdev_nested_priv priv = {
+ .data = addr,
+ };
+ int ret = false;
+
+ if (bond_confirm_addr6(bond->dev, &priv))
+ return true;
+
+ rcu_read_lock();
+ if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv))
+ ret = true;
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void bond_validate_na(struct bonding *bond, struct slave *slave,
+ struct in6_addr *saddr, struct in6_addr *daddr)
+{
+ int i;
+
+ /* Ignore NAs that:
+ * 1. Source address is unspecified address.
+ * 2. Dest address is neither all-nodes multicast address nor
+ * exist on bond interface.
+ */
+ if (ipv6_addr_any(saddr) ||
+ (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
+ !bond_has_this_ip6(bond, daddr))) {
+ slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
+ __func__, saddr, daddr);
+ return;
+ }
+
+ i = bond_get_targets_ip6(bond->params.ns_targets, saddr);
+ if (i == -1) {
+ slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n",
+ __func__, saddr);
+ return;
+ }
+ slave->last_rx = jiffies;
+ slave->target_last_arp_rx[i] = jiffies;
+}
+
+static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
+{
+ struct slave *curr_active_slave, *curr_arp_slave;
+ struct in6_addr *saddr, *daddr;
+ struct {
+ struct ipv6hdr ip6;
+ struct icmp6hdr icmp6;
+ } *combined, _combined;
+
+ if (skb->pkt_type == PACKET_OTHERHOST ||
+ skb->pkt_type == PACKET_LOOPBACK)
+ goto out;
+
+ combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
+ if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
+ (combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION &&
+ combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT))
+ goto out;
+
+ saddr = &combined->ip6.saddr;
+ daddr = &combined->ip6.daddr;
+
+ slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
+ __func__, slave->dev->name, bond_slave_state(slave),
+ bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+ saddr, daddr);
+
+ curr_active_slave = rcu_dereference(bond->curr_active_slave);
+ curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+
+ /* We 'trust' the received ARP enough to validate it if:
+ * see bond_arp_rcv().
+ */
+ if (bond_is_active_slave(slave))
+ bond_validate_na(bond, slave, saddr, daddr);
+ else if (curr_active_slave &&
+ time_after(slave_last_rx(bond, curr_active_slave),
+ curr_active_slave->last_link_up))
+ bond_validate_na(bond, slave, daddr, saddr);
+ else if (curr_arp_slave &&
+ bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
+ bond_validate_na(bond, slave, saddr, daddr);
+
+out:
+ return RX_HANDLER_ANOTHER;
+}
+#endif
+
+int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6);
+#endif
+ bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
+
+ slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
+ __func__, skb->dev->name);
+
+ /* Use arp validate logic for both ARP and NS */
+ if (!slave_do_arp_validate(bond, slave)) {
+ if ((slave_do_arp_validate_only(bond) && is_arp) ||
+#if IS_ENABLED(CONFIG_IPV6)
+ (slave_do_arp_validate_only(bond) && is_ipv6) ||
+#endif
+ !slave_do_arp_validate_only(bond))
+ slave->last_rx = jiffies;
+ return RX_HANDLER_ANOTHER;
+ } else if (is_arp) {
+ return bond_arp_rcv(skb, bond, slave);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (is_ipv6) {
+ return bond_na_rcv(skb, bond, slave);
+#endif
+ } else {
+ return RX_HANDLER_ANOTHER;
+ }
+}
+
+static void bond_send_validate(struct bonding *bond, struct slave *slave)
+{
+ bond_arp_send_all(bond, slave);
+#if IS_ENABLED(CONFIG_IPV6)
+ bond_ns_send_all(bond, slave);
+#endif
+}
+
/* function to verify if we're in the arp_interval timeslice, returns true if
* (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
* arp_interval/2) . the arp_interval/2 is needed for really fast networks.
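
The window described in the comment above can be modelled with plain millisecond arithmetic; the sketch below assumes simple counters instead of jiffies and ignores wraparound, so it is only an approximation of the kernel helper.

	#include <stdio.h>
	#include <stdbool.h>

	static bool time_in_interval(unsigned long now_ms, unsigned long last_act_ms,
				     unsigned long arp_interval_ms, int mod)
	{
		unsigned long lo = last_act_ms - arp_interval_ms;
		unsigned long hi = last_act_ms + mod * arp_interval_ms +
				   arp_interval_ms / 2;

		return now_ms >= lo && now_ms <= hi;
	}

	int main(void)
	{
		/* last activity one interval ago is still inside the slice ... */
		printf("%d\n", time_in_interval(10000, 9000, 1000, 1));
		/* ... but three seconds of silence exceeds mod=2 intervals */
		printf("%d\n", time_in_interval(10000, 7000, 1000, 2));
		return 0;
	}
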
@@ -3099,12 +3406,12 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* so it can wait
*/
bond_for_each_slave_rcu(bond, slave, iter) {
- unsigned long trans_start = dev_trans_start(slave->dev);
+ unsigned long last_tx = slave_last_tx(slave);
bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
if (slave->link != BOND_LINK_UP) {
- if (bond_time_in_interval(bond, trans_start, 1) &&
+ if (bond_time_in_interval(bond, last_tx, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) {
bond_propose_link_state(slave, BOND_LINK_UP);
@@ -3129,8 +3436,8 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (!bond_time_in_interval(bond, trans_start, 2) ||
- !bond_time_in_interval(bond, slave->last_rx, 2)) {
+ if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
+ !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
slave_state_changed = 1;
@@ -3153,7 +3460,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
* to be unstable during low/no traffic periods
*/
if (bond_slave_is_up(slave))
- bond_arp_send_all(bond, slave);
+ bond_send_validate(bond, slave);
}
rcu_read_unlock();
@@ -3195,7 +3502,7 @@ re_arm:
*/
static int bond_ab_arp_inspect(struct bonding *bond)
{
- unsigned long trans_start, last_rx;
+ unsigned long last_tx, last_rx;
struct list_head *iter;
struct slave *slave;
int commit = 0;
@@ -3224,7 +3531,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
/* Backup slave is down if:
* - No current_arp_slave AND
- * - more than 3*delta since last receive AND
+ * - more than (missed_max+1)*delta since last receive AND
* - the bond has an IP address
*
* Note: a non-null current_arp_slave indicates
@@ -3236,20 +3543,20 @@ static int bond_ab_arp_inspect(struct bonding *bond)
*/
if (!bond_is_active_slave(slave) &&
!rcu_access_pointer(bond->current_arp_slave) &&
- !bond_time_in_interval(bond, last_rx, 3)) {
+ !bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
}
/* Active slave is down if:
- * - more than 2*delta since transmitting OR
- * - (more than 2*delta since receive AND
+ * - more than missed_max*delta since transmitting OR
+ * - (more than missed_max*delta since receive AND
* the bond has an IP address)
*/
- trans_start = dev_trans_start(slave->dev);
+ last_tx = slave_last_tx(slave);
if (bond_is_active_slave(slave) &&
- (!bond_time_in_interval(bond, trans_start, 2) ||
- !bond_time_in_interval(bond, last_rx, 2))) {
+ (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
+ !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
bond_propose_link_state(slave, BOND_LINK_DOWN);
commit++;
}
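
The hunks above replace the hard-coded 2 and 3 interval thresholds with missed_max and missed_max + 1. Below is a simplified userspace model of that down-detection, deliberately omitting the current_arp_slave and has-IP preconditions spelled out in the comments; names are stand-ins, not the kernel's.

	#include <stdio.h>
	#include <stdbool.h>

	struct probe_state {
		unsigned long last_tx_ms;
		unsigned long last_rx_ms;
		bool active;
	};

	static bool link_considered_down(const struct probe_state *s,
					 unsigned long now_ms,
					 unsigned long arp_interval_ms,
					 int missed_max)
	{
		unsigned long tx_limit = (unsigned long)missed_max * arp_interval_ms;
		/* backup slaves get one extra interval of grace */
		unsigned long rx_limit = s->active ? tx_limit
						   : tx_limit + arp_interval_ms;

		if (s->active && now_ms - s->last_tx_ms > tx_limit)
			return true;
		return now_ms - s->last_rx_ms > rx_limit;
	}

	int main(void)
	{
		struct probe_state backup = { .last_tx_ms = 0,
					      .last_rx_ms = 6500,
					      .active = false };

		/* arp_interval=1000, missed_max=2: a backup slave is down
		 * after more than 3 intervals without receive traffic */
		printf("%d\n", link_considered_down(&backup, 10000, 1000, 2));
		return 0;
	}
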
@@ -3265,8 +3572,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
*/
static void bond_ab_arp_commit(struct bonding *bond)
{
- unsigned long trans_start;
+ bool do_failover = false;
struct list_head *iter;
+ unsigned long last_tx;
struct slave *slave;
bond_for_each_slave(bond, slave, iter) {
@@ -3275,10 +3583,10 @@ static void bond_ab_arp_commit(struct bonding *bond)
continue;
case BOND_LINK_UP:
- trans_start = dev_trans_start(slave->dev);
+ last_tx = slave_last_tx(slave);
if (rtnl_dereference(bond->curr_active_slave) != slave ||
(!rtnl_dereference(bond->curr_active_slave) &&
- bond_time_in_interval(bond, trans_start, 1))) {
+ bond_time_in_interval(bond, last_tx, 1))) {
struct slave *current_arp_slave;
current_arp_slave = rtnl_dereference(bond->current_arp_slave);
@@ -3294,8 +3602,9 @@ static void bond_ab_arp_commit(struct bonding *bond)
slave_info(bond->dev, slave->dev, "link status definitely up\n");
if (!rtnl_dereference(bond->curr_active_slave) ||
- slave == rtnl_dereference(bond->primary_slave))
- goto do_failover;
+ slave == rtnl_dereference(bond->primary_slave) ||
+ slave->prio > rtnl_dereference(bond->curr_active_slave)->prio)
+ do_failover = true;
}
@@ -3314,7 +3623,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
if (slave == rtnl_dereference(bond->curr_active_slave)) {
RCU_INIT_POINTER(bond->current_arp_slave, NULL);
- goto do_failover;
+ do_failover = true;
}
continue;
@@ -3338,8 +3647,9 @@ static void bond_ab_arp_commit(struct bonding *bond)
slave->link_new_state);
continue;
}
+ }
-do_failover:
+ if (do_failover) {
block_netpoll_tx();
bond_select_active_slave(bond);
unblock_netpoll_tx();
@@ -3367,7 +3677,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
curr_active_slave->dev->name);
if (curr_active_slave) {
- bond_arp_send_all(bond, curr_active_slave);
+ bond_send_validate(bond, curr_active_slave);
return should_notify_rtnl;
}
@@ -3419,7 +3729,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
BOND_SLAVE_NOTIFY_LATER);
bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
- bond_arp_send_all(bond, new_slave);
+ bond_send_validate(bond, new_slave);
new_slave->last_link_up = jiffies;
rcu_assign_pointer(bond->current_arp_slave, new_slave);
@@ -3475,9 +3785,11 @@ re_arm:
if (!rtnl_trylock())
return;
- if (should_notify_peers)
+ if (should_notify_peers) {
+ bond->send_peer_notif--;
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
bond->dev);
+ }
if (should_notify_rtnl) {
bond_slave_state_notify(bond);
bond_slave_link_notify(bond);
@@ -3641,6 +3953,9 @@ static int bond_slave_netdev_event(unsigned long event,
/* Propagate to master device */
call_netdevice_notifiers(event, slave->bond->dev);
break;
+ case NETDEV_XDP_FEAT_CHANGE:
+ bond_xdp_set_features(bond_dev);
+ break;
default:
break;
}
@@ -3818,14 +4133,19 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const v
return true;
}
-static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
+static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
{
hash ^= (__force u32)flow_get_u32_dst(flow) ^
(__force u32)flow_get_u32_src(flow);
hash ^= (hash >> 16);
hash ^= (hash >> 8);
+
/* discard lowest hash bit to deal with the common even ports pattern */
- return hash >> 1;
+ if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
+ xmit_policy == BOND_XMIT_POLICY_ENCAP34)
+ return hash >> 1;
+
+ return hash;
}
/* Generate hash based on xmit policy. If @skb is given it is used to linearize
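
The xmit_policy parameter added to bond_ip_hash() above only changes the final bit drop. A standalone sketch of the fold, with an invented policy enum standing in for the BOND_XMIT_POLICY_* constants:

	#include <stdio.h>
	#include <stdint.h>

	enum fake_policy { POLICY_L23, POLICY_L34 };

	static uint32_t fold_ip_hash(uint32_t hash, uint32_t src, uint32_t dst,
				     enum fake_policy policy)
	{
		hash ^= dst ^ src;
		hash ^= hash >> 16;
		hash ^= hash >> 8;

		/* even/odd L4 ports tend to collide, so only the L3+4
		 * policies drop bit 0; pure L2/L3 keeps full entropy */
		if (policy == POLICY_L34)
			return hash >> 1;
		return hash;
	}

	int main(void)
	{
		printf("%#x\n", fold_ip_hash(0x1234, 0x0a000001, 0x0a000002, POLICY_L34));
		printf("%#x\n", fold_ip_hash(0x1234, 0x0a000001, 0x0a000002, POLICY_L23));
		return 0;
	}
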
@@ -3855,7 +4175,7 @@ static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const voi
memcpy(&hash, &flow.ports.ports, sizeof(hash));
}
- return bond_ip_hash(hash, &flow);
+ return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
}
/**
@@ -3872,8 +4192,8 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
skb->l4_hash)
return skb->hash;
- return __bond_xmit_hash(bond, skb, skb->head, skb->protocol,
- skb->mac_header, skb->network_header,
+ return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
+ skb_mac_offset(skb), skb_network_offset(skb),
skb_headlen(skb));
}
@@ -3926,6 +4246,12 @@ static int bond_open(struct net_device *bond_dev)
struct list_head *iter;
struct slave *slave;
+ if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
+ bond->rr_tx_counter = alloc_percpu(u32);
+ if (!bond->rr_tx_counter)
+ return -ENOMEM;
+ }
+
/* reset slave->backup and slave->inactive */
if (bond_has_slaves(bond)) {
bond_for_each_slave(bond, slave, iter) {
@@ -3955,7 +4281,7 @@ static int bond_open(struct net_device *bond_dev)
if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
queue_delayed_work(bond->wq, &bond->arp_work, 0);
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_rcv_validate;
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -3963,6 +4289,9 @@ static int bond_open(struct net_device *bond_dev)
/* register to receive LACPDUs */
bond->recv_probe = bond_3ad_lacpdu_recv;
bond_3ad_initiate_agg_selection(bond, 1);
+
+ bond_for_each_slave(bond, slave, iter)
+ dev_mc_add(slave->dev, lacpdu_mcast_addr);
}
if (bond_mode_can_use_xmit_hash(bond))
@@ -3974,6 +4303,7 @@ static int bond_open(struct net_device *bond_dev)
static int bond_close(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
+ struct slave *slave;
bond_work_cancel_all(bond);
bond->send_peer_notif = 0;
@@ -3981,6 +4311,19 @@ static int bond_close(struct net_device *bond_dev)
bond_alb_deinitialize(bond);
bond->recv_probe = NULL;
+ if (bond_uses_primary(bond)) {
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ rcu_read_unlock();
+ } else {
+ struct list_head *iter;
+
+ bond_for_each_slave(bond, slave, iter)
+ bond_hw_addr_flush(bond_dev, slave->dev);
+ }
+
return 0;
}
@@ -4091,7 +4434,11 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm
{
struct bonding *bond = netdev_priv(bond_dev);
struct mii_ioctl_data *mii = NULL;
- int res;
+ const struct net_device_ops *ops;
+ struct net_device *real_dev;
+ struct hwtstamp_config cfg;
+ struct ifreq ifrr;
+ int res = 0;
netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);
@@ -4117,7 +4464,40 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm
mii->val_out = BMSR_LSTATUS;
}
- return 0;
+ break;
+ case SIOCSHWTSTAMP:
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ if (!(cfg.flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX))
+ return -EOPNOTSUPP;
+
+ fallthrough;
+ case SIOCGHWTSTAMP:
+ real_dev = bond_option_active_slave_get_rcu(bond);
+ if (!real_dev)
+ return -EOPNOTSUPP;
+
+ strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
+ ifrr.ifr_ifru = ifr->ifr_ifru;
+
+ ops = real_dev->netdev_ops;
+ if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) {
+ res = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd);
+ if (res)
+ return res;
+
+ ifr->ifr_ifru = ifrr.ifr_ifru;
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* Set the BOND_PHC_INDEX flag to notify user space */
+ cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ?
+ -EFAULT : 0;
+ }
+ fallthrough;
default:
res = -EOPNOTSUPP;
}
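
For the timestamping pass-through above, user space is expected to talk to the bond itself and look for HWTSTAMP_FLAG_BONDED_PHC_INDEX in the flags returned by the active slave. A rough usage example, assuming an interface named bond0 and leaving out most error handling:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct hwtstamp_config cfg = { 0 };
		struct ifreq ifr = { 0 };
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;

		strncpy(ifr.ifr_name, "bond0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		/* query hw timestamping through the bond; the driver forwards
		 * this to the active slave's ndo_eth_ioctl */
		if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0 &&
		    (cfg.flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX))
			printf("config came from the bond's active slave\n");

		close(fd);
		return 0;
	}

Note that a SIOCSHWTSTAMP request must set HWTSTAMP_FLAG_BONDED_PHC_INDEX itself, per the check in the hunk, so callers opt in to the fact that the PHC index can change on failover.
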
@@ -4490,7 +4870,7 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
switch (packets_per_slave) {
case 0:
- slave_id = prandom_u32();
+ slave_id = get_random_u32();
break;
case 1:
slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
@@ -4843,25 +5223,39 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave = NULL;
struct list_head *iter;
+ bool xmit_suc = false;
+ bool skb_used = false;
bond_for_each_slave_rcu(bond, slave, iter) {
- if (bond_is_last_slave(bond, slave))
- break;
- if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
- struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+ struct sk_buff *skb2;
+
+ if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
+ continue;
+ if (bond_is_last_slave(bond, slave)) {
+ skb2 = skb;
+ skb_used = true;
+ } else {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
bond_dev->name, __func__);
continue;
}
- bond_dev_queue_xmit(bond, skb2, slave->dev);
}
+
+ if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK)
+ xmit_suc = true;
}
- if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
- return bond_dev_queue_xmit(bond, skb, slave->dev);
- return bond_tx_drop(bond_dev, skb);
+ if (!skb_used)
+ dev_kfree_skb_any(skb);
+
+ if (xmit_suc)
+ return NETDEV_TX_OK;
+
+ dev_core_stats_tx_dropped_inc(bond_dev);
+ return NET_XMIT_DROP;
}
/*------------------------- Device initialization ---------------------------*/
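
The broadcast-mode rewrite above clones the skb for every up slave except the last one, which consumes the original, and reports NETDEV_TX_OK if any transmit succeeded. A toy userspace model of that bookkeeping, with heap strings standing in for sk_buffs:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <stdbool.h>

	static bool fake_xmit(int slave, const char *buf)
	{
		printf("slave %d sends: %s\n", slave, buf);
		return true;
	}

	int main(void)
	{
		const int nr_up = 3;
		char *pkt = strdup("payload");
		bool sent = false;
		int i;

		if (!pkt)
			return 1;

		for (i = 0; i < nr_up; i++) {
			/* last up member takes the original, the rest get clones */
			char *copy = (i == nr_up - 1) ? pkt : strdup(pkt);

			if (!copy)
				continue;
			if (fake_xmit(i, copy))
				sent = true;
			if (copy != pkt)
				free(copy);
		}

		free(pkt);
		printf("%s\n", sent ? "NETDEV_TX_OK" : "NET_XMIT_DROP");
		return 0;
	}
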
@@ -4961,7 +5355,7 @@ static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
switch (sk->sk_family) {
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- if (sk->sk_ipv6only ||
+ if (ipv6_only_sock(sk) ||
ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
@@ -4999,7 +5393,7 @@ static u32 bond_sk_hash_l34(struct sock *sk)
/* L4 */
memcpy(&hash, &flow.ports.ports, sizeof(hash));
/* L3 */
- return bond_ip_hash(hash, &flow);
+ return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
}
static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
@@ -5039,8 +5433,14 @@ static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
struct net_device *dev)
{
- if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev)))
- return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev);
+ struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
+
+ /* tls_netdev might become NULL, even if tls_is_sk_tx_device_offloaded
+ * was true, if tls_device_down is running in parallel, but it's OK,
+ * because bond_get_slave_by_dev has a NULL check.
+ */
+ if (likely(bond_get_slave_by_dev(bond, tls_netdev)))
+ return bond_dev_queue_xmit(bond, skb, tls_netdev);
return bond_tx_drop(dev, skb);
}
#endif
@@ -5314,15 +5714,82 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
BOND_ABI_VERSION);
}
+static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
+ struct ethtool_ts_info *info)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct ethtool_ts_info ts_info;
+ const struct ethtool_ops *ops;
+ struct net_device *real_dev;
+ bool sw_tx_support = false;
+ struct phy_device *phydev;
+ struct list_head *iter;
+ struct slave *slave;
+ int ret = 0;
+
+ rcu_read_lock();
+ real_dev = bond_option_active_slave_get_rcu(bond);
+ dev_hold(real_dev);
+ rcu_read_unlock();
+
+ if (real_dev) {
+ ops = real_dev->ethtool_ops;
+ phydev = real_dev->phydev;
+
+ if (phy_has_tsinfo(phydev)) {
+ ret = phy_ts_info(phydev, info);
+ goto out;
+ } else if (ops->get_ts_info) {
+ ret = ops->get_ts_info(real_dev, info);
+ goto out;
+ }
+ } else {
+ /* Check if all slaves support software tx timestamping */
+ rcu_read_lock();
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ ret = -1;
+ ops = slave->dev->ethtool_ops;
+ phydev = slave->dev->phydev;
+
+ if (phy_has_tsinfo(phydev))
+ ret = phy_ts_info(phydev, &ts_info);
+ else if (ops->get_ts_info)
+ ret = ops->get_ts_info(slave->dev, &ts_info);
+
+ if (!ret && (ts_info.so_timestamping & SOF_TIMESTAMPING_TX_SOFTWARE)) {
+ sw_tx_support = true;
+ continue;
+ }
+
+ sw_tx_support = false;
+ break;
+ }
+ rcu_read_unlock();
+ }
+
+ ret = 0;
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ if (sw_tx_support)
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
+
+ info->phc_index = -1;
+
+out:
+ dev_put(real_dev);
+ return ret;
+}
+
static const struct ethtool_ops bond_ethtool_ops = {
.get_drvinfo = bond_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_link_ksettings = bond_ethtool_get_link_ksettings,
+ .get_ts_info = bond_ethtool_get_ts_info,
};
static const struct net_device_ops bond_netdev_ops = {
@@ -5433,10 +5900,9 @@ void bond_setup(struct net_device *bond_dev)
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
bond_dev->features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
- if (bond_sk_check(bond))
- bond_dev->features |= BOND_TLS_FEATURES;
-#endif
+
+ if (bond_xdp_check(bond))
+ bond_dev->xdp_features = NETDEV_XDP_ACT_MASK;
}
/* Destroy a bonding device.
@@ -5822,6 +6288,7 @@ static int bond_check_params(struct bond_params *params)
params->arp_interval = arp_interval;
params->arp_validate = arp_validate_value;
params->arp_all_targets = arp_all_targets_value;
+ params->missed_max = 2;
params->updelay = updelay;
params->downdelay = downdelay;
params->peer_notif_delay = 0;
@@ -5856,6 +6323,9 @@ static int bond_check_params(struct bond_params *params)
strscpy_pad(params->primary, primary, sizeof(params->primary));
memcpy(params->arp_targets, arp_target, sizeof(arp_target));
+#if IS_ENABLED(CONFIG_IPV6)
+ memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS);
+#endif
return 0;
}
@@ -5872,15 +6342,6 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
- if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
- bond->rr_tx_counter = alloc_percpu(u32);
- if (!bond->rr_tx_counter) {
- destroy_workqueue(bond->wq);
- bond->wq = NULL;
- return -ENOMEM;
- }
- }
-
spin_lock_init(&bond->stats_lock);
netdev_lockdep_set_classes(bond_dev);
@@ -5912,45 +6373,33 @@ int bond_create(struct net *net, const char *name)
{
struct net_device *bond_dev;
struct bonding *bond;
- struct alb_bond_info *bond_info;
- int res;
+ int res = -ENOMEM;
rtnl_lock();
bond_dev = alloc_netdev_mq(sizeof(struct bonding),
name ? name : "bond%d", NET_NAME_UNKNOWN,
bond_setup, tx_queues);
- if (!bond_dev) {
- pr_err("%s: eek! can't alloc netdev!\n", name);
- rtnl_unlock();
- return -ENOMEM;
- }
+ if (!bond_dev)
+ goto out;
- /*
- * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
- * It is set to 0 by default which is wrong.
- */
bond = netdev_priv(bond_dev);
- bond_info = &(BOND_ALB_INFO(bond));
- bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
-
dev_net_set(bond_dev, net);
bond_dev->rtnl_link_ops = &bond_link_ops;
res = register_netdevice(bond_dev);
if (res < 0) {
free_netdev(bond_dev);
- rtnl_unlock();
-
- return res;
+ goto out;
}
netif_carrier_off(bond_dev);
bond_work_init_all(bond);
+out:
rtnl_unlock();
- return 0;
+ return res;
}
static int __net_init bond_net_init(struct net *net)
@@ -5966,27 +6415,38 @@ static int __net_init bond_net_init(struct net *net)
return 0;
}
-static void __net_exit bond_net_exit(struct net *net)
+static void __net_exit bond_net_exit_batch(struct list_head *net_list)
{
- struct bond_net *bn = net_generic(net, bond_net_id);
- struct bonding *bond, *tmp_bond;
+ struct bond_net *bn;
+ struct net *net;
LIST_HEAD(list);
- bond_destroy_sysfs(bn);
+ list_for_each_entry(net, net_list, exit_list) {
+ bn = net_generic(net, bond_net_id);
+ bond_destroy_sysfs(bn);
+ }
/* Kill off any bonds created after unregistering bond rtnl ops */
rtnl_lock();
- list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
- unregister_netdevice_queue(bond->dev, &list);
+ list_for_each_entry(net, net_list, exit_list) {
+ struct bonding *bond, *tmp_bond;
+
+ bn = net_generic(net, bond_net_id);
+ list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
+ unregister_netdevice_queue(bond->dev, &list);
+ }
unregister_netdevice_many(&list);
rtnl_unlock();
- bond_destroy_proc_dir(bn);
+ list_for_each_entry(net, net_list, exit_list) {
+ bn = net_generic(net, bond_net_id);
+ bond_destroy_proc_dir(bn);
+ }
}
static struct pernet_operations bond_net_ops = {
.init = bond_net_init,
- .exit = bond_net_exit,
+ .exit_batch = bond_net_exit_batch,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
};
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 5d54e11d18fa..27cbe148f0db 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -14,6 +14,7 @@
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <net/bonding.h>
+#include <net/ipv6.h>
static size_t bond_get_slave_size(const struct net_device *bond_dev,
const struct net_device *slave_dev)
@@ -26,6 +27,7 @@ static size_t bond_get_slave_size(const struct net_device *bond_dev,
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_AGGREGATOR_ID */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE */
nla_total_size(sizeof(u16)) + /* IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE */
+ nla_total_size(sizeof(s32)) + /* IFLA_BOND_SLAVE_PRIO */
0;
}
@@ -52,6 +54,9 @@ static int bond_fill_slave_info(struct sk_buff *skb,
if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
goto nla_put_failure;
+ if (nla_put_s32(skb, IFLA_BOND_SLAVE_PRIO, slave->prio))
+ goto nla_put_failure;
+
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
const struct aggregator *agg;
const struct port *ad_port;
@@ -79,6 +84,11 @@ nla_put_failure:
return -EMSGSIZE;
}
+/* Limit the max delay range to 300s */
+static struct netlink_range_validation delay_range = {
+ .max = 300000,
+};
+
static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_MODE] = { .type = NLA_U8 },
[IFLA_BOND_ACTIVE_SLAVE] = { .type = NLA_U32 },
@@ -109,11 +119,14 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
[IFLA_BOND_AD_ACTOR_SYSTEM] = { .type = NLA_BINARY,
.len = ETH_ALEN },
[IFLA_BOND_TLB_DYNAMIC_LB] = { .type = NLA_U8 },
- [IFLA_BOND_PEER_NOTIF_DELAY] = { .type = NLA_U32 },
+ [IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
+ [IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
+ [IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
};
static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
[IFLA_BOND_SLAVE_QUEUE_ID] = { .type = NLA_U16 },
+ [IFLA_BOND_SLAVE_PRIO] = { .type = NLA_S32 },
};
static int bond_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -148,7 +161,18 @@ static int bond_slave_changelink(struct net_device *bond_dev,
snprintf(queue_id_str, sizeof(queue_id_str), "%s:%u\n",
slave_dev->name, queue_id);
bond_opt_initstr(&newval, queue_id_str);
- err = __bond_opt_set(bond, BOND_OPT_QUEUE_ID, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_QUEUE_ID, &newval,
+ data[IFLA_BOND_SLAVE_QUEUE_ID], extack);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BOND_SLAVE_PRIO]) {
+ int prio = nla_get_s32(data[IFLA_BOND_SLAVE_PRIO]);
+
+ bond_opt_slave_initval(&newval, &slave_dev, prio);
+ err = __bond_opt_set(bond, BOND_OPT_PRIO, &newval,
+ data[IFLA_BOND_SLAVE_PRIO], extack);
if (err)
return err;
}
@@ -172,7 +196,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int mode = nla_get_u8(data[IFLA_BOND_MODE]);
bond_opt_initval(&newval, mode);
- err = __bond_opt_set(bond, BOND_OPT_MODE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_MODE, &newval,
+ data[IFLA_BOND_MODE], extack);
if (err)
return err;
}
@@ -189,7 +214,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
active_slave = slave_dev->name;
}
bond_opt_initstr(&newval, active_slave);
- err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ACTIVE_SLAVE, &newval,
+ data[IFLA_BOND_ACTIVE_SLAVE], extack);
if (err)
return err;
}
@@ -197,7 +223,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
miimon = nla_get_u32(data[IFLA_BOND_MIIMON]);
bond_opt_initval(&newval, miimon);
- err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_MIIMON, &newval,
+ data[IFLA_BOND_MIIMON], extack);
if (err)
return err;
}
@@ -205,7 +232,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int updelay = nla_get_u32(data[IFLA_BOND_UPDELAY]);
bond_opt_initval(&newval, updelay);
- err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_UPDELAY, &newval,
+ data[IFLA_BOND_UPDELAY], extack);
if (err)
return err;
}
@@ -213,7 +241,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int downdelay = nla_get_u32(data[IFLA_BOND_DOWNDELAY]);
bond_opt_initval(&newval, downdelay);
- err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_DOWNDELAY, &newval,
+ data[IFLA_BOND_DOWNDELAY], extack);
if (err)
return err;
}
@@ -221,7 +250,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int delay = nla_get_u32(data[IFLA_BOND_PEER_NOTIF_DELAY]);
bond_opt_initval(&newval, delay);
- err = __bond_opt_set(bond, BOND_OPT_PEER_NOTIF_DELAY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PEER_NOTIF_DELAY, &newval,
+ data[IFLA_BOND_PEER_NOTIF_DELAY], extack);
if (err)
return err;
}
@@ -229,7 +259,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int use_carrier = nla_get_u8(data[IFLA_BOND_USE_CARRIER]);
bond_opt_initval(&newval, use_carrier);
- err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_USE_CARRIER, &newval,
+ data[IFLA_BOND_USE_CARRIER], extack);
if (err)
return err;
}
@@ -237,12 +268,14 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
if (arp_interval && miimon) {
- netdev_err(bond->dev, "ARP monitoring cannot be used with MII monitoring\n");
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_BOND_ARP_INTERVAL],
+ "ARP monitoring cannot be used with MII monitoring");
return -EINVAL;
}
bond_opt_initval(&newval, arp_interval);
- err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_INTERVAL, &newval,
+ data[IFLA_BOND_ARP_INTERVAL], extack);
if (err)
return err;
}
@@ -261,7 +294,9 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
bond_opt_initval(&newval, (__force u64)target);
err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
- &newval);
+ &newval,
+ data[IFLA_BOND_ARP_IP_TARGET],
+ extack);
if (err)
break;
i++;
@@ -271,16 +306,49 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
if (err)
return err;
}
+#if IS_ENABLED(CONFIG_IPV6)
+ if (data[IFLA_BOND_NS_IP6_TARGET]) {
+ struct nlattr *attr;
+ int i = 0, rem;
+
+ bond_option_ns_ip6_targets_clear(bond);
+ nla_for_each_nested(attr, data[IFLA_BOND_NS_IP6_TARGET], rem) {
+ struct in6_addr addr6;
+
+ if (nla_len(attr) < sizeof(addr6)) {
+ NL_SET_ERR_MSG(extack, "Invalid IPv6 address");
+ return -EINVAL;
+ }
+
+ addr6 = nla_get_in6_addr(attr);
+
+ bond_opt_initextra(&newval, &addr6, sizeof(addr6));
+ err = __bond_opt_set(bond, BOND_OPT_NS_TARGETS,
+ &newval,
+ data[IFLA_BOND_NS_IP6_TARGET],
+ extack);
+ if (err)
+ break;
+ i++;
+ }
+ if (i == 0 && bond->params.arp_interval)
+ netdev_warn(bond->dev, "Removing last ns target with arp_interval on\n");
+ if (err)
+ return err;
+ }
+#endif
if (data[IFLA_BOND_ARP_VALIDATE]) {
int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
if (arp_validate && miimon) {
- netdev_err(bond->dev, "ARP validating cannot be used with MII monitoring\n");
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_BOND_ARP_INTERVAL],
+ "ARP validating cannot be used with MII monitoring");
return -EINVAL;
}
bond_opt_initval(&newval, arp_validate);
- err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_VALIDATE, &newval,
+ data[IFLA_BOND_ARP_VALIDATE], extack);
if (err)
return err;
}
@@ -289,7 +357,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_ARP_ALL_TARGETS]);
bond_opt_initval(&newval, arp_all_targets);
- err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ARP_ALL_TARGETS, &newval,
+ data[IFLA_BOND_ARP_ALL_TARGETS], extack);
if (err)
return err;
}
@@ -303,7 +372,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
primary = dev->name;
bond_opt_initstr(&newval, primary);
- err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PRIMARY, &newval,
+ data[IFLA_BOND_PRIMARY], extack);
if (err)
return err;
}
@@ -312,7 +382,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_PRIMARY_RESELECT]);
bond_opt_initval(&newval, primary_reselect);
- err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PRIMARY_RESELECT, &newval,
+ data[IFLA_BOND_PRIMARY_RESELECT], extack);
if (err)
return err;
}
@@ -321,7 +392,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_FAIL_OVER_MAC]);
bond_opt_initval(&newval, fail_over_mac);
- err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_FAIL_OVER_MAC, &newval,
+ data[IFLA_BOND_FAIL_OVER_MAC], extack);
if (err)
return err;
}
@@ -330,7 +402,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_XMIT_HASH_POLICY]);
bond_opt_initval(&newval, xmit_hash_policy);
- err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_XMIT_HASH, &newval,
+ data[IFLA_BOND_XMIT_HASH_POLICY], extack);
if (err)
return err;
}
@@ -339,7 +412,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_RESEND_IGMP]);
bond_opt_initval(&newval, resend_igmp);
- err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_RESEND_IGMP, &newval,
+ data[IFLA_BOND_RESEND_IGMP], extack);
if (err)
return err;
}
@@ -348,7 +422,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_NUM_PEER_NOTIF]);
bond_opt_initval(&newval, num_peer_notif);
- err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_NUM_PEER_NOTIF, &newval,
+ data[IFLA_BOND_NUM_PEER_NOTIF], extack);
if (err)
return err;
}
@@ -357,7 +432,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_ALL_SLAVES_ACTIVE]);
bond_opt_initval(&newval, all_slaves_active);
- err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_ALL_SLAVES_ACTIVE, &newval,
+ data[IFLA_BOND_ALL_SLAVES_ACTIVE], extack);
if (err)
return err;
}
@@ -366,7 +442,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_MIN_LINKS]);
bond_opt_initval(&newval, min_links);
- err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_MINLINKS, &newval,
+ data[IFLA_BOND_MIN_LINKS], extack);
if (err)
return err;
}
@@ -375,7 +452,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_LP_INTERVAL]);
bond_opt_initval(&newval, lp_interval);
- err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_LP_INTERVAL, &newval,
+ data[IFLA_BOND_LP_INTERVAL], extack);
if (err)
return err;
}
@@ -384,7 +462,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u32(data[IFLA_BOND_PACKETS_PER_SLAVE]);
bond_opt_initval(&newval, packets_per_slave);
- err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_PACKETS_PER_SLAVE, &newval,
+ data[IFLA_BOND_PACKETS_PER_SLAVE], extack);
if (err)
return err;
}
@@ -393,7 +472,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int lacp_active = nla_get_u8(data[IFLA_BOND_AD_LACP_ACTIVE]);
bond_opt_initval(&newval, lacp_active);
- err = __bond_opt_set(bond, BOND_OPT_LACP_ACTIVE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_LACP_ACTIVE, &newval,
+ data[IFLA_BOND_AD_LACP_ACTIVE], extack);
if (err)
return err;
}
@@ -403,7 +483,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_AD_LACP_RATE]);
bond_opt_initval(&newval, lacp_rate);
- err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_LACP_RATE, &newval,
+ data[IFLA_BOND_AD_LACP_RATE], extack);
if (err)
return err;
}
@@ -412,7 +493,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u8(data[IFLA_BOND_AD_SELECT]);
bond_opt_initval(&newval, ad_select);
- err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_SELECT, &newval,
+ data[IFLA_BOND_AD_SELECT], extack);
if (err)
return err;
}
@@ -421,7 +503,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u16(data[IFLA_BOND_AD_ACTOR_SYS_PRIO]);
bond_opt_initval(&newval, actor_sys_prio);
- err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYS_PRIO, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYS_PRIO, &newval,
+ data[IFLA_BOND_AD_ACTOR_SYS_PRIO], extack);
if (err)
return err;
}
@@ -430,7 +513,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]);
bond_opt_initval(&newval, port_key);
- err = __bond_opt_set(bond, BOND_OPT_AD_USER_PORT_KEY, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_USER_PORT_KEY, &newval,
+ data[IFLA_BOND_AD_USER_PORT_KEY], extack);
if (err)
return err;
}
@@ -440,7 +524,8 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
bond_opt_initval(&newval,
nla_get_u64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
- err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval,
+ data[IFLA_BOND_AD_ACTOR_SYSTEM], extack);
if (err)
return err;
}
@@ -448,7 +533,18 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
int dynamic_lb = nla_get_u8(data[IFLA_BOND_TLB_DYNAMIC_LB]);
bond_opt_initval(&newval, dynamic_lb);
- err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval);
+ err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval,
+ data[IFLA_BOND_TLB_DYNAMIC_LB], extack);
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BOND_MISSED_MAX]) {
+ int missed_max = nla_get_u8(data[IFLA_BOND_MISSED_MAX]);
+
+ bond_opt_initval(&newval, missed_max);
+ err = __bond_opt_set(bond, BOND_OPT_MISSED_MAX, &newval,
+ data[IFLA_BOND_MISSED_MAX], extack);
if (err)
return err;
}
@@ -515,6 +611,10 @@ static size_t bond_get_size(const struct net_device *bond_dev)
nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
nla_total_size(sizeof(u32)) + /* IFLA_BOND_PEER_NOTIF_DELAY */
+ nla_total_size(sizeof(u8)) + /* IFLA_BOND_MISSED_MAX */
+ /* IFLA_BOND_NS_IP6_TARGET */
+ nla_total_size(sizeof(struct nlattr)) +
+ nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS +
0;
}
@@ -592,6 +692,26 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.arp_all_targets))
goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
+ targets = nla_nest_start(skb, IFLA_BOND_NS_IP6_TARGET);
+ if (!targets)
+ goto nla_put_failure;
+
+ targets_added = 0;
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++) {
+ if (!ipv6_addr_any(&bond->params.ns_targets[i])) {
+ if (nla_put_in6_addr(skb, i, &bond->params.ns_targets[i]))
+ goto nla_put_failure;
+ targets_added = 1;
+ }
+ }
+
+ if (targets_added)
+ nla_nest_end(skb, targets);
+ else
+ nla_nest_cancel(skb, targets);
+#endif
+
primary = rtnl_dereference(bond->primary_slave);
if (primary &&
nla_put_u32(skb, IFLA_BOND_PRIMARY, primary->dev->ifindex))
@@ -650,6 +770,10 @@ static int bond_fill_info(struct sk_buff *skb,
bond->params.tlb_dynamic_lb))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_BOND_MISSED_MAX,
+ bond->params.missed_max))
+ goto nla_put_failure;
+
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info info;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index a8fde3bc458f..f3f27f0bd2a6 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -34,10 +34,14 @@ static int bond_option_arp_ip_target_add(struct bonding *bond, __be32 target);
static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target);
static int bond_option_arp_ip_targets_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_arp_all_targets_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static int bond_option_primary_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_primary_reselect_set(struct bonding *bond,
@@ -78,6 +82,8 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
const struct bond_opt_value *newval);
static int bond_option_ad_user_port_key_set(struct bonding *bond,
const struct bond_opt_value *newval);
+static int bond_option_missed_max_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
static const struct bond_opt_value bond_mode_tbl[] = {
@@ -163,6 +169,12 @@ static const struct bond_opt_value bond_num_peer_notif_tbl[] = {
{ NULL, -1, 0}
};
+static const struct bond_opt_value bond_peer_notif_delay_tbl[] = {
+ { "off", 0, 0},
+ { "maxval", 300000, BOND_VALFLAG_MAX},
+ { NULL, -1, 0}
+};
+
static const struct bond_opt_value bond_primary_reselect_tbl[] = {
{ "always", BOND_PRI_RESELECT_ALWAYS, BOND_VALFLAG_DEFAULT},
{ "better", BOND_PRI_RESELECT_BETTER, 0},
@@ -213,6 +225,13 @@ static const struct bond_opt_value bond_ad_user_port_key_tbl[] = {
{ NULL, -1, 0},
};
+static const struct bond_opt_value bond_missed_max_tbl[] = {
+ { "minval", 1, BOND_VALFLAG_MIN},
+ { "maxval", 255, BOND_VALFLAG_MAX},
+ { "default", 2, BOND_VALFLAG_DEFAULT},
+ { NULL, -1, 0},
+};
+
static const struct bond_option bond_opts[BOND_OPT_LAST] = {
[BOND_OPT_MODE] = {
.id = BOND_OPT_MODE,
@@ -270,6 +289,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_intmax_tbl,
.set = bond_option_arp_interval_set
},
+ [BOND_OPT_MISSED_MAX] = {
+ .id = BOND_OPT_MISSED_MAX,
+ .name = "arp_missed_max",
+		.desc = "Maximum number of missed ARP intervals",
+ .unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB),
+ .values = bond_missed_max_tbl,
+ .set = bond_option_missed_max_set
+ },
[BOND_OPT_ARP_TARGETS] = {
.id = BOND_OPT_ARP_TARGETS,
.name = "arp_ip_target",
@@ -277,6 +305,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.flags = BOND_OPTFLAG_RAWVAL,
.set = bond_option_arp_ip_targets_set
},
+ [BOND_OPT_NS_TARGETS] = {
+ .id = BOND_OPT_NS_TARGETS,
+ .name = "ns_ip6_target",
+ .desc = "NS targets in ffff:ffff::ffff:ffff form",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .set = bond_option_ns_ip6_targets_set
+ },
[BOND_OPT_DOWNDELAY] = {
.id = BOND_OPT_DOWNDELAY,
.name = "downdelay",
@@ -338,6 +373,16 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.values = bond_intmax_tbl,
.set = bond_option_miimon_set
},
+ [BOND_OPT_PRIO] = {
+ .id = BOND_OPT_PRIO,
+ .name = "prio",
+ .desc = "Link priority for failover re-selection",
+ .flags = BOND_OPTFLAG_RAWVAL,
+ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP) |
+ BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB)),
+ .set = bond_option_prio_set
+ },
[BOND_OPT_PRIMARY] = {
.id = BOND_OPT_PRIMARY,
.name = "primary",
@@ -449,7 +494,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
.id = BOND_OPT_PEER_NOTIF_DELAY,
.name = "peer_notif_delay",
.desc = "Delay between each peer notification on failover event, in milliseconds",
- .values = bond_intmax_tbl,
+ .values = bond_peer_notif_delay_tbl,
.set = bond_option_peer_notif_delay_set
}
};
@@ -605,27 +650,35 @@ static int bond_opt_check_deps(struct bonding *bond,
}
static void bond_opt_dep_print(struct bonding *bond,
- const struct bond_option *opt)
+ const struct bond_option *opt,
+ struct nlattr *bad_attr,
+ struct netlink_ext_ack *extack)
{
const struct bond_opt_value *modeval;
struct bond_params *params;
params = &bond->params;
modeval = bond_opt_get_val(BOND_OPT_MODE, params->mode);
- if (test_bit(params->mode, &opt->unsuppmodes))
+ if (test_bit(params->mode, &opt->unsuppmodes)) {
netdev_err(bond->dev, "option %s: mode dependency failed, not supported in mode %s(%llu)\n",
opt->name, modeval->string, modeval->value);
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "option not supported in mode");
+ }
}
static void bond_opt_error_interpret(struct bonding *bond,
const struct bond_option *opt,
- int error, const struct bond_opt_value *val)
+ int error, const struct bond_opt_value *val,
+ struct nlattr *bad_attr,
+ struct netlink_ext_ack *extack)
{
const struct bond_opt_value *minval, *maxval;
char *p;
switch (error) {
case -EINVAL:
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr, "invalid option value");
if (val) {
if (val->string) {
/* sometimes RAWVAL opts may have new lines */
@@ -647,13 +700,17 @@ static void bond_opt_error_interpret(struct bonding *bond,
opt->name, minval ? minval->value : 0, maxval->value);
break;
case -EACCES:
- bond_opt_dep_print(bond, opt);
+ bond_opt_dep_print(bond, opt, bad_attr, extack);
break;
case -ENOTEMPTY:
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "unable to set option because the bond device has slaves");
netdev_err(bond->dev, "option %s: unable to set because the bond device has slaves\n",
opt->name);
break;
case -EBUSY:
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "unable to set option because the bond is up");
netdev_err(bond->dev, "option %s: unable to set because the bond device is up\n",
opt->name);
break;
@@ -664,6 +721,8 @@ static void bond_opt_error_interpret(struct bonding *bond,
*p = '\0';
netdev_err(bond->dev, "option %s: interface %s does not exist!\n",
opt->name, val->string);
+ NL_SET_ERR_MSG_ATTR(extack, bad_attr,
+ "interface does not exist");
}
break;
default:
@@ -676,13 +735,17 @@ static void bond_opt_error_interpret(struct bonding *bond,
* @bond: target bond device
* @option: option to set
* @val: value to set it to
+ * @bad_attr: netlink attribute that caused the error
+ * @extack: extended netlink error structure, used when an error message
+ * needs to be returned to the caller via netlink
*
* This function is used to change the bond's option value, it can be
* used for both enabling/changing an option and for disabling it. RTNL lock
* must be obtained before calling this function.
*/
int __bond_opt_set(struct bonding *bond,
- unsigned int option, struct bond_opt_value *val)
+ unsigned int option, struct bond_opt_value *val,
+ struct nlattr *bad_attr, struct netlink_ext_ack *extack)
{
const struct bond_opt_value *retval = NULL;
const struct bond_option *opt;
@@ -704,7 +767,7 @@ int __bond_opt_set(struct bonding *bond,
ret = opt->set(bond, retval);
out:
if (ret)
- bond_opt_error_interpret(bond, opt, ret, val);
+ bond_opt_error_interpret(bond, opt, ret, val, bad_attr, extack);
return ret;
}
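
With the extra bad_attr/extack arguments, __bond_opt_set() can hand an attribute-scoped, human-readable error back to the netlink caller instead of only logging to dmesg; __bond_opt_set_notify() passes NULL for both and keeps its old behaviour. A minimal sketch of the reporting pattern used in bond_opt_error_interpret() above (example_opt_check is hypothetical):

#include <net/netlink.h>

/* Hypothetical validation helper: the message is attached to the
 * offending attribute via extack and shows up in userspace (e.g. in
 * "ip link" output), while -EINVAL is still returned to the caller.
 */
static int example_opt_check(struct nlattr *attr,
			     struct netlink_ext_ack *extack)
{
	if (nla_get_u8(attr) == 0) {
		NL_SET_ERR_MSG_ATTR(extack, attr, "value must be non-zero");
		return -EINVAL;
	}

	return 0;
}
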
@@ -726,7 +789,7 @@ int __bond_opt_set_notify(struct bonding *bond,
ASSERT_RTNL();
- ret = __bond_opt_set(bond, option, val);
+ ret = __bond_opt_set(bond, option, val, NULL, NULL);
if (!ret && (bond->dev->reg_state == NETREG_REGISTERED))
call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
@@ -785,19 +848,6 @@ static bool bond_set_xfrm_features(struct bonding *bond)
return true;
}
-static bool bond_set_tls_features(struct bonding *bond)
-{
- if (!IS_ENABLED(CONFIG_TLS_DEVICE))
- return false;
-
- if (bond_sk_check(bond))
- bond->dev->wanted_features |= BOND_TLS_FEATURES;
- else
- bond->dev->wanted_features &= ~BOND_TLS_FEATURES;
-
- return true;
-}
-
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -828,12 +878,13 @@ static int bond_option_mode_set(struct bonding *bond,
bool update = false;
update |= bond_set_xfrm_features(bond);
- update |= bond_set_tls_features(bond);
if (update)
netdev_update_features(bond->dev);
}
+ bond_xdp_set_features(bond->dev);
+
return 0;
}
@@ -1034,7 +1085,7 @@ static int bond_option_arp_interval_set(struct bonding *bond,
cancel_delayed_work_sync(&bond->arp_work);
} else {
/* arp_validate can be set only in active-backup mode */
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_rcv_validate;
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
@@ -1166,6 +1217,71 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
return ret;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot,
+ struct in6_addr *target,
+ unsigned long last_rx)
+{
+ struct in6_addr *targets = bond->params.ns_targets;
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) {
+ bond_for_each_slave(bond, slave, iter)
+ slave->target_last_arp_rx[slot] = last_rx;
+ targets[slot] = *target;
+ }
+}
+
+void bond_option_ns_ip6_targets_clear(struct bonding *bond)
+{
+ struct in6_addr addr_any = in6addr_any;
+ int i;
+
+ for (i = 0; i < BOND_MAX_NS_TARGETS; i++)
+ _bond_options_ns_ip6_target_set(bond, i, &addr_any, 0);
+}
+
+static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ struct in6_addr *target = (struct in6_addr *)newval->extra;
+ struct in6_addr *targets = bond->params.ns_targets;
+ struct in6_addr addr_any = in6addr_any;
+ int index;
+
+ if (!bond_is_ip6_target_ok(target)) {
+ netdev_err(bond->dev, "invalid NS target %pI6c specified for addition\n",
+ target);
+ return -EINVAL;
+ }
+
+ if (bond_get_targets_ip6(targets, target) != -1) { /* dup */
+ netdev_err(bond->dev, "NS target %pI6c is already present\n",
+ target);
+ return -EINVAL;
+ }
+
+ index = bond_get_targets_ip6(targets, &addr_any); /* first free slot */
+ if (index == -1) {
+ netdev_err(bond->dev, "NS target table is full!\n");
+ return -EINVAL;
+ }
+
+ netdev_dbg(bond->dev, "Adding NS target %pI6c\n", target);
+
+ _bond_options_ns_ip6_target_set(bond, index, target, jiffies);
+
+ return 0;
+}
+#else
+static int bond_option_ns_ip6_targets_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ return -EPERM;
+}
+#endif
+
static int bond_option_arp_validate_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -1186,6 +1302,37 @@ static int bond_option_arp_all_targets_set(struct bonding *bond,
return 0;
}
+static int bond_option_missed_max_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ netdev_dbg(bond->dev, "Setting missed max to %s (%llu)\n",
+ newval->string, newval->value);
+ bond->params.missed_max = newval->value;
+
+ return 0;
+}
+
+static int bond_option_prio_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+{
+ struct slave *slave;
+
+ slave = bond_slave_get_rtnl(newval->slave_dev);
+ if (!slave) {
+ netdev_dbg(newval->slave_dev, "%s called on NULL slave\n", __func__);
+ return -ENODEV;
+ }
+ slave->prio = newval->value;
+
+ if (rtnl_dereference(bond->primary_slave))
+ slave_warn(bond->dev, slave->dev,
+ "prio updated, but will not affect failover re-selection as primary slave have been set\n");
+ else
+ bond_select_active_slave(bond);
+
+ return 0;
+}
+
static int bond_option_primary_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
@@ -1265,10 +1412,6 @@ static int bond_option_xmit_hash_policy_set(struct bonding *bond,
newval->string, newval->value);
bond->params.xmit_policy = newval->value;
- if (bond->dev->reg_state == NETREG_REGISTERED)
- if (bond_set_tls_features(bond))
- netdev_update_features(bond->dev);
-
return 0;
}
@@ -1526,7 +1669,7 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
mac = (u8 *)&newval->value;
}
- if (!is_valid_ether_addr(mac))
+ if (is_multicast_ether_addr(mac))
goto err;
netdev_dbg(bond->dev, "Setting ad_actor_system to %pM\n", mac);
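
The final bond_options.c hunk relaxes the ad_actor_system check from is_valid_ether_addr() to a multicast-only test, presumably so the all-zeroes address can be written again while multicast addresses stay rejected. A sketch of the acceptance rule after the change, assuming the usual etherdevice helper semantics:

#include <linux/etherdevice.h>

/* is_valid_ether_addr() rejects both 00:00:00:00:00:00 and multicast
 * addresses; the new test below rejects only multicast ones, so the
 * all-zeroes address now passes.
 */
static bool ad_actor_system_acceptable(const u8 *mac)
{
	return !is_multicast_ether_addr(mac);
}
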
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index f3e3bfd72556..43be458422b3 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -11,7 +11,7 @@
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
struct list_head *iter;
struct slave *slave;
loff_t off = 0;
@@ -30,7 +30,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
struct list_head *iter;
struct slave *slave;
bool found = false;
@@ -57,7 +57,7 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
static void bond_info_show_master(struct seq_file *seq)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
const struct bond_opt_value *optval;
struct slave *curr, *primary;
int i;
@@ -115,6 +115,8 @@ static void bond_info_show_master(struct seq_file *seq)
seq_printf(seq, "ARP Polling Interval (ms): %d\n",
bond->params.arp_interval);
+ seq_printf(seq, "ARP Missed Max: %u\n",
+ bond->params.missed_max);
seq_printf(seq, "ARP IP target/s (n.n.n.n form):");
@@ -127,6 +129,21 @@ static void bond_info_show_master(struct seq_file *seq)
printed = 1;
}
seq_printf(seq, "\n");
+
+#if IS_ENABLED(CONFIG_IPV6)
+ printed = 0;
+ seq_printf(seq, "NS IPv6 target/s (xx::xx form):");
+
+ for (i = 0; (i < BOND_MAX_NS_TARGETS); i++) {
+ if (ipv6_addr_any(&bond->params.ns_targets[i]))
+ break;
+ if (printed)
+ seq_printf(seq, ",");
+ seq_printf(seq, " %pI6c", &bond->params.ns_targets[i]);
+ printed = 1;
+ }
+ seq_printf(seq, "\n");
+#endif
}
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -173,7 +190,7 @@ static void bond_info_show_master(struct seq_file *seq)
static void bond_info_show_slave(struct seq_file *seq,
const struct slave *slave)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
@@ -305,7 +322,6 @@ void __net_init bond_create_proc_dir(struct bond_net *bn)
}
/* Destroy the bonding directory under /proc/net, if empty.
- * Caller must hold rtnl_lock.
*/
void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
{
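
The bond_procfs.c changes are the mechanical PDE_DATA() to pde_data() rename plus two new lines in /proc/net/bonding/<bond>: the ARP missed max value and, with IPv6 enabled, the configured NS targets. A minimal sketch of the renamed helper in a seq_file handler (example_proc_show is hypothetical):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/bonding.h>

/* Hypothetical show handler: pde_data() returns the pointer stored
 * when the proc entry was created, here the bonding device that the
 * seq_file reports on.
 */
static int example_proc_show(struct seq_file *seq, void *v)
{
	struct bonding *bond = pde_data(file_inode(seq->file));

	seq_printf(seq, "ARP Missed Max: %u\n", bond->params.missed_max);
	return 0;
}
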
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index c48b77167fab..0bb59da24922 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -31,12 +31,12 @@
/* "show" function for the bond_masters attribute.
* The class parameter is ignored.
*/
-static ssize_t bonding_show_bonds(struct class *cls,
- struct class_attribute *attr,
+static ssize_t bonding_show_bonds(const struct class *cls,
+ const struct class_attribute *attr,
char *buf)
{
- struct bond_net *bn =
- container_of(attr, struct bond_net, class_attr_bonding_masters);
+ const struct bond_net *bn =
+ container_of_const(attr, struct bond_net, class_attr_bonding_masters);
int res = 0;
struct bonding *bond;
@@ -47,10 +47,10 @@ static ssize_t bonding_show_bonds(struct class *cls,
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s ", bond->dev->name);
+ res += sysfs_emit_at(buf, res, "%s ", bond->dev->name);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -59,7 +59,7 @@ static ssize_t bonding_show_bonds(struct class *cls,
return res;
}
-static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifname)
+static struct net_device *bond_get_by_name(const struct bond_net *bn, const char *ifname)
{
struct bonding *bond;
@@ -75,12 +75,12 @@ static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifna
*
* The class parameter is ignored.
*/
-static ssize_t bonding_store_bonds(struct class *cls,
- struct class_attribute *attr,
+static ssize_t bonding_store_bonds(const struct class *cls,
+ const struct class_attribute *attr,
const char *buffer, size_t count)
{
- struct bond_net *bn =
- container_of(attr, struct bond_net, class_attr_bonding_masters);
+ const struct bond_net *bn =
+ container_of_const(attr, struct bond_net, class_attr_bonding_masters);
char command[IFNAMSIZ + 1] = {0, };
char *ifname;
int rv, res = count;
@@ -178,10 +178,10 @@ static ssize_t bonding_show_slaves(struct device *d,
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s ", slave->dev->name);
+ res += sysfs_emit_at(buf, res, "%s ", slave->dev->name);
}
rtnl_unlock();
@@ -203,7 +203,7 @@ static ssize_t bonding_show_mode(struct device *d,
val = bond_opt_get_val(BOND_OPT_MODE, BOND_MODE(bond));
- return sprintf(buf, "%s %d\n", val->string, BOND_MODE(bond));
+ return sysfs_emit(buf, "%s %d\n", val->string, BOND_MODE(bond));
}
static DEVICE_ATTR(mode, 0644, bonding_show_mode, bonding_sysfs_store_option);
@@ -217,7 +217,7 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy);
- return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.xmit_policy);
}
static DEVICE_ATTR(xmit_hash_policy, 0644,
bonding_show_xmit_hash, bonding_sysfs_store_option);
@@ -233,7 +233,7 @@ static ssize_t bonding_show_arp_validate(struct device *d,
val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
bond->params.arp_validate);
- return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.arp_validate);
}
static DEVICE_ATTR(arp_validate, 0644, bonding_show_arp_validate,
bonding_sysfs_store_option);
@@ -248,7 +248,7 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS,
bond->params.arp_all_targets);
- return sprintf(buf, "%s %d\n",
+ return sysfs_emit(buf, "%s %d\n",
val->string, bond->params.arp_all_targets);
}
static DEVICE_ATTR(arp_all_targets, 0644,
@@ -265,7 +265,7 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
bond->params.fail_over_mac);
- return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
}
static DEVICE_ATTR(fail_over_mac, 0644,
bonding_show_fail_over_mac, bonding_sysfs_store_option);
@@ -277,7 +277,7 @@ static ssize_t bonding_show_arp_interval(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.arp_interval);
+ return sysfs_emit(buf, "%d\n", bond->params.arp_interval);
}
static DEVICE_ATTR(arp_interval, 0644,
bonding_show_arp_interval, bonding_sysfs_store_option);
@@ -292,8 +292,8 @@ static ssize_t bonding_show_arp_targets(struct device *d,
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
if (bond->params.arp_targets[i])
- res += sprintf(buf + res, "%pI4 ",
- &bond->params.arp_targets[i]);
+ res += sysfs_emit_at(buf, res, "%pI4 ",
+ &bond->params.arp_targets[i]);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -303,6 +303,18 @@ static ssize_t bonding_show_arp_targets(struct device *d,
static DEVICE_ATTR(arp_ip_target, 0644,
bonding_show_arp_targets, bonding_sysfs_store_option);
+/* Show the arp missed max. */
+static ssize_t bonding_show_missed_max(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct bonding *bond = to_bond(d);
+
+ return sysfs_emit(buf, "%u\n", bond->params.missed_max);
+}
+static DEVICE_ATTR(arp_missed_max, 0644,
+ bonding_show_missed_max, bonding_sysfs_store_option);
+
/* Show the up and down delays. */
static ssize_t bonding_show_downdelay(struct device *d,
struct device_attribute *attr,
@@ -310,7 +322,7 @@ static ssize_t bonding_show_downdelay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
}
static DEVICE_ATTR(downdelay, 0644,
bonding_show_downdelay, bonding_sysfs_store_option);
@@ -321,7 +333,7 @@ static ssize_t bonding_show_updelay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.updelay * bond->params.miimon);
}
static DEVICE_ATTR(updelay, 0644,
@@ -333,8 +345,8 @@ static ssize_t bonding_show_peer_notif_delay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n",
- bond->params.peer_notif_delay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n",
+ bond->params.peer_notif_delay * bond->params.miimon);
}
static DEVICE_ATTR(peer_notif_delay, 0644,
bonding_show_peer_notif_delay, bonding_sysfs_store_option);
@@ -349,7 +361,7 @@ static ssize_t bonding_show_lacp_active(struct device *d,
val = bond_opt_get_val(BOND_OPT_LACP_ACTIVE, bond->params.lacp_active);
- return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_active);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_active);
}
static DEVICE_ATTR(lacp_active, 0644,
bonding_show_lacp_active, bonding_sysfs_store_option);
@@ -363,7 +375,7 @@ static ssize_t bonding_show_lacp_rate(struct device *d,
val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast);
- return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_fast);
}
static DEVICE_ATTR(lacp_rate, 0644,
bonding_show_lacp_rate, bonding_sysfs_store_option);
@@ -374,7 +386,7 @@ static ssize_t bonding_show_min_links(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%u\n", bond->params.min_links);
+ return sysfs_emit(buf, "%u\n", bond->params.min_links);
}
static DEVICE_ATTR(min_links, 0644,
bonding_show_min_links, bonding_sysfs_store_option);
@@ -388,7 +400,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select);
- return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.ad_select);
}
static DEVICE_ATTR(ad_select, 0644,
bonding_show_ad_select, bonding_sysfs_store_option);
@@ -400,7 +412,7 @@ static ssize_t bonding_show_num_peer_notif(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.num_peer_notif);
+ return sysfs_emit(buf, "%d\n", bond->params.num_peer_notif);
}
static DEVICE_ATTR(num_grat_arp, 0644,
bonding_show_num_peer_notif, bonding_sysfs_store_option);
@@ -414,7 +426,7 @@ static ssize_t bonding_show_miimon(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.miimon);
}
static DEVICE_ATTR(miimon, 0644,
bonding_show_miimon, bonding_sysfs_store_option);
@@ -431,7 +443,7 @@ static ssize_t bonding_show_primary(struct device *d,
rcu_read_lock();
primary = rcu_dereference(bond->primary_slave);
if (primary)
- count = sprintf(buf, "%s\n", primary->dev->name);
+ count = sysfs_emit(buf, "%s\n", primary->dev->name);
rcu_read_unlock();
return count;
@@ -450,8 +462,8 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
bond->params.primary_reselect);
- return sprintf(buf, "%s %d\n",
- val->string, bond->params.primary_reselect);
+ return sysfs_emit(buf, "%s %d\n",
+ val->string, bond->params.primary_reselect);
}
static DEVICE_ATTR(primary_reselect, 0644,
bonding_show_primary_reselect, bonding_sysfs_store_option);
@@ -463,7 +475,7 @@ static ssize_t bonding_show_carrier(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.use_carrier);
+ return sysfs_emit(buf, "%d\n", bond->params.use_carrier);
}
static DEVICE_ATTR(use_carrier, 0644,
bonding_show_carrier, bonding_sysfs_store_option);
@@ -481,7 +493,7 @@ static ssize_t bonding_show_active_slave(struct device *d,
rcu_read_lock();
slave_dev = bond_option_active_slave_get_rcu(bond);
if (slave_dev)
- count = sprintf(buf, "%s\n", slave_dev->name);
+ count = sysfs_emit(buf, "%s\n", slave_dev->name);
rcu_read_unlock();
return count;
@@ -497,7 +509,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
struct bonding *bond = to_bond(d);
bool active = netif_carrier_ok(bond->dev);
- return sprintf(buf, "%s\n", active ? "up" : "down");
+ return sysfs_emit(buf, "%s\n", active ? "up" : "down");
}
static DEVICE_ATTR(mii_status, 0444, bonding_show_mii_status, NULL);
@@ -512,9 +524,9 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.aggregator_id);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.aggregator_id);
}
return count;
@@ -533,9 +545,9 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.ports);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.ports);
}
return count;
@@ -554,9 +566,9 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.actor_key);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.actor_key);
}
return count;
@@ -575,9 +587,9 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.partner_key);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.partner_key);
}
return count;
@@ -597,7 +609,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
struct ad_info ad_info;
if (!bond_3ad_get_active_agg_info(bond, &ad_info))
- count = sprintf(buf, "%pM\n", ad_info.partner_system);
+ count = sysfs_emit(buf, "%pM\n", ad_info.partner_system);
}
return count;
@@ -622,11 +634,11 @@ static ssize_t bonding_show_queue_id(struct device *d,
/* not enough space for another interface_name:queue_id pair */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s:%d ",
- slave->dev->name, slave->queue_id);
+ res += sysfs_emit_at(buf, res, "%s:%d ",
+ slave->dev->name, slave->queue_id);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -646,7 +658,7 @@ static ssize_t bonding_show_slaves_active(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.all_slaves_active);
+ return sysfs_emit(buf, "%d\n", bond->params.all_slaves_active);
}
static DEVICE_ATTR(all_slaves_active, 0644,
bonding_show_slaves_active, bonding_sysfs_store_option);
@@ -658,7 +670,7 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.resend_igmp);
+ return sysfs_emit(buf, "%d\n", bond->params.resend_igmp);
}
static DEVICE_ATTR(resend_igmp, 0644,
bonding_show_resend_igmp, bonding_sysfs_store_option);
@@ -670,7 +682,7 @@ static ssize_t bonding_show_lp_interval(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.lp_interval);
+ return sysfs_emit(buf, "%d\n", bond->params.lp_interval);
}
static DEVICE_ATTR(lp_interval, 0644,
bonding_show_lp_interval, bonding_sysfs_store_option);
@@ -681,7 +693,7 @@ static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
+ return sysfs_emit(buf, "%d\n", bond->params.tlb_dynamic_lb);
}
static DEVICE_ATTR(tlb_dynamic_lb, 0644,
bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
@@ -693,7 +705,7 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
struct bonding *bond = to_bond(d);
unsigned int packets_per_slave = bond->params.packets_per_slave;
- return sprintf(buf, "%u\n", packets_per_slave);
+ return sysfs_emit(buf, "%u\n", packets_per_slave);
}
static DEVICE_ATTR(packets_per_slave, 0644,
bonding_show_packets_per_slave, bonding_sysfs_store_option);
@@ -705,7 +717,7 @@ static ssize_t bonding_show_ad_actor_sys_prio(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%hu\n", bond->params.ad_actor_sys_prio);
+ return sysfs_emit(buf, "%hu\n", bond->params.ad_actor_sys_prio);
return 0;
}
@@ -719,7 +731,7 @@ static ssize_t bonding_show_ad_actor_system(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%pM\n", bond->params.ad_actor_system);
+ return sysfs_emit(buf, "%pM\n", bond->params.ad_actor_system);
return 0;
}
@@ -734,7 +746,7 @@ static ssize_t bonding_show_ad_user_port_key(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%hu\n", bond->params.ad_user_port_key);
+ return sysfs_emit(buf, "%hu\n", bond->params.ad_user_port_key);
return 0;
}
@@ -779,6 +791,7 @@ static struct attribute *per_bond_attrs[] = {
&dev_attr_ad_actor_sys_prio.attr,
&dev_attr_ad_actor_system.attr,
&dev_attr_ad_user_port_key.attr,
+ &dev_attr_arp_missed_max.attr,
NULL,
};
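
bond_sysfs.c converts every show() helper from sprintf() to sysfs_emit()/sysfs_emit_at(), which format into the single-page sysfs buffer but refuse to write past PAGE_SIZE, and registers the new arp_missed_max attribute. A minimal sketch of the conversion pattern (example_show is hypothetical):

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical show(): sysfs_emit() formats at offset 0,
 * sysfs_emit_at() appends at a caller-tracked offset, and both are
 * clamped to the PAGE_SIZE buffer that sysfs hands in.
 */
static ssize_t example_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	int res;

	res = sysfs_emit(buf, "first");
	res += sysfs_emit_at(buf, res, " second\n");

	return res;
}
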
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 6a6cdd0bb258..313866f2c0e4 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -15,43 +15,37 @@ struct slave_attribute {
ssize_t (*show)(struct slave *, char *);
};
-#define SLAVE_ATTR(_name, _mode, _show) \
-const struct slave_attribute slave_attr_##_name = { \
- .attr = {.name = __stringify(_name), \
- .mode = _mode }, \
- .show = _show, \
-};
#define SLAVE_ATTR_RO(_name) \
- SLAVE_ATTR(_name, 0444, _name##_show)
+const struct slave_attribute slave_attr_##_name = __ATTR_RO(_name)
static ssize_t state_show(struct slave *slave, char *buf)
{
switch (bond_slave_state(slave)) {
case BOND_STATE_ACTIVE:
- return sprintf(buf, "active\n");
+ return sysfs_emit(buf, "active\n");
case BOND_STATE_BACKUP:
- return sprintf(buf, "backup\n");
+ return sysfs_emit(buf, "backup\n");
default:
- return sprintf(buf, "UNKNOWN\n");
+ return sysfs_emit(buf, "UNKNOWN\n");
}
}
static SLAVE_ATTR_RO(state);
static ssize_t mii_status_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%s\n", bond_slave_link_status(slave->link));
+ return sysfs_emit(buf, "%s\n", bond_slave_link_status(slave->link));
}
static SLAVE_ATTR_RO(mii_status);
static ssize_t link_failure_count_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%d\n", slave->link_failure_count);
+ return sysfs_emit(buf, "%d\n", slave->link_failure_count);
}
static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%*phC\n",
+ return sysfs_emit(buf, "%*phC\n",
slave->dev->addr_len,
slave->perm_hwaddr);
}
@@ -59,7 +53,7 @@ static SLAVE_ATTR_RO(perm_hwaddr);
static ssize_t queue_id_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%d\n", slave->queue_id);
+ return sysfs_emit(buf, "%d\n", slave->queue_id);
}
static SLAVE_ATTR_RO(queue_id);
@@ -70,11 +64,11 @@ static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
agg = SLAVE_AD_INFO(slave)->port.aggregator;
if (agg)
- return sprintf(buf, "%d\n",
- agg->aggregator_identifier);
+ return sysfs_emit(buf, "%d\n",
+ agg->aggregator_identifier);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_aggregator_id);
@@ -85,11 +79,11 @@ static ssize_t ad_actor_oper_port_state_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
ad_port = &SLAVE_AD_INFO(slave)->port;
if (ad_port->aggregator)
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
ad_port->actor_oper_port_state);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_actor_oper_port_state);
@@ -100,11 +94,11 @@ static ssize_t ad_partner_oper_port_state_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
ad_port = &SLAVE_AD_INFO(slave)->port;
if (ad_port->aggregator)
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
ad_port->partner_oper.port_state);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_partner_oper_port_state);
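
bond_sysfs_slave.c drops its open-coded SLAVE_ATTR() initializer in favour of the generic __ATTR_RO() helper, which supplies the 0444 mode and wires up the _name##_show callback. Approximately, SLAVE_ATTR_RO(state) now resolves to the following (see the authoritative __ATTR_RO() definition in include/linux/sysfs.h):

const struct slave_attribute slave_attr_state = {
	.attr = { .name = "state", .mode = 0444 },
	.show = state_show,
};
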
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 2a7af611d43a..688075859ae4 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -196,7 +196,7 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
skb_reset_mac_header(skb);
debugfs_rx(ser, data, count);
/* Push received packet up the stack. */
- ret = netif_rx_ni(skb);
+ ret = netif_rx(skb);
if (!ret) {
ser->dev->stats.rx_packets++;
ser->dev->stats.rx_bytes += count;
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 91230894692d..0b0f234b0b50 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -714,20 +714,29 @@ static int cfv_probe(struct virtio_device *vdev)
/* Initialize NAPI poll context data */
vringh_kiov_init(&cfv->ctx.riov, NULL, 0);
cfv->ctx.head = USHRT_MAX;
- netif_napi_add(netdev, &cfv->napi, cfv_rx_poll, CFV_DEFAULT_QUOTA);
+ netif_napi_add_weight(netdev, &cfv->napi, cfv_rx_poll,
+ CFV_DEFAULT_QUOTA);
tasklet_setup(&cfv->tx_release_tasklet, cfv_tx_release_tasklet);
/* Carrier is off until netdevice is opened */
netif_carrier_off(netdev);
+ /* serialize netdev register + virtio_device_ready() with ndo_open() */
+ rtnl_lock();
+
/* register Netdev */
- err = register_netdev(netdev);
+ err = register_netdevice(netdev);
if (err) {
+ rtnl_unlock();
dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
goto err;
}
+ virtio_device_ready(vdev);
+
+ rtnl_unlock();
+
debugfs_init(cfv);
return 0;
@@ -754,7 +763,7 @@ static void cfv_remove(struct virtio_device *vdev)
debugfs_remove_recursive(cfv->debugfs);
vringh_kiov_cleanup(&cfv->ctx.riov);
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->vringh_config->del_vrhs(cfv->vdev);
cfv->vr_rx = NULL;
vdev->config->del_vqs(cfv->vdev);
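
In caif_virtio.c, cfv_probe() now takes the RTNL lock itself, registers with register_netdevice() (the variant that expects RTNL to be held) and calls virtio_device_ready() before unlocking, so ndo_open() cannot run against a device that is not yet ready; cfv_remove() switches to the virtio_reset_device() wrapper. A minimal sketch of that ordering (example_register_ready is hypothetical):

#include <linux/rtnetlink.h>
#include <linux/virtio_config.h>

/* Hypothetical helper: registration and virtio_device_ready() sit in
 * one RTNL section, closing the window in which the netdev could be
 * opened before the virtio device is ready.
 */
static int example_register_ready(struct net_device *netdev,
				  struct virtio_device *vdev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(netdev);
	if (!err)
		virtio_device_ready(vdev);
	rtnl_unlock();

	return err;
}
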
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index fff259247d52..3ceccafd701b 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -1,5 +1,26 @@
# SPDX-License-Identifier: GPL-2.0-only
-menu "CAN Device Drivers"
+
+menuconfig CAN_DEV
+ tristate "CAN Device Drivers"
+ default y
+ depends on CAN
+ help
+	  Controller Area Network (CAN) is a serial communications protocol up to
+ 1Mbit/s for its original release (now known as Classical CAN) and up
+ to 8Mbit/s for the more recent CAN with Flexible Data-Rate
+ (CAN-FD). The CAN bus was originally mainly for automotive, but is now
+ widely used in marine (NMEA2000), industrial, and medical
+ applications. More information on the CAN network protocol family
+ PF_CAN is contained in <Documentation/networking/can.rst>.
+
+ This section contains all the CAN(-FD) device drivers including the
+ virtual ones. If you own such devices or plan to use the virtual CAN
+ interfaces to develop applications, say Y here.
+
+ To compile as a module, choose M here: the module will be called
+ can-dev.
+
+if CAN_DEV
config CAN_VCAN
tristate "Virtual Local CAN Interface (vcan)"
@@ -28,35 +49,22 @@ config CAN_VXCAN
This driver can also be built as a module. If so, the module
will be called vxcan.
-config CAN_SLCAN
- tristate "Serial / USB serial CAN Adaptors (slcan)"
- depends on TTY
+config CAN_NETLINK
+ bool "CAN device drivers with Netlink support"
+ default y
help
- CAN driver for several 'low cost' CAN interfaces that are attached
- via serial lines or via USB-to-serial adapters using the LAWICEL
- ASCII protocol. The driver implements the tty linediscipline N_SLCAN.
+ Enables the common framework for CAN device drivers. This is the
+ standard library and provides features for the Netlink interface such
+ as bittiming validation, support of CAN error states, device restart
+ and others.
- As only the sending and receiving of CAN frames is implemented, this
- driver should work with the (serial/USB) CAN hardware from:
- www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
+ The additional features selected by this option will be added to the
+ can-dev module.
- Userspace tools to attach the SLCAN line discipline (slcan_attach,
- slcand) can be found in the can-utils at the linux-can project, see
- https://github.com/linux-can/can-utils for details.
-
- The slcan driver supports up to 10 CAN netdevices by default which
- can be changed by the 'maxdev=xx' module option. This driver can
- also be built as a module. If so, the module will be called slcan.
-
-config CAN_DEV
- tristate "Platform CAN drivers with Netlink support"
- default y
- help
- Enables the common framework for platform CAN drivers with Netlink
- support. This is the standard library for CAN drivers.
- If unsure, say Y.
+ This is required by all platform and hardware CAN drivers. If you
+ plan to use such devices or if unsure, say Y.
-if CAN_DEV
+if CAN_NETLINK
config CAN_CALC_BITTIMING
bool "CAN bit-timing calculation"
@@ -69,24 +77,14 @@ config CAN_CALC_BITTIMING
source clock frequencies. Disabling saves some space, but then the
bit-timing parameters must be specified directly using the Netlink
arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw".
- If unsure, say Y.
-config CAN_LEDS
- bool "Enable LED triggers for Netlink based drivers"
- depends on LEDS_CLASS
- # The netdev trigger (LEDS_TRIGGER_NETDEV) should be able to do
- # everything that this driver is doing. This is marked as broken
- # because it uses stuff that is intended to be changed or removed.
- # Please consider switching to the netdev trigger and confirm it
- # fulfills your needs instead of fixing this driver.
- depends on BROKEN
- select LEDS_TRIGGERS
- help
- This option adds two LED triggers for packet receive and transmit
- events on each supported CAN device.
+ The additional features selected by this option will be added to the
+ can-dev module.
+
+ If unsure, say Y.
- Say Y here if you are working on a system with led-class supported
- LEDs and you want to use them as canbus activity indicators.
+config CAN_RX_OFFLOAD
+ bool
config CAN_AT91
tristate "Atmel AT91 onchip CAN controller"
@@ -95,10 +93,41 @@ config CAN_AT91
This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
and AT91SAM9X5 processors.
+config CAN_BXCAN
+ tristate "STM32 Basic Extended CAN (bxCAN) devices"
+ depends on OF || ARCH_STM32 || COMPILE_TEST
+ depends on HAS_IOMEM
+ select CAN_RX_OFFLOAD
+ help
+ Say yes here to build support for the STMicroelectronics STM32 basic
+ extended CAN Controller (bxCAN).
+
+ This driver can also be built as a module. If so, the module
+ will be called bxcan.
+
+config CAN_CAN327
+ tristate "Serial / USB serial ELM327 based OBD-II Interfaces (can327)"
+ depends on TTY
+ select CAN_RX_OFFLOAD
+ help
+ CAN driver for several 'low cost' OBD-II interfaces based on the
+ ELM327 OBD-II interpreter chip.
+
+ This is a best effort driver - the ELM327 interface was never
+ designed to be used as a standalone CAN interface. However, it can
+ still be used for simple request-response protocols (such as OBD II),
+ and to monitor broadcast messages on a bus (such as in a vehicle).
+
+ Please refer to the documentation for information on how to use it:
+ Documentation/networking/device_drivers/can/can327.rst
+
+ If this driver is built as a module, it will be called can327.
+
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
depends on OF || COLDFIRE || COMPILE_TEST
depends on HAS_IOMEM
+ select CAN_RX_OFFLOAD
help
Say Y here if you want to support for Freescale FlexCAN.
@@ -135,6 +164,26 @@ config CAN_KVASER_PCIEFD
Kvaser Mini PCI Express HS v2
Kvaser Mini PCI Express 2xHS v2
+config CAN_SLCAN
+ tristate "Serial / USB serial CAN Adaptors (slcan)"
+ depends on TTY
+ help
+ CAN driver for several 'low cost' CAN interfaces that are attached
+ via serial lines or via USB-to-serial adapters using the LAWICEL
+ ASCII protocol. The driver implements the tty linediscipline N_SLCAN.
+
+ As only the sending and receiving of CAN frames is implemented, this
+ driver should work with the (serial/USB) CAN hardware from:
+ www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
+
+ Userspace tools to attach the SLCAN line discipline (slcan_attach,
+ slcand) can be found in the can-utils at the linux-can project, see
+ https://github.com/linux-can/can-utils for details.
+
+ The slcan driver supports up to 10 CAN netdevices by default which
+ can be changed by the 'maxdev=xx' module option. This driver can
+ also be built as a module. If so, the module will be called slcan.
+
config CAN_SUN4I
tristate "Allwinner A10 CAN controller"
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
@@ -148,6 +197,7 @@ config CAN_SUN4I
config CAN_TI_HECC
depends on ARM
tristate "TI High End CAN Controller"
+ select CAN_RX_OFFLOAD
help
Driver for TI HECC (High End CAN Controller) module found on many
TI devices. The device specifications are available from www.ti.com
@@ -160,16 +210,9 @@ config CAN_XILINXCAN
Xilinx CAN driver. This driver supports both soft AXI CAN IP and
Zynq CANPS IP.
-config PCH_CAN
- tristate "Intel EG20T PCH CAN controller"
- depends on PCI && (X86_32 || COMPILE_TEST)
- help
- This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which
- is an IOH for x86 embedded processor (Intel Atom E6xx series).
- This driver can access CAN bus.
-
source "drivers/net/can/c_can/Kconfig"
source "drivers/net/can/cc770/Kconfig"
+source "drivers/net/can/ctucanfd/Kconfig"
source "drivers/net/can/ifi_canfd/Kconfig"
source "drivers/net/can/m_can/Kconfig"
source "drivers/net/can/mscan/Kconfig"
@@ -180,7 +223,7 @@ source "drivers/net/can/softing/Kconfig"
source "drivers/net/can/spi/Kconfig"
source "drivers/net/can/usb/Kconfig"
-endif
+endif #CAN_NETLINK
config CAN_DEBUG_DEVICES
bool "CAN devices debugging messages"
@@ -190,4 +233,4 @@ config CAN_DEBUG_DEVICES
a problem with CAN support and want to see more of what is going
on.
-endmenu
+endif #CAN_DEV
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index a2b4463d8480..ff8f76295d13 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_CAN_VCAN) += vcan.o
obj-$(CONFIG_CAN_VXCAN) += vxcan.o
-obj-$(CONFIG_CAN_SLCAN) += slcan.o
+obj-$(CONFIG_CAN_SLCAN) += slcan/
obj-y += dev/
obj-y += rcar/
@@ -14,9 +14,12 @@ obj-y += usb/
obj-y += softing/
obj-$(CONFIG_CAN_AT91) += at91_can.o
+obj-$(CONFIG_CAN_BXCAN) += bxcan.o
+obj-$(CONFIG_CAN_CAN327) += can327.o
obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_C_CAN) += c_can/
-obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
+obj-$(CONFIG_CAN_CTUCANFD) += ctucanfd/
+obj-$(CONFIG_CAN_FLEXCAN) += flexcan/
obj-$(CONFIG_CAN_GRCAN) += grcan.o
obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
@@ -28,6 +31,5 @@ obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o
-obj-$(CONFIG_PCH_CAN) += pch_can.o
subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 3aea32c9b108..199cb200f2bd 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -23,7 +24,6 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#define AT91_MB_MASK(i) ((1 << (i)) - 1)
@@ -448,12 +448,11 @@ static void at91_chip_stop(struct net_device *dev, enum can_state state)
static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
unsigned int mb, prio;
u32 reg_mid, reg_mcr;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
mb = get_tx_next_mb(priv);
@@ -480,8 +479,6 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* This triggers transmission */
at91_write(priv, AT91_MCR(mb), reg_mcr);
- stats->tx_bytes += cf->len;
-
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0);
@@ -553,8 +550,6 @@ static void at91_rx_overflow_err(struct net_device *dev)
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_receive_skb(skb);
}
@@ -619,10 +614,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
at91_read_mb(dev, mb, cf);
stats->rx_packets++;
- stats->rx_bytes += cf->len;
- netif_receive_skb(skb);
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
- can_led_event(dev, CAN_LED_EVENT_RX);
+ netif_receive_skb(skb);
}
/**
@@ -779,8 +774,6 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
at91_poll_err_frame(dev, cf, reg_sr);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -854,9 +847,11 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
if (likely(reg_msr & AT91_MSR_MRDY &&
~reg_msr & AT91_MSR_MABT)) {
/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
- can_get_echo_skb(dev, mb - get_mb_tx_first(priv), NULL);
+ dev->stats.tx_bytes +=
+ can_get_echo_skb(dev,
+ mb - get_mb_tx_first(priv),
+ NULL);
dev->stats.tx_packets++;
- can_led_event(dev, CAN_LED_EVENT_TX);
}
}
@@ -1037,8 +1032,6 @@ static void at91_irq_err(struct net_device *dev)
at91_irq_err_state(dev, cf, new_state);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += cf->len;
netif_rx(skb);
priv->can.state = new_state;
@@ -1105,8 +1098,6 @@ static int at91_open(struct net_device *dev)
goto out_close;
}
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
/* start chip and queuing */
at91_chip_start(dev);
napi_enable(&priv->napi);
@@ -1137,8 +1128,6 @@ static int at91_close(struct net_device *dev)
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -1164,6 +1153,10 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops at91_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static ssize_t mb0_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1305,6 +1298,7 @@ static int at91_can_probe(struct platform_device *pdev)
}
dev->netdev_ops = &at91_netdev_ops;
+ dev->ethtool_ops = &at91_ethtool_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
@@ -1321,7 +1315,7 @@ static int at91_can_probe(struct platform_device *pdev)
priv->pdata = dev_get_platdata(&pdev->dev);
priv->mb0_id = 0x7ff;
- netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
+ netif_napi_add_weight(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
if (at91_is_sam9263(priv))
dev->sysfs_groups[0] = &at91_sysfs_attr_group;
@@ -1335,8 +1329,6 @@ static int at91_can_probe(struct platform_device *pdev)
goto exit_free;
}
- devm_can_led_init(dev);
-
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
priv->reg_base, dev->irq);
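
The at91_can.c changes remove the deleted can_led hooks, switch the TX path to the stricter can_dev_dropped_skb() check, add a minimal ethtool_ops built around ethtool_op_get_ts_info(), and move byte accounting so that error frames are not counted and TX bytes are charged at completion time. A sketch of the completion-side accounting pattern used above (example_tx_complete is hypothetical):

#include <linux/can/skb.h>
#include <linux/netdevice.h>

/* Hypothetical TX-complete handler: can_get_echo_skb() returns the
 * data length of the echoed frame, so tx_bytes is accounted when the
 * controller confirms transmission rather than when the frame was
 * queued.
 */
static void example_tx_complete(struct net_device *dev, unsigned int idx)
{
	dev->stats.tx_bytes += can_get_echo_skb(dev, idx, NULL);
	dev->stats.tx_packets++;
}
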
diff --git a/drivers/net/can/bxcan.c b/drivers/net/can/bxcan.c
new file mode 100644
index 000000000000..e26ccd41e3cb
--- /dev/null
+++ b/drivers/net/can/bxcan.c
@@ -0,0 +1,1098 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// bxcan.c - STM32 Basic Extended CAN controller driver
+//
+// Copyright (c) 2022 Dario Binacchi <dario.binacchi@amarulasolutions.com>
+//
+// NOTE: The ST documentation uses the terms master/slave instead of
+// primary/secondary.
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitfield.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/rx-offload.h>
+#include <linux/clk.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define BXCAN_NAPI_WEIGHT 3
+#define BXCAN_TIMEOUT_US 10000
+
+#define BXCAN_RX_MB_NUM 2
+#define BXCAN_TX_MB_NUM 3
+
+/* Primary control register (MCR) bits */
+#define BXCAN_MCR_RESET BIT(15)
+#define BXCAN_MCR_TTCM BIT(7)
+#define BXCAN_MCR_ABOM BIT(6)
+#define BXCAN_MCR_AWUM BIT(5)
+#define BXCAN_MCR_NART BIT(4)
+#define BXCAN_MCR_RFLM BIT(3)
+#define BXCAN_MCR_TXFP BIT(2)
+#define BXCAN_MCR_SLEEP BIT(1)
+#define BXCAN_MCR_INRQ BIT(0)
+
+/* Primary status register (MSR) bits */
+#define BXCAN_MSR_ERRI BIT(2)
+#define BXCAN_MSR_SLAK BIT(1)
+#define BXCAN_MSR_INAK BIT(0)
+
+/* Transmit status register (TSR) bits */
+#define BXCAN_TSR_RQCP2 BIT(16)
+#define BXCAN_TSR_RQCP1 BIT(8)
+#define BXCAN_TSR_RQCP0 BIT(0)
+
+/* Receive FIFO 0 register (RF0R) bits */
+#define BXCAN_RF0R_RFOM0 BIT(5)
+#define BXCAN_RF0R_FMP0_MASK GENMASK(1, 0)
+
+/* Interrupt enable register (IER) bits */
+#define BXCAN_IER_SLKIE BIT(17)
+#define BXCAN_IER_WKUIE BIT(16)
+#define BXCAN_IER_ERRIE BIT(15)
+#define BXCAN_IER_LECIE BIT(11)
+#define BXCAN_IER_BOFIE BIT(10)
+#define BXCAN_IER_EPVIE BIT(9)
+#define BXCAN_IER_EWGIE BIT(8)
+#define BXCAN_IER_FOVIE1 BIT(6)
+#define BXCAN_IER_FFIE1 BIT(5)
+#define BXCAN_IER_FMPIE1 BIT(4)
+#define BXCAN_IER_FOVIE0 BIT(3)
+#define BXCAN_IER_FFIE0 BIT(2)
+#define BXCAN_IER_FMPIE0 BIT(1)
+#define BXCAN_IER_TMEIE BIT(0)
+
+/* Error status register (ESR) bits */
+#define BXCAN_ESR_REC_MASK GENMASK(31, 24)
+#define BXCAN_ESR_TEC_MASK GENMASK(23, 16)
+#define BXCAN_ESR_LEC_MASK GENMASK(6, 4)
+#define BXCAN_ESR_BOFF BIT(2)
+#define BXCAN_ESR_EPVF BIT(1)
+#define BXCAN_ESR_EWGF BIT(0)
+
+/* Bit timing register (BTR) bits */
+#define BXCAN_BTR_SILM BIT(31)
+#define BXCAN_BTR_LBKM BIT(30)
+#define BXCAN_BTR_SJW_MASK GENMASK(25, 24)
+#define BXCAN_BTR_TS2_MASK GENMASK(22, 20)
+#define BXCAN_BTR_TS1_MASK GENMASK(19, 16)
+#define BXCAN_BTR_BRP_MASK GENMASK(9, 0)
+
+/* TX mailbox identifier register (TIxR, x = 0..2) bits */
+#define BXCAN_TIxR_STID_MASK GENMASK(31, 21)
+#define BXCAN_TIxR_EXID_MASK GENMASK(31, 3)
+#define BXCAN_TIxR_IDE BIT(2)
+#define BXCAN_TIxR_RTR BIT(1)
+#define BXCAN_TIxR_TXRQ BIT(0)
+
+/* TX mailbox data length and time stamp register (TDTxR, x = 0..2) bits */
+#define BXCAN_TDTxR_DLC_MASK GENMASK(3, 0)
+
+/* RX FIFO mailbox identifier register (RIxR, x = 0..1) */
+#define BXCAN_RIxR_STID_MASK GENMASK(31, 21)
+#define BXCAN_RIxR_EXID_MASK GENMASK(31, 3)
+#define BXCAN_RIxR_IDE BIT(2)
+#define BXCAN_RIxR_RTR BIT(1)
+
+/* RX FIFO mailbox data length and timestamp register (RDTxR, x = 0..1) bits */
+#define BXCAN_RDTxR_TIME_MASK GENMASK(31, 16)
+#define BXCAN_RDTxR_DLC_MASK GENMASK(3, 0)
+
+#define BXCAN_FMR_REG 0x00
+#define BXCAN_FM1R_REG 0x04
+#define BXCAN_FS1R_REG 0x0c
+#define BXCAN_FFA1R_REG 0x14
+#define BXCAN_FA1R_REG 0x1c
+#define BXCAN_FiR1_REG(b) (0x40 + (b) * 8)
+#define BXCAN_FiR2_REG(b) (0x44 + (b) * 8)
+
+#define BXCAN_FILTER_ID(primary) (primary ? 0 : 14)
+
+/* Filter primary register (FMR) bits */
+#define BXCAN_FMR_CANSB_MASK GENMASK(13, 8)
+#define BXCAN_FMR_FINIT BIT(0)
+
+enum bxcan_lec_code {
+ BXCAN_LEC_NO_ERROR = 0,
+ BXCAN_LEC_STUFF_ERROR,
+ BXCAN_LEC_FORM_ERROR,
+ BXCAN_LEC_ACK_ERROR,
+ BXCAN_LEC_BIT1_ERROR,
+ BXCAN_LEC_BIT0_ERROR,
+ BXCAN_LEC_CRC_ERROR,
+ BXCAN_LEC_UNUSED
+};
+
+/* Structure of the message buffer */
+struct bxcan_mb {
+ u32 id; /* can identifier */
+ u32 dlc; /* data length control and timestamp */
+ u32 data[2]; /* data */
+};
+
+/* Structure of the hardware registers */
+struct bxcan_regs {
+ u32 mcr; /* 0x00 - primary control */
+ u32 msr; /* 0x04 - primary status */
+ u32 tsr; /* 0x08 - transmit status */
+ u32 rf0r; /* 0x0c - FIFO 0 */
+ u32 rf1r; /* 0x10 - FIFO 1 */
+ u32 ier; /* 0x14 - interrupt enable */
+ u32 esr; /* 0x18 - error status */
+ u32 btr; /* 0x1c - bit timing*/
+ u32 reserved0[88]; /* 0x20 */
+ struct bxcan_mb tx_mb[BXCAN_TX_MB_NUM]; /* 0x180 - tx mailbox */
+ struct bxcan_mb rx_mb[BXCAN_RX_MB_NUM]; /* 0x1b0 - rx mailbox */
+};
+
+struct bxcan_priv {
+ struct can_priv can;
+ struct can_rx_offload offload;
+ struct device *dev;
+ struct net_device *ndev;
+
+ struct bxcan_regs __iomem *regs;
+ struct regmap *gcan;
+ int tx_irq;
+ int sce_irq;
+ bool primary;
+ struct clk *clk;
+ spinlock_t rmw_lock; /* lock for read-modify-write operations */
+ unsigned int tx_head;
+ unsigned int tx_tail;
+ u32 timestamp;
+};
+
+static const struct can_bittiming_const bxcan_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+static inline void bxcan_rmw(struct bxcan_priv *priv, void __iomem *addr,
+ u32 clear, u32 set)
+{
+ unsigned long flags;
+ u32 old, val;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ old = readl(addr);
+ val = (old & ~clear) | set;
+ if (val != old)
+ writel(val, addr);
+
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+}
+
+static void bxcan_disable_filters(struct bxcan_priv *priv, bool primary)
+{
+ unsigned int fid = BXCAN_FILTER_ID(primary);
+ u32 fmask = BIT(fid);
+
+ regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0);
+}
+
+static void bxcan_enable_filters(struct bxcan_priv *priv, bool primary)
+{
+ unsigned int fid = BXCAN_FILTER_ID(primary);
+ u32 fmask = BIT(fid);
+
+ /* Filter settings:
+ *
+ * Accept all messages.
+ * Assign filter 0 to CAN1 and filter 14 to CAN2 in identifier
+ * mask mode with 32 bits width.
+ */
+
+	/* Enter filter initialization mode and assign filters to CAN
+ * controllers.
+ */
+ regmap_update_bits(priv->gcan, BXCAN_FMR_REG,
+ BXCAN_FMR_CANSB_MASK | BXCAN_FMR_FINIT,
+ FIELD_PREP(BXCAN_FMR_CANSB_MASK, 14) |
+ BXCAN_FMR_FINIT);
+
+ /* Deactivate filter */
+ regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0);
+
+ /* Two 32-bit registers in identifier mask mode */
+ regmap_update_bits(priv->gcan, BXCAN_FM1R_REG, fmask, 0);
+
+ /* Single 32-bit scale configuration */
+ regmap_update_bits(priv->gcan, BXCAN_FS1R_REG, fmask, fmask);
+
+ /* Assign filter to FIFO 0 */
+ regmap_update_bits(priv->gcan, BXCAN_FFA1R_REG, fmask, 0);
+
+ /* Accept all messages */
+ regmap_write(priv->gcan, BXCAN_FiR1_REG(fid), 0);
+ regmap_write(priv->gcan, BXCAN_FiR2_REG(fid), 0);
+
+ /* Activate filter */
+ regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, fmask);
+
+ /* Exit filter initialization mode */
+ regmap_update_bits(priv->gcan, BXCAN_FMR_REG, BXCAN_FMR_FINIT, 0);
+}
+
+static inline u8 bxcan_get_tx_head(const struct bxcan_priv *priv)
+{
+ return priv->tx_head % BXCAN_TX_MB_NUM;
+}
+
+static inline u8 bxcan_get_tx_tail(const struct bxcan_priv *priv)
+{
+ return priv->tx_tail % BXCAN_TX_MB_NUM;
+}
+
+static inline u8 bxcan_get_tx_free(const struct bxcan_priv *priv)
+{
+ return BXCAN_TX_MB_NUM - (priv->tx_head - priv->tx_tail);
+}
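+
+/* Illustrative note (not part of the original patch): tx_head and
+ * tx_tail are free-running counters, so with BXCAN_TX_MB_NUM == 3,
+ * head == 5 and tail == 3 mean two frames are in flight and
+ * 3 - (5 - 3) = 1 mailbox is still free; unsigned arithmetic keeps
+ * the difference correct across counter wraparound.
+ */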
+
+static bool bxcan_tx_busy(const struct bxcan_priv *priv)
+{
+ if (bxcan_get_tx_free(priv) > 0)
+ return false;
+
+ netif_stop_queue(priv->ndev);
+
+ /* Memory barrier before checking tx_free (head and tail) */
+ smp_mb();
+
+ if (bxcan_get_tx_free(priv) == 0) {
+ netdev_dbg(priv->ndev,
+ "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
+ priv->tx_head, priv->tx_tail,
+ priv->tx_head - priv->tx_tail);
+
+ return true;
+ }
+
+ netif_start_queue(priv->ndev);
+
+ return false;
+}
+
+static int bxcan_chip_softreset(struct bxcan_priv *priv)
+{
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 value;
+
+ bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_RESET);
+ return readx_poll_timeout(readl, &regs->msr, value,
+ value & BXCAN_MSR_SLAK, BXCAN_TIMEOUT_US,
+ USEC_PER_SEC);
+}
+
+static int bxcan_enter_init_mode(struct bxcan_priv *priv)
+{
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 value;
+
+ bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_INRQ);
+ return readx_poll_timeout(readl, &regs->msr, value,
+ value & BXCAN_MSR_INAK, BXCAN_TIMEOUT_US,
+ USEC_PER_SEC);
+}
+
+static int bxcan_leave_init_mode(struct bxcan_priv *priv)
+{
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 value;
+
+ bxcan_rmw(priv, &regs->mcr, BXCAN_MCR_INRQ, 0);
+ return readx_poll_timeout(readl, &regs->msr, value,
+ !(value & BXCAN_MSR_INAK), BXCAN_TIMEOUT_US,
+ USEC_PER_SEC);
+}
+
+static int bxcan_enter_sleep_mode(struct bxcan_priv *priv)
+{
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 value;
+
+ bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_SLEEP);
+ return readx_poll_timeout(readl, &regs->msr, value,
+ value & BXCAN_MSR_SLAK, BXCAN_TIMEOUT_US,
+ USEC_PER_SEC);
+}
+
+static int bxcan_leave_sleep_mode(struct bxcan_priv *priv)
+{
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 value;
+
+ bxcan_rmw(priv, &regs->mcr, BXCAN_MCR_SLEEP, 0);
+ return readx_poll_timeout(readl, &regs->msr, value,
+ !(value & BXCAN_MSR_SLAK), BXCAN_TIMEOUT_US,
+ USEC_PER_SEC);
+}
+
+static inline
+struct bxcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
+{
+ return container_of(offload, struct bxcan_priv, offload);
+}
+
+static struct sk_buff *bxcan_mailbox_read(struct can_rx_offload *offload,
+ unsigned int mbxno, u32 *timestamp,
+ bool drop)
+{
+ struct bxcan_priv *priv = rx_offload_to_priv(offload);
+ struct bxcan_regs __iomem *regs = priv->regs;
+ struct bxcan_mb __iomem *mb_regs = &regs->rx_mb[0];
+ struct sk_buff *skb = NULL;
+ struct can_frame *cf;
+ u32 rf0r, id, dlc;
+
+ rf0r = readl(&regs->rf0r);
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
+ if (!(rf0r & BXCAN_RF0R_FMP0_MASK))
+ goto mark_as_read;
+
+ skb = alloc_can_skb(offload->dev, &cf);
+ if (unlikely(!skb)) {
+ skb = ERR_PTR(-ENOMEM);
+ goto mark_as_read;
+ }
+
+ id = readl(&mb_regs->id);
+ if (id & BXCAN_RIxR_IDE)
+ cf->can_id = FIELD_GET(BXCAN_RIxR_EXID_MASK, id) | CAN_EFF_FLAG;
+ else
+ cf->can_id = FIELD_GET(BXCAN_RIxR_STID_MASK, id) & CAN_SFF_MASK;
+
+ dlc = readl(&mb_regs->dlc);
+ priv->timestamp = FIELD_GET(BXCAN_RDTxR_TIME_MASK, dlc);
+ cf->len = can_cc_dlc2len(FIELD_GET(BXCAN_RDTxR_DLC_MASK, dlc));
+
+ if (id & BXCAN_RIxR_RTR) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ int i, j;
+
+ for (i = 0, j = 0; i < cf->len; i += 4, j++)
+ *(u32 *)(cf->data + i) = readl(&mb_regs->data[j]);
+ }
+
+ mark_as_read:
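+ /* Release the FIFO output mailbox (RFOM0) so the hardware can
+ * advance to the next pending frame, whether or not this one was
+ * consumed.
+ */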
+ rf0r |= BXCAN_RF0R_RFOM0;
+ writel(rf0r, &regs->rf0r);
+ return skb;
+}
+
+static irqreturn_t bxcan_rx_isr(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 rf0r;
+
+ rf0r = readl(&regs->rf0r);
+ if (!(rf0r & BXCAN_RF0R_FMP0_MASK))
+ return IRQ_NONE;
+
+ can_rx_offload_irq_offload_fifo(&priv->offload);
+ can_rx_offload_irq_finish(&priv->offload);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bxcan_tx_isr(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ struct bxcan_regs __iomem *regs = priv->regs;
+ struct net_device_stats *stats = &ndev->stats;
+ u32 tsr, rqcp_bit;
+ int idx;
+
+ tsr = readl(&regs->tsr);
+ if (!(tsr & (BXCAN_TSR_RQCP0 | BXCAN_TSR_RQCP1 | BXCAN_TSR_RQCP2)))
+ return IRQ_NONE;
+
+ while (priv->tx_head - priv->tx_tail > 0) {
+ idx = bxcan_get_tx_tail(priv);
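+ /* The per-mailbox flags in TSR are laid out in 8-bit groups, so
+ * the request-completed bit of mailbox idx is RQCP0 shifted left
+ * by idx * 8.
+ */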
+ rqcp_bit = BXCAN_TSR_RQCP0 << (idx << 3);
+ if (!(tsr & rqcp_bit))
+ break;
+
+ stats->tx_packets++;
+ stats->tx_bytes += can_get_echo_skb(ndev, idx, NULL);
+ priv->tx_tail++;
+ }
+
+ writel(tsr, &regs->tsr);
+
+ if (bxcan_get_tx_free(priv)) {
+ /* Make sure that anybody stopping the queue after
+ * this sees the new tx_tail.
+ */
+ smp_mb();
+ netif_wake_queue(ndev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void bxcan_handle_state_change(struct net_device *ndev, u32 esr)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ enum can_state new_state = priv->can.state;
+ struct can_berr_counter bec;
+ enum can_state rx_state, tx_state;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ /* Early exit if no error flag is set */
+ if (!(esr & (BXCAN_ESR_EWGF | BXCAN_ESR_EPVF | BXCAN_ESR_BOFF)))
+ return;
+
+ bec.txerr = FIELD_GET(BXCAN_ESR_TEC_MASK, esr);
+ bec.rxerr = FIELD_GET(BXCAN_ESR_REC_MASK, esr);
+
+ if (esr & BXCAN_ESR_BOFF)
+ new_state = CAN_STATE_BUS_OFF;
+ else if (esr & BXCAN_ESR_EPVF)
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ else if (esr & BXCAN_ESR_EWGF)
+ new_state = CAN_STATE_ERROR_WARNING;
+
+ /* state hasn't changed */
+ if (unlikely(new_state == priv->can.state))
+ return;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
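+ /* The side with the larger error counter caused the transition;
+ * can_change_state() reports the worse of the two states to the
+ * CAN stack.
+ */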
+ tx_state = bec.txerr >= bec.rxerr ? new_state : 0;
+ rx_state = bec.txerr <= bec.rxerr ? new_state : 0;
+ can_change_state(ndev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ can_bus_off(ndev);
+ } else if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+
+ if (skb) {
+ int err;
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb,
+ priv->timestamp);
+ if (err)
+ ndev->stats.rx_fifo_errors++;
+ }
+}
+
+static void bxcan_handle_bus_err(struct net_device *ndev, u32 esr)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ enum bxcan_lec_code lec_code;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ lec_code = FIELD_GET(BXCAN_ESR_LEC_MASK, esr);
+
+ /* Early exit if no lec update or no error.
+ * No lec update means that no CAN bus event has been detected
+ * since CPU wrote BXCAN_LEC_UNUSED value to status reg.
+ */
+ if (lec_code == BXCAN_LEC_UNUSED || lec_code == BXCAN_LEC_NO_ERROR)
+ return;
+
+ /* Common for all type of bus errors */
+ priv->can.can_stats.bus_error++;
+
+ /* Propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (skb)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ switch (lec_code) {
+ case BXCAN_LEC_STUFF_ERROR:
+ netdev_dbg(ndev, "Stuff error\n");
+ ndev->stats.rx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+
+ case BXCAN_LEC_FORM_ERROR:
+ netdev_dbg(ndev, "Form error\n");
+ ndev->stats.rx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+
+ case BXCAN_LEC_ACK_ERROR:
+ netdev_dbg(ndev, "Ack error\n");
+ ndev->stats.tx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ }
+ break;
+
+ case BXCAN_LEC_BIT1_ERROR:
+ netdev_dbg(ndev, "Bit error (recessive)\n");
+ ndev->stats.tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ break;
+
+ case BXCAN_LEC_BIT0_ERROR:
+ netdev_dbg(ndev, "Bit error (dominant)\n");
+ ndev->stats.tx_errors++;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ break;
+
+ case BXCAN_LEC_CRC_ERROR:
+ netdev_dbg(ndev, "CRC error\n");
+ ndev->stats.rx_errors++;
+ if (skb) {
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (skb) {
+ int err;
+
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb,
+ priv->timestamp);
+ if (err)
+ ndev->stats.rx_fifo_errors++;
+ }
+}
+
+static irqreturn_t bxcan_state_change_isr(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 msr, esr;
+
+ msr = readl(&regs->msr);
+ if (!(msr & BXCAN_MSR_ERRI))
+ return IRQ_NONE;
+
+ esr = readl(&regs->esr);
+ bxcan_handle_state_change(ndev, esr);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ bxcan_handle_bus_err(ndev, esr);
+
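+ /* Acknowledge the status change interrupt: ERRI is cleared by
+ * writing it back as 1.
+ */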
+ msr |= BXCAN_MSR_ERRI;
+ writel(msr, &regs->msr);
+ can_rx_offload_irq_finish(&priv->offload);
+
+ return IRQ_HANDLED;
+}
+
+static int bxcan_chip_start(struct net_device *ndev)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ struct bxcan_regs __iomem *regs = priv->regs;
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u32 clr, set;
+ int err;
+
+ err = bxcan_chip_softreset(priv);
+ if (err) {
+ netdev_err(ndev, "failed to reset chip, error %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ err = bxcan_leave_sleep_mode(priv);
+ if (err) {
+ netdev_err(ndev, "failed to leave sleep mode, error %pe\n",
+ ERR_PTR(err));
+ goto failed_leave_sleep;
+ }
+
+ err = bxcan_enter_init_mode(priv);
+ if (err) {
+ netdev_err(ndev, "failed to enter init mode, error %pe\n",
+ ERR_PTR(err));
+ goto failed_enter_init;
+ }
+
+ /* MCR
+ *
+ * select request order priority
+ * enable time triggered mode
+ * bus-off state left on sw request
+ * sleep mode left on sw request
+ * retransmit automatically on error
+ * do not lock RX FIFO on overrun
+ */
+ bxcan_rmw(priv, &regs->mcr,
+ BXCAN_MCR_ABOM | BXCAN_MCR_AWUM | BXCAN_MCR_NART |
+ BXCAN_MCR_RFLM, BXCAN_MCR_TTCM | BXCAN_MCR_TXFP);
+
+ /* Bit timing register settings */
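+ /* All fields are programmed as (value - 1); TS1 covers both the
+ * propagation segment and phase segment 1.
+ */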
+ set = FIELD_PREP(BXCAN_BTR_BRP_MASK, bt->brp - 1) |
+ FIELD_PREP(BXCAN_BTR_TS1_MASK, bt->phase_seg1 +
+ bt->prop_seg - 1) |
+ FIELD_PREP(BXCAN_BTR_TS2_MASK, bt->phase_seg2 - 1) |
+ FIELD_PREP(BXCAN_BTR_SJW_MASK, bt->sjw - 1);
+
+ /* loopback + silent mode put the controller in test mode,
+ * useful for hot self-test
+ */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ set |= BXCAN_BTR_LBKM;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ set |= BXCAN_BTR_SILM;
+
+ bxcan_rmw(priv, &regs->btr, BXCAN_BTR_SILM | BXCAN_BTR_LBKM |
+ BXCAN_BTR_BRP_MASK | BXCAN_BTR_TS1_MASK | BXCAN_BTR_TS2_MASK |
+ BXCAN_BTR_SJW_MASK, set);
+
+ bxcan_enable_filters(priv, priv->primary);
+
+ /* Clear all internal status */
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+
+ err = bxcan_leave_init_mode(priv);
+ if (err) {
+ netdev_err(ndev, "failed to leave init mode, error %pe\n",
+ ERR_PTR(err));
+ goto failed_leave_init;
+ }
+
+ /* Set a `lec` value so that we can check for updates later */
+ bxcan_rmw(priv, &regs->esr, BXCAN_ESR_LEC_MASK,
+ FIELD_PREP(BXCAN_ESR_LEC_MASK, BXCAN_LEC_UNUSED));
+
+ /* IER
+ *
+ * Enable interrupt for:
+ * bus-off
+ * passive error
+ * warning error
+ * last error code
+ * RX FIFO pending message
+ * TX mailbox empty
+ */
+ clr = BXCAN_IER_WKUIE | BXCAN_IER_SLKIE | BXCAN_IER_FOVIE1 |
+ BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 |
+ BXCAN_IER_FFIE0;
+ set = BXCAN_IER_ERRIE | BXCAN_IER_BOFIE | BXCAN_IER_EPVIE |
+ BXCAN_IER_EWGIE | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ set |= BXCAN_IER_LECIE;
+ else
+ clr |= BXCAN_IER_LECIE;
+
+ bxcan_rmw(priv, &regs->ier, clr, set);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ return 0;
+
+failed_leave_init:
+failed_enter_init:
+failed_leave_sleep:
+ bxcan_chip_softreset(priv);
+ return err;
+}
+
+static int bxcan_open(struct net_device *ndev)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ netdev_err(ndev, "failed to enable clock, error %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ err = open_candev(ndev);
+ if (err) {
+ netdev_err(ndev, "open_candev() failed, error %pe\n",
+ ERR_PTR(err));
+ goto out_disable_clock;
+ }
+
+ can_rx_offload_enable(&priv->offload);
+ err = request_irq(ndev->irq, bxcan_rx_isr, IRQF_SHARED, ndev->name,
+ ndev);
+ if (err) {
+ netdev_err(ndev, "failed to register rx irq(%d), error %pe\n",
+ ndev->irq, ERR_PTR(err));
+ goto out_close_candev;
+ }
+
+ err = request_irq(priv->tx_irq, bxcan_tx_isr, IRQF_SHARED, ndev->name,
+ ndev);
+ if (err) {
+ netdev_err(ndev, "failed to register tx irq(%d), error %pe\n",
+ priv->tx_irq, ERR_PTR(err));
+ goto out_free_rx_irq;
+ }
+
+ err = request_irq(priv->sce_irq, bxcan_state_change_isr, IRQF_SHARED,
+ ndev->name, ndev);
+ if (err) {
+ netdev_err(ndev, "failed to register sce irq(%d), error %pe\n",
+ priv->sce_irq, ERR_PTR(err));
+ goto out_free_tx_irq;
+ }
+
+ err = bxcan_chip_start(ndev);
+ if (err)
+ goto out_free_sce_irq;
+
+ netif_start_queue(ndev);
+ return 0;
+
+out_free_sce_irq:
+ free_irq(priv->sce_irq, ndev);
+out_free_tx_irq:
+ free_irq(priv->tx_irq, ndev);
+out_free_rx_irq:
+ free_irq(ndev->irq, ndev);
+out_close_candev:
+ can_rx_offload_disable(&priv->offload);
+ close_candev(ndev);
+out_disable_clock:
+ clk_disable_unprepare(priv->clk);
+ return err;
+}
+
+static void bxcan_chip_stop(struct net_device *ndev)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ struct bxcan_regs __iomem *regs = priv->regs;
+
+ /* disable all interrupts */
+ bxcan_rmw(priv, &regs->ier, BXCAN_IER_SLKIE | BXCAN_IER_WKUIE |
+ BXCAN_IER_ERRIE | BXCAN_IER_LECIE | BXCAN_IER_BOFIE |
+ BXCAN_IER_EPVIE | BXCAN_IER_EWGIE | BXCAN_IER_FOVIE1 |
+ BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 |
+ BXCAN_IER_FFIE0 | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE, 0);
+ bxcan_disable_filters(priv, priv->primary);
+ bxcan_enter_sleep_mode(priv);
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int bxcan_stop(struct net_device *ndev)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ bxcan_chip_stop(ndev);
+ free_irq(ndev->irq, ndev);
+ free_irq(priv->tx_irq, ndev);
+ free_irq(priv->sce_irq, ndev);
+ can_rx_offload_disable(&priv->offload);
+ close_candev(ndev);
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static netdev_tx_t bxcan_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ struct bxcan_regs __iomem *regs = priv->regs;
+ struct bxcan_mb __iomem *mb_regs;
+ unsigned int idx;
+ u32 id;
+ int i, j;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (bxcan_tx_busy(priv))
+ return NETDEV_TX_BUSY;
+
+ idx = bxcan_get_tx_head(priv);
+ priv->tx_head++;
+ if (bxcan_get_tx_free(priv) == 0)
+ netif_stop_queue(ndev);
+
+ mb_regs = &regs->tx_mb[idx];
+ if (cf->can_id & CAN_EFF_FLAG)
+ id = FIELD_PREP(BXCAN_TIxR_EXID_MASK, cf->can_id) |
+ BXCAN_TIxR_IDE;
+ else
+ id = FIELD_PREP(BXCAN_TIxR_STID_MASK, cf->can_id);
+
+ if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
+ id |= BXCAN_TIxR_RTR;
+ } else {
+ for (i = 0, j = 0; i < cf->len; i += 4, j++)
+ writel(*(u32 *)(cf->data + i), &mb_regs->data[j]);
+ }
+
+ writel(FIELD_PREP(BXCAN_TDTxR_DLC_MASK, cf->len), &mb_regs->dlc);
+
+ can_put_echo_skb(skb, ndev, idx, 0);
+
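+ /* The transmit request bit lives in the ID register, so it is
+ * written last, once payload, DLC and echo skb are in place.
+ */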
+ /* Start transmission */
+ writel(id | BXCAN_TIxR_TXRQ, &mb_regs->id);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops bxcan_netdev_ops = {
+ .ndo_open = bxcan_open,
+ .ndo_stop = bxcan_stop,
+ .ndo_start_xmit = bxcan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops bxcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static int bxcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int err;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ err = bxcan_chip_start(ndev);
+ if (err)
+ return err;
+
+ netif_wake_queue(ndev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int bxcan_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ struct bxcan_priv *priv = netdev_priv(ndev);
+ struct bxcan_regs __iomem *regs = priv->regs;
+ u32 esr;
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+
+ esr = readl(&regs->esr);
+ bec->txerr = FIELD_GET(BXCAN_ESR_TEC_MASK, esr);
+ bec->rxerr = FIELD_GET(BXCAN_ESR_REC_MASK, esr);
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static int bxcan_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct bxcan_priv *priv;
+ struct clk *clk = NULL;
+ void __iomem *regs;
+ struct regmap *gcan;
+ bool primary;
+ int err, rx_irq, tx_irq, sce_irq;
+
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs)) {
+ dev_err(dev, "failed to get base address\n");
+ return PTR_ERR(regs);
+ }
+
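+ /* The filter bank registers are shared between the primary and
+ * secondary controller and therefore accessed through a syscon
+ * regmap instead of this instance's own register window.
+ */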
+ gcan = syscon_regmap_lookup_by_phandle(np, "st,gcan");
+ if (IS_ERR(gcan)) {
+ dev_err(dev, "failed to get shared memory base address\n");
+ return PTR_ERR(gcan);
+ }
+
+ primary = of_property_read_bool(np, "st,can-primary");
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ rx_irq = platform_get_irq_byname(pdev, "rx0");
+ if (rx_irq < 0) {
+ dev_err(dev, "failed to get rx0 irq\n");
+ return rx_irq;
+ }
+
+ tx_irq = platform_get_irq_byname(pdev, "tx");
+ if (tx_irq < 0) {
+ dev_err(dev, "failed to get tx irq\n");
+ return tx_irq;
+ }
+
+ sce_irq = platform_get_irq_byname(pdev, "sce");
+ if (sce_irq < 0) {
+ dev_err(dev, "failed to get sce irq\n");
+ return sce_irq;
+ }
+
+ ndev = alloc_candev(sizeof(struct bxcan_priv), BXCAN_TX_MB_NUM);
+ if (!ndev) {
+ dev_err(dev, "alloc_candev() failed\n");
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(ndev);
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+ ndev->netdev_ops = &bxcan_netdev_ops;
+ ndev->ethtool_ops = &bxcan_ethtool_ops;
+ ndev->irq = rx_irq;
+ ndev->flags |= IFF_ECHO;
+
+ priv->dev = dev;
+ priv->ndev = ndev;
+ priv->regs = regs;
+ priv->gcan = gcan;
+ priv->clk = clk;
+ priv->tx_irq = tx_irq;
+ priv->sce_irq = sce_irq;
+ priv->primary = primary;
+ priv->can.clock.freq = clk_get_rate(clk);
+ spin_lock_init(&priv->rmw_lock);
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+ priv->can.bittiming_const = &bxcan_bittiming_const;
+ priv->can.do_set_mode = bxcan_do_set_mode;
+ priv->can.do_get_berr_counter = bxcan_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING;
+
+ priv->offload.mailbox_read = bxcan_mailbox_read;
+ err = can_rx_offload_add_fifo(ndev, &priv->offload, BXCAN_NAPI_WEIGHT);
+ if (err) {
+ dev_err(dev, "failed to add FIFO rx_offload\n");
+ goto out_free_candev;
+ }
+
+ err = register_candev(ndev);
+ if (err) {
+ dev_err(dev, "failed to register netdev\n");
+ goto out_can_rx_offload_del;
+ }
+
+ dev_info(dev, "clk: %d Hz, IRQs: %d, %d, %d\n", priv->can.clock.freq,
+ tx_irq, rx_irq, sce_irq);
+ return 0;
+
+out_can_rx_offload_del:
+ can_rx_offload_del(&priv->offload);
+out_free_candev:
+ free_candev(ndev);
+ return err;
+}
+
+static int bxcan_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct bxcan_priv *priv = netdev_priv(ndev);
+
+ unregister_candev(ndev);
+ clk_disable_unprepare(priv->clk);
+ can_rx_offload_del(&priv->offload);
+ free_candev(ndev);
+ return 0;
+}
+
+static int __maybe_unused bxcan_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct bxcan_priv *priv = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return 0;
+
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+
+ bxcan_enter_sleep_mode(priv);
+ priv->can.state = CAN_STATE_SLEEPING;
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static int __maybe_unused bxcan_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct bxcan_priv *priv = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return 0;
+
+ clk_prepare_enable(priv->clk);
+ bxcan_leave_sleep_mode(priv);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(bxcan_pm_ops, bxcan_suspend, bxcan_resume);
+
+static const struct of_device_id bxcan_of_match[] = {
+ {.compatible = "st,stm32f4-bxcan"},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bxcan_of_match);
+
+static struct platform_driver bxcan_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .pm = &bxcan_pm_ops,
+ .of_match_table = bxcan_of_match,
+ },
+ .probe = bxcan_probe,
+ .remove = bxcan_remove,
+};
+
+module_platform_driver(bxcan_driver);
+
+MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
+MODULE_DESCRIPTION("STMicroelectronics Basic Extended CAN controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index 962725788b0a..1f0e9acb69ec 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -20,5 +20,6 @@ config CAN_C_CAN_PCI
depends on PCI
help
This driver adds support for the C_CAN/D_CAN chips connected
- to the PCI bus.
+ to the PCI bus. E.g. for the C_CAN controller IP inside the
+ Intel Atom E6xx series IOH (aka EG20T 'PCH CAN').
endif
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 08b6efa7a1a7..029cd8194ed5 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -211,7 +211,6 @@ struct c_can_priv {
struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */
void (*raminit)(const struct c_can_priv *priv, bool enable);
u32 comm_rcv_high;
- u32 dlc[];
};
struct net_device *alloc_c_can_dev(int msg_obj_num);
@@ -224,7 +223,7 @@ int c_can_power_up(struct net_device *dev);
int c_can_power_down(struct net_device *dev);
#endif
-void c_can_set_ethtool_ops(struct net_device *dev);
+extern const struct ethtool_ops c_can_ethtool_ops;
static inline u8 c_can_get_tx_head(const struct c_can_tx_ring *ring)
{
@@ -236,9 +235,22 @@ static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring)
return ring->tail & (ring->obj_num - 1);
}
-static inline u8 c_can_get_tx_free(const struct c_can_tx_ring *ring)
+static inline u8 c_can_get_tx_free(const struct c_can_priv *priv,
+ const struct c_can_tx_ring *ring)
{
- return ring->obj_num - (ring->head - ring->tail);
+ u8 head = c_can_get_tx_head(ring);
+ u8 tail = c_can_get_tx_tail(ring);
+
+ if (priv->type == BOSCH_D_CAN)
+ return ring->obj_num - (ring->head - ring->tail);
+
+ /* This is not a FIFO. C/D_CAN sends out the buffers
+ * prioritized. The lowest buffer number wins.
+ */
+ if (head < tail)
+ return 0;
+
+ return ring->obj_num - head;
}
#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_ethtool.c b/drivers/net/can/c_can/c_can_ethtool.c
index 377c7d2e7612..e41167eda673 100644
--- a/drivers/net/can/c_can/c_can_ethtool.c
+++ b/drivers/net/can/c_can/c_can_ethtool.c
@@ -11,16 +11,10 @@
#include "c_can.h"
-static void c_can_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *info)
-{
- struct c_can_priv *priv = netdev_priv(netdev);
- strscpy(info->driver, "c_can", sizeof(info->driver));
- strscpy(info->bus_info, dev_name(priv->device), sizeof(info->bus_info));
-}
-
static void c_can_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct c_can_priv *priv = netdev_priv(netdev);
@@ -30,12 +24,7 @@ static void c_can_get_ringparam(struct net_device *netdev,
ring->tx_pending = priv->msg_obj_tx_num;
}
-static const struct ethtool_ops c_can_ethtool_ops = {
- .get_drvinfo = c_can_get_drvinfo,
+const struct ethtool_ops c_can_ethtool_ops = {
.get_ringparam = c_can_get_ringparam,
+ .get_ts_info = ethtool_op_get_ts_info,
};
-
-void c_can_set_ethtool_ops(struct net_device *netdev)
-{
- netdev->ethtool_ops = &c_can_ethtool_ops;
-}
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index 52671d1ea17d..c63f7fc1e691 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -40,7 +40,6 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include "c_can.h"
@@ -403,10 +402,10 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
frame->data[i + 1] = data >> 8;
}
}
- }
+ stats->rx_bytes += frame->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += frame->len;
netif_receive_skb(skb);
return 0;
@@ -430,7 +429,7 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
static bool c_can_tx_busy(const struct c_can_priv *priv,
const struct c_can_tx_ring *tx_ring)
{
- if (c_can_get_tx_free(tx_ring) > 0)
+ if (c_can_get_tx_free(priv, tx_ring) > 0)
return false;
netif_stop_queue(priv->dev);
@@ -438,7 +437,7 @@ static bool c_can_tx_busy(const struct c_can_priv *priv,
/* Memory barrier before checking tx_free (head and tail) */
smp_mb();
- if (c_can_get_tx_free(tx_ring) == 0) {
+ if (c_can_get_tx_free(priv, tx_ring) == 0) {
netdev_dbg(priv->dev,
"Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
tx_ring->head, tx_ring->tail,
@@ -458,7 +457,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
struct c_can_tx_ring *tx_ring = &priv->tx;
u32 idx, obj, cmd = IF_COMM_TX;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
if (c_can_tx_busy(priv, tx_ring))
@@ -466,7 +465,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
idx = c_can_get_tx_head(tx_ring);
tx_ring->head++;
- if (c_can_get_tx_free(tx_ring) == 0)
+ if (c_can_get_tx_free(priv, tx_ring) == 0)
netif_stop_queue(dev);
if (idx < c_can_get_tx_tail(tx_ring))
@@ -477,7 +476,6 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
* transmit as we might race against do_tx().
*/
c_can_setup_tx_object(dev, IF_TX, frame, idx);
- priv->dlc[idx] = frame->len;
can_put_echo_skb(skb, dev, idx, 0);
obj = idx + priv->msg_obj_tx_first;
c_can_object_put(dev, IF_TX, obj, cmd);
@@ -742,8 +740,7 @@ static void c_can_do_tx(struct net_device *dev)
* NAPI. We are not transmitting.
*/
c_can_inval_tx_object(dev, IF_NAPI, obj);
- can_get_echo_skb(dev, idx, NULL);
- bytes += priv->dlc[idx];
+ bytes += can_get_echo_skb(dev, idx, NULL);
pkts++;
}
@@ -751,7 +748,7 @@ static void c_can_do_tx(struct net_device *dev)
return;
tx_ring->tail += pkts;
- if (c_can_get_tx_free(tx_ring)) {
+ if (c_can_get_tx_free(priv, tx_ring)) {
/* Make sure that anybody stopping the queue after
* this sees the new tx_ring->tail.
*/
@@ -761,11 +758,9 @@ static void c_can_do_tx(struct net_device *dev)
stats->tx_bytes += bytes;
stats->tx_packets += pkts;
- can_led_event(dev, CAN_LED_EVENT_TX);
tail = c_can_get_tx_tail(tx_ring);
-
- if (tail == 0) {
+ if (priv->type == BOSCH_D_CAN && tail == 0) {
u8 head = c_can_get_tx_head(tx_ring);
/* Start transmission for all cached messages */
@@ -908,9 +903,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
quota -= n;
}
- if (pkts)
- can_led_event(dev, CAN_LED_EVENT_RX);
-
return pkts;
}
@@ -920,7 +912,6 @@ static int c_can_handle_state_change(struct net_device *dev,
unsigned int reg_err_counter;
unsigned int rx_err_passive;
struct c_can_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
struct can_berr_counter bec;
@@ -960,15 +951,14 @@ static int c_can_handle_state_change(struct net_device *dev,
switch (error_type) {
case C_CAN_NO_ERROR:
- /* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = CAN_ERR_CRTL_ACTIVE;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
break;
case C_CAN_ERROR_WARNING:
/* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -978,7 +968,7 @@ static int c_can_handle_state_change(struct net_device *dev,
break;
case C_CAN_ERROR_PASSIVE:
/* error passive state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
if (rx_err_passive)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if (bec.txerr > 127)
@@ -996,8 +986,6 @@ static int c_can_handle_state_change(struct net_device *dev,
break;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -1064,8 +1052,6 @@ static int c_can_handle_bus_err(struct net_device *dev,
break;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
}
@@ -1189,8 +1175,6 @@ static int c_can_open(struct net_device *dev)
if (err)
goto exit_start_fail;
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
napi_enable(&priv->napi);
/* enable status change, error and module interrupts */
c_can_irq_control(priv, true);
@@ -1221,8 +1205,6 @@ static int c_can_close(struct net_device *dev)
c_can_reset_ram(priv, false);
c_can_pm_runtime_put_sync(priv);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -1232,8 +1214,7 @@ struct net_device *alloc_c_can_dev(int msg_obj_num)
struct c_can_priv *priv;
int msg_obj_tx_num = msg_obj_num / 2;
- dev = alloc_candev(struct_size(priv, dlc, msg_obj_tx_num),
- msg_obj_tx_num);
+ dev = alloc_candev(sizeof(*priv), msg_obj_tx_num);
if (!dev)
return NULL;
@@ -1254,7 +1235,8 @@ struct net_device *alloc_c_can_dev(int msg_obj_num)
priv->tx.tail = 0;
priv->tx.obj_num = msg_obj_tx_num;
- netif_napi_add(dev, &priv->napi, c_can_poll, priv->msg_obj_rx_num);
+ netif_napi_add_weight(dev, &priv->napi, c_can_poll,
+ priv->msg_obj_rx_num);
priv->dev = dev;
priv->can.bittiming_const = &c_can_bittiming_const;
@@ -1372,8 +1354,6 @@ static const struct net_device_ops c_can_netdev_ops = {
int register_c_can_dev(struct net_device *dev)
{
- int err;
-
/* Deactivate pins to prevent DRA7 DCAN IP from being
* stuck in transition when module is disabled.
* Pins are activated in c_can_start() and deactivated
@@ -1383,12 +1363,9 @@ int register_c_can_dev(struct net_device *dev)
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &c_can_netdev_ops;
- c_can_set_ethtool_ops(dev);
+ dev->ethtool_ops = &c_can_ethtool_ops;
- err = register_candev(dev);
- if (!err)
- devm_can_led_init(dev);
- return err;
+ return register_candev(dev);
}
EXPORT_SYMBOL_GPL(register_c_can_dev);
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index bf2f8c3da1c1..093bea597f4e 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -227,7 +227,6 @@ out_iounmap:
pci_iounmap(pdev, addr);
out_release_regions:
pci_disable_msi(pdev);
- pci_clear_master(pdev);
pci_release_regions(pdev);
out_disable_device:
pci_disable_device(pdev);
@@ -247,7 +246,6 @@ static void c_can_pci_remove(struct pci_dev *pdev)
pci_iounmap(pdev, addr);
pci_disable_msi(pdev);
- pci_clear_master(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 86e95e9d6533..03ccb7cfacaf 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -290,8 +290,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
goto exit;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(&pdev->dev, mem);
+ addr = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
goto exit;
diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c
new file mode 100644
index 000000000000..dc7192ecb001
--- /dev/null
+++ b/drivers/net/can/can327.c
@@ -0,0 +1,1149 @@
+// SPDX-License-Identifier: GPL-2.0
+/* ELM327 based CAN interface driver (tty line discipline)
+ *
+ * This driver started as a derivative of linux/drivers/net/can/slcan.c
+ * and my thanks go to the original authors for their inspiration.
+ *
+ * can327.c Author : Max Staudt <max-linux@enpas.org>
+ * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
+ * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <linux/bitops.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/tty_ldisc.h>
+#include <linux/workqueue.h>
+
+#include <uapi/linux/tty.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/rx-offload.h>
+
+#define CAN327_NAPI_WEIGHT 4
+
+#define CAN327_SIZE_TXBUF 32
+#define CAN327_SIZE_RXBUF 1024
+
+#define CAN327_CAN_CONFIG_SEND_SFF 0x8000
+#define CAN327_CAN_CONFIG_VARIABLE_DLC 0x4000
+#define CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF 0x2000
+#define CAN327_CAN_CONFIG_BAUDRATE_MULT_8_7 0x1000
+
+#define CAN327_DUMMY_CHAR 'y'
+#define CAN327_DUMMY_STRING "y"
+#define CAN327_READY_CHAR '>'
+
+/* Bits in elm->cmds_todo */
+enum can327_tx_do {
+ CAN327_TX_DO_CAN_DATA = 0,
+ CAN327_TX_DO_CANID_11BIT,
+ CAN327_TX_DO_CANID_29BIT_LOW,
+ CAN327_TX_DO_CANID_29BIT_HIGH,
+ CAN327_TX_DO_CAN_CONFIG_PART2,
+ CAN327_TX_DO_CAN_CONFIG,
+ CAN327_TX_DO_RESPONSES,
+ CAN327_TX_DO_SILENT_MONITOR,
+ CAN327_TX_DO_INIT,
+};
+
+struct can327 {
+ /* This must be the first member when using alloc_candev() */
+ struct can_priv can;
+
+ struct can_rx_offload offload;
+
+ /* TTY buffers */
+ u8 txbuf[CAN327_SIZE_TXBUF];
+ u8 rxbuf[CAN327_SIZE_RXBUF];
+
+ /* Per-channel lock */
+ spinlock_t lock;
+
+ /* TTY and netdev devices that we're bridging */
+ struct tty_struct *tty;
+ struct net_device *dev;
+
+ /* TTY buffer accounting */
+ struct work_struct tx_work; /* Flushes TTY TX buffer */
+ u8 *txhead; /* Next TX byte */
+ size_t txleft; /* Bytes left to TX */
+ int rxfill; /* Bytes already RX'd in buffer */
+
+ /* State machine */
+ enum {
+ CAN327_STATE_NOTINIT = 0,
+ CAN327_STATE_GETDUMMYCHAR,
+ CAN327_STATE_GETPROMPT,
+ CAN327_STATE_RECEIVING,
+ } state;
+
+ /* Things we have yet to send */
+ char **next_init_cmd;
+ unsigned long cmds_todo;
+
+ /* The CAN frame and config the ELM327 is sending/using,
+ * or will send/use after finishing all cmds_todo
+ */
+ struct can_frame can_frame_to_send;
+ u16 can_config;
+ u8 can_bitrate_divisor;
+
+ /* Parser state */
+ bool drop_next_line;
+
+ /* Stop the channel on UART side hardware failure, e.g. stray
+ * characters or neverending lines. This may be caused by bad
+ * UART wiring, a bad ELM327, a bad UART bridge...
+ * Once this is true, nothing will be sent to the TTY.
+ */
+ bool uart_side_failure;
+};
+
+static inline void can327_uart_side_failure(struct can327 *elm);
+
+static void can327_send(struct can327 *elm, const void *buf, size_t len)
+{
+ int written;
+
+ lockdep_assert_held(&elm->lock);
+
+ if (elm->uart_side_failure)
+ return;
+
+ memcpy(elm->txbuf, buf, len);
+
+ /* The order of the next two lines is *very* important.
+ * When we are sending only a small amount of data,
+ * the transfer may complete inside the ops->write()
+ * routine, because it runs with interrupts enabled.
+ * In that case we will *never* get a WRITE_WAKEUP event
+ * unless we requested it before the write operation.
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+ set_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+ written = elm->tty->ops->write(elm->tty, elm->txbuf, len);
+ if (written < 0) {
+ netdev_err(elm->dev, "Failed to write to tty %s.\n",
+ elm->tty->name);
+ can327_uart_side_failure(elm);
+ return;
+ }
+
+ elm->txleft = len - written;
+ elm->txhead = elm->txbuf + written;
+}
+
+/* Take the ELM327 out of almost any state and back into command mode.
+ * We send CAN327_DUMMY_CHAR which will either abort any running
+ * operation, or be echoed back to us in case we're already in command
+ * mode.
+ */
+static void can327_kick_into_cmd_mode(struct can327 *elm)
+{
+ lockdep_assert_held(&elm->lock);
+
+ if (elm->state != CAN327_STATE_GETDUMMYCHAR &&
+ elm->state != CAN327_STATE_GETPROMPT) {
+ can327_send(elm, CAN327_DUMMY_STRING, 1);
+
+ elm->state = CAN327_STATE_GETDUMMYCHAR;
+ }
+}
+
+/* Schedule a CAN frame and necessary config changes to be sent to the TTY. */
+static void can327_send_frame(struct can327 *elm, struct can_frame *frame)
+{
+ lockdep_assert_held(&elm->lock);
+
+ /* Schedule any necessary changes in ELM327's CAN configuration */
+ if (elm->can_frame_to_send.can_id != frame->can_id) {
+ /* Set the new CAN ID for transmission. */
+ if ((frame->can_id ^ elm->can_frame_to_send.can_id)
+ & CAN_EFF_FLAG) {
+ elm->can_config =
+ (frame->can_id & CAN_EFF_FLAG ? 0 : CAN327_CAN_CONFIG_SEND_SFF) |
+ CAN327_CAN_CONFIG_VARIABLE_DLC |
+ CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF |
+ elm->can_bitrate_divisor;
+
+ set_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo);
+ }
+
+ if (frame->can_id & CAN_EFF_FLAG) {
+ clear_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_CANID_29BIT_LOW, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_CANID_29BIT_HIGH, &elm->cmds_todo);
+ } else {
+ set_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo);
+ clear_bit(CAN327_TX_DO_CANID_29BIT_LOW,
+ &elm->cmds_todo);
+ clear_bit(CAN327_TX_DO_CANID_29BIT_HIGH,
+ &elm->cmds_todo);
+ }
+ }
+
+ /* Schedule the CAN frame itself. */
+ elm->can_frame_to_send = *frame;
+ set_bit(CAN327_TX_DO_CAN_DATA, &elm->cmds_todo);
+
+ can327_kick_into_cmd_mode(elm);
+}
+
+/* ELM327 initialisation sequence.
+ * The line length is limited by the buffer in can327_handle_prompt().
+ */
+static char *can327_init_script[] = {
+ "AT WS\r", /* v1.0: Warm Start */
+ "AT PP FF OFF\r", /* v1.0: All Programmable Parameters Off */
+ "AT M0\r", /* v1.0: Memory Off */
+ "AT AL\r", /* v1.0: Allow Long messages */
+ "AT BI\r", /* v1.0: Bypass Initialisation */
+ "AT CAF0\r", /* v1.0: CAN Auto Formatting Off */
+ "AT CFC0\r", /* v1.0: CAN Flow Control Off */
+ "AT CF 000\r", /* v1.0: Reset CAN ID Filter */
+ "AT CM 000\r", /* v1.0: Reset CAN ID Mask */
+ "AT E1\r", /* v1.0: Echo On */
+ "AT H1\r", /* v1.0: Headers On */
+ "AT L0\r", /* v1.0: Linefeeds Off */
+ "AT SH 7DF\r", /* v1.0: Set CAN sending ID to 0x7df */
+ "AT ST FF\r", /* v1.0: Set maximum Timeout for response after TX */
+ "AT AT0\r", /* v1.2: Adaptive Timing Off */
+ "AT D1\r", /* v1.3: Print DLC On */
+ "AT S1\r", /* v1.3: Spaces On */
+ "AT TP B\r", /* v1.0: Try Protocol B */
+ NULL
+};
+
+static void can327_init_device(struct can327 *elm)
+{
+ lockdep_assert_held(&elm->lock);
+
+ elm->state = CAN327_STATE_NOTINIT;
+ elm->can_frame_to_send.can_id = 0x7df; /* ELM327 HW default */
+ elm->rxfill = 0;
+ elm->drop_next_line = 0;
+
+ /* We can only set the bitrate as a fraction of 500000.
+ * The bitrates listed in can327_bitrate_const will
+ * limit the user to the right values.
+ */
+ elm->can_bitrate_divisor = 500000 / elm->can.bittiming.bitrate;
+ elm->can_config =
+ CAN327_CAN_CONFIG_SEND_SFF | CAN327_CAN_CONFIG_VARIABLE_DLC |
+ CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF | elm->can_bitrate_divisor;
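+ /* E.g. a requested bitrate of 250000 yields a divisor of 2, which
+ * ends up in the low bits of the ATPB configuration word sent from
+ * can327_handle_prompt().
+ */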
+
+ /* Configure ELM327 and then start monitoring */
+ elm->next_init_cmd = &can327_init_script[0];
+ set_bit(CAN327_TX_DO_INIT, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_SILENT_MONITOR, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_RESPONSES, &elm->cmds_todo);
+ set_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo);
+
+ can327_kick_into_cmd_mode(elm);
+}
+
+static void can327_feed_frame_to_netdev(struct can327 *elm, struct sk_buff *skb)
+{
+ lockdep_assert_held(&elm->lock);
+
+ if (!netif_running(elm->dev)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ /* Queue for NAPI pickup.
+ * rx-offload will update stats and LEDs for us.
+ */
+ if (can_rx_offload_queue_tail(&elm->offload, skb))
+ elm->dev->stats.rx_fifo_errors++;
+
+ /* Wake NAPI */
+ can_rx_offload_irq_finish(&elm->offload);
+}
+
+/* Called when we're out of ideas and just want it all to end. */
+static inline void can327_uart_side_failure(struct can327 *elm)
+{
+ struct can_frame *frame;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&elm->lock);
+
+ elm->uart_side_failure = true;
+
+ clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+
+ elm->can.can_stats.bus_off++;
+ netif_stop_queue(elm->dev);
+ elm->can.state = CAN_STATE_BUS_OFF;
+ can_bus_off(elm->dev);
+
+ netdev_err(elm->dev,
+ "ELM327 misbehaved. Blocking further communication.\n");
+
+ skb = alloc_can_err_skb(elm->dev, &frame);
+ if (!skb)
+ return;
+
+ frame->can_id |= CAN_ERR_BUSOFF;
+ can327_feed_frame_to_netdev(elm, skb);
+}
+
+/* Compares a byte buffer (non-NUL terminated) to the payload part of
+ * a string, and returns true iff the buffer (content *and* length) is
+ * exactly that string, without the terminating NUL byte.
+ *
+ * Example: If reference is "BUS ERROR", then this returns true iff nbytes == 9
+ * and !memcmp(buf, "BUS ERROR", 9).
+ *
+ * The reason to use strings is so we can easily include them in the C
+ * code, and to avoid hardcoding lengths.
+ */
+static inline bool can327_rxbuf_cmp(const u8 *buf, size_t nbytes,
+ const char *reference)
+{
+ size_t ref_len = strlen(reference);
+
+ return (nbytes == ref_len) && !memcmp(buf, reference, ref_len);
+}
+
+static void can327_parse_error(struct can327 *elm, size_t len)
+{
+ struct can_frame *frame;
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&elm->lock);
+
+ skb = alloc_can_err_skb(elm->dev, &frame);
+ if (!skb)
+ /* It's okay to return here:
+ * The outer parsing loop will drop this UART buffer.
+ */
+ return;
+
+ /* Filter possible error messages based on length of RX'd line */
+ if (can327_rxbuf_cmp(elm->rxbuf, len, "UNABLE TO CONNECT")) {
+ netdev_err(elm->dev,
+ "ELM327 reported UNABLE TO CONNECT. Please check your setup.\n");
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUFFER FULL")) {
+ /* This will only happen if the last data line was complete.
+ * Otherwise, can327_parse_frame() will heuristically
+ * emit this kind of error frame instead.
+ */
+ frame->can_id |= CAN_ERR_CRTL;
+ frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUS ERROR")) {
+ frame->can_id |= CAN_ERR_BUSERROR;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "CAN ERROR")) {
+ frame->can_id |= CAN_ERR_PROT;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "<RX ERROR")) {
+ frame->can_id |= CAN_ERR_PROT;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUS BUSY")) {
+ frame->can_id |= CAN_ERR_PROT;
+ frame->data[2] = CAN_ERR_PROT_OVERLOAD;
+ } else if (can327_rxbuf_cmp(elm->rxbuf, len, "FB ERROR")) {
+ frame->can_id |= CAN_ERR_PROT;
+ frame->data[2] = CAN_ERR_PROT_TX;
+ } else if (len == 5 && !memcmp(elm->rxbuf, "ERR", 3)) {
+ /* ERR is followed by two digits, hence line length 5 */
+ netdev_err(elm->dev, "ELM327 reported an ERR%c%c. Please power it off and on again.\n",
+ elm->rxbuf[3], elm->rxbuf[4]);
+ frame->can_id |= CAN_ERR_CRTL;
+ } else {
+ /* Something else has happened.
+ * Maybe garbage on the UART line.
+ * Emit a generic error frame.
+ */
+ }
+
+ can327_feed_frame_to_netdev(elm, skb);
+}
+
+/* Parse CAN frames coming as ASCII from ELM327.
+ * They can be of various formats:
+ *
+ * 29-bit ID (EFF): 12 34 56 78 D PL PL PL PL PL PL PL PL
+ * 11-bit ID (!EFF): 123 D PL PL PL PL PL PL PL PL
+ *
+ * where D = DLC, PL = payload byte
+ *
+ * Instead of a payload, RTR indicates a remote request.
+ *
+ * We will use the spaces and line length to guess the format.
+ */
+static int can327_parse_frame(struct can327 *elm, size_t len)
+{
+ struct can_frame *frame;
+ struct sk_buff *skb;
+ int hexlen;
+ int datastart;
+ int i;
+
+ lockdep_assert_held(&elm->lock);
+
+ skb = alloc_can_skb(elm->dev, &frame);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Find first non-hex and non-space character:
+ * - In the simplest case, there is none.
+ * - For RTR frames, 'R' is the first non-hex character.
+ * - An error message may replace the end of the data line.
+ */
+ for (hexlen = 0; hexlen <= len; hexlen++) {
+ if (hex_to_bin(elm->rxbuf[hexlen]) < 0 &&
+ elm->rxbuf[hexlen] != ' ') {
+ break;
+ }
+ }
+
+ /* Sanity check whether the line is really a clean hexdump,
+ * or terminated by an error message, or contains garbage.
+ */
+ if (hexlen < len && !isdigit(elm->rxbuf[hexlen]) &&
+ !isupper(elm->rxbuf[hexlen]) && '<' != elm->rxbuf[hexlen] &&
+ ' ' != elm->rxbuf[hexlen]) {
+ /* The line is likely garbled anyway, so bail.
+ * The main code will restart listening.
+ */
+ kfree_skb(skb);
+ return -ENODATA;
+ }
+
+ /* Use spaces in CAN ID to distinguish 29 or 11 bit address length.
+ * No out-of-bounds access:
+ * We use the fact that we can always read from elm->rxbuf.
+ */
+ if (elm->rxbuf[2] == ' ' && elm->rxbuf[5] == ' ' &&
+ elm->rxbuf[8] == ' ' && elm->rxbuf[11] == ' ' &&
+ elm->rxbuf[13] == ' ') {
+ frame->can_id = CAN_EFF_FLAG;
+ datastart = 14;
+ } else if (elm->rxbuf[3] == ' ' && elm->rxbuf[5] == ' ') {
+ datastart = 6;
+ } else {
+ /* This is not a well-formatted data line.
+ * Assume it's an error message.
+ */
+ kfree_skb(skb);
+ return -ENODATA;
+ }
+
+ if (hexlen < datastart) {
+ /* The line is too short to be a valid frame hex dump.
+ * Something interrupted the hex dump or it is invalid.
+ */
+ kfree_skb(skb);
+ return -ENODATA;
+ }
+
+ /* From here on all chars up to buf[hexlen] are hex or spaces,
+ * at well-defined offsets.
+ */
+
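+ /* At this point the layout is fixed: the DLC nibble sits at
+ * datastart - 2 (offset 12 for 29-bit IDs, offset 4 for 11-bit
+ * ones) and the CAN ID occupies the hex positions decoded below.
+ */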
+ /* Read CAN data length */
+ frame->len = (hex_to_bin(elm->rxbuf[datastart - 2]) << 0);
+
+ /* Read CAN ID */
+ if (frame->can_id & CAN_EFF_FLAG) {
+ frame->can_id |= (hex_to_bin(elm->rxbuf[0]) << 28) |
+ (hex_to_bin(elm->rxbuf[1]) << 24) |
+ (hex_to_bin(elm->rxbuf[3]) << 20) |
+ (hex_to_bin(elm->rxbuf[4]) << 16) |
+ (hex_to_bin(elm->rxbuf[6]) << 12) |
+ (hex_to_bin(elm->rxbuf[7]) << 8) |
+ (hex_to_bin(elm->rxbuf[9]) << 4) |
+ (hex_to_bin(elm->rxbuf[10]) << 0);
+ } else {
+ frame->can_id |= (hex_to_bin(elm->rxbuf[0]) << 8) |
+ (hex_to_bin(elm->rxbuf[1]) << 4) |
+ (hex_to_bin(elm->rxbuf[2]) << 0);
+ }
+
+ /* Check for RTR frame */
+ if (elm->rxfill >= hexlen + 3 &&
+ !memcmp(&elm->rxbuf[hexlen], "RTR", 3)) {
+ frame->can_id |= CAN_RTR_FLAG;
+ }
+
+ /* Is the line long enough to hold the advertised payload?
+ * Note: RTR frames have a DLC, but no actual payload.
+ */
+ if (!(frame->can_id & CAN_RTR_FLAG) &&
+ (hexlen < frame->len * 3 + datastart)) {
+ /* Incomplete frame.
+ * Probably the ELM327's RS232 TX buffer was full.
+ * Emit an error frame and exit.
+ */
+ frame->can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
+ frame->len = CAN_ERR_DLC;
+ frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ can327_feed_frame_to_netdev(elm, skb);
+
+ /* Signal failure to parse.
+ * The line will be re-parsed as an error line, which will fail.
+ * However, this will correctly drop the state machine back into
+ * command mode.
+ */
+ return -ENODATA;
+ }
+
+ /* Parse the data nibbles. */
+ for (i = 0; i < frame->len; i++) {
+ frame->data[i] =
+ (hex_to_bin(elm->rxbuf[datastart + 3 * i]) << 4) |
+ (hex_to_bin(elm->rxbuf[datastart + 3 * i + 1]));
+ }
+
+ /* Feed the frame to the network layer. */
+ can327_feed_frame_to_netdev(elm, skb);
+
+ return 0;
+}
+
+static void can327_parse_line(struct can327 *elm, size_t len)
+{
+ lockdep_assert_held(&elm->lock);
+
+ /* Skip empty lines */
+ if (!len)
+ return;
+
+ /* Skip echo lines */
+ if (elm->drop_next_line) {
+ elm->drop_next_line = 0;
+ return;
+ } else if (!memcmp(elm->rxbuf, "AT", 2)) {
+ return;
+ }
+
+ /* Regular parsing */
+ if (elm->state == CAN327_STATE_RECEIVING &&
+ can327_parse_frame(elm, len)) {
+ /* Parse an error line. */
+ can327_parse_error(elm, len);
+
+ /* Start afresh. */
+ can327_kick_into_cmd_mode(elm);
+ }
+}
+
+static void can327_handle_prompt(struct can327 *elm)
+{
+ struct can_frame *frame = &elm->can_frame_to_send;
+ /* Size this buffer for the largest ELM327 line we may generate,
+ * which is currently an 8 byte CAN frame's payload hexdump.
+ * Items in can327_init_script must fit here, too!
+ */
+ char local_txbuf[sizeof("0102030405060708\r")];
+
+ lockdep_assert_held(&elm->lock);
+
+ if (!elm->cmds_todo) {
+ /* Enter CAN monitor mode */
+ can327_send(elm, "ATMA\r", 5);
+ elm->state = CAN327_STATE_RECEIVING;
+
+ /* We will be in the default state once this command is
+ * sent, so enable the TX packet queue.
+ */
+ netif_wake_queue(elm->dev);
+
+ return;
+ }
+
+ /* Reconfigure ELM327 step by step as indicated by elm->cmds_todo */
+ if (test_bit(CAN327_TX_DO_INIT, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf), "%s",
+ *elm->next_init_cmd);
+
+ elm->next_init_cmd++;
+ if (!(*elm->next_init_cmd)) {
+ clear_bit(CAN327_TX_DO_INIT, &elm->cmds_todo);
+ /* Init finished. */
+ }
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_SILENT_MONITOR, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATCSM%i\r",
+ !!(elm->can.ctrlmode & CAN_CTRLMODE_LISTENONLY));
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_RESPONSES, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATR%i\r",
+ !(elm->can.ctrlmode & CAN_CTRLMODE_LISTENONLY));
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATPC\r");
+ set_bit(CAN327_TX_DO_CAN_CONFIG_PART2, &elm->cmds_todo);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CAN_CONFIG_PART2, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATPB%04X\r",
+ elm->can_config);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CANID_29BIT_HIGH, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATCP%02X\r",
+ (frame->can_id & CAN_EFF_MASK) >> 24);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CANID_29BIT_LOW, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATSH%06X\r",
+ frame->can_id & CAN_EFF_MASK & ((1 << 24) - 1));
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo)) {
+ snprintf(local_txbuf, sizeof(local_txbuf),
+ "ATSH%03X\r",
+ frame->can_id & CAN_SFF_MASK);
+
+ } else if (test_and_clear_bit(CAN327_TX_DO_CAN_DATA, &elm->cmds_todo)) {
+ if (frame->can_id & CAN_RTR_FLAG) {
+ /* Send an RTR frame. Their DLC is fixed.
+ * Some chips don't send them at all.
+ */
+ snprintf(local_txbuf, sizeof(local_txbuf), "ATRTR\r");
+ } else {
+ /* Send a regular CAN data frame */
+ int i;
+
+ for (i = 0; i < frame->len; i++) {
+ snprintf(&local_txbuf[2 * i],
+ sizeof(local_txbuf), "%02X",
+ frame->data[i]);
+ }
+
+ snprintf(&local_txbuf[2 * i], sizeof(local_txbuf),
+ "\r");
+ }
+
+ elm->drop_next_line = 1;
+ elm->state = CAN327_STATE_RECEIVING;
+
+ /* We will be in the default state once this command is
+ * sent, so enable the TX packet queue.
+ */
+ netif_wake_queue(elm->dev);
+ }
+
+ can327_send(elm, local_txbuf, strlen(local_txbuf));
+}
+
+static bool can327_is_ready_char(char c)
+{
+ /* Bits 0xc0 are sometimes set (randomly), hence the mask.
+ * Probably bad hardware.
+ */
+ return (c & 0x3f) == CAN327_READY_CHAR;
+}
+
+static void can327_drop_bytes(struct can327 *elm, size_t i)
+{
+ lockdep_assert_held(&elm->lock);
+
+ memmove(&elm->rxbuf[0], &elm->rxbuf[i], CAN327_SIZE_RXBUF - i);
+ elm->rxfill -= i;
+}
+
+static void can327_parse_rxbuf(struct can327 *elm, size_t first_new_char_idx)
+{
+ size_t len, pos;
+
+ lockdep_assert_held(&elm->lock);
+
+ switch (elm->state) {
+ case CAN327_STATE_NOTINIT:
+ elm->rxfill = 0;
+ break;
+
+ case CAN327_STATE_GETDUMMYCHAR:
+ /* Wait for 'y' or '>' */
+ for (pos = 0; pos < elm->rxfill; pos++) {
+ if (elm->rxbuf[pos] == CAN327_DUMMY_CHAR) {
+ can327_send(elm, "\r", 1);
+ elm->state = CAN327_STATE_GETPROMPT;
+ pos++;
+ break;
+ } else if (can327_is_ready_char(elm->rxbuf[pos])) {
+ can327_send(elm, CAN327_DUMMY_STRING, 1);
+ pos++;
+ break;
+ }
+ }
+
+ can327_drop_bytes(elm, pos);
+ break;
+
+ case CAN327_STATE_GETPROMPT:
+ /* Wait for '>' */
+ if (can327_is_ready_char(elm->rxbuf[elm->rxfill - 1]))
+ can327_handle_prompt(elm);
+
+ elm->rxfill = 0;
+ break;
+
+ case CAN327_STATE_RECEIVING:
+ /* Find <CR> delimiting feedback lines. */
+ len = first_new_char_idx;
+ while (len < elm->rxfill && elm->rxbuf[len] != '\r')
+ len++;
+
+ if (len == CAN327_SIZE_RXBUF) {
+ /* Assume the buffer ran full with garbage.
+ * Did we even connect at the right baud rate?
+ */
+ netdev_err(elm->dev,
+ "RX buffer overflow. Faulty ELM327 or UART?\n");
+ can327_uart_side_failure(elm);
+ } else if (len == elm->rxfill) {
+ if (can327_is_ready_char(elm->rxbuf[elm->rxfill - 1])) {
+ /* The ELM327's AT ST response timeout ran out,
+ * so we got a prompt.
+ * Clear RX buffer and restart listening.
+ */
+ elm->rxfill = 0;
+
+ can327_handle_prompt(elm);
+ }
+
+ /* No <CR> found - we haven't received a full line yet.
+ * Wait for more data.
+ */
+ } else {
+ /* We have a full line to parse. */
+ can327_parse_line(elm, len);
+
+ /* Remove parsed data from RX buffer. */
+ can327_drop_bytes(elm, len + 1);
+
+ /* More data to parse? */
+ if (elm->rxfill)
+ can327_parse_rxbuf(elm, 0);
+ }
+ }
+}
+
+static int can327_netdev_open(struct net_device *dev)
+{
+ struct can327 *elm = netdev_priv(dev);
+ int err;
+
+ spin_lock_bh(&elm->lock);
+
+ if (!elm->tty) {
+ spin_unlock_bh(&elm->lock);
+ return -ENODEV;
+ }
+
+ if (elm->uart_side_failure)
+ netdev_warn(elm->dev,
+ "Reopening netdev after a UART side fault has been detected.\n");
+
+ /* Clear TTY buffers */
+ elm->rxfill = 0;
+ elm->txleft = 0;
+
+ /* open_candev() checks for elm->can.bittiming.bitrate != 0 */
+ err = open_candev(dev);
+ if (err) {
+ spin_unlock_bh(&elm->lock);
+ return err;
+ }
+
+ can327_init_device(elm);
+ spin_unlock_bh(&elm->lock);
+
+ err = can_rx_offload_add_manual(dev, &elm->offload, CAN327_NAPI_WEIGHT);
+ if (err) {
+ close_candev(dev);
+ return err;
+ }
+
+ can_rx_offload_enable(&elm->offload);
+
+ elm->can.state = CAN_STATE_ERROR_ACTIVE;
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int can327_netdev_close(struct net_device *dev)
+{
+ struct can327 *elm = netdev_priv(dev);
+
+ /* Interrupt whatever the ELM327 is doing right now */
+ spin_lock_bh(&elm->lock);
+ can327_send(elm, CAN327_DUMMY_STRING, 1);
+ spin_unlock_bh(&elm->lock);
+
+ netif_stop_queue(dev);
+
+ /* We don't flush the UART TX queue here, as we want final stop
+ * commands (like the above dummy char) to be flushed out.
+ */
+
+ can_rx_offload_disable(&elm->offload);
+ elm->can.state = CAN_STATE_STOPPED;
+ can_rx_offload_del(&elm->offload);
+ close_candev(dev);
+
+ return 0;
+}
+
+/* Send a can_frame to a TTY. */
+static netdev_tx_t can327_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct can327 *elm = netdev_priv(dev);
+ struct can_frame *frame = (struct can_frame *)skb->data;
+
+ if (can_dev_dropped_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ /* We shouldn't get here after a hardware fault:
+ * can_bus_off() calls netif_carrier_off()
+ */
+ if (elm->uart_side_failure) {
+ WARN_ON_ONCE(elm->uart_side_failure);
+ goto out;
+ }
+
+ netif_stop_queue(dev);
+
+ /* BHs are already disabled, so no spin_lock_bh().
+ * See Documentation/networking/netdevices.rst
+ */
+ spin_lock(&elm->lock);
+ can327_send_frame(elm, frame);
+ spin_unlock(&elm->lock);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += frame->can_id & CAN_RTR_FLAG ? 0 : frame->len;
+
+ skb_tx_timestamp(skb);
+
+out:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops can327_netdev_ops = {
+ .ndo_open = can327_netdev_open,
+ .ndo_stop = can327_netdev_close,
+ .ndo_start_xmit = can327_netdev_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops can327_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static bool can327_is_valid_rx_char(u8 c)
+{
+ static const bool lut_char_is_valid['z'] = {
+ ['\r'] = true,
+ [' '] = true,
+ ['.'] = true,
+ ['0'] = true, true, true, true, true,
+ ['5'] = true, true, true, true, true,
+ ['<'] = true,
+ [CAN327_READY_CHAR] = true,
+ ['?'] = true,
+ ['A'] = true, true, true, true, true, true, true,
+ ['H'] = true, true, true, true, true, true, true,
+ ['O'] = true, true, true, true, true, true, true,
+ ['V'] = true, true, true, true, true,
+ ['a'] = true,
+ ['b'] = true,
+ ['v'] = true,
+ [CAN327_DUMMY_CHAR] = true,
+ };
+ BUILD_BUG_ON(CAN327_DUMMY_CHAR >= 'z');
+
+ return (c < ARRAY_SIZE(lut_char_is_valid) && lut_char_is_valid[c]);
+}
+
+/* Handle incoming ELM327 ASCII data.
+ * This will not be re-entered while running, but other ldisc
+ * functions may be called in parallel.
+ */
+static void can327_ldisc_rx(struct tty_struct *tty, const unsigned char *cp,
+ const char *fp, int count)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+ size_t first_new_char_idx;
+
+ if (elm->uart_side_failure)
+ return;
+
+ spin_lock_bh(&elm->lock);
+
+ /* Store old rxfill, so can327_parse_rxbuf() will have
+ * the option of skipping already checked characters.
+ */
+ first_new_char_idx = elm->rxfill;
+
+ while (count-- && elm->rxfill < CAN327_SIZE_RXBUF) {
+ if (fp && *fp++) {
+ netdev_err(elm->dev,
+ "Error in received character stream. Check your wiring.");
+
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ /* Ignore NUL characters, which the PIC microcontroller may
+ * inadvertently insert due to a known hardware bug.
+ * See ELM327 documentation, which refers to a Microchip PIC
+ * bug description.
+ */
+ if (*cp) {
+ /* Check for stray characters on the UART line.
+ * Likely caused by bad hardware.
+ */
+ if (!can327_is_valid_rx_char(*cp)) {
+ netdev_err(elm->dev,
+ "Received illegal character %02x.\n",
+ *cp);
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ elm->rxbuf[elm->rxfill++] = *cp;
+ }
+
+ cp++;
+ }
+
+ if (count >= 0) {
+ netdev_err(elm->dev,
+ "Receive buffer overflowed. Bad chip or wiring? count = %i",
+ count);
+
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ can327_parse_rxbuf(elm, first_new_char_idx);
+ spin_unlock_bh(&elm->lock);
+}
+
+/* Write out remaining transmit buffer.
+ * Scheduled when TTY is writable.
+ */
+static void can327_ldisc_tx_worker(struct work_struct *work)
+{
+ struct can327 *elm = container_of(work, struct can327, tx_work);
+ ssize_t written;
+
+ if (elm->uart_side_failure)
+ return;
+
+ spin_lock_bh(&elm->lock);
+
+ if (elm->txleft) {
+ written = elm->tty->ops->write(elm->tty, elm->txhead,
+ elm->txleft);
+ if (written < 0) {
+ netdev_err(elm->dev, "Failed to write to tty %s.\n",
+ elm->tty->name);
+ can327_uart_side_failure(elm);
+
+ spin_unlock_bh(&elm->lock);
+ return;
+ }
+
+ elm->txleft -= written;
+ elm->txhead += written;
+ }
+
+ if (!elm->txleft)
+ clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
+
+ spin_unlock_bh(&elm->lock);
+}
+
+/* Called by the driver when there's room for more data. */
+static void can327_ldisc_tx_wakeup(struct tty_struct *tty)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+
+ schedule_work(&elm->tx_work);
+}
+
+/* ELM327 can only handle bitrates that are integer divisors of 500 kHz,
+ * or 7/8 of that. Divisors are 1 to 64.
+ * Currently we don't implement support for 7/8 rates.
+ */
+static const u32 can327_bitrate_const[] = {
+ 7812, 7936, 8064, 8196, 8333, 8474, 8620, 8771,
+ 8928, 9090, 9259, 9433, 9615, 9803, 10000, 10204,
+ 10416, 10638, 10869, 11111, 11363, 11627, 11904, 12195,
+ 12500, 12820, 13157, 13513, 13888, 14285, 14705, 15151,
+ 15625, 16129, 16666, 17241, 17857, 18518, 19230, 20000,
+ 20833, 21739, 22727, 23809, 25000, 26315, 27777, 29411,
+ 31250, 33333, 35714, 38461, 41666, 45454, 50000, 55555,
+ 62500, 71428, 83333, 100000, 125000, 166666, 250000, 500000
+};
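+
+/* The table above is simply 500000 / divisor, rounded down, for divisors
+ * 64 down to 1.  A minimal sketch that would regenerate the same values
+ * (illustrative only, not part of the driver):
+ *
+ *	static void can327_fill_bitrate_const(u32 *table)
+ *	{
+ *		int div;
+ *
+ *		for (div = 64; div >= 1; div--)
+ *			table[64 - div] = 500000 / div;
+ *	}
+ */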
+
+static int can327_ldisc_open(struct tty_struct *tty)
+{
+ struct net_device *dev;
+ struct can327 *elm;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!tty->ops->write)
+ return -EOPNOTSUPP;
+
+ dev = alloc_candev(sizeof(struct can327), 0);
+ if (!dev)
+ return -ENFILE;
+ elm = netdev_priv(dev);
+
+ /* Configure TTY interface */
+ tty->receive_room = 65536; /* We don't flow control */
+ spin_lock_init(&elm->lock);
+ INIT_WORK(&elm->tx_work, can327_ldisc_tx_worker);
+
+ /* Configure CAN metadata */
+ elm->can.bitrate_const = can327_bitrate_const;
+ elm->can.bitrate_const_cnt = ARRAY_SIZE(can327_bitrate_const);
+ elm->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;
+
+ /* Configure netdev interface */
+ elm->dev = dev;
+ dev->netdev_ops = &can327_netdev_ops;
+ dev->ethtool_ops = &can327_ethtool_ops;
+
+ /* Mark ldisc channel as alive */
+ elm->tty = tty;
+ tty->disc_data = elm;
+
+ /* Let 'er rip */
+ err = register_candev(elm->dev);
+ if (err) {
+ free_candev(elm->dev);
+ return err;
+ }
+
+ netdev_info(elm->dev, "can327 on %s.\n", tty->name);
+
+ return 0;
+}
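+
+/* From userspace, this line discipline is attached to an already opened
+ * serial port via the TIOCSETD ioctl.  A minimal hypothetical sketch
+ * (includes and error handling omitted; N_CAN327 comes from the uapi tty
+ * headers, and the device path is just an example):
+ *
+ *	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);
+ *	int ldisc = N_CAN327;
+ *
+ *	ioctl(fd, TIOCSETD, &ldisc);
+ */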
+
+/* Close down a can327 channel.
+ * This means flushing out any pending queues, and then returning.
+ * This call is serialized against other ldisc functions:
+ * Once this is called, no other ldisc function of ours is entered.
+ *
+ * We also use this function for a hangup event.
+ */
+static void can327_ldisc_close(struct tty_struct *tty)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+
+ /* unregister_netdev() calls .ndo_stop() so we don't have to. */
+ unregister_candev(elm->dev);
+
+ /* Give UART one final chance to flush.
+ * No need to clear TTY_DO_WRITE_WAKEUP since .write_wakeup() is
+ * serialised against .close() and will not be called once we return.
+ */
+ flush_work(&elm->tx_work);
+
+ /* Mark channel as dead */
+ spin_lock_bh(&elm->lock);
+ tty->disc_data = NULL;
+ elm->tty = NULL;
+ spin_unlock_bh(&elm->lock);
+
+ netdev_info(elm->dev, "can327 off %s.\n", tty->name);
+
+ free_candev(elm->dev);
+}
+
+static int can327_ldisc_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct can327 *elm = (struct can327 *)tty->disc_data;
+ unsigned int tmp;
+
+ switch (cmd) {
+ case SIOCGIFNAME:
+ tmp = strnlen(elm->dev->name, IFNAMSIZ - 1) + 1;
+ if (copy_to_user((void __user *)arg, elm->dev->name, tmp))
+ return -EFAULT;
+ return 0;
+
+ case SIOCSIFHWADDR:
+ return -EINVAL;
+
+ default:
+ return tty_mode_ioctl(tty, cmd, arg);
+ }
+}
+
+static struct tty_ldisc_ops can327_ldisc = {
+ .owner = THIS_MODULE,
+ .name = KBUILD_MODNAME,
+ .num = N_CAN327,
+ .receive_buf = can327_ldisc_rx,
+ .write_wakeup = can327_ldisc_tx_wakeup,
+ .open = can327_ldisc_open,
+ .close = can327_ldisc_close,
+ .ioctl = can327_ldisc_ioctl,
+};
+
+static int __init can327_init(void)
+{
+ int status;
+
+ status = tty_register_ldisc(&can327_ldisc);
+ if (status)
+ pr_err("Can't register line discipline\n");
+
+ return status;
+}
+
+static void __exit can327_exit(void)
+{
+ /* This will only be called when all channels have been closed by
+ * userspace - tty_ldisc.c takes care of the module's refcount.
+ */
+ tty_unregister_ldisc(&can327_ldisc);
+}
+
+module_init(can327_init);
+module_exit(can327_exit);
+
+MODULE_ALIAS_LDISC(N_CAN327);
+MODULE_DESCRIPTION("ELM327 based CAN interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Max Staudt <max@enpas.org>");
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index f8a130f594e2..30909f3aab57 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -17,6 +17,7 @@
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
@@ -428,7 +429,7 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct cc770_priv *priv = netdev_priv(dev);
unsigned int mo = obj2msgobj(CC770_OBJ_TX);
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
@@ -489,17 +490,17 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
cf->len = can_cc_dlc2len((config & 0xf0) >> 4);
for (i = 0; i < cf->len; i++)
cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
- }
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_rx(skb);
}
static int cc770_err(struct net_device *dev, u8 status)
{
struct cc770_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
u8 lec;
@@ -512,6 +513,7 @@ static int cc770_err(struct net_device *dev, u8 status)
/* Use extended functions of the CC770 */
if (priv->control_normal_mode & CTRL_EAF) {
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = cc770_read_reg(priv, tx_error_counter);
cf->data[7] = cc770_read_reg(priv, rx_error_counter);
}
@@ -571,8 +573,6 @@ static int cc770_err(struct net_device *dev, u8 status)
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -666,7 +666,6 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
struct cc770_priv *priv = netdev_priv(dev);
struct net_device_stats *stats = &dev->stats;
unsigned int mo = obj2msgobj(o);
- struct can_frame *cf;
u8 ctrl1;
ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
@@ -698,12 +697,9 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
return;
}
- cf = (struct can_frame *)priv->tx_skb->data;
- stats->tx_bytes += cf->len;
- stats->tx_packets++;
-
can_put_echo_skb(priv->tx_skb, dev, 0, 0);
- can_get_echo_skb(dev, 0, NULL);
+ stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
+ stats->tx_packets++;
priv->tx_skb = NULL;
netif_wake_queue(dev);
@@ -841,6 +837,10 @@ static const struct net_device_ops cc770_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops cc770_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
int register_cc770dev(struct net_device *dev)
{
struct cc770_priv *priv = netdev_priv(dev);
@@ -851,6 +851,7 @@ int register_cc770dev(struct net_device *dev)
return err;
dev->netdev_ops = &cc770_netdev_ops;
+ dev->ethtool_ops = &cc770_ethtool_ops;
dev->flags |= IFF_ECHO; /* we support local echo */
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
index 194c86e0f340..8f6dccd5a587 100644
--- a/drivers/net/can/cc770/cc770_isa.c
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -264,22 +264,24 @@ static int cc770_isa_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev,
"couldn't register device (err=%d)\n", err);
- goto exit_unmap;
+ goto exit_free;
}
dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n",
priv->reg_base, dev->irq);
return 0;
- exit_unmap:
+exit_free:
+ free_cc770dev(dev);
+exit_unmap:
if (mem[idx])
iounmap(base);
- exit_release:
+exit_release:
if (mem[idx])
release_mem_region(mem[idx], iosize);
else
release_region(port[idx], iosize);
- exit:
+exit:
return err;
}
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 8d916e2ee6c2..8dcc32e4e30e 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -93,20 +93,20 @@ static int cc770_get_of_node_data(struct platform_device *pdev,
if (priv->can.clock.freq > 8000000)
priv->cpu_interface |= CPUIF_DMC;
- if (of_get_property(np, "bosch,divide-memory-clock", NULL))
+ if (of_property_read_bool(np, "bosch,divide-memory-clock"))
priv->cpu_interface |= CPUIF_DMC;
- if (of_get_property(np, "bosch,iso-low-speed-mux", NULL))
+ if (of_property_read_bool(np, "bosch,iso-low-speed-mux"))
priv->cpu_interface |= CPUIF_MUX;
if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
priv->bus_config |= BUSCFG_CBY;
- if (of_get_property(np, "bosch,disconnect-rx0-input", NULL))
+ if (of_property_read_bool(np, "bosch,disconnect-rx0-input"))
priv->bus_config |= BUSCFG_DR0;
- if (of_get_property(np, "bosch,disconnect-rx1-input", NULL))
+ if (of_property_read_bool(np, "bosch,disconnect-rx1-input"))
priv->bus_config |= BUSCFG_DR1;
- if (of_get_property(np, "bosch,disconnect-tx1-output", NULL))
+ if (of_property_read_bool(np, "bosch,disconnect-tx1-output"))
priv->bus_config |= BUSCFG_DT1;
- if (of_get_property(np, "bosch,polarity-dominant", NULL))
+ if (of_property_read_bool(np, "bosch,polarity-dominant"))
priv->bus_config |= BUSCFG_POL;
prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
diff --git a/drivers/net/can/ctucanfd/Kconfig b/drivers/net/can/ctucanfd/Kconfig
new file mode 100644
index 000000000000..f52407f5c5d8
--- /dev/null
+++ b/drivers/net/can/ctucanfd/Kconfig
@@ -0,0 +1,34 @@
+config CAN_CTUCANFD
+ tristate "CTU CAN-FD IP core" if COMPILE_TEST
+ help
+ This driver adds support for the CTU CAN FD open-source IP core.
+	  More documentation and the core sources are available at the project
+	  page (https://gitlab.fel.cvut.cz/canbus/ctucanfd_ip_core).
+	  An integration of the core into a Xilinx Zynq system as a platform
+	  driver is available (https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top).
+	  Implementations on an Intel FPGA-based PCI Express board
+	  (https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd) and on an Intel SoC
+	  (https://gitlab.fel.cvut.cz/canbus/intel-soc-ctucanfd) are also available.
+	  An overview of the CTU FEE CAN bus projects is at https://canbus.pages.fel.cvut.cz/ .
+
+config CAN_CTUCANFD_PCI
+ tristate "CTU CAN-FD IP core PCI/PCIe driver"
+ depends on PCI
+ select CAN_CTUCANFD
+ help
+	  This driver adds PCI/PCIe support for the CTU CAN FD IP core.
+	  The project providing the FPGA design for the Intel EP4CGX15 based
+	  DB4CGX15 PCIe board with a transceiver riser shield designed by
+	  PiKRON.com is available at https://gitlab.fel.cvut.cz/canbus/pcie-ctucanfd .
+
+config CAN_CTUCANFD_PLATFORM
+ tristate "CTU CAN-FD IP core platform (FPGA, SoC) driver"
+ depends on HAS_IOMEM && OF
+ select CAN_CTUCANFD
+ help
+	  The core has been tested together with an OpenCores SJA1000,
+	  modified to tolerate CAN FD frames, on MicroZed Zynq based
+	  MZ_APO education kits designed by Petr Porazil from the PiKRON.com
+	  company. The FPGA design is at
+	  https://gitlab.fel.cvut.cz/canbus/zynq/zynq-can-sja1000-top.
+	  The kit is described at the Computer Architectures course pages,
+	  https://cw.fel.cvut.cz/wiki/courses/b35apo/documentation/mz_apo/start .
diff --git a/drivers/net/can/ctucanfd/Makefile b/drivers/net/can/ctucanfd/Makefile
new file mode 100644
index 000000000000..8078f1f2c30f
--- /dev/null
+++ b/drivers/net/can/ctucanfd/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Makefile for the CTU CAN-FD IP module drivers
+#
+
+obj-$(CONFIG_CAN_CTUCANFD) := ctucanfd.o
+ctucanfd-y := ctucanfd_base.o
+
+obj-$(CONFIG_CAN_CTUCANFD_PCI) += ctucanfd_pci.o
+obj-$(CONFIG_CAN_CTUCANFD_PLATFORM) += ctucanfd_platform.o
diff --git a/drivers/net/can/ctucanfd/ctucanfd.h b/drivers/net/can/ctucanfd/ctucanfd.h
new file mode 100644
index 000000000000..0e9904f6a05d
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#ifndef __CTUCANFD__
+#define __CTUCANFD__
+
+#include <linux/netdevice.h>
+#include <linux/can/dev.h>
+#include <linux/list.h>
+
+enum ctu_can_fd_can_registers;
+
+struct ctucan_priv {
+ struct can_priv can; /* must be first member! */
+
+ void __iomem *mem_base;
+ u32 (*read_reg)(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg);
+ void (*write_reg)(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val);
+
+ unsigned int txb_head;
+ unsigned int txb_tail;
+ u32 txb_prio;
+ unsigned int ntxbufs;
+ spinlock_t tx_lock; /* spinlock to serialize allocation and processing of TX buffers */
+
+ struct napi_struct napi;
+ struct device *dev;
+ struct clk *can_clk;
+
+ int irq_flags;
+ unsigned long drv_flags;
+
+ u32 rxfrm_first_word;
+
+ struct list_head peers_on_pdev;
+};
+
+/**
+ * ctucan_probe_common - Device type independent registration call
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * @dev: Handle to the generic device structure
+ * @addr: Base address of CTU CAN FD core address
+ * @irq: Interrupt number
+ * @ntxbufs: Number of implemented Tx buffers
+ * @can_clk_rate: Clock rate; if 0, the clock rate is taken from the device node
+ * @pm_enable_call: Whether pm_runtime_enable should be called
+ * @set_drvdata_fnc: Function to set network driver data for physical device
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ctucan_probe_common(struct device *dev, void __iomem *addr,
+ int irq, unsigned int ntxbufs,
+ unsigned long can_clk_rate,
+ int pm_enable_call,
+ void (*set_drvdata_fnc)(struct device *dev,
+ struct net_device *ndev));
+
+int ctucan_suspend(struct device *dev) __maybe_unused;
+int ctucan_resume(struct device *dev) __maybe_unused;
+
+#endif /*__CTUCANFD__*/
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
new file mode 100644
index 000000000000..64c349fd4600
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -0,0 +1,1458 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/bitfield.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/can/error.h>
+#include <linux/pm_runtime.h>
+
+#include "ctucanfd.h"
+#include "ctucanfd_kregs.h"
+#include "ctucanfd_kframe.h"
+
+#ifdef DEBUG
+#define ctucan_netdev_dbg(ndev, args...) \
+ netdev_dbg(ndev, args)
+#else
+#define ctucan_netdev_dbg(...) do { } while (0)
+#endif
+
+#define CTUCANFD_ID 0xCAFD
+
+/* TX buffer rotation:
+ * - when a buffer transitions to empty state, rotate order and priorities
+ * - if more buffers seem to transition at the same time, rotate by the number of buffers
+ * - it may be assumed that buffers transition to empty state in FIFO order (because we manage
+ * priorities that way)
+ * - at frame filling, do not rotate anything, just increment buffer modulo counter
+ */
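+/* Worked example of the rotation (assuming 8 TXT buffers): the per-buffer
+ * priorities are packed as nibbles into one 32-bit word, initially
+ * 0x01234567.  One rotation step, as done in ctucan_rotate_txb_prio(),
+ * shifts the word left by one nibble and wraps the top nibble around:
+ *
+ *	prio = (prio << 4) | ((prio >> ((ntxbufs - 1) * 4)) & 0xF);
+ *
+ * so 0x01234567 becomes 0x12345670.
+ */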
+
+#define CTUCANFD_FLAG_RX_FFW_BUFFERED 1
+
+#define CTUCAN_STATE_TO_TEXT_ENTRY(st) \
+ [st] = #st
+
+enum ctucan_txtb_status {
+ TXT_NOT_EXIST = 0x0,
+ TXT_RDY = 0x1,
+ TXT_TRAN = 0x2,
+ TXT_ABTP = 0x3,
+ TXT_TOK = 0x4,
+ TXT_ERR = 0x6,
+ TXT_ABT = 0x7,
+ TXT_ETY = 0x8,
+};
+
+enum ctucan_txtb_command {
+ TXT_CMD_SET_EMPTY = 0x01,
+ TXT_CMD_SET_READY = 0x02,
+ TXT_CMD_SET_ABORT = 0x04
+};
+
+static const struct can_bittiming_const ctu_can_fd_bit_timing_max = {
+ .name = "ctu_can_fd",
+ .tseg1_min = 2,
+ .tseg1_max = 190,
+ .tseg2_min = 1,
+ .tseg2_max = 63,
+ .sjw_max = 31,
+ .brp_min = 1,
+ .brp_max = 8,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const ctu_can_fd_bit_timing_data_max = {
+ .name = "ctu_can_fd",
+ .tseg1_min = 2,
+ .tseg1_max = 94,
+ .tseg2_min = 1,
+ .tseg2_max = 31,
+ .sjw_max = 31,
+ .brp_min = 1,
+ .brp_max = 2,
+ .brp_inc = 1,
+};
+
+static const char * const ctucan_state_strings[CAN_STATE_MAX] = {
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_ACTIVE),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_WARNING),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_PASSIVE),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_BUS_OFF),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_STOPPED),
+ CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_SLEEPING)
+};
+
+static void ctucan_write32_le(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val)
+{
+ iowrite32(val, priv->mem_base + reg);
+}
+
+static void ctucan_write32_be(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg, u32 val)
+{
+ iowrite32be(val, priv->mem_base + reg);
+}
+
+static u32 ctucan_read32_le(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg)
+{
+ return ioread32(priv->mem_base + reg);
+}
+
+static u32 ctucan_read32_be(struct ctucan_priv *priv,
+ enum ctu_can_fd_can_registers reg)
+{
+ return ioread32be(priv->mem_base + reg);
+}
+
+static void ctucan_write32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg, u32 val)
+{
+ priv->write_reg(priv, reg, val);
+}
+
+static u32 ctucan_read32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg)
+{
+ return priv->read_reg(priv, reg);
+}
+
+static void ctucan_write_txt_buf(struct ctucan_priv *priv, enum ctu_can_fd_can_registers buf_base,
+ u32 offset, u32 val)
+{
+ priv->write_reg(priv, buf_base + offset, val);
+}
+
+#define CTU_CAN_FD_TXTNF(priv) (!!FIELD_GET(REG_STATUS_TXNF, ctucan_read32(priv, CTUCANFD_STATUS)))
+#define CTU_CAN_FD_ENABLED(priv) (!!FIELD_GET(REG_MODE_ENA, ctucan_read32(priv, CTUCANFD_MODE)))
+
+/**
+ * ctucan_state_to_str() - Converts CAN controller state code to corresponding text
+ * @state: CAN controller state code
+ *
+ * Return: Pointer to string representation of the error state
+ */
+static const char *ctucan_state_to_str(enum can_state state)
+{
+ const char *txt = NULL;
+
+ if (state >= 0 && state < CAN_STATE_MAX)
+ txt = ctucan_state_strings[state];
+ return txt ? txt : "UNKNOWN";
+}
+
+/**
+ * ctucan_reset() - Issues software reset request to CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 for success, -%ETIMEDOUT if CAN controller does not leave reset
+ */
+static int ctucan_reset(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int i = 100;
+
+ ctucan_write32(priv, CTUCANFD_MODE, REG_MODE_RST);
+ clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+
+ do {
+ u16 device_id = FIELD_GET(REG_DEVICE_ID_DEVICE_ID,
+ ctucan_read32(priv, CTUCANFD_DEVICE_ID));
+
+ if (device_id == 0xCAFD)
+ return 0;
+ if (!i--) {
+ netdev_warn(ndev, "device did not leave reset\n");
+ return -ETIMEDOUT;
+ }
+ usleep_range(100, 200);
+ } while (1);
+}
+
+/**
+ * ctucan_set_btr() - Sets CAN bus bit timing in CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ * @bt: Pointer to Bit timing structure
+ * @nominal: True - Nominal bit timing, False - Data bit timing
+ *
+ * Return: 0 - OK, -%EPERM if controller is enabled
+ */
+static int ctucan_set_btr(struct net_device *ndev, struct can_bittiming *bt, bool nominal)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int max_ph1_len = 31;
+ u32 btr = 0;
+ u32 prop_seg = bt->prop_seg;
+ u32 phase_seg1 = bt->phase_seg1;
+
+ if (CTU_CAN_FD_ENABLED(priv)) {
+ netdev_err(ndev, "BUG! Cannot set bittiming - CAN is enabled\n");
+ return -EPERM;
+ }
+
+ if (nominal)
+ max_ph1_len = 63;
+
+ /* The timing calculation functions have only constraints on tseg1, which is prop_seg +
+ * phase_seg1 combined. tseg1 is then split in half and stored into prop_seg and phase_seg1.
+ * In CTU CAN FD, PROP is 6/7 bits wide but PH1 only 6/5, so we must re-distribute the
+ * values here.
+ */
+ if (phase_seg1 > max_ph1_len) {
+ prop_seg += phase_seg1 - max_ph1_len;
+ phase_seg1 = max_ph1_len;
+ bt->prop_seg = prop_seg;
+ bt->phase_seg1 = phase_seg1;
+ }
+
+ if (nominal) {
+ btr = FIELD_PREP(REG_BTR_PROP, prop_seg);
+ btr |= FIELD_PREP(REG_BTR_PH1, phase_seg1);
+ btr |= FIELD_PREP(REG_BTR_PH2, bt->phase_seg2);
+ btr |= FIELD_PREP(REG_BTR_BRP, bt->brp);
+ btr |= FIELD_PREP(REG_BTR_SJW, bt->sjw);
+
+ ctucan_write32(priv, CTUCANFD_BTR, btr);
+ } else {
+ btr = FIELD_PREP(REG_BTR_FD_PROP_FD, prop_seg);
+ btr |= FIELD_PREP(REG_BTR_FD_PH1_FD, phase_seg1);
+ btr |= FIELD_PREP(REG_BTR_FD_PH2_FD, bt->phase_seg2);
+ btr |= FIELD_PREP(REG_BTR_FD_BRP_FD, bt->brp);
+ btr |= FIELD_PREP(REG_BTR_FD_SJW_FD, bt->sjw);
+
+ ctucan_write32(priv, CTUCANFD_BTR_FD, btr);
+ }
+
+ return 0;
+}
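+
+/* Worked example for the redistribution above (data phase, so max_ph1_len
+ * stays at 31): if the computed timing arrives with prop_seg = 10 and
+ * phase_seg1 = 40, the 9 excess time quanta are moved into prop_seg,
+ * so the BTR word is built with prop_seg = 19 and phase_seg1 = 31.
+ */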
+
+/**
+ * ctucan_set_bittiming() - CAN set nominal bit timing routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM on error
+ */
+static int ctucan_set_bittiming(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+
+ /* Note that bt may be modified here */
+ return ctucan_set_btr(ndev, bt, true);
+}
+
+/**
+ * ctucan_set_data_bittiming() - CAN set data bit timing routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM on error
+ */
+static int ctucan_set_data_bittiming(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+
+ /* Note that dbt may be modified here */
+ return ctucan_set_btr(ndev, dbt, false);
+}
+
+/**
+ * ctucan_set_secondary_sample_point() - Sets secondary sample point in CTU CAN FD
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 on success, -%EPERM if controller is enabled
+ */
+static int ctucan_set_secondary_sample_point(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+ int ssp_offset = 0;
+ u32 ssp_cfg = 0; /* No SSP by default */
+
+ if (CTU_CAN_FD_ENABLED(priv)) {
+ netdev_err(ndev, "BUG! Cannot set SSP - CAN is enabled\n");
+ return -EPERM;
+ }
+
+ /* Use SSP for bit-rates above 1 Mbits/s */
+ if (dbt->bitrate > 1000000) {
+ /* Calculate SSP in minimal time quanta */
+ ssp_offset = (priv->can.clock.freq / 1000) * dbt->sample_point / dbt->bitrate;
+
+ if (ssp_offset > 127) {
+ netdev_warn(ndev, "SSP offset saturated to 127\n");
+ ssp_offset = 127;
+ }
+
+ ssp_cfg = FIELD_PREP(REG_TRV_DELAY_SSP_OFFSET, ssp_offset);
+ ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x1);
+ }
+
+ ctucan_write32(priv, CTUCANFD_TRV_DELAY, ssp_cfg);
+
+ return 0;
+}
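+
+/* Worked example for the SSP calculation above: with a 100 MHz core clock,
+ * a 2 Mbit/s data bit rate and a 75.0% sample point (sample_point = 750,
+ * expressed in tenths of a percent as in struct can_bittiming),
+ * ssp_offset = (100000000 / 1000) * 750 / 2000000 = 37 minimal time quanta,
+ * well below the 127 saturation limit.
+ */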
+
+/**
+ * ctucan_set_mode() - Sets CTU CAN FD's mode
+ * @priv: Pointer to private data
+ * @mode: Pointer to controller modes to be set
+ */
+static void ctucan_set_mode(struct ctucan_priv *priv, const struct can_ctrlmode *mode)
+{
+ u32 mode_reg = ctucan_read32(priv, CTUCANFD_MODE);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_LOOPBACK) ?
+ (mode_reg | REG_MODE_ILBP) :
+ (mode_reg & ~REG_MODE_ILBP);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_LISTENONLY) ?
+ (mode_reg | REG_MODE_BMM) :
+ (mode_reg & ~REG_MODE_BMM);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_FD) ?
+ (mode_reg | REG_MODE_FDE) :
+ (mode_reg & ~REG_MODE_FDE);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_PRESUME_ACK) ?
+ (mode_reg | REG_MODE_ACF) :
+ (mode_reg & ~REG_MODE_ACF);
+
+ mode_reg = (mode->flags & CAN_CTRLMODE_FD_NON_ISO) ?
+ (mode_reg | REG_MODE_NISOFD) :
+ (mode_reg & ~REG_MODE_NISOFD);
+
+ /* One shot mode supported indirectly via Retransmit limit */
+ mode_reg &= ~FIELD_PREP(REG_MODE_RTRTH, 0xF);
+ mode_reg = (mode->flags & CAN_CTRLMODE_ONE_SHOT) ?
+ (mode_reg | REG_MODE_RTRLE) :
+ (mode_reg & ~REG_MODE_RTRLE);
+
+ /* Some bits fixed:
+ * TSTM - Off, User shall not be able to change REC/TEC by hand during operation
+ */
+ mode_reg &= ~REG_MODE_TSTM;
+
+ ctucan_write32(priv, CTUCANFD_MODE, mode_reg);
+}
+
+/**
+ * ctucan_chip_start() - This routine starts the driver
+ * @ndev: Pointer to net_device structure
+ *
+ * Routine expects that chip is in reset state. It sets up initial
+ * Tx buffers for FIFO priorities, sets bittiming, enables interrupts,
+ * switches core to operational mode and changes controller
+ * state to %CAN_STATE_STOPPED.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_chip_start(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 int_ena, int_msk;
+ u32 mode_reg;
+ int err;
+ struct can_ctrlmode mode;
+
+ priv->txb_prio = 0x01234567;
+ priv->txb_head = 0;
+ priv->txb_tail = 0;
+ ctucan_write32(priv, CTUCANFD_TX_PRIORITY, priv->txb_prio);
+
+ /* Configure bit-rates and ssp */
+ err = ctucan_set_bittiming(ndev);
+ if (err < 0)
+ return err;
+
+ err = ctucan_set_data_bittiming(ndev);
+ if (err < 0)
+ return err;
+
+ err = ctucan_set_secondary_sample_point(ndev);
+ if (err < 0)
+ return err;
+
+ /* Configure modes */
+ mode.flags = priv->can.ctrlmode;
+ mode.mask = 0xFFFFFFFF;
+ ctucan_set_mode(priv, &mode);
+
+ /* Configure interrupts */
+ int_ena = REG_INT_STAT_RBNEI |
+ REG_INT_STAT_TXBHCI |
+ REG_INT_STAT_EWLI |
+ REG_INT_STAT_FCSI;
+
+ /* Bus error reporting -> Allow Error/Arb.lost interrupts */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ int_ena |= REG_INT_STAT_ALI |
+ REG_INT_STAT_BEI;
+ }
+
+ int_msk = ~int_ena; /* Mask all disabled interrupts */
+
+ /* It's after reset, so there is no need to clear anything */
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, int_msk);
+ ctucan_write32(priv, CTUCANFD_INT_ENA_SET, int_ena);
+
+ /* Controller enters ERROR_ACTIVE on initial FCSI */
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Enable the controller */
+ mode_reg = ctucan_read32(priv, CTUCANFD_MODE);
+ mode_reg |= REG_MODE_ENA;
+ ctucan_write32(priv, CTUCANFD_MODE, mode_reg);
+
+ return 0;
+}
+
+/**
+ * ctucan_do_set_mode() - Sets mode of the driver
+ * @ndev: Pointer to net_device structure
+ * @mode: Tells the mode of the driver
+ *
+ * This checks the driver state and applies the requested mode.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ return ret;
+ ret = ctucan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "ctucan_chip_start failed!\n");
+ return ret;
+ }
+ netif_wake_queue(ndev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * ctucan_get_tx_status() - Gets status of TXT buffer
+ * @priv: Pointer to private data
+ * @buf: Buffer index (0-based)
+ *
+ * Return: Status of TXT buffer
+ */
+static enum ctucan_txtb_status ctucan_get_tx_status(struct ctucan_priv *priv, u8 buf)
+{
+ u32 tx_status = ctucan_read32(priv, CTUCANFD_TX_STATUS);
+ enum ctucan_txtb_status status = (tx_status >> (buf * 4)) & 0x7;
+
+ return status;
+}
+
+/**
+ * ctucan_is_txt_buf_writable() - Checks if frame can be inserted to TXT Buffer
+ * @priv: Pointer to private data
+ * @buf: Buffer index (0-based)
+ *
+ * Return: True - Frame can be inserted to TXT Buffer, False - If attempted, frame will not be
+ * inserted to TXT Buffer
+ */
+static bool ctucan_is_txt_buf_writable(struct ctucan_priv *priv, u8 buf)
+{
+ enum ctucan_txtb_status buf_status;
+
+ buf_status = ctucan_get_tx_status(priv, buf);
+ if (buf_status == TXT_RDY || buf_status == TXT_TRAN || buf_status == TXT_ABTP)
+ return false;
+
+ return true;
+}
+
+/**
+ * ctucan_insert_frame() - Inserts frame to TXT buffer
+ * @priv: Pointer to private data
+ * @cf: Pointer to CAN frame to be inserted
+ * @buf: TXT Buffer index to which frame is inserted (0-based)
+ * @isfdf: True - CAN FD Frame, False - CAN 2.0 Frame
+ *
+ * Return: True - Frame inserted successfully
+ * False - Frame was not inserted due to one of:
+ * 1. TXT Buffer is not writable (it is in wrong state)
+ * 2. Invalid TXT buffer index
+ * 3. Invalid frame length
+ */
+static bool ctucan_insert_frame(struct ctucan_priv *priv, const struct canfd_frame *cf, u8 buf,
+ bool isfdf)
+{
+ u32 buf_base;
+ u32 ffw = 0;
+ u32 idw = 0;
+ unsigned int i;
+
+ if (buf >= priv->ntxbufs)
+ return false;
+
+ if (!ctucan_is_txt_buf_writable(priv, buf))
+ return false;
+
+ if (cf->len > CANFD_MAX_DLEN)
+ return false;
+
+ /* Prepare Frame format */
+ if (cf->can_id & CAN_RTR_FLAG)
+ ffw |= REG_FRAME_FORMAT_W_RTR;
+
+ if (cf->can_id & CAN_EFF_FLAG)
+ ffw |= REG_FRAME_FORMAT_W_IDE;
+
+ if (isfdf) {
+ ffw |= REG_FRAME_FORMAT_W_FDF;
+ if (cf->flags & CANFD_BRS)
+ ffw |= REG_FRAME_FORMAT_W_BRS;
+ }
+
+ ffw |= FIELD_PREP(REG_FRAME_FORMAT_W_DLC, can_fd_len2dlc(cf->len));
+
+ /* Prepare identifier */
+ if (cf->can_id & CAN_EFF_FLAG)
+ idw = cf->can_id & CAN_EFF_MASK;
+ else
+ idw = FIELD_PREP(REG_IDENTIFIER_W_IDENTIFIER_BASE, cf->can_id & CAN_SFF_MASK);
+
+ /* Write ID, Frame format, Don't write timestamp -> Time triggered transmission disabled */
+ buf_base = (buf + 1) * 0x100;
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_FRAME_FORMAT_W, ffw);
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_IDENTIFIER_W, idw);
+
+ /* Write Data payload */
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf->len; i += 4) {
+ u32 data = le32_to_cpu(*(__le32 *)(cf->data + i));
+
+ ctucan_write_txt_buf(priv, buf_base, CTUCANFD_DATA_1_4_W + i, data);
+ }
+ }
+
+ return true;
+}
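+
+/* Illustration of the addressing above: TXT buffer memory regions are spaced
+ * 0x100 bytes apart, so buf_base is 0x100 for buffer 0, 0x200 for buffer 1,
+ * and so on; the frame format word, identifier and data words are then
+ * written at the fixed CTUCANFD_*_W offsets within that region.
+ */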
+
+/**
+ * ctucan_give_txtb_cmd() - Applies command on TXT buffer
+ * @priv: Pointer to private data
+ * @cmd: Command to give
+ * @buf: Buffer index (0-based)
+ */
+static void ctucan_give_txtb_cmd(struct ctucan_priv *priv, enum ctucan_txtb_command cmd, u8 buf)
+{
+ u32 tx_cmd = cmd;
+
+ tx_cmd |= 1 << (buf + 8);
+ ctucan_write32(priv, CTUCANFD_TX_COMMAND, tx_cmd);
+}
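+
+/* Worked example for the command word above: marking TXT buffer 2 as ready
+ * gives tx_cmd = TXT_CMD_SET_READY | BIT(2 + 8) = 0x02 | 0x400 = 0x402.
+ */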
+
+/**
+ * ctucan_start_xmit() - Starts the transmission
+ * @skb: sk_buff pointer that contains data to be Txed
+ * @ndev: Pointer to net_device structure
+ *
+ * Invoked from upper layers to initiate transmission. Uses the next available free TXT Buffer and
+ * populates its fields to start the transmission.
+ *
+ * Return: %NETDEV_TX_OK on success, %NETDEV_TX_BUSY when no free TXT buffer is available,
+ * negative return values reserved for error cases
+ */
+static netdev_tx_t ctucan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u32 txtb_id;
+ bool ok;
+ unsigned long flags;
+
+ if (can_dev_dropped_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (unlikely(!CTU_CAN_FD_TXTNF(priv))) {
+ netif_stop_queue(ndev);
+ netdev_err(ndev, "BUG!, no TXB free when queue awake!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ txtb_id = priv->txb_head % priv->ntxbufs;
+ ctucan_netdev_dbg(ndev, "%s: using TXB#%u\n", __func__, txtb_id);
+ ok = ctucan_insert_frame(priv, cf, txtb_id, can_is_canfd_skb(skb));
+
+ if (!ok) {
+ netdev_err(ndev, "BUG! TXNF set but cannot insert frame into TXTB! HW Bug?");
+ kfree_skb(skb);
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ can_put_echo_skb(skb, ndev, txtb_id, 0);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+ ctucan_give_txtb_cmd(priv, TXT_CMD_SET_READY, txtb_id);
+ priv->txb_head++;
+
+ /* Check if all TX buffers are full */
+ if (!CTU_CAN_FD_TXTNF(priv))
+ netif_stop_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ctucan_read_rx_frame() - Reads frame from RX FIFO
+ * @priv: Pointer to CTU CAN FD's private data
+ * @cf: Pointer to CAN frame struct
+ * @ffw: Previously read frame format word
+ *
+ * Note: Frame format word must be read separately and provided in 'ffw'.
+ */
+static void ctucan_read_rx_frame(struct ctucan_priv *priv, struct canfd_frame *cf, u32 ffw)
+{
+ u32 idw;
+ unsigned int i;
+ unsigned int wc;
+ unsigned int len;
+
+ idw = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ if (FIELD_GET(REG_FRAME_FORMAT_W_IDE, ffw))
+ cf->can_id = (idw & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ cf->can_id = (idw >> 18) & CAN_SFF_MASK;
+
+ /* BRS, ESI, RTR Flags */
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) {
+ if (FIELD_GET(REG_FRAME_FORMAT_W_BRS, ffw))
+ cf->flags |= CANFD_BRS;
+ if (FIELD_GET(REG_FRAME_FORMAT_W_ESI_RSV, ffw))
+ cf->flags |= CANFD_ESI;
+ } else if (FIELD_GET(REG_FRAME_FORMAT_W_RTR, ffw)) {
+ cf->can_id |= CAN_RTR_FLAG;
+ }
+
+ wc = FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw) - 3;
+
+ /* DLC */
+ if (FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw) <= 8) {
+ len = FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw);
+ } else {
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw))
+ len = wc << 2;
+ else
+ len = 8;
+ }
+ cf->len = len;
+ if (unlikely(len > wc * 4))
+ len = wc * 4;
+
+ /* Timestamp - Read and throw away */
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+
+ /* Data */
+ for (i = 0; i < len; i += 4) {
+ u32 data = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ *(__le32 *)(cf->data + i) = cpu_to_le32(data);
+ }
+ while (unlikely(i < wc * 4)) {
+ ctucan_read32(priv, CTUCANFD_RX_DATA);
+ i += 4;
+ }
+}
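+
+/* Worked example for the length handling above: assuming the controller
+ * reports RWCNT = 19 for a 64-byte CAN FD frame (identifier word, two
+ * timestamp words and 16 data words), wc = 19 - 3 = 16 and, since the DLC
+ * field is above 8 and FDF is set, len = wc << 2 = 64 bytes.
+ */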
+
+/**
+ * ctucan_rx() - Called from CAN ISR to complete the received frame processing
+ * @ndev: Pointer to net_device structure
+ *
+ * This function is invoked from the CAN ISR (NAPI poll) to process Rx frames. It does minimal
+ * processing and invokes "netif_receive_skb" to complete further processing.
+ * Return: 1 when a frame is passed to the network layer, 0 when the first frame word has been
+ *	 read but the system is temporarily out of free SKBs and the SKB allocation is left to
+ *	 be resolved later, -%EAGAIN when the Rx FIFO is empty.
+ */
+static int ctucan_rx(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct canfd_frame *cf;
+ struct sk_buff *skb;
+ u32 ffw;
+
+ if (test_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags)) {
+ ffw = priv->rxfrm_first_word;
+ clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+ } else {
+ ffw = ctucan_read32(priv, CTUCANFD_RX_DATA);
+ }
+
+ if (!FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw))
+ return -EAGAIN;
+
+ if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw))
+ skb = alloc_canfd_skb(ndev, &cf);
+ else
+ skb = alloc_can_skb(ndev, (struct can_frame **)&cf);
+
+ if (unlikely(!skb)) {
+ priv->rxfrm_first_word = ffw;
+ set_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
+ return 0;
+ }
+
+ ctucan_read_rx_frame(priv, cf, ffw);
+
+ stats->rx_bytes += cf->len;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
+/**
+ * ctucan_read_fault_state() - Reads CTU CAN FD's fault confinement state.
+ * @priv: Pointer to private data
+ *
+ * Returns: Fault confinement state of controller
+ */
+static enum can_state ctucan_read_fault_state(struct ctucan_priv *priv)
+{
+ u32 fs;
+ u32 rec_tec;
+ u32 ewl;
+
+ fs = ctucan_read32(priv, CTUCANFD_EWL);
+ rec_tec = ctucan_read32(priv, CTUCANFD_REC);
+ ewl = FIELD_GET(REG_EWL_EW_LIMIT, fs);
+
+ if (FIELD_GET(REG_EWL_ERA, fs)) {
+ if (ewl > FIELD_GET(REG_REC_REC_VAL, rec_tec) &&
+ ewl > FIELD_GET(REG_REC_TEC_VAL, rec_tec))
+ return CAN_STATE_ERROR_ACTIVE;
+ else
+ return CAN_STATE_ERROR_WARNING;
+ } else if (FIELD_GET(REG_EWL_ERP, fs)) {
+ return CAN_STATE_ERROR_PASSIVE;
+ } else if (FIELD_GET(REG_EWL_BOF, fs)) {
+ return CAN_STATE_BUS_OFF;
+ }
+
+ WARN(true, "Invalid error state");
+ return CAN_STATE_ERROR_PASSIVE;
+}
+
+/**
+ * ctucan_get_rec_tec() - Reads REC/TEC counter values from controller
+ * @priv: Pointer to private data
+ * @bec: Pointer to Error counter structure
+ */
+static void ctucan_get_rec_tec(struct ctucan_priv *priv, struct can_berr_counter *bec)
+{
+ u32 err_ctrs = ctucan_read32(priv, CTUCANFD_REC);
+
+ bec->rxerr = FIELD_GET(REG_REC_REC_VAL, err_ctrs);
+ bec->txerr = FIELD_GET(REG_REC_TEC_VAL, err_ctrs);
+}
+
+/**
+ * ctucan_err_interrupt() - Error frame ISR
+ * @ndev: net_device pointer
+ * @isr: interrupt status register value
+ *
+ * This is the CAN error interrupt and it will check the type of error and forward the error
+ * frame to upper layers.
+ */
+static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ enum can_state state;
+ struct can_berr_counter bec;
+ u32 err_capt_alc;
+ int dologerr = net_ratelimit();
+
+ ctucan_get_rec_tec(priv, &bec);
+ state = ctucan_read_fault_state(priv);
+ err_capt_alc = ctucan_read32(priv, CTUCANFD_ERR_CAPT);
+
+ if (dologerr)
+ netdev_info(ndev, "%s: ISR = 0x%08x, rxerr %d, txerr %d, error type %lu, pos %lu, ALC id_field %lu, bit %lu\n",
+ __func__, isr, bec.rxerr, bec.txerr,
+ FIELD_GET(REG_ERR_CAPT_ERR_TYPE, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ERR_POS, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ALC_ID_FIELD, err_capt_alc),
+ FIELD_GET(REG_ERR_CAPT_ALC_BIT, err_capt_alc));
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ /* EWLI: error warning limit condition met
+ * FCSI: fault confinement state changed
+ * ALI: arbitration lost (just informative)
+ * BEI: bus error interrupt
+ */
+ if (FIELD_GET(REG_INT_STAT_FCSI, isr) || FIELD_GET(REG_INT_STAT_EWLI, isr)) {
+ netdev_info(ndev, "state changes from %s to %s\n",
+ ctucan_state_to_str(priv->can.state),
+ ctucan_state_to_str(state));
+
+ if (priv->can.state == state)
+ netdev_warn(ndev,
+ "current and previous state is the same! (missed interrupt?)\n");
+
+ priv->can.state = state;
+ switch (state) {
+ case CAN_STATE_BUS_OFF:
+ priv->can.can_stats.bus_off++;
+ can_bus_off(ndev);
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
+ break;
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can.can_stats.error_passive++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] = (bec.rxerr > 127) ?
+ CAN_ERR_CRTL_RX_PASSIVE :
+ CAN_ERR_CRTL_TX_PASSIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ break;
+ case CAN_STATE_ERROR_WARNING:
+ priv->can.can_stats.error_warning++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
+ cf->data[1] |= (bec.txerr > bec.rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+ break;
+ case CAN_STATE_ERROR_ACTIVE:
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ break;
+ default:
+ netdev_warn(ndev, "unhandled error state (%d:%s)!\n",
+ state, ctucan_state_to_str(state));
+ break;
+ }
+ }
+
+ /* Check for Arbitration Lost interrupt */
+ if (FIELD_GET(REG_INT_STAT_ALI, isr)) {
+ if (dologerr)
+ netdev_info(ndev, "arbitration lost\n");
+ priv->can.can_stats.arbitration_lost++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_LOSTARB;
+ cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
+ }
+ }
+
+ /* Check for Bus Error interrupt */
+ if (FIELD_GET(REG_INT_STAT_BEI, isr)) {
+ netdev_info(ndev, "bus error\n");
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->data[2] = CAN_ERR_PROT_UNSPEC;
+ cf->data[3] = CAN_ERR_PROT_LOC_UNSPEC;
+ }
+ }
+
+ if (skb) {
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+}
+
+/**
+ * ctucan_rx_poll() - Poll routine for rx packets (NAPI)
+ * @napi: NAPI structure pointer
+ * @quota: Max number of rx packets to be processed.
+ *
+ * This is the Rx poll routine. It processes at most @quota packets per call.
+ *
+ * Return: Number of packets received
+ */
+static int ctucan_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *ndev = napi->dev;
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int work_done = 0;
+ u32 status;
+ u32 framecnt;
+ int res = 1;
+
+ framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS));
+ while (framecnt && work_done < quota && res > 0) {
+ res = ctucan_rx(ndev);
+ work_done++;
+ framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS));
+ }
+
+ /* Check for RX FIFO Overflow */
+ status = ctucan_read32(priv, CTUCANFD_STATUS);
+ if (FIELD_GET(REG_STATUS_DOR, status)) {
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ netdev_info(ndev, "rx_poll: rx fifo overflow\n");
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+
+ /* Clear Data Overrun */
+ ctucan_write32(priv, CTUCANFD_COMMAND, REG_COMMAND_CDO);
+ }
+
+ if (!framecnt && res != 0) {
+ if (napi_complete_done(napi, work_done)) {
+ /* Clear and enable RBNEI. It is level-triggered, so
+ * there is no race condition.
+ */
+ ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_RBNEI);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_CLR, REG_INT_STAT_RBNEI);
+ }
+ }
+
+ return work_done;
+}
+
+/**
+ * ctucan_rotate_txb_prio() - Rotates priorities of TXT Buffers
+ * @ndev: net_device pointer
+ */
+static void ctucan_rotate_txb_prio(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 prio = priv->txb_prio;
+
+ prio = (prio << 4) | ((prio >> ((priv->ntxbufs - 1) * 4)) & 0xF);
+ ctucan_netdev_dbg(ndev, "%s: from 0x%08x to 0x%08x\n", __func__, priv->txb_prio, prio);
+ priv->txb_prio = prio;
+ ctucan_write32(priv, CTUCANFD_TX_PRIORITY, prio);
+}
+
+/**
+ * ctucan_tx_interrupt() - Tx done Isr
+ * @ndev: net_device pointer
+ */
+static void ctucan_tx_interrupt(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ bool first = true;
+ bool some_buffers_processed;
+ unsigned long flags;
+ enum ctucan_txtb_status txtb_status;
+ u32 txtb_id;
+
+ /* read tx_status
+ * if txb[n].finished (bit 2)
+ * if ok -> echo
+ * if error / aborted -> ?? (find how to handle oneshot mode)
+ * txb_tail++
+ */
+ do {
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ some_buffers_processed = false;
+ while ((int)(priv->txb_head - priv->txb_tail) > 0) {
+ txtb_id = priv->txb_tail % priv->ntxbufs;
+ txtb_status = ctucan_get_tx_status(priv, txtb_id);
+
+ ctucan_netdev_dbg(ndev, "TXI: TXB#%u: status 0x%x\n", txtb_id, txtb_status);
+
+ switch (txtb_status) {
+ case TXT_TOK:
+ ctucan_netdev_dbg(ndev, "TXT_OK\n");
+ stats->tx_bytes += can_get_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_packets++;
+ break;
+ case TXT_ERR:
+				/* This indicates that the retransmit limit has been reached. Obviously
+				 * we should not echo the frame, but also not indicate any kind of
+				 * error. If desired, it was already reported (possibly multiple
+				 * times) on each arbitration loss.
+ */
+ netdev_warn(ndev, "TXB in Error state\n");
+ can_free_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_dropped++;
+ break;
+ case TXT_ABT:
+ /* Same as for TXT_ERR, only with different cause. We *could*
+ * re-queue the frame, but multiqueue/abort is not supported yet
+ * anyway.
+ */
+ netdev_warn(ndev, "TXB in Aborted state\n");
+ can_free_echo_skb(ndev, txtb_id, NULL);
+ stats->tx_dropped++;
+ break;
+ default:
+ /* Bug only if the first buffer is not finished, otherwise it is
+ * pretty much expected.
+ */
+ if (first) {
+ netdev_err(ndev,
+ "BUG: TXB#%u not in a finished state (0x%x)!\n",
+ txtb_id, txtb_status);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ /* do not clear nor wake */
+ return;
+ }
+ goto clear;
+ }
+ priv->txb_tail++;
+ first = false;
+ some_buffers_processed = true;
+ /* Adjust priorities *before* marking the buffer as empty. */
+ ctucan_rotate_txb_prio(ndev);
+ ctucan_give_txtb_cmd(priv, TXT_CMD_SET_EMPTY, txtb_id);
+ }
+clear:
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ /* If no buffers were processed this time, we cannot clear - that would introduce
+ * a race condition.
+ */
+ if (some_buffers_processed) {
+			/* Clear the interrupt again. We do not want to receive another interrupt
+			 * for a buffer that has already been handled. If it is the last finished
+			 * one, that would lead to a spurious interrupt being logged.
+ */
+ ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_TXBHCI);
+ }
+ } while (some_buffers_processed);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ /* Check if at least one TX buffer is free */
+ if (CTU_CAN_FD_TXTNF(priv))
+ netif_wake_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+}
+
+/**
+ * ctucan_interrupt() - CAN Isr
+ * @irq: irq number
+ * @dev_id: device id pointer
+ *
+ * This is the CTU CAN FD ISR. It checks for the type of interrupt
+ * and invokes the corresponding handler.
+ *
+ * Return:
+ * IRQ_NONE - If no interrupt was pending, IRQ_HANDLED otherwise
+ */
+static irqreturn_t ctucan_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 isr, icr;
+ u32 imask;
+ int irq_loops;
+
+ for (irq_loops = 0; irq_loops < 10000; irq_loops++) {
+ /* Get the interrupt status */
+ isr = ctucan_read32(priv, CTUCANFD_INT_STAT);
+
+ if (!isr)
+ return irq_loops ? IRQ_HANDLED : IRQ_NONE;
+
+ /* Receive Buffer Not Empty Interrupt */
+ if (FIELD_GET(REG_INT_STAT_RBNEI, isr)) {
+ ctucan_netdev_dbg(ndev, "RXBNEI\n");
+ /* Mask RXBNEI the first, then clear interrupt and schedule NAPI. Even if
+ * another IRQ fires, RBNEI will always be 0 (masked).
+ */
+ icr = REG_INT_STAT_RBNEI;
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, icr);
+ ctucan_write32(priv, CTUCANFD_INT_STAT, icr);
+ napi_schedule(&priv->napi);
+ }
+
+ /* TXT Buffer HW Command Interrupt */
+ if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) {
+ ctucan_netdev_dbg(ndev, "TXBHCI\n");
+ /* Cleared inside */
+ ctucan_tx_interrupt(ndev);
+ }
+
+ /* Error interrupts */
+ if (FIELD_GET(REG_INT_STAT_EWLI, isr) ||
+ FIELD_GET(REG_INT_STAT_FCSI, isr) ||
+ FIELD_GET(REG_INT_STAT_ALI, isr)) {
+ icr = isr & (REG_INT_STAT_EWLI | REG_INT_STAT_FCSI | REG_INT_STAT_ALI);
+
+ ctucan_netdev_dbg(ndev, "some ERR interrupt: clearing 0x%08x\n", icr);
+ ctucan_write32(priv, CTUCANFD_INT_STAT, icr);
+ ctucan_err_interrupt(ndev, isr);
+ }
+ /* Ignore RI, TI, LFI, RFI, BSI */
+ }
+
+ netdev_err(ndev, "%s: stuck interrupt (isr=0x%08x), stopping\n", __func__, isr);
+
+ if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) {
+ int i;
+
+ netdev_err(ndev, "txb_head=0x%08x txb_tail=0x%08x\n",
+ priv->txb_head, priv->txb_tail);
+ for (i = 0; i < priv->ntxbufs; i++) {
+ u32 status = ctucan_get_tx_status(priv, i);
+
+ netdev_err(ndev, "txb[%d] txb status=0x%08x\n", i, status);
+ }
+ }
+
+ imask = 0xffffffff;
+ ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, imask);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, imask);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ctucan_chip_stop() - Driver stop routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver's stop routine. It will disable the
+ * interrupts and disable the controller.
+ */
+static void ctucan_chip_stop(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ u32 mask = 0xffffffff;
+ u32 mode;
+
+ /* Disable interrupts and disable CAN */
+ ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, mask);
+ ctucan_write32(priv, CTUCANFD_INT_MASK_SET, mask);
+ mode = ctucan_read32(priv, CTUCANFD_MODE);
+ mode &= ~REG_MODE_ENA;
+ ctucan_write32(priv, CTUCANFD_MODE, mode);
+
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+/**
+ * ctucan_open() - Driver open routine
+ * @ndev: Pointer to net_device structure
+ *
+ * This is the driver open routine.
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_open(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
+ __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ goto err_reset;
+
+ /* Common open */
+ ret = open_candev(ndev);
+ if (ret) {
+ netdev_warn(ndev, "open_candev failed!\n");
+ goto err_open;
+ }
+
+ ret = request_irq(ndev->irq, ctucan_interrupt, priv->irq_flags, ndev->name, ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "irq allocation for CAN failed\n");
+ goto err_irq;
+ }
+
+ ret = ctucan_chip_start(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "ctucan_chip_start failed!\n");
+ goto err_chip_start;
+ }
+
+ netdev_info(ndev, "ctu_can_fd device registered\n");
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_chip_start:
+ free_irq(ndev->irq, ndev);
+err_irq:
+ close_candev(ndev);
+err_open:
+err_reset:
+ pm_runtime_put(priv->dev);
+
+ return ret;
+}
+
+/**
+ * ctucan_close() - Driver close routine
+ * @ndev: Pointer to net_device structure
+ *
+ * Return: 0 always
+ */
+static int ctucan_close(struct net_device *ndev)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+ ctucan_chip_stop(ndev);
+ free_irq(ndev->irq, ndev);
+ close_candev(ndev);
+
+ pm_runtime_put(priv->dev);
+
+ return 0;
+}
+
+/**
+ * ctucan_get_berr_counter() - error counter routine
+ * @ndev: Pointer to net_device structure
+ * @bec: Pointer to can_berr_counter structure
+ *
+ * This is the driver error counter routine.
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec)
+{
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ ctucan_get_rec_tec(priv, bec);
+ pm_runtime_put(priv->dev);
+
+ return 0;
+}
+
+static const struct net_device_ops ctucan_netdev_ops = {
+ .ndo_open = ctucan_open,
+ .ndo_stop = ctucan_close,
+ .ndo_start_xmit = ctucan_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops ctucan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+int ctucan_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ }
+
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ return 0;
+}
+EXPORT_SYMBOL(ctucan_suspend);
+
+int ctucan_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_running(ndev)) {
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ctucan_resume);
+
+int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigned int ntxbufs,
+ unsigned long can_clk_rate, int pm_enable_call,
+ void (*set_drvdata_fnc)(struct device *dev, struct net_device *ndev))
+{
+ struct ctucan_priv *priv;
+ struct net_device *ndev;
+ int ret;
+
+ /* Create a CAN device instance */
+ ndev = alloc_candev(sizeof(struct ctucan_priv), ntxbufs);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ spin_lock_init(&priv->tx_lock);
+ INIT_LIST_HEAD(&priv->peers_on_pdev);
+ priv->ntxbufs = ntxbufs;
+ priv->dev = dev;
+ priv->can.bittiming_const = &ctu_can_fd_bit_timing_max;
+ priv->can.data_bittiming_const = &ctu_can_fd_bit_timing_data_max;
+ priv->can.do_set_mode = ctucan_do_set_mode;
+
+ /* Needed for timing adjustment to be performed as soon as possible */
+ priv->can.do_set_bittiming = ctucan_set_bittiming;
+ priv->can.do_set_data_bittiming = ctucan_set_data_bittiming;
+
+ priv->can.do_get_berr_counter = ctucan_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK
+ | CAN_CTRLMODE_LISTENONLY
+ | CAN_CTRLMODE_FD
+ | CAN_CTRLMODE_PRESUME_ACK
+ | CAN_CTRLMODE_BERR_REPORTING
+ | CAN_CTRLMODE_FD_NON_ISO
+ | CAN_CTRLMODE_ONE_SHOT;
+ priv->mem_base = addr;
+
+ /* Get IRQ for the device */
+ ndev->irq = irq;
+ ndev->flags |= IFF_ECHO; /* We support local echo */
+
+ if (set_drvdata_fnc)
+ set_drvdata_fnc(dev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+ ndev->netdev_ops = &ctucan_netdev_ops;
+ ndev->ethtool_ops = &ctucan_ethtool_ops;
+
+ /* Getting the can_clk info */
+ if (!can_clk_rate) {
+ priv->can_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->can_clk)) {
+ dev_err(dev, "Device clock not found.\n");
+ ret = PTR_ERR(priv->can_clk);
+ goto err_free;
+ }
+ can_clk_rate = clk_get_rate(priv->can_clk);
+ }
+
+ priv->write_reg = ctucan_write32_le;
+ priv->read_reg = ctucan_read32_le;
+
+ if (pm_enable_call)
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
+ __func__, ret);
+ pm_runtime_put_noidle(priv->dev);
+ goto err_pmdisable;
+ }
+
+	/* Check for big-endianness and set the matching I/O accessors */
+ if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) {
+ priv->write_reg = ctucan_write32_be;
+ priv->read_reg = ctucan_read32_be;
+ if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) {
+ netdev_err(ndev, "CTU_CAN_FD signature not found\n");
+ ret = -ENODEV;
+ goto err_deviceoff;
+ }
+ }
+
+ ret = ctucan_reset(ndev);
+ if (ret < 0)
+ goto err_deviceoff;
+
+ priv->can.clock.freq = can_clk_rate;
+
+ netif_napi_add(ndev, &priv->napi, ctucan_rx_poll);
+
+ ret = register_candev(ndev);
+ if (ret) {
+ dev_err(dev, "fail to register failed (err=%d)\n", ret);
+ goto err_deviceoff;
+ }
+
+ pm_runtime_put(dev);
+
+ netdev_dbg(ndev, "mem_base=0x%p irq=%d clock=%d, no. of txt buffers:%d\n",
+ priv->mem_base, ndev->irq, priv->can.clock.freq, priv->ntxbufs);
+
+ return 0;
+
+err_deviceoff:
+ pm_runtime_put(priv->dev);
+err_pmdisable:
+ if (pm_enable_call)
+ pm_runtime_disable(dev);
+err_free:
+ list_del_init(&priv->peers_on_pdev);
+ free_candev(ndev);
+ return ret;
+}
+EXPORT_SYMBOL(ctucan_probe_common);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Jerabek <martin.jerabek01@gmail.com>");
+MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>");
+MODULE_AUTHOR("Ondrej Ille <ondrej.ille@gmail.com>");
+MODULE_DESCRIPTION("CTU CAN FD interface");
diff --git a/drivers/net/can/ctucanfd/ctucanfd_kframe.h b/drivers/net/can/ctucanfd/ctucanfd_kframe.h
new file mode 100644
index 000000000000..3491299eaac2
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_kframe.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+/* This file is autogenerated, DO NOT EDIT! */
+
+#ifndef __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__
+#define __CTU_CAN_FD_CAN_FD_FRAME_FORMAT__
+
+#include <linux/bits.h>
+
+/* CAN_Frame_format memory map */
+enum ctu_can_fd_can_frame_format {
+ CTUCANFD_FRAME_FORMAT_W = 0x0,
+ CTUCANFD_IDENTIFIER_W = 0x4,
+ CTUCANFD_TIMESTAMP_L_W = 0x8,
+ CTUCANFD_TIMESTAMP_U_W = 0xc,
+ CTUCANFD_DATA_1_4_W = 0x10,
+ CTUCANFD_DATA_5_8_W = 0x14,
+ CTUCANFD_DATA_61_64_W = 0x4c,
+};
+
+/* CAN_FD_Frame_format memory region */
+
+/* FRAME_FORMAT_W registers */
+#define REG_FRAME_FORMAT_W_DLC GENMASK(3, 0)
+#define REG_FRAME_FORMAT_W_RTR BIT(5)
+#define REG_FRAME_FORMAT_W_IDE BIT(6)
+#define REG_FRAME_FORMAT_W_FDF BIT(7)
+#define REG_FRAME_FORMAT_W_BRS BIT(9)
+#define REG_FRAME_FORMAT_W_ESI_RSV BIT(10)
+#define REG_FRAME_FORMAT_W_RWCNT GENMASK(15, 11)
+
+/* IDENTIFIER_W registers */
+#define REG_IDENTIFIER_W_IDENTIFIER_EXT GENMASK(17, 0)
+#define REG_IDENTIFIER_W_IDENTIFIER_BASE GENMASK(28, 18)
+
+/* TIMESTAMP_L_W registers */
+#define REG_TIMESTAMP_L_W_TIME_STAMP_L_W GENMASK(31, 0)
+
+/* TIMESTAMP_U_W registers */
+#define REG_TIMESTAMP_U_W_TIMESTAMP_U_W GENMASK(31, 0)
+
+/* DATA_1_4_W registers */
+#define REG_DATA_1_4_W_DATA_1 GENMASK(7, 0)
+#define REG_DATA_1_4_W_DATA_2 GENMASK(15, 8)
+#define REG_DATA_1_4_W_DATA_3 GENMASK(23, 16)
+#define REG_DATA_1_4_W_DATA_4 GENMASK(31, 24)
+
+/* DATA_5_8_W registers */
+#define REG_DATA_5_8_W_DATA_5 GENMASK(7, 0)
+#define REG_DATA_5_8_W_DATA_6 GENMASK(15, 8)
+#define REG_DATA_5_8_W_DATA_7 GENMASK(23, 16)
+#define REG_DATA_5_8_W_DATA_8 GENMASK(31, 24)
+
+/* DATA_61_64_W registers */
+#define REG_DATA_61_64_W_DATA_61 GENMASK(7, 0)
+#define REG_DATA_61_64_W_DATA_62 GENMASK(15, 8)
+#define REG_DATA_61_64_W_DATA_63 GENMASK(23, 16)
+#define REG_DATA_61_64_W_DATA_64 GENMASK(31, 24)
+
+#endif
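
A minimal sketch, assuming the FIELD_PREP()/FIELD_GET() helpers from <linux/bitfield.h>, of how the frame-format field macros above are meant to be consumed; the example_* helpers are illustrative only:

#include <linux/bitfield.h>
#include <linux/types.h>

/* Compose a FRAME_FORMAT_W word for a CAN FD frame with bit-rate switching */
static inline u32 example_build_frame_format(u8 dlc)
{
	return FIELD_PREP(REG_FRAME_FORMAT_W_DLC, dlc) |
	       REG_FRAME_FORMAT_W_FDF |
	       REG_FRAME_FORMAT_W_BRS;
}

/* Extract the read word count (RWCNT) of a received frame */
static inline u32 example_frame_rwcnt(u32 ffw)
{
	return FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw);
}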
diff --git a/drivers/net/can/ctucanfd/ctucanfd_kregs.h b/drivers/net/can/ctucanfd/ctucanfd_kregs.h
new file mode 100644
index 000000000000..0c181ab51bf8
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_kregs.h
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+/* This file is autogenerated, DO NOT EDIT! */
+
+#ifndef __CTU_CAN_FD_CAN_FD_REGISTER_MAP__
+#define __CTU_CAN_FD_CAN_FD_REGISTER_MAP__
+
+#include <linux/bits.h>
+
+/* CAN_Registers memory map */
+enum ctu_can_fd_can_registers {
+ CTUCANFD_DEVICE_ID = 0x0,
+ CTUCANFD_VERSION = 0x2,
+ CTUCANFD_MODE = 0x4,
+ CTUCANFD_SETTINGS = 0x6,
+ CTUCANFD_STATUS = 0x8,
+ CTUCANFD_COMMAND = 0xc,
+ CTUCANFD_INT_STAT = 0x10,
+ CTUCANFD_INT_ENA_SET = 0x14,
+ CTUCANFD_INT_ENA_CLR = 0x18,
+ CTUCANFD_INT_MASK_SET = 0x1c,
+ CTUCANFD_INT_MASK_CLR = 0x20,
+ CTUCANFD_BTR = 0x24,
+ CTUCANFD_BTR_FD = 0x28,
+ CTUCANFD_EWL = 0x2c,
+ CTUCANFD_ERP = 0x2d,
+ CTUCANFD_FAULT_STATE = 0x2e,
+ CTUCANFD_REC = 0x30,
+ CTUCANFD_TEC = 0x32,
+ CTUCANFD_ERR_NORM = 0x34,
+ CTUCANFD_ERR_FD = 0x36,
+ CTUCANFD_CTR_PRES = 0x38,
+ CTUCANFD_FILTER_A_MASK = 0x3c,
+ CTUCANFD_FILTER_A_VAL = 0x40,
+ CTUCANFD_FILTER_B_MASK = 0x44,
+ CTUCANFD_FILTER_B_VAL = 0x48,
+ CTUCANFD_FILTER_C_MASK = 0x4c,
+ CTUCANFD_FILTER_C_VAL = 0x50,
+ CTUCANFD_FILTER_RAN_LOW = 0x54,
+ CTUCANFD_FILTER_RAN_HIGH = 0x58,
+ CTUCANFD_FILTER_CONTROL = 0x5c,
+ CTUCANFD_FILTER_STATUS = 0x5e,
+ CTUCANFD_RX_MEM_INFO = 0x60,
+ CTUCANFD_RX_POINTERS = 0x64,
+ CTUCANFD_RX_STATUS = 0x68,
+ CTUCANFD_RX_SETTINGS = 0x6a,
+ CTUCANFD_RX_DATA = 0x6c,
+ CTUCANFD_TX_STATUS = 0x70,
+ CTUCANFD_TX_COMMAND = 0x74,
+ CTUCANFD_TXTB_INFO = 0x76,
+ CTUCANFD_TX_PRIORITY = 0x78,
+ CTUCANFD_ERR_CAPT = 0x7c,
+ CTUCANFD_RETR_CTR = 0x7d,
+ CTUCANFD_ALC = 0x7e,
+ CTUCANFD_TS_INFO = 0x7f,
+ CTUCANFD_TRV_DELAY = 0x80,
+ CTUCANFD_SSP_CFG = 0x82,
+ CTUCANFD_RX_FR_CTR = 0x84,
+ CTUCANFD_TX_FR_CTR = 0x88,
+ CTUCANFD_DEBUG_REGISTER = 0x8c,
+ CTUCANFD_YOLO_REG = 0x90,
+ CTUCANFD_TIMESTAMP_LOW = 0x94,
+ CTUCANFD_TIMESTAMP_HIGH = 0x98,
+ CTUCANFD_TXTB1_DATA_1 = 0x100,
+ CTUCANFD_TXTB1_DATA_2 = 0x104,
+ CTUCANFD_TXTB1_DATA_20 = 0x14c,
+ CTUCANFD_TXTB2_DATA_1 = 0x200,
+ CTUCANFD_TXTB2_DATA_2 = 0x204,
+ CTUCANFD_TXTB2_DATA_20 = 0x24c,
+ CTUCANFD_TXTB3_DATA_1 = 0x300,
+ CTUCANFD_TXTB3_DATA_2 = 0x304,
+ CTUCANFD_TXTB3_DATA_20 = 0x34c,
+ CTUCANFD_TXTB4_DATA_1 = 0x400,
+ CTUCANFD_TXTB4_DATA_2 = 0x404,
+ CTUCANFD_TXTB4_DATA_20 = 0x44c,
+};
+
+/* Control_registers memory region */
+
+/* DEVICE_ID VERSION registers */
+#define REG_DEVICE_ID_DEVICE_ID GENMASK(15, 0)
+#define REG_DEVICE_ID_VER_MINOR GENMASK(23, 16)
+#define REG_DEVICE_ID_VER_MAJOR GENMASK(31, 24)
+
+/* MODE SETTINGS registers */
+#define REG_MODE_RST BIT(0)
+#define REG_MODE_BMM BIT(1)
+#define REG_MODE_STM BIT(2)
+#define REG_MODE_AFM BIT(3)
+#define REG_MODE_FDE BIT(4)
+#define REG_MODE_TTTM BIT(5)
+#define REG_MODE_ROM BIT(6)
+#define REG_MODE_ACF BIT(7)
+#define REG_MODE_TSTM BIT(8)
+#define REG_MODE_RXBAM BIT(9)
+#define REG_MODE_SAM BIT(11)
+#define REG_MODE_RTRLE BIT(16)
+#define REG_MODE_RTRTH GENMASK(20, 17)
+#define REG_MODE_ILBP BIT(21)
+#define REG_MODE_ENA BIT(22)
+#define REG_MODE_NISOFD BIT(23)
+#define REG_MODE_PEX BIT(24)
+#define REG_MODE_TBFBO BIT(25)
+#define REG_MODE_FDRF BIT(26)
+
+/* STATUS registers */
+#define REG_STATUS_RXNE BIT(0)
+#define REG_STATUS_DOR BIT(1)
+#define REG_STATUS_TXNF BIT(2)
+#define REG_STATUS_EFT BIT(3)
+#define REG_STATUS_RXS BIT(4)
+#define REG_STATUS_TXS BIT(5)
+#define REG_STATUS_EWL BIT(6)
+#define REG_STATUS_IDLE BIT(7)
+#define REG_STATUS_PEXS BIT(8)
+#define REG_STATUS_STCNT BIT(16)
+
+/* COMMAND registers */
+#define REG_COMMAND_RXRPMV BIT(1)
+#define REG_COMMAND_RRB BIT(2)
+#define REG_COMMAND_CDO BIT(3)
+#define REG_COMMAND_ERCRST BIT(4)
+#define REG_COMMAND_RXFCRST BIT(5)
+#define REG_COMMAND_TXFCRST BIT(6)
+#define REG_COMMAND_CPEXS BIT(7)
+
+/* INT_STAT registers */
+#define REG_INT_STAT_RXI BIT(0)
+#define REG_INT_STAT_TXI BIT(1)
+#define REG_INT_STAT_EWLI BIT(2)
+#define REG_INT_STAT_DOI BIT(3)
+#define REG_INT_STAT_FCSI BIT(4)
+#define REG_INT_STAT_ALI BIT(5)
+#define REG_INT_STAT_BEI BIT(6)
+#define REG_INT_STAT_OFI BIT(7)
+#define REG_INT_STAT_RXFI BIT(8)
+#define REG_INT_STAT_BSI BIT(9)
+#define REG_INT_STAT_RBNEI BIT(10)
+#define REG_INT_STAT_TXBHCI BIT(11)
+
+/* INT_ENA_SET registers */
+#define REG_INT_ENA_SET_INT_ENA_SET GENMASK(11, 0)
+
+/* INT_ENA_CLR registers */
+#define REG_INT_ENA_CLR_INT_ENA_CLR GENMASK(11, 0)
+
+/* INT_MASK_SET registers */
+#define REG_INT_MASK_SET_INT_MASK_SET GENMASK(11, 0)
+
+/* INT_MASK_CLR registers */
+#define REG_INT_MASK_CLR_INT_MASK_CLR GENMASK(11, 0)
+
+/* BTR registers */
+#define REG_BTR_PROP GENMASK(6, 0)
+#define REG_BTR_PH1 GENMASK(12, 7)
+#define REG_BTR_PH2 GENMASK(18, 13)
+#define REG_BTR_BRP GENMASK(26, 19)
+#define REG_BTR_SJW GENMASK(31, 27)
+
+/* BTR_FD registers */
+#define REG_BTR_FD_PROP_FD GENMASK(5, 0)
+#define REG_BTR_FD_PH1_FD GENMASK(11, 7)
+#define REG_BTR_FD_PH2_FD GENMASK(17, 13)
+#define REG_BTR_FD_BRP_FD GENMASK(26, 19)
+#define REG_BTR_FD_SJW_FD GENMASK(31, 27)
+
+/* EWL ERP FAULT_STATE registers */
+#define REG_EWL_EW_LIMIT GENMASK(7, 0)
+#define REG_EWL_ERP_LIMIT GENMASK(15, 8)
+#define REG_EWL_ERA BIT(16)
+#define REG_EWL_ERP BIT(17)
+#define REG_EWL_BOF BIT(18)
+
+/* REC TEC registers */
+#define REG_REC_REC_VAL GENMASK(8, 0)
+#define REG_REC_TEC_VAL GENMASK(24, 16)
+
+/* ERR_NORM ERR_FD registers */
+#define REG_ERR_NORM_ERR_NORM_VAL GENMASK(15, 0)
+#define REG_ERR_NORM_ERR_FD_VAL GENMASK(31, 16)
+
+/* CTR_PRES registers */
+#define REG_CTR_PRES_CTPV GENMASK(8, 0)
+#define REG_CTR_PRES_PTX BIT(9)
+#define REG_CTR_PRES_PRX BIT(10)
+#define REG_CTR_PRES_ENORM BIT(11)
+#define REG_CTR_PRES_EFD BIT(12)
+
+/* FILTER_A_MASK registers */
+#define REG_FILTER_A_MASK_BIT_MASK_A_VAL GENMASK(28, 0)
+
+/* FILTER_A_VAL registers */
+#define REG_FILTER_A_VAL_BIT_VAL_A_VAL GENMASK(28, 0)
+
+/* FILTER_B_MASK registers */
+#define REG_FILTER_B_MASK_BIT_MASK_B_VAL GENMASK(28, 0)
+
+/* FILTER_B_VAL registers */
+#define REG_FILTER_B_VAL_BIT_VAL_B_VAL GENMASK(28, 0)
+
+/* FILTER_C_MASK registers */
+#define REG_FILTER_C_MASK_BIT_MASK_C_VAL GENMASK(28, 0)
+
+/* FILTER_C_VAL registers */
+#define REG_FILTER_C_VAL_BIT_VAL_C_VAL GENMASK(28, 0)
+
+/* FILTER_RAN_LOW registers */
+#define REG_FILTER_RAN_LOW_BIT_RAN_LOW_VAL GENMASK(28, 0)
+
+/* FILTER_RAN_HIGH registers */
+#define REG_FILTER_RAN_HIGH_BIT_RAN_HIGH_VAL GENMASK(28, 0)
+
+/* FILTER_CONTROL FILTER_STATUS registers */
+#define REG_FILTER_CONTROL_FANB BIT(0)
+#define REG_FILTER_CONTROL_FANE BIT(1)
+#define REG_FILTER_CONTROL_FAFB BIT(2)
+#define REG_FILTER_CONTROL_FAFE BIT(3)
+#define REG_FILTER_CONTROL_FBNB BIT(4)
+#define REG_FILTER_CONTROL_FBNE BIT(5)
+#define REG_FILTER_CONTROL_FBFB BIT(6)
+#define REG_FILTER_CONTROL_FBFE BIT(7)
+#define REG_FILTER_CONTROL_FCNB BIT(8)
+#define REG_FILTER_CONTROL_FCNE BIT(9)
+#define REG_FILTER_CONTROL_FCFB BIT(10)
+#define REG_FILTER_CONTROL_FCFE BIT(11)
+#define REG_FILTER_CONTROL_FRNB BIT(12)
+#define REG_FILTER_CONTROL_FRNE BIT(13)
+#define REG_FILTER_CONTROL_FRFB BIT(14)
+#define REG_FILTER_CONTROL_FRFE BIT(15)
+#define REG_FILTER_CONTROL_SFA BIT(16)
+#define REG_FILTER_CONTROL_SFB BIT(17)
+#define REG_FILTER_CONTROL_SFC BIT(18)
+#define REG_FILTER_CONTROL_SFR BIT(19)
+
+/* RX_MEM_INFO registers */
+#define REG_RX_MEM_INFO_RX_BUFF_SIZE GENMASK(12, 0)
+#define REG_RX_MEM_INFO_RX_MEM_FREE GENMASK(28, 16)
+
+/* RX_POINTERS registers */
+#define REG_RX_POINTERS_RX_WPP GENMASK(11, 0)
+#define REG_RX_POINTERS_RX_RPP GENMASK(27, 16)
+
+/* RX_STATUS RX_SETTINGS registers */
+#define REG_RX_STATUS_RXE BIT(0)
+#define REG_RX_STATUS_RXF BIT(1)
+#define REG_RX_STATUS_RXMOF BIT(2)
+#define REG_RX_STATUS_RXFRC GENMASK(14, 4)
+#define REG_RX_STATUS_RTSOP BIT(16)
+
+/* RX_DATA registers */
+#define REG_RX_DATA_RX_DATA GENMASK(31, 0)
+
+/* TX_STATUS registers */
+#define REG_TX_STATUS_TX1S GENMASK(3, 0)
+#define REG_TX_STATUS_TX2S GENMASK(7, 4)
+#define REG_TX_STATUS_TX3S GENMASK(11, 8)
+#define REG_TX_STATUS_TX4S GENMASK(15, 12)
+#define REG_TX_STATUS_TX5S GENMASK(19, 16)
+#define REG_TX_STATUS_TX6S GENMASK(23, 20)
+#define REG_TX_STATUS_TX7S GENMASK(27, 24)
+#define REG_TX_STATUS_TX8S GENMASK(31, 28)
+
+/* TX_COMMAND TXTB_INFO registers */
+#define REG_TX_COMMAND_TXCE BIT(0)
+#define REG_TX_COMMAND_TXCR BIT(1)
+#define REG_TX_COMMAND_TXCA BIT(2)
+#define REG_TX_COMMAND_TXB1 BIT(8)
+#define REG_TX_COMMAND_TXB2 BIT(9)
+#define REG_TX_COMMAND_TXB3 BIT(10)
+#define REG_TX_COMMAND_TXB4 BIT(11)
+#define REG_TX_COMMAND_TXB5 BIT(12)
+#define REG_TX_COMMAND_TXB6 BIT(13)
+#define REG_TX_COMMAND_TXB7 BIT(14)
+#define REG_TX_COMMAND_TXB8 BIT(15)
+#define REG_TX_COMMAND_TXT_BUFFER_COUNT GENMASK(19, 16)
+
+/* TX_PRIORITY registers */
+#define REG_TX_PRIORITY_TXT1P GENMASK(2, 0)
+#define REG_TX_PRIORITY_TXT2P GENMASK(6, 4)
+#define REG_TX_PRIORITY_TXT3P GENMASK(10, 8)
+#define REG_TX_PRIORITY_TXT4P GENMASK(14, 12)
+#define REG_TX_PRIORITY_TXT5P GENMASK(18, 16)
+#define REG_TX_PRIORITY_TXT6P GENMASK(22, 20)
+#define REG_TX_PRIORITY_TXT7P GENMASK(26, 24)
+#define REG_TX_PRIORITY_TXT8P GENMASK(30, 28)
+
+/* ERR_CAPT RETR_CTR ALC TS_INFO registers */
+#define REG_ERR_CAPT_ERR_POS GENMASK(4, 0)
+#define REG_ERR_CAPT_ERR_TYPE GENMASK(7, 5)
+#define REG_ERR_CAPT_RETR_CTR_VAL GENMASK(11, 8)
+#define REG_ERR_CAPT_ALC_BIT GENMASK(20, 16)
+#define REG_ERR_CAPT_ALC_ID_FIELD GENMASK(23, 21)
+#define REG_ERR_CAPT_TS_BITS GENMASK(29, 24)
+
+/* TRV_DELAY SSP_CFG registers */
+#define REG_TRV_DELAY_TRV_DELAY_VALUE GENMASK(6, 0)
+#define REG_TRV_DELAY_SSP_OFFSET GENMASK(23, 16)
+#define REG_TRV_DELAY_SSP_SRC GENMASK(25, 24)
+
+/* RX_FR_CTR registers */
+#define REG_RX_FR_CTR_RX_FR_CTR_VAL GENMASK(31, 0)
+
+/* TX_FR_CTR registers */
+#define REG_TX_FR_CTR_TX_FR_CTR_VAL GENMASK(31, 0)
+
+/* DEBUG_REGISTER registers */
+#define REG_DEBUG_REGISTER_STUFF_COUNT GENMASK(2, 0)
+#define REG_DEBUG_REGISTER_DESTUFF_COUNT GENMASK(5, 3)
+#define REG_DEBUG_REGISTER_PC_ARB BIT(6)
+#define REG_DEBUG_REGISTER_PC_CON BIT(7)
+#define REG_DEBUG_REGISTER_PC_DAT BIT(8)
+#define REG_DEBUG_REGISTER_PC_STC BIT(9)
+#define REG_DEBUG_REGISTER_PC_CRC BIT(10)
+#define REG_DEBUG_REGISTER_PC_CRCD BIT(11)
+#define REG_DEBUG_REGISTER_PC_ACK BIT(12)
+#define REG_DEBUG_REGISTER_PC_ACKD BIT(13)
+#define REG_DEBUG_REGISTER_PC_EOF BIT(14)
+#define REG_DEBUG_REGISTER_PC_INT BIT(15)
+#define REG_DEBUG_REGISTER_PC_SUSP BIT(16)
+#define REG_DEBUG_REGISTER_PC_OVR BIT(17)
+#define REG_DEBUG_REGISTER_PC_SOF BIT(18)
+
+/* YOLO_REG registers */
+#define REG_YOLO_REG_YOLO_VAL GENMASK(31, 0)
+
+/* TIMESTAMP_LOW registers */
+#define REG_TIMESTAMP_LOW_TIMESTAMP_LOW GENMASK(31, 0)
+
+/* TIMESTAMP_HIGH registers */
+#define REG_TIMESTAMP_HIGH_TIMESTAMP_HIGH GENMASK(31, 0)
+
+#endif
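
A similar sketch for the control registers: the DEVICE_ID word read during probe can be decoded with FIELD_GET(); CTUCANFD_ID comes from the driver header ctucanfd.h, and the example_* helpers are illustrative only:

#include <linux/bitfield.h>
#include <linux/types.h>

static inline bool example_core_present(u32 device_id_word)
{
	return FIELD_GET(REG_DEVICE_ID_DEVICE_ID, device_id_word) == CTUCANFD_ID;
}

static inline void example_core_version(u32 device_id_word, u8 *major, u8 *minor)
{
	*major = FIELD_GET(REG_DEVICE_ID_VER_MAJOR, device_id_word);
	*minor = FIELD_GET(REG_DEVICE_ID_VER_MINOR, device_id_word);
}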
diff --git a/drivers/net/can/ctucanfd/ctucanfd_pci.c b/drivers/net/can/ctucanfd/ctucanfd_pci.c
new file mode 100644
index 000000000000..9da09e7dd63a
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_pci.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ctucanfd.h"
+
+#ifndef PCI_DEVICE_DATA
+#define PCI_DEVICE_DATA(vend, dev, data) \
+.vendor = PCI_VENDOR_ID_##vend, \
+.device = PCI_DEVICE_ID_##vend##_##dev, \
+.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
+.driver_data = (kernel_ulong_t)(data)
+#endif
+
+#ifndef PCI_VENDOR_ID_TEDIA
+#define PCI_VENDOR_ID_TEDIA 0x1760
+#endif
+
+#ifndef PCI_DEVICE_ID_TEDIA_CTUCAN_VER21
+#define PCI_DEVICE_ID_TEDIA_CTUCAN_VER21 0xff00
+#endif
+
+#define CTUCAN_BAR0_CTUCAN_ID 0x0000
+#define CTUCAN_BAR0_CRA_BASE 0x4000
+#define CYCLONE_IV_CRA_A2P_IE (0x0050)
+
+#define CTUCAN_WITHOUT_CTUCAN_ID 0
+#define CTUCAN_WITH_CTUCAN_ID 1
+
+struct ctucan_pci_board_data {
+ void __iomem *bar0_base;
+ void __iomem *cra_base;
+ void __iomem *bar1_base;
+ struct list_head ndev_list_head;
+ int use_msi;
+};
+
+static struct ctucan_pci_board_data *ctucan_pci_get_bdata(struct pci_dev *pdev)
+{
+ return (struct ctucan_pci_board_data *)pci_get_drvdata(pdev);
+}
+
+static void ctucan_pci_set_drvdata(struct device *dev,
+ struct net_device *ndev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+ struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev);
+
+ list_add(&priv->peers_on_pdev, &bdata->ndev_list_head);
+ priv->irq_flags = IRQF_SHARED;
+}
+
+/**
+ * ctucan_pci_probe - PCI registration call
+ * @pdev: Handle to the pci device structure
+ * @ent: Pointer to the entry from ctucan_pci_tbl
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long driver_data = ent->driver_data;
+ struct ctucan_pci_board_data *bdata;
+ void __iomem *addr;
+ void __iomem *cra_addr;
+ void __iomem *bar0_base;
+ u32 cra_a2p_ie;
+ u32 ctucan_id = 0;
+ int ret;
+ unsigned int ntxbufs;
+ unsigned int num_cores = 1;
+ unsigned int core_i = 0;
+ int irq;
+ int msi_ok = 0;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(dev, "pci_enable_device FAILED\n");
+ goto err;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(dev, "pci_request_regions FAILED\n");
+ goto err_disable_device;
+ }
+
+ ret = pci_enable_msi(pdev);
+ if (!ret) {
+ dev_info(dev, "MSI enabled\n");
+ pci_set_master(pdev);
+ msi_ok = 1;
+ }
+
+ dev_info(dev, "ctucan BAR0 0x%08llx 0x%08llx\n",
+ (long long)pci_resource_start(pdev, 0),
+ (long long)pci_resource_len(pdev, 0));
+
+ dev_info(dev, "ctucan BAR1 0x%08llx 0x%08llx\n",
+ (long long)pci_resource_start(pdev, 1),
+ (long long)pci_resource_len(pdev, 1));
+
+ addr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
+ if (!addr) {
+ dev_err(dev, "PCI BAR 1 cannot be mapped\n");
+ ret = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ /* Cyclone IV PCI Express Control Registers Area */
+ bar0_base = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!bar0_base) {
+ dev_err(dev, "PCI BAR 0 cannot be mapped\n");
+ ret = -EIO;
+ goto err_pci_iounmap_bar1;
+ }
+
+ if (driver_data == CTUCAN_WITHOUT_CTUCAN_ID) {
+ cra_addr = bar0_base;
+ num_cores = 2;
+ } else {
+ cra_addr = bar0_base + CTUCAN_BAR0_CRA_BASE;
+ ctucan_id = ioread32(bar0_base + CTUCAN_BAR0_CTUCAN_ID);
+ dev_info(dev, "ctucan_id 0x%08lx\n", (unsigned long)ctucan_id);
+ num_cores = ctucan_id & 0xf;
+ }
+
+ irq = pdev->irq;
+
+ ntxbufs = 4;
+
+ bdata = kzalloc(sizeof(*bdata), GFP_KERNEL);
+ if (!bdata) {
+ ret = -ENOMEM;
+ goto err_pci_iounmap_bar0;
+ }
+
+ INIT_LIST_HEAD(&bdata->ndev_list_head);
+ bdata->bar0_base = bar0_base;
+ bdata->cra_base = cra_addr;
+ bdata->bar1_base = addr;
+ bdata->use_msi = msi_ok;
+
+ pci_set_drvdata(pdev, bdata);
+
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
+ 0, ctucan_pci_set_drvdata);
+ if (ret < 0)
+ goto err_free_board;
+
+ core_i++;
+
+ while (core_i < num_cores) {
+ addr += 0x4000;
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
+ 0, ctucan_pci_set_drvdata);
+ if (ret < 0) {
+ dev_info(dev, "CTU CAN FD core %d initialization failed\n",
+ core_i);
+ break;
+ }
+ core_i++;
+ }
+
+ /* enable interrupt in
+ * Avalon-MM to PCI Express Interrupt Enable Register
+ */
+ cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);
+ cra_a2p_ie |= 1;
+ iowrite32(cra_a2p_ie, cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
+ dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);
+
+ return 0;
+
+err_free_board:
+ pci_set_drvdata(pdev, NULL);
+ kfree(bdata);
+err_pci_iounmap_bar0:
+ pci_iounmap(pdev, cra_addr);
+err_pci_iounmap_bar1:
+ pci_iounmap(pdev, addr);
+err_release_regions:
+ if (msi_ok)
+ pci_disable_msi(pdev);
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err:
+ return ret;
+}
+
+/**
+ * ctucan_pci_remove - Unregister the device and release its resources
+ * @pdev: Handle to the pci device structure
+ *
+ * This function unregisters all the CAN cores on the board and frees
+ * the resources allocated to the device.
+ */
+static void ctucan_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev;
+ struct ctucan_priv *priv = NULL;
+ struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev);
+
+ dev_dbg(&pdev->dev, "ctucan_remove");
+
+ if (!bdata) {
+ dev_err(&pdev->dev, "%s: no list of devices\n", __func__);
+ return;
+ }
+
+ /* disable interrupt in
+ * Avalon-MM to PCI Express Interrupt Enable Register
+ */
+ if (bdata->cra_base)
+ iowrite32(0, bdata->cra_base + CYCLONE_IV_CRA_A2P_IE);
+
+ while ((priv = list_first_entry_or_null(&bdata->ndev_list_head, struct ctucan_priv,
+ peers_on_pdev)) != NULL) {
+ ndev = priv->can.dev;
+
+ unregister_candev(ndev);
+
+ netif_napi_del(&priv->napi);
+
+ list_del_init(&priv->peers_on_pdev);
+ free_candev(ndev);
+ }
+
+ pci_iounmap(pdev, bdata->bar1_base);
+
+ if (bdata->use_msi)
+ pci_disable_msi(pdev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ pci_iounmap(pdev, bdata->bar0_base);
+
+ pci_set_drvdata(pdev, NULL);
+ kfree(bdata);
+}
+
+static SIMPLE_DEV_PM_OPS(ctucan_pci_pm_ops, ctucan_suspend, ctucan_resume);
+
+static const struct pci_device_id ctucan_pci_tbl[] = {
+ {PCI_DEVICE_DATA(TEDIA, CTUCAN_VER21,
+ CTUCAN_WITH_CTUCAN_ID)},
+ {},
+};
+
+static struct pci_driver ctucan_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ctucan_pci_tbl,
+ .probe = ctucan_pci_probe,
+ .remove = ctucan_pci_remove,
+ .driver.pm = &ctucan_pci_pm_ops,
+};
+
+module_pci_driver(ctucan_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pavel Pisa <pisa@cmp.felk.cvut.cz>");
+MODULE_DESCRIPTION("CTU CAN FD for PCI bus");
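
On this board each additional CTU CAN FD core is mapped 0x4000 bytes above the previous one inside BAR 1, which is what the probe loop expresses with addr += 0x4000; a small sketch of the resulting per-core addressing, with an illustrative helper name:

static inline void __iomem *example_core_base(void __iomem *bar1_base,
					      unsigned int core_idx)
{
	/* core 0 starts at the BAR 1 base, each further core 0x4000 above it */
	return bar1_base + core_idx * 0x4000;
}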
diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c
new file mode 100644
index 000000000000..a17561d97192
--- /dev/null
+++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*******************************************************************************
+ *
+ * CTU CAN FD IP Core
+ *
+ * Copyright (C) 2015-2018 Ondrej Ille <ondrej.ille@gmail.com> FEE CTU
+ * Copyright (C) 2018-2021 Ondrej Ille <ondrej.ille@gmail.com> self-funded
+ * Copyright (C) 2018-2019 Martin Jerabek <martin.jerabek01@gmail.com> FEE CTU
+ * Copyright (C) 2018-2022 Pavel Pisa <pisa@cmp.felk.cvut.cz> FEE CTU/self-funded
+ *
+ * Project advisors:
+ * Jiri Novak <jnovak@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ *
+ * Department of Measurement (http://meas.fel.cvut.cz/)
+ * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
+ * Czech Technical University (http://www.cvut.cz/)
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "ctucanfd.h"
+
+#define DRV_NAME "ctucanfd"
+
+static void ctucan_platform_set_drvdata(struct device *dev,
+ struct net_device *ndev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+
+ platform_set_drvdata(pdev, ndev);
+}
+
+/**
+ * ctucan_platform_probe - Platform registration call
+ * @pdev: Handle to the platform device structure
+ *
+ * This function does all the memory allocation and registration for the CAN
+ * device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int ctucan_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *addr;
+ int ret;
+ unsigned int ntxbufs;
+ int irq;
+
+ /* Get the virtual base address for the device */
+ addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(addr)) {
+ ret = PTR_ERR(addr);
+ goto err;
+ }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err;
+ }
+
+	/* The number of TX buffers might change in future hardware revisions.
+	 * If so, it will be passed as a property via the device tree.
+ */
+ ntxbufs = 4;
+ ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 0,
+ 1, ctucan_platform_set_drvdata);
+
+ if (ret < 0)
+ platform_set_drvdata(pdev, NULL);
+
+err:
+ return ret;
+}
+
+/**
+ * ctucan_platform_remove - Unregister the device and release its resources
+ * @pdev: Handle to the platform device structure
+ *
+ * This function frees all the resources allocated to the device.
+ * Return: 0 always
+ */
+static int ctucan_platform_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct ctucan_priv *priv = netdev_priv(ndev);
+
+ netdev_dbg(ndev, "ctucan_remove");
+
+ unregister_candev(ndev);
+ pm_runtime_disable(&pdev->dev);
+ netif_napi_del(&priv->napi);
+ free_candev(ndev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ctucan_platform_pm_ops, ctucan_suspend, ctucan_resume);
+
+/* Match table for OF platform binding */
+static const struct of_device_id ctucan_of_match[] = {
+ { .compatible = "ctu,ctucanfd-2", },
+ { .compatible = "ctu,ctucanfd", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, ctucan_of_match);
+
+static struct platform_driver ctucanfd_driver = {
+ .probe = ctucan_platform_probe,
+ .remove = ctucan_platform_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &ctucan_platform_pm_ops,
+ .of_match_table = ctucan_of_match,
+ },
+};
+
+module_platform_driver(ctucanfd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Jerabek");
+MODULE_DESCRIPTION("CTU CAN FD for platform");
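
If the number of TX buffers is ever exported through the device tree, as the probe comment above anticipates, reading it could look roughly like the sketch below; the "ctu,ntxbufs" property name and the helper are purely hypothetical:

#include <linux/property.h>
#include <linux/types.h>

static unsigned int example_get_ntxbufs(struct device *dev)
{
	u32 ntxbufs = 4;	/* current hardware default */

	/* leaves the default untouched when the property is absent */
	device_property_read_u32(dev, "ctu,ntxbufs", &ntxbufs);

	return ntxbufs;
}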
diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
index 3e2e207861fc..633687d6b6c0 100644
--- a/drivers/net/can/dev/Makefile
+++ b/drivers/net/can/dev/Makefile
@@ -1,11 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CAN_DEV) += can-dev.o
-can-dev-y += bittiming.o
-can-dev-y += dev.o
-can-dev-y += length.o
-can-dev-y += netlink.o
-can-dev-y += rx-offload.o
-can-dev-y += skb.o
+obj-$(CONFIG_CAN_DEV) += can-dev.o
-can-dev-$(CONFIG_CAN_LEDS) += led.o
+can-dev-y += skb.o
+
+can-dev-$(CONFIG_CAN_CALC_BITTIMING) += calc_bittiming.o
+can-dev-$(CONFIG_CAN_NETLINK) += bittiming.o
+can-dev-$(CONFIG_CAN_NETLINK) += dev.o
+can-dev-$(CONFIG_CAN_NETLINK) += length.o
+can-dev-$(CONFIG_CAN_NETLINK) += netlink.o
+can-dev-$(CONFIG_CAN_RX_OFFLOAD) += rx-offload.o
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index 0509625c3082..0b93900b1dfa 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -6,221 +6,81 @@
#include <linux/can/dev.h>
-#ifdef CONFIG_CAN_CALC_BITTIMING
-#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
-
-/* Bit-timing calculation derived from:
- *
- * Code based on LinCAN sources and H8S2638 project
- * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
- * Copyright 2005 Stanislav Marek
- * email: pisa@cmp.felk.cvut.cz
- *
- * Calculates proper bit-timing parameters for a specified bit-rate
- * and sample-point, which can then be used to set the bit-timing
- * registers of the CAN controller. You can find more information
- * in the header file linux/can/netlink.h.
- */
-static int
-can_update_sample_point(const struct can_bittiming_const *btc,
- unsigned int sample_point_nominal, unsigned int tseg,
- unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
- unsigned int *sample_point_error_ptr)
+void can_sjw_set_default(struct can_bittiming *bt)
{
- unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
- unsigned int sample_point, best_sample_point = 0;
- unsigned int tseg1, tseg2;
- int i;
-
- for (i = 0; i <= 1; i++) {
- tseg2 = tseg + CAN_SYNC_SEG -
- (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
- 1000 - i;
- tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
- tseg1 = tseg - tseg2;
- if (tseg1 > btc->tseg1_max) {
- tseg1 = btc->tseg1_max;
- tseg2 = tseg - tseg1;
- }
-
- sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
- (tseg + CAN_SYNC_SEG);
- sample_point_error = abs(sample_point_nominal - sample_point);
-
- if (sample_point <= sample_point_nominal &&
- sample_point_error < best_sample_point_error) {
- best_sample_point = sample_point;
- best_sample_point_error = sample_point_error;
- *tseg1_ptr = tseg1;
- *tseg2_ptr = tseg2;
- }
- }
-
- if (sample_point_error_ptr)
- *sample_point_error_ptr = best_sample_point_error;
+ if (bt->sjw)
+ return;
- return best_sample_point;
+	/* If user space provides no sjw, default to min(phase_seg1, phase_seg2 / 2), at least 1 */
+ bt->sjw = max(1U, min(bt->phase_seg1, bt->phase_seg2 / 2));
}
-int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
+int can_sjw_check(const struct net_device *dev, const struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
{
- struct can_priv *priv = netdev_priv(dev);
- unsigned int bitrate; /* current bitrate */
- unsigned int bitrate_error; /* difference between current and nominal value */
- unsigned int best_bitrate_error = UINT_MAX;
- unsigned int sample_point_error; /* difference between current and nominal value */
- unsigned int best_sample_point_error = UINT_MAX;
- unsigned int sample_point_nominal; /* nominal sample point */
- unsigned int best_tseg = 0; /* current best value for tseg */
- unsigned int best_brp = 0; /* current best value for brp */
- unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
- u64 v64;
-
- /* Use CiA recommended sample points */
- if (bt->sample_point) {
- sample_point_nominal = bt->sample_point;
- } else {
- if (bt->bitrate > 800 * CAN_KBPS)
- sample_point_nominal = 750;
- else if (bt->bitrate > 500 * CAN_KBPS)
- sample_point_nominal = 800;
- else
- sample_point_nominal = 875;
- }
-
- /* tseg even = round down, odd = round up */
- for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
- tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
- tsegall = CAN_SYNC_SEG + tseg / 2;
-
- /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
- brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
-
- /* choose brp step which is possible in system */
- brp = (brp / btc->brp_inc) * btc->brp_inc;
- if (brp < btc->brp_min || brp > btc->brp_max)
- continue;
-
- bitrate = priv->clock.freq / (brp * tsegall);
- bitrate_error = abs(bt->bitrate - bitrate);
-
- /* tseg brp biterror */
- if (bitrate_error > best_bitrate_error)
- continue;
-
- /* reset sample point error if we have a better bitrate */
- if (bitrate_error < best_bitrate_error)
- best_sample_point_error = UINT_MAX;
-
- can_update_sample_point(btc, sample_point_nominal, tseg / 2,
- &tseg1, &tseg2, &sample_point_error);
- if (sample_point_error > best_sample_point_error)
- continue;
-
- best_sample_point_error = sample_point_error;
- best_bitrate_error = bitrate_error;
- best_tseg = tseg / 2;
- best_brp = brp;
-
- if (bitrate_error == 0 && sample_point_error == 0)
- break;
+ if (bt->sjw > btc->sjw_max) {
+ NL_SET_ERR_MSG_FMT(extack, "sjw: %u greater than max sjw: %u",
+ bt->sjw, btc->sjw_max);
+ return -EINVAL;
}
- if (best_bitrate_error) {
- /* Error in one-tenth of a percent */
- v64 = (u64)best_bitrate_error * 1000;
- do_div(v64, bt->bitrate);
- bitrate_error = (u32)v64;
- if (bitrate_error > CAN_CALC_MAX_ERROR) {
- netdev_err(dev,
- "bitrate error %d.%d%% too high\n",
- bitrate_error / 10, bitrate_error % 10);
- return -EDOM;
- }
- netdev_warn(dev, "bitrate error %d.%d%%\n",
- bitrate_error / 10, bitrate_error % 10);
+ if (bt->sjw > bt->phase_seg1) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "sjw: %u greater than phase-seg1: %u",
+ bt->sjw, bt->phase_seg1);
+ return -EINVAL;
}
- /* real sample point */
- bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
- best_tseg, &tseg1, &tseg2,
- NULL);
-
- v64 = (u64)best_brp * 1000 * 1000 * 1000;
- do_div(v64, priv->clock.freq);
- bt->tq = (u32)v64;
- bt->prop_seg = tseg1 / 2;
- bt->phase_seg1 = tseg1 - bt->prop_seg;
- bt->phase_seg2 = tseg2;
-
- /* check for sjw user settings */
- if (!bt->sjw || !btc->sjw_max) {
- bt->sjw = 1;
- } else {
- /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
- if (bt->sjw > btc->sjw_max)
- bt->sjw = btc->sjw_max;
- /* bt->sjw must not be higher than tseg2 */
- if (tseg2 < bt->sjw)
- bt->sjw = tseg2;
+ if (bt->sjw > bt->phase_seg2) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "sjw: %u greater than phase-seg2: %u",
+ bt->sjw, bt->phase_seg2);
+ return -EINVAL;
}
- bt->brp = best_brp;
-
- /* real bitrate */
- bt->bitrate = priv->clock.freq /
- (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
-
return 0;
}
-void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
- const struct can_bittiming *dbt,
- u32 *ctrlmode, u32 ctrlmode_supported)
-
-{
- if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
- return;
-
- *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
-
- /* As specified in ISO 11898-1 section 11.3.3 "Transmitter
- * delay compensation" (TDC) is only applicable if data BRP is
- * one or two.
- */
- if (dbt->brp == 1 || dbt->brp == 2) {
- /* Sample point in clock periods */
- u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
- dbt->phase_seg1) * dbt->brp;
-
- if (sample_point_in_tc < tdc_const->tdco_min)
- return;
- tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
- *ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
- }
-}
-#endif /* CONFIG_CAN_CALC_BITTIMING */
-
/* Checks the validity of the specified bit-timing parameters prop_seg,
* phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
* prescaler value brp. You can find more information in the header
* file linux/can/netlink.h.
*/
-static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
+static int can_fixup_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc,
+ struct netlink_ext_ack *extack)
{
- struct can_priv *priv = netdev_priv(dev);
- unsigned int tseg1, alltseg;
+ const unsigned int tseg1 = bt->prop_seg + bt->phase_seg1;
+ const struct can_priv *priv = netdev_priv(dev);
u64 brp64;
+ int err;
+
+ if (tseg1 < btc->tseg1_min) {
+ NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u less than tseg1-min: %u",
+ tseg1, btc->tseg1_min);
+ return -EINVAL;
+ }
+ if (tseg1 > btc->tseg1_max) {
+ NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u greater than tseg1-max: %u",
+ tseg1, btc->tseg1_max);
+ return -EINVAL;
+ }
+ if (bt->phase_seg2 < btc->tseg2_min) {
+ NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u less than tseg2-min: %u",
+ bt->phase_seg2, btc->tseg2_min);
+ return -EINVAL;
+ }
+ if (bt->phase_seg2 > btc->tseg2_max) {
+ NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u greater than tseg2-max: %u",
+ bt->phase_seg2, btc->tseg2_max);
+ return -EINVAL;
+ }
+
+ can_sjw_set_default(bt);
- tseg1 = bt->prop_seg + bt->phase_seg1;
- if (!bt->sjw)
- bt->sjw = 1;
- if (bt->sjw > btc->sjw_max ||
- tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
- bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
- return -ERANGE;
+ err = can_sjw_check(dev, bt, btc, extack);
+ if (err)
+ return err;
brp64 = (u64)priv->clock.freq * (u64)bt->tq;
if (btc->brp_inc > 1)
@@ -231,57 +91,63 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
brp64 *= btc->brp_inc;
bt->brp = (u32)brp64;
- if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
+ if (bt->brp < btc->brp_min) {
+ NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u less than brp-min: %u",
+ bt->brp, btc->brp_min);
+ return -EINVAL;
+ }
+ if (bt->brp > btc->brp_max) {
+ NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u greater than brp-max: %u",
+ bt->brp, btc->brp_max);
return -EINVAL;
+ }
- alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
- bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
- bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
+ bt->bitrate = priv->clock.freq / (bt->brp * can_bit_time(bt));
+ bt->sample_point = ((CAN_SYNC_SEG + tseg1) * 1000) / can_bit_time(bt);
+ bt->tq = DIV_U64_ROUND_CLOSEST(mul_u32_u32(bt->brp, NSEC_PER_SEC),
+ priv->clock.freq);
return 0;
}
/* Checks the validity of predefined bitrate settings */
static int
-can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
+can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *bt,
const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack)
{
- struct can_priv *priv = netdev_priv(dev);
unsigned int i;
for (i = 0; i < bitrate_const_cnt; i++) {
if (bt->bitrate == bitrate_const[i])
- break;
+ return 0;
}
- if (i >= priv->bitrate_const_cnt)
- return -EINVAL;
+ NL_SET_ERR_MSG_FMT(extack, "bitrate %u bps not supported",
+			   bt->bitrate);
- return 0;
+ return -EINVAL;
}
-int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
+int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc,
const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack)
{
- int err;
-
/* Depending on the given can_bittiming parameter structure the CAN
* timing parameters are calculated based on the provided bitrate OR
* alternatively the CAN timing parameters (tq, prop_seg, etc.) are
* provided directly which are then checked and fixed up.
*/
if (!bt->tq && bt->bitrate && btc)
- err = can_calc_bittiming(dev, bt, btc);
- else if (bt->tq && !bt->bitrate && btc)
- err = can_fixup_bittiming(dev, bt, btc);
- else if (!bt->tq && bt->bitrate && bitrate_const)
- err = can_validate_bitrate(dev, bt, bitrate_const,
- bitrate_const_cnt);
- else
- err = -EINVAL;
-
- return err;
+ return can_calc_bittiming(dev, bt, btc, extack);
+ if (bt->tq && !bt->bitrate && btc)
+ return can_fixup_bittiming(dev, bt, btc, extack);
+ if (!bt->tq && bt->bitrate && bitrate_const)
+ return can_validate_bitrate(dev, bt, bitrate_const,
+ bitrate_const_cnt, extack);
+
+ return -EINVAL;
}
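
A worked example of the fixed-up timing arithmetic above, assuming an 8 MHz core clock and user-supplied tq = 250 ns, prop_seg = 6, phase_seg1 = 7, phase_seg2 = 2 with no sjw:

/*
 * sjw          = max(1, min(7, 2 / 2))    = 1
 * brp          = 8 MHz * 250 ns / 10^9    = 2
 * bit time     = SYNC(1) + 6 + 7 + 2      = 16 tq
 * bitrate      = 8 MHz / (2 * 16)         = 250 kbit/s
 * sample point = (1 + 6 + 7) * 1000 / 16  = 875, i.e. 87.5 %
 */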
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
new file mode 100644
index 000000000000..3809c148fb88
--- /dev/null
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ */
+
+#include <linux/units.h>
+#include <linux/can/dev.h>
+
+#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+
+/* Bit-timing calculation derived from:
+ *
+ * Code based on LinCAN sources and H8S2638 project
+ * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
+ * Copyright 2005 Stanislav Marek
+ * email: pisa@cmp.felk.cvut.cz
+ *
+ * Calculates proper bit-timing parameters for a specified bit-rate
+ * and sample-point, which can then be used to set the bit-timing
+ * registers of the CAN controller. You can find more information
+ * in the header file linux/can/netlink.h.
+ */
+static int
+can_update_sample_point(const struct can_bittiming_const *btc,
+ const unsigned int sample_point_nominal, const unsigned int tseg,
+ unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
+ unsigned int *sample_point_error_ptr)
+{
+ unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
+ unsigned int sample_point, best_sample_point = 0;
+ unsigned int tseg1, tseg2;
+ int i;
+
+ for (i = 0; i <= 1; i++) {
+ tseg2 = tseg + CAN_SYNC_SEG -
+ (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
+ 1000 - i;
+ tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
+ tseg1 = tseg - tseg2;
+ if (tseg1 > btc->tseg1_max) {
+ tseg1 = btc->tseg1_max;
+ tseg2 = tseg - tseg1;
+ }
+
+ sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
+ (tseg + CAN_SYNC_SEG);
+ sample_point_error = abs(sample_point_nominal - sample_point);
+
+ if (sample_point <= sample_point_nominal &&
+ sample_point_error < best_sample_point_error) {
+ best_sample_point = sample_point;
+ best_sample_point_error = sample_point_error;
+ *tseg1_ptr = tseg1;
+ *tseg2_ptr = tseg2;
+ }
+ }
+
+ if (sample_point_error_ptr)
+ *sample_point_error_ptr = best_sample_point_error;
+
+ return best_sample_point;
+}
+
+int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ unsigned int bitrate; /* current bitrate */
+ unsigned int bitrate_error; /* difference between current and nominal value */
+ unsigned int best_bitrate_error = UINT_MAX;
+ unsigned int sample_point_error; /* difference between current and nominal value */
+ unsigned int best_sample_point_error = UINT_MAX;
+ unsigned int sample_point_nominal; /* nominal sample point */
+ unsigned int best_tseg = 0; /* current best value for tseg */
+ unsigned int best_brp = 0; /* current best value for brp */
+ unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
+ u64 v64;
+ int err;
+
+ /* Use CiA recommended sample points */
+ if (bt->sample_point) {
+ sample_point_nominal = bt->sample_point;
+ } else {
+ if (bt->bitrate > 800 * KILO /* BPS */)
+ sample_point_nominal = 750;
+ else if (bt->bitrate > 500 * KILO /* BPS */)
+ sample_point_nominal = 800;
+ else
+ sample_point_nominal = 875;
+ }
+
+ /* tseg even = round down, odd = round up */
+ for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
+ tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
+ tsegall = CAN_SYNC_SEG + tseg / 2;
+
+ /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
+ brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
+
+ /* choose brp step which is possible in system */
+ brp = (brp / btc->brp_inc) * btc->brp_inc;
+ if (brp < btc->brp_min || brp > btc->brp_max)
+ continue;
+
+ bitrate = priv->clock.freq / (brp * tsegall);
+ bitrate_error = abs(bt->bitrate - bitrate);
+
+ /* tseg brp biterror */
+ if (bitrate_error > best_bitrate_error)
+ continue;
+
+ /* reset sample point error if we have a better bitrate */
+ if (bitrate_error < best_bitrate_error)
+ best_sample_point_error = UINT_MAX;
+
+ can_update_sample_point(btc, sample_point_nominal, tseg / 2,
+ &tseg1, &tseg2, &sample_point_error);
+ if (sample_point_error >= best_sample_point_error)
+ continue;
+
+ best_sample_point_error = sample_point_error;
+ best_bitrate_error = bitrate_error;
+ best_tseg = tseg / 2;
+ best_brp = brp;
+
+ if (bitrate_error == 0 && sample_point_error == 0)
+ break;
+ }
+
+ if (best_bitrate_error) {
+ /* Error in one-tenth of a percent */
+ v64 = (u64)best_bitrate_error * 1000;
+ do_div(v64, bt->bitrate);
+ bitrate_error = (u32)v64;
+ if (bitrate_error > CAN_CALC_MAX_ERROR) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "bitrate error: %u.%u%% too high",
+ bitrate_error / 10, bitrate_error % 10);
+ return -EINVAL;
+ }
+ NL_SET_ERR_MSG_FMT(extack,
+ "bitrate error: %u.%u%%",
+ bitrate_error / 10, bitrate_error % 10);
+ }
+
+ /* real sample point */
+ bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
+ best_tseg, &tseg1, &tseg2,
+ NULL);
+
+ v64 = (u64)best_brp * 1000 * 1000 * 1000;
+ do_div(v64, priv->clock.freq);
+ bt->tq = (u32)v64;
+ bt->prop_seg = tseg1 / 2;
+ bt->phase_seg1 = tseg1 - bt->prop_seg;
+ bt->phase_seg2 = tseg2;
+
+ can_sjw_set_default(bt);
+
+ err = can_sjw_check(dev, bt, btc, extack);
+ if (err)
+ return err;
+
+ bt->brp = best_brp;
+
+ /* real bitrate */
+ bt->bitrate = priv->clock.freq /
+ (bt->brp * can_bit_time(bt));
+
+ return 0;
+}
+
+void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 *ctrlmode, u32 ctrlmode_supported)
+
+{
+ if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
+ return;
+
+ *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+
+ /* As specified in ISO 11898-1 section 11.3.3 "Transmitter
+ * delay compensation" (TDC) is only applicable if data BRP is
+ * one or two.
+ */
+ if (dbt->brp == 1 || dbt->brp == 2) {
+ /* Sample point in clock periods */
+ u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
+
+ if (sample_point_in_tc < tdc_const->tdco_min)
+ return;
+ tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
+ *ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
+ }
+}
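
A worked example of the TDC auto computation above, with illustrative data-phase values of dbrp = 1, dprop_seg = 14 and dphase_seg1 = 5:

/*
 * sample_point_in_tc = (SYNC(1) + 14 + 5) * 1 = 20 clock periods
 *
 * Provided 20 lies within tdco_min..tdco_max and the controller
 * advertises CAN_CTRLMODE_TDC_AUTO, can_calc_tdco() programs
 * tdc->tdco = 20 and sets CAN_CTRLMODE_TDC_AUTO in *ctrlmode.
 */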
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index e3d840b81357..7f9334a8af50 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -4,7 +4,6 @@
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
@@ -14,16 +13,9 @@
#include <linux/can/can-ml.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
-#include <linux/can/led.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#define MOD_DESC "CAN device driver interface"
-
-MODULE_DESCRIPTION(MOD_DESC);
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-
static void can_update_state_error_stats(struct net_device *dev,
enum can_state new_state)
{
@@ -136,7 +128,6 @@ EXPORT_SYMBOL_GPL(can_change_state);
static void can_restart(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
struct can_frame *cf;
int err;
@@ -155,10 +146,7 @@ static void can_restart(struct net_device *dev)
cf->can_id |= CAN_ERR_RESTARTED;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
- netif_rx_ni(skb);
+ netif_rx(skb);
restart:
netdev_dbg(dev, "restarted\n");
@@ -300,6 +288,7 @@ EXPORT_SYMBOL_GPL(free_candev);
int can_change_mtu(struct net_device *dev, int new_mtu)
{
struct can_priv *priv = netdev_priv(dev);
+ u32 ctrlmode_static = can_get_static_ctrlmode(priv);
/* Do not allow changing the MTU while running */
if (dev->flags & IFF_UP)
@@ -309,7 +298,7 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
switch (new_mtu) {
case CAN_MTU:
/* 'CANFD-only' controllers can not switch to CAN_MTU */
- if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
+ if (ctrlmode_static & CAN_CTRLMODE_FD)
return -EINVAL;
priv->ctrlmode &= ~CAN_CTRLMODE_FD;
@@ -318,7 +307,7 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
case CANFD_MTU:
/* check for potential CANFD ability */
if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
- !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
+ !(ctrlmode_static & CAN_CTRLMODE_FD))
return -EINVAL;
priv->ctrlmode |= CAN_CTRLMODE_FD;
@@ -333,6 +322,56 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
}
EXPORT_SYMBOL_GPL(can_change_mtu);
+/* generic implementation of netdev_ops::ndo_eth_ioctl for CAN devices
+ * supporting hardware timestamps
+ */
+int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config hwts_cfg = { 0 };
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP: /* set */
+ if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
+ return -EFAULT;
+ if (hwts_cfg.tx_type == HWTSTAMP_TX_ON &&
+ hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+ return -ERANGE;
+
+ case SIOCGHWTSTAMP: /* get */
+ hwts_cfg.tx_type = HWTSTAMP_TX_ON;
+ hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL(can_eth_ioctl_hwts);
+
+/* generic implementation of ethtool_ops::get_ts_info for CAN devices
+ * supporting hardware timestamps
+ */
+int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+EXPORT_SYMBOL(can_ethtool_op_get_ts_info_hwts);
+
/* Common open function when the device gets opened.
*
* This function should be called in the open function of the device
@@ -459,6 +498,18 @@ static int can_get_termination(struct net_device *ndev)
return 0;
}
+static bool
+can_bittiming_const_valid(const struct can_bittiming_const *btc)
+{
+ if (!btc)
+ return true;
+
+ if (!btc->sjw_max)
+ return false;
+
+ return true;
+}
+
/* Register the CAN network device */
int register_candev(struct net_device *dev)
{
@@ -479,6 +530,15 @@ int register_candev(struct net_device *dev)
if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
return -EINVAL;
+ /* We only support either fixed bit rates or bit timing const. */
+ if ((priv->bitrate_const || priv->data_bitrate_const) &&
+ (priv->bittiming_const || priv->data_bittiming_const))
+ return -EINVAL;
+
+ if (!can_bittiming_const_valid(priv->bittiming_const) ||
+ !can_bittiming_const_valid(priv->data_bittiming_const))
+ return -EINVAL;
+
if (!priv->termination_const) {
err = can_get_termination(dev);
if (err)
@@ -515,11 +575,9 @@ static __init int can_dev_init(void)
{
int err;
- can_led_notifier_init();
-
err = can_netlink_register();
if (!err)
- pr_info(MOD_DESC "\n");
+ pr_info("CAN device driver interface\n");
return err;
}
@@ -528,8 +586,6 @@ module_init(can_dev_init);
static __exit void can_dev_exit(void)
{
can_netlink_unregister();
-
- can_led_notifier_exit();
}
module_exit(can_dev_exit);
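
A minimal sketch of how a timestamp-capable CAN driver might hook up the two new generic helpers; everything except can_eth_ioctl_hwts() and can_ethtool_op_get_ts_info_hwts() is a placeholder:

#include <linux/can/dev.h>
#include <linux/ethtool.h>

static const struct net_device_ops example_netdev_ops = {
	/* ... the driver's usual ndo_open/ndo_stop/ndo_start_xmit ... */
	.ndo_eth_ioctl	= can_eth_ioctl_hwts,	/* SIOC[GS]HWTSTAMP */
};

static const struct ethtool_ops example_ethtool_ops = {
	.get_ts_info	= can_ethtool_op_get_ts_info_hwts,
};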
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 95cca4e5251f..036d85ef07f5 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -21,6 +21,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
[IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
[IFLA_CAN_TDC] = { .type = NLA_NESTED },
+ [IFLA_CAN_CTRLMODE_EXT] = { .type = NLA_NESTED },
};
static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
@@ -35,10 +36,24 @@ static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
[IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 },
};
+static int can_validate_bittiming(const struct can_bittiming *bt,
+ struct netlink_ext_ack *extack)
+{
+ /* sample point is in one-tenth of a percent */
+ if (bt->sample_point >= 1000) {
+ NL_SET_ERR_MSG(extack, "sample point must be between 0 and 100%");
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int can_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
bool is_can_fd = false;
+ int err;
/* Make sure that valid CAN FD configurations always consist of
* - nominal/arbitration bittiming
@@ -50,6 +65,15 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
if (!data)
return 0;
+ if (data[IFLA_CAN_BITTIMING]) {
+ struct can_bittiming bt;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+ err = can_validate_bittiming(&bt, extack);
+ if (err)
+ return err;
+ }
+
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
@@ -70,7 +94,6 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
*/
if (data[IFLA_CAN_TDC]) {
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
- int err;
err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
data[IFLA_CAN_TDC],
@@ -101,6 +124,15 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
return -EOPNOTSUPP;
}
+ if (data[IFLA_CAN_DATA_BITTIMING]) {
+ struct can_bittiming bt;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(bt));
+ err = can_validate_bittiming(&bt, extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -175,20 +207,23 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
* directly via do_set_bitrate(). Bail out if neither
* is given.
*/
- if (!priv->bittiming_const && !priv->do_set_bittiming)
+ if (!priv->bittiming_const && !priv->do_set_bittiming &&
+ !priv->bitrate_const)
return -EOPNOTSUPP;
memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
err = can_get_bittiming(dev, &bt,
priv->bittiming_const,
priv->bitrate_const,
- priv->bitrate_const_cnt);
+ priv->bitrate_const_cnt,
+ extack);
if (err)
return err;
if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
+ NL_SET_ERR_MSG_FMT(extack,
+ "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps",
+ bt.bitrate, priv->bitrate_max);
return -EINVAL;
}
@@ -211,7 +246,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
if (dev->flags & IFF_UP)
return -EBUSY;
cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- ctrlstatic = priv->ctrlmode_static;
+ ctrlstatic = can_get_static_ctrlmode(priv);
maskedflags = cm->flags & cm->mask;
/* check whether provided bits are allowed to be passed */
@@ -277,7 +312,8 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
* directly via do_set_bitrate(). Bail out if neither
* is given.
*/
- if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
+ if (!priv->data_bittiming_const && !priv->do_set_data_bittiming &&
+ !priv->data_bitrate_const)
return -EOPNOTSUPP;
memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
@@ -285,13 +321,15 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
err = can_get_bittiming(dev, &dbt,
priv->data_bittiming_const,
priv->data_bitrate_const,
- priv->data_bitrate_const_cnt);
+ priv->data_bitrate_const_cnt,
+ extack);
if (err)
return err;
if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
+ NL_SET_ERR_MSG_FMT(extack,
+ "CANFD data bitrate %u bps surpasses transceiver capabilities of %u bps",
+ dbt.bitrate, priv->bitrate_max);
return -EINVAL;
}
@@ -383,6 +421,12 @@ static size_t can_tdc_get_size(const struct net_device *dev)
return size;
}
+static size_t can_ctrlmode_ext_get_size(void)
+{
+ return nla_total_size(0) + /* nest IFLA_CAN_CTRLMODE_EXT */
+ nla_total_size(sizeof(u32)); /* IFLA_CAN_CTRLMODE_SUPPORTED */
+}
+
static size_t can_get_size(const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -415,6 +459,7 @@ static size_t can_get_size(const struct net_device *dev)
priv->data_bitrate_const_cnt);
size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */
+ size += can_ctrlmode_ext_get_size(); /* IFLA_CAN_CTRLMODE_EXT */
return size;
}
@@ -472,6 +517,25 @@ err_cancel:
return -EMSGSIZE;
}
+static int can_ctrlmode_ext_fill_info(struct sk_buff *skb,
+ const struct can_priv *priv)
+{
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, IFLA_CAN_CTRLMODE_EXT);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, IFLA_CAN_CTRLMODE_SUPPORTED,
+ priv->ctrlmode_supported)) {
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+}
+
static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -482,7 +546,8 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (priv->do_get_state)
priv->do_get_state(dev, &state);
- if ((priv->bittiming.bitrate &&
+ if ((priv->bittiming.bitrate != CAN_BITRATE_UNSET &&
+ priv->bittiming.bitrate != CAN_BITRATE_UNKNOWN &&
nla_put(skb, IFLA_CAN_BITTIMING,
sizeof(priv->bittiming), &priv->bittiming)) ||
@@ -531,7 +596,9 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
sizeof(priv->bitrate_max),
&priv->bitrate_max)) ||
- (can_tdc_fill_info(skb, dev))
+ can_tdc_fill_info(skb, dev) ||
+
+ can_ctrlmode_ext_fill_info(skb, priv)
)
return -EMSGSIZE;
diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index 37b0cc65237b..81ebf0562c89 100644
--- a/drivers/net/can/dev/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -54,8 +54,11 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
struct can_frame *cf = (struct can_frame *)skb->data;
work_done++;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_ERR_FLAG)) {
+ stats->rx_packets++;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
+ }
netif_receive_skb(skb);
}
@@ -67,8 +70,6 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
napi_reschedule(&offload->napi);
}
- can_led_event(offload->dev, CAN_LED_EVENT_RX);
-
return work_done;
}
@@ -218,7 +219,7 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
-int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
struct sk_buff *skb, u32 timestamp)
{
struct can_rx_offload_cb *cb;
@@ -237,7 +238,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
return 0;
}
-EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp);
unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
unsigned int idx, u32 timestamp,
@@ -246,14 +247,14 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
struct net_device *dev = offload->dev;
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
- u8 len;
+ unsigned int len;
int err;
skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
return 0;
- err = can_rx_offload_queue_sorted(offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(offload, skb, timestamp);
if (err) {
stats->rx_errors++;
stats->tx_fifo_errors++;
@@ -328,13 +329,14 @@ static int can_rx_offload_init_queue(struct net_device *dev,
{
offload->dev = dev;
- /* Limit queue len to 4x the weight (rounted to next power of two) */
+ /* Limit queue len to 4x the weight (rounded to next power of two) */
offload->skb_queue_len_max = 2 << fls(weight);
offload->skb_queue_len_max *= 4;
skb_queue_head_init(&offload->skb_queue);
__skb_queue_head_init(&offload->skb_irq_queue);
- netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
+ netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll,
+ weight);
dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
__func__, offload->skb_queue_len_max);
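
The rename above (can_rx_offload_queue_sorted() -> can_rx_offload_queue_timestamp()) only changes the name; the sorted-by-timestamp insertion behaviour stays. A minimal sketch of the call-site pattern in a timestamp-capable driver follows; my_priv and my_handle_bus_error are placeholder names, not code from this series:

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

struct my_priv {
	struct can_priv can;		/* must stay the first member */
	struct can_rx_offload offload;
};

static void my_handle_bus_error(struct net_device *dev, u32 timestamp)
{
	struct my_priv *priv = netdev_priv(dev);
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_err_skb(dev, &cf);
	if (!skb)
		return;

	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	/* queued sorted by hardware timestamp; NAPI delivers it in order */
	if (can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp))
		dev->stats.rx_fifo_errors++;
}
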
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
index 61660248c69e..241ec636e91f 100644
--- a/drivers/net/can/dev/skb.c
+++ b/drivers/net/can/dev/skb.c
@@ -5,6 +5,13 @@
*/
#include <linux/can/dev.h>
+#include <linux/module.h>
+
+#define MOD_DESC "CAN device driver interface"
+
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
/* Local echo of CAN messages
*
@@ -64,6 +71,9 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
/* save frame_len to reuse it when transmission is completed */
can_skb_prv(skb)->frame_len = frame_len;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
skb_tx_timestamp(skb);
/* save this skb for tx interrupt echo handling */
@@ -80,8 +90,8 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
EXPORT_SYMBOL_GPL(can_put_echo_skb);
struct sk_buff *
-__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
- unsigned int *frame_len_ptr)
+__can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *len_ptr, unsigned int *frame_len_ptr)
{
struct can_priv *priv = netdev_priv(dev);
@@ -97,13 +107,12 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
*/
struct sk_buff *skb = priv->echo_skb[idx];
struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
- struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)
+ skb_tstamp_tx(skb, skb_hwtstamps(skb));
/* get the real payload length for netdev statistics */
- if (cf->can_id & CAN_RTR_FLAG)
- *len_ptr = 0;
- else
- *len_ptr = cf->len;
+ *len_ptr = can_skb_get_data_len(skb);
if (frame_len_ptr)
*frame_len_ptr = can_skb_priv->frame_len;
@@ -133,7 +142,7 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
unsigned int *frame_len_ptr)
{
struct sk_buff *skb;
- u8 len;
+ unsigned int len;
skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
@@ -177,6 +186,20 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx,
}
EXPORT_SYMBOL_GPL(can_free_echo_skb);
+/* fill common values for CAN sk_buffs */
+static void init_can_skb_reserve(struct sk_buff *skb)
+{
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->skbcnt = 0;
+}
+
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
@@ -190,16 +213,8 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
}
skb->protocol = htons(ETH_P_CAN);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
+ init_can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
*cf = skb_put_zero(skb, sizeof(struct can_frame));
@@ -221,23 +236,51 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
}
skb->protocol = htons(ETH_P_CANFD);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
+ init_can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
*cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
+ /* set CAN FD flag by default */
+ (*cfd)->flags = CANFD_FDF;
+
return skb;
}
EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+struct sk_buff *alloc_canxl_skb(struct net_device *dev,
+ struct canxl_frame **cxl,
+ unsigned int data_len)
+{
+ struct sk_buff *skb;
+
+ if (data_len < CANXL_MIN_DLEN || data_len > CANXL_MAX_DLEN)
+ goto out_error;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ CANXL_HDR_SIZE + data_len);
+ if (unlikely(!skb))
+ goto out_error;
+
+ skb->protocol = htons(ETH_P_CANXL);
+ init_can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
+ *cxl = skb_put_zero(skb, CANXL_HDR_SIZE + data_len);
+
+ /* set CAN XL flag and length information by default */
+ (*cxl)->flags = CANXL_XLF;
+ (*cxl)->len = data_len;
+
+ return skb;
+
+out_error:
+ *cxl = NULL;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(alloc_canxl_skb);
+
struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
@@ -252,3 +295,75 @@ struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
return skb;
}
EXPORT_SYMBOL_GPL(alloc_can_err_skb);
+
+/* Check for outgoing skbs that have not been created by the CAN subsystem */
+static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb)
+{
+ /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
+ if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
+ return false;
+
+ /* af_packet does not apply CAN skb specific settings */
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ /* init headroom */
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* perform proper loopback on capable devices */
+ if (dev->flags & IFF_ECHO)
+ skb->pkt_type = PACKET_LOOPBACK;
+ else
+ skb->pkt_type = PACKET_HOST;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ /* set CANFD_FDF flag for CAN FD frames */
+ if (can_is_canfd_skb(skb)) {
+ struct canfd_frame *cfd;
+
+ cfd = (struct canfd_frame *)skb->data;
+ cfd->flags |= CANFD_FDF;
+ }
+ }
+
+ return true;
+}
+
+/* Drop a given socket buffer if it does not contain a valid CAN frame. */
+bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ switch (ntohs(skb->protocol)) {
+ case ETH_P_CAN:
+ if (!can_is_can_skb(skb))
+ goto inval_skb;
+ break;
+
+ case ETH_P_CANFD:
+ if (!can_is_canfd_skb(skb))
+ goto inval_skb;
+ break;
+
+ case ETH_P_CANXL:
+ if (!can_is_canxl_skb(skb))
+ goto inval_skb;
+ break;
+
+ default:
+ goto inval_skb;
+ }
+
+ if (!can_skb_headroom_valid(dev, skb))
+ goto inval_skb;
+
+ return false;
+
+inval_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return true;
+}
+EXPORT_SYMBOL_GPL(can_dropped_invalid_skb);
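
With alloc_canxl_skb() and the protocol-aware can_dropped_invalid_skb() in place, a driver TX path can accept Classical CAN, CAN FD and CAN XL skbs uniformly. A minimal sketch of that pattern, with the my_* names as placeholders and the hardware programming elided:

#include <linux/can/dev.h>
#include <linux/netdevice.h>

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* frees the skb and bumps tx_dropped on malformed frames */
	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	if (can_is_canxl_skb(skb)) {
		struct canxl_frame *cxl = (struct canxl_frame *)skb->data;

		/* cxl->len carries the real data length for CAN XL */
		netdev_dbg(dev, "XL frame, %u data bytes\n", cxl->len);
		/* ... program the CAN XL mailbox here ... */
	} else {
		struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

		/* covers both Classical CAN and CAN FD frames */
		netdev_dbg(dev, "CAN(-FD) frame, %u data bytes\n", cfd->len);
		/* ... program the CAN/CAN FD mailbox here ... */
	}

	return NETDEV_TX_OK;
}

The driver hunks further down switch their xmit paths to can_dev_dropped_skb(), which also filters out transmissions attempted in listen-only mode before falling through to the check above.
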
diff --git a/drivers/net/can/flexcan/Makefile b/drivers/net/can/flexcan/Makefile
new file mode 100644
index 000000000000..89d5695c902e
--- /dev/null
+++ b/drivers/net/can/flexcan/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
+
+flexcan-objs :=
+flexcan-objs += flexcan-core.o
+flexcan-objs += flexcan-ethtool.o
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan/flexcan-core.c
index 12b60ad95b02..6d638c93977b 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -14,8 +14,6 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
-#include <linux/can/rx-offload.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/firmware/imx/sci.h>
@@ -33,6 +31,8 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include "flexcan.h"
+
#define DRV_NAME "flexcan"
/* 8 for RX fifo and 2 error handling */
@@ -173,9 +173,9 @@
/* FLEXCAN interrupt flag register (IFLAG) bits */
/* Errata ERR005829 step7: Reserve first valid MB */
-#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8
-#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0
-#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
+#define FLEXCAN_TX_MB_RESERVED_RX_FIFO 8
+#define FLEXCAN_TX_MB_RESERVED_RX_MAILBOX 0
+#define FLEXCAN_RX_MB_RX_MAILBOX_FIRST (FLEXCAN_TX_MB_RESERVED_RX_MAILBOX + 1)
#define FLEXCAN_IFLAG_MB(x) BIT_ULL(x)
#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
@@ -206,53 +206,6 @@
#define FLEXCAN_TIMEOUT_US (250)
-/* FLEXCAN hardware feature flags
- *
- * Below is some version info we got:
- * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode MB
- * Filter? connected? Passive detection ption in MB Supported?
- * MCF5441X FlexCAN2 ? no yes no no yes no 16
- * MX25 FlexCAN2 03.00.00.00 no no no no no no 64
- * MX28 FlexCAN2 03.00.04.00 yes yes no no no no 64
- * MX35 FlexCAN2 03.00.00.00 no no no no no no 64
- * MX53 FlexCAN2 03.00.00.00 yes no no no no no 64
- * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no 64
- * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes 64
- * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes 64
- * VF610 FlexCAN3 ? no yes no yes yes? no 64
- * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no 64
- * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes 64
- *
- * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
- */
-
-/* [TR]WRN_INT not connected */
-#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1)
- /* Disable RX FIFO Global mask */
-#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2)
-/* Enable EACEN and RRS bit in ctrl2 */
-#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3)
-/* Disable non-correctable errors interrupt and freeze mode */
-#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4)
-/* Use timestamp based offloading */
-#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5)
-/* No interrupt for error passive */
-#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6)
-/* default to BE register access */
-#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7)
-/* Setup stop mode with GPR to support wakeup */
-#define FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR BIT(8)
-/* Support CAN-FD mode */
-#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9)
-/* support memory detection and correction */
-#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10)
-/* Setup stop mode with SCU firmware to support wakeup */
-#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11)
-/* Setup 3 separate interrupts, main, boff and err */
-#define FLEXCAN_QUIRK_NR_IRQ_3 BIT(12)
-/* Setup 16 mailboxes */
-#define FLEXCAN_QUIRK_NR_MB_16 BIT(13)
-
/* Structure of the message buffer */
struct flexcan_mb {
u32 can_ctrl;
@@ -339,106 +292,90 @@ struct flexcan_regs {
static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8);
-struct flexcan_devtype_data {
- u32 quirks; /* quirks needed for different IP cores */
-};
-
-struct flexcan_stop_mode {
- struct regmap *gpr;
- u8 req_gpr;
- u8 req_bit;
-};
-
-struct flexcan_priv {
- struct can_priv can;
- struct can_rx_offload offload;
- struct device *dev;
-
- struct flexcan_regs __iomem *regs;
- struct flexcan_mb __iomem *tx_mb;
- struct flexcan_mb __iomem *tx_mb_reserved;
- u8 tx_mb_idx;
- u8 mb_count;
- u8 mb_size;
- u8 clk_src; /* clock source of CAN Protocol Engine */
- u8 scu_idx;
-
- u64 rx_mask;
- u64 tx_mask;
- u32 reg_ctrl_default;
-
- struct clk *clk_ipg;
- struct clk *clk_per;
- const struct flexcan_devtype_data *devtype_data;
- struct regulator *reg_xceiver;
- struct flexcan_stop_mode stm;
-
- int irq_boff;
- int irq_err;
-
- /* IPC handle when setup stop mode by System Controller firmware(scfw) */
- struct imx_sc_ipc *sc_ipc_handle;
-
- /* Read and Write APIs */
- u32 (*read)(void __iomem *addr);
- void (*write)(u32 val, void __iomem *addr);
-};
-
static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16,
+ FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16 |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN,
+ FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx25_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
- FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
- .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+ .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR,
+ FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW,
+ FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
- FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC,
+ FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
+};
+
+static struct flexcan_devtype_data fsl_imx93_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_AUTO_STOP_MODE |
+ FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
- FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC,
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_SUPPORT_FD |
- FLEXCAN_QUIRK_SUPPORT_ECC,
+ FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD |
+ FLEXCAN_QUIRK_SUPPORT_ECC |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -600,13 +537,18 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
priv->write(reg_mcr, &regs->mcr);
/* enable stop request */
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
ret = flexcan_stop_mode_enable_scfw(priv, true);
if (ret < 0)
return ret;
- } else {
+ } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+ } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) {
+ /* For the auto stop mode, software does nothing; the hardware covers
+ * all the operations automatically after the system goes into low power mode.
+ */
+ return 0;
}
return flexcan_low_power_enter_ack(priv);
@@ -619,11 +561,11 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
int ret;
/* remove stop request */
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) {
ret = flexcan_stop_mode_enable_scfw(priv, false);
if (ret < 0)
return ret;
- } else {
+ } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 0);
}
@@ -632,6 +574,12 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
priv->write(reg_mcr, &regs->mcr);
+ /* For the auto stop mode, the hardware will exit stop mode
+ * automatically after the system goes out of low power mode.
+ */
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)
+ return 0;
+
return flexcan_low_power_exit_ack(priv);
}
@@ -794,11 +742,9 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
const struct flexcan_priv *priv = netdev_priv(dev);
int err;
- err = pm_runtime_get_sync(priv->dev);
- if (err < 0) {
- pm_runtime_put_noidle(priv->dev);
+ err = pm_runtime_resume_and_get(priv->dev);
+ if (err < 0)
return err;
- }
err = __flexcan_get_berr_counter(dev, bec);
@@ -816,7 +762,7 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | ((can_fd_len2dlc(cfd->len)) << 16);
int i;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
@@ -916,7 +862,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
if (tx_errors)
dev->stats.tx_errors++;
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
dev->stats.rx_fifo_errors++;
}
@@ -963,7 +909,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
if (unlikely(new_state == CAN_STATE_BUS_OFF))
can_bus_off(dev);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
dev->stats.rx_fifo_errors++;
}
@@ -1015,14 +961,9 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
u32 reg_ctrl, reg_id, reg_iflag1;
int i;
- if (unlikely(drop)) {
- skb = ERR_PTR(-ENOBUFS);
- goto mark_as_read;
- }
-
mb = flexcan_get_mb(priv, n);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
u32 code;
do {
@@ -1048,6 +989,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
reg_ctrl = priv->read(&mb->can_ctrl);
}
+ if (unlikely(drop)) {
+ skb = ERR_PTR(-ENOBUFS);
+ goto mark_as_read;
+ }
+
if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
skb = alloc_canfd_skb(offload->dev, &cfd);
else
@@ -1087,7 +1033,7 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
}
mark_as_read:
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX)
flexcan_write64(priv, FLEXCAN_IFLAG_MB(n), &regs->iflag1);
else
priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
@@ -1113,7 +1059,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
enum can_state last_state = priv->can.state;
/* reception interrupt */
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
u64 reg_iflag_rx;
int ret;
@@ -1154,7 +1100,6 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
can_rx_offload_get_echo_skb(&priv->offload, 0,
reg_ctrl << 16, NULL);
stats->tx_packets++;
- can_led_event(dev, CAN_LED_EVENT_TX);
/* after sending a RTR frame MB is in RX mode */
priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
@@ -1173,7 +1118,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
/* state change interrupt or broken error state quirk fix is enabled */
if ((reg_esr & FLEXCAN_ESR_ERR_STATE) ||
- (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+ (priv->devtype_data.quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
flexcan_irq_state(dev, reg_esr);
@@ -1195,11 +1140,11 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
* (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled
*/
if ((last_state != priv->can.state) &&
- (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
+ (priv->devtype_data.quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
switch (priv->can.state) {
case CAN_STATE_ERROR_ACTIVE:
- if (priv->devtype_data->quirks &
+ if (priv->devtype_data.quirks &
FLEXCAN_QUIRK_BROKEN_WERR_STATE)
flexcan_error_irq_enable(priv);
else
@@ -1423,26 +1368,26 @@ static int flexcan_rx_offload_setup(struct net_device *dev)
else
priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_MB_16)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_MB_16)
priv->mb_count = 16;
else
priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) +
(sizeof(priv->regs->mb[1]) / priv->mb_size);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX)
priv->tx_mb_reserved =
- flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP);
+ flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_RX_MAILBOX);
else
priv->tx_mb_reserved =
- flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_FIFO);
+ flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_RX_FIFO);
priv->tx_mb_idx = priv->mb_count - 1;
priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx);
priv->tx_mask = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
priv->offload.mailbox_read = flexcan_mailbox_read;
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
+ priv->offload.mb_first = FLEXCAN_RX_MB_RX_MAILBOX_FIRST;
priv->offload.mb_last = priv->mb_count - 2;
priv->rx_mask = GENMASK_ULL(priv->offload.mb_last,
@@ -1506,7 +1451,7 @@ static int flexcan_chip_start(struct net_device *dev)
if (err)
goto out_chip_disable;
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_ECC)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_ECC)
flexcan_ram_init(dev);
flexcan_set_bittiming(dev);
@@ -1532,10 +1477,10 @@ static int flexcan_chip_start(struct net_device *dev)
/* MCR
*
* FIFO:
- * - disable for timestamp mode
+ * - disable for mailbox mode
* - enable for FIFO mode
*/
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX)
reg_mcr &= ~FLEXCAN_MCR_FEN;
else
reg_mcr |= FLEXCAN_MCR_FEN;
@@ -1586,7 +1531,7 @@ static int flexcan_chip_start(struct net_device *dev)
* on most Flexcan cores, too. Otherwise we don't get
* any error warning or passive interrupts.
*/
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
else
@@ -1599,7 +1544,7 @@ static int flexcan_chip_start(struct net_device *dev)
netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
priv->write(reg_ctrl, &regs->ctrl);
- if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
+ if ((priv->devtype_data.quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
reg_ctrl2 = priv->read(&regs->ctrl2);
reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS;
priv->write(reg_ctrl2, &regs->ctrl2);
@@ -1631,7 +1576,7 @@ static int flexcan_chip_start(struct net_device *dev)
priv->write(reg_fdctrl, &regs->fdctrl);
}
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
mb = flexcan_get_mb(priv, i);
priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
@@ -1639,7 +1584,7 @@ static int flexcan_chip_start(struct net_device *dev)
}
} else {
/* clear and invalidate unused mailboxes first */
- for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < priv->mb_count; i++) {
+ for (i = FLEXCAN_TX_MB_RESERVED_RX_FIFO; i < priv->mb_count; i++) {
mb = flexcan_get_mb(priv, i);
priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
&mb->can_ctrl);
@@ -1659,7 +1604,7 @@ static int flexcan_chip_start(struct net_device *dev)
priv->write(0x0, &regs->rx14mask);
priv->write(0x0, &regs->rx15mask);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
priv->write(0x0, &regs->rxfgmask);
/* clear acceptance filters */
@@ -1673,7 +1618,7 @@ static int flexcan_chip_start(struct net_device *dev)
* This also works around errata e5295 which generates false
* positive memory errors and put the device in freeze mode.
*/
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_MECR) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_DISABLE_MECR) {
/* Follow the protocol as described in "Detection
* and Correction of Memory Errors" to write to
* MECR register (step 1 - 5)
@@ -1771,11 +1716,9 @@ static int flexcan_open(struct net_device *dev)
return -EINVAL;
}
- err = pm_runtime_get_sync(priv->dev);
- if (err < 0) {
- pm_runtime_put_noidle(priv->dev);
+ err = pm_runtime_resume_and_get(priv->dev);
+ if (err < 0)
return err;
- }
err = open_candev(dev);
if (err)
@@ -1799,7 +1742,7 @@ static int flexcan_open(struct net_device *dev)
if (err)
goto out_can_rx_offload_disable;
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
err = request_irq(priv->irq_boff,
flexcan_irq, IRQF_SHARED, dev->name, dev);
if (err)
@@ -1813,8 +1756,6 @@ static int flexcan_open(struct net_device *dev)
flexcan_chip_interrupts_enable(dev);
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
netif_start_queue(dev);
return 0;
@@ -1845,7 +1786,7 @@ static int flexcan_close(struct net_device *dev)
netif_stop_queue(dev);
flexcan_chip_interrupts_disable(dev);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
free_irq(priv->irq_err, dev);
free_irq(priv->irq_boff, dev);
}
@@ -1860,8 +1801,6 @@ static int flexcan_close(struct net_device *dev)
pm_runtime_put(priv->dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -2051,10 +1990,12 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
priv = netdev_priv(dev);
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW)
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW)
ret = flexcan_setup_stop_mode_scfw(pdev);
- else if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
+ else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
ret = flexcan_setup_stop_mode_gpr(pdev);
+ else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)
+ ret = 0;
else
/* return 0 directly if doesn't support stop mode feature */
return 0;
@@ -2073,6 +2014,7 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, },
{ .compatible = "fsl,imx8mp-flexcan", .data = &fsl_imx8mp_devtype_data, },
+ { .compatible = "fsl,imx93-flexcan", .data = &fsl_imx93_devtype_data, },
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
{ .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, },
@@ -2164,8 +2106,25 @@ static int flexcan_probe(struct platform_device *pdev)
return -ENODEV;
if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) &&
- !(devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)) {
- dev_err(&pdev->dev, "CAN-FD mode doesn't work with FIFO mode!\n");
+ !((devtype_data->quirks &
+ (FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO)) ==
+ (FLEXCAN_QUIRK_USE_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR))) {
+ dev_err(&pdev->dev, "CAN-FD mode doesn't work in RX-FIFO mode!\n");
+ return -EINVAL;
+ }
+
+ if ((devtype_data->quirks &
+ (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) ==
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR) {
+ dev_err(&pdev->dev,
+ "Quirks (0x%08x) inconsistent: RX_MAILBOX_RX supported but not RX_MAILBOX\n",
+ devtype_data->quirks);
return -EINVAL;
}
@@ -2177,13 +2136,15 @@ static int flexcan_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
dev->netdev_ops = &flexcan_netdev_ops;
+ dev->ethtool_ops = &flexcan_ethtool_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
priv = netdev_priv(dev);
+ priv->devtype_data = *devtype_data;
if (of_property_read_bool(pdev->dev.of_node, "big-endian") ||
- devtype_data->quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) {
+ priv->devtype_data.quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) {
priv->read = flexcan_read_be;
priv->write = flexcan_write_be;
} else {
@@ -2202,10 +2163,9 @@ static int flexcan_probe(struct platform_device *pdev)
priv->clk_ipg = clk_ipg;
priv->clk_per = clk_per;
priv->clk_src = clk_src;
- priv->devtype_data = devtype_data;
priv->reg_xceiver = reg_xceiver;
- if (devtype_data->quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) {
priv->irq_boff = platform_get_irq(pdev, 1);
if (priv->irq_boff <= 0) {
err = -ENODEV;
@@ -2218,7 +2178,7 @@ static int flexcan_probe(struct platform_device *pdev)
}
}
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
CAN_CTRLMODE_FD_NON_ISO;
priv->can.bittiming_const = &flexcan_fd_bittiming_const;
@@ -2240,13 +2200,11 @@ static int flexcan_probe(struct platform_device *pdev)
err = flexcan_setup_stop_mode(pdev);
if (err < 0) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "setup stop mode failed\n");
+ dev_err_probe(&pdev->dev, err, "setup stop mode failed\n");
goto failed_setup_stop_mode;
}
of_can_transceiver(dev);
- devm_can_led_init(dev);
return 0;
@@ -2364,8 +2322,16 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device)
if (netif_running(dev)) {
int err;
- if (device_may_wakeup(device))
+ if (device_may_wakeup(device)) {
flexcan_enable_wakeup_irq(priv, true);
+ /* For auto stop mode, the clock needs to stay on before the
+ * system goes into low power mode. Once the system is in low
+ * power mode, the hardware configures the flexcan into stop
+ * mode and gates off the clock automatically.
+ */
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)
+ return 0;
+ }
err = pm_runtime_force_suspend(device);
if (err)
@@ -2383,9 +2349,15 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
if (netif_running(dev)) {
int err;
- err = pm_runtime_force_resume(device);
- if (err)
- return err;
+ /* For wakeup in auto stop mode, there is no need to gate the
+ * clock on here; the hardware does this automatically.
+ */
+ if (!(device_may_wakeup(device) &&
+ priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)) {
+ err = pm_runtime_force_resume(device);
+ if (err)
+ return err;
+ }
if (device_may_wakeup(device))
flexcan_enable_wakeup_irq(priv, false);
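
A note on the priv->devtype_data->quirks to priv->devtype_data.quirks churn above: the per-device copy made in probe is what allows quirk bits to be changed at runtime without touching the shared const devtype tables. Roughly, and with a hypothetical helper name:

static void my_set_rx_mailbox_mode(struct flexcan_priv *priv, bool enable)
{
	/* mutates only this device's copy; fsl_*_devtype_data stay const */
	if (enable)
		priv->devtype_data.quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX;
	else
		priv->devtype_data.quirks &= ~FLEXCAN_QUIRK_USE_RX_MAILBOX;
}

The ethtool private flag handling in the next file relies on exactly this.
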
diff --git a/drivers/net/can/flexcan/flexcan-ethtool.c b/drivers/net/can/flexcan/flexcan-ethtool.c
new file mode 100644
index 000000000000..50e86b2da532
--- /dev/null
+++ b/drivers/net/can/flexcan/flexcan-ethtool.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ * Copyright (c) 2022 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ */
+
+#include <linux/can/dev.h>
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "flexcan.h"
+
+static const char flexcan_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define FLEXCAN_PRIV_FLAGS_RX_RTR BIT(0)
+ "rx-rtr",
+};
+
+static void
+flexcan_get_ringparam(struct net_device *ndev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *ext_ack)
+{
+ const struct flexcan_priv *priv = netdev_priv(ndev);
+
+ ring->rx_max_pending = priv->mb_count;
+ ring->tx_max_pending = priv->mb_count;
+
+ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX)
+ ring->rx_pending = priv->offload.mb_last -
+ priv->offload.mb_first + 1;
+ else
+ ring->rx_pending = 6; /* RX-FIFO depth is fixed */
+
+ /* the driver currently supports only one TX buffer */
+ ring->tx_pending = 1;
+}
+
+static void
+flexcan_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, flexcan_priv_flags_strings,
+ sizeof(flexcan_priv_flags_strings));
+ }
+}
+
+static u32 flexcan_get_priv_flags(struct net_device *ndev)
+{
+ const struct flexcan_priv *priv = netdev_priv(ndev);
+ u32 priv_flags = 0;
+
+ if (flexcan_active_rx_rtr(priv))
+ priv_flags |= FLEXCAN_PRIV_FLAGS_RX_RTR;
+
+ return priv_flags;
+}
+
+static int flexcan_set_priv_flags(struct net_device *ndev, u32 priv_flags)
+{
+ struct flexcan_priv *priv = netdev_priv(ndev);
+ u32 quirks = priv->devtype_data.quirks;
+
+ if (priv_flags & FLEXCAN_PRIV_FLAGS_RX_RTR) {
+ if (flexcan_supports_rx_mailbox_rtr(priv))
+ quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX;
+ else if (flexcan_supports_rx_fifo(priv))
+ quirks &= ~FLEXCAN_QUIRK_USE_RX_MAILBOX;
+ else
+ quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX;
+ } else {
+ if (flexcan_supports_rx_mailbox(priv))
+ quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX;
+ else
+ quirks &= ~FLEXCAN_QUIRK_USE_RX_MAILBOX;
+ }
+
+ if (quirks != priv->devtype_data.quirks && netif_running(ndev))
+ return -EBUSY;
+
+ priv->devtype_data.quirks = quirks;
+
+ if (!(priv_flags & FLEXCAN_PRIV_FLAGS_RX_RTR) &&
+ !flexcan_active_rx_rtr(priv))
+ netdev_info(ndev,
+ "Activating RX mailbox mode, cannot receive RTR frames.\n");
+
+ return 0;
+}
+
+static int flexcan_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(flexcan_priv_flags_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+const struct ethtool_ops flexcan_ethtool_ops = {
+ .get_ringparam = flexcan_get_ringparam,
+ .get_strings = flexcan_get_strings,
+ .get_priv_flags = flexcan_get_priv_flags,
+ .set_priv_flags = flexcan_set_priv_flags,
+ .get_sset_count = flexcan_get_sset_count,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
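
For completeness, the "rx-rtr" private flag defined above is reachable through the standard ethtool channels. Below is a minimal userspace sketch using the legacy SIOCETHTOOL ioctl, equivalent to what the ethtool CLI does; it assumes bit 0 maps to flexcan_priv_flags_strings[0], and flexcan_set_rx_rtr() is a made-up helper:

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

/* sock is any datagram socket, e.g. socket(AF_INET, SOCK_DGRAM, 0) */
static int flexcan_set_rx_rtr(int sock, const char *ifname, int on)
{
	struct ethtool_value eval = { .cmd = ETHTOOL_GPFLAGS };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eval;

	if (ioctl(sock, SIOCETHTOOL, &ifr) < 0)	/* read current flags */
		return -1;

	eval.cmd = ETHTOOL_SPFLAGS;
	if (on)
		eval.data |= 0x1;		/* bit 0: "rx-rtr" */
	else
		eval.data &= ~0x1u;

	/* the kernel side above returns -EBUSY if the change requires a
	 * quirk flip while the interface is running
	 */
	return ioctl(sock, SIOCETHTOOL, &ifr);
}
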
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
new file mode 100644
index 000000000000..91402977780b
--- /dev/null
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * flexcan.h - FLEXCAN CAN controller driver
+ *
+ * Copyright (c) 2005-2006 Varma Electronics Oy
+ * Copyright (c) 2009 Sascha Hauer, Pengutronix
+ * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2014 David Jander, Protonic Holland
+ * Copyright (C) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ *
+ * Based on code originally by Andrey Volkov <avolkov@varma-el.com>
+ *
+ */
+
+#ifndef _FLEXCAN_H
+#define _FLEXCAN_H
+
+#include <linux/can/rx-offload.h>
+
+/* FLEXCAN hardware feature flags
+ *
+ * Below is some version info we got:
+ * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode MB
+ * Filter? connected? Passive detection ption in MB Supported?
+ * MCF5441X FlexCAN2 ? no yes no no no no 16
+ * MX25 FlexCAN2 03.00.00.00 no no no no no no 64
+ * MX28 FlexCAN2 03.00.04.00 yes yes no no no no 64
+ * MX35 FlexCAN2 03.00.00.00 no no no no no no 64
+ * MX53 FlexCAN2 03.00.00.00 yes no no no no no 64
+ * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no 64
+ * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes 64
+ * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes 64
+ * VF610 FlexCAN3 ? no yes no yes yes? no 64
+ * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no 64
+ * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes 64
+ *
+ * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
+ */
+
+/* [TR]WRN_INT not connected */
+#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1)
+ /* Disable RX FIFO Global mask */
+#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2)
+/* Enable EACEN and RRS bit in ctrl2 */
+#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3)
+/* Disable non-correctable errors interrupt and freeze mode */
+#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4)
+/* Use mailboxes (not FIFO) for RX path */
+#define FLEXCAN_QUIRK_USE_RX_MAILBOX BIT(5)
+/* No interrupt for error passive */
+#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6)
+/* default to BE register access */
+#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7)
+/* Setup stop mode with GPR to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR BIT(8)
+/* Support CAN-FD mode */
+#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9)
+/* support memory detection and correction */
+#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10)
+/* Setup stop mode with SCU firmware to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW BIT(11)
+/* Setup 3 separate interrupts, main, boff and err */
+#define FLEXCAN_QUIRK_NR_IRQ_3 BIT(12)
+/* Setup 16 mailboxes */
+#define FLEXCAN_QUIRK_NR_MB_16 BIT(13)
+/* Device supports RX via mailboxes */
+#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX BIT(14)
+/* Device supports RTR reception via mailboxes */
+#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15)
+/* Device supports RX via FIFO */
+#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
+/* auto enter stop mode to support wakeup */
+#define FLEXCAN_QUIRK_AUTO_STOP_MODE BIT(17)
+
+struct flexcan_devtype_data {
+ u32 quirks; /* quirks needed for different IP cores */
+};
+
+struct flexcan_stop_mode {
+ struct regmap *gpr;
+ u8 req_gpr;
+ u8 req_bit;
+};
+
+struct flexcan_priv {
+ struct can_priv can;
+ struct can_rx_offload offload;
+ struct device *dev;
+
+ struct flexcan_regs __iomem *regs;
+ struct flexcan_mb __iomem *tx_mb;
+ struct flexcan_mb __iomem *tx_mb_reserved;
+ u8 tx_mb_idx;
+ u8 mb_count;
+ u8 mb_size;
+ u8 clk_src; /* clock source of CAN Protocol Engine */
+ u8 scu_idx;
+
+ u64 rx_mask;
+ u64 tx_mask;
+ u32 reg_ctrl_default;
+
+ struct clk *clk_ipg;
+ struct clk *clk_per;
+ struct flexcan_devtype_data devtype_data;
+ struct regulator *reg_xceiver;
+ struct flexcan_stop_mode stm;
+
+ int irq_boff;
+ int irq_err;
+
+ /* IPC handle when setup stop mode by System Controller firmware(scfw) */
+ struct imx_sc_ipc *sc_ipc_handle;
+
+ /* Read and Write APIs */
+ u32 (*read)(void __iomem *addr);
+ void (*write)(u32 val, void __iomem *addr);
+};
+
+extern const struct ethtool_ops flexcan_ethtool_ops;
+
+static inline bool
+flexcan_supports_rx_mailbox(const struct flexcan_priv *priv)
+{
+ const u32 quirks = priv->devtype_data.quirks;
+
+ return quirks & FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX;
+}
+
+static inline bool
+flexcan_supports_rx_mailbox_rtr(const struct flexcan_priv *priv)
+{
+ const u32 quirks = priv->devtype_data.quirks;
+
+ return (quirks & (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) ==
+ (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR);
+}
+
+static inline bool
+flexcan_supports_rx_fifo(const struct flexcan_priv *priv)
+{
+ const u32 quirks = priv->devtype_data.quirks;
+
+ return quirks & FLEXCAN_QUIRK_SUPPORT_RX_FIFO;
+}
+
+static inline bool
+flexcan_active_rx_rtr(const struct flexcan_priv *priv)
+{
+ const u32 quirks = priv->devtype_data.quirks;
+
+ if (quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
+ if (quirks & FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)
+ return true;
+ } else {
+ /* RX-FIFO is always RTR capable */
+ return true;
+ }
+
+ return false;
+}
+
+
+#endif /* _FLEXCAN_H */
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 78e27940b2af..4bedcc3eea0d 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/can/dev.h>
#include <linux/spinlock.h>
@@ -241,13 +242,14 @@ struct grcan_device_config {
.rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \
}
-#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100
+#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 4100
#define GRLIB_VERSION_MASK 0xffff
/* GRCAN private data structure */
struct grcan_priv {
struct can_priv can; /* must be the first member */
struct net_device *dev;
+ struct device *ofdev_dev;
struct napi_struct napi;
struct grcan_registers __iomem *regs; /* ioremap'ed registers */
@@ -255,7 +257,6 @@ struct grcan_priv {
struct grcan_dma dma;
struct sk_buff **echo_skb; /* We allocate this on our own */
- u8 *txdlc; /* Length of queued frames */
/* The echo skb pointer, pointing into echo_skb and indicating which
* frames can be echoed back. See the "Notes on the tx cyclic buffer
@@ -515,9 +516,7 @@ static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
if (echo) {
/* Normal echo of messages */
stats->tx_packets++;
- stats->tx_bytes += priv->txdlc[i];
- priv->txdlc[i] = 0;
- can_get_echo_skb(dev, i, NULL);
+ stats->tx_bytes += can_get_echo_skb(dev, i, NULL);
} else {
/* For cleanup of untransmitted messages */
can_free_echo_skb(dev, i, NULL);
@@ -673,6 +672,7 @@ static void grcan_err(struct net_device *dev, u32 sources, u32 status)
/* There are no others at this point */
break;
}
+ cf.can_id |= CAN_ERR_CNT;
cf.data[6] = txerr;
cf.data[7] = rxerr;
priv->can.state = state;
@@ -924,7 +924,7 @@ static void grcan_free_dma_buffers(struct net_device *dev)
struct grcan_priv *priv = netdev_priv(dev);
struct grcan_dma *dma = &priv->dma;
- dma_free_coherent(&dev->dev, dma->base_size, dma->base_buf,
+ dma_free_coherent(priv->ofdev_dev, dma->base_size, dma->base_buf,
dma->base_handle);
memset(dma, 0, sizeof(*dma));
}
@@ -949,7 +949,7 @@ static int grcan_allocate_dma_buffers(struct net_device *dev,
/* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */
dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT;
- dma->base_buf = dma_alloc_coherent(&dev->dev,
+ dma->base_buf = dma_alloc_coherent(priv->ofdev_dev,
dma->base_size,
&dma->base_handle,
GFP_KERNEL);
@@ -1062,16 +1062,10 @@ static int grcan_open(struct net_device *dev)
priv->can.echo_skb_max = dma->tx.size;
priv->can.echo_skb = priv->echo_skb;
- priv->txdlc = kcalloc(dma->tx.size, sizeof(*priv->txdlc), GFP_KERNEL);
- if (!priv->txdlc) {
- err = -ENOMEM;
- goto exit_free_echo_skb;
- }
-
/* Get can device up */
err = open_candev(dev);
if (err)
- goto exit_free_txdlc;
+ goto exit_free_echo_skb;
err = request_irq(dev->irq, grcan_interrupt, IRQF_SHARED,
dev->name, dev);
@@ -1093,8 +1087,6 @@ static int grcan_open(struct net_device *dev)
exit_close_candev:
close_candev(dev);
-exit_free_txdlc:
- kfree(priv->txdlc);
exit_free_echo_skb:
kfree(priv->echo_skb);
exit_free_dma_buffers:
@@ -1113,8 +1105,10 @@ static int grcan_close(struct net_device *dev)
priv->closing = true;
if (priv->need_txbug_workaround) {
+ spin_unlock_irqrestore(&priv->lock, flags);
del_timer_sync(&priv->hang_timer);
del_timer_sync(&priv->rr_timer);
+ spin_lock_irqsave(&priv->lock, flags);
}
netif_stop_queue(dev);
grcan_stop_hardware(dev);
@@ -1129,12 +1123,11 @@ static int grcan_close(struct net_device *dev)
priv->can.echo_skb_max = 0;
priv->can.echo_skb = NULL;
kfree(priv->echo_skb);
- kfree(priv->txdlc);
return 0;
}
-static int grcan_transmit_catch_up(struct net_device *dev, int budget)
+static void grcan_transmit_catch_up(struct net_device *dev)
{
struct grcan_priv *priv = netdev_priv(dev);
unsigned long flags;
@@ -1142,7 +1135,7 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
spin_lock_irqsave(&priv->lock, flags);
- work_done = catch_up_echo_skb(dev, budget, true);
+ work_done = catch_up_echo_skb(dev, -1, true);
if (work_done) {
if (!priv->resetting && !priv->closing &&
!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
@@ -1156,8 +1149,6 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
}
spin_unlock_irqrestore(&priv->lock, flags);
-
- return work_done;
}
static int grcan_receive(struct net_device *dev, int budget)
@@ -1211,11 +1202,11 @@ static int grcan_receive(struct net_device *dev, int budget)
shift = GRCAN_MSG_DATA_SHIFT(i);
cf->data[i] = (u8)(slot[j] >> shift);
}
- }
- /* Update statistics and read pointer */
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_receive_skb(skb);
rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
@@ -1239,19 +1230,13 @@ static int grcan_poll(struct napi_struct *napi, int budget)
struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
- int tx_work_done, rx_work_done;
- int rx_budget = budget / 2;
- int tx_budget = budget - rx_budget;
+ int work_done;
- /* Half of the budget for receiving messages */
- rx_work_done = grcan_receive(dev, rx_budget);
+ work_done = grcan_receive(dev, budget);
- /* Half of the budget for transmitting messages as that can trigger echo
- * frames being received
- */
- tx_work_done = grcan_transmit_catch_up(dev, tx_budget);
+ grcan_transmit_catch_up(dev);
- if (rx_work_done < rx_budget && tx_work_done < tx_budget) {
+ if (work_done < budget) {
napi_complete(napi);
/* Guarantee no interference with a running reset that otherwise
@@ -1268,7 +1253,7 @@ static int grcan_poll(struct napi_struct *napi, int budget)
spin_unlock_irqrestore(&priv->lock, flags);
}
- return rx_work_done + tx_work_done;
+ return work_done;
}
/* Work tx bug by waiting while for the risky situation to clear. If that fails,
@@ -1360,7 +1345,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
unsigned long flags;
u32 oneshotmode = priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
/* Trying to transmit in silent mode will generate error interrupts, but
@@ -1447,7 +1432,6 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb,
* can_put_echo_skb would be an error unless other measures are
* taken.
*/
- priv->txdlc[slotindex] = cf->len; /* Store dlc for statistics */
can_put_echo_skb(skb, dev, slotindex, 0);
/* Make sure everything is written before allowing hardware to
@@ -1578,6 +1562,10 @@ static const struct net_device_ops grcan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops grcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static int grcan_setup_netdev(struct platform_device *ofdev,
void __iomem *base,
int irq, u32 ambafreq, bool txbug)
@@ -1594,12 +1582,14 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
dev->irq = irq;
dev->flags |= IFF_ECHO;
dev->netdev_ops = &grcan_netdev_ops;
+ dev->ethtool_ops = &grcan_ethtool_ops;
dev->sysfs_groups[0] = &sysfs_grcan_group;
priv = netdev_priv(dev);
memcpy(&priv->config, &grcan_module_config,
sizeof(struct grcan_device_config));
priv->dev = dev;
+ priv->ofdev_dev = &ofdev->dev;
priv->regs = base;
priv->can.bittiming_const = &grcan_bittiming_const;
priv->can.do_set_bittiming = grcan_set_bittiming;
@@ -1626,7 +1616,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
timer_setup(&priv->hang_timer, grcan_initiate_running_reset, 0);
}
- netif_napi_add(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT);
+ netif_napi_add_weight(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT);
SET_NETDEV_DEV(dev, &ofdev->dev);
dev_info(&ofdev->dev, "regs=0x%p, irq=%d, clock=%d\n",
@@ -1652,6 +1642,7 @@ exit_free_candev:
static int grcan_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
+ struct device_node *sysid_parent;
u32 sysid, ambafreq;
int irq, err;
void __iomem *base;
@@ -1660,10 +1651,14 @@ static int grcan_probe(struct platform_device *ofdev)
/* Compare GRLIB version number with the first that does not
* have the tx bug (see start_xmit)
*/
- err = of_property_read_u32(np, "systemid", &sysid);
- if (!err && ((sysid & GRLIB_VERSION_MASK)
- >= GRCAN_TXBUG_SAFE_GRLIB_VERSION))
- txbug = false;
+ sysid_parent = of_find_node_by_path("/ambapp0");
+ if (sysid_parent) {
+ err = of_property_read_u32(sysid_parent, "systemid", &sysid);
+ if (!err && ((sysid & GRLIB_VERSION_MASK) >=
+ GRCAN_TXBUG_SAFE_GRLIB_VERSION))
+ txbug = false;
+ of_node_put(sysid_parent);
+ }
err = of_property_read_u32(np, "freq", &ambafreq);
if (err) {
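
The grcan txdlc removal works because can_get_echo_skb() now returns the echoed frame's data length, so the generic TX-completion bookkeeping collapses to a couple of lines. A minimal sketch of that pattern, with my_tx_complete as a placeholder name:

#include <linux/can/dev.h>

static void my_tx_complete(struct net_device *dev, unsigned int idx)
{
	struct net_device_stats *stats = &dev->stats;

	/* returns the data length of the echoed frame, 0 for RTR frames */
	stats->tx_bytes += can_get_echo_skb(dev, idx, NULL);
	stats->tx_packets++;
}
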
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 5bb957a26bc6..07eaf724a572 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -309,15 +310,15 @@ static void ifi_canfd_read_fifo(struct net_device *ndev)
*(u32 *)(cf->data + i) =
readl(priv->base + IFI_CANFD_RXFIFO_DATA + i);
}
+
+ stats->rx_bytes += cf->len;
}
+ stats->rx_packets++;
/* Remove the packet from FIFO */
writel(IFI_CANFD_RXSTCMD_REMOVE_MSG, priv->base + IFI_CANFD_RXSTCMD);
writel(rx_irq_mask, priv->base + IFI_CANFD_INTERRUPT);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
netif_receive_skb(skb);
}
@@ -345,9 +346,6 @@ static int ifi_canfd_do_rx_poll(struct net_device *ndev, int quota)
rxst = readl(priv->base + IFI_CANFD_RXSTCMD);
}
- if (pkts)
- can_led_event(ndev, CAN_LED_EVENT_RX);
-
return pkts;
}
@@ -430,8 +428,6 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev)
priv->base + IFI_CANFD_INTERRUPT);
writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -456,7 +452,6 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
enum can_state new_state)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
struct can_berr_counter bec;
@@ -498,7 +493,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
switch (new_state) {
case CAN_STATE_ERROR_WARNING:
/* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -507,7 +502,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
break;
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if (bec.txerr > 127)
cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
@@ -522,8 +517,6 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
break;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_receive_skb(skb);
return 1;
@@ -631,7 +624,6 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) {
stats->tx_bytes += can_get_echo_skb(ndev, 0, NULL);
stats->tx_packets++;
- can_led_event(ndev, CAN_LED_EVENT_TX);
}
if (isr & tx_irq_mask)
@@ -835,7 +827,6 @@ static int ifi_canfd_open(struct net_device *ndev)
ifi_canfd_start(ndev);
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -858,8 +849,6 @@ static int ifi_canfd_close(struct net_device *ndev)
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -871,7 +860,7 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb,
u32 txst, txid, txdlc;
int i;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
/* Check if the TX buffer is full */
@@ -937,6 +926,10 @@ static const struct net_device_ops ifi_canfd_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ifi_canfd_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static int ifi_canfd_plat_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -974,12 +967,13 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
ndev->irq = irq;
ndev->flags |= IFF_ECHO; /* we support local echo */
ndev->netdev_ops = &ifi_canfd_netdev_ops;
+ ndev->ethtool_ops = &ifi_canfd_ethtool_ops;
priv = netdev_priv(ndev);
priv->ndev = ndev;
priv->base = addr;
- netif_napi_add(ndev, &priv->napi, ifi_canfd_poll, 64);
+ netif_napi_add(ndev, &priv->napi, ifi_canfd_poll);
priv->can.state = CAN_STATE_STOPPED;
@@ -1009,8 +1003,6 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
goto err_reg;
}
- devm_can_led_init(ndev);
-
dev_info(dev, "Driver registered: regs=%p, irq=%d, clock=%d\n",
priv->base, ndev->irq, priv->can.clock.freq);
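
The NAPI registration change seen here reflects the newer netif_napi_add() signature, which takes the default weight implicitly; drivers that deliberately keep a non-default weight use netif_napi_add_weight() instead, as the grcan and ican3 hunks do. A minimal sketch, with my_poll/my_register_napi as placeholder names:

static int my_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... receive up to "budget" frames, counting them in work_done ... */
	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}

static void my_register_napi(struct net_device *dev, struct napi_struct *napi)
{
	netif_napi_add(dev, napi, my_poll);	/* default weight */
	/* or, to keep an explicit weight: */
	/* netif_napi_add_weight(dev, napi, my_poll, 16); */
}
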
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 32006dbf5abd..0732a5092141 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
@@ -1127,7 +1128,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
/* bus error interrupt */
if (isrc == CEVTIND_BEI) {
mod->can.can_stats.bus_error++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;
switch (ecc & ECC_MASK) {
case ECC_BIT:
@@ -1153,7 +1154,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
if (state != mod->can.state && (state == CAN_STATE_ERROR_WARNING ||
state == CAN_STATE_ERROR_PASSIVE)) {
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
if (state == CAN_STATE_ERROR_WARNING) {
mod->can.can_stats.error_warning++;
cf->data[1] = (txerr > rxerr) ?
@@ -1277,6 +1278,8 @@ static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
if (!skb)
return;
+ skb_tx_timestamp(skb);
+
/* save this skb for tx interrupt echo handling */
skb_queue_tail(&mod->echoq, skb);
}
@@ -1285,7 +1288,7 @@ static unsigned int ican3_get_echo_skb(struct ican3_dev *mod)
{
struct sk_buff *skb = skb_dequeue(&mod->echoq);
struct can_frame *cf;
- u8 dlc;
+ u8 dlc = 0;
/* this should never trigger unless there is a driver bug */
if (!skb) {
@@ -1294,7 +1297,8 @@ static unsigned int ican3_get_echo_skb(struct ican3_dev *mod)
}
cf = (struct can_frame *)skb->data;
- dlc = cf->len;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ dlc = cf->len;
/* check flag whether this packet has to be looped back */
if (skb->pkt_type != PACKET_LOOPBACK) {
@@ -1421,7 +1425,8 @@ static int ican3_recv_skb(struct ican3_dev *mod)
/* update statistics, receive the skb */
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
netif_receive_skb(skb);
err_noalloc:
@@ -1688,7 +1693,7 @@ static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
void __iomem *desc_addr;
unsigned long flags;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
spin_lock_irqsave(&mod->lock, flags);
@@ -1750,6 +1755,10 @@ static const struct net_device_ops ican3_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ican3_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/*
* Low-level CAN Device
*/
@@ -1908,7 +1917,7 @@ static int ican3_probe(struct platform_device *pdev)
mod = netdev_priv(ndev);
mod->ndev = ndev;
mod->num = pdata->modno;
- netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
+ netif_napi_add_weight(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
skb_queue_head_init(&mod->echoq);
spin_lock_init(&mod->lock);
init_completion(&mod->termination_comp);
@@ -1921,6 +1930,7 @@ static int ican3_probe(struct platform_device *pdev)
mod->free_page = DPM_FREE_START;
ndev->netdev_ops = &ican3_netdev_ops;
+ ndev->ethtool_ops = &ican3_ethtool_ops;
ndev->flags |= IFF_ECHO;
SET_NETDEV_DEV(ndev, &pdev->dev);
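
The janz-ican3 hunks above start setting CAN_ERR_CNT on error frames, which tells userspace that data[6]/data[7] of the frame carry the current TX/RX error counters. A hedged, self-contained userspace sketch of consuming that flag (interface name and error mask are illustrative):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>
#include <linux/can/error.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct ifreq ifr;
	struct can_frame cf;
	can_err_mask_t mask = CAN_ERR_MASK;	/* subscribe to all error classes */
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0)
		return 1;

	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "can0");
	ioctl(s, SIOCGIFINDEX, &ifr);
	addr.can_ifindex = ifr.ifr_ifindex;

	setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER, &mask, sizeof(mask));
	bind(s, (struct sockaddr *)&addr, sizeof(addr));

	while (read(s, &cf, sizeof(cf)) == sizeof(cf)) {
		if ((cf.can_id & CAN_ERR_FLAG) && (cf.can_id & CAN_ERR_CNT))
			printf("txerr=%u rxerr=%u\n", cf.data[6], cf.data[7]);
	}
	close(s);
	return 0;
}
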
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index eb74cdf26b88..53e8a914c88b 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/can/dev.h>
#include <linux/timer.h>
@@ -328,12 +329,9 @@ MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
{
u32 res;
- int ret;
-
- ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
- res, res & msk, 0, 10);
- return ret;
+ return readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
+ res, res & msk, 0, 10);
}
static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
@@ -774,7 +772,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
int nwords;
u8 count;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);
@@ -919,10 +917,15 @@ static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
static const struct net_device_ops kvaser_pciefd_netdev_ops = {
.ndo_open = kvaser_pciefd_open,
.ndo_stop = kvaser_pciefd_stop,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_start_xmit = kvaser_pciefd_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops kvaser_pciefd_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
int i;
@@ -939,6 +942,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
can = netdev_priv(netdev);
netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
+ netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops;
can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
i * KVASER_PCIEFD_KCAN_BASE_OFFSET;
@@ -1185,20 +1189,21 @@ static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
cf->len = can_fd_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
- if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
+ if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) {
cf->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cf->data, data, cf->len);
+ stats->rx_bytes += cf->len;
+ }
+ stats->rx_packets++;
+
shhwtstamps = skb_hwtstamps(skb);
shhwtstamps->hwtstamp =
ns_to_ktime(div_u64(p->timestamp * 1000,
pcie->freq_to_ticks_div));
- stats->rx_bytes += cf->len;
- stats->rx_packets++;
-
return netif_rx(skb);
}
@@ -1305,14 +1310,11 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
shhwtstamps->hwtstamp =
ns_to_ktime(div_u64(p->timestamp * 1000,
can->kv_pcie->freq_to_ticks_div));
- cf->can_id |= CAN_ERR_BUSERROR;
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
netif_rx(skb);
return 0;
}
@@ -1510,8 +1512,6 @@ static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
if (skb) {
cf->can_id |= CAN_ERR_BUSERROR;
- stats->rx_bytes += cf->len;
- stats->rx_packets++;
netif_rx(skb);
} else {
stats->rx_dropped++;
@@ -1907,7 +1907,6 @@ static void kvaser_pciefd_remove(struct pci_dev *pdev)
free_irq(pcie->pci->irq, pcie);
- pci_clear_master(pdev);
pci_iounmap(pdev, pcie->reg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
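
Unlike the drivers that only report software timestamps, kvaser_pciefd wires in the CAN-core helpers for hardware timestamping and fills skb_hwtstamps() on receive. A hedged sketch of that hookup (bar_* names are placeholders; the helpers are the ones used in the hunks above, intended for controllers whose hardware timestamping is always active):

#include <linux/can/dev.h>
#include <linux/ethtool.h>
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static const struct net_device_ops bar_netdev_ops = {
	/* .ndo_open / .ndo_stop / .ndo_start_xmit elided */
	.ndo_eth_ioctl	= can_eth_ioctl_hwts,	/* SIOCSHWTSTAMP/SIOCGHWTSTAMP */
	.ndo_change_mtu	= can_change_mtu,
};

static const struct ethtool_ops bar_ethtool_ops = {
	.get_ts_info = can_ethtool_op_get_ts_info_hwts,
};

/* on receive, convert the controller's tick counter to a ktime timestamp */
static void bar_set_hw_timestamp(struct sk_buff *skb, u64 ticks, u64 ticks_per_us)
{
	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);

	hwts->hwtstamp = ns_to_ktime(div_u64(ticks * 1000, ticks_per_us));
}
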
diff --git a/drivers/net/can/led.c b/drivers/net/can/led.c
deleted file mode 100644
index db14897f8e16..000000000000
--- a/drivers/net/can/led.c
+++ /dev/null
@@ -1,140 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2012, Fabio Baltieri <fabio.baltieri@gmail.com>
- * Copyright 2012, Kurt Van Dijck <kurt.van.dijck@eia.be>
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/can/dev.h>
-
-#include <linux/can/led.h>
-
-static unsigned long led_delay = 50;
-module_param(led_delay, ulong, 0644);
-MODULE_PARM_DESC(led_delay,
- "blink delay time for activity leds (msecs, default: 50).");
-
-/* Trigger a LED event in response to a CAN device event */
-void can_led_event(struct net_device *netdev, enum can_led_event event)
-{
- struct can_priv *priv = netdev_priv(netdev);
-
- switch (event) {
- case CAN_LED_EVENT_OPEN:
- led_trigger_event(priv->tx_led_trig, LED_FULL);
- led_trigger_event(priv->rx_led_trig, LED_FULL);
- led_trigger_event(priv->rxtx_led_trig, LED_FULL);
- break;
- case CAN_LED_EVENT_STOP:
- led_trigger_event(priv->tx_led_trig, LED_OFF);
- led_trigger_event(priv->rx_led_trig, LED_OFF);
- led_trigger_event(priv->rxtx_led_trig, LED_OFF);
- break;
- case CAN_LED_EVENT_TX:
- if (led_delay) {
- led_trigger_blink_oneshot(priv->tx_led_trig,
- &led_delay, &led_delay, 1);
- led_trigger_blink_oneshot(priv->rxtx_led_trig,
- &led_delay, &led_delay, 1);
- }
- break;
- case CAN_LED_EVENT_RX:
- if (led_delay) {
- led_trigger_blink_oneshot(priv->rx_led_trig,
- &led_delay, &led_delay, 1);
- led_trigger_blink_oneshot(priv->rxtx_led_trig,
- &led_delay, &led_delay, 1);
- }
- break;
- }
-}
-EXPORT_SYMBOL_GPL(can_led_event);
-
-static void can_led_release(struct device *gendev, void *res)
-{
- struct can_priv *priv = netdev_priv(to_net_dev(gendev));
-
- led_trigger_unregister_simple(priv->tx_led_trig);
- led_trigger_unregister_simple(priv->rx_led_trig);
- led_trigger_unregister_simple(priv->rxtx_led_trig);
-}
-
-/* Register CAN LED triggers for a CAN device
- *
- * This is normally called from a driver's probe function
- */
-void devm_can_led_init(struct net_device *netdev)
-{
- struct can_priv *priv = netdev_priv(netdev);
- void *res;
-
- res = devres_alloc(can_led_release, 0, GFP_KERNEL);
- if (!res) {
- netdev_err(netdev, "cannot register LED triggers\n");
- return;
- }
-
- snprintf(priv->tx_led_trig_name, sizeof(priv->tx_led_trig_name),
- "%s-tx", netdev->name);
- snprintf(priv->rx_led_trig_name, sizeof(priv->rx_led_trig_name),
- "%s-rx", netdev->name);
- snprintf(priv->rxtx_led_trig_name, sizeof(priv->rxtx_led_trig_name),
- "%s-rxtx", netdev->name);
-
- led_trigger_register_simple(priv->tx_led_trig_name,
- &priv->tx_led_trig);
- led_trigger_register_simple(priv->rx_led_trig_name,
- &priv->rx_led_trig);
- led_trigger_register_simple(priv->rxtx_led_trig_name,
- &priv->rxtx_led_trig);
-
- devres_add(&netdev->dev, res);
-}
-EXPORT_SYMBOL_GPL(devm_can_led_init);
-
-/* NETDEV rename notifier to rename the associated led triggers too */
-static int can_led_notifier(struct notifier_block *nb, unsigned long msg,
- void *ptr)
-{
- struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- struct can_priv *priv = safe_candev_priv(netdev);
- char name[CAN_LED_NAME_SZ];
-
- if (!priv)
- return NOTIFY_DONE;
-
- if (!priv->tx_led_trig || !priv->rx_led_trig || !priv->rxtx_led_trig)
- return NOTIFY_DONE;
-
- if (msg == NETDEV_CHANGENAME) {
- snprintf(name, sizeof(name), "%s-tx", netdev->name);
- led_trigger_rename_static(name, priv->tx_led_trig);
-
- snprintf(name, sizeof(name), "%s-rx", netdev->name);
- led_trigger_rename_static(name, priv->rx_led_trig);
-
- snprintf(name, sizeof(name), "%s-rxtx", netdev->name);
- led_trigger_rename_static(name, priv->rxtx_led_trig);
- }
-
- return NOTIFY_DONE;
-}
-
-/* notifier block for netdevice event */
-static struct notifier_block can_netdev_notifier __read_mostly = {
- .notifier_call = can_led_notifier,
-};
-
-int __init can_led_notifier_init(void)
-{
- return register_netdevice_notifier(&can_netdev_notifier);
-}
-
-void __exit can_led_notifier_exit(void)
-{
- unregister_netdevice_notifier(&can_netdev_notifier);
-}
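
With led.c gone, the per-driver LED trigger calls seen throughout this diff disappear as well. A hedged reminder of the pattern this removal obsoletes (baz_* is a placeholder driver; the removed calls are kept as comments since the symbols no longer exist):

#include <linux/can/dev.h>

static int baz_open(struct net_device *ndev)
{
	int err = open_candev(ndev);

	if (err)
		return err;

	/* gone: can_led_event(ndev, CAN_LED_EVENT_OPEN);
	 * likewise CAN_LED_EVENT_TX/RX in the hot paths,
	 * CAN_LED_EVENT_STOP in ndo_stop, and
	 * devm_can_led_init(ndev) in probe. */
	return 0;
}
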
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
index 45ad1b3f0cd0..fc2afab36279 100644
--- a/drivers/net/can/m_can/Kconfig
+++ b/drivers/net/can/m_can/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig CAN_M_CAN
tristate "Bosch M_CAN support"
+ select CAN_RX_OFFLOAD
help
Say Y here if you want support for Bosch M_CAN controller framework.
This is common support for devices that embed the Bosch M_CAN IP.
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index c2a8421e7845..a5003435802b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -9,19 +9,20 @@
*/
#include <linux/bitfield.h>
+#include <linux/can/dev.h>
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/iopoll.h>
-#include <linux/can/dev.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/phy/phy.h>
#include "m_can.h"
@@ -77,9 +78,6 @@ enum m_can_reg {
M_CAN_TXEFA = 0xf8,
};
-/* napi related */
-#define M_CAN_NAPI_WEIGHT 64
-
/* message ram configuration data length */
#define MRAM_CFG_LEN 8
@@ -158,6 +156,7 @@ enum m_can_reg {
#define PSR_EW BIT(6)
#define PSR_EP BIT(5)
#define PSR_LEC_MASK GENMASK(2, 0)
+#define PSR_DLEC_MASK GENMASK(10, 8)
/* Interrupt Register (IR) */
#define IR_ALL_INT 0xffffffff
@@ -211,7 +210,7 @@ enum m_can_reg {
/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X (IR_PED | IR_PEA)
-#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
+#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
IR_RF0L)
#define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X)
@@ -336,6 +335,9 @@ m_can_fifo_read(struct m_can_classdev *cdev,
u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
offset;
+ if (val_count == 0)
+ return 0;
+
return cdev->ops->read_fifo(cdev, addr_offset, val, val_count);
}
@@ -346,6 +348,9 @@ m_can_fifo_write(struct m_can_classdev *cdev,
u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
offset;
+ if (val_count == 0)
+ return 0;
+
return cdev->ops->write_fifo(cdev, addr_offset, val, val_count);
}
@@ -364,9 +369,14 @@ m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}
+static inline bool _m_can_tx_fifo_full(u32 txfqs)
+{
+ return !!(txfqs & TXFQS_TFQF);
+}
+
static inline bool m_can_tx_fifo_full(struct m_can_classdev *cdev)
{
- return !!(m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQF);
+ return _m_can_tx_fifo_full(m_can_read(cdev, M_CAN_TXFQS));
}
static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
@@ -458,7 +468,7 @@ static void m_can_receive_skb(struct m_can_classdev *cdev,
struct net_device_stats *stats = &cdev->net->stats;
int err;
- err = can_rx_offload_queue_sorted(&cdev->offload, skb,
+ err = can_rx_offload_queue_timestamp(&cdev->offload, skb,
timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -467,19 +477,16 @@ static void m_can_receive_skb(struct m_can_classdev *cdev,
}
}
-static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
+static int m_can_read_fifo(struct net_device *dev, u32 fgi)
{
struct net_device_stats *stats = &dev->stats;
struct m_can_classdev *cdev = netdev_priv(dev);
struct canfd_frame *cf;
struct sk_buff *skb;
struct id_and_dlc fifo_header;
- u32 fgi;
u32 timestamp = 0;
int err;
- /* calculate the fifo get index for where to read data */
- fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);
err = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID, &fifo_header, 2);
if (err)
goto out_fail;
@@ -518,15 +525,12 @@ static int m_can_read_fifo(struct net_device *dev, u32 rxfs)
cf->data, DIV_ROUND_UP(cf->len, 4));
if (err)
goto out_free_skb;
- }
-
- /* acknowledge rx fifo 0 */
- m_can_write(cdev, M_CAN_RXF0A, fgi);
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
- timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc);
+ timestamp = FIELD_GET(RX_BUF_RXTS_MASK, fifo_header.dlc) << 16;
m_can_receive_skb(cdev, skb, timestamp);
@@ -544,7 +548,11 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
struct m_can_classdev *cdev = netdev_priv(dev);
u32 pkts = 0;
u32 rxfs;
- int err;
+ u32 rx_count;
+ u32 fgi;
+ int ack_fgi = -1;
+ int i;
+ int err = 0;
rxfs = m_can_read(cdev, M_CAN_RXF0S);
if (!(rxfs & RXFS_FFL_MASK)) {
@@ -552,18 +560,25 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
return 0;
}
- while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
- err = m_can_read_fifo(dev, rxfs);
+ rx_count = FIELD_GET(RXFS_FFL_MASK, rxfs);
+ fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);
+
+ for (i = 0; i < rx_count && quota > 0; ++i) {
+ err = m_can_read_fifo(dev, fgi);
if (err)
- return err;
+ break;
quota--;
pkts++;
- rxfs = m_can_read(cdev, M_CAN_RXF0S);
+ ack_fgi = fgi;
+ fgi = (++fgi >= cdev->mcfg[MRAM_RXF0].num ? 0 : fgi);
}
- if (pkts)
- can_led_event(dev, CAN_LED_EVENT_RX);
+ if (ack_fgi != -1)
+ m_can_write(cdev, M_CAN_RXF0A, ack_fgi);
+
+ if (err)
+ return err;
return pkts;
}
@@ -647,9 +662,6 @@ static int m_can_handle_lec_err(struct net_device *dev,
break;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
@@ -706,7 +718,6 @@ static int m_can_handle_state_change(struct net_device *dev,
enum can_state new_state)
{
struct m_can_classdev *cdev = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
struct can_frame *cf;
struct sk_buff *skb;
struct can_berr_counter bec;
@@ -745,7 +756,7 @@ static int m_can_handle_state_change(struct net_device *dev,
switch (new_state) {
case CAN_STATE_ERROR_WARNING:
/* error warning state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -754,7 +765,7 @@ static int m_can_handle_state_change(struct net_device *dev,
break;
case CAN_STATE_ERROR_PASSIVE:
/* error passive state */
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
ecr = m_can_read(cdev, M_CAN_ECR);
if (ecr & ECR_RP)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
@@ -771,9 +782,6 @@ static int m_can_handle_state_change(struct net_device *dev,
break;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
@@ -822,11 +830,9 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
netdev_err(dev, "Message RAM access failure occurred\n");
}
-static inline bool is_lec_err(u32 psr)
+static inline bool is_lec_err(u8 lec)
{
- psr &= LEC_UNUSED;
-
- return psr && (psr != LEC_UNUSED);
+ return lec != LEC_NO_ERROR && lec != LEC_NO_CHANGE;
}
static inline bool m_can_is_protocol_err(u32 irqstatus)
@@ -881,9 +887,20 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
work_done += m_can_handle_lost_msg(dev);
/* handle lec errors on the bus */
- if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
- is_lec_err(psr))
- work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);
+ if (cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ u8 lec = FIELD_GET(PSR_LEC_MASK, psr);
+ u8 dlec = FIELD_GET(PSR_DLEC_MASK, psr);
+
+ if (is_lec_err(lec)) {
+ netdev_dbg(dev, "Arbitration phase error detected\n");
+ work_done += m_can_handle_lec_err(dev, lec);
+ }
+
+ if (is_lec_err(dlec)) {
+ netdev_dbg(dev, "Data phase error detected\n");
+ work_done += m_can_handle_lec_err(dev, dlec);
+ }
+ }
/* handle protocol errors in arbitration phase */
if ((cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
@@ -896,14 +913,12 @@ static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
return work_done;
}
-static int m_can_rx_handler(struct net_device *dev, int quota)
+static int m_can_rx_handler(struct net_device *dev, int quota, u32 irqstatus)
{
struct m_can_classdev *cdev = netdev_priv(dev);
int rx_work_or_err;
int work_done = 0;
- u32 irqstatus, psr;
- irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);
if (!irqstatus)
goto end;
@@ -928,13 +943,13 @@ static int m_can_rx_handler(struct net_device *dev, int quota)
}
}
- psr = m_can_read(cdev, M_CAN_PSR);
-
if (irqstatus & IR_ERR_STATE)
- work_done += m_can_handle_state_errors(dev, psr);
+ work_done += m_can_handle_state_errors(dev,
+ m_can_read(cdev, M_CAN_PSR));
if (irqstatus & IR_ERR_BUS_30X)
- work_done += m_can_handle_bus_errors(dev, irqstatus, psr);
+ work_done += m_can_handle_bus_errors(dev, irqstatus,
+ m_can_read(cdev, M_CAN_PSR));
if (irqstatus & IR_RF0N) {
rx_work_or_err = m_can_do_rx_poll(dev, (quota - work_done));
@@ -947,18 +962,18 @@ end:
return work_done;
}
-static int m_can_rx_peripheral(struct net_device *dev)
+static int m_can_rx_peripheral(struct net_device *dev, u32 irqstatus)
{
struct m_can_classdev *cdev = netdev_priv(dev);
int work_done;
- work_done = m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT);
+ work_done = m_can_rx_handler(dev, NAPI_POLL_WEIGHT, irqstatus);
/* Don't re-enable interrupts if the driver had a fatal error
* (e.g., FIFO read failure).
*/
- if (work_done >= 0)
- m_can_enable_all_interrupts(cdev);
+ if (work_done < 0)
+ m_can_disable_all_interrupts(cdev);
return work_done;
}
@@ -968,8 +983,11 @@ static int m_can_poll(struct napi_struct *napi, int quota)
struct net_device *dev = napi->dev;
struct m_can_classdev *cdev = netdev_priv(dev);
int work_done;
+ u32 irqstatus;
+
+ irqstatus = cdev->irqstatus | m_can_read(cdev, M_CAN_IR);
- work_done = m_can_rx_handler(dev, quota);
+ work_done = m_can_rx_handler(dev, quota, irqstatus);
/* Don't re-enable interrupts if the driver had a fatal error
* (e.g., FIFO read failure).
@@ -1010,7 +1028,9 @@ static int m_can_echo_tx_event(struct net_device *dev)
u32 txe_count = 0;
u32 m_can_txefs;
u32 fgi = 0;
+ int ack_fgi = -1;
int i = 0;
+ int err = 0;
unsigned int msg_mark;
struct m_can_classdev *cdev = netdev_priv(dev);
@@ -1020,34 +1040,34 @@ static int m_can_echo_tx_event(struct net_device *dev)
/* Get Tx Event fifo element count */
txe_count = FIELD_GET(TXEFS_EFFL_MASK, m_can_txefs);
+ fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_txefs);
/* Get and process all sent elements */
for (i = 0; i < txe_count; i++) {
u32 txe, timestamp = 0;
- int err;
-
- /* retrieve get index */
- fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_read(cdev, M_CAN_TXEFS));
/* get message marker, timestamp */
err = m_can_txe_fifo_read(cdev, fgi, 4, &txe);
if (err) {
netdev_err(dev, "TXE FIFO read returned %d\n", err);
- return err;
+ break;
}
msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
- timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe);
+ timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe) << 16;
- /* ack txe element */
- m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
- fgi));
+ ack_fgi = fgi;
+ fgi = (++fgi >= cdev->mcfg[MRAM_TXE].num ? 0 : fgi);
/* update stats */
m_can_tx_update_stats(cdev, msg_mark, timestamp);
}
- return 0;
+ if (ack_fgi != -1)
+ m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
+ ack_fgi));
+
+ return err;
}
static irqreturn_t m_can_isr(int irq, void *dev_id)
@@ -1063,8 +1083,7 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
return IRQ_NONE;
/* ACK all irqs */
- if (ir & IR_ALL_INT)
- m_can_write(cdev, M_CAN_IR, ir);
+ m_can_write(cdev, M_CAN_IR, ir);
if (cdev->ops->clear_interrupts)
cdev->ops->clear_interrupts(cdev);
@@ -1076,11 +1095,12 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
*/
if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
cdev->irqstatus = ir;
- m_can_disable_all_interrupts(cdev);
- if (!cdev->is_peripheral)
+ if (!cdev->is_peripheral) {
+ m_can_disable_all_interrupts(cdev);
napi_schedule(&cdev->napi);
- else if (m_can_rx_peripheral(dev) < 0)
+ } else if (m_can_rx_peripheral(dev, ir) < 0) {
goto out_fail;
+ }
}
if (cdev->version == 30) {
@@ -1091,8 +1111,6 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (cdev->is_peripheral)
timestamp = m_can_get_timestamp(cdev);
m_can_tx_update_stats(cdev, 0, timestamp);
-
- can_led_event(dev, CAN_LED_EVENT_TX);
netif_wake_queue(dev);
}
} else {
@@ -1101,7 +1119,6 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
if (m_can_echo_tx_event(dev) != 0)
goto out_fail;
- can_led_event(dev, CAN_LED_EVENT_TX);
if (netif_queue_stopped(dev) &&
!m_can_tx_fifo_full(cdev))
netif_wake_queue(dev);
@@ -1242,10 +1259,23 @@ static int m_can_set_bittiming(struct net_device *dev)
* - setup bittiming
* - configure timestamp generation
*/
-static void m_can_chip_config(struct net_device *dev)
+static int m_can_chip_config(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ u32 interrupts = IR_ALL_INT;
u32 cccr, test;
+ int err;
+
+ err = m_can_init_ram(cdev);
+ if (err) {
+ dev_err(cdev->dev, "Message RAM configuration failed\n");
+ return err;
+ }
+
+ /* Disable unused interrupts */
+ interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TEFW | IR_TFE |
+ IR_TCF | IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N |
+ IR_RF0F | IR_RF0W);
m_can_config_endisable(cdev, true);
@@ -1341,16 +1371,13 @@ static void m_can_chip_config(struct net_device *dev)
m_can_write(cdev, M_CAN_TEST, test);
/* Enable interrupts */
- m_can_write(cdev, M_CAN_IR, IR_ALL_INT);
- if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+ if (!(cdev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
if (cdev->version == 30)
- m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
- ~(IR_ERR_LEC_30X));
+ interrupts &= ~(IR_ERR_LEC_30X);
else
- m_can_write(cdev, M_CAN_IE, IR_ALL_INT &
- ~(IR_ERR_LEC_31X));
- else
- m_can_write(cdev, M_CAN_IE, IR_ALL_INT);
+ interrupts &= ~(IR_ERR_LEC_31X);
+ }
+ m_can_write(cdev, M_CAN_IE, interrupts);
/* route all interrupts to INT0 */
m_can_write(cdev, M_CAN_ILS, ILS_ALL_INT0);
@@ -1358,27 +1385,36 @@ static void m_can_chip_config(struct net_device *dev)
/* set bittiming params */
m_can_set_bittiming(dev);
- /* enable internal timestamp generation, with a prescalar of 16. The
- * prescalar is applied to the nominal bit timing
+ /* enable internal timestamp generation, with a prescaler of 16. The
+ * prescaler is applied to the nominal bit timing
*/
- m_can_write(cdev, M_CAN_TSCC, FIELD_PREP(TSCC_TCP_MASK, 0xf));
+ m_can_write(cdev, M_CAN_TSCC,
+ FIELD_PREP(TSCC_TCP_MASK, 0xf) |
+ FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));
m_can_config_endisable(cdev, false);
if (cdev->ops->init)
cdev->ops->init(cdev);
+
+ return 0;
}
-static void m_can_start(struct net_device *dev)
+static int m_can_start(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ int ret;
/* basic m_can configuration */
- m_can_chip_config(dev);
+ ret = m_can_chip_config(dev);
+ if (ret)
+ return ret;
cdev->can.state = CAN_STATE_ERROR_ACTIVE;
m_can_enable_all_interrupts(cdev);
+
+ return 0;
}
static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
@@ -1463,7 +1499,7 @@ static bool m_can_niso_supported(struct m_can_classdev *cdev)
static int m_can_dev_setup(struct m_can_classdev *cdev)
{
struct net_device *dev = cdev->net;
- int m_can_version;
+ int m_can_version, err;
m_can_version = m_can_check_core_release(cdev);
/* return if unsupported version */
@@ -1474,8 +1510,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
}
if (!cdev->is_peripheral)
- netif_napi_add(dev, &cdev->napi,
- m_can_poll, M_CAN_NAPI_WEIGHT);
+ netif_napi_add(dev, &cdev->napi, m_can_poll);
/* Shared properties of all M_CAN versions */
cdev->version = m_can_version;
@@ -1493,33 +1528,25 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
switch (cdev->version) {
case 30:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
- can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_30X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_30X;
+ err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
+ if (err)
+ return err;
+ cdev->can.bittiming_const = &m_can_bittiming_const_30X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_30X;
break;
case 31:
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
- can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_31X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_31X;
+ err = can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
+ if (err)
+ return err;
+ cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
break;
case 32:
case 33:
/* Support both MCAN version v3.2.x and v3.3.0 */
- cdev->can.bittiming_const = cdev->bit_timing ?
- cdev->bit_timing : &m_can_bittiming_const_31X;
-
- cdev->can.data_bittiming_const = cdev->data_timing ?
- cdev->data_timing :
- &m_can_data_bittiming_const_31X;
+ cdev->can.bittiming_const = &m_can_bittiming_const_31X;
+ cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
cdev->can.ctrlmode_supported |=
(m_can_niso_supported(cdev) ?
@@ -1568,13 +1595,10 @@ static int m_can_close(struct net_device *dev)
cdev->tx_skb = NULL;
destroy_workqueue(cdev->tx_wq);
cdev->tx_wq = NULL;
- }
-
- if (cdev->is_peripheral)
can_rx_offload_disable(&cdev->offload);
+ }
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
phy_power_off(cdev->transceiver);
@@ -1602,6 +1626,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
struct sk_buff *skb = cdev->tx_skb;
struct id_and_dlc fifo_header;
u32 cccr, fdflags;
+ u32 txfqs;
int err;
int putidx;
@@ -1634,8 +1659,6 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
if (err)
goto out_fail;
- can_put_echo_skb(skb, dev, 0, 0);
-
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
cccr = m_can_read(cdev, M_CAN_CCCR);
cccr &= ~CCCR_CMR_MASK;
@@ -1652,13 +1675,18 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
m_can_write(cdev, M_CAN_CCCR, cccr);
}
m_can_write(cdev, M_CAN_TXBTIE, 0x1);
+
+ can_put_echo_skb(skb, dev, 0, 0);
+
m_can_write(cdev, M_CAN_TXBAR, 0x1);
/* End of xmit function for version 3.0.x */
} else {
/* Transmit routine for version >= v3.1.x */
+ txfqs = m_can_read(cdev, M_CAN_TXFQS);
+
/* Check if FIFO full */
- if (m_can_tx_fifo_full(cdev)) {
+ if (_m_can_tx_fifo_full(txfqs)) {
/* This shouldn't happen */
netif_stop_queue(dev);
netdev_warn(dev,
@@ -1674,8 +1702,7 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
}
/* get put index for frame */
- putidx = FIELD_GET(TXFQS_TFQPI_MASK,
- m_can_read(cdev, M_CAN_TXFQS));
+ putidx = FIELD_GET(TXFQS_TFQPI_MASK, txfqs);
/* Construct DLC Field, with CAN-FD configuration.
* Use the put index of the fifo as the message marker,
@@ -1737,7 +1764,7 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
{
struct m_can_classdev *cdev = netdev_priv(dev);
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
if (cdev->is_peripheral) {
@@ -1815,9 +1842,9 @@ static int m_can_open(struct net_device *dev)
}
/* start the m_can controller */
- m_can_start(dev);
-
- can_led_event(dev, CAN_LED_EVENT_OPEN);
+ err = m_can_start(dev);
+ if (err)
+ goto exit_irq_fail;
if (!cdev->is_peripheral)
napi_enable(&cdev->napi);
@@ -1847,10 +1874,15 @@ static const struct net_device_ops m_can_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops m_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static int register_m_can_dev(struct net_device *dev)
{
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &m_can_netdev_ops;
+ dev->ethtool_ops = &m_can_ethtool_ops;
return register_candev(dev);
}
@@ -1922,7 +1954,7 @@ int m_can_class_get_clocks(struct m_can_classdev *cdev)
cdev->hclk = devm_clk_get(cdev->dev, "hclk");
cdev->cclk = devm_clk_get(cdev->dev, "cclk");
- if (IS_ERR(cdev->cclk)) {
+ if (IS_ERR(cdev->hclk) || IS_ERR(cdev->cclk)) {
dev_err(cdev->dev, "no clock found\n");
ret = -ENODEV;
}
@@ -1990,7 +2022,7 @@ int m_can_class_register(struct m_can_classdev *cdev)
if (cdev->is_peripheral) {
ret = can_rx_offload_add_manual(cdev->net, &cdev->offload,
- M_CAN_NAPI_WEIGHT);
+ NAPI_POLL_WEIGHT);
if (ret)
goto clk_disable;
}
@@ -2006,8 +2038,6 @@ int m_can_class_register(struct m_can_classdev *cdev)
goto rx_offload_del;
}
- devm_can_led_init(cdev->net);
-
of_can_transceiver(cdev->net);
dev_info(cdev->dev, "%s device registered (irq=%d, version=%d)\n",
@@ -2073,9 +2103,13 @@ int m_can_class_resume(struct device *dev)
ret = m_can_clk_start(cdev);
if (ret)
return ret;
+ ret = m_can_start(ndev);
+ if (ret) {
+ m_can_clk_stop(cdev);
+
+ return ret;
+ }
- m_can_init_ram(cdev);
- m_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);
}
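
The largest behavioural change in m_can.c is how the RX and TX-event FIFOs are drained: the fill level and get index are read once, the ring is walked with a locally maintained index that wraps at the ring size, and only the last successfully consumed slot is acknowledged with a single register write. A hedged, self-contained sketch of that pattern (the fifo_* helpers stand in for the real register accessors):

#include <stdio.h>

#define RING_SIZE 8

static unsigned int fifo_level = 5;	/* pretend RXF0S.FFL */
static unsigned int fifo_get_index = 6;	/* pretend RXF0S.FGI */

static int fifo_read_slot(unsigned int idx)
{
	printf("consume slot %u\n", idx);
	return 0;			/* 0 = ok, <0 = bus/SPI error */
}

static void fifo_ack(unsigned int idx)
{
	printf("single ack write: RXF0A = %u\n", idx);
}

int main(void)
{
	unsigned int fgi = fifo_get_index;
	int ack_fgi = -1;
	unsigned int i;

	for (i = 0; i < fifo_level; i++) {
		if (fifo_read_slot(fgi) < 0)
			break;		/* keep the last good index for the ack */
		ack_fgi = fgi;
		fgi = (fgi + 1 >= RING_SIZE) ? 0 : fgi + 1;
	}
	if (ack_fgi != -1)
		fifo_ack(ack_fgi);
	return 0;
}
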
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 2c5d40997168..a839dc71dc9b 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -7,28 +7,27 @@
#define _CAN_M_CAN_H_
#include <linux/can/core.h>
-#include <linux/can/led.h>
+#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
+#include <linux/clk.h>
#include <linux/completion.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/freezer.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/iopoll.h>
-#include <linux/can/dev.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
/* m_can lec values */
enum m_can_lec_type {
@@ -39,7 +38,7 @@ enum m_can_lec_type {
LEC_BIT1_ERROR,
LEC_BIT0_ERROR,
LEC_CRC_ERROR,
- LEC_UNUSED,
+ LEC_NO_CHANGE,
};
enum m_can_mram_cfg {
@@ -85,9 +84,6 @@ struct m_can_classdev {
struct sk_buff *tx_skb;
struct phy *transceiver;
- const struct can_bittiming_const *bit_timing;
- const struct can_bittiming_const *data_timing;
-
struct m_can_ops *ops;
int version;
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index b56a54d6c5a9..f2219aa2824b 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -18,14 +18,9 @@
#define M_CAN_PCI_MMIO_BAR 0
+#define M_CAN_CLOCK_FREQ_EHL 200000000
#define CTL_CSR_INT_CTL_OFFSET 0x508
-struct m_can_pci_config {
- const struct can_bittiming_const *bit_timing;
- const struct can_bittiming_const *data_timing;
- unsigned int clock_freq;
-};
-
struct m_can_pci_priv {
struct m_can_classdev cdev;
@@ -89,40 +84,9 @@ static struct m_can_ops m_can_pci_ops = {
.read_fifo = iomap_read_fifo,
};
-static const struct can_bittiming_const m_can_bittiming_const_ehl = {
- .name = KBUILD_MODNAME,
- .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
- .tseg1_max = 64,
- .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
- .tseg2_max = 128,
- .sjw_max = 128,
- .brp_min = 1,
- .brp_max = 512,
- .brp_inc = 1,
-};
-
-static const struct can_bittiming_const m_can_data_bittiming_const_ehl = {
- .name = KBUILD_MODNAME,
- .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
- .tseg1_max = 16,
- .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
-static const struct m_can_pci_config m_can_pci_ehl = {
- .bit_timing = &m_can_bittiming_const_ehl,
- .data_timing = &m_can_data_bittiming_const_ehl,
- .clock_freq = 200000000,
-};
-
static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct device *dev = &pci->dev;
- const struct m_can_pci_config *cfg;
struct m_can_classdev *mcan_class;
struct m_can_pci_priv *priv;
void __iomem *base;
@@ -150,29 +114,25 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
if (!mcan_class)
return -ENOMEM;
- cfg = (const struct m_can_pci_config *)id->driver_data;
-
priv = cdev_to_priv(mcan_class);
priv->base = base;
ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
- return ret;
+ goto err_free_dev;
mcan_class->dev = &pci->dev;
mcan_class->net->irq = pci_irq_vector(pci, 0);
mcan_class->pm_clock_support = 1;
- mcan_class->bit_timing = cfg->bit_timing;
- mcan_class->data_timing = cfg->data_timing;
- mcan_class->can.clock.freq = cfg->clock_freq;
+ mcan_class->can.clock.freq = id->driver_data;
mcan_class->ops = &m_can_pci_ops;
pci_set_drvdata(pci, mcan_class);
ret = m_can_class_register(mcan_class);
if (ret)
- goto err;
+ goto err_free_irq;
/* Enable interrupt control at CAN wrapper IP */
writel(0x1, base + CTL_CSR_INT_CTL_OFFSET);
@@ -184,8 +144,10 @@ static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
return 0;
-err:
+err_free_irq:
pci_free_irq_vectors(pci);
+err_free_dev:
+ m_can_class_free_dev(mcan_class->net);
return ret;
}
@@ -201,6 +163,7 @@ static void m_can_pci_remove(struct pci_dev *pci)
writel(0x0, priv->base + CTL_CSR_INT_CTL_OFFSET);
m_can_class_unregister(mcan_class);
+ m_can_class_free_dev(mcan_class->net);
pci_free_irq_vectors(pci);
}
@@ -218,8 +181,8 @@ static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
m_can_pci_suspend, m_can_pci_resume);
static const struct pci_device_id m_can_pci_id_table[] = {
- { PCI_VDEVICE(INTEL, 0x4bc1), (kernel_ulong_t)&m_can_pci_ehl, },
- { PCI_VDEVICE(INTEL, 0x4bc2), (kernel_ulong_t)&m_can_pci_ehl, },
+ { PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
+ { PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);
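
Besides storing the clock frequency directly in driver_data, the PCI glue above gains a proper unwind path for probe failures. A hedged sketch of that goto-based ordering, reduced to generic candev/PCI calls (qux_* names are placeholders):

#include <linux/can/dev.h>
#include <linux/pci.h>

struct qux_priv {
	struct can_priv can;
};

static int qux_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct net_device *ndev;
	int ret;

	ndev = alloc_candev(sizeof(struct qux_priv), 1);
	if (!ndev)
		return -ENOMEM;

	ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		goto err_free_dev;

	ret = register_candev(ndev);
	if (ret)
		goto err_free_irq;

	return 0;

err_free_irq:
	/* undo only what succeeded, in reverse order */
	pci_free_irq_vectors(pci);
err_free_dev:
	free_candev(ndev);
	return ret;
}
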
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index eee47bad0592..9c1dcf838006 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -5,8 +5,8 @@
//
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/
-#include <linux/platform_device.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include "m_can.h"
@@ -140,10 +140,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mcan_class);
- ret = m_can_init_ram(mcan_class);
- if (ret)
- goto probe_fail;
-
pm_runtime_enable(mcan_class->dev);
ret = m_can_class_register(mcan_class);
if (ret)
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 04687b15b250..2342aa011647 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -10,7 +10,7 @@
#define TCAN4X5X_DEV_ID1 0x04
#define TCAN4X5X_REV 0x08
#define TCAN4X5X_STATUS 0x0C
-#define TCAN4X5X_ERROR_STATUS 0x10
+#define TCAN4X5X_ERROR_STATUS_MASK 0x10
#define TCAN4X5X_CONTROL 0x14
#define TCAN4X5X_CONFIG 0x800
@@ -204,17 +204,7 @@ static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev)
if (ret)
return ret;
- ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_MCAN_INT_REG,
- TCAN4X5X_ENABLE_MCAN_INT);
- if (ret)
- return ret;
-
- ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS,
- TCAN4X5X_CLEAR_ALL_INT);
- if (ret)
- return ret;
-
- return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS,
+ return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS,
TCAN4X5X_CLEAR_ALL_INT);
}
@@ -234,8 +224,8 @@ static int tcan4x5x_init(struct m_can_classdev *cdev)
if (ret)
return ret;
- /* Zero out the MCAN buffers */
- ret = m_can_init_ram(cdev);
+ ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS_MASK,
+ TCAN4X5X_CLEAR_ALL_INT);
if (ret)
return ret;
@@ -388,7 +378,7 @@ out_power:
return ret;
}
-static int tcan4x5x_can_remove(struct spi_device *spi)
+static void tcan4x5x_can_remove(struct spi_device *spi)
{
struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
@@ -397,8 +387,6 @@ static int tcan4x5x_can_remove(struct spi_device *spi)
tcan4x5x_power_enable(priv->power, 0);
m_can_class_free_dev(priv->cdev.net);
-
- return 0;
}
static const struct of_device_id tcan4x5x_of_match[] = {
diff --git a/drivers/net/can/m_can/tcan4x5x-regmap.c b/drivers/net/can/m_can/tcan4x5x-regmap.c
index ca80dbaf7a3f..2b218ce04e9f 100644
--- a/drivers/net/can/m_can/tcan4x5x-regmap.c
+++ b/drivers/net/can/m_can/tcan4x5x-regmap.c
@@ -12,7 +12,7 @@
#define TCAN4X5X_SPI_INSTRUCTION_WRITE (0x61 << 24)
#define TCAN4X5X_SPI_INSTRUCTION_READ (0x41 << 24)
-#define TCAN4X5X_MAX_REGISTER 0x8ffc
+#define TCAN4X5X_MAX_REGISTER 0x87fc
static int tcan4x5x_regmap_gather_write(void *context,
const void *reg, size_t reg_len,
@@ -90,16 +90,47 @@ static int tcan4x5x_regmap_read(void *context,
return 0;
}
-static const struct regmap_range tcan4x5x_reg_table_yes_range[] = {
- regmap_reg_range(0x0000, 0x002c), /* Device ID and SPI Registers */
- regmap_reg_range(0x0800, 0x083c), /* Device configuration registers and Interrupt Flags*/
+static const struct regmap_range tcan4x5x_reg_table_wr_range[] = {
+ /* Device ID and SPI Registers */
+ regmap_reg_range(0x000c, 0x0010),
+ /* Device configuration registers and Interrupt Flags*/
+ regmap_reg_range(0x0800, 0x080c),
+ regmap_reg_range(0x0814, 0x0814),
+ regmap_reg_range(0x0820, 0x0820),
+ regmap_reg_range(0x0830, 0x0830),
+ /* M_CAN */
+ regmap_reg_range(0x100c, 0x102c),
+ regmap_reg_range(0x1048, 0x1048),
+ regmap_reg_range(0x1050, 0x105c),
+ regmap_reg_range(0x1080, 0x1088),
+ regmap_reg_range(0x1090, 0x1090),
+ regmap_reg_range(0x1098, 0x10a0),
+ regmap_reg_range(0x10a8, 0x10b0),
+ regmap_reg_range(0x10b8, 0x10c0),
+ regmap_reg_range(0x10c8, 0x10c8),
+ regmap_reg_range(0x10d0, 0x10d4),
+ regmap_reg_range(0x10e0, 0x10e4),
+ regmap_reg_range(0x10f0, 0x10f0),
+ regmap_reg_range(0x10f8, 0x10f8),
+ /* MRAM */
+ regmap_reg_range(0x8000, 0x87fc),
+};
+
+static const struct regmap_range tcan4x5x_reg_table_rd_range[] = {
+ regmap_reg_range(0x0000, 0x0010), /* Device ID and SPI Registers */
+ regmap_reg_range(0x0800, 0x0830), /* Device configuration registers and Interrupt Flags*/
regmap_reg_range(0x1000, 0x10fc), /* M_CAN */
regmap_reg_range(0x8000, 0x87fc), /* MRAM */
};
-static const struct regmap_access_table tcan4x5x_reg_table = {
- .yes_ranges = tcan4x5x_reg_table_yes_range,
- .n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_yes_range),
+static const struct regmap_access_table tcan4x5x_reg_table_wr = {
+ .yes_ranges = tcan4x5x_reg_table_wr_range,
+ .n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_wr_range),
+};
+
+static const struct regmap_access_table tcan4x5x_reg_table_rd = {
+ .yes_ranges = tcan4x5x_reg_table_rd_range,
+ .n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_rd_range),
};
static const struct regmap_config tcan4x5x_regmap = {
@@ -107,8 +138,8 @@ static const struct regmap_config tcan4x5x_regmap = {
.reg_stride = 4,
.pad_bits = 8,
.val_bits = 32,
- .wr_table = &tcan4x5x_reg_table,
- .rd_table = &tcan4x5x_reg_table,
+ .wr_table = &tcan4x5x_reg_table_wr,
+ .rd_table = &tcan4x5x_reg_table_rd,
.max_register = TCAN4X5X_MAX_REGISTER,
.cache_type = REGCACHE_NONE,
.read_flag_mask = (__force unsigned long)
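
The tcan4x5x regmap change splits one shared access table into separate readable and writable yes-lists, so read-only status registers can no longer be written through the cache-less regmap. A hedged sketch of the general pattern (register ranges here are illustrative, not the TCAN4x5x map):

#include <linux/regmap.h>

static const struct regmap_range demo_wr_ranges[] = {
	regmap_reg_range(0x0010, 0x001c),	/* writable control subset */
	regmap_reg_range(0x8000, 0x87fc),	/* message RAM */
};

static const struct regmap_range demo_rd_ranges[] = {
	regmap_reg_range(0x0000, 0x001c),	/* IDs/status readable on top */
	regmap_reg_range(0x8000, 0x87fc),
};

static const struct regmap_access_table demo_wr_table = {
	.yes_ranges = demo_wr_ranges,
	.n_yes_ranges = ARRAY_SIZE(demo_wr_ranges),
};

static const struct regmap_access_table demo_rd_table = {
	.yes_ranges = demo_rd_ranges,
	.n_yes_ranges = ARRAY_SIZE(demo_rd_ranges),
};

static const struct regmap_config demo_regmap = {
	.reg_bits = 24,
	.reg_stride = 4,
	.val_bits = 32,
	.wr_table = &demo_wr_table,
	.rd_table = &demo_rd_table,
	.max_register = 0x87fc,
};
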
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index de4ddf79ba9b..b0ed798ae70f 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -14,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/can/dev.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <linux/clk.h>
@@ -61,7 +63,7 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
else
*mscan_clksrc = MSCAN_CLKSRC_XTAL;
- freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
+ freq = mpc5xxx_get_bus_frequency(&ofdev->dev);
if (!freq)
return 0;
@@ -320,14 +322,14 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
&mscan_clksrc);
if (!priv->can.clock.freq) {
dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
- goto exit_free_mscan;
+ goto exit_put_clock;
}
err = register_mscandev(dev, mscan_clksrc);
if (err) {
dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
- goto exit_free_mscan;
+ goto exit_put_clock;
}
dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
@@ -335,7 +337,9 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
return 0;
-exit_free_mscan:
+exit_put_clock:
+ if (data->put_clock)
+ data->put_clock(ofdev);
free_candev(dev);
exit_dispose_irq:
irq_dispose_mapping(irq);
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index fa32e418eb29..a6829cdc0e81 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -191,7 +191,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i, rtr, buf_id;
u32 can_id;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
out_8(&regs->cantier, 0);
@@ -401,13 +401,15 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
continue;
}
- if (canrflg & MSCAN_RXF)
+ if (canrflg & MSCAN_RXF) {
mscan_get_rx_frame(dev, frame);
- else if (canrflg & MSCAN_ERR_IF)
+ stats->rx_packets++;
+ if (!(frame->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += frame->len;
+ } else if (canrflg & MSCAN_ERR_IF) {
mscan_get_err_frame(dev, frame, canrflg);
+ }
- stats->rx_packets++;
- stats->rx_bytes += frame->len;
work_done++;
netif_receive_skb(skb);
}
@@ -446,9 +448,9 @@ static irqreturn_t mscan_isr(int irq, void *dev_id)
continue;
out_8(&regs->cantbsel, mask);
- stats->tx_bytes += in_8(&regs->tx.dlr);
+ stats->tx_bytes += can_get_echo_skb(dev, entry->id,
+ NULL);
stats->tx_packets++;
- can_get_echo_skb(dev, entry->id, NULL);
priv->tx_active &= ~mask;
list_del(pos);
}
@@ -614,6 +616,10 @@ static const struct net_device_ops mscan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops mscan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
struct mscan_priv *priv = netdev_priv(dev);
@@ -674,10 +680,11 @@ struct net_device *alloc_mscandev(void)
priv = netdev_priv(dev);
dev->netdev_ops = &mscan_netdev_ops;
+ dev->ethtool_ops = &mscan_ethtool_ops;
dev->flags |= IFF_ECHO; /* we support local echo */
- netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8);
+ netif_napi_add_weight(dev, &priv->napi, mscan_rx_poll, 8);
priv->can.bittiming_const = &mscan_bittiming_const;
priv->can.do_set_bittiming = mscan_do_set_bittiming;
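
The mscan hunks adopt the same statistics rule applied elsewhere in this series: byte counters come from the echoed or received payload only, RTR frames count as packets but contribute no bytes, and the TX byte count is taken from the length returned by can_get_echo_skb(). A hedged sketch of that accounting:

#include <linux/can/dev.h>
#include <linux/netdevice.h>

static void quux_tx_complete(struct net_device *dev, unsigned int idx)
{
	struct net_device_stats *stats = &dev->stats;

	/* can_get_echo_skb() returns the echoed frame's payload length */
	stats->tx_bytes += can_get_echo_skb(dev, idx, NULL);
	stats->tx_packets++;
}

static void quux_rx_account(struct net_device *dev, const struct can_frame *cf)
{
	struct net_device_stats *stats = &dev->stats;

	stats->rx_packets++;
	if (!(cf->can_id & CAN_RTR_FLAG))
		stats->rx_bytes += cf->len;
}
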
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
deleted file mode 100644
index 964c8a09226a..000000000000
--- a/drivers/net/can/pch_can.c
+++ /dev/null
@@ -1,1247 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 LAPIS SEMICONDUCTOR CO., LTD.
- */
-
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/can.h>
-#include <linux/can/dev.h>
-#include <linux/can/error.h>
-
-#define PCH_CTRL_INIT BIT(0) /* The INIT bit of CANCONT register. */
-#define PCH_CTRL_IE BIT(1) /* The IE bit of CAN control register */
-#define PCH_CTRL_IE_SIE_EIE (BIT(3) | BIT(2) | BIT(1))
-#define PCH_CTRL_CCE BIT(6)
-#define PCH_CTRL_OPT BIT(7) /* The OPT bit of CANCONT register. */
-#define PCH_OPT_SILENT BIT(3) /* The Silent bit of CANOPT reg. */
-#define PCH_OPT_LBACK BIT(4) /* The LoopBack bit of CANOPT reg. */
-
-#define PCH_CMASK_RX_TX_SET 0x00f3
-#define PCH_CMASK_RX_TX_GET 0x0073
-#define PCH_CMASK_ALL 0xff
-#define PCH_CMASK_NEWDAT BIT(2)
-#define PCH_CMASK_CLRINTPND BIT(3)
-#define PCH_CMASK_CTRL BIT(4)
-#define PCH_CMASK_ARB BIT(5)
-#define PCH_CMASK_MASK BIT(6)
-#define PCH_CMASK_RDWR BIT(7)
-#define PCH_IF_MCONT_NEWDAT BIT(15)
-#define PCH_IF_MCONT_MSGLOST BIT(14)
-#define PCH_IF_MCONT_INTPND BIT(13)
-#define PCH_IF_MCONT_UMASK BIT(12)
-#define PCH_IF_MCONT_TXIE BIT(11)
-#define PCH_IF_MCONT_RXIE BIT(10)
-#define PCH_IF_MCONT_RMTEN BIT(9)
-#define PCH_IF_MCONT_TXRQXT BIT(8)
-#define PCH_IF_MCONT_EOB BIT(7)
-#define PCH_IF_MCONT_DLC (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-#define PCH_MASK2_MDIR_MXTD (BIT(14) | BIT(15))
-#define PCH_ID2_DIR BIT(13)
-#define PCH_ID2_XTD BIT(14)
-#define PCH_ID_MSGVAL BIT(15)
-#define PCH_IF_CREQ_BUSY BIT(15)
-
-#define PCH_STATUS_INT 0x8000
-#define PCH_RP 0x00008000
-#define PCH_REC 0x00007f00
-#define PCH_TEC 0x000000ff
-
-#define PCH_TX_OK BIT(3)
-#define PCH_RX_OK BIT(4)
-#define PCH_EPASSIV BIT(5)
-#define PCH_EWARN BIT(6)
-#define PCH_BUS_OFF BIT(7)
-
-/* bit position of certain controller bits. */
-#define PCH_BIT_BRP_SHIFT 0
-#define PCH_BIT_SJW_SHIFT 6
-#define PCH_BIT_TSEG1_SHIFT 8
-#define PCH_BIT_TSEG2_SHIFT 12
-#define PCH_BIT_BRPE_BRPE_SHIFT 6
-
-#define PCH_MSK_BITT_BRP 0x3f
-#define PCH_MSK_BRPE_BRPE 0x3c0
-#define PCH_MSK_CTRL_IE_SIE_EIE 0x07
-#define PCH_COUNTER_LIMIT 10
-
-#define PCH_CAN_CLK 50000000 /* 50MHz */
-
-/*
- * Define the number of message object.
- * PCH CAN communications are done via Message RAM.
- * The Message RAM consists of 32 message objects.
- */
-#define PCH_RX_OBJ_NUM 26
-#define PCH_TX_OBJ_NUM 6
-#define PCH_RX_OBJ_START 1
-#define PCH_RX_OBJ_END PCH_RX_OBJ_NUM
-#define PCH_TX_OBJ_START (PCH_RX_OBJ_END + 1)
-#define PCH_TX_OBJ_END (PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM)
-
-#define PCH_FIFO_THRESH 16
-
-/* TxRqst2 show status of MsgObjNo.17~32 */
-#define PCH_TREQ2_TX_MASK (((1 << PCH_TX_OBJ_NUM) - 1) <<\
- (PCH_RX_OBJ_END - 16))
-
-enum pch_ifreg {
- PCH_RX_IFREG,
- PCH_TX_IFREG,
-};
-
-enum pch_can_err {
- PCH_STUF_ERR = 1,
- PCH_FORM_ERR,
- PCH_ACK_ERR,
- PCH_BIT1_ERR,
- PCH_BIT0_ERR,
- PCH_CRC_ERR,
- PCH_LEC_ALL,
-};
-
-enum pch_can_mode {
- PCH_CAN_ENABLE,
- PCH_CAN_DISABLE,
- PCH_CAN_ALL,
- PCH_CAN_NONE,
- PCH_CAN_STOP,
- PCH_CAN_RUN,
-};
-
-struct pch_can_if_regs {
- u32 creq;
- u32 cmask;
- u32 mask1;
- u32 mask2;
- u32 id1;
- u32 id2;
- u32 mcont;
- u32 data[4];
- u32 rsv[13];
-};
-
-struct pch_can_regs {
- u32 cont;
- u32 stat;
- u32 errc;
- u32 bitt;
- u32 intr;
- u32 opt;
- u32 brpe;
- u32 reserve;
- struct pch_can_if_regs ifregs[2]; /* [0]=if1 [1]=if2 */
- u32 reserve1[8];
- u32 treq1;
- u32 treq2;
- u32 reserve2[6];
- u32 data1;
- u32 data2;
- u32 reserve3[6];
- u32 canipend1;
- u32 canipend2;
- u32 reserve4[6];
- u32 canmval1;
- u32 canmval2;
- u32 reserve5[37];
- u32 srst;
-};
-
-struct pch_can_priv {
- struct can_priv can;
- struct pci_dev *dev;
- u32 tx_enable[PCH_TX_OBJ_END];
- u32 rx_enable[PCH_TX_OBJ_END];
- u32 rx_link[PCH_TX_OBJ_END];
- u32 int_enables;
- struct net_device *ndev;
- struct pch_can_regs __iomem *regs;
- struct napi_struct napi;
- int tx_obj; /* Point next Tx Obj index */
- int use_msi;
-};
-
-static const struct can_bittiming_const pch_can_bittiming_const = {
- .name = KBUILD_MODNAME,
- .tseg1_min = 2,
- .tseg1_max = 16,
- .tseg2_min = 1,
- .tseg2_max = 8,
- .sjw_max = 4,
- .brp_min = 1,
- .brp_max = 1024, /* 6bit + extended 4bit */
- .brp_inc = 1,
-};
-
-static const struct pci_device_id pch_pci_tbl[] = {
- {PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,},
- {0,}
-};
-MODULE_DEVICE_TABLE(pci, pch_pci_tbl);
-
-static inline void pch_can_bit_set(void __iomem *addr, u32 mask)
-{
- iowrite32(ioread32(addr) | mask, addr);
-}
-
-static inline void pch_can_bit_clear(void __iomem *addr, u32 mask)
-{
- iowrite32(ioread32(addr) & ~mask, addr);
-}
-
-static void pch_can_set_run_mode(struct pch_can_priv *priv,
- enum pch_can_mode mode)
-{
- switch (mode) {
- case PCH_CAN_RUN:
- pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT);
- break;
-
- case PCH_CAN_STOP:
- pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT);
- break;
-
- default:
- netdev_err(priv->ndev, "%s -> Invalid Mode.\n", __func__);
- break;
- }
-}
-
-static void pch_can_set_optmode(struct pch_can_priv *priv)
-{
- u32 reg_val = ioread32(&priv->regs->opt);
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- reg_val |= PCH_OPT_SILENT;
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
- reg_val |= PCH_OPT_LBACK;
-
- pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT);
- iowrite32(reg_val, &priv->regs->opt);
-}
-
-static void pch_can_rw_msg_obj(void __iomem *creq_addr, u32 num)
-{
- int counter = PCH_COUNTER_LIMIT;
- u32 ifx_creq;
-
- iowrite32(num, creq_addr);
- while (counter) {
- ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY;
- if (!ifx_creq)
- break;
- counter--;
- udelay(1);
- }
- if (!counter)
- pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
-}
-
-static void pch_can_set_int_enables(struct pch_can_priv *priv,
- enum pch_can_mode interrupt_no)
-{
- switch (interrupt_no) {
- case PCH_CAN_DISABLE:
- pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE);
- break;
-
- case PCH_CAN_ALL:
- pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
- break;
-
- case PCH_CAN_NONE:
- pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
- break;
-
- default:
- netdev_err(priv->ndev, "Invalid interrupt number.\n");
- break;
- }
-}
-
-static void pch_can_set_rxtx(struct pch_can_priv *priv, u32 buff_num,
- int set, enum pch_ifreg dir)
-{
- u32 ie;
-
- if (dir)
- ie = PCH_IF_MCONT_TXIE;
- else
- ie = PCH_IF_MCONT_RXIE;
-
- /* Reading the Msg buffer from Message RAM to IF1/2 registers. */
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
-
- /* Setting the IF1/2MASK1 register to access MsgVal and RxIE bits */
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
- &priv->regs->ifregs[dir].cmask);
-
- if (set) {
- /* Setting the MsgVal and RxIE/TxIE bits */
- pch_can_bit_set(&priv->regs->ifregs[dir].mcont, ie);
- pch_can_bit_set(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
- } else {
- /* Clearing the MsgVal and RxIE/TxIE bits */
- pch_can_bit_clear(&priv->regs->ifregs[dir].mcont, ie);
- pch_can_bit_clear(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
- }
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
-}
-
-static void pch_can_set_rx_all(struct pch_can_priv *priv, int set)
-{
- int i;
-
- /* Traversing to obtain the object configured as receivers. */
- for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++)
- pch_can_set_rxtx(priv, i, set, PCH_RX_IFREG);
-}
-
-static void pch_can_set_tx_all(struct pch_can_priv *priv, int set)
-{
- int i;
-
- /* Traversing to obtain the object configured as transmit object. */
- for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
- pch_can_set_rxtx(priv, i, set, PCH_TX_IFREG);
-}
-
-static u32 pch_can_int_pending(struct pch_can_priv *priv)
-{
- return ioread32(&priv->regs->intr) & 0xffff;
-}
-
-static void pch_can_clear_if_buffers(struct pch_can_priv *priv)
-{
- int i; /* Msg Obj ID (1~32) */
-
- for (i = PCH_RX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
- iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[0].cmask);
- iowrite32(0xffff, &priv->regs->ifregs[0].mask1);
- iowrite32(0xffff, &priv->regs->ifregs[0].mask2);
- iowrite32(0x0, &priv->regs->ifregs[0].id1);
- iowrite32(0x0, &priv->regs->ifregs[0].id2);
- iowrite32(0x0, &priv->regs->ifregs[0].mcont);
- iowrite32(0x0, &priv->regs->ifregs[0].data[0]);
- iowrite32(0x0, &priv->regs->ifregs[0].data[1]);
- iowrite32(0x0, &priv->regs->ifregs[0].data[2]);
- iowrite32(0x0, &priv->regs->ifregs[0].data[3]);
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
- PCH_CMASK_ARB | PCH_CMASK_CTRL,
- &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
- }
-}
-
-static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
-{
- int i;
-
- for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
-
- iowrite32(0x0, &priv->regs->ifregs[0].id1);
- iowrite32(0x0, &priv->regs->ifregs[0].id2);
-
- pch_can_bit_set(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_UMASK);
-
- /* In case FIFO mode, Last EoB of Rx Obj must be 1 */
- if (i == PCH_RX_OBJ_END)
- pch_can_bit_set(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_EOB);
- else
- pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_EOB);
-
- iowrite32(0, &priv->regs->ifregs[0].mask1);
- pch_can_bit_clear(&priv->regs->ifregs[0].mask2,
- 0x1fff | PCH_MASK2_MDIR_MXTD);
-
- /* Setting CMASK for writing */
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
- PCH_CMASK_CTRL, &priv->regs->ifregs[0].cmask);
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
- }
-
- for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[1].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
-
- /* Resetting DIR bit for reception */
- iowrite32(0x0, &priv->regs->ifregs[1].id1);
- iowrite32(PCH_ID2_DIR, &priv->regs->ifregs[1].id2);
-
- /* Setting EOB bit for transmitter */
- iowrite32(PCH_IF_MCONT_EOB | PCH_IF_MCONT_UMASK,
- &priv->regs->ifregs[1].mcont);
-
- iowrite32(0, &priv->regs->ifregs[1].mask1);
- pch_can_bit_clear(&priv->regs->ifregs[1].mask2, 0x1fff);
-
- /* Setting CMASK for writing */
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
- PCH_CMASK_CTRL, &priv->regs->ifregs[1].cmask);
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
- }
-}
-
-static void pch_can_init(struct pch_can_priv *priv)
-{
- /* Stopping the Can device. */
- pch_can_set_run_mode(priv, PCH_CAN_STOP);
-
- /* Clearing all the message object buffers. */
- pch_can_clear_if_buffers(priv);
-
- /* Configuring the respective message object as either rx/tx object. */
- pch_can_config_rx_tx_buffers(priv);
-
- /* Enabling the interrupts. */
- pch_can_set_int_enables(priv, PCH_CAN_ALL);
-}
-
-static void pch_can_release(struct pch_can_priv *priv)
-{
- /* Stooping the CAN device. */
- pch_can_set_run_mode(priv, PCH_CAN_STOP);
-
- /* Disabling the interrupts. */
- pch_can_set_int_enables(priv, PCH_CAN_NONE);
-
- /* Disabling all the receive object. */
- pch_can_set_rx_all(priv, 0);
-
- /* Disabling all the transmit object. */
- pch_can_set_tx_all(priv, 0);
-}
-
-/* This function clears interrupt(s) from the CAN device. */
-static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
-{
- /* Clear interrupt for transmit object */
- if ((mask >= PCH_RX_OBJ_START) && (mask <= PCH_RX_OBJ_END)) {
- /* Setting CMASK for clearing the reception interrupts. */
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
- &priv->regs->ifregs[0].cmask);
-
- /* Clearing the Dir bit. */
- pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
-
- /* Clearing NewDat & IntPnd */
- pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, mask);
- } else if ((mask >= PCH_TX_OBJ_START) && (mask <= PCH_TX_OBJ_END)) {
- /*
- * Setting CMASK for clearing interrupts for frame transmission.
- */
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
- &priv->regs->ifregs[1].cmask);
-
- /* Resetting the ID registers. */
- pch_can_bit_set(&priv->regs->ifregs[1].id2,
- PCH_ID2_DIR | (0x7ff << 2));
- iowrite32(0x0, &priv->regs->ifregs[1].id1);
-
- /* Clearing NewDat, TxRqst & IntPnd */
- pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
- PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
- PCH_IF_MCONT_TXRQXT);
- pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, mask);
- }
-}
-
-static void pch_can_reset(struct pch_can_priv *priv)
-{
- /* write to sw reset register */
- iowrite32(1, &priv->regs->srst);
- iowrite32(0, &priv->regs->srst);
-}
-
-static void pch_can_error(struct net_device *ndev, u32 status)
-{
- struct sk_buff *skb;
- struct pch_can_priv *priv = netdev_priv(ndev);
- struct can_frame *cf;
- u32 errc, lec;
- struct net_device_stats *stats = &(priv->ndev->stats);
- enum can_state state = priv->can.state;
-
- skb = alloc_can_err_skb(ndev, &cf);
- if (!skb)
- return;
-
- if (status & PCH_BUS_OFF) {
- pch_can_set_tx_all(priv, 0);
- pch_can_set_rx_all(priv, 0);
- state = CAN_STATE_BUS_OFF;
- cf->can_id |= CAN_ERR_BUSOFF;
- priv->can.can_stats.bus_off++;
- can_bus_off(ndev);
- }
-
- errc = ioread32(&priv->regs->errc);
- /* Warning interrupt. */
- if (status & PCH_EWARN) {
- state = CAN_STATE_ERROR_WARNING;
- priv->can.can_stats.error_warning++;
- cf->can_id |= CAN_ERR_CRTL;
- if (((errc & PCH_REC) >> 8) > 96)
- cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
- if ((errc & PCH_TEC) > 96)
- cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
- netdev_dbg(ndev,
- "%s -> Error Counter is more than 96.\n", __func__);
- }
- /* Error passive interrupt. */
- if (status & PCH_EPASSIV) {
- priv->can.can_stats.error_passive++;
- state = CAN_STATE_ERROR_PASSIVE;
- cf->can_id |= CAN_ERR_CRTL;
- if (errc & PCH_RP)
- cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
- if ((errc & PCH_TEC) > 127)
- cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
- netdev_dbg(ndev,
-			   "%s -> CAN controller is ERROR PASSIVE.\n", __func__);
- }
-
- lec = status & PCH_LEC_ALL;
- switch (lec) {
- case PCH_STUF_ERR:
- cf->data[2] |= CAN_ERR_PROT_STUFF;
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- break;
- case PCH_FORM_ERR:
- cf->data[2] |= CAN_ERR_PROT_FORM;
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- break;
- case PCH_ACK_ERR:
- cf->can_id |= CAN_ERR_ACK;
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- break;
- case PCH_BIT1_ERR:
- case PCH_BIT0_ERR:
- cf->data[2] |= CAN_ERR_PROT_BIT;
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- break;
- case PCH_CRC_ERR:
- cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
- priv->can.can_stats.bus_error++;
- stats->rx_errors++;
- break;
- case PCH_LEC_ALL: /* Written by CPU. No error status */
- break;
- }
-
- cf->data[6] = errc & PCH_TEC;
- cf->data[7] = (errc & PCH_REC) >> 8;
-
- priv->can.state = state;
- netif_receive_skb(skb);
-
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
-}
-
-static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
-{
- struct net_device *ndev = (struct net_device *)dev_id;
- struct pch_can_priv *priv = netdev_priv(ndev);
-
- if (!pch_can_int_pending(priv))
- return IRQ_NONE;
-
- pch_can_set_int_enables(priv, PCH_CAN_NONE);
- napi_schedule(&priv->napi);
- return IRQ_HANDLED;
-}
-
-static void pch_fifo_thresh(struct pch_can_priv *priv, int obj_id)
-{
- if (obj_id < PCH_FIFO_THRESH) {
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
- PCH_CMASK_ARB, &priv->regs->ifregs[0].cmask);
-
- /* Clearing the Dir bit. */
- pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
-
- /* Clearing NewDat & IntPnd */
- pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_INTPND);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
- } else if (obj_id > PCH_FIFO_THRESH) {
- pch_can_int_clr(priv, obj_id);
- } else if (obj_id == PCH_FIFO_THRESH) {
- int cnt;
- for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
- pch_can_int_clr(priv, cnt + 1);
- }
-}
-
-static void pch_can_rx_msg_lost(struct net_device *ndev, int obj_id)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &(priv->ndev->stats);
- struct sk_buff *skb;
- struct can_frame *cf;
-
- netdev_dbg(priv->ndev, "Msg Obj is overwritten.\n");
- pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_MSGLOST);
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
- &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
-
- skb = alloc_can_err_skb(ndev, &cf);
- if (!skb)
- return;
-
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- stats->rx_over_errors++;
- stats->rx_errors++;
-
- netif_receive_skb(skb);
-}
-
-static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
-{
- u32 reg;
- canid_t id;
- int rcv_pkts = 0;
- struct sk_buff *skb;
- struct can_frame *cf;
- struct pch_can_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &(priv->ndev->stats);
- int i;
- u32 id2;
- u16 data_reg;
-
- do {
- /* Reading the message object from the Message RAM */
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_num);
-
- /* Reading the MCONT register. */
- reg = ioread32(&priv->regs->ifregs[0].mcont);
-
- if (reg & PCH_IF_MCONT_EOB)
- break;
-
- /* If MsgLost bit set. */
- if (reg & PCH_IF_MCONT_MSGLOST) {
- pch_can_rx_msg_lost(ndev, obj_num);
- rcv_pkts++;
- quota--;
- obj_num++;
- continue;
- } else if (!(reg & PCH_IF_MCONT_NEWDAT)) {
- obj_num++;
- continue;
- }
-
- skb = alloc_can_skb(priv->ndev, &cf);
- if (!skb) {
- netdev_err(ndev, "alloc_can_skb Failed\n");
- return rcv_pkts;
- }
-
- /* Get Received data */
- id2 = ioread32(&priv->regs->ifregs[0].id2);
- if (id2 & PCH_ID2_XTD) {
- id = (ioread32(&priv->regs->ifregs[0].id1) & 0xffff);
- id |= (((id2) & 0x1fff) << 16);
- cf->can_id = id | CAN_EFF_FLAG;
- } else {
- id = (id2 >> 2) & CAN_SFF_MASK;
- cf->can_id = id;
- }
-
- if (id2 & PCH_ID2_DIR)
- cf->can_id |= CAN_RTR_FLAG;
-
- cf->len = can_cc_dlc2len((ioread32(&priv->regs->
- ifregs[0].mcont)) & 0xF);
-
- for (i = 0; i < cf->len; i += 2) {
- data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
- cf->data[i] = data_reg;
- cf->data[i + 1] = data_reg >> 8;
- }
-
- rcv_pkts++;
- stats->rx_packets++;
- quota--;
- stats->rx_bytes += cf->len;
- netif_receive_skb(skb);
-
- pch_fifo_thresh(priv, obj_num);
- obj_num++;
- } while (quota > 0);
-
- return rcv_pkts;
-}
-
-static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &(priv->ndev->stats);
- u32 dlc;
-
- can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1, NULL);
- iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
- &priv->regs->ifregs[1].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
- dlc = can_cc_dlc2len(ioread32(&priv->regs->ifregs[1].mcont) &
- PCH_IF_MCONT_DLC);
- stats->tx_bytes += dlc;
- stats->tx_packets++;
- if (int_stat == PCH_TX_OBJ_END)
- netif_wake_queue(ndev);
-}
-
-static int pch_can_poll(struct napi_struct *napi, int quota)
-{
- struct net_device *ndev = napi->dev;
- struct pch_can_priv *priv = netdev_priv(ndev);
- u32 int_stat;
- u32 reg_stat;
- int quota_save = quota;
-
- int_stat = pch_can_int_pending(priv);
- if (!int_stat)
- goto end;
-
- if (int_stat == PCH_STATUS_INT) {
- reg_stat = ioread32(&priv->regs->stat);
-
- if ((reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) &&
- ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)) {
- pch_can_error(ndev, reg_stat);
- quota--;
- }
-
- if (reg_stat & (PCH_TX_OK | PCH_RX_OK))
- pch_can_bit_clear(&priv->regs->stat,
- reg_stat & (PCH_TX_OK | PCH_RX_OK));
-
- int_stat = pch_can_int_pending(priv);
- }
-
- if (quota == 0)
- goto end;
-
- if ((int_stat >= PCH_RX_OBJ_START) && (int_stat <= PCH_RX_OBJ_END)) {
- quota -= pch_can_rx_normal(ndev, int_stat, quota);
- } else if ((int_stat >= PCH_TX_OBJ_START) &&
- (int_stat <= PCH_TX_OBJ_END)) {
- /* Handle transmission interrupt */
- pch_can_tx_complete(ndev, int_stat);
- }
-
-end:
- napi_complete(napi);
- pch_can_set_int_enables(priv, PCH_CAN_ALL);
-
- return quota_save - quota;
-}
-
-static int pch_set_bittiming(struct net_device *ndev)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
- const struct can_bittiming *bt = &priv->can.bittiming;
- u32 canbit;
- u32 bepe;
-
-	/* Setting the CCE bit for accessing the CAN timing registers. */
- pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE);
-
- canbit = (bt->brp - 1) & PCH_MSK_BITT_BRP;
- canbit |= (bt->sjw - 1) << PCH_BIT_SJW_SHIFT;
- canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1_SHIFT;
- canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2_SHIFT;
- bepe = ((bt->brp - 1) & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE_SHIFT;
- iowrite32(canbit, &priv->regs->bitt);
- iowrite32(bepe, &priv->regs->brpe);
- pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE);
-
- return 0;
-}
-
-static void pch_can_start(struct net_device *ndev)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
-
- if (priv->can.state != CAN_STATE_STOPPED)
- pch_can_reset(priv);
-
- pch_set_bittiming(ndev);
- pch_can_set_optmode(priv);
-
- pch_can_set_tx_all(priv, 1);
- pch_can_set_rx_all(priv, 1);
-
- /* Setting the CAN to run mode. */
- pch_can_set_run_mode(priv, PCH_CAN_RUN);
-
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
-
- return;
-}
-
-static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
-{
- int ret = 0;
-
- switch (mode) {
- case CAN_MODE_START:
- pch_can_start(ndev);
- netif_wake_queue(ndev);
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- return ret;
-}
-
-static int pch_can_open(struct net_device *ndev)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
- int retval;
-
- /* Registering the interrupt. */
- retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
- ndev->name, ndev);
- if (retval) {
- netdev_err(ndev, "request_irq failed.\n");
- goto req_irq_err;
- }
-
- /* Open common can device */
- retval = open_candev(ndev);
- if (retval) {
- netdev_err(ndev, "open_candev() failed %d\n", retval);
- goto err_open_candev;
- }
-
- pch_can_init(priv);
- pch_can_start(ndev);
- napi_enable(&priv->napi);
- netif_start_queue(ndev);
-
- return 0;
-
-err_open_candev:
- free_irq(priv->dev->irq, ndev);
-req_irq_err:
- pch_can_release(priv);
-
- return retval;
-}
-
-static int pch_close(struct net_device *ndev)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
-
- netif_stop_queue(ndev);
- napi_disable(&priv->napi);
- pch_can_release(priv);
- free_irq(priv->dev->irq, ndev);
- close_candev(ndev);
- priv->can.state = CAN_STATE_STOPPED;
- return 0;
-}
-
-static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
- struct pch_can_priv *priv = netdev_priv(ndev);
- struct can_frame *cf = (struct can_frame *)skb->data;
- int tx_obj_no;
- int i;
- u32 id2;
-
- if (can_dropped_invalid_skb(ndev, skb))
- return NETDEV_TX_OK;
-
- tx_obj_no = priv->tx_obj;
- if (priv->tx_obj == PCH_TX_OBJ_END) {
- if (ioread32(&priv->regs->treq2) & PCH_TREQ2_TX_MASK)
- netif_stop_queue(ndev);
-
- priv->tx_obj = PCH_TX_OBJ_START;
- } else {
- priv->tx_obj++;
- }
-
- /* Setting the CMASK register. */
- pch_can_bit_set(&priv->regs->ifregs[1].cmask, PCH_CMASK_ALL);
-
- /* If ID extended is set. */
- if (cf->can_id & CAN_EFF_FLAG) {
- iowrite32(cf->can_id & 0xffff, &priv->regs->ifregs[1].id1);
- id2 = ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD;
- } else {
- iowrite32(0, &priv->regs->ifregs[1].id1);
- id2 = (cf->can_id & CAN_SFF_MASK) << 2;
- }
-
- id2 |= PCH_ID_MSGVAL;
-
-	/* Set the DIR bit unless a remote frame is to be transmitted. */
- if (!(cf->can_id & CAN_RTR_FLAG))
- id2 |= PCH_ID2_DIR;
-
- iowrite32(id2, &priv->regs->ifregs[1].id2);
-
- /* Copy data to register */
- for (i = 0; i < cf->len; i += 2) {
- iowrite16(cf->data[i] | (cf->data[i + 1] << 8),
- &priv->regs->ifregs[1].data[i / 2]);
- }
-
- can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1, 0);
-
- /* Set the size of the data. Update if2_mcont */
- iowrite32(cf->len | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
- PCH_IF_MCONT_TXIE, &priv->regs->ifregs[1].mcont);
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, tx_obj_no);
-
- return NETDEV_TX_OK;
-}
-
-static const struct net_device_ops pch_can_netdev_ops = {
- .ndo_open = pch_can_open,
- .ndo_stop = pch_close,
- .ndo_start_xmit = pch_xmit,
- .ndo_change_mtu = can_change_mtu,
-};
-
-static void pch_can_remove(struct pci_dev *pdev)
-{
- struct net_device *ndev = pci_get_drvdata(pdev);
- struct pch_can_priv *priv = netdev_priv(ndev);
-
- unregister_candev(priv->ndev);
- if (priv->use_msi)
- pci_disable_msi(priv->dev);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pch_can_reset(priv);
- pci_iounmap(pdev, priv->regs);
- free_candev(priv->ndev);
-}
-
-static void __maybe_unused pch_can_set_int_custom(struct pch_can_priv *priv)
-{
-	/* Clearing the IE, SIE and EIE bits of the CAN control register. */
- pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
-
- /* Appropriately setting them. */
- pch_can_bit_set(&priv->regs->cont,
- ((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1));
-}
-
-/* This function retrieves the interrupts enabled for the CAN device. */
-static u32 __maybe_unused pch_can_get_int_enables(struct pch_can_priv *priv)
-{
- /* Obtaining the status of IE, SIE and EIE interrupt bits. */
- return (ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1;
-}
-
-static u32 __maybe_unused pch_can_get_rxtx_ir(struct pch_can_priv *priv,
- u32 buff_num, enum pch_ifreg dir)
-{
- u32 ie, enable;
-
- if (dir)
- ie = PCH_IF_MCONT_RXIE;
- else
- ie = PCH_IF_MCONT_TXIE;
-
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
-
- if (((ioread32(&priv->regs->ifregs[dir].id2)) & PCH_ID_MSGVAL) &&
- ((ioread32(&priv->regs->ifregs[dir].mcont)) & ie))
- enable = 1;
- else
- enable = 0;
-
- return enable;
-}
-
-static void __maybe_unused pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
- u32 buffer_num, int set)
-{
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
- iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
- &priv->regs->ifregs[0].cmask);
- if (set)
- pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
- PCH_IF_MCONT_EOB);
- else
- pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB);
-
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
-}
-
-static u32 __maybe_unused pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
- u32 buffer_num)
-{
- u32 link;
-
- iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
- pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
-
- if (ioread32(&priv->regs->ifregs[0].mcont) & PCH_IF_MCONT_EOB)
- link = 0;
- else
- link = 1;
- return link;
-}
-
-static int __maybe_unused pch_can_get_buffer_status(struct pch_can_priv *priv)
-{
- return (ioread32(&priv->regs->treq1) & 0xffff) |
- (ioread32(&priv->regs->treq2) << 16);
-}
-
-static int __maybe_unused pch_can_suspend(struct device *dev_d)
-{
- int i;
- u32 buf_stat; /* Variable for reading the transmit buffer status. */
- int counter = PCH_COUNTER_LIMIT;
-
- struct net_device *dev = dev_get_drvdata(dev_d);
- struct pch_can_priv *priv = netdev_priv(dev);
-
- /* Stop the CAN controller */
- pch_can_set_run_mode(priv, PCH_CAN_STOP);
-
-	/* Indicate that we are about to enter suspend */
- priv->can.state = CAN_STATE_STOPPED;
-
-	/* Waiting for all transmissions to complete. */
- while (counter) {
- buf_stat = pch_can_get_buffer_status(priv);
- if (!buf_stat)
- break;
- counter--;
- udelay(1);
- }
- if (!counter)
- dev_err(dev_d, "%s -> Transmission time out.\n", __func__);
-
- /* Save interrupt configuration and then disable them */
- priv->int_enables = pch_can_get_int_enables(priv);
- pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
-
- /* Save Tx buffer enable state */
- for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
- priv->tx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
- PCH_TX_IFREG);
-
- /* Disable all Transmit buffers */
- pch_can_set_tx_all(priv, 0);
-
- /* Save Rx buffer enable state */
- for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
- priv->rx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
- PCH_RX_IFREG);
- priv->rx_link[i - 1] = pch_can_get_rx_buffer_link(priv, i);
- }
-
- /* Disable all Receive buffers */
- pch_can_set_rx_all(priv, 0);
-
- return 0;
-}
-
-static int __maybe_unused pch_can_resume(struct device *dev_d)
-{
- int i;
- struct net_device *dev = dev_get_drvdata(dev_d);
- struct pch_can_priv *priv = netdev_priv(dev);
-
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
-
- /* Disabling all interrupts. */
- pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
-
- /* Setting the CAN device in Stop Mode. */
- pch_can_set_run_mode(priv, PCH_CAN_STOP);
-
- /* Configuring the transmit and receive buffers. */
- pch_can_config_rx_tx_buffers(priv);
-
- /* Restore the CAN state */
- pch_set_bittiming(dev);
-
- /* Listen/Active */
- pch_can_set_optmode(priv);
-
-	/* Enabling the transmit buffers. */
- for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
- pch_can_set_rxtx(priv, i, priv->tx_enable[i - 1], PCH_TX_IFREG);
-
-	/* Configuring the receive buffers and enabling them. */
- for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
- /* Restore buffer link */
- pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i - 1]);
-
- /* Restore buffer enables */
- pch_can_set_rxtx(priv, i, priv->rx_enable[i - 1], PCH_RX_IFREG);
- }
-
- /* Enable CAN Interrupts */
- pch_can_set_int_custom(priv);
-
- /* Restore Run Mode */
- pch_can_set_run_mode(priv, PCH_CAN_RUN);
-
- return 0;
-}
-
-static int pch_can_get_berr_counter(const struct net_device *dev,
- struct can_berr_counter *bec)
-{
- struct pch_can_priv *priv = netdev_priv(dev);
- u32 errc = ioread32(&priv->regs->errc);
-
- bec->txerr = errc & PCH_TEC;
- bec->rxerr = (errc & PCH_REC) >> 8;
-
- return 0;
-}
-
-static int pch_can_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- struct net_device *ndev;
- struct pch_can_priv *priv;
- int rc;
- void __iomem *addr;
-
- rc = pci_enable_device(pdev);
- if (rc) {
- dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc);
- goto probe_exit_endev;
- }
-
- rc = pci_request_regions(pdev, KBUILD_MODNAME);
- if (rc) {
- dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc);
- goto probe_exit_pcireq;
- }
-
- addr = pci_iomap(pdev, 1, 0);
- if (!addr) {
- rc = -EIO;
- dev_err(&pdev->dev, "Failed pci_iomap\n");
- goto probe_exit_ipmap;
- }
-
- ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_END);
- if (!ndev) {
- rc = -ENOMEM;
- dev_err(&pdev->dev, "Failed alloc_candev\n");
- goto probe_exit_alloc_candev;
- }
-
- priv = netdev_priv(ndev);
- priv->ndev = ndev;
- priv->regs = addr;
- priv->dev = pdev;
- priv->can.bittiming_const = &pch_can_bittiming_const;
- priv->can.do_set_mode = pch_can_do_set_mode;
- priv->can.do_get_berr_counter = pch_can_get_berr_counter;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
- CAN_CTRLMODE_LOOPBACK;
- priv->tx_obj = PCH_TX_OBJ_START; /* Point head of Tx Obj */
-
- ndev->irq = pdev->irq;
- ndev->flags |= IFF_ECHO;
-
- pci_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->netdev_ops = &pch_can_netdev_ops;
- priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
-
- netif_napi_add(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);
-
- rc = pci_enable_msi(priv->dev);
- if (rc) {
- netdev_err(ndev, "PCH CAN opened without MSI\n");
- priv->use_msi = 0;
- } else {
- netdev_err(ndev, "PCH CAN opened with MSI\n");
- pci_set_master(pdev);
- priv->use_msi = 1;
- }
-
- rc = register_candev(ndev);
- if (rc) {
- dev_err(&pdev->dev, "Failed register_candev %d\n", rc);
- goto probe_exit_reg_candev;
- }
-
- return 0;
-
-probe_exit_reg_candev:
- if (priv->use_msi)
- pci_disable_msi(priv->dev);
- free_candev(ndev);
-probe_exit_alloc_candev:
- pci_iounmap(pdev, addr);
-probe_exit_ipmap:
- pci_release_regions(pdev);
-probe_exit_pcireq:
- pci_disable_device(pdev);
-probe_exit_endev:
- return rc;
-}
-
-static SIMPLE_DEV_PM_OPS(pch_can_pm_ops,
- pch_can_suspend,
- pch_can_resume);
-
-static struct pci_driver pch_can_pci_driver = {
- .name = "pch_can",
- .id_table = pch_pci_tbl,
- .probe = pch_can_probe,
- .remove = pch_can_remove,
- .driver.pm = &pch_can_pm_ops,
-};
-
-module_pci_driver(pch_can_pci_driver);
-
-MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_VERSION("0.94");
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index d08718e98e11..31c9c127e24b 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -7,6 +7,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/ethtool.h>
#include "peak_canfd_user.h"
@@ -266,10 +267,9 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
unsigned long flags;
spin_lock_irqsave(&priv->echo_lock, flags);
- can_get_echo_skb(priv->ndev, msg->client, NULL);
/* count bytes of the echo instead of skb */
- stats->tx_bytes += cf_len;
+ stats->tx_bytes += can_get_echo_skb(priv->ndev, msg->client, NULL);
stats->tx_packets++;
/* restart tx queue (a slot is free) */
@@ -310,12 +310,13 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
if (rx_msg_flags & PUCAN_MSG_EXT_ID)
cf->can_id |= CAN_EFF_FLAG;
- if (rx_msg_flags & PUCAN_MSG_RTR)
+ if (rx_msg_flags & PUCAN_MSG_RTR) {
cf->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cf->data, msg->d, cf->len);
- stats->rx_bytes += cf->len;
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
@@ -373,7 +374,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
priv->can.state = CAN_STATE_ERROR_PASSIVE;
priv->can.can_stats.error_passive++;
if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
@@ -386,7 +387,7 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
priv->can.state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
@@ -409,8 +410,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
return -ENOMEM;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
pucan_netif_rx(skb, msg->ts_low, msg->ts_high);
return 0;
@@ -432,14 +431,12 @@ static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
return -ENOMEM;
}
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
cf->data[6] = priv->bec.txerr;
cf->data[7] = priv->bec.rxerr;
- stats->rx_bytes += cf->len;
- stats->rx_packets++;
netif_rx(skb);
return 0;
@@ -654,7 +651,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
int room_left;
u8 len;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
msg_size = ALIGN(sizeof(*msg) + cf->len, 4);
@@ -746,13 +743,59 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config hwts_cfg = { 0 };
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP: /* set */
+ if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
+ return -EFAULT;
+ if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF &&
+ hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+ return -ERANGE;
+
+ case SIOCGHWTSTAMP: /* get */
+ hwts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops peak_canfd_netdev_ops = {
.ndo_open = peak_canfd_open,
.ndo_stop = peak_canfd_close,
+ .ndo_eth_ioctl = peak_eth_ioctl,
.ndo_start_xmit = peak_canfd_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
+static int peak_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static const struct ethtool_ops peak_canfd_ethtool_ops = {
+ .get_ts_info = peak_get_ts_info,
+};
+
struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
int echo_skb_max)
{
@@ -793,6 +836,7 @@ struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
ndev->flags |= IFF_ECHO;
ndev->netdev_ops = &peak_canfd_netdev_ops;
+ ndev->ethtool_ops = &peak_canfd_ethtool_ops;
ndev->dev_id = index;
return ndev;
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 8999ec9455ec..cc43c9c5e38c 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -10,9 +10,9 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
-#include <linux/can/led.h>
#include <linux/can/dev.h>
#include <linux/clk.h>
#include <linux/of.h>
@@ -94,7 +94,6 @@ struct rcar_can_priv {
struct rcar_can_regs __iomem *regs;
struct clk *clk;
struct clk *can_clk;
- u8 tx_dlc[RCAR_CAN_FIFO_DEPTH];
u32 tx_head;
u32 tx_tail;
u8 clock_select;
@@ -223,7 +222,6 @@ static void tx_failure_cleanup(struct net_device *ndev)
static void rcar_can_error(struct net_device *ndev)
{
struct rcar_can_priv *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
u8 eifr, txerr = 0, rxerr = 0;
@@ -235,11 +233,8 @@ static void rcar_can_error(struct net_device *ndev)
if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) {
txerr = readb(&priv->regs->tecr);
rxerr = readb(&priv->regs->recr);
- if (skb) {
+ if (skb)
cf->can_id |= CAN_ERR_CRTL;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
}
if (eifr & RCAR_CAN_EIFR_BEIF) {
int rx_errors = 0, tx_errors = 0;
@@ -339,6 +334,10 @@ static void rcar_can_error(struct net_device *ndev)
can_bus_off(ndev);
if (skb)
cf->can_id |= CAN_ERR_BUSOFF;
+ } else if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
}
if (eifr & RCAR_CAN_EIFR_ORIF) {
netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
@@ -362,11 +361,8 @@ static void rcar_can_error(struct net_device *ndev)
}
}
- if (skb) {
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (skb)
netif_rx(skb);
- }
}
static void rcar_can_tx_done(struct net_device *ndev)
@@ -383,17 +379,17 @@ static void rcar_can_tx_done(struct net_device *ndev)
if (priv->tx_head - priv->tx_tail <= unsent)
break;
stats->tx_packets++;
- stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
- RCAR_CAN_FIFO_DEPTH];
- priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
- can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH, NULL);
+ stats->tx_bytes +=
+ can_get_echo_skb(ndev,
+ priv->tx_tail % RCAR_CAN_FIFO_DEPTH,
+ NULL);
+
priv->tx_tail++;
netif_wake_queue(ndev);
}
/* Clear interrupt */
isr = readb(&priv->regs->isr);
writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr);
- can_led_event(ndev, CAN_LED_EVENT_TX);
}
static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
@@ -535,7 +531,6 @@ static int rcar_can_open(struct net_device *ndev)
ndev->irq, err);
goto out_close;
}
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
rcar_can_start(ndev);
netif_start_queue(ndev);
return 0;
@@ -585,7 +580,6 @@ static int rcar_can_close(struct net_device *ndev)
clk_disable_unprepare(priv->can_clk);
clk_disable_unprepare(priv->clk);
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
return 0;
}
@@ -596,7 +590,7 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
struct can_frame *cf = (struct can_frame *)skb->data;
u32 data, i;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
@@ -616,7 +610,6 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
writeb(cf->len, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
- priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->len;
can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH, 0);
priv->tx_head++;
/* Start Tx: write 0xff to the TFPCR register to increment
@@ -638,6 +631,10 @@ static const struct net_device_ops rcar_can_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops rcar_can_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
{
struct net_device_stats *stats = &priv->ndev->stats;
@@ -666,12 +663,11 @@ static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
for (dlc = 0; dlc < cf->len; dlc++)
cf->data[dlc] =
readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]);
- }
- can_led_event(priv->ndev, CAN_LED_EVENT_RX);
-
- stats->rx_bytes += cf->len;
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
+
netif_receive_skb(skb);
}
@@ -794,6 +790,7 @@ static int rcar_can_probe(struct platform_device *pdev)
}
ndev->netdev_ops = &rcar_can_netdev_ops;
+ ndev->ethtool_ops = &rcar_can_ethtool_ops;
ndev->irq = irq;
ndev->flags |= IFF_ECHO;
priv->ndev = ndev;
@@ -807,8 +804,8 @@ static int rcar_can_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
- netif_napi_add(ndev, &priv->napi, rcar_can_rx_poll,
- RCAR_CAN_NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &priv->napi, rcar_can_rx_poll,
+ RCAR_CAN_NAPI_WEIGHT);
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev, "register_candev() failed, error %d\n",
@@ -816,8 +813,6 @@ static int rcar_can_probe(struct platform_device *pdev)
goto fail_candev;
}
- devm_can_led_init(ndev);
-
dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
return 0;
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index ff9d0f5ae0dd..963c42f43755 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -21,31 +21,27 @@
* wherever it is modified to a readable name.
*/
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/can/dev.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
#include <linux/netdevice.h>
-#include <linux/platform_device.h>
-#include <linux/can/led.h>
-#include <linux/can/dev.h>
-#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/types.h>
#define RCANFD_DRV_NAME "rcar_canfd"
-enum rcanfd_chip_id {
- RENESAS_RCAR_GEN3 = 0,
- RENESAS_RZG2L,
-};
-
/* Global register bits */
/* RSCFDnCFDGRMCFG */
@@ -79,27 +75,33 @@ enum rcanfd_chip_id {
#define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3))
/* RSCFDnCFDGERFL / RSCFDnGERFL */
-#define RCANFD_GERFL_EEF1 BIT(17)
-#define RCANFD_GERFL_EEF0 BIT(16)
+#define RCANFD_GERFL_EEF0_7 GENMASK(23, 16)
+#define RCANFD_GERFL_EEF(ch) BIT(16 + (ch))
#define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */
#define RCANFD_GERFL_THLES BIT(2)
#define RCANFD_GERFL_MES BIT(1)
#define RCANFD_GERFL_DEF BIT(0)
-#define RCANFD_GERFL_ERR(gpriv, x) ((x) & (RCANFD_GERFL_EEF1 |\
- RCANFD_GERFL_EEF0 | RCANFD_GERFL_MES |\
- (gpriv->fdmode ?\
- RCANFD_GERFL_CMPOF : 0)))
+#define RCANFD_GERFL_ERR(gpriv, x) \
+ ((x) & (reg_gen4(gpriv, RCANFD_GERFL_EEF0_7, \
+ RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \
+ RCANFD_GERFL_MES | \
+ ((gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0)))
/* AFL Rx rules registers */
/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG_SETRNC(n, x) (((x) & 0xff) << (24 - n * 8))
-#define RCANFD_GAFLCFG_GETRNC(n, x) (((x) >> (24 - n * 8)) & 0xff)
+#define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \
+ (((x) & reg_gen4(gpriv, 0x1ff, 0xff)) << \
+ (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8)))
+
+#define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \
+ (((x) >> (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8))) & \
+ reg_gen4(gpriv, 0x1ff, 0xff))
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR_AFLDAE BIT(8)
-#define RCANFD_GAFLECTR_AFLPN(x) ((x) & 0x1f)
+#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_gen4(gpriv, 0x7f, 0x1f))
/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
#define RCANFD_GAFLID_GAFLLB BIT(29)
@@ -116,9 +118,15 @@ enum rcanfd_chip_id {
#define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0)
/* RSCFDnCFDCmNCFG - CAN FD only */
-#define RCANFD_NCFG_NTSEG2(x) (((x) & 0x1f) << 24)
-#define RCANFD_NCFG_NTSEG1(x) (((x) & 0x7f) << 16)
-#define RCANFD_NCFG_NSJW(x) (((x) & 0x1f) << 11)
+#define RCANFD_NCFG_NTSEG2(gpriv, x) \
+ (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 25, 24))
+
+#define RCANFD_NCFG_NTSEG1(gpriv, x) \
+ (((x) & reg_gen4(gpriv, 0xff, 0x7f)) << reg_gen4(gpriv, 17, 16))
+
+#define RCANFD_NCFG_NSJW(gpriv, x) \
+ (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 10, 11))
+
#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
@@ -179,12 +187,19 @@ enum rcanfd_chip_id {
#define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */
/* RSCFDnCFDCmDCFG */
-#define RCANFD_DCFG_DSJW(x) (((x) & 0x7) << 24)
-#define RCANFD_DCFG_DTSEG2(x) (((x) & 0x7) << 20)
-#define RCANFD_DCFG_DTSEG1(x) (((x) & 0xf) << 16)
+#define RCANFD_DCFG_DSJW(gpriv, x) (((x) & reg_gen4(gpriv, 0xf, 0x7)) << 24)
+
+#define RCANFD_DCFG_DTSEG2(gpriv, x) \
+ (((x) & reg_gen4(gpriv, 0x0f, 0x7)) << reg_gen4(gpriv, 16, 20))
+
+#define RCANFD_DCFG_DTSEG1(gpriv, x) \
+ (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 8, 16))
+
#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
/* RSCFDnCFDCmFDCFG */
+#define RCANFD_GEN4_FDCFG_CLOE BIT(30)
+#define RCANFD_GEN4_FDCFG_FDOE BIT(28)
#define RCANFD_FDCFG_TDCE BIT(9)
#define RCANFD_FDCFG_TDCOC BIT(8)
#define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16)
@@ -219,10 +234,11 @@ enum rcanfd_chip_id {
/* Common FIFO bits */
/* RSCFDnCFDCFCCk */
-#define RCANFD_CFCC_CFTML(x) (((x) & 0xf) << 20)
-#define RCANFD_CFCC_CFM(x) (((x) & 0x3) << 16)
+#define RCANFD_CFCC_CFTML(gpriv, x) \
+ (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 16, 20))
+#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_gen4(gpriv, 8, 16))
#define RCANFD_CFCC_CFIM BIT(12)
-#define RCANFD_CFCC_CFDC(x) (((x) & 0x7) << 8)
+#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_gen4(gpriv, 21, 8))
#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4)
#define RCANFD_CFCC_CFTXIE BIT(2)
#define RCANFD_CFCC_CFE BIT(0)
@@ -282,33 +298,31 @@ enum rcanfd_chip_id {
#define RCANFD_GTSC (0x0094)
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR (0x0098)
-/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
-#define RCANFD_GAFLCFG0 (0x009c)
-/* RSCFDnCFDGAFLCFG1 / RSCFDnGAFLCFG1 */
-#define RCANFD_GAFLCFG1 (0x00a0)
+/* RSCFDnCFDGAFLCFG / RSCFDnGAFLCFG */
+#define RCANFD_GAFLCFG(ch) (0x009c + (0x04 * ((ch) / 2)))
/* RSCFDnCFDRMNB / RSCFDnRMNB */
#define RCANFD_RMNB (0x00a4)
/* RSCFDnCFDRMND / RSCFDnRMND */
#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y)))
/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
-#define RCANFD_RFCC(x) (0x00b8 + (0x04 * (x)))
+#define RCANFD_RFCC(gpriv, x) (reg_gen4(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
-#define RCANFD_RFSTS(x) (0x00d8 + (0x04 * (x)))
+#define RCANFD_RFSTS(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x20)
/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
-#define RCANFD_RFPCTR(x) (0x00f8 + (0x04 * (x)))
+#define RCANFD_RFPCTR(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x40)
/* Common FIFO Control registers */
/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
-#define RCANFD_CFCC(ch, idx) (0x0118 + (0x0c * (ch)) + \
- (0x04 * (idx)))
+#define RCANFD_CFCC(gpriv, ch, idx) \
+ (reg_gen4(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
-#define RCANFD_CFSTS(ch, idx) (0x0178 + (0x0c * (ch)) + \
- (0x04 * (idx)))
+#define RCANFD_CFSTS(gpriv, ch, idx) \
+ (reg_gen4(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
-#define RCANFD_CFPCTR(ch, idx) (0x01d8 + (0x0c * (ch)) + \
- (0x04 * (idx)))
+#define RCANFD_CFPCTR(gpriv, ch, idx) \
+ (reg_gen4(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDFESTS / RSCFDnFESTS */
#define RCANFD_FESTS (0x0238)
@@ -387,22 +401,23 @@ enum rcanfd_chip_id {
#define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q)))
/* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */
-#define RCANFD_C_RFOFFSET (0x0e00)
-#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
-#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + \
- (0x10 * (x)))
-#define RCANFD_C_RFDF(x, df) (RCANFD_C_RFOFFSET + 0x08 + \
- (0x10 * (x)) + (0x04 * (df)))
+#define RCANFD_C_RFOFFSET (0x0e00)
+#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x)))
+#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + (0x10 * (x)))
+#define RCANFD_C_RFDF(x, df) \
+ (RCANFD_C_RFOFFSET + 0x08 + (0x10 * (x)) + (0x04 * (df)))
/* RSCFDnCFXXk -> RCANFD_C_CFXX(ch, k) */
#define RCANFD_C_CFOFFSET (0x0e80)
-#define RCANFD_C_CFID(ch, idx) (RCANFD_C_CFOFFSET + (0x30 * (ch)) + \
- (0x10 * (idx)))
-#define RCANFD_C_CFPTR(ch, idx) (RCANFD_C_CFOFFSET + 0x04 + \
- (0x30 * (ch)) + (0x10 * (idx)))
-#define RCANFD_C_CFDF(ch, idx, df) (RCANFD_C_CFOFFSET + 0x08 + \
- (0x30 * (ch)) + (0x10 * (idx)) + \
- (0x04 * (df)))
+
+#define RCANFD_C_CFID(ch, idx) \
+ (RCANFD_C_CFOFFSET + (0x30 * (ch)) + (0x10 * (idx)))
+
+#define RCANFD_C_CFPTR(ch, idx) \
+ (RCANFD_C_CFOFFSET + 0x04 + (0x30 * (ch)) + (0x10 * (idx)))
+
+#define RCANFD_C_CFDF(ch, idx, df) \
+ (RCANFD_C_CFOFFSET + 0x08 + (0x30 * (ch)) + (0x10 * (idx)) + (0x04 * (df)))
/* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */
#define RCANFD_C_TMID(p) (0x1000 + (0x10 * (p)))
@@ -415,10 +430,15 @@ enum rcanfd_chip_id {
/* RSCFDnRPGACCr */
#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
+/* R-Car Gen4 Classical and CAN FD mode specific register map */
+#define RCANFD_GEN4_FDCFG(m) (0x1404 + (0x20 * (m)))
+
+#define RCANFD_GEN4_GAFL_OFFSET (0x1800)
+
/* CAN FD mode specific register map */
/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
-#define RCANFD_F_DCFG(m) (0x0500 + (0x20 * (m)))
+#define RCANFD_F_DCFG(gpriv, m) (reg_gen4(gpriv, 0x1400, 0x0500) + (0x20 * (m)))
#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m)))
#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m)))
#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m)))
@@ -434,26 +454,28 @@ enum rcanfd_chip_id {
#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
-#define RCANFD_F_RFOFFSET (0x3000)
-#define RCANFD_F_RFID(x) (RCANFD_F_RFOFFSET + (0x80 * (x)))
-#define RCANFD_F_RFPTR(x) (RCANFD_F_RFOFFSET + 0x04 + \
- (0x80 * (x)))
-#define RCANFD_F_RFFDSTS(x) (RCANFD_F_RFOFFSET + 0x08 + \
- (0x80 * (x)))
-#define RCANFD_F_RFDF(x, df) (RCANFD_F_RFOFFSET + 0x0c + \
- (0x80 * (x)) + (0x04 * (df)))
+#define RCANFD_F_RFOFFSET(gpriv) reg_gen4(gpriv, 0x6000, 0x3000)
+#define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x)))
+#define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x)))
+#define RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x)))
+#define RCANFD_F_RFDF(gpriv, x, df) \
+ (RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df)))
/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
-#define RCANFD_F_CFOFFSET (0x3400)
-#define RCANFD_F_CFID(ch, idx) (RCANFD_F_CFOFFSET + (0x180 * (ch)) + \
- (0x80 * (idx)))
-#define RCANFD_F_CFPTR(ch, idx) (RCANFD_F_CFOFFSET + 0x04 + \
- (0x180 * (ch)) + (0x80 * (idx)))
-#define RCANFD_F_CFFDCSTS(ch, idx) (RCANFD_F_CFOFFSET + 0x08 + \
- (0x180 * (ch)) + (0x80 * (idx)))
-#define RCANFD_F_CFDF(ch, idx, df) (RCANFD_F_CFOFFSET + 0x0c + \
- (0x180 * (ch)) + (0x80 * (idx)) + \
- (0x04 * (df)))
+#define RCANFD_F_CFOFFSET(gpriv) reg_gen4(gpriv, 0x6400, 0x3400)
+
+#define RCANFD_F_CFID(gpriv, ch, idx) \
+ (RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFPTR(gpriv, ch, idx) \
+ (RCANFD_F_CFOFFSET(gpriv) + 0x04 + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFFDCSTS(gpriv, ch, idx) \
+ (RCANFD_F_CFOFFSET(gpriv) + 0x08 + (0x180 * (ch)) + (0x80 * (idx)))
+
+#define RCANFD_F_CFDF(gpriv, ch, idx, df) \
+ (RCANFD_F_CFOFFSET(gpriv) + 0x0c + (0x180 * (ch)) + (0x80 * (idx)) + \
+ (0x04 * (df)))
/* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */
#define RCANFD_F_TMID(p) (0x4000 + (0x20 * (p)))
@@ -470,7 +492,7 @@ enum rcanfd_chip_id {
#define RCANFD_FIFO_DEPTH 8 /* Tx FIFO depth */
#define RCANFD_NAPI_WEIGHT 8 /* Rx poll quota */
-#define RCANFD_NUM_CHANNELS 2 /* Two channels max */
+#define RCANFD_NUM_CHANNELS 8 /* Eight channels max */
#define RCANFD_CHANNELS_MASK BIT((RCANFD_NUM_CHANNELS) - 1)
#define RCANFD_GAFL_PAGENUM(entry) ((entry) / 16)
@@ -495,14 +517,22 @@ enum rcar_canfd_fcanclk {
struct rcar_canfd_global;
+struct rcar_canfd_hw_info {
+ u8 max_channels;
+ u8 postdiv;
+ /* hardware features */
+ unsigned shared_global_irqs:1; /* Has shared global irqs */
+ unsigned multi_channel_irqs:1; /* Has multiple channel irqs */
+};
+
/* Channel priv data */
struct rcar_canfd_channel {
struct can_priv can; /* Must be the first member */
struct net_device *ndev;
struct rcar_canfd_global *gpriv; /* Controller reference */
void __iomem *base; /* Register base address */
+ struct phy *transceiver; /* Optional transceiver */
struct napi_struct napi;
- u8 tx_len[RCANFD_FIFO_DEPTH]; /* For net stats */
u32 tx_head; /* Incremented on xmit */
u32 tx_tail; /* Incremented on xmit done */
u32 channel; /* Channel number */
@@ -521,7 +551,7 @@ struct rcar_canfd_global {
bool fdmode; /* CAN FD or Classical CAN only mode */
struct reset_control *rstc1;
struct reset_control *rstc2;
- enum rcanfd_chip_id chip_id;
+ const struct rcar_canfd_hw_info *info;
};
/* CAN FD mode nominal rate constants */
@@ -563,7 +593,36 @@ static const struct can_bittiming_const rcar_canfd_bittiming_const = {
.brp_inc = 1,
};
+static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
+ .max_channels = 2,
+ .postdiv = 2,
+ .shared_global_irqs = 1,
+};
+
+static const struct rcar_canfd_hw_info rcar_gen4_hw_info = {
+ .max_channels = 8,
+ .postdiv = 2,
+ .shared_global_irqs = 1,
+};
+
+static const struct rcar_canfd_hw_info rzg2l_hw_info = {
+ .max_channels = 2,
+ .postdiv = 1,
+ .multi_channel_irqs = 1,
+};
+
/* Helper functions */
+static inline bool is_gen4(struct rcar_canfd_global *gpriv)
+{
+ return gpriv->info == &rcar_gen4_hw_info;
+}
+
+static inline u32 reg_gen4(struct rcar_canfd_global *gpriv,
+ u32 gen4, u32 not_gen4)
+{
+ return is_gen4(gpriv) ? gen4 : not_gen4;
+}
+
static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
{
u32 data = readl(reg);
@@ -629,6 +688,26 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
can_free_echo_skb(ndev, i, NULL);
}
+static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
+{
+ if (is_gen4(gpriv)) {
+ u32 ch, val = gpriv->fdmode ? RCANFD_GEN4_FDCFG_FDOE
+ : RCANFD_GEN4_FDCFG_CLOE;
+
+ for_each_set_bit(ch, &gpriv->channels_mask,
+ gpriv->info->max_channels)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GEN4_FDCFG(ch),
+ val);
+ } else {
+ if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ else
+ rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
+ RCANFD_GRMCFG_RCMC);
+ }
+}
+
static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
{
u32 sts, ch;
@@ -661,15 +740,10 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
/* Set the controller into appropriate mode */
- if (gpriv->fdmode)
- rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
- else
- rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
- RCANFD_GRMCFG_RCMC);
+ rcar_canfd_set_mode(gpriv);
/* Transition all Channels to reset mode */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_clear_bit(gpriv->base,
RCANFD_CCTR(ch), RCANFD_CCTR_CSLPR);
@@ -710,7 +784,7 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg);
/* Channel configuration settings */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_set_bit(gpriv->base, RCANFD_CCTR(ch),
RCANFD_CCTR_ERRD);
rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch),
@@ -730,20 +804,22 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
start = 0; /* Channel 0 always starts from 0th rule */
} else {
/* Get number of Channel 0 rules and adjust */
- cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG0);
- start = RCANFD_GAFLCFG_GETRNC(0, cfg);
+ cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch));
+ start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg);
}
/* Enable write access to entry */
page = RCANFD_GAFL_PAGENUM(start);
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR,
- (RCANFD_GAFLECTR_AFLPN(page) |
+ (RCANFD_GAFLECTR_AFLPN(gpriv, page) |
RCANFD_GAFLECTR_AFLDAE));
/* Write number of rules for channel */
- rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG0,
- RCANFD_GAFLCFG_SETRNC(ch, num_rules));
- if (gpriv->fdmode)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(ch),
+ RCANFD_GAFLCFG_SETRNC(gpriv, ch, num_rules));
+ if (is_gen4(gpriv))
+ offset = RCANFD_GEN4_GAFL_OFFSET;
+ else if (gpriv->fdmode)
offset = RCANFD_F_GAFL_OFFSET;
else
offset = RCANFD_C_GAFL_OFFSET;
@@ -755,8 +831,8 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
/* Any data length accepted */
rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0);
/* Place the msg in corresponding Rx FIFO entry */
- rcar_canfd_write(gpriv->base, RCANFD_GAFLP1(offset, start),
- RCANFD_GAFLP1_GAFLFDP(ridx));
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start),
+ RCANFD_GAFLP1_GAFLFDP(ridx));
/* Disable write access to page */
rcar_canfd_clear_bit(gpriv->base,
@@ -780,7 +856,7 @@ static void rcar_canfd_configure_rx(struct rcar_canfd_global *gpriv, u32 ch)
cfg = (RCANFD_RFCC_RFIM | RCANFD_RFCC_RFDC(rfdc) |
RCANFD_RFCC_RFPLS(rfpls) | RCANFD_RFCC_RFIE);
- rcar_canfd_write(gpriv->base, RCANFD_RFCC(ridx), cfg);
+ rcar_canfd_write(gpriv->base, RCANFD_RFCC(gpriv, ridx), cfg);
}
static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch)
@@ -802,15 +878,15 @@ static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch)
else
cfpls = 0; /* b000 - Max 8 bytes payload */
- cfg = (RCANFD_CFCC_CFTML(cftml) | RCANFD_CFCC_CFM(cfm) |
- RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(cfdc) |
+ cfg = (RCANFD_CFCC_CFTML(gpriv, cftml) | RCANFD_CFCC_CFM(gpriv, cfm) |
+ RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(gpriv, cfdc) |
RCANFD_CFCC_CFPLS(cfpls) | RCANFD_CFCC_CFTXIE);
- rcar_canfd_write(gpriv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), cfg);
+ rcar_canfd_write(gpriv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX), cfg);
if (gpriv->fdmode)
/* Clear FD mode specific control/status register */
rcar_canfd_write(gpriv->base,
- RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), 0);
+ RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), 0);
}
static void rcar_canfd_enable_global_interrupts(struct rcar_canfd_global *gpriv)
@@ -881,30 +957,26 @@ static void rcar_canfd_global_error(struct net_device *ndev)
u32 ridx = ch + RCANFD_RFFIFO_IDX;
gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL);
- if ((gerfl & RCANFD_GERFL_EEF0) && (ch == 0)) {
- netdev_dbg(ndev, "Ch0: ECC Error flag\n");
- stats->tx_dropped++;
- }
- if ((gerfl & RCANFD_GERFL_EEF1) && (ch == 1)) {
- netdev_dbg(ndev, "Ch1: ECC Error flag\n");
+ if (gerfl & RCANFD_GERFL_EEF(ch)) {
+ netdev_dbg(ndev, "Ch%u: ECC Error flag\n", ch);
stats->tx_dropped++;
}
if (gerfl & RCANFD_GERFL_MES) {
sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
if (sts & RCANFD_CFSTS_CFMLT) {
netdev_dbg(ndev, "Tx Message Lost flag\n");
stats->tx_dropped++;
rcar_canfd_write(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX),
sts & ~RCANFD_CFSTS_CFMLT);
}
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
if (sts & RCANFD_RFSTS_RFMLT) {
netdev_dbg(ndev, "Rx Message Lost flag\n");
stats->rx_dropped++;
- rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+ rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx),
sts & ~RCANFD_RFSTS_RFMLT);
}
}
@@ -998,7 +1070,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
netdev_dbg(ndev, "Error warning interrupt\n");
priv->can.state = CAN_STATE_ERROR_WARNING;
priv->can.can_stats.error_warning++;
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
cf->data[6] = txerr;
@@ -1008,7 +1080,7 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
netdev_dbg(ndev, "Error passive interrupt\n");
priv->can.state = CAN_STATE_ERROR_PASSIVE;
priv->can.can_stats.error_passive++;
- cf->can_id |= CAN_ERR_CRTL;
+ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
cf->data[6] = txerr;
@@ -1033,14 +1105,13 @@ static void rcar_canfd_error(struct net_device *ndev, u32 cerfl,
/* Clear channel error interrupts that are handled */
rcar_canfd_write(priv->base, RCANFD_CERFL(ch),
RCANFD_CERFL_ERR(~cerfl));
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
static void rcar_canfd_tx_done(struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
struct net_device_stats *stats = &ndev->stats;
u32 sts;
unsigned long flags;
@@ -1051,14 +1122,12 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
sent = priv->tx_tail % RCANFD_FIFO_DEPTH;
stats->tx_packets++;
- stats->tx_bytes += priv->tx_len[sent];
- priv->tx_len[sent] = 0;
- can_get_echo_skb(ndev, sent, NULL);
+ stats->tx_bytes += can_get_echo_skb(ndev, sent, NULL);
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_tail++;
sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
unsent = RCANFD_CFSTS_CFMC(sts);
/* Wake producer only when there is room */
@@ -1074,9 +1143,8 @@ static void rcar_canfd_tx_done(struct net_device *ndev)
} while (1);
/* Clear interrupt */
- rcar_canfd_write(priv->base, RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX),
+ rcar_canfd_write(priv->base, RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX),
sts & ~RCANFD_CFSTS_CFTXIF);
- can_led_event(ndev, CAN_LED_EVENT_TX);
}
static void rcar_canfd_handle_global_err(struct rcar_canfd_global *gpriv, u32 ch)
@@ -1096,7 +1164,7 @@ static irqreturn_t rcar_canfd_global_err_interrupt(int irq, void *dev_id)
struct rcar_canfd_global *gpriv = dev_id;
u32 ch;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels)
rcar_canfd_handle_global_err(gpriv, ch);
return IRQ_HANDLED;
@@ -1106,15 +1174,17 @@ static void rcar_canfd_handle_global_receive(struct rcar_canfd_global *gpriv, u3
{
struct rcar_canfd_channel *priv = gpriv->ch[ch];
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- u32 sts;
+ u32 sts, cc;
/* Handle Rx interrupts */
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
- if (likely(sts & RCANFD_RFSTS_RFIF)) {
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
+ cc = rcar_canfd_read(priv->base, RCANFD_RFCC(gpriv, ridx));
+ if (likely(sts & RCANFD_RFSTS_RFIF &&
+ cc & RCANFD_RFCC_RFIE)) {
if (napi_schedule_prep(&priv->napi)) {
/* Disable Rx FIFO interrupts */
rcar_canfd_clear_bit(priv->base,
- RCANFD_RFCC(ridx),
+ RCANFD_RFCC(gpriv, ridx),
RCANFD_RFCC_RFIE);
__napi_schedule(&priv->napi);
}
@@ -1126,7 +1196,7 @@ static irqreturn_t rcar_canfd_global_receive_fifo_interrupt(int irq, void *dev_i
struct rcar_canfd_global *gpriv = dev_id;
u32 ch;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels)
rcar_canfd_handle_global_receive(gpriv, ch);
return IRQ_HANDLED;
@@ -1140,7 +1210,7 @@ static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id)
/* Global error interrupts still indicate a condition specific
* to a channel. RxFIFO interrupt is a global interrupt.
*/
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_handle_global_err(gpriv, ch);
rcar_canfd_handle_global_receive(gpriv, ch);
}
@@ -1174,8 +1244,6 @@ static void rcar_canfd_state_change(struct net_device *ndev,
rx_state = txerr <= rxerr ? state : 0;
can_change_state(ndev, cf, tx_state, rx_state);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
}
@@ -1188,18 +1256,16 @@ static void rcar_canfd_handle_channel_tx(struct rcar_canfd_global *gpriv, u32 ch
/* Handle Tx interrupts */
sts = rcar_canfd_read(priv->base,
- RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX));
+ RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX));
if (likely(sts & RCANFD_CFSTS_CFTXIF))
rcar_canfd_tx_done(ndev);
}
static irqreturn_t rcar_canfd_channel_tx_interrupt(int irq, void *dev_id)
{
- struct rcar_canfd_global *gpriv = dev_id;
- u32 ch;
+ struct rcar_canfd_channel *priv = dev_id;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
- rcar_canfd_handle_channel_tx(gpriv, ch);
+ rcar_canfd_handle_channel_tx(priv->gpriv, priv->channel);
return IRQ_HANDLED;
}
@@ -1227,11 +1293,9 @@ static void rcar_canfd_handle_channel_err(struct rcar_canfd_global *gpriv, u32 c
static irqreturn_t rcar_canfd_channel_err_interrupt(int irq, void *dev_id)
{
- struct rcar_canfd_global *gpriv = dev_id;
- u32 ch;
+ struct rcar_canfd_channel *priv = dev_id;
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
- rcar_canfd_handle_channel_err(gpriv, ch);
+ rcar_canfd_handle_channel_err(priv->gpriv, priv->channel);
return IRQ_HANDLED;
}
@@ -1242,7 +1306,7 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
u32 ch;
/* Common FIFO is a per channel resource */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_handle_channel_err(gpriv, ch);
rcar_canfd_handle_channel_tx(gpriv, ch);
}
@@ -1253,6 +1317,7 @@ static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id)
static void rcar_canfd_set_bittiming(struct net_device *dev)
{
struct rcar_canfd_channel *priv = netdev_priv(dev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
const struct can_bittiming *bt = &priv->can.bittiming;
const struct can_bittiming *dbt = &priv->can.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
@@ -1267,8 +1332,8 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
/* CAN FD only mode */
- cfg = (RCANFD_NCFG_NTSEG1(tseg1) | RCANFD_NCFG_NBRP(brp) |
- RCANFD_NCFG_NSJW(sjw) | RCANFD_NCFG_NTSEG2(tseg2));
+ cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) |
+ RCANFD_NCFG_NSJW(gpriv, sjw) | RCANFD_NCFG_NTSEG2(gpriv, tseg2));
rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
@@ -1280,16 +1345,25 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
tseg2 = dbt->phase_seg2 - 1;
- cfg = (RCANFD_DCFG_DTSEG1(tseg1) | RCANFD_DCFG_DBRP(brp) |
- RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(tseg2));
+ cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
+ RCANFD_DCFG_DSJW(gpriv, sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
- rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
+ rcar_canfd_write(priv->base, RCANFD_F_DCFG(gpriv, ch), cfg);
netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
brp, sjw, tseg1, tseg2);
} else {
/* Classical CAN only mode */
- cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) |
- RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2));
+ if (is_gen4(gpriv)) {
+ cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) |
+ RCANFD_NCFG_NBRP(brp) |
+ RCANFD_NCFG_NSJW(gpriv, sjw) |
+ RCANFD_NCFG_NTSEG2(gpriv, tseg2));
+ } else {
+ cfg = (RCANFD_CFG_TSEG1(tseg1) |
+ RCANFD_CFG_BRP(brp) |
+ RCANFD_CFG_SJW(sjw) |
+ RCANFD_CFG_TSEG2(tseg2));
+ }
rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg);
netdev_dbg(priv->ndev,
@@ -1301,6 +1375,7 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
static int rcar_canfd_start(struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
int err = -EOPNOTSUPP;
u32 sts, ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
@@ -1322,9 +1397,9 @@ static int rcar_canfd_start(struct net_device *ndev)
}
/* Enable Common & Rx FIFO */
- rcar_canfd_set_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+ rcar_canfd_set_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX),
RCANFD_CFCC_CFE);
- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
return 0;
@@ -1340,16 +1415,22 @@ static int rcar_canfd_open(struct net_device *ndev)
struct rcar_canfd_global *gpriv = priv->gpriv;
int err;
+ err = phy_power_on(priv->transceiver);
+ if (err) {
+ netdev_err(ndev, "failed to power on PHY: %pe\n", ERR_PTR(err));
+ return err;
+ }
+
/* Peripheral clock is already enabled in probe */
err = clk_prepare_enable(gpriv->can_clk);
if (err) {
- netdev_err(ndev, "failed to enable CAN clock, error %d\n", err);
- goto out_clock;
+ netdev_err(ndev, "failed to enable CAN clock: %pe\n", ERR_PTR(err));
+ goto out_phy;
}
err = open_candev(ndev);
if (err) {
- netdev_err(ndev, "open_candev() failed, error %d\n", err);
+ netdev_err(ndev, "open_candev() failed: %pe\n", ERR_PTR(err));
goto out_can_clock;
}
@@ -1358,20 +1439,21 @@ static int rcar_canfd_open(struct net_device *ndev)
if (err)
goto out_close;
netif_start_queue(ndev);
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
return 0;
out_close:
napi_disable(&priv->napi);
close_candev(ndev);
out_can_clock:
clk_disable_unprepare(gpriv->can_clk);
-out_clock:
+out_phy:
+ phy_power_off(priv->transceiver);
return err;
}
static void rcar_canfd_stop(struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
int err;
u32 sts, ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
@@ -1389,9 +1471,9 @@ static void rcar_canfd_stop(struct net_device *ndev)
rcar_canfd_disable_channel_interrupts(priv);
/* Disable Common & Rx FIFO */
- rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX),
+ rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX),
RCANFD_CFCC_CFE);
- rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE);
+ rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE);
/* Set the state as STOPPED */
priv->can.state = CAN_STATE_STOPPED;
@@ -1407,7 +1489,7 @@ static int rcar_canfd_close(struct net_device *ndev)
napi_disable(&priv->napi);
clk_disable_unprepare(gpriv->can_clk);
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
+ phy_power_off(priv->transceiver);
return 0;
}
@@ -1415,12 +1497,13 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct rcar_canfd_channel *priv = netdev_priv(ndev);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
u32 sts = 0, id, dlc;
unsigned long flags;
u32 ch = priv->channel;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (cf->can_id & CAN_EFF_FLAG) {
@@ -1435,11 +1518,11 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len));
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
rcar_canfd_write(priv->base,
- RCANFD_F_CFID(ch, RCANFD_CFFIFO_IDX), id);
+ RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id);
rcar_canfd_write(priv->base,
- RCANFD_F_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc);
+ RCANFD_F_CFPTR(gpriv, ch, RCANFD_CFFIFO_IDX), dlc);
if (can_is_canfd_skb(skb)) {
/* CAN FD frame format */
@@ -1452,10 +1535,10 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
}
rcar_canfd_write(priv->base,
- RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), sts);
+ RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), sts);
rcar_canfd_put_data(priv, cf,
- RCANFD_F_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
+ RCANFD_F_CFDF(gpriv, ch, RCANFD_CFFIFO_IDX, 0));
} else {
rcar_canfd_write(priv->base,
RCANFD_C_CFID(ch, RCANFD_CFFIFO_IDX), id);
@@ -1465,7 +1548,6 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
RCANFD_C_CFDF(ch, RCANFD_CFFIFO_IDX, 0));
}
- priv->tx_len[priv->tx_head % RCANFD_FIFO_DEPTH] = cf->len;
can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH, 0);
spin_lock_irqsave(&priv->tx_lock, flags);
@@ -1479,7 +1561,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
* pointer for the Common FIFO
*/
rcar_canfd_write(priv->base,
- RCANFD_CFPCTR(ch, RCANFD_CFFIFO_IDX), 0xff);
+ RCANFD_CFPCTR(gpriv, ch, RCANFD_CFFIFO_IDX), 0xff);
spin_unlock_irqrestore(&priv->tx_lock, flags);
return NETDEV_TX_OK;
@@ -1488,18 +1570,21 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
{
struct net_device_stats *stats = &priv->ndev->stats;
+ struct rcar_canfd_global *gpriv = priv->gpriv;
struct canfd_frame *cf;
struct sk_buff *skb;
u32 sts = 0, id, dlc;
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
- id = rcar_canfd_read(priv->base, RCANFD_F_RFID(ridx));
- dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(ridx));
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
+ id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx));
+ dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx));
- sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(ridx));
- if (sts & RCANFD_RFFDSTS_RFFDF)
+ sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(gpriv, ridx));
+
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
+ sts & RCANFD_RFFDSTS_RFFDF)
skb = alloc_canfd_skb(priv->ndev, &cf);
else
skb = alloc_can_skb(priv->ndev,
@@ -1537,12 +1622,14 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
if (sts & RCANFD_RFFDSTS_RFBRS)
cf->flags |= CANFD_BRS;
- rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(ridx, 0));
+ rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
}
} else {
cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
if (id & RCANFD_RFID_RFRTR)
cf->can_id |= CAN_RTR_FLAG;
+ else if (is_gen4(gpriv))
+ rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
else
rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
}
@@ -1550,11 +1637,10 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
/* Write 0xff to RFPC to increment the CPU-side
* pointer of the Rx FIFO
*/
- rcar_canfd_write(priv->base, RCANFD_RFPCTR(ridx), 0xff);
-
- can_led_event(priv->ndev, CAN_LED_EVENT_RX);
+ rcar_canfd_write(priv->base, RCANFD_RFPCTR(gpriv, ridx), 0xff);
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
stats->rx_packets++;
netif_receive_skb(skb);
}
@@ -1563,13 +1649,14 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
{
struct rcar_canfd_channel *priv =
container_of(napi, struct rcar_canfd_channel, napi);
+ struct rcar_canfd_global *gpriv = priv->gpriv;
int num_pkts;
u32 sts;
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
for (num_pkts = 0; num_pkts < quota; num_pkts++) {
- sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx));
+ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx));
/* Check FIFO empty condition */
if (sts & RCANFD_RFSTS_RFEMP)
break;
@@ -1578,7 +1665,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
/* Clear interrupt bit */
if (sts & RCANFD_RFSTS_RFIF)
- rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx),
+ rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx),
sts & ~RCANFD_RFSTS_RFIF);
}
@@ -1586,7 +1673,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
if (num_pkts < quota) {
if (napi_complete_done(napi, num_pkts)) {
/* Enable Rx FIFO interrupts */
- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx),
RCANFD_RFCC_RFIE);
}
}
@@ -1629,31 +1716,40 @@ static const struct net_device_ops rcar_canfd_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops rcar_canfd_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
- u32 fcan_freq)
+ u32 fcan_freq, struct phy *transceiver)
{
+ const struct rcar_canfd_hw_info *info = gpriv->info;
struct platform_device *pdev = gpriv->pdev;
+ struct device *dev = &pdev->dev;
struct rcar_canfd_channel *priv;
struct net_device *ndev;
int err = -ENODEV;
ndev = alloc_candev(sizeof(*priv), RCANFD_FIFO_DEPTH);
- if (!ndev) {
- dev_err(&pdev->dev, "alloc_candev() failed\n");
- err = -ENOMEM;
- goto fail;
- }
+ if (!ndev)
+ return -ENOMEM;
+
priv = netdev_priv(ndev);
ndev->netdev_ops = &rcar_canfd_netdev_ops;
+ ndev->ethtool_ops = &rcar_canfd_ethtool_ops;
ndev->flags |= IFF_ECHO;
priv->ndev = ndev;
priv->base = gpriv->base;
+ priv->transceiver = transceiver;
priv->channel = ch;
+ priv->gpriv = gpriv;
+ if (transceiver)
+ priv->can.bitrate_max = transceiver->attrs.max_link_rate;
priv->can.clock.freq = fcan_freq;
- dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq);
+ dev_info(dev, "can_clk rate is %u\n", priv->can.clock.freq);
- if (gpriv->chip_id == RENESAS_RZG2L) {
+ if (info->multi_channel_irqs) {
char *irq_name;
int err_irq;
int tx_irq;
@@ -1670,32 +1766,32 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
goto fail;
}
- irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "canfd.ch%d_err", ch);
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "canfd.ch%d_err",
+ ch);
if (!irq_name) {
err = -ENOMEM;
goto fail;
}
- err = devm_request_irq(&pdev->dev, err_irq,
+ err = devm_request_irq(dev, err_irq,
rcar_canfd_channel_err_interrupt, 0,
- irq_name, gpriv);
+ irq_name, priv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq CH Err(%d) failed, error %d\n",
- err_irq, err);
+ dev_err(dev, "devm_request_irq CH Err %d failed: %pe\n",
+ err_irq, ERR_PTR(err));
goto fail;
}
- irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "canfd.ch%d_trx", ch);
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "canfd.ch%d_trx",
+ ch);
if (!irq_name) {
err = -ENOMEM;
goto fail;
}
- err = devm_request_irq(&pdev->dev, tx_irq,
+ err = devm_request_irq(dev, tx_irq,
rcar_canfd_channel_tx_interrupt, 0,
- irq_name, gpriv);
+ irq_name, priv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq Tx (%d) failed, error %d\n",
- tx_irq, err);
+ dev_err(dev, "devm_request_irq Tx %d failed: %pe\n",
+ tx_irq, ERR_PTR(err));
goto fail;
}
}
@@ -1706,7 +1802,9 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
&rcar_canfd_data_bittiming_const;
/* Controller starts in CAN FD only mode */
- can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
+ err = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD);
+ if (err)
+ goto fail;
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
} else {
/* Controller starts in Classical CAN only mode */
@@ -1716,27 +1814,24 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
priv->can.do_set_mode = rcar_canfd_do_set_mode;
priv->can.do_get_berr_counter = rcar_canfd_get_berr_counter;
- priv->gpriv = gpriv;
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, dev);
- netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll,
- RCANFD_NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &priv->napi, rcar_canfd_rx_poll,
+ RCANFD_NAPI_WEIGHT);
+ spin_lock_init(&priv->tx_lock);
+ gpriv->ch[priv->channel] = priv;
err = register_candev(ndev);
if (err) {
- dev_err(&pdev->dev,
- "register_candev() failed, error %d\n", err);
+ dev_err(dev, "register_candev() failed: %pe\n", ERR_PTR(err));
goto fail_candev;
}
- spin_lock_init(&priv->tx_lock);
- devm_can_led_init(ndev);
- gpriv->ch[priv->channel] = priv;
- dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
+ dev_info(dev, "device registered (channel %u)\n", priv->channel);
return 0;
fail_candev:
netif_napi_del(&priv->napi);
- free_candev(ndev);
fail:
+ free_candev(ndev);
return err;
}
@@ -1753,6 +1848,9 @@ static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch)
static int rcar_canfd_probe(struct platform_device *pdev)
{
+ struct phy *transceivers[RCANFD_NUM_CHANNELS] = { NULL, };
+ const struct rcar_canfd_hw_info *info;
+ struct device *dev = &pdev->dev;
void __iomem *addr;
u32 sts, ch, fcan_freq;
struct rcar_canfd_global *gpriv;
@@ -1761,22 +1859,28 @@ static int rcar_canfd_probe(struct platform_device *pdev)
int err, ch_irq, g_irq;
int g_err_irq, g_recc_irq;
bool fdmode = true; /* CAN FD only mode - default */
- enum rcanfd_chip_id chip_id;
+ char name[9] = "channelX";
+ int i;
- chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ info = of_device_get_match_data(dev);
- if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd"))
+ if (of_property_read_bool(dev->of_node, "renesas,no-can-fd"))
fdmode = false; /* Classical CAN only mode */
- of_child = of_get_child_by_name(pdev->dev.of_node, "channel0");
- if (of_child && of_device_is_available(of_child))
- channels_mask |= BIT(0); /* Channel 0 */
-
- of_child = of_get_child_by_name(pdev->dev.of_node, "channel1");
- if (of_child && of_device_is_available(of_child))
- channels_mask |= BIT(1); /* Channel 1 */
+ for (i = 0; i < info->max_channels; ++i) {
+ name[7] = '0' + i;
+ of_child = of_get_child_by_name(dev->of_node, name);
+ if (of_child && of_device_is_available(of_child)) {
+ channels_mask |= BIT(i);
+ transceivers[i] = devm_of_phy_optional_get(dev,
+ of_child, NULL);
+ }
+ of_node_put(of_child);
+ if (IS_ERR(transceivers[i]))
+ return PTR_ERR(transceivers[i]);
+ }
- if (chip_id == RENESAS_RCAR_GEN3) {
+ if (info->shared_global_irqs) {
ch_irq = platform_get_irq_byname_optional(pdev, "ch_int");
if (ch_irq < 0) {
/* For backward compatibility get irq by index */
@@ -1803,49 +1907,41 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
/* Global controller context */
- gpriv = devm_kzalloc(&pdev->dev, sizeof(*gpriv), GFP_KERNEL);
- if (!gpriv) {
- err = -ENOMEM;
- goto fail_dev;
- }
+ gpriv = devm_kzalloc(dev, sizeof(*gpriv), GFP_KERNEL);
+ if (!gpriv)
+ return -ENOMEM;
+
gpriv->pdev = pdev;
gpriv->channels_mask = channels_mask;
gpriv->fdmode = fdmode;
- gpriv->chip_id = chip_id;
-
- if (gpriv->chip_id == RENESAS_RZG2L) {
- gpriv->rstc1 = devm_reset_control_get_exclusive(&pdev->dev, "rstp_n");
- if (IS_ERR(gpriv->rstc1))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc1),
- "failed to get rstp_n\n");
-
- gpriv->rstc2 = devm_reset_control_get_exclusive(&pdev->dev, "rstc_n");
- if (IS_ERR(gpriv->rstc2))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc2),
- "failed to get rstc_n\n");
- }
+ gpriv->info = info;
+
+ gpriv->rstc1 = devm_reset_control_get_optional_exclusive(dev, "rstp_n");
+ if (IS_ERR(gpriv->rstc1))
+ return dev_err_probe(dev, PTR_ERR(gpriv->rstc1),
+ "failed to get rstp_n\n");
+
+ gpriv->rstc2 = devm_reset_control_get_optional_exclusive(dev, "rstc_n");
+ if (IS_ERR(gpriv->rstc2))
+ return dev_err_probe(dev, PTR_ERR(gpriv->rstc2),
+ "failed to get rstc_n\n");
/* Peripheral clock */
- gpriv->clkp = devm_clk_get(&pdev->dev, "fck");
- if (IS_ERR(gpriv->clkp)) {
- err = PTR_ERR(gpriv->clkp);
- dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
- err);
- goto fail_dev;
- }
+ gpriv->clkp = devm_clk_get(dev, "fck");
+ if (IS_ERR(gpriv->clkp))
+ return dev_err_probe(dev, PTR_ERR(gpriv->clkp),
+ "cannot get peripheral clock\n");
/* fCAN clock: Pick External clock. If not available fallback to
* CANFD clock
*/
- gpriv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
+ gpriv->can_clk = devm_clk_get(dev, "can_clk");
if (IS_ERR(gpriv->can_clk) || (clk_get_rate(gpriv->can_clk) == 0)) {
- gpriv->can_clk = devm_clk_get(&pdev->dev, "canfd");
- if (IS_ERR(gpriv->can_clk)) {
- err = PTR_ERR(gpriv->can_clk);
- dev_err(&pdev->dev,
- "cannot get canfd clock, error %d\n", err);
- goto fail_dev;
- }
+ gpriv->can_clk = devm_clk_get(dev, "canfd");
+ if (IS_ERR(gpriv->can_clk))
+ return dev_err_probe(dev, PTR_ERR(gpriv->can_clk),
+ "cannot get canfd clock\n");
+
gpriv->fcan = RCANFD_CANFDCLK;
} else {
@@ -1853,9 +1949,9 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
fcan_freq = clk_get_rate(gpriv->can_clk);
- if (gpriv->fcan == RCANFD_CANFDCLK && gpriv->chip_id == RENESAS_RCAR_GEN3)
+ if (gpriv->fcan == RCANFD_CANFDCLK)
/* CANFD clock is further divided by (1/2) within the IP */
- fcan_freq /= 2;
+ fcan_freq /= info->postdiv;
addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
@@ -1865,41 +1961,40 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->base = addr;
/* Request IRQ that's common for both channels */
- if (gpriv->chip_id == RENESAS_RCAR_GEN3) {
- err = devm_request_irq(&pdev->dev, ch_irq,
+ if (info->shared_global_irqs) {
+ err = devm_request_irq(dev, ch_irq,
rcar_canfd_channel_interrupt, 0,
"canfd.ch_int", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
- ch_irq, err);
+ dev_err(dev, "devm_request_irq %d failed: %pe\n",
+ ch_irq, ERR_PTR(err));
goto fail_dev;
}
- err = devm_request_irq(&pdev->dev, g_irq,
- rcar_canfd_global_interrupt, 0,
- "canfd.g_int", gpriv);
+ err = devm_request_irq(dev, g_irq, rcar_canfd_global_interrupt,
+ 0, "canfd.g_int", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
- g_irq, err);
+ dev_err(dev, "devm_request_irq %d failed: %pe\n",
+ g_irq, ERR_PTR(err));
goto fail_dev;
}
} else {
- err = devm_request_irq(&pdev->dev, g_recc_irq,
+ err = devm_request_irq(dev, g_recc_irq,
rcar_canfd_global_receive_fifo_interrupt, 0,
"canfd.g_recc", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
- g_recc_irq, err);
+ dev_err(dev, "devm_request_irq %d failed: %pe\n",
+ g_recc_irq, ERR_PTR(err));
goto fail_dev;
}
- err = devm_request_irq(&pdev->dev, g_err_irq,
+ err = devm_request_irq(dev, g_err_irq,
rcar_canfd_global_err_interrupt, 0,
"canfd.g_err", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
- g_err_irq, err);
+ dev_err(dev, "devm_request_irq %d failed: %pe\n",
+ g_err_irq, ERR_PTR(err));
goto fail_dev;
}
}
@@ -1916,14 +2011,14 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* Enable peripheral clock for register access */
err = clk_prepare_enable(gpriv->clkp);
if (err) {
- dev_err(&pdev->dev,
- "failed to enable peripheral clock, error %d\n", err);
+ dev_err(dev, "failed to enable peripheral clock: %pe\n",
+ ERR_PTR(err));
goto fail_reset;
}
err = rcar_canfd_reset_controller(gpriv);
if (err) {
- dev_err(&pdev->dev, "reset controller failed\n");
+ dev_err(dev, "reset controller failed: %pe\n", ERR_PTR(err));
goto fail_clk;
}
@@ -1931,7 +2026,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
rcar_canfd_configure_controller(gpriv);
/* Configure per channel attributes */
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels) {
/* Configure Channel's Rx fifo */
rcar_canfd_configure_rx(gpriv, ch);
@@ -1953,23 +2048,24 @@ static int rcar_canfd_probe(struct platform_device *pdev)
err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
!(sts & RCANFD_GSTS_GNOPM), 2, 500000);
if (err) {
- dev_err(&pdev->dev, "global operational mode failed\n");
+ dev_err(dev, "global operational mode failed\n");
goto fail_mode;
}
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
- err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq);
+ for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels) {
+ err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq,
+ transceivers[ch]);
if (err)
goto fail_channel;
}
platform_set_drvdata(pdev, gpriv);
- dev_info(&pdev->dev, "global operational state (clk %d, fdmode %d)\n",
+ dev_info(dev, "global operational state (clk %d, fdmode %d)\n",
gpriv->fcan, gpriv->fdmode);
return 0;
fail_channel:
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS)
+ for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels)
rcar_canfd_channel_remove(gpriv, ch);
fail_mode:
rcar_canfd_disable_global_interrupts(gpriv);
@@ -1990,7 +2086,7 @@ static int rcar_canfd_remove(struct platform_device *pdev)
rcar_canfd_reset_controller(gpriv);
rcar_canfd_disable_global_interrupts(gpriv);
- for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) {
+ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]);
rcar_canfd_channel_remove(gpriv, ch);
}
@@ -2018,8 +2114,10 @@ static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
rcar_canfd_resume);
static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
- { .compatible = "renesas,rcar-gen3-canfd", .data = (void *)RENESAS_RCAR_GEN3 },
- { .compatible = "renesas,rzg2l-canfd", .data = (void *)RENESAS_RZG2L },
+ { .compatible = "renesas,r8a779a0-canfd", .data = &rcar_gen4_hw_info },
+ { .compatible = "renesas,rcar-gen3-canfd", .data = &rcar_gen3_hw_info },
+ { .compatible = "renesas,rcar-gen4-canfd", .data = &rcar_gen4_hw_info },
+ { .compatible = "renesas,rzg2l-canfd", .data = &rzg2l_hw_info },
{ }
};
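The match data is now a per-SoC description rather than a bare chip-id enum: channel count, fCAN post-divider, IRQ topology and register-layout quirks are all read from gpriv->info in the hunks above. The struct definition itself is outside this diff, so the following is only a sketch inferred from the members actually referenced here (max_channels, postdiv, shared_global_irqs, multi_channel_irqs); anything beyond those names is an assumption, not the driver's real header.

/* Sketch only: inferred from the fields used in this diff, not copied
 * from the driver.  One such object exists per supported SoC family and
 * is attached to the compatible string in rcar_canfd_of_table[].
 */
struct rcar_canfd_hw_info {
	u8 max_channels;		/* bounds the for_each_set_bit() walks */
	u8 postdiv;			/* divider applied to the CANFD clock */
	/* hardware topology quirks */
	unsigned shared_global_irqs:1;	/* shared ch_int/g_int lines (R-Car) */
	unsigned multi_channel_irqs:1;	/* per-channel err/tx IRQs (RZ/G2L) */
};

static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
	.max_channels = 2,
	.postdiv = 2,
	.shared_global_irqs = 1,
};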
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 110071b26921..4b2f9cb17fc3 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -107,7 +107,7 @@ config CAN_TSCAN1
depends on ISA
help
This driver is for Technologic Systems' TSCAN-1 PC104 boards.
- http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
+ https://www.embeddedts.com/products/TS-CAN1
The driver supports multiple boards and automatically configures them:
PLD IO base addresses are read from jumpers JP1 and JP2,
IRQ numbers are read from jumpers JP4 and JP5,
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 4ab91759a5c6..c56e27223e5f 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -3,6 +3,7 @@
* Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
* Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
* Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
+ * Copyright (C) 2023 EMS Dr. Thomas Wuensche
*/
#include <linux/kernel.h>
@@ -19,12 +20,14 @@
#define DRV_NAME "ems_pci"
-MODULE_AUTHOR("Sebastian Haas <haas@ems-wuenche.com>");
+MODULE_AUTHOR("Sebastian Haas <support@ems-wuensche.com>");
+MODULE_AUTHOR("Gerhard Uttenthaler <uttenthaler@ems-wuensche.com>");
MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe/104P CAN cards");
MODULE_LICENSE("GPL v2");
#define EMS_PCI_V1_MAX_CHAN 2
#define EMS_PCI_V2_MAX_CHAN 4
+#define EMS_PCI_V3_MAX_CHAN 4
#define EMS_PCI_MAX_CHAN EMS_PCI_V2_MAX_CHAN
struct ems_pci_card {
@@ -40,8 +43,7 @@ struct ems_pci_card {
#define EMS_PCI_CAN_CLOCK (16000000 / 2)
-/*
- * Register definitions and descriptions are from LinCAN 0.3.3.
+/* Register definitions and descriptions are from LinCAN 0.3.3.
*
* PSB4610 PITA-2 bridge control registers
*/
@@ -52,8 +54,7 @@ struct ems_pci_card {
#define PITA2_MISC 0x1c /* Miscellaneous Register */
#define PITA2_MISC_CONFIG 0x04000000 /* Multiplexed parallel interface */
-/*
- * Register definitions for the PLX 9030
+/* Register definitions for the PLX 9030
*/
#define PLX_ICSR 0x4c /* Interrupt Control/Status register */
#define PLX_ICSR_LINTI1_ENA 0x0001 /* LINTi1 Enable */
@@ -62,8 +63,16 @@ struct ems_pci_card {
#define PLX_ICSR_ENA_CLR (PLX_ICSR_LINTI1_ENA | PLX_ICSR_PCIINT_ENA | \
PLX_ICSR_LINTI1_CLR)
-/*
- * The board configuration is probably following:
+/* Register definitions for the ASIX99100
+ */
+#define ASIX_LINTSR 0x28 /* Interrupt Control/Status register */
+#define ASIX_LINTSR_INT0AC BIT(0) /* Writing 1 enables or clears interrupt */
+
+#define ASIX_LIEMR 0x24 /* Local Interrupt Enable / Miscellaneous Register */
+#define ASIX_LIEMR_L0EINTEN BIT(16) /* Local INT0 input assertion enable */
+#define ASIX_LIEMR_LRST BIT(14) /* Local Reset assert */
+
+/* The board configuration is probably following:
* RX1 is connected to ground.
* TX1 is not connected.
* CLKO is not connected.
@@ -72,23 +81,40 @@ struct ems_pci_card {
*/
#define EMS_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
-/*
- * In the CDR register, you should set CBP to 1.
+/* In the CDR register, you should set CBP to 1.
* You will probably also want to set the clock divider value to 7
* (meaning direct oscillator output) because the second SJA1000 chip
* is driven by the first one CLKOUT output.
*/
#define EMS_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
-#define EMS_PCI_V1_BASE_BAR 1
-#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */
-#define EMS_PCI_V2_BASE_BAR 2
-#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */
-#define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers starts */
-#define EMS_PCI_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+#define EMS_PCI_V1_BASE_BAR 1
+#define EMS_PCI_V1_CONF_BAR 0
+#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */
+#define EMS_PCI_V1_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */
+#define EMS_PCI_V1_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+
+#define EMS_PCI_V2_BASE_BAR 2
+#define EMS_PCI_V2_CONF_BAR 0
+#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */
+#define EMS_PCI_V2_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */
+#define EMS_PCI_V2_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+
+#define EMS_PCI_V3_BASE_BAR 0
+#define EMS_PCI_V3_CONF_BAR 5
+#define EMS_PCI_V3_CONF_SIZE 128 /* size of ASIX control area */
+#define EMS_PCI_V3_CAN_BASE_OFFSET 0x00 /* offset where the controllers start */
+#define EMS_PCI_V3_CAN_CTRL_SIZE 0x100 /* memory size for each controller */
#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
+#ifndef PCI_VENDOR_ID_ASIX
+#define PCI_VENDOR_ID_ASIX 0x125b
+#define PCI_DEVICE_ID_ASIX_9110 0x9110
+#define PCI_SUBVENDOR_ID_ASIX 0xa000
+#endif
+#define PCI_SUBDEVICE_ID_EMS 0x4010
+
static const struct pci_device_id ems_pci_tbl[] = {
/* CPC-PCI v1 */
{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
@@ -96,12 +122,13 @@ static const struct pci_device_id ems_pci_tbl[] = {
{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4000},
/* CPC-104P v2 */
{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4002},
+ /* CPC-PCIe v3 */
+ {PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_9110, PCI_SUBVENDOR_ID_ASIX, PCI_SUBDEVICE_ID_EMS},
{0,}
};
MODULE_DEVICE_TABLE(pci, ems_pci_tbl);
-/*
- * Helper to read internal registers from card logic (not CAN)
+/* Helper to read internal registers from card logic (not CAN)
*/
static u8 ems_pci_v1_readb(struct ems_pci_card *card, unsigned int port)
{
@@ -146,8 +173,25 @@ static void ems_pci_v2_post_irq(const struct sja1000_priv *priv)
writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR);
}
-/*
- * Check if a CAN controller is present at the specified location
+static u8 ems_pci_v3_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return readb(priv->reg_base + port);
+}
+
+static void ems_pci_v3_write_reg(const struct sja1000_priv *priv,
+ int port, u8 val)
+{
+ writeb(val, priv->reg_base + port);
+}
+
+static void ems_pci_v3_post_irq(const struct sja1000_priv *priv)
+{
+ struct ems_pci_card *card = (struct ems_pci_card *)priv->priv;
+
+ writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR);
+}
+
+/* Check if a CAN controller is present at the specified location
* by trying to set 'em into the PeliCAN mode
*/
static inline int ems_pci_check_chan(const struct sja1000_priv *priv)
@@ -185,10 +229,10 @@ static void ems_pci_del_card(struct pci_dev *pdev)
free_sja1000dev(dev);
}
- if (card->base_addr != NULL)
+ if (card->base_addr)
pci_iounmap(card->pci_dev, card->base_addr);
- if (card->conf_addr != NULL)
+ if (card->conf_addr)
pci_iounmap(card->pci_dev, card->conf_addr);
kfree(card);
@@ -202,8 +246,7 @@ static void ems_pci_card_reset(struct ems_pci_card *card)
writeb(0, card->base_addr);
}
-/*
- * Probe PCI device for EMS CAN signature and register each available
+/* Probe PCI device for EMS CAN signature and register each available
* CAN channel to SJA1000 Socket-CAN subsystem.
*/
static int ems_pci_add_card(struct pci_dev *pdev,
@@ -212,7 +255,7 @@ static int ems_pci_add_card(struct pci_dev *pdev,
struct sja1000_priv *priv;
struct net_device *dev;
struct ems_pci_card *card;
- int max_chan, conf_size, base_bar;
+ int max_chan, conf_size, base_bar, conf_bar;
int err, i;
/* Enabling PCI device */
@@ -222,8 +265,8 @@ static int ems_pci_add_card(struct pci_dev *pdev,
}
/* Allocating card structures to hold addresses, ... */
- card = kzalloc(sizeof(struct ems_pci_card), GFP_KERNEL);
- if (card == NULL) {
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card) {
pci_disable_device(pdev);
return -ENOMEM;
}
@@ -234,27 +277,35 @@ static int ems_pci_add_card(struct pci_dev *pdev,
card->channels = 0;
- if (pdev->vendor == PCI_VENDOR_ID_PLX) {
+ if (pdev->vendor == PCI_VENDOR_ID_ASIX) {
+ card->version = 3; /* CPC-PCI v3 */
+ max_chan = EMS_PCI_V3_MAX_CHAN;
+ base_bar = EMS_PCI_V3_BASE_BAR;
+ conf_bar = EMS_PCI_V3_CONF_BAR;
+ conf_size = EMS_PCI_V3_CONF_SIZE;
+ } else if (pdev->vendor == PCI_VENDOR_ID_PLX) {
card->version = 2; /* CPC-PCI v2 */
max_chan = EMS_PCI_V2_MAX_CHAN;
base_bar = EMS_PCI_V2_BASE_BAR;
+ conf_bar = EMS_PCI_V2_CONF_BAR;
conf_size = EMS_PCI_V2_CONF_SIZE;
} else {
card->version = 1; /* CPC-PCI v1 */
max_chan = EMS_PCI_V1_MAX_CHAN;
base_bar = EMS_PCI_V1_BASE_BAR;
+ conf_bar = EMS_PCI_V1_CONF_BAR;
conf_size = EMS_PCI_V1_CONF_SIZE;
}
/* Remap configuration space and controller memory area */
- card->conf_addr = pci_iomap(pdev, 0, conf_size);
- if (card->conf_addr == NULL) {
+ card->conf_addr = pci_iomap(pdev, conf_bar, conf_size);
+ if (!card->conf_addr) {
err = -ENOMEM;
goto failure_cleanup;
}
card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE);
- if (card->base_addr == NULL) {
+ if (!card->base_addr) {
err = -ENOMEM;
goto failure_cleanup;
}
@@ -276,12 +327,20 @@ static int ems_pci_add_card(struct pci_dev *pdev,
}
}
+ if (card->version == 3) {
+ /* ASIX chip asserts local reset to CAN controllers
+ * after bootup until it is deasserted
+ */
+ writel(readl(card->conf_addr + ASIX_LIEMR) & ~ASIX_LIEMR_LRST,
+ card->conf_addr + ASIX_LIEMR);
+ }
+
ems_pci_card_reset(card);
/* Detect available channels */
for (i = 0; i < max_chan; i++) {
dev = alloc_sja1000dev(0);
- if (dev == NULL) {
+ if (!dev) {
err = -ENOMEM;
goto failure_cleanup;
}
@@ -292,16 +351,25 @@ static int ems_pci_add_card(struct pci_dev *pdev,
priv->irq_flags = IRQF_SHARED;
dev->irq = pdev->irq;
- priv->reg_base = card->base_addr + EMS_PCI_CAN_BASE_OFFSET
- + (i * EMS_PCI_CAN_CTRL_SIZE);
+
if (card->version == 1) {
priv->read_reg = ems_pci_v1_read_reg;
priv->write_reg = ems_pci_v1_write_reg;
priv->post_irq = ems_pci_v1_post_irq;
- } else {
+ priv->reg_base = card->base_addr + EMS_PCI_V1_CAN_BASE_OFFSET
+ + (i * EMS_PCI_V1_CAN_CTRL_SIZE);
+ } else if (card->version == 2) {
priv->read_reg = ems_pci_v2_read_reg;
priv->write_reg = ems_pci_v2_write_reg;
priv->post_irq = ems_pci_v2_post_irq;
+ priv->reg_base = card->base_addr + EMS_PCI_V2_CAN_BASE_OFFSET
+ + (i * EMS_PCI_V2_CAN_CTRL_SIZE);
+ } else {
+ priv->read_reg = ems_pci_v3_read_reg;
+ priv->write_reg = ems_pci_v3_write_reg;
+ priv->post_irq = ems_pci_v3_post_irq;
+ priv->reg_base = card->base_addr + EMS_PCI_V3_CAN_BASE_OFFSET
+ + (i * EMS_PCI_V3_CAN_CTRL_SIZE);
}
/* Check if channel is present */
@@ -313,20 +381,28 @@ static int ems_pci_add_card(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
dev->dev_id = i;
- if (card->version == 1)
+ if (card->version == 1) {
/* reset int flag of pita */
writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0,
card->conf_addr + PITA2_ICR);
- else
+ } else if (card->version == 2) {
/* enable IRQ in PLX 9030 */
writel(PLX_ICSR_ENA_CLR,
card->conf_addr + PLX_ICSR);
+ } else {
+ /* Enable IRQ in AX99100 */
+ writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR);
+ /* Enable local INT0 input */
+ writel(readl(card->conf_addr + ASIX_LIEMR) | ASIX_LIEMR_L0EINTEN,
+ card->conf_addr + ASIX_LIEMR);
+ }
/* Register SJA1000 device */
err = register_sja1000dev(dev);
if (err) {
- dev_err(&pdev->dev, "Registering device failed "
- "(err=%d)\n", err);
+ dev_err(&pdev->dev,
+ "Registering device failed: %pe\n",
+ ERR_PTR(err));
free_sja1000dev(dev);
goto failure_cleanup;
}
@@ -334,7 +410,7 @@ static int ems_pci_add_card(struct pci_dev *pdev,
card->channels++;
dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d\n",
- i + 1, priv->reg_base, dev->irq);
+ i + 1, priv->reg_base, dev->irq);
} else {
free_sja1000dev(dev);
}
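Each card generation now has its own BAR assignment and per-controller window, so the SJA1000 register base is computed from version-specific constants instead of the old shared EMS_PCI_CAN_BASE_OFFSET/EMS_PCI_CAN_CTRL_SIZE pair. The driver open-codes this per version in ems_pci_add_card(); the table below is only an illustration of the same address math, built from the constants introduced above and not taken from the driver.

/* Illustration only: not the driver's actual data structure. */
struct ems_pci_layout {
	int base_bar;			/* BAR holding the SJA1000 windows */
	int conf_bar;			/* BAR holding the bridge registers */
	unsigned long can_base_offset;	/* first controller inside base BAR */
	unsigned long can_ctrl_size;	/* stride between controllers */
};

static const struct ems_pci_layout ems_layouts[] = {
	[1] = { 1, 0, 0x400, 0x200 },	/* v1: PITA-2 bridge */
	[2] = { 2, 0, 0x400, 0x200 },	/* v2: PLX 9030 */
	[3] = { 0, 5, 0x00,  0x100 },	/* v3: ASIX AX99100 */
};

/* Register window of CAN channel i for a given card version */
static void __iomem *ems_chan_base(void __iomem *base_addr, int version, int i)
{
	const struct ems_pci_layout *l = &ems_layouts[version];

	return base_addr + l->can_base_offset + i * l->can_ctrl_size;
}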
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 131a084c3535..ebd5941c3f53 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -478,7 +478,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
if (!netdev)
continue;
- strlcpy(name, netdev->name, IFNAMSIZ);
+ strscpy(name, netdev->name, IFNAMSIZ);
unregister_sja1000dev(netdev);
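The strscpy() conversion is more than a rename: unlike strlcpy(), strscpy() always NUL-terminates the destination and reports truncation as -E2BIG instead of returning the would-be source length. A minimal usage sketch follows; the warning is illustrative only, the call above deliberately ignores the return value because a valid interface name always fits in IFNAMSIZ.

	char name[IFNAMSIZ];

	/* Destination is guaranteed to be NUL-terminated; a negative
	 * return value (-E2BIG) signals that the source was truncated.
	 */
	if (strscpy(name, netdev->name, sizeof(name)) < 0)
		netdev_warn(netdev, "interface name truncated\n");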
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 3fad54646746..aac5956e4a53 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -52,6 +52,7 @@
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
@@ -60,7 +61,6 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include "sja1000.h"
@@ -184,8 +184,9 @@ static void chipset_init(struct net_device *dev)
{
struct sja1000_priv *priv = netdev_priv(dev);
- /* set clock divider and output control register */
- priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
+ if (!(priv->flags & SJA1000_QUIRK_NO_CDR_REG))
+ /* set clock divider and output control register */
+ priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
/* set acceptance filter (accept all) */
priv->write_reg(priv, SJA1000_ACCC0, 0x00);
@@ -210,7 +211,8 @@ static void sja1000_start(struct net_device *dev)
set_reset_mode(dev);
/* Initialize chip if uninitialized at this stage */
- if (!(priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN))
+ if (!(priv->flags & SJA1000_QUIRK_NO_CDR_REG ||
+ priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN))
chipset_init(dev);
/* Clear error counters and error code capture */
@@ -289,7 +291,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
u8 cmd_reg_val = 0x00;
int i;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
@@ -372,18 +374,17 @@ static void sja1000_rx(struct net_device *dev)
} else {
for (i = 0; i < cf->len; i++)
cf->data[i] = priv->read_reg(priv, dreg++);
+
+ stats->rx_bytes += cf->len;
}
+ stats->rx_packets++;
cf->can_id = id;
/* release receive buffer */
sja1000_write_cmdreg(priv, CMD_RRB);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
-
- can_led_event(dev, CAN_LED_EVENT_RX);
}
static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
@@ -404,9 +405,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
txerr = priv->read_reg(priv, SJA1000_TXERR);
rxerr = priv->read_reg(priv, SJA1000_RXERR);
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
-
if (isrc & IRQ_DOI) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
@@ -428,6 +426,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
+ if (state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
if (isrc & IRQ_BEI) {
/* bus error interrupt */
priv->can.can_stats.bus_error++;
@@ -487,8 +490,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
can_bus_off(dev);
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -528,13 +529,10 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
can_free_echo_skb(dev, 0, NULL);
} else {
/* transmission complete */
- stats->tx_bytes +=
- priv->read_reg(priv, SJA1000_FI) & 0xf;
+ stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
stats->tx_packets++;
- can_get_echo_skb(dev, 0, NULL);
}
netif_wake_queue(dev);
- can_led_event(dev, CAN_LED_EVENT_TX);
}
if (isrc & IRQ_RI) {
/* receive interrupt */
@@ -590,8 +588,6 @@ static int sja1000_open(struct net_device *dev)
/* init and start chi */
sja1000_start(dev);
- can_led_event(dev, CAN_LED_EVENT_OPEN);
-
netif_start_queue(dev);
return 0;
@@ -609,8 +605,6 @@ static int sja1000_close(struct net_device *dev)
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -661,25 +655,23 @@ static const struct net_device_ops sja1000_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops sja1000_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
int register_sja1000dev(struct net_device *dev)
{
- int ret;
-
if (!sja1000_probe_chip(dev))
return -ENODEV;
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &sja1000_netdev_ops;
+ dev->ethtool_ops = &sja1000_ethtool_ops;
set_reset_mode(dev);
chipset_init(dev);
- ret = register_candev(dev);
-
- if (!ret)
- devm_can_led_init(dev);
-
- return ret;
+ return register_candev(dev);
}
EXPORT_SYMBOL_GPL(register_sja1000dev);
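With the LED triggers gone, TX statistics are also taken from the echo path: can_get_echo_skb() loops the stashed skb back to the stack and returns the number of data bytes that were actually transmitted, so the interrupt handler no longer re-reads the SJA1000 frame-info register. A condensed sketch of the pattern, with error handling trimmed:

	/* ndo_start_xmit(): keep a copy of the frame for local echo */
	can_put_echo_skb(skb, dev, 0, 0);

	/* TX-done interrupt: echo the frame back and account for it */
	stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
	stats->tx_packets++;
	netif_wake_queue(dev);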
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 9d46398f8154..7f736f1df547 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -145,7 +145,8 @@
/*
* Flags for sja1000priv.flags
*/
-#define SJA1000_CUSTOM_IRQ_HANDLER 0x1
+#define SJA1000_CUSTOM_IRQ_HANDLER BIT(0)
+#define SJA1000_QUIRK_NO_CDR_REG BIT(1)
/*
* SJA1000 private data structure
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index d513fac50718..db3e767d5320 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -202,22 +202,24 @@ static int sja1000_isa_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
- goto exit_unmap;
+ goto exit_free;
}
dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n",
DRV_NAME, priv->reg_base, dev->irq);
return 0;
- exit_unmap:
+exit_free:
+ free_sja1000dev(dev);
+exit_unmap:
if (mem[idx])
iounmap(base);
- exit_release:
+exit_release:
if (mem[idx])
release_mem_region(mem[idx], iosize);
else
release_region(port[idx], iosize);
- exit:
+exit:
return err;
}
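The relabelled error path fixes a leak: the candev allocated by alloc_sja1000dev() was never freed when register_sja1000dev() failed, because the old code jumped straight to the unmap label. Cleanup has to run in the reverse order of acquisition; a generic sketch of the restored unwind, condensed rather than the full probe function:

	base = ioremap(start, iosize);
	if (!base)
		return -ENOMEM;

	dev = alloc_sja1000dev(0);
	if (!dev) {
		err = -ENOMEM;
		goto exit_unmap;
	}

	err = register_sja1000dev(dev);
	if (err)
		goto exit_free;		/* newest resource first */

	return 0;

exit_free:
	free_sja1000dev(dev);		/* was leaked before this change */
exit_unmap:
	iounmap(base);
	return err;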
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index d7c2ec529b8f..6779d5357069 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -14,10 +14,10 @@
#include <linux/irq.h>
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include "sja1000.h"
@@ -32,7 +32,7 @@ MODULE_LICENSE("GPL v2");
struct sja1000_of_data {
size_t priv_sz;
- int (*init)(struct sja1000_priv *priv, struct device_node *of);
+ void (*init)(struct sja1000_priv *priv, struct device_node *of);
};
struct technologic_priv {
@@ -95,15 +95,18 @@ static void sp_technologic_write_reg16(const struct sja1000_priv *priv,
spin_unlock_irqrestore(&tp->io_lock, flags);
}
-static int sp_technologic_init(struct sja1000_priv *priv, struct device_node *of)
+static void sp_technologic_init(struct sja1000_priv *priv, struct device_node *of)
{
struct technologic_priv *tp = priv->priv;
priv->read_reg = sp_technologic_read_reg16;
priv->write_reg = sp_technologic_write_reg16;
spin_lock_init(&tp->io_lock);
+}
- return 0;
+static void sp_rzn1_init(struct sja1000_priv *priv, struct device_node *of)
+{
+ priv->flags = SJA1000_QUIRK_NO_CDR_REG;
}
static void sp_populate(struct sja1000_priv *priv,
@@ -156,11 +159,13 @@ static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of)
priv->write_reg = sp_write_reg8;
}
- err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
- if (!err)
- priv->can.clock.freq = prop / 2;
- else
- priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+ if (!priv->can.clock.freq) {
+ err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
+ if (!err)
+ priv->can.clock.freq = prop / 2;
+ else
+ priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+ }
err = of_property_read_u32(of, "nxp,tx-output-mode", &prop);
if (!err)
@@ -195,8 +200,13 @@ static struct sja1000_of_data technologic_data = {
.init = sp_technologic_init,
};
+static struct sja1000_of_data renesas_data = {
+ .init = sp_rzn1_init,
+};
+
static const struct of_device_id sp_of_table[] = {
{ .compatible = "nxp,sja1000", .data = NULL, },
+ { .compatible = "renesas,rzn1-sja1000", .data = &renesas_data, },
{ .compatible = "technologic,sja1000", .data = &technologic_data, },
{ /* sentinel */ },
};
@@ -211,9 +221,9 @@ static int sp_probe(struct platform_device *pdev)
struct resource *res_mem, *res_irq = NULL;
struct sja1000_platform_data *pdata;
struct device_node *of = pdev->dev.of_node;
- const struct of_device_id *of_id;
const struct sja1000_of_data *of_data = NULL;
size_t priv_sz = 0;
+ struct clk *clk;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata && !of) {
@@ -234,19 +244,24 @@ static int sp_probe(struct platform_device *pdev)
if (!addr)
return -ENOMEM;
- if (of)
- irq = irq_of_parse_and_map(of, 0);
- else
+ if (of) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "CAN clk operation failed");
+ } else {
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res_irq)
+ return -ENODEV;
+ }
- if (!irq && !res_irq)
- return -ENODEV;
-
- of_id = of_match_device(sp_of_table, &pdev->dev);
- if (of_id && of_id->data) {
- of_data = of_id->data;
+ of_data = device_get_match_data(&pdev->dev);
+ if (of_data)
priv_sz = of_data->priv_sz;
- }
dev = alloc_sja1000dev(priv_sz);
if (!dev)
@@ -266,13 +281,19 @@ static int sp_probe(struct platform_device *pdev)
priv->reg_base = addr;
if (of) {
- sp_populate_of(priv, of);
-
- if (of_data && of_data->init) {
- err = of_data->init(priv, of);
- if (err)
+ if (clk) {
+ priv->can.clock.freq = clk_get_rate(clk) / 2;
+ if (!priv->can.clock.freq) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "Zero CAN clk rate");
goto exit_free;
+ }
}
+
+ sp_populate_of(priv, of);
+
+ if (of_data && of_data->init)
+ of_data->init(priv, of);
} else {
sp_populate(priv, pdata, res_mem->flags);
}
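Clock handling for the DT case is also reworked: an optional functional clock is fetched and enabled in one step with devm_clk_get_optional_enabled(), and when it is present the CAN core clock is derived from its rate (the SJA1000 core runs at half the external oscillator frequency), leaving the "nxp,external-clock-frequency" property and SP_CAN_CLOCK as fallbacks only. A condensed sketch of that probe-time logic, using the helpers shown above:

	struct clk *clk;

	/* NULL when the DT node has no clock; otherwise the clock is
	 * enabled here and disabled/released automatically on detach.
	 */
	clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "CAN clk operation failed");

	if (clk) {
		/* CAN core clock is half the external oscillator rate */
		priv->can.clock.freq = clk_get_rate(clk) / 2;
		if (!priv->can.clock.freq)
			return -EINVAL;
	}
	/* else: sp_populate_of() falls back to the DT property or SP_CAN_CLOCK */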
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
index 3dbba8d61afb..f3862bed3d40 100644
--- a/drivers/net/can/sja1000/tscan1.c
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -5,10 +5,9 @@
* Copyright 2010 Andre B. Oliveira
*/
-/*
- * References:
- * - Getting started with TS-CAN1, Technologic Systems, Jun 2009
- * http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
+/* References:
+ * - Getting started with TS-CAN1, Technologic Systems, Feb 2022
+ * https://docs.embeddedts.com/TS-CAN1
*/
#include <linux/init.h>
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
deleted file mode 100644
index 9a4ebda30510..000000000000
--- a/drivers/net/can/slcan.c
+++ /dev/null
@@ -1,790 +0,0 @@
-/*
- * slcan.c - serial line CAN interface driver (using tty line discipline)
- *
- * This file is derived from linux/drivers/net/slip/slip.c
- *
- * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
- * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
- * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, see http://www.gnu.org/licenses/gpl.html
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
- * DAMAGE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-#include <linux/uaccess.h>
-#include <linux/bitops.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/rtnetlink.h>
-#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/workqueue.h>
-#include <linux/can.h>
-#include <linux/can/skb.h>
-#include <linux/can/can-ml.h>
-
-MODULE_ALIAS_LDISC(N_SLCAN);
-MODULE_DESCRIPTION("serial line CAN interface");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
-
-#define SLCAN_MAGIC 0x53CA
-
-static int maxdev = 10; /* MAX number of SLCAN channels;
- This can be overridden with
- insmod slcan.ko maxdev=nnn */
-module_param(maxdev, int, 0);
-MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
-
-/* maximum rx buffer len: extended CAN frame with timestamp */
-#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
-
-#define SLC_CMD_LEN 1
-#define SLC_SFF_ID_LEN 3
-#define SLC_EFF_ID_LEN 8
-
-struct slcan {
- int magic;
-
- /* Various fields. */
- struct tty_struct *tty; /* ptr to TTY structure */
- struct net_device *dev; /* easy for intr handling */
- spinlock_t lock;
- struct work_struct tx_work; /* Flushes transmit buffer */
-
- /* These are pointers to the malloc()ed frame buffers. */
- unsigned char rbuff[SLC_MTU]; /* receiver buffer */
- int rcount; /* received chars counter */
- unsigned char xbuff[SLC_MTU]; /* transmitter buffer */
- unsigned char *xhead; /* pointer to next XMIT byte */
- int xleft; /* bytes left in XMIT queue */
-
- unsigned long flags; /* Flag values/ mode etc */
-#define SLF_INUSE 0 /* Channel in use */
-#define SLF_ERROR 1 /* Parity, etc. error */
-};
-
-static struct net_device **slcan_devs;
-
- /************************************************************************
- * SLCAN ENCAPSULATION FORMAT *
- ************************************************************************/
-
-/*
- * A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
- * frame format) a data length code (len) which can be from 0 to 8
- * and up to <len> data bytes as payload.
- * Additionally a CAN frame may become a remote transmission frame if the
- * RTR-bit is set. This causes another ECU to send a CAN frame with the
- * given can_id.
- *
- * The SLCAN ASCII representation of these different frame types is:
- * <type> <id> <dlc> <data>*
- *
- * Extended frames (29 bit) are defined by capital characters in the type.
- * RTR frames are defined as 'r' types - normal frames have 't' type:
- * t => 11 bit data frame
- * r => 11 bit RTR frame
- * T => 29 bit data frame
- * R => 29 bit RTR frame
- *
- * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base64).
- * The <dlc> is a one byte ASCII number ('0' - '8')
- * The <data> section has at much ASCII Hex bytes as defined by the <dlc>
- *
- * Examples:
- *
- * t1230 : can_id 0x123, len 0, no data
- * t4563112233 : can_id 0x456, len 3, data 0x11 0x22 0x33
- * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, len 2, data 0xAA 0x55
- * r1230 : can_id 0x123, len 0, no data, remote transmission request
- *
- */
-
- /************************************************************************
- * STANDARD SLCAN DECAPSULATION *
- ************************************************************************/
-
-/* Send one completely decapsulated can_frame to the network layer */
-static void slc_bump(struct slcan *sl)
-{
- struct sk_buff *skb;
- struct can_frame cf;
- int i, tmp;
- u32 tmpid;
- char *cmd = sl->rbuff;
-
- memset(&cf, 0, sizeof(cf));
-
- switch (*cmd) {
- case 'r':
- cf.can_id = CAN_RTR_FLAG;
- fallthrough;
- case 't':
- /* store dlc ASCII value and terminate SFF CAN ID string */
- cf.len = sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN];
- sl->rbuff[SLC_CMD_LEN + SLC_SFF_ID_LEN] = 0;
- /* point to payload data behind the dlc */
- cmd += SLC_CMD_LEN + SLC_SFF_ID_LEN + 1;
- break;
- case 'R':
- cf.can_id = CAN_RTR_FLAG;
- fallthrough;
- case 'T':
- cf.can_id |= CAN_EFF_FLAG;
- /* store dlc ASCII value and terminate EFF CAN ID string */
- cf.len = sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN];
- sl->rbuff[SLC_CMD_LEN + SLC_EFF_ID_LEN] = 0;
- /* point to payload data behind the dlc */
- cmd += SLC_CMD_LEN + SLC_EFF_ID_LEN + 1;
- break;
- default:
- return;
- }
-
- if (kstrtou32(sl->rbuff + SLC_CMD_LEN, 16, &tmpid))
- return;
-
- cf.can_id |= tmpid;
-
- /* get len from sanitized ASCII value */
- if (cf.len >= '0' && cf.len < '9')
- cf.len -= '0';
- else
- return;
-
- /* RTR frames may have a dlc > 0 but they never have any data bytes */
- if (!(cf.can_id & CAN_RTR_FLAG)) {
- for (i = 0; i < cf.len; i++) {
- tmp = hex_to_bin(*cmd++);
- if (tmp < 0)
- return;
- cf.data[i] = (tmp << 4);
- tmp = hex_to_bin(*cmd++);
- if (tmp < 0)
- return;
- cf.data[i] |= tmp;
- }
- }
-
- skb = dev_alloc_skb(sizeof(struct can_frame) +
- sizeof(struct can_skb_priv));
- if (!skb)
- return;
-
- skb->dev = sl->dev;
- skb->protocol = htons(ETH_P_CAN);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- can_skb_reserve(skb);
- can_skb_prv(skb)->ifindex = sl->dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
-
- skb_put_data(skb, &cf, sizeof(struct can_frame));
-
- sl->dev->stats.rx_packets++;
- sl->dev->stats.rx_bytes += cf.len;
- netif_rx_ni(skb);
-}
-
-/* parse tty input stream */
-static void slcan_unesc(struct slcan *sl, unsigned char s)
-{
- if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
- if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
- (sl->rcount > 4)) {
- slc_bump(sl);
- }
- sl->rcount = 0;
- } else {
- if (!test_bit(SLF_ERROR, &sl->flags)) {
- if (sl->rcount < SLC_MTU) {
- sl->rbuff[sl->rcount++] = s;
- return;
- } else {
- sl->dev->stats.rx_over_errors++;
- set_bit(SLF_ERROR, &sl->flags);
- }
- }
- }
-}
-
- /************************************************************************
- * STANDARD SLCAN ENCAPSULATION *
- ************************************************************************/
-
-/* Encapsulate one can_frame and stuff into a TTY queue. */
-static void slc_encaps(struct slcan *sl, struct can_frame *cf)
-{
- int actual, i;
- unsigned char *pos;
- unsigned char *endpos;
- canid_t id = cf->can_id;
-
- pos = sl->xbuff;
-
- if (cf->can_id & CAN_RTR_FLAG)
- *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
- else
- *pos = 'T'; /* becomes 't' in standard frame format (SSF) */
-
- /* determine number of chars for the CAN-identifier */
- if (cf->can_id & CAN_EFF_FLAG) {
- id &= CAN_EFF_MASK;
- endpos = pos + SLC_EFF_ID_LEN;
- } else {
- *pos |= 0x20; /* convert R/T to lower case for SFF */
- id &= CAN_SFF_MASK;
- endpos = pos + SLC_SFF_ID_LEN;
- }
-
- /* build 3 (SFF) or 8 (EFF) digit CAN identifier */
- pos++;
- while (endpos >= pos) {
- *endpos-- = hex_asc_upper[id & 0xf];
- id >>= 4;
- }
-
- pos += (cf->can_id & CAN_EFF_FLAG) ? SLC_EFF_ID_LEN : SLC_SFF_ID_LEN;
-
- *pos++ = cf->len + '0';
-
- /* RTR frames may have a dlc > 0 but they never have any data bytes */
- if (!(cf->can_id & CAN_RTR_FLAG)) {
- for (i = 0; i < cf->len; i++)
- pos = hex_byte_pack_upper(pos, cf->data[i]);
- }
-
- *pos++ = '\r';
-
- /* Order of next two lines is *very* important.
- * When we are sending a little amount of data,
- * the transfer may be completed inside the ops->write()
- * routine, because it's running with interrupts enabled.
- * In this case we *never* got WRITE_WAKEUP event,
- * if we did not request it before write operation.
- * 14 Oct 1994 Dmitry Gorodchanin.
- */
- set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
- sl->xleft = (pos - sl->xbuff) - actual;
- sl->xhead = sl->xbuff + actual;
- sl->dev->stats.tx_bytes += cf->len;
-}
-
-/* Write out any remaining transmit buffer. Scheduled when tty is writable */
-static void slcan_transmit(struct work_struct *work)
-{
- struct slcan *sl = container_of(work, struct slcan, tx_work);
- int actual;
-
- spin_lock_bh(&sl->lock);
- /* First make sure we're connected. */
- if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
- spin_unlock_bh(&sl->lock);
- return;
- }
-
- if (sl->xleft <= 0) {
- /* Now serial buffer is almost free & we can start
- * transmission of another packet */
- sl->dev->stats.tx_packets++;
- clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- spin_unlock_bh(&sl->lock);
- netif_wake_queue(sl->dev);
- return;
- }
-
- actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
- sl->xleft -= actual;
- sl->xhead += actual;
- spin_unlock_bh(&sl->lock);
-}
-
-/*
- * Called by the driver when there's room for more data.
- * Schedule the transmit.
- */
-static void slcan_write_wakeup(struct tty_struct *tty)
-{
- struct slcan *sl;
-
- rcu_read_lock();
- sl = rcu_dereference(tty->disc_data);
- if (sl)
- schedule_work(&sl->tx_work);
- rcu_read_unlock();
-}
-
-/* Send a can_frame to a TTY queue. */
-static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct slcan *sl = netdev_priv(dev);
-
- if (skb->len != CAN_MTU)
- goto out;
-
- spin_lock(&sl->lock);
- if (!netif_running(dev)) {
- spin_unlock(&sl->lock);
- printk(KERN_WARNING "%s: xmit: iface is down\n", dev->name);
- goto out;
- }
- if (sl->tty == NULL) {
- spin_unlock(&sl->lock);
- goto out;
- }
-
- netif_stop_queue(sl->dev);
- slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */
- spin_unlock(&sl->lock);
-
-out:
- kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-
-/******************************************
- * Routines looking at netdevice side.
- ******************************************/
-
-/* Netdevice UP -> DOWN routine */
-static int slc_close(struct net_device *dev)
-{
- struct slcan *sl = netdev_priv(dev);
-
- spin_lock_bh(&sl->lock);
- if (sl->tty) {
- /* TTY discipline is running. */
- clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
- }
- netif_stop_queue(dev);
- sl->rcount = 0;
- sl->xleft = 0;
- spin_unlock_bh(&sl->lock);
-
- return 0;
-}
-
-/* Netdevice DOWN -> UP routine */
-static int slc_open(struct net_device *dev)
-{
- struct slcan *sl = netdev_priv(dev);
-
- if (sl->tty == NULL)
- return -ENODEV;
-
- sl->flags &= (1 << SLF_INUSE);
- netif_start_queue(dev);
- return 0;
-}
-
-/* Hook the destructor so we can free slcan devs at the right point in time */
-static void slc_free_netdev(struct net_device *dev)
-{
- int i = dev->base_addr;
-
- slcan_devs[i] = NULL;
-}
-
-static int slcan_change_mtu(struct net_device *dev, int new_mtu)
-{
- return -EINVAL;
-}
-
-static const struct net_device_ops slc_netdev_ops = {
- .ndo_open = slc_open,
- .ndo_stop = slc_close,
- .ndo_start_xmit = slc_xmit,
- .ndo_change_mtu = slcan_change_mtu,
-};
-
-static void slc_setup(struct net_device *dev)
-{
- dev->netdev_ops = &slc_netdev_ops;
- dev->needs_free_netdev = true;
- dev->priv_destructor = slc_free_netdev;
-
- dev->hard_header_len = 0;
- dev->addr_len = 0;
- dev->tx_queue_len = 10;
-
- dev->mtu = CAN_MTU;
- dev->type = ARPHRD_CAN;
-
- /* New-style flags. */
- dev->flags = IFF_NOARP;
- dev->features = NETIF_F_HW_CSUM;
-}
-
-/******************************************
- Routines looking at TTY side.
- ******************************************/
-
-/*
- * Handle the 'receiver data ready' interrupt.
- * This function is called by the 'tty_io' module in the kernel when
- * a block of SLCAN data has been received, which can now be decapsulated
- * and sent on to some IP layer for further processing. This will not
- * be re-entered while running but other ldisc functions may be called
- * in parallel
- */
-
-static void slcan_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, const char *fp,
- int count)
-{
- struct slcan *sl = (struct slcan *) tty->disc_data;
-
- if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
- return;
-
- /* Read the characters out of the buffer */
- while (count--) {
- if (fp && *fp++) {
- if (!test_and_set_bit(SLF_ERROR, &sl->flags))
- sl->dev->stats.rx_errors++;
- cp++;
- continue;
- }
- slcan_unesc(sl, *cp++);
- }
-}
-
-/************************************
- * slcan_open helper routines.
- ************************************/
-
-/* Collect hanged up channels */
-static void slc_sync(void)
-{
- int i;
- struct net_device *dev;
- struct slcan *sl;
-
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (dev == NULL)
- break;
-
- sl = netdev_priv(dev);
- if (sl->tty)
- continue;
- if (dev->flags & IFF_UP)
- dev_close(dev);
- }
-}
-
-/* Find a free SLCAN channel, and link in this `tty' line. */
-static struct slcan *slc_alloc(void)
-{
- int i;
- char name[IFNAMSIZ];
- struct net_device *dev = NULL;
- struct can_ml_priv *can_ml;
- struct slcan *sl;
- int size;
-
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (dev == NULL)
- break;
-
- }
-
- /* Sorry, too many, all slots in use */
- if (i >= maxdev)
- return NULL;
-
- sprintf(name, "slcan%d", i);
- size = ALIGN(sizeof(*sl), NETDEV_ALIGN) + sizeof(struct can_ml_priv);
- dev = alloc_netdev(size, name, NET_NAME_UNKNOWN, slc_setup);
- if (!dev)
- return NULL;
-
- dev->base_addr = i;
- sl = netdev_priv(dev);
- can_ml = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
- can_set_ml_priv(dev, can_ml);
-
- /* Initialize channel control data */
- sl->magic = SLCAN_MAGIC;
- sl->dev = dev;
- spin_lock_init(&sl->lock);
- INIT_WORK(&sl->tx_work, slcan_transmit);
- slcan_devs[i] = dev;
-
- return sl;
-}
-
-/*
- * Open the high-level part of the SLCAN channel.
- * This function is called by the TTY module when the
- * SLCAN line discipline is called for. Because we are
- * sure the tty line exists, we only have to link it to
- * a free SLCAN channel...
- *
- * Called in process context serialized from other ldisc calls.
- */
-
-static int slcan_open(struct tty_struct *tty)
-{
- struct slcan *sl;
- int err;
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (tty->ops->write == NULL)
- return -EOPNOTSUPP;
-
- /* RTnetlink lock is misused here to serialize concurrent
- opens of slcan channels. There are better ways, but it is
- the simplest one.
- */
- rtnl_lock();
-
- /* Collect hanged up channels. */
- slc_sync();
-
- sl = tty->disc_data;
-
- err = -EEXIST;
- /* First make sure we're not already connected. */
- if (sl && sl->magic == SLCAN_MAGIC)
- goto err_exit;
-
- /* OK. Find a free SLCAN channel to use. */
- err = -ENFILE;
- sl = slc_alloc();
- if (sl == NULL)
- goto err_exit;
-
- sl->tty = tty;
- tty->disc_data = sl;
-
- if (!test_bit(SLF_INUSE, &sl->flags)) {
- /* Perform the low-level SLCAN initialization. */
- sl->rcount = 0;
- sl->xleft = 0;
-
- set_bit(SLF_INUSE, &sl->flags);
-
- err = register_netdevice(sl->dev);
- if (err)
- goto err_free_chan;
- }
-
- /* Done. We have linked the TTY line to a channel. */
- rtnl_unlock();
- tty->receive_room = 65536; /* We don't flow control */
-
- /* TTY layer expects 0 on success */
- return 0;
-
-err_free_chan:
- sl->tty = NULL;
- tty->disc_data = NULL;
- clear_bit(SLF_INUSE, &sl->flags);
- slc_free_netdev(sl->dev);
- /* do not call free_netdev before rtnl_unlock */
- rtnl_unlock();
- free_netdev(sl->dev);
- return err;
-
-err_exit:
- rtnl_unlock();
-
- /* Count references from TTY module */
- return err;
-}
-
-/*
- * Close down a SLCAN channel.
- * This means flushing out any pending queues, and then returning. This
- * call is serialized against other ldisc functions.
- *
- * We also use this method for a hangup event.
- */
-
-static void slcan_close(struct tty_struct *tty)
-{
- struct slcan *sl = (struct slcan *) tty->disc_data;
-
- /* First make sure we're connected. */
- if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
- return;
-
- spin_lock_bh(&sl->lock);
- rcu_assign_pointer(tty->disc_data, NULL);
- sl->tty = NULL;
- spin_unlock_bh(&sl->lock);
-
- synchronize_rcu();
- flush_work(&sl->tx_work);
-
- /* Flush network side */
- unregister_netdev(sl->dev);
- /* This will complete via sl_free_netdev */
-}
-
-static void slcan_hangup(struct tty_struct *tty)
-{
- slcan_close(tty);
-}
-
-/* Perform I/O control on an active SLCAN channel. */
-static int slcan_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct slcan *sl = (struct slcan *) tty->disc_data;
- unsigned int tmp;
-
- /* First make sure we're connected. */
- if (!sl || sl->magic != SLCAN_MAGIC)
- return -EINVAL;
-
- switch (cmd) {
- case SIOCGIFNAME:
- tmp = strlen(sl->dev->name) + 1;
- if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
- return -EFAULT;
- return 0;
-
- case SIOCSIFHWADDR:
- return -EINVAL;
-
- default:
- return tty_mode_ioctl(tty, cmd, arg);
- }
-}
-
-static struct tty_ldisc_ops slc_ldisc = {
- .owner = THIS_MODULE,
- .num = N_SLCAN,
- .name = "slcan",
- .open = slcan_open,
- .close = slcan_close,
- .hangup = slcan_hangup,
- .ioctl = slcan_ioctl,
- .receive_buf = slcan_receive_buf,
- .write_wakeup = slcan_write_wakeup,
-};
-
-static int __init slcan_init(void)
-{
- int status;
-
- if (maxdev < 4)
- maxdev = 4; /* Sanity */
-
- pr_info("slcan: serial line CAN interface driver\n");
- pr_info("slcan: %d dynamic interface channels.\n", maxdev);
-
- slcan_devs = kcalloc(maxdev, sizeof(struct net_device *), GFP_KERNEL);
- if (!slcan_devs)
- return -ENOMEM;
-
- /* Fill in our line protocol discipline, and register it */
- status = tty_register_ldisc(&slc_ldisc);
- if (status) {
- printk(KERN_ERR "slcan: can't register line discipline\n");
- kfree(slcan_devs);
- }
- return status;
-}
-
-static void __exit slcan_exit(void)
-{
- int i;
- struct net_device *dev;
- struct slcan *sl;
- unsigned long timeout = jiffies + HZ;
- int busy = 0;
-
- if (slcan_devs == NULL)
- return;
-
- /* First of all: check for active disciplines and hangup them.
- */
- do {
- if (busy)
- msleep_interruptible(100);
-
- busy = 0;
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (!dev)
- continue;
- sl = netdev_priv(dev);
- spin_lock_bh(&sl->lock);
- if (sl->tty) {
- busy++;
- tty_hangup(sl->tty);
- }
- spin_unlock_bh(&sl->lock);
- }
- } while (busy && time_before(jiffies, timeout));
-
- /* FIXME: hangup is async so we should wait when doing this second
- phase */
-
- for (i = 0; i < maxdev; i++) {
- dev = slcan_devs[i];
- if (!dev)
- continue;
- slcan_devs[i] = NULL;
-
- sl = netdev_priv(dev);
- if (sl->tty) {
- printk(KERN_ERR "%s: tty discipline still running\n",
- dev->name);
- }
-
- unregister_netdev(dev);
- }
-
- kfree(slcan_devs);
- slcan_devs = NULL;
-
- tty_unregister_ldisc(&slc_ldisc);
-}
-
-module_init(slcan_init);
-module_exit(slcan_exit);
diff --git a/drivers/net/can/slcan/Makefile b/drivers/net/can/slcan/Makefile
new file mode 100644
index 000000000000..8a88e484ee21
--- /dev/null
+++ b/drivers/net/can/slcan/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CAN_SLCAN) += slcan.o
+
+slcan-objs :=
+slcan-objs += slcan-core.o
+slcan-objs += slcan-ethtool.o
diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c
new file mode 100644
index 000000000000..f4db77007c13
--- /dev/null
+++ b/drivers/net/can/slcan/slcan-core.c
@@ -0,0 +1,941 @@
+/*
+ * slcan.c - serial line CAN interface driver (using tty line discipline)
+ *
+ * This file is derived from linux/drivers/net/slip/slip.c and got
+ * inspiration from linux/drivers/net/can/can327.c for the rework made
+ * on the line discipline code.
+ *
+ * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
+ * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
+ * can327.c Author : Max Staudt <max-linux@enpas.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see http://www.gnu.org/licenses/gpl.html
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/skb.h>
+
+#include "slcan.h"
+
+MODULE_ALIAS_LDISC(N_SLCAN);
+MODULE_DESCRIPTION("serial line CAN interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
+MODULE_AUTHOR("Dario Binacchi <dario.binacchi@amarulasolutions.com>");
+
+/* maximum rx buffer len: extended CAN frame with timestamp */
+#define SLCAN_MTU (sizeof("T1111222281122334455667788EA5F\r") + 1)
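+/* i.e. 'T' + 8 ID digits + 1 dlc digit + 16 payload digits + 4 timestamp
+ * digits + '\r'; sizeof() adds the trailing NUL and the +1 leaves one
+ * spare byte.
+ */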
+
+#define SLCAN_CMD_LEN 1
+#define SLCAN_SFF_ID_LEN 3
+#define SLCAN_EFF_ID_LEN 8
+#define SLCAN_STATE_LEN 1
+#define SLCAN_STATE_BE_RXCNT_LEN 3
+#define SLCAN_STATE_BE_TXCNT_LEN 3
+#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \
+ SLCAN_STATE_BE_RXCNT_LEN + \
+ SLCAN_STATE_BE_TXCNT_LEN)
+struct slcan {
+ struct can_priv can;
+
+ /* Various fields. */
+ struct tty_struct *tty; /* ptr to TTY structure */
+ struct net_device *dev; /* easy for intr handling */
+ spinlock_t lock;
+ struct work_struct tx_work; /* Flushes transmit buffer */
+
+ /* These are pointers to the malloc()ed frame buffers. */
+ unsigned char rbuff[SLCAN_MTU]; /* receiver buffer */
+ int rcount; /* received chars counter */
+ unsigned char xbuff[SLCAN_MTU]; /* transmitter buffer*/
+ unsigned char *xhead; /* pointer to next XMIT byte */
+ int xleft; /* bytes left in XMIT queue */
+
+ unsigned long flags; /* Flag values/ mode etc */
+#define SLF_ERROR 0 /* Parity, etc. error */
+#define SLF_XCMD 1 /* Command transmission */
+ unsigned long cmd_flags; /* Command flags */
+#define CF_ERR_RST 0 /* Reset errors on open */
+ wait_queue_head_t xcmd_wait; /* Wait queue for commands */
+ /* transmission */
+};
+
+static const u32 slcan_bitrate_const[] = {
+ 10000, 20000, 50000, 100000, 125000,
+ 250000, 500000, 800000, 1000000
+};
+
+bool slcan_err_rst_on_open(struct net_device *ndev)
+{
+ struct slcan *sl = netdev_priv(ndev);
+
+ return !!test_bit(CF_ERR_RST, &sl->cmd_flags);
+}
+
+int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on)
+{
+ struct slcan *sl = netdev_priv(ndev);
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if (on)
+ set_bit(CF_ERR_RST, &sl->cmd_flags);
+ else
+ clear_bit(CF_ERR_RST, &sl->cmd_flags);
+
+ return 0;
+}
+
+/*************************************************************************
+ * SLCAN ENCAPSULATION FORMAT *
+ *************************************************************************/
+
+/* A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
+ * frame format), a data length code (len) which can be from 0 to 8,
+ * and up to <len> data bytes as payload.
+ * Additionally a CAN frame may become a remote transmission frame if the
+ * RTR-bit is set. This causes another ECU to send a CAN frame with the
+ * given can_id.
+ *
+ * The SLCAN ASCII representation of these different frame types is:
+ * <type> <id> <dlc> <data>*
+ *
+ * Extended frames (29 bit) are defined by capital characters in the type.
+ * RTR frames are defined as 'r' types - normal frames have 't' type:
+ * t => 11 bit data frame
+ * r => 11 bit RTR frame
+ * T => 29 bit data frame
+ * R => 29 bit RTR frame
+ *
+ * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base 16).
+ * The <dlc> is a one byte ASCII number ('0' - '8').
+ * The <data> section has as many ASCII Hex bytes as defined by the <dlc>.
+ *
+ * Examples:
+ *
+ * t1230 : can_id 0x123, len 0, no data
+ * t4563112233 : can_id 0x456, len 3, data 0x11 0x22 0x33
+ * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, len 2, data 0xAA 0x55
+ * r1230 : can_id 0x123, len 0, no data, remote transmission request
+ *
+ */
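+
+/* For illustration only -- a minimal sketch, not used by the driver below:
+ * it encodes an 11-bit (SFF) data frame following the convention described
+ * above, reusing the kernel's hex_asc_upper[]/hex_byte_pack_upper() helpers
+ * that slcan_encaps() also uses. The caller is assumed to pass a buffer of
+ * at least SLCAN_MTU bytes. E.g. can_id 0x456, len 3, data 11 22 33 yields
+ * "t4563112233\r", matching the second example above.
+ */
+static inline int slcan_sff_example(char *buf, const struct can_frame *cf)
+{
+ u32 id = cf->can_id & CAN_SFF_MASK;
+ char *pos = buf;
+ int i;
+
+ *pos++ = (cf->can_id & CAN_RTR_FLAG) ? 'r' : 't';
+ for (i = SLCAN_SFF_ID_LEN - 1; i >= 0; i--)
+ *pos++ = hex_asc_upper[(id >> (4 * i)) & 0xf];
+ *pos++ = '0' + cf->len;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ for (i = 0; i < cf->len; i++)
+ pos = hex_byte_pack_upper(pos, cf->data[i]);
+ *pos++ = '\r';
+
+ return pos - buf;
+}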
+
+/*************************************************************************
+ * STANDARD SLCAN DECAPSULATION *
+ *************************************************************************/
+
+/* Send one completely decapsulated can_frame to the network layer */
+static void slcan_bump_frame(struct slcan *sl)
+{
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ int i, tmp;
+ u32 tmpid;
+ char *cmd = sl->rbuff;
+
+ skb = alloc_can_skb(sl->dev, &cf);
+ if (unlikely(!skb)) {
+ sl->dev->stats.rx_dropped++;
+ return;
+ }
+
+ switch (*cmd) {
+ case 'r':
+ cf->can_id = CAN_RTR_FLAG;
+ fallthrough;
+ case 't':
+ /* store dlc ASCII value and terminate SFF CAN ID string */
+ cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN];
+ sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN] = 0;
+ /* point to payload data behind the dlc */
+ cmd += SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN + 1;
+ break;
+ case 'R':
+ cf->can_id = CAN_RTR_FLAG;
+ fallthrough;
+ case 'T':
+ cf->can_id |= CAN_EFF_FLAG;
+ /* store dlc ASCII value and terminate EFF CAN ID string */
+ cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN];
+ sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN] = 0;
+ /* point to payload data behind the dlc */
+ cmd += SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN + 1;
+ break;
+ default:
+ goto decode_failed;
+ }
+
+ if (kstrtou32(sl->rbuff + SLCAN_CMD_LEN, 16, &tmpid))
+ goto decode_failed;
+
+ cf->can_id |= tmpid;
+
+ /* get len from sanitized ASCII value */
+ if (cf->len >= '0' && cf->len < '9')
+ cf->len -= '0';
+ else
+ goto decode_failed;
+
+ /* RTR frames may have a dlc > 0 but they never have any data bytes */
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf->len; i++) {
+ tmp = hex_to_bin(*cmd++);
+ if (tmp < 0)
+ goto decode_failed;
+
+ cf->data[i] = (tmp << 4);
+ tmp = hex_to_bin(*cmd++);
+ if (tmp < 0)
+ goto decode_failed;
+
+ cf->data[i] |= tmp;
+ }
+ }
+
+ sl->dev->stats.rx_packets++;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ sl->dev->stats.rx_bytes += cf->len;
+
+ netif_rx(skb);
+ return;
+
+decode_failed:
+ sl->dev->stats.rx_errors++;
+ dev_kfree_skb(skb);
+}
+
+/* A change state frame must contain state info and receive and transmit
+ * error counters.
+ *
+ * Examples:
+ *
+ * sb256256 : state bus-off: rx counter 256, tx counter 256
+ * sa057033 : state active, rx counter 57, tx counter 33
+ */
+static void slcan_bump_state(struct slcan *sl)
+{
+ struct net_device *dev = sl->dev;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ char *cmd = sl->rbuff;
+ u32 rxerr, txerr;
+ enum can_state state, rx_state, tx_state;
+
+ switch (cmd[1]) {
+ case 'a':
+ state = CAN_STATE_ERROR_ACTIVE;
+ break;
+ case 'w':
+ state = CAN_STATE_ERROR_WARNING;
+ break;
+ case 'p':
+ state = CAN_STATE_ERROR_PASSIVE;
+ break;
+ case 'b':
+ state = CAN_STATE_BUS_OFF;
+ break;
+ default:
+ return;
+ }
+
+ if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN)
+ return;
+
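+ /* For "sb256256" the buffer is laid out as 's', state char, "256" (rx),
+ * "256" (tx): step past the command/state bytes and the rx counter to
+ * terminate and parse the tx counter first, then step back over the rx
+ * counter and parse it in place.
+ */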
+ cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1;
+ cmd[SLCAN_STATE_BE_TXCNT_LEN] = 0;
+ if (kstrtou32(cmd, 10, &txerr))
+ return;
+
+ *cmd = 0;
+ cmd -= SLCAN_STATE_BE_RXCNT_LEN;
+ if (kstrtou32(cmd, 10, &rxerr))
+ return;
+
+ skb = alloc_can_err_skb(dev, &cf);
+
+ tx_state = txerr >= rxerr ? state : 0;
+ rx_state = txerr <= rxerr ? state : 0;
+ can_change_state(dev, cf, tx_state, rx_state);
+
+ if (state == CAN_STATE_BUS_OFF) {
+ can_bus_off(dev);
+ } else if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+
+ if (skb)
+ netif_rx(skb);
+}
+
+/* An error frame can contain more than one type of error.
+ *
+ * Examples:
+ *
+ * e1a : len 1, errors: ACK error
+ * e3bcO: len 3, errors: Bit0 error, CRC error, Tx overrun error
+ */
+static void slcan_bump_err(struct slcan *sl)
+{
+ struct net_device *dev = sl->dev;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ char *cmd = sl->rbuff;
+ bool rx_errors = false, tx_errors = false, rx_over_errors = false;
+ int i, len;
+
+ /* get len from sanitized ASCII value */
+ len = cmd[1];
+ if (len >= '0' && len < '9')
+ len -= '0';
+ else
+ return;
+
+ if ((len + SLCAN_CMD_LEN + 1) > sl->rcount)
+ return;
+
+ skb = alloc_can_err_skb(dev, &cf);
+
+ if (skb)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ cmd += SLCAN_CMD_LEN + 1;
+ for (i = 0; i < len; i++, cmd++) {
+ switch (*cmd) {
+ case 'a':
+ netdev_dbg(dev, "ACK error\n");
+ tx_errors = true;
+ if (skb) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+ }
+
+ break;
+ case 'b':
+ netdev_dbg(dev, "Bit0 error\n");
+ tx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+
+ break;
+ case 'B':
+ netdev_dbg(dev, "Bit1 error\n");
+ tx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+
+ break;
+ case 'c':
+ netdev_dbg(dev, "CRC error\n");
+ rx_errors = true;
+ if (skb) {
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+
+ break;
+ case 'f':
+ netdev_dbg(dev, "Form Error\n");
+ rx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+
+ break;
+ case 'o':
+ netdev_dbg(dev, "Rx overrun error\n");
+ rx_over_errors = true;
+ rx_errors = true;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+
+ break;
+ case 'O':
+ netdev_dbg(dev, "Tx overrun error\n");
+ tx_errors = true;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_TX_OVERFLOW;
+ }
+
+ break;
+ case 's':
+ netdev_dbg(dev, "Stuff error\n");
+ rx_errors = true;
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+
+ break;
+ default:
+ if (skb)
+ dev_kfree_skb(skb);
+
+ return;
+ }
+ }
+
+ if (rx_errors)
+ dev->stats.rx_errors++;
+
+ if (rx_over_errors)
+ dev->stats.rx_over_errors++;
+
+ if (tx_errors)
+ dev->stats.tx_errors++;
+
+ if (skb)
+ netif_rx(skb);
+}
+
+static void slcan_bump(struct slcan *sl)
+{
+ switch (sl->rbuff[0]) {
+ case 'r':
+ fallthrough;
+ case 't':
+ fallthrough;
+ case 'R':
+ fallthrough;
+ case 'T':
+ return slcan_bump_frame(sl);
+ case 'e':
+ return slcan_bump_err(sl);
+ case 's':
+ return slcan_bump_state(sl);
+ default:
+ return;
+ }
+}
+
+/* parse tty input stream */
+static void slcan_unesc(struct slcan *sl, unsigned char s)
+{
+ if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
+ if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+ sl->rcount > 4)
+ slcan_bump(sl);
+
+ sl->rcount = 0;
+ } else {
+ if (!test_bit(SLF_ERROR, &sl->flags)) {
+ if (sl->rcount < SLCAN_MTU) {
+ sl->rbuff[sl->rcount++] = s;
+ return;
+ }
+
+ sl->dev->stats.rx_over_errors++;
+ set_bit(SLF_ERROR, &sl->flags);
+ }
+ }
+}
+
+/*************************************************************************
+ * STANDARD SLCAN ENCAPSULATION *
+ *************************************************************************/
+
+/* Encapsulate one can_frame and stuff into a TTY queue. */
+static void slcan_encaps(struct slcan *sl, struct can_frame *cf)
+{
+ int actual, i;
+ unsigned char *pos;
+ unsigned char *endpos;
+ canid_t id = cf->can_id;
+
+ pos = sl->xbuff;
+
+ if (cf->can_id & CAN_RTR_FLAG)
+ *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */
+ else
+ *pos = 'T'; /* becomes 't' in standard frame format (SFF) */
+
+ /* determine number of chars for the CAN-identifier */
+ if (cf->can_id & CAN_EFF_FLAG) {
+ id &= CAN_EFF_MASK;
+ endpos = pos + SLCAN_EFF_ID_LEN;
+ } else {
+ *pos |= 0x20; /* convert R/T to lower case for SFF */
+ id &= CAN_SFF_MASK;
+ endpos = pos + SLCAN_SFF_ID_LEN;
+ }
+
+ /* build 3 (SFF) or 8 (EFF) digit CAN identifier */
+ pos++;
+ while (endpos >= pos) {
+ *endpos-- = hex_asc_upper[id & 0xf];
+ id >>= 4;
+ }
+
+ pos += (cf->can_id & CAN_EFF_FLAG) ?
+ SLCAN_EFF_ID_LEN : SLCAN_SFF_ID_LEN;
+
+ *pos++ = cf->len + '0';
+
+ /* RTR frames may have a dlc > 0 but they never have any data bytes */
+ if (!(cf->can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf->len; i++)
+ pos = hex_byte_pack_upper(pos, cf->data[i]);
+
+ sl->dev->stats.tx_bytes += cf->len;
+ }
+
+ *pos++ = '\r';
+
+ /* Order of next two lines is *very* important.
+ * When we are sending a small amount of data,
+ * the transfer may be completed inside the ops->write()
+ * routine, because it runs with interrupts enabled.
+ * In that case we would *never* get a WRITE_WAKEUP event
+ * if we did not request it before the write operation.
+ * 14 Oct 1994 Dmitry Gorodchanin.
+ */
+ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff);
+ sl->xleft = (pos - sl->xbuff) - actual;
+ sl->xhead = sl->xbuff + actual;
+}
+
+/* Write out any remaining transmit buffer. Scheduled when tty is writable */
+static void slcan_transmit(struct work_struct *work)
+{
+ struct slcan *sl = container_of(work, struct slcan, tx_work);
+ int actual;
+
+ spin_lock_bh(&sl->lock);
+ /* First make sure we're connected. */
+ if (unlikely(!netif_running(sl->dev)) &&
+ likely(!test_bit(SLF_XCMD, &sl->flags))) {
+ spin_unlock_bh(&sl->lock);
+ return;
+ }
+
+ if (sl->xleft <= 0) {
+ if (unlikely(test_bit(SLF_XCMD, &sl->flags))) {
+ clear_bit(SLF_XCMD, &sl->flags);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ spin_unlock_bh(&sl->lock);
+ wake_up(&sl->xcmd_wait);
+ return;
+ }
+
+ /* Now the serial buffer is almost free and we can start
+ * transmission of another packet
+ */
+ sl->dev->stats.tx_packets++;
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ spin_unlock_bh(&sl->lock);
+ netif_wake_queue(sl->dev);
+ return;
+ }
+
+ actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
+ sl->xleft -= actual;
+ sl->xhead += actual;
+ spin_unlock_bh(&sl->lock);
+}
+
+/* Called by the driver when there's room for more data.
+ * Schedule the transmit.
+ */
+static void slcan_write_wakeup(struct tty_struct *tty)
+{
+ struct slcan *sl = (struct slcan *)tty->disc_data;
+
+ schedule_work(&sl->tx_work);
+}
+
+/* Send a can_frame to a TTY queue. */
+static netdev_tx_t slcan_netdev_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+
+ if (can_dev_dropped_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ spin_lock(&sl->lock);
+ if (!netif_running(dev)) {
+ spin_unlock(&sl->lock);
+ netdev_warn(dev, "xmit: iface is down\n");
+ goto out;
+ }
+ if (!sl->tty) {
+ spin_unlock(&sl->lock);
+ goto out;
+ }
+
+ netif_stop_queue(sl->dev);
+ slcan_encaps(sl, (struct can_frame *)skb->data); /* encaps & send */
+ spin_unlock(&sl->lock);
+
+ skb_tx_timestamp(skb);
+
+out:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+/******************************************
+ * Routines looking at netdevice side.
+ ******************************************/
+
+static int slcan_transmit_cmd(struct slcan *sl, const unsigned char *cmd)
+{
+ int ret, actual, n;
+
+ spin_lock(&sl->lock);
+ if (!sl->tty) {
+ spin_unlock(&sl->lock);
+ return -ENODEV;
+ }
+
+ n = scnprintf(sl->xbuff, sizeof(sl->xbuff), "%s", cmd);
+ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ actual = sl->tty->ops->write(sl->tty, sl->xbuff, n);
+ sl->xleft = n - actual;
+ sl->xhead = sl->xbuff + actual;
+ set_bit(SLF_XCMD, &sl->flags);
+ spin_unlock(&sl->lock);
+ ret = wait_event_interruptible_timeout(sl->xcmd_wait,
+ !test_bit(SLF_XCMD, &sl->flags),
+ HZ);
+ clear_bit(SLF_XCMD, &sl->flags);
+ if (ret == -ERESTARTSYS)
+ return ret;
+
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* Netdevice UP -> DOWN routine */
+static int slcan_netdev_close(struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+ int err;
+
+ if (sl->can.bittiming.bitrate &&
+ sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) {
+ err = slcan_transmit_cmd(sl, "C\r");
+ if (err)
+ netdev_warn(dev,
+ "failed to send close command 'C\\r'\n");
+ }
+
+ /* TTY discipline is running. */
+ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ flush_work(&sl->tx_work);
+
+ netif_stop_queue(dev);
+ sl->rcount = 0;
+ sl->xleft = 0;
+ close_candev(dev);
+ sl->can.state = CAN_STATE_STOPPED;
+ if (sl->can.bittiming.bitrate == CAN_BITRATE_UNKNOWN)
+ sl->can.bittiming.bitrate = CAN_BITRATE_UNSET;
+
+ return 0;
+}
+
+/* Netdevice DOWN -> UP routine */
+static int slcan_netdev_open(struct net_device *dev)
+{
+ struct slcan *sl = netdev_priv(dev);
+ unsigned char cmd[SLCAN_MTU];
+ int err, s;
+
+ /* If the baud rate has not been set with the command
+ * `ip link set <iface> type can bitrate <baud>', then
+ * can.bittiming.bitrate is CAN_BITRATE_UNSET (0), causing
+ * open_candev() to fail. So let's set it to a fake value.
+ */
+ if (sl->can.bittiming.bitrate == CAN_BITRATE_UNSET)
+ sl->can.bittiming.bitrate = CAN_BITRATE_UNKNOWN;
+
+ err = open_candev(dev);
+ if (err) {
+ netdev_err(dev, "failed to open can device\n");
+ return err;
+ }
+
+ if (sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) {
+ for (s = 0; s < ARRAY_SIZE(slcan_bitrate_const); s++) {
+ if (sl->can.bittiming.bitrate == slcan_bitrate_const[s])
+ break;
+ }
+
+ /* The CAN framework has already validated the bitrate value,
+ * so we can avoid checking whether `s' has been properly set.
+ */
+ snprintf(cmd, sizeof(cmd), "C\rS%d\r", s);
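+ /* e.g. with the table above, a configured bitrate of 500000 resolves
+ * to index 6 in slcan_bitrate_const[], so the adapter is sent
+ * "C\rS6\r" (close the channel, then select 500 kbit/s).
+ */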
+ err = slcan_transmit_cmd(sl, cmd);
+ if (err) {
+ netdev_err(dev,
+ "failed to send bitrate command 'C\\rS%d\\r'\n",
+ s);
+ goto cmd_transmit_failed;
+ }
+
+ if (test_bit(CF_ERR_RST, &sl->cmd_flags)) {
+ err = slcan_transmit_cmd(sl, "F\r");
+ if (err) {
+ netdev_err(dev,
+ "failed to send error command 'F\\r'\n");
+ goto cmd_transmit_failed;
+ }
+ }
+
+ if (sl->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ err = slcan_transmit_cmd(sl, "L\r");
+ if (err) {
+ netdev_err(dev,
+ "failed to send listen-only command 'L\\r'\n");
+ goto cmd_transmit_failed;
+ }
+ } else {
+ err = slcan_transmit_cmd(sl, "O\r");
+ if (err) {
+ netdev_err(dev,
+ "failed to send open command 'O\\r'\n");
+ goto cmd_transmit_failed;
+ }
+ }
+ }
+
+ sl->can.state = CAN_STATE_ERROR_ACTIVE;
+ netif_start_queue(dev);
+ return 0;
+
+cmd_transmit_failed:
+ close_candev(dev);
+ return err;
+}
+
+static const struct net_device_ops slcan_netdev_ops = {
+ .ndo_open = slcan_netdev_open,
+ .ndo_stop = slcan_netdev_close,
+ .ndo_start_xmit = slcan_netdev_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+/******************************************
+ * Routines looking at TTY side.
+ ******************************************/
+
+/* Handle the 'receiver data ready' interrupt.
+ * This function is called by the 'tty_io' module in the kernel when
+ * a block of SLCAN data has been received, which can now be decapsulated
+ * and sent on to the CAN networking layer for further processing. This will not
+ * be re-entered while running but other ldisc functions may be called
+ * in parallel
+ */
+static void slcan_receive_buf(struct tty_struct *tty,
+ const unsigned char *cp, const char *fp,
+ int count)
+{
+ struct slcan *sl = (struct slcan *)tty->disc_data;
+
+ if (!netif_running(sl->dev))
+ return;
+
+ /* Read the characters out of the buffer */
+ while (count--) {
+ if (fp && *fp++) {
+ if (!test_and_set_bit(SLF_ERROR, &sl->flags))
+ sl->dev->stats.rx_errors++;
+ cp++;
+ continue;
+ }
+ slcan_unesc(sl, *cp++);
+ }
+}
+
+/* Open the high-level part of the SLCAN channel.
+ * This function is called by the TTY module when the
+ * SLCAN line discipline is called for.
+ *
+ * Called in process context serialized from other ldisc calls.
+ */
+static int slcan_open(struct tty_struct *tty)
+{
+ struct net_device *dev;
+ struct slcan *sl;
+ int err;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!tty->ops->write)
+ return -EOPNOTSUPP;
+
+ dev = alloc_candev(sizeof(*sl), 1);
+ if (!dev)
+ return -ENFILE;
+
+ sl = netdev_priv(dev);
+
+ /* Configure TTY interface */
+ tty->receive_room = 65536; /* We don't flow control */
+ sl->rcount = 0;
+ sl->xleft = 0;
+ spin_lock_init(&sl->lock);
+ INIT_WORK(&sl->tx_work, slcan_transmit);
+ init_waitqueue_head(&sl->xcmd_wait);
+
+ /* Configure CAN metadata */
+ sl->can.bitrate_const = slcan_bitrate_const;
+ sl->can.bitrate_const_cnt = ARRAY_SIZE(slcan_bitrate_const);
+ sl->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY;
+
+ /* Configure netdev interface */
+ sl->dev = dev;
+ dev->netdev_ops = &slcan_netdev_ops;
+ dev->ethtool_ops = &slcan_ethtool_ops;
+
+ /* Mark ldisc channel as alive */
+ sl->tty = tty;
+ tty->disc_data = sl;
+
+ err = register_candev(dev);
+ if (err) {
+ free_candev(dev);
+ pr_err("can't register candev\n");
+ return err;
+ }
+
+ netdev_info(dev, "slcan on %s.\n", tty->name);
+ /* TTY layer expects 0 on success */
+ return 0;
+}
+
+/* Close down a SLCAN channel.
+ * This means flushing out any pending queues, and then returning. This
+ * call is serialized against other ldisc functions.
+ * Once this is called, no other ldisc function of ours is entered.
+ *
+ * We also use this method for a hangup event.
+ */
+static void slcan_close(struct tty_struct *tty)
+{
+ struct slcan *sl = (struct slcan *)tty->disc_data;
+
+ unregister_candev(sl->dev);
+
+ /*
+ * The netdev needn't be UP (so .ndo_stop() is not called). Hence make
+ * sure this is not running before freeing it up.
+ */
+ flush_work(&sl->tx_work);
+
+ /* Mark channel as dead */
+ spin_lock_bh(&sl->lock);
+ tty->disc_data = NULL;
+ sl->tty = NULL;
+ spin_unlock_bh(&sl->lock);
+
+ netdev_info(sl->dev, "slcan off %s.\n", tty->name);
+ free_candev(sl->dev);
+}
+
+/* Perform I/O control on an active SLCAN channel. */
+static int slcan_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct slcan *sl = (struct slcan *)tty->disc_data;
+ unsigned int tmp;
+
+ switch (cmd) {
+ case SIOCGIFNAME:
+ tmp = strlen(sl->dev->name) + 1;
+ if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
+ return -EFAULT;
+ return 0;
+
+ case SIOCSIFHWADDR:
+ return -EINVAL;
+
+ default:
+ return tty_mode_ioctl(tty, cmd, arg);
+ }
+}
+
+static struct tty_ldisc_ops slcan_ldisc = {
+ .owner = THIS_MODULE,
+ .num = N_SLCAN,
+ .name = KBUILD_MODNAME,
+ .open = slcan_open,
+ .close = slcan_close,
+ .ioctl = slcan_ioctl,
+ .receive_buf = slcan_receive_buf,
+ .write_wakeup = slcan_write_wakeup,
+};
+
+static int __init slcan_init(void)
+{
+ int status;
+
+ pr_info("serial line CAN interface driver\n");
+
+ /* Fill in our line protocol discipline, and register it */
+ status = tty_register_ldisc(&slcan_ldisc);
+ if (status)
+ pr_err("can't register line discipline\n");
+
+ return status;
+}
+
+static void __exit slcan_exit(void)
+{
+ /* This will only be called when all channels have been closed by
+ * userspace - tty_ldisc.c takes care of the module's refcount.
+ */
+ tty_unregister_ldisc(&slcan_ldisc);
+}
+
+module_init(slcan_init);
+module_exit(slcan_exit);
diff --git a/drivers/net/can/slcan/slcan-ethtool.c b/drivers/net/can/slcan/slcan-ethtool.c
new file mode 100644
index 000000000000..f598c653fbfa
--- /dev/null
+++ b/drivers/net/can/slcan/slcan-ethtool.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ *
+ */
+
+#include <linux/can/dev.h>
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "slcan.h"
+
+static const char slcan_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN BIT(0)
+ "err-rst-on-open",
+};
+
+static void slcan_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, slcan_priv_flags_strings,
+ sizeof(slcan_priv_flags_strings));
+ }
+}
+
+static u32 slcan_get_priv_flags(struct net_device *ndev)
+{
+ u32 flags = 0;
+
+ if (slcan_err_rst_on_open(ndev))
+ flags |= SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN;
+
+ return flags;
+}
+
+static int slcan_set_priv_flags(struct net_device *ndev, u32 flags)
+{
+ bool err_rst_op_open = !!(flags & SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN);
+
+ return slcan_enable_err_rst_on_open(ndev, err_rst_op_open);
+}
+
+static int slcan_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(slcan_priv_flags_strings);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+const struct ethtool_ops slcan_ethtool_ops = {
+ .get_strings = slcan_get_strings,
+ .get_priv_flags = slcan_get_priv_flags,
+ .set_priv_flags = slcan_set_priv_flags,
+ .get_sset_count = slcan_get_sset_count,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
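+
+/* Usage sketch (assuming the standard ethtool CLI and an interface named,
+ * e.g., can0): the private flag declared above can be read with
+ * "ethtool --show-priv-flags can0" and toggled with
+ * "ethtool --set-priv-flags can0 err-rst-on-open on". Toggling only
+ * succeeds while the interface is down, since
+ * slcan_enable_err_rst_on_open() returns -EBUSY otherwise.
+ */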
diff --git a/drivers/net/can/slcan/slcan.h b/drivers/net/can/slcan/slcan.h
new file mode 100644
index 000000000000..85cedf856db3
--- /dev/null
+++ b/drivers/net/can/slcan/slcan.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * slcan.h - serial line CAN interface driver
+ *
+ * Copyright (C) Laurence Culhane <loz@holmes.demon.co.uk>
+ * Copyright (C) Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ * Copyright (C) Oliver Hartkopp <socketcan@hartkopp.net>
+ * Copyright (C) 2022 Amarula Solutions, Dario Binacchi <dario.binacchi@amarulasolutions.com>
+ *
+ */
+
+#ifndef _SLCAN_H
+#define _SLCAN_H
+
+bool slcan_err_rst_on_open(struct net_device *ndev);
+int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on);
+
+extern const struct ethtool_ops slcan_ethtool_ops;
+
+#endif /* _SLCAN_H */
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
index 2e93ee792373..e5c939b63fa6 100644
--- a/drivers/net/can/softing/softing_cs.c
+++ b/drivers/net/can/softing/softing_cs.c
@@ -293,7 +293,7 @@ static int softingcs_probe(struct pcmcia_device *pcmcia)
return 0;
platform_failed:
- kfree(dev);
+ platform_device_put(pdev);
mem_failed:
pcmcia_bad:
pcmcia_failed:
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 7e1536877993..32286f861a19 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -565,18 +565,19 @@ int softing_startstop(struct net_device *dev, int up)
if (ret < 0)
goto failed;
}
- /* enable_error_frame */
- /*
+
+ /* enable_error_frame
+ *
* Error reporting is switched off at the moment since
* the receiving of them is not yet 100% verified
* This should be enabled sooner or later
- *
- if (error_reporting) {
+ */
+ if (0 && error_reporting) {
ret = softing_fct_cmd(card, 51, "enable_error_frame");
if (ret < 0)
goto failed;
}
- */
+
/* initialize interface */
iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]);
iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]);
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index cfc1325aad10..c72f505d29fe 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -5,6 +5,7 @@
* - Kurt Van Dijck, EIA Electronics
*/
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <asm/io.h>
@@ -59,7 +60,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
struct can_frame *cf = (struct can_frame *)skb->data;
uint8_t buf[DPRAM_TX_SIZE];
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
spin_lock(&card->spin);
@@ -282,7 +283,10 @@ static int softing_handle_1(struct softing *card)
skb = priv->can.echo_skb[priv->tx.echo_get];
if (skb)
skb->tstamp = ktime;
- can_get_echo_skb(netdev, priv->tx.echo_get, NULL);
+ ++netdev->stats.tx_packets;
+ netdev->stats.tx_bytes +=
+ can_get_echo_skb(netdev, priv->tx.echo_get,
+ NULL);
++priv->tx.echo_get;
if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
priv->tx.echo_get = 0;
@@ -290,9 +294,6 @@ static int softing_handle_1(struct softing *card)
--priv->tx.pending;
if (card->tx.pending)
--card->tx.pending;
- ++netdev->stats.tx_packets;
- if (!(msg.can_id & CAN_RTR_FLAG))
- netdev->stats.tx_bytes += msg.len;
} else {
int ret;
@@ -392,13 +393,10 @@ static int softing_netdev_open(struct net_device *ndev)
static int softing_netdev_stop(struct net_device *ndev)
{
- int ret;
-
netif_stop_queue(ndev);
/* softing cycle does close_candev() */
- ret = softing_startstop(ndev, 0);
- return ret;
+ return softing_startstop(ndev, 0);
}
static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode)
@@ -614,8 +612,12 @@ static const struct net_device_ops softing_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops softing_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const softing_btr_const = {
- .name = "softing",
+ .name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
@@ -652,6 +654,7 @@ static struct net_device *softing_netdev_create(struct softing *card,
netdev->flags |= IFF_ECHO;
netdev->netdev_ops = &softing_netdev_ops;
+ netdev->ethtool_ops = &softing_ethtool_ops;
priv->can.do_set_mode = softing_candev_set_mode;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
@@ -849,7 +852,7 @@ platform_resource_failed:
static struct platform_driver softing_driver = {
.driver = {
- .name = "softing",
+ .name = KBUILD_MODNAME,
},
.probe = softing_pdev_probe,
.remove = softing_pdev_remove,
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 89d9c986a229..e1b8533a602e 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -16,20 +16,20 @@
#include <linux/can/core.h>
#include <linux/can/dev.h>
-#include <linux/can/led.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/freezer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
@@ -153,7 +153,6 @@ struct hi3110_priv {
u8 *spi_rx_buf;
struct sk_buff *tx_skb;
- int tx_len;
struct workqueue_struct *wq;
struct work_struct tx_work;
@@ -166,6 +165,8 @@ struct hi3110_priv {
#define HI3110_AFTER_SUSPEND_POWER 4
#define HI3110_AFTER_SUSPEND_RESTART 8
int restart_tx;
+ bool tx_busy;
+
struct regulator *power;
struct regulator *transceiver;
struct clk *clk;
@@ -175,13 +176,13 @@ static void hi3110_clean(struct net_device *net)
{
struct hi3110_priv *priv = netdev_priv(net);
- if (priv->tx_skb || priv->tx_len)
+ if (priv->tx_skb || priv->tx_busy)
net->stats.tx_errors++;
dev_kfree_skb(priv->tx_skb);
- if (priv->tx_len)
+ if (priv->tx_busy)
can_free_echo_skb(priv->net, 0, NULL);
priv->tx_skb = NULL;
- priv->tx_len = 0;
+ priv->tx_busy = false;
}
/* Note about handling of error return of hi3110_spi_trans: accessing
@@ -343,18 +344,17 @@ static void hi3110_hw_rx(struct spi_device *spi)
/* Data length */
frame->len = can_cc_dlc2len(buf[HI3110_FIFO_WOTIME_DLC_OFF] & 0x0F);
- if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR)
+ if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR) {
frame->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(frame->data, buf + HI3110_FIFO_WOTIME_DAT_OFF,
frame->len);
+ priv->net->stats.rx_bytes += frame->len;
+ }
priv->net->stats.rx_packets++;
- priv->net->stats.rx_bytes += frame->len;
-
- can_led_event(priv->net, CAN_LED_EVENT_RX);
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void hi3110_hw_sleep(struct spi_device *spi)
@@ -368,12 +368,12 @@ static netdev_tx_t hi3110_hard_start_xmit(struct sk_buff *skb,
struct hi3110_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- if (priv->tx_skb || priv->tx_len) {
+ if (priv->tx_skb || priv->tx_busy) {
dev_err(&spi->dev, "hard_xmit called while tx busy\n");
return NETDEV_TX_BUSY;
}
- if (can_dropped_invalid_skb(net, skb))
+ if (can_dev_dropped_skb(net, skb))
return NETDEV_TX_OK;
netif_stop_queue(net);
@@ -565,8 +565,6 @@ static int hi3110_stop(struct net_device *net)
mutex_unlock(&priv->hi3110_lock);
- can_led_event(net, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -585,7 +583,7 @@ static void hi3110_tx_work_handler(struct work_struct *ws)
} else {
frame = (struct can_frame *)priv->tx_skb->data;
hi3110_hw_tx(spi, frame);
- priv->tx_len = 1 + frame->len;
+ priv->tx_busy = true;
can_put_echo_skb(priv->tx_skb, net, 0, 0);
priv->tx_skb = NULL;
}
@@ -670,12 +668,10 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
txerr = hi3110_read(spi, HI3110_READ_TEC);
rxerr = hi3110_read(spi, HI3110_READ_REC);
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
tx_state = txerr >= rxerr ? new_state : 0;
rx_state = txerr <= rxerr ? new_state : 0;
can_change_state(net, cf, tx_state, rx_state);
- netif_rx_ni(skb);
+ netif_rx(skb);
if (new_state == CAN_STATE_BUS_OFF) {
can_bus_off(net);
@@ -684,6 +680,10 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
hi3110_hw_sleep(spi);
break;
}
+ } else {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
}
}
@@ -716,18 +716,14 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
cf->data[6] = hi3110_read(spi, HI3110_READ_TEC);
cf->data[7] = hi3110_read(spi, HI3110_READ_REC);
netdev_dbg(priv->net, "Bus Error\n");
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
- if (priv->tx_len && statf & HI3110_STAT_TXMTY) {
+ if (priv->tx_busy && statf & HI3110_STAT_TXMTY) {
net->stats.tx_packets++;
- net->stats.tx_bytes += priv->tx_len - 1;
- can_led_event(net, CAN_LED_EVENT_TX);
- if (priv->tx_len) {
- can_get_echo_skb(net, 0, NULL);
- priv->tx_len = 0;
- }
+ net->stats.tx_bytes += can_get_echo_skb(net, 0, NULL);
+ priv->tx_busy = false;
netif_wake_queue(net);
}
@@ -754,7 +750,7 @@ static int hi3110_open(struct net_device *net)
priv->force_quit = 0;
priv->tx_skb = NULL;
- priv->tx_len = 0;
+ priv->tx_busy = false;
ret = request_threaded_irq(spi->irq, NULL, hi3110_can_ist,
flags, DEVICE_NAME, priv);
@@ -784,7 +780,6 @@ static int hi3110_open(struct net_device *net)
if (ret)
goto out_free_wq;
- can_led_event(net, CAN_LED_EVENT_OPEN);
netif_wake_queue(net);
mutex_unlock(&priv->hi3110_lock);
@@ -808,6 +803,10 @@ static const struct net_device_ops hi3110_netdev_ops = {
.ndo_start_xmit = hi3110_hard_start_xmit,
};
+static const struct ethtool_ops hi3110_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct of_device_id hi3110_of_match[] = {
{
.compatible = "holt,hi3110",
@@ -828,19 +827,25 @@ MODULE_DEVICE_TABLE(spi, hi3110_id_table);
static int hi3110_can_probe(struct spi_device *spi)
{
- const struct of_device_id *of_id = of_match_device(hi3110_of_match,
- &spi->dev);
+ struct device *dev = &spi->dev;
struct net_device *net;
struct hi3110_priv *priv;
+ const void *match;
struct clk *clk;
- int freq, ret;
+ u32 freq;
+ int ret;
- clk = devm_clk_get(&spi->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&spi->dev, "no CAN clock source defined\n");
- return PTR_ERR(clk);
+ clk = devm_clk_get_optional(&spi->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "no CAN clock source defined\n");
+
+ if (clk) {
+ freq = clk_get_rate(clk);
+ } else {
+ ret = device_property_read_u32(dev, "clock-frequency", &freq);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clock-frequency!\n");
}
- freq = clk_get_rate(clk);
/* Sanity check */
if (freq > 40000000)
@@ -851,13 +856,12 @@ static int hi3110_can_probe(struct spi_device *spi)
if (!net)
return -ENOMEM;
- if (!IS_ERR(clk)) {
- ret = clk_prepare_enable(clk);
- if (ret)
- goto out_free;
- }
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto out_free;
net->netdev_ops = &hi3110_netdev_ops;
+ net->ethtool_ops = &hi3110_ethtool_ops;
net->flags |= IFF_ECHO;
priv = netdev_priv(net);
@@ -870,8 +874,9 @@ static int hi3110_can_probe(struct spi_device *spi)
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING;
- if (of_id)
- priv->model = (enum hi3110_model)(uintptr_t)of_id->data;
+ match = device_get_match_data(dev);
+ if (match)
+ priv->model = (enum hi3110_model)(uintptr_t)match;
else
priv->model = spi_get_device_id(spi)->driver_data;
priv->net = net;
@@ -918,9 +923,7 @@ static int hi3110_can_probe(struct spi_device *spi)
ret = hi3110_hw_probe(spi);
if (ret) {
- if (ret == -ENODEV)
- dev_err(&spi->dev, "Cannot initialize %x. Wrong wiring?\n",
- priv->model);
+ dev_err_probe(dev, ret, "Cannot initialize %x. Wrong wiring?\n", priv->model);
goto error_probe;
}
hi3110_hw_sleep(spi);
@@ -929,7 +932,6 @@ static int hi3110_can_probe(struct spi_device *spi)
if (ret)
goto error_probe;
- devm_can_led_init(net);
netdev_info(net, "%x successfully initialized.\n", priv->model);
return 0;
@@ -938,17 +940,15 @@ static int hi3110_can_probe(struct spi_device *spi)
hi3110_power_enable(priv->power, 0);
out_clk:
- if (!IS_ERR(clk))
- clk_disable_unprepare(clk);
+ clk_disable_unprepare(clk);
out_free:
free_candev(net);
- dev_err(&spi->dev, "Probe failed, err=%d\n", -ret);
- return ret;
+ return dev_err_probe(dev, ret, "Probe failed\n");
}
-static int hi3110_can_remove(struct spi_device *spi)
+static void hi3110_can_remove(struct spi_device *spi)
{
struct hi3110_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
@@ -957,12 +957,9 @@ static int hi3110_can_remove(struct spi_device *spi)
hi3110_power_enable(priv->power, 0);
- if (!IS_ERR(priv->clk))
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk);
free_candev(net);
-
- return 0;
}
static int __maybe_unused hi3110_can_suspend(struct device *dev)
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 0579ab74f728..79c4bab5f724 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -22,11 +22,11 @@
#include <linux/bitfield.h>
#include <linux/can/core.h>
#include <linux/can/dev.h>
-#include <linux/can/led.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/freezer.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
@@ -237,7 +237,6 @@ struct mcp251x_priv {
u8 *spi_rx_buf;
struct sk_buff *tx_skb;
- int tx_len;
struct workqueue_struct *wq;
struct work_struct tx_work;
@@ -250,6 +249,8 @@ struct mcp251x_priv {
#define AFTER_SUSPEND_POWER 4
#define AFTER_SUSPEND_RESTART 8
int restart_tx;
+ bool tx_busy;
+
struct regulator *power;
struct regulator *transceiver;
struct clk *clk;
@@ -272,13 +273,13 @@ static void mcp251x_clean(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
- if (priv->tx_skb || priv->tx_len)
+ if (priv->tx_skb || priv->tx_busy)
net->stats.tx_errors++;
dev_kfree_skb(priv->tx_skb);
- if (priv->tx_len)
+ if (priv->tx_busy)
can_free_echo_skb(priv->net, 0, NULL);
priv->tx_skb = NULL;
- priv->tx_len = 0;
+ priv->tx_busy = false;
}
/* Note about handling of error return of mcp251x_spi_trans: accessing
@@ -600,9 +601,6 @@ static int mcp251x_gpio_setup(struct mcp251x_priv *priv)
gpio->ngpio = ARRAY_SIZE(mcp251x_gpio_names);
gpio->names = mcp251x_gpio_names;
gpio->can_sleep = true;
-#ifdef CONFIG_OF_GPIO
- gpio->of_node = priv->spi->dev.of_node;
-#endif
return devm_gpiochip_add_data(&priv->spi->dev, gpio, priv);
}
@@ -733,14 +731,14 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
}
/* Data length */
frame->len = can_cc_dlc2len(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
- memcpy(frame->data, buf + RXBDAT_OFF, frame->len);
+ if (!(frame->can_id & CAN_RTR_FLAG)) {
+ memcpy(frame->data, buf + RXBDAT_OFF, frame->len);
+ priv->net->stats.rx_bytes += frame->len;
+ }
priv->net->stats.rx_packets++;
- priv->net->stats.rx_bytes += frame->len;
- can_led_event(priv->net, CAN_LED_EVENT_RX);
-
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static void mcp251x_hw_sleep(struct spi_device *spi)
@@ -786,12 +784,12 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- if (priv->tx_skb || priv->tx_len) {
+ if (priv->tx_skb || priv->tx_busy) {
dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
return NETDEV_TX_BUSY;
}
- if (can_dropped_invalid_skb(net, skb))
+ if (can_dev_dropped_skb(net, skb))
return NETDEV_TX_OK;
netif_stop_queue(net);
@@ -973,8 +971,6 @@ static int mcp251x_stop(struct net_device *net)
mutex_unlock(&priv->mcp_lock);
- can_led_event(net, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -987,7 +983,7 @@ static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
if (skb) {
frame->can_id |= can_id;
frame->data[1] = data1;
- netif_rx_ni(skb);
+ netif_rx(skb);
} else {
netdev_err(net, "cannot allocate error skb\n");
}
@@ -1011,7 +1007,7 @@ static void mcp251x_tx_work_handler(struct work_struct *ws)
if (frame->len > CAN_FRAME_MAX_DATA_LEN)
frame->len = CAN_FRAME_MAX_DATA_LEN;
mcp251x_hw_tx(spi, frame, 0);
- priv->tx_len = 1 + frame->len;
+ priv->tx_busy = true;
can_put_echo_skb(priv->tx_skb, net, 0, 0);
priv->tx_skb = NULL;
}
@@ -1074,9 +1070,6 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
mcp251x_read_2regs(spi, CANINTF, &intf, &eflag);
- /* mask out flags we don't care about */
- intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
-
/* receive buffer 0 */
if (intf & CANINTF_RX0IF) {
mcp251x_hw_rx(spi, 0);
@@ -1086,6 +1079,18 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
if (mcp251x_is_2510(spi))
mcp251x_write_bits(spi, CANINTF,
CANINTF_RX0IF, 0x00);
+
+ /* check if buffer 1 is already known to be full, no need to re-read */
+ if (!(intf & CANINTF_RX1IF)) {
+ u8 intf1, eflag1;
+
+ /* intf needs to be read again to avoid a race condition */
+ mcp251x_read_2regs(spi, CANINTF, &intf1, &eflag1);
+
+ /* combine flags from both operations for error handling */
+ intf |= intf1;
+ eflag |= eflag1;
+ }
}
/* receive buffer 1 */
@@ -1096,6 +1101,9 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
clear_intf |= CANINTF_RX1IF;
}
+ /* mask out flags we don't care about */
+ intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR;
+
/* any error or tx interrupt we need to clear? */
if (intf & (CANINTF_ERR | CANINTF_TX))
clear_intf |= intf & (CANINTF_ERR | CANINTF_TX);
@@ -1177,12 +1185,11 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
break;
if (intf & CANINTF_TX) {
- net->stats.tx_packets++;
- net->stats.tx_bytes += priv->tx_len - 1;
- can_led_event(net, CAN_LED_EVENT_TX);
- if (priv->tx_len) {
- can_get_echo_skb(net, 0, NULL);
- priv->tx_len = 0;
+ if (priv->tx_busy) {
+ net->stats.tx_packets++;
+ net->stats.tx_bytes += can_get_echo_skb(net, 0,
+ NULL);
+ priv->tx_busy = false;
}
netif_wake_queue(net);
}
@@ -1209,7 +1216,7 @@ static int mcp251x_open(struct net_device *net)
priv->force_quit = 0;
priv->tx_skb = NULL;
- priv->tx_len = 0;
+ priv->tx_busy = false;
if (!dev_fwnode(&spi->dev))
flags = IRQF_TRIGGER_FALLING;
@@ -1232,8 +1239,6 @@ static int mcp251x_open(struct net_device *net)
if (ret)
goto out_free_irq;
- can_led_event(net, CAN_LED_EVENT_OPEN);
-
netif_wake_queue(net);
mutex_unlock(&priv->mcp_lock);
@@ -1256,6 +1261,10 @@ static const struct net_device_ops mcp251x_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops mcp251x_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct of_device_id mcp251x_of_match[] = {
{
.compatible = "microchip,mcp2510",
@@ -1321,6 +1330,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
goto out_free;
net->netdev_ops = &mcp251x_netdev_ops;
+ net->ethtool_ops = &mcp251x_ethtool_ops;
net->flags |= IFF_ECHO;
priv = netdev_priv(net);
@@ -1403,15 +1413,16 @@ static int mcp251x_can_probe(struct spi_device *spi)
if (ret)
goto error_probe;
- devm_can_led_init(net);
-
ret = mcp251x_gpio_setup(priv);
if (ret)
- goto error_probe;
+ goto out_unregister_candev;
netdev_info(net, "MCP%x successfully initialized.\n", priv->model);
return 0;
+out_unregister_candev:
+ unregister_candev(net);
+
error_probe:
destroy_workqueue(priv->wq);
priv->wq = NULL;
@@ -1427,7 +1438,7 @@ out_free:
return ret;
}
-static int mcp251x_can_remove(struct spi_device *spi)
+static void mcp251x_can_remove(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
@@ -1442,8 +1453,6 @@ static int mcp251x_can_remove(struct spi_device *spi)
clk_disable_unprepare(priv->clk);
free_candev(net);
-
- return 0;
}
static int __maybe_unused mcp251x_can_suspend(struct device *dev)
diff --git a/drivers/net/can/spi/mcp251xfd/Kconfig b/drivers/net/can/spi/mcp251xfd/Kconfig
index dd0fc0a54be1..877e4356010d 100644
--- a/drivers/net/can/spi/mcp251xfd/Kconfig
+++ b/drivers/net/can/spi/mcp251xfd/Kconfig
@@ -2,6 +2,7 @@
config CAN_MCP251XFD
tristate "Microchip MCP251xFD SPI CAN controllers"
+ select CAN_RX_OFFLOAD
select REGMAP
select WANT_DEV_COREDUMP
help
diff --git a/drivers/net/can/spi/mcp251xfd/Makefile b/drivers/net/can/spi/mcp251xfd/Makefile
index 3cba3b9447ea..94d7de954294 100644
--- a/drivers/net/can/spi/mcp251xfd/Makefile
+++ b/drivers/net/can/spi/mcp251xfd/Makefile
@@ -3,9 +3,16 @@
obj-$(CONFIG_CAN_MCP251XFD) += mcp251xfd.o
mcp251xfd-objs :=
+mcp251xfd-objs += mcp251xfd-chip-fifo.o
mcp251xfd-objs += mcp251xfd-core.o
mcp251xfd-objs += mcp251xfd-crc16.o
+mcp251xfd-objs += mcp251xfd-ethtool.o
+mcp251xfd-objs += mcp251xfd-ram.o
mcp251xfd-objs += mcp251xfd-regmap.o
+mcp251xfd-objs += mcp251xfd-ring.o
+mcp251xfd-objs += mcp251xfd-rx.o
+mcp251xfd-objs += mcp251xfd-tef.o
mcp251xfd-objs += mcp251xfd-timestamp.o
+mcp251xfd-objs += mcp251xfd-tx.o
mcp251xfd-$(CONFIG_DEV_COREDUMP) += mcp251xfd-dump.o
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
new file mode 100644
index 000000000000..0d96097a2547
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/bitfield.h>
+
+#include "mcp251xfd.h"
+
+static int
+mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u32 fifo_con;
+
+ /* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
+ *
+ * FIFOs hit by a RX MAB overflow and RXOVIE enabled will
+ * generate a RXOVIF, use this to properly detect RX MAB
+ * overflows.
+ */
+ fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
+ ring->obj_num - 1) |
+ MCP251XFD_REG_FIFOCON_RXTSEN |
+ MCP251XFD_REG_FIFOCON_RXOVIE |
+ MCP251XFD_REG_FIFOCON_TFNRFNIE;
+
+ if (mcp251xfd_is_fd_mode(priv))
+ fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_64);
+ else
+ fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_8);
+
+ return regmap_write(priv->map_reg,
+ MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
+}
+
+static int
+mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u32 fltcon;
+
+ fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
+ MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);
+
+ return regmap_update_bits(priv->map_reg,
+ MCP251XFD_REG_FLTCON(ring->nr >> 2),
+ MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
+ fltcon);
+}
+
+int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ const struct mcp251xfd_rx_ring *rx_ring;
+ u32 val;
+ int err, n;
+
+ /* TEF */
+ val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
+ tx_ring->obj_num - 1) |
+ MCP251XFD_REG_TEFCON_TEFTSEN |
+ MCP251XFD_REG_TEFCON_TEFOVIE |
+ MCP251XFD_REG_TEFCON_TEFNEIE;
+
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
+ if (err)
+ return err;
+
+ /* TX FIFO */
+ val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
+ tx_ring->obj_num - 1) |
+ MCP251XFD_REG_FIFOCON_TXEN |
+ MCP251XFD_REG_FIFOCON_TXATIE;
+
+ if (mcp251xfd_is_fd_mode(priv))
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_64);
+ else
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_8);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
+ MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
+ else
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
+ MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);
+
+ err = regmap_write(priv->map_reg,
+ MCP251XFD_REG_FIFOCON(priv->tx->fifo_nr),
+ val);
+ if (err)
+ return err;
+
+ /* RX FIFOs */
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
+ err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
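
The FIFOCON setup in the new file relies on the FIELD_PREP()/FIELD_GET() helpers from <linux/bitfield.h>. A minimal sketch of how these helpers pack and unpack a register field; the mask name below is made up for illustration and is not part of the driver:

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Hypothetical 5 bit field occupying bits 28:24 of a 32 bit register */
#define EXAMPLE_FSIZE_MASK	GENMASK(28, 24)

static u32 example_pack_fifo_size(u32 reg, u8 obj_num)
{
	/* FIELD_PREP() shifts the value into the field's bit position */
	reg |= FIELD_PREP(EXAMPLE_FSIZE_MASK, obj_num - 1);
	return reg;
}

static u8 example_unpack_fifo_size(u32 reg)
{
	/* FIELD_GET() masks the field and shifts it back down */
	return FIELD_GET(EXAMPLE_FSIZE_MASK, reg) + 1;
}
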
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index e16dc482f327..68df6d4641b5 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -12,6 +12,7 @@
// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
//
+#include <asm/unaligned.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -20,8 +21,6 @@
#include <linux/pm_runtime.h>
#include <linux/property.h>
-#include <asm/unaligned.h>
-
#include "mcp251xfd.h"
#define DEVICE_NAME "mcp251xfd"
@@ -39,6 +38,12 @@ static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
.model = MCP251XFD_MODEL_MCP2518FD,
};
+static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251863 = {
+ .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
+ MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
+ .model = MCP251XFD_MODEL_MCP251863,
+};
+
/* Autodetect model, start with CRC enabled. */
static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
.quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
@@ -77,6 +82,8 @@ static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
return "MCP2517FD";
case MCP251XFD_MODEL_MCP2518FD:
return "MCP2518FD";
+ case MCP251XFD_MODEL_MCP251863:
+ return "MCP251863";
case MCP251XFD_MODEL_MCP251XFD:
return "MCP251xFD";
}
@@ -114,6 +121,22 @@ static const char *mcp251xfd_get_mode_str(const u8 mode)
return "<unknown>";
}
+static const char *
+mcp251xfd_get_osc_str(const u32 osc, const u32 osc_reference)
+{
+ switch (~osc & osc_reference &
+ (MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY)) {
+ case MCP251XFD_REG_OSC_PLLRDY:
+ return "PLL";
+ case MCP251XFD_REG_OSC_OSCRDY:
+ return "Oscillator";
+ case MCP251XFD_REG_OSC_PLLRDY | MCP251XFD_REG_OSC_OSCRDY:
+ return "Oscillator/PLL";
+ }
+
+ return "<unknown>";
+}
+
static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
{
if (!priv->reg_vdd)
@@ -180,328 +203,9 @@ static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
return 0;
}
-static inline u8
-mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
- union mcp251xfd_write_reg_buf *write_reg_buf,
- const u16 reg, const u32 mask, const u32 val)
+static inline bool mcp251xfd_reg_invalid(u32 reg)
{
- u8 first_byte, last_byte, len;
- u8 *data;
- __le32 val_le32;
-
- first_byte = mcp251xfd_first_byte_set(mask);
- last_byte = mcp251xfd_last_byte_set(mask);
- len = last_byte - first_byte + 1;
-
- data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
- val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
- memcpy(data, &val_le32, len);
-
- if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
- u16 crc;
-
- mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
- len);
- /* CRC */
- len += sizeof(write_reg_buf->crc.cmd);
- crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
- put_unaligned_be16(crc, (void *)write_reg_buf + len);
-
- /* Total length */
- len += sizeof(write_reg_buf->crc.crc);
- } else {
- len += sizeof(write_reg_buf->nocrc.cmd);
- }
-
- return len;
-}
-
-static inline int
-mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
- u8 *tef_tail)
-{
- u32 tef_ua;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
- if (err)
- return err;
-
- *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
-
- return 0;
-}
-
-static inline int
-mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
- u8 *tx_tail)
-{
- u32 fifo_sta;
- int err;
-
- err = regmap_read(priv->map_reg,
- MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
- &fifo_sta);
- if (err)
- return err;
-
- *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
-
- return 0;
-}
-
-static inline int
-mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring,
- u8 *rx_head)
-{
- u32 fifo_sta;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
- &fifo_sta);
- if (err)
- return err;
-
- *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
-
- return 0;
-}
-
-static inline int
-mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring,
- u8 *rx_tail)
-{
- u32 fifo_ua;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
- &fifo_ua);
- if (err)
- return err;
-
- fifo_ua -= ring->base - MCP251XFD_RAM_START;
- *rx_tail = fifo_ua / ring->obj_size;
-
- return 0;
-}
-
-static void
-mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_tx_ring *ring,
- struct mcp251xfd_tx_obj *tx_obj,
- const u8 rts_buf_len,
- const u8 n)
-{
- struct spi_transfer *xfer;
- u16 addr;
-
- /* FIFO load */
- addr = mcp251xfd_get_tx_obj_addr(ring, n);
- if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
- mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
- addr);
- else
- mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
- addr);
-
- xfer = &tx_obj->xfer[0];
- xfer->tx_buf = &tx_obj->buf;
- xfer->len = 0; /* actual len is assigned on the fly */
- xfer->cs_change = 1;
- xfer->cs_change_delay.value = 0;
- xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
-
- /* FIFO request to send */
- xfer = &tx_obj->xfer[1];
- xfer->tx_buf = &ring->rts_buf;
- xfer->len = rts_buf_len;
-
- /* SPI message */
- spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
- ARRAY_SIZE(tx_obj->xfer));
-}
-
-static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
-{
- struct mcp251xfd_tef_ring *tef_ring;
- struct mcp251xfd_tx_ring *tx_ring;
- struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
- struct mcp251xfd_tx_obj *tx_obj;
- struct spi_transfer *xfer;
- u32 val;
- u16 addr;
- u8 len;
- int i, j;
-
- netdev_reset_queue(priv->ndev);
-
- /* TEF */
- tef_ring = priv->tef;
- tef_ring->head = 0;
- tef_ring->tail = 0;
-
- /* FIFO increment TEF tail pointer */
- addr = MCP251XFD_REG_TEFCON;
- val = MCP251XFD_REG_TEFCON_UINC;
- len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
- addr, val, val);
-
- for (j = 0; j < ARRAY_SIZE(tef_ring->uinc_xfer); j++) {
- xfer = &tef_ring->uinc_xfer[j];
- xfer->tx_buf = &tef_ring->uinc_buf;
- xfer->len = len;
- xfer->cs_change = 1;
- xfer->cs_change_delay.value = 0;
- xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
- }
-
- /* "cs_change == 1" on the last transfer results in an active
- * chip select after the complete SPI message. This causes the
- * controller to interpret the next register access as
- * data. Set "cs_change" of the last transfer to "0" to
- * properly deactivate the chip select at the end of the
- * message.
- */
- xfer->cs_change = 0;
-
- /* TX */
- tx_ring = priv->tx;
- tx_ring->head = 0;
- tx_ring->tail = 0;
- tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);
-
- /* FIFO request to send */
- addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
- val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
- len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
- addr, val, val);
-
- mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
- mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
-
- /* RX */
- mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
- rx_ring->head = 0;
- rx_ring->tail = 0;
- rx_ring->nr = i;
- rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);
-
- if (!prev_rx_ring)
- rx_ring->base =
- mcp251xfd_get_tx_obj_addr(tx_ring,
- tx_ring->obj_num);
- else
- rx_ring->base = prev_rx_ring->base +
- prev_rx_ring->obj_size *
- prev_rx_ring->obj_num;
-
- prev_rx_ring = rx_ring;
-
- /* FIFO increment RX tail pointer */
- addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
- val = MCP251XFD_REG_FIFOCON_UINC;
- len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
- addr, val, val);
-
- for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
- xfer = &rx_ring->uinc_xfer[j];
- xfer->tx_buf = &rx_ring->uinc_buf;
- xfer->len = len;
- xfer->cs_change = 1;
- xfer->cs_change_delay.value = 0;
- xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
- }
-
- /* "cs_change == 1" on the last transfer results in an
- * active chip select after the complete SPI
- * message. This causes the controller to interpret
- * the next register access as data. Set "cs_change"
- * of the last transfer to "0" to properly deactivate
- * the chip select at the end of the message.
- */
- xfer->cs_change = 0;
- }
-}
-
-static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
-{
- int i;
-
- for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
- kfree(priv->rx[i]);
- priv->rx[i] = NULL;
- }
-}
-
-static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
-{
- struct mcp251xfd_tx_ring *tx_ring;
- struct mcp251xfd_rx_ring *rx_ring;
- int tef_obj_size, tx_obj_size, rx_obj_size;
- int tx_obj_num;
- int ram_free, i;
-
- tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
- /* listen-only mode works like FD mode */
- if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) {
- tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
- tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
- rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
- } else {
- tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
- tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
- rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
- }
-
- tx_ring = priv->tx;
- tx_ring->obj_num = tx_obj_num;
- tx_ring->obj_size = tx_obj_size;
-
- ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
- (tef_obj_size + tx_obj_size);
-
- for (i = 0;
- i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
- i++) {
- int rx_obj_num;
-
- rx_obj_num = ram_free / rx_obj_size;
- rx_obj_num = min(1 << (fls(rx_obj_num) - 1),
- MCP251XFD_RX_OBJ_NUM_MAX);
-
- rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
- GFP_KERNEL);
- if (!rx_ring) {
- mcp251xfd_ring_free(priv);
- return -ENOMEM;
- }
- rx_ring->obj_num = rx_obj_num;
- rx_ring->obj_size = rx_obj_size;
- priv->rx[i] = rx_ring;
-
- ram_free -= rx_ring->obj_num * rx_ring->obj_size;
- }
- priv->rx_ring_num = i;
-
- netdev_dbg(priv->ndev,
- "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
- tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
- tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);
-
- mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
- netdev_dbg(priv->ndev,
- "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
- i, rx_ring->obj_num, rx_ring->obj_size,
- rx_ring->obj_size * rx_ring->obj_num);
- }
-
- netdev_dbg(priv->ndev,
- "FIFO setup: free: %d bytes\n",
- ram_free);
-
- return 0;
+ return reg == 0x0 || reg == 0xffffffff;
}
static inline int
@@ -523,34 +227,55 @@ static int
__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
const u8 mode_req, bool nowait)
{
- u32 con, con_reqop;
+ u32 con = 0, con_reqop, osc = 0;
+ u8 mode;
int err;
con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
- if (err)
+ if (err == -EBADMSG) {
+ netdev_err(priv->ndev,
+ "Failed to set Requested Operation Mode.\n");
+
+ return -ENODEV;
+ } else if (err) {
return err;
+ }
if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
return 0;
err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
+ !mcp251xfd_reg_invalid(con) &&
FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
con) == mode_req,
MCP251XFD_POLL_SLEEP_US,
MCP251XFD_POLL_TIMEOUT_US);
- if (err) {
- u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);
+ if (err != -ETIMEDOUT && err != -EBADMSG)
+ return err;
+
+ /* Ignore the return value and print the error messages below
+ * even if this read fails.
+ */
+ regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
+ if (mcp251xfd_reg_invalid(con)) {
netdev_err(priv->ndev,
- "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
- mcp251xfd_get_mode_str(mode_req), mode_req,
- mcp251xfd_get_mode_str(mode), mode);
- return err;
+ "Failed to read CAN Control Register (con=0x%08x, osc=0x%08x).\n",
+ con, osc);
+
+ return -ENODEV;
}
- return 0;
+ mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);
+ netdev_err(priv->ndev,
+ "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u) (con=0x%08x, osc=0x%08x).\n",
+ mcp251xfd_get_mode_str(mode_req), mode_req,
+ mcp251xfd_get_mode_str(mode), mode,
+ con, osc);
+
+ return -ETIMEDOUT;
}
static inline int
@@ -567,27 +292,58 @@ mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
return __mcp251xfd_chip_set_mode(priv, mode_req, true);
}
-static inline bool mcp251xfd_osc_invalid(u32 reg)
+static int
+mcp251xfd_chip_wait_for_osc_ready(const struct mcp251xfd_priv *priv,
+ u32 osc_reference, u32 osc_mask)
{
- return reg == 0x0 || reg == 0xffffffff;
+ u32 osc;
+ int err;
+
+ err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
+ !mcp251xfd_reg_invalid(osc) &&
+ (osc & osc_mask) == osc_reference,
+ MCP251XFD_OSC_STAB_SLEEP_US,
+ MCP251XFD_OSC_STAB_TIMEOUT_US);
+ if (err != -ETIMEDOUT)
+ return err;
+
+ if (mcp251xfd_reg_invalid(osc)) {
+ netdev_err(priv->ndev,
+ "Failed to read Oscillator Configuration Register (osc=0x%08x).\n",
+ osc);
+ return -ENODEV;
+ }
+
+ netdev_err(priv->ndev,
+ "Timeout waiting for %s ready (osc=0x%08x, osc_reference=0x%08x, osc_mask=0x%08x).\n",
+ mcp251xfd_get_osc_str(osc, osc_reference),
+ osc, osc_reference, osc_mask);
+
+ return -ETIMEDOUT;
}
-static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
+static int mcp251xfd_chip_wake(const struct mcp251xfd_priv *priv)
{
u32 osc, osc_reference, osc_mask;
int err;
- /* Set Power On Defaults for "Clock Output Divisor" and remove
- * "Oscillator Disable" bit.
+ /* For normal sleep on MCP2517FD and MCP2518FD, clearing
+ * "Oscillator Disable" will wake the chip. For low power mode
+ * on MCP2518FD, asserting the chip select will wake the
+ * chip. Writing to the Oscillator register will wake it in
+ * both cases.
*/
osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
MCP251XFD_REG_OSC_CLKODIV_10);
+
+ /* We cannot check for the PLL ready bit (either set or
+ * unset), as the PLL might already be enabled. This can happen
+ * if the system reboots while the mcp251xfd stays powered.
+ */
osc_reference = MCP251XFD_REG_OSC_OSCRDY;
- osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;
+ osc_mask = MCP251XFD_REG_OSC_OSCRDY;
- /* Note:
- *
- * If the controller is in Sleep Mode the following write only
+ /* If the controller is in Sleep Mode the following write only
* removes the "Oscillator Disable" bit and powers it up. All
* other bits are unaffected.
*/
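
Both the mode change and mcp251xfd_chip_wait_for_osc_ready() above poll a register with regmap_read_poll_timeout(). A small sketch of the typical usage of that macro; the register and bit names are hypothetical:

#include <linux/bits.h>
#include <linux/regmap.h>

#define EXAMPLE_REG_STATUS	0x10
#define EXAMPLE_STATUS_READY	BIT(0)

static int example_wait_ready(struct regmap *map)
{
	u32 sta;

	/* Re-read the register every 50 us until the READY bit is
	 * set, give up after 10 ms. 'sta' holds the last value read;
	 * the result is 0, a read error, or -ETIMEDOUT.
	 */
	return regmap_read_poll_timeout(map, EXAMPLE_REG_STATUS, sta,
					sta & EXAMPLE_STATUS_READY,
					50, 10000);
}
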
@@ -595,24 +351,31 @@ static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
if (err)
return err;
- /* Wait for "Oscillator Ready" bit */
- err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
- (osc & osc_mask) == osc_reference,
- MCP251XFD_OSC_STAB_SLEEP_US,
- MCP251XFD_OSC_STAB_TIMEOUT_US);
- if (mcp251xfd_osc_invalid(osc)) {
- netdev_err(priv->ndev,
- "Failed to detect %s (osc=0x%08x).\n",
- mcp251xfd_get_model_str(priv), osc);
- return -ENODEV;
- } else if (err == -ETIMEDOUT) {
- netdev_err(priv->ndev,
- "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
- osc, osc_reference);
- return -ETIMEDOUT;
+ /* Sometimes the PLL is stuck enabled, the controller never
+ * sets the OSC Ready bit, and we get an -ETIMEDOUT. Our
+ * caller takes care of retrying.
+ */
+ return mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask);
+}
+
+static inline int mcp251xfd_chip_sleep(const struct mcp251xfd_priv *priv)
+{
+ if (priv->pll_enable) {
+ u32 osc;
+ int err;
+
+ /* Turn off PLL */
+ osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
+ MCP251XFD_REG_OSC_CLKODIV_10);
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
+ if (err)
+ netdev_err(priv->ndev,
+ "Failed to disable PLL.\n");
+
+ priv->spi->max_speed_hz = priv->spi_max_speed_hz_slow;
}
- return err;
+ return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
}
static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
@@ -620,10 +383,10 @@ static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
const __be16 cmd = mcp251xfd_cmd_reset();
int err;
- /* The Set Mode and SPI Reset command only seems to works if
- * the controller is not in Sleep Mode.
+ /* The Set Mode and SPI Reset command only works if the
+ * controller is not in Sleep Mode.
*/
- err = mcp251xfd_chip_clock_enable(priv);
+ err = mcp251xfd_chip_wake(priv);
if (err)
return err;
@@ -637,34 +400,29 @@ static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
{
- u32 osc, osc_reference;
+ u32 osc_reference, osc_mask;
u8 mode;
int err;
- err = mcp251xfd_chip_get_mode(priv, &mode);
- if (err)
- return err;
-
- if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
- netdev_info(priv->ndev,
- "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
- mcp251xfd_get_mode_str(mode), mode);
- return -ETIMEDOUT;
- }
-
+ /* Check for the reset defaults of the OSC register.
+ * This also covers the oscillator stabilization period.
+ */
osc_reference = MCP251XFD_REG_OSC_OSCRDY |
FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
MCP251XFD_REG_OSC_CLKODIV_10);
+ osc_mask = osc_reference | MCP251XFD_REG_OSC_PLLRDY;
+ err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask);
+ if (err)
+ return err;
- /* check reset defaults of OSC reg */
- err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
+ err = mcp251xfd_chip_get_mode(priv, &mode);
if (err)
return err;
- if (osc != osc_reference) {
+ if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
netdev_info(priv->ndev,
- "Controller failed to reset. osc=0x%08x, reference value=0x%08x.\n",
- osc, osc_reference);
+ "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
+ mcp251xfd_get_mode_str(mode), mode);
return -ETIMEDOUT;
}
@@ -700,7 +458,7 @@ static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
{
- u32 osc;
+ u32 osc, osc_reference, osc_mask;
int err;
/* Activate Low Power Mode on Oscillator Disable. This only
@@ -710,10 +468,29 @@ static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
osc = MCP251XFD_REG_OSC_LPMEN |
FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
MCP251XFD_REG_OSC_CLKODIV_10);
+ osc_reference = MCP251XFD_REG_OSC_OSCRDY;
+ osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;
+
+ if (priv->pll_enable) {
+ osc |= MCP251XFD_REG_OSC_PLLEN;
+ osc_reference |= MCP251XFD_REG_OSC_PLLRDY;
+ }
+
err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
if (err)
return err;
+ err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask);
+ if (err)
+ return err;
+
+ priv->spi->max_speed_hz = priv->spi_max_speed_hz_fast;
+
+ return 0;
+}
+
+static int mcp251xfd_chip_timestamp_init(const struct mcp251xfd_priv *priv)
+{
/* Set Time Base Counter Prescaler to 1.
*
* This means an overflow of the 32 bit Time Base Counter
@@ -838,108 +615,6 @@ static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
}
-static int
-mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring)
-{
- u32 fifo_con;
-
- /* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
- *
- * FIFOs hit by a RX MAB overflow and RXOVIE enabled will
- * generate a RXOVIF, use this to properly detect RX MAB
- * overflows.
- */
- fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
- ring->obj_num - 1) |
- MCP251XFD_REG_FIFOCON_RXTSEN |
- MCP251XFD_REG_FIFOCON_RXOVIE |
- MCP251XFD_REG_FIFOCON_TFNRFNIE;
-
- if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
- fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
- MCP251XFD_REG_FIFOCON_PLSIZE_64);
- else
- fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
- MCP251XFD_REG_FIFOCON_PLSIZE_8);
-
- return regmap_write(priv->map_reg,
- MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
-}
-
-static int
-mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring)
-{
- u32 fltcon;
-
- fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
- MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);
-
- return regmap_update_bits(priv->map_reg,
- MCP251XFD_REG_FLTCON(ring->nr >> 2),
- MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
- fltcon);
-}
-
-static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- const struct mcp251xfd_rx_ring *rx_ring;
- u32 val;
- int err, n;
-
- /* TEF */
- val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
- tx_ring->obj_num - 1) |
- MCP251XFD_REG_TEFCON_TEFTSEN |
- MCP251XFD_REG_TEFCON_TEFOVIE |
- MCP251XFD_REG_TEFCON_TEFNEIE;
-
- err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
- if (err)
- return err;
-
- /* FIFO 1 - TX */
- val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
- tx_ring->obj_num - 1) |
- MCP251XFD_REG_FIFOCON_TXEN |
- MCP251XFD_REG_FIFOCON_TXATIE;
-
- if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
- val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
- MCP251XFD_REG_FIFOCON_PLSIZE_64);
- else
- val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
- MCP251XFD_REG_FIFOCON_PLSIZE_8);
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
- val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
- MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
- else
- val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
- MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);
-
- err = regmap_write(priv->map_reg,
- MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
- val);
- if (err)
- return err;
-
- /* RX FIFOs */
- mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
- err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
- if (err)
- return err;
-
- err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
{
struct mcp251xfd_ecc *ecc = &priv->ecc;
@@ -968,18 +643,10 @@ static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
return err;
}
-static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
-{
- struct mcp251xfd_ecc *ecc = &priv->ecc;
-
- ecc->ecc_stat = 0;
-}
-
static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv)
{
u8 mode;
-
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK;
else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
@@ -1064,14 +731,14 @@ static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
}
-static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
- const enum can_state state)
+static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
+ const enum can_state state)
{
priv->can.state = state;
mcp251xfd_chip_interrupts_disable(priv);
mcp251xfd_chip_rx_int_disable(priv);
- return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ mcp251xfd_chip_sleep(priv);
}
static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
@@ -1086,6 +753,10 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
if (err)
goto out_chip_stop;
+ err = mcp251xfd_chip_timestamp_init(priv);
+ if (err)
+ goto out_chip_stop;
+
err = mcp251xfd_set_bittiming(priv);
if (err)
goto out_chip_stop;
@@ -1098,7 +769,9 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
if (err)
goto out_chip_stop;
- mcp251xfd_ring_init(priv);
+ err = mcp251xfd_ring_init(priv);
+ if (err)
+ goto out_chip_stop;
err = mcp251xfd_chip_fifo_init(priv);
if (err)
@@ -1186,433 +859,6 @@ static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
return __mcp251xfd_get_berr_counter(ndev, bec);
}
-static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
-{
- u8 tef_tail_chip, tef_tail;
- int err;
-
- if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
- return 0;
-
- err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
- if (err)
- return err;
-
- tef_tail = mcp251xfd_get_tef_tail(priv);
- if (tef_tail_chip != tef_tail) {
- netdev_err(priv->ndev,
- "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
- tef_tail_chip, tef_tail);
- return -EILSEQ;
- }
-
- return 0;
-}
-
-static int
-mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring)
-{
- u8 rx_tail_chip, rx_tail;
- int err;
-
- if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
- return 0;
-
- err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
- if (err)
- return err;
-
- rx_tail = mcp251xfd_get_rx_tail(ring);
- if (rx_tail_chip != rx_tail) {
- netdev_err(priv->ndev,
- "RX tail of chip (%d) and ours (%d) inconsistent.\n",
- rx_tail_chip, rx_tail);
- return -EILSEQ;
- }
-
- return 0;
-}
-
-static int
-mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- u32 tef_sta;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
- if (err)
- return err;
-
- if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
- netdev_err(priv->ndev,
- "Transmit Event FIFO buffer overflow.\n");
- return -ENOBUFS;
- }
-
- netdev_info(priv->ndev,
- "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
- tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
- "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
- "not empty" : "empty",
- seq, priv->tef->tail, priv->tef->head, tx_ring->head);
-
- /* The Sequence Number in the TEF doesn't match our tef_tail. */
- return -EAGAIN;
-}
-
-static int
-mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
- const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
- unsigned int *frame_len_ptr)
-{
- struct net_device_stats *stats = &priv->ndev->stats;
- struct sk_buff *skb;
- u32 seq, seq_masked, tef_tail_masked, tef_tail;
-
- seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
- hw_tef_obj->flags);
-
- /* Use the MCP2517FD mask on the MCP2518FD, too. We only
- * compare 7 bits, this should be enough to detect
- * net-yet-completed, i.e. old TEF objects.
- */
- seq_masked = seq &
- field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
- tef_tail_masked = priv->tef->tail &
- field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
- if (seq_masked != tef_tail_masked)
- return mcp251xfd_handle_tefif_recover(priv, seq);
-
- tef_tail = mcp251xfd_get_tef_tail(priv);
- skb = priv->can.echo_skb[tef_tail];
- if (skb)
- mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
- stats->tx_bytes +=
- can_rx_offload_get_echo_skb(&priv->offload,
- tef_tail, hw_tef_obj->ts,
- frame_len_ptr);
- stats->tx_packets++;
- priv->tef->tail++;
-
- return 0;
-}
-
-static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- unsigned int new_head;
- u8 chip_tx_tail;
- int err;
-
- err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
- if (err)
- return err;
-
- /* chip_tx_tail, is the next TX-Object send by the HW.
- * The new TEF head must be >= the old head, ...
- */
- new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
- if (new_head <= priv->tef->head)
- new_head += tx_ring->obj_num;
-
- /* ... but it cannot exceed the TX head. */
- priv->tef->head = min(new_head, tx_ring->head);
-
- return mcp251xfd_check_tef_tail(priv);
-}
-
-static inline int
-mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_hw_tef_obj *hw_tef_obj,
- const u8 offset, const u8 len)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- const int val_bytes = regmap_get_val_bytes(priv->map_rx);
-
- if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
- (offset > tx_ring->obj_num ||
- len > tx_ring->obj_num ||
- offset + len > tx_ring->obj_num)) {
- netdev_err(priv->ndev,
- "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n",
- tx_ring->obj_num, offset, len);
- return -ERANGE;
- }
-
- return regmap_bulk_read(priv->map_rx,
- mcp251xfd_get_tef_obj_addr(offset),
- hw_tef_obj,
- sizeof(*hw_tef_obj) / val_bytes * len);
-}
-
-static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
-{
- struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
- unsigned int total_frame_len = 0;
- u8 tef_tail, len, l;
- int err, i;
-
- err = mcp251xfd_tef_ring_update(priv);
- if (err)
- return err;
-
- tef_tail = mcp251xfd_get_tef_tail(priv);
- len = mcp251xfd_get_tef_len(priv);
- l = mcp251xfd_get_tef_linear_len(priv);
- err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
- if (err)
- return err;
-
- if (l < len) {
- err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
- if (err)
- return err;
- }
-
- for (i = 0; i < len; i++) {
- unsigned int frame_len = 0;
-
- err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
- /* -EAGAIN means the Sequence Number in the TEF
- * doesn't match our tef_tail. This can happen if we
- * read the TEF objects too early. Leave loop let the
- * interrupt handler call us again.
- */
- if (err == -EAGAIN)
- goto out_netif_wake_queue;
- if (err)
- return err;
-
- total_frame_len += frame_len;
- }
-
- out_netif_wake_queue:
- len = i; /* number of handled goods TEFs */
- if (len) {
- struct mcp251xfd_tef_ring *ring = priv->tef;
- struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- int offset;
-
- /* Increment the TEF FIFO tail pointer 'len' times in
- * a single SPI message.
- *
- * Note:
- * Calculate offset, so that the SPI transfer ends on
- * the last message of the uinc_xfer array, which has
- * "cs_change == 0", to properly deactivate the chip
- * select.
- */
- offset = ARRAY_SIZE(ring->uinc_xfer) - len;
- err = spi_sync_transfer(priv->spi,
- ring->uinc_xfer + offset, len);
- if (err)
- return err;
-
- tx_ring->tail += len;
- netdev_completed_queue(priv->ndev, len, total_frame_len);
-
- err = mcp251xfd_check_tef_tail(priv);
- if (err)
- return err;
- }
-
- mcp251xfd_ecc_tefif_successful(priv);
-
- if (mcp251xfd_get_tx_free(priv->tx)) {
- /* Make sure that anybody stopping the queue after
- * this sees the new tx_ring->tail.
- */
- smp_mb();
- netif_wake_queue(priv->ndev);
- }
-
- return 0;
-}
-
-static int
-mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_rx_ring *ring)
-{
- u32 new_head;
- u8 chip_rx_head;
- int err;
-
- err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
- if (err)
- return err;
-
- /* chip_rx_head, is the next RX-Object filled by the HW.
- * The new RX head must be >= the old head.
- */
- new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
- if (new_head <= ring->head)
- new_head += ring->obj_num;
-
- ring->head = new_head;
-
- return mcp251xfd_check_rx_tail(priv, ring);
-}
-
-static void
-mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
- struct sk_buff *skb)
-{
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
- u8 dlc;
-
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
- u32 sid, eid;
-
- eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
- sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);
-
- cfd->can_id = CAN_EFF_FLAG |
- FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
- FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
- } else {
- cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
- hw_rx_obj->id);
- }
-
- dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags);
-
- /* CANFD */
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
-
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
- cfd->flags |= CANFD_ESI;
-
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
- cfd->flags |= CANFD_BRS;
-
- cfd->len = can_fd_dlc2len(dlc);
- } else {
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
- cfd->can_id |= CAN_RTR_FLAG;
-
- can_frame_set_cc_len((struct can_frame *)cfd, dlc,
- priv->can.ctrlmode);
- }
-
- if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
- memcpy(cfd->data, hw_rx_obj->data, cfd->len);
-
- mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
-}
-
-static int
-mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
- struct mcp251xfd_rx_ring *ring,
- const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
-{
- struct net_device_stats *stats = &priv->ndev->stats;
- struct sk_buff *skb;
- struct canfd_frame *cfd;
- int err;
-
- if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
- skb = alloc_canfd_skb(priv->ndev, &cfd);
- else
- skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
-
- if (!skb) {
- stats->rx_dropped++;
- return 0;
- }
-
- mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
- if (err)
- stats->rx_fifo_errors++;
-
- return 0;
-}
-
-static inline int
-mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring,
- struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
- const u8 offset, const u8 len)
-{
- const int val_bytes = regmap_get_val_bytes(priv->map_rx);
- int err;
-
- err = regmap_bulk_read(priv->map_rx,
- mcp251xfd_get_rx_obj_addr(ring, offset),
- hw_rx_obj,
- len * ring->obj_size / val_bytes);
-
- return err;
-}
-
-static int
-mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
- struct mcp251xfd_rx_ring *ring)
-{
- struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
- u8 rx_tail, len;
- int err, i;
-
- err = mcp251xfd_rx_ring_update(priv, ring);
- if (err)
- return err;
-
- while ((len = mcp251xfd_get_rx_linear_len(ring))) {
- int offset;
-
- rx_tail = mcp251xfd_get_rx_tail(ring);
-
- err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
- rx_tail, len);
- if (err)
- return err;
-
- for (i = 0; i < len; i++) {
- err = mcp251xfd_handle_rxif_one(priv, ring,
- (void *)hw_rx_obj +
- i * ring->obj_size);
- if (err)
- return err;
- }
-
- /* Increment the RX FIFO tail pointer 'len' times in a
- * single SPI message.
- *
- * Note:
- * Calculate offset, so that the SPI transfer ends on
- * the last message of the uinc_xfer array, which has
- * "cs_change == 0", to properly deactivate the chip
- * select.
- */
- offset = ARRAY_SIZE(ring->uinc_xfer) - len;
- err = spi_sync_transfer(priv->spi,
- ring->uinc_xfer + offset, len);
- if (err)
- return err;
-
- ring->tail += len;
- }
-
- return 0;
-}
-
-static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
-{
- struct mcp251xfd_rx_ring *ring;
- int err, n;
-
- mcp251xfd_for_each_rx_ring(priv, ring, n) {
- err = mcp251xfd_handle_rxif_ring(priv, ring);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static struct sk_buff *
mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
struct can_frame **cf, u32 *timestamp)
@@ -1653,12 +899,15 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
/* If SERRIF is active, there was a RX MAB overflow. */
if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) {
- netdev_info(priv->ndev,
- "RX-%d: MAB overflow detected.\n",
- ring->nr);
+ if (net_ratelimit())
+ netdev_dbg(priv->ndev,
+ "RX-%d: MAB overflow detected.\n",
+ ring->nr);
} else {
- netdev_info(priv->ndev,
- "RX-%d: FIFO overflow.\n", ring->nr);
+ if (net_ratelimit())
+ netdev_dbg(priv->ndev,
+ "RX-%d: FIFO overflow.\n",
+ ring->nr);
}
err = regmap_update_bits(priv->map_reg,
@@ -1676,7 +925,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -1781,7 +1030,7 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
return 0;
mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -1850,11 +1099,12 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
if (err)
return err;
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = bec.txerr;
cf->data[7] = bec.rxerr;
}
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
stats->rx_fifo_errors++;
@@ -2019,7 +1269,8 @@ mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
* - for mcp2518fd: offset not 0 or 1
*/
if (chip_tx_tail != tx_tail ||
- !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
+ !(offset == 0 || (offset == 1 && (mcp251xfd_is_2518FD(priv) ||
+ mcp251xfd_is_251863(priv))))) {
netdev_err(priv->ndev,
"ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
@@ -2144,6 +1395,20 @@ static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
return 0;
}
+static int mcp251xfd_read_regs_status(struct mcp251xfd_priv *priv)
+{
+ const int val_bytes = regmap_get_val_bytes(priv->map_reg);
+ size_t len;
+
+ if (priv->rx_ring_num == 1)
+ len = sizeof(priv->regs_status.intf);
+ else
+ len = sizeof(priv->regs_status);
+
+ return regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
+ &priv->regs_status, len / val_bytes);
+}
+
#define mcp251xfd_handle(priv, irq, ...) \
({ \
struct mcp251xfd_priv *_priv = (priv); \
@@ -2160,7 +1425,6 @@ static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
{
struct mcp251xfd_priv *priv = dev_id;
- const int val_bytes = regmap_get_val_bytes(priv->map_reg);
irqreturn_t handled = IRQ_NONE;
int err;
@@ -2172,21 +1436,28 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
if (!rx_pending)
break;
+ /* Assume the 1st RX-FIFO is pending. If other FIFOs
+ * are pending, too, the main IRQ handler will take
+ * care of them.
+ */
+ priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);
err = mcp251xfd_handle(priv, rxif);
if (err)
goto out_fail;
handled = IRQ_HANDLED;
- } while (1);
+
+ /* We don't know which RX-FIFO is pending, so only the
+ * 1st RX-FIFO is handled here. Leave the loop if we have
+ * more than 1 RX-FIFO to avoid starving the others.
+ */
+ } while (priv->rx_ring_num == 1);
do {
u32 intf_pending, intf_pending_clearable;
bool set_normal_mode = false;
- err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
- &priv->regs_status,
- sizeof(priv->regs_status) /
- val_bytes);
+ err = mcp251xfd_read_regs_status(priv);
if (err)
goto out_fail;
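
The new mcp251xfd_read_regs_status() helper used above sizes its bulk read in register values rather than bytes. A short sketch of that pattern; the struct layout and register address are illustrative only:

#include <linux/regmap.h>

struct example_regs {
	u32 intf;
	u32 rxif;
};

static int example_read_regs(struct regmap *map, struct example_regs *regs,
			     bool intf_only)
{
	/* 4 for a regmap with 32 bit values */
	const int val_bytes = regmap_get_val_bytes(map);
	size_t len;

	len = intf_only ? sizeof(regs->intf) : sizeof(*regs);

	/* regmap_bulk_read() expects the number of register values,
	 * so convert the byte count first.
	 */
	return regmap_bulk_read(map, 0x1c, regs, len / val_bytes);
}
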
@@ -2311,212 +1582,23 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
return handled;
}
-static inline struct
-mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
-{
- u8 tx_head;
-
- tx_head = mcp251xfd_get_tx_head(tx_ring);
-
- return &tx_ring->obj[tx_head];
-}
-
-static void
-mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_tx_obj *tx_obj,
- const struct sk_buff *skb,
- unsigned int seq)
-{
- const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
- struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
- union mcp251xfd_tx_obj_load_buf *load_buf;
- u8 dlc;
- u32 id, flags;
- int len_sanitized = 0, len;
-
- if (cfd->can_id & CAN_EFF_FLAG) {
- u32 sid, eid;
-
- sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
- eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);
-
- id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
- FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);
-
- flags = MCP251XFD_OBJ_FLAGS_IDE;
- } else {
- id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
- flags = 0;
- }
-
- /* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
- * harm, only the lower 7 bits will be transferred into the
- * TEF object.
- */
- flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);
-
- if (cfd->can_id & CAN_RTR_FLAG)
- flags |= MCP251XFD_OBJ_FLAGS_RTR;
- else
- len_sanitized = canfd_sanitize_len(cfd->len);
-
- /* CANFD */
- if (can_is_canfd_skb(skb)) {
- if (cfd->flags & CANFD_ESI)
- flags |= MCP251XFD_OBJ_FLAGS_ESI;
-
- flags |= MCP251XFD_OBJ_FLAGS_FDF;
-
- if (cfd->flags & CANFD_BRS)
- flags |= MCP251XFD_OBJ_FLAGS_BRS;
-
- dlc = can_fd_len2dlc(cfd->len);
- } else {
- dlc = can_get_cc_dlc((struct can_frame *)cfd,
- priv->can.ctrlmode);
- }
-
- flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);
-
- load_buf = &tx_obj->buf;
- if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
- hw_tx_obj = &load_buf->crc.hw_tx_obj;
- else
- hw_tx_obj = &load_buf->nocrc.hw_tx_obj;
-
- put_unaligned_le32(id, &hw_tx_obj->id);
- put_unaligned_le32(flags, &hw_tx_obj->flags);
-
- /* Copy data */
- memcpy(hw_tx_obj->data, cfd->data, cfd->len);
-
- /* Clear unused data at end of CAN frame */
- if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
- int pad_len;
-
- pad_len = len_sanitized - cfd->len;
- if (pad_len)
- memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
- }
-
- /* Number of bytes to be written into the RAM of the controller */
- len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
- if (MCP251XFD_SANITIZE_CAN)
- len += round_up(len_sanitized, sizeof(u32));
- else
- len += round_up(cfd->len, sizeof(u32));
-
- if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
- u16 crc;
-
- mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
- len);
- /* CRC */
- len += sizeof(load_buf->crc.cmd);
- crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
- put_unaligned_be16(crc, (void *)load_buf + len);
-
- /* Total length */
- len += sizeof(load_buf->crc.crc);
- } else {
- len += sizeof(load_buf->nocrc.cmd);
- }
-
- tx_obj->xfer[0].len = len;
-}
-
-static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_tx_obj *tx_obj)
-{
- return spi_async(priv->spi, &tx_obj->msg);
-}
-
-static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_tx_ring *tx_ring)
-{
- if (mcp251xfd_get_tx_free(tx_ring) > 0)
- return false;
-
- netif_stop_queue(priv->ndev);
-
- /* Memory barrier before checking tx_free (head and tail) */
- smp_mb();
-
- if (mcp251xfd_get_tx_free(tx_ring) == 0) {
- netdev_dbg(priv->ndev,
- "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
- tx_ring->head, tx_ring->tail,
- tx_ring->head - tx_ring->tail);
-
- return true;
- }
-
- netif_start_queue(priv->ndev);
-
- return false;
-}
-
-static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
-{
- struct mcp251xfd_priv *priv = netdev_priv(ndev);
- struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- struct mcp251xfd_tx_obj *tx_obj;
- unsigned int frame_len;
- u8 tx_head;
- int err;
-
- if (can_dropped_invalid_skb(ndev, skb))
- return NETDEV_TX_OK;
-
- if (mcp251xfd_tx_busy(priv, tx_ring))
- return NETDEV_TX_BUSY;
-
- tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
- mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);
-
- /* Stop queue if we occupy the complete TX FIFO */
- tx_head = mcp251xfd_get_tx_head(tx_ring);
- tx_ring->head++;
- if (mcp251xfd_get_tx_free(tx_ring) == 0)
- netif_stop_queue(ndev);
-
- frame_len = can_skb_get_frame_len(skb);
- err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
- if (!err)
- netdev_sent_queue(priv->ndev, frame_len);
-
- err = mcp251xfd_tx_obj_write(priv, tx_obj);
- if (err)
- goto out_err;
-
- return NETDEV_TX_OK;
-
- out_err:
- netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
-
- return NETDEV_TX_OK;
-}
-
static int mcp251xfd_open(struct net_device *ndev)
{
struct mcp251xfd_priv *priv = netdev_priv(ndev);
const struct spi_device *spi = priv->spi;
int err;
- err = pm_runtime_get_sync(ndev->dev.parent);
- if (err < 0) {
- pm_runtime_put_noidle(ndev->dev.parent);
+ err = open_candev(ndev);
+ if (err)
return err;
- }
- err = open_candev(ndev);
+ err = pm_runtime_resume_and_get(ndev->dev.parent);
if (err)
- goto out_pm_runtime_put;
+ goto out_close_candev;
err = mcp251xfd_ring_alloc(priv);
if (err)
- goto out_close_candev;
+ goto out_pm_runtime_put;
err = mcp251xfd_transceiver_enable(priv);
if (err)
@@ -2527,6 +1609,7 @@ static int mcp251xfd_open(struct net_device *ndev)
goto out_transceiver_disable;
mcp251xfd_timestamp_init(priv);
+ clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
can_rx_offload_enable(&priv->offload);
err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
@@ -2547,16 +1630,17 @@ static int mcp251xfd_open(struct net_device *ndev)
free_irq(spi->irq, priv);
out_can_rx_offload_disable:
can_rx_offload_disable(&priv->offload);
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
mcp251xfd_timestamp_stop(priv);
out_transceiver_disable:
mcp251xfd_transceiver_disable(priv);
out_mcp251xfd_ring_free:
mcp251xfd_ring_free(priv);
- out_close_candev:
- close_candev(ndev);
out_pm_runtime_put:
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
pm_runtime_put(ndev->dev.parent);
+ out_close_candev:
+ close_candev(ndev);
return err;
}
@@ -2566,6 +1650,9 @@ static int mcp251xfd_stop(struct net_device *ndev)
struct mcp251xfd_priv *priv = netdev_priv(ndev);
netif_stop_queue(ndev);
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+ hrtimer_cancel(&priv->rx_irq_timer);
+ hrtimer_cancel(&priv->tx_irq_timer);
mcp251xfd_chip_interrupts_disable(priv);
free_irq(ndev->irq, priv);
can_rx_offload_disable(&priv->offload);
@@ -2584,6 +1671,7 @@ static const struct net_device_ops mcp251xfd_netdev_ops = {
.ndo_open = mcp251xfd_open,
.ndo_stop = mcp251xfd_stop,
.ndo_start_xmit = mcp251xfd_start_xmit,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
.ndo_change_mtu = can_change_mtu,
};
@@ -2604,8 +1692,8 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
u32 osc;
int err;
- /* The OSC_LPMEN is only supported on MCP2518FD, so use it to
- * autodetect the model.
+ /* The OSC_LPMEN is only supported on MCP2518FD and MCP251863,
+ * so use it to autodetect the model.
*/
err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
MCP251XFD_REG_OSC_LPMEN,
@@ -2617,15 +1705,23 @@ static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
if (err)
return err;
- if (osc & MCP251XFD_REG_OSC_LPMEN)
- devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
- else
+ if (osc & MCP251XFD_REG_OSC_LPMEN) {
+ /* We cannot distinguish between MCP2518FD and
+ * MCP251863. If the firmware specifies MCP251863, keep
+ * it; otherwise assume a MCP2518FD.
+ */
+ if (mcp251xfd_is_251863(priv))
+ devtype_data = &mcp251xfd_devtype_data_mcp251863;
+ else
+ devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
+ } else {
devtype_data = &mcp251xfd_devtype_data_mcp2517fd;
+ }
- if (!mcp251xfd_is_251X(priv) &&
+ if (!mcp251xfd_is_251XFD(priv) &&
priv->devtype_data.model != devtype_data->model) {
netdev_info(ndev,
- "Detected %s, but firmware specifies a %s. Fixing up.",
+ "Detected %s, but firmware specifies a %s. Fixing up.\n",
__mcp251xfd_get_model_str(devtype_data->model),
mcp251xfd_get_model_str(priv));
}
@@ -2662,7 +1758,7 @@ static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
return 0;
netdev_info(priv->ndev,
- "RX_INT active after softreset, disabling RX_INT support.");
+ "RX_INT active after softreset, disabling RX_INT support.\n");
devm_gpiod_put(&priv->spi->dev, priv->rx_int);
priv->rx_int = NULL;
@@ -2670,8 +1766,9 @@ static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
}
static int
-mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
- u32 *dev_id, u32 *effective_speed_hz)
+mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
+ u32 *effective_speed_hz_slow,
+ u32 *effective_speed_hz_fast)
{
struct mcp251xfd_map_buf_nocrc *buf_rx;
struct mcp251xfd_map_buf_nocrc *buf_tx;
@@ -2690,23 +1787,27 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
xfer[0].tx_buf = buf_tx;
xfer[0].len = sizeof(buf_tx->cmd);
+ xfer[0].speed_hz = priv->spi_max_speed_hz_slow;
xfer[1].rx_buf = buf_rx->data;
- xfer[1].len = sizeof(dev_id);
+ xfer[1].len = sizeof(*dev_id);
+ xfer[1].speed_hz = priv->spi_max_speed_hz_fast;
mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
+
err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
if (err)
goto out_kfree_buf_tx;
- *dev_id = be32_to_cpup((__be32 *)buf_rx->data);
- *effective_speed_hz = xfer->effective_speed_hz;
+ *dev_id = get_unaligned_le32(buf_rx->data);
+ *effective_speed_hz_slow = xfer[0].effective_speed_hz;
+ *effective_speed_hz_fast = xfer[1].effective_speed_hz;
out_kfree_buf_tx:
kfree(buf_tx);
out_kfree_buf_rx:
kfree(buf_rx);
- return 0;
+ return err;
}
#define MCP251XFD_QUIRK_ACTIVE(quirk) \
@@ -2715,34 +1816,45 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
static int
mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
{
- u32 dev_id, effective_speed_hz;
+ u32 dev_id, effective_speed_hz_slow, effective_speed_hz_fast;
+ unsigned long clk_rate;
int err;
err = mcp251xfd_register_get_dev_id(priv, &dev_id,
- &effective_speed_hz);
+ &effective_speed_hz_slow,
+ &effective_speed_hz_fast);
if (err)
return err;
+ clk_rate = clk_get_rate(priv->clk);
+
netdev_info(priv->ndev,
- "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
+ "%s rev%lu.%lu (%cRX_INT %cPLL %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD o:%lu.%02luMHz c:%u.%02uMHz m:%u.%02uMHz rs:%u.%02uMHz es:%u.%02uMHz rf:%u.%02uMHz ef:%u.%02uMHz) successfully initialized.\n",
mcp251xfd_get_model_str(priv),
FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
priv->rx_int ? '+' : '-',
+ priv->pll_enable ? '+' : '-',
MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
MCP251XFD_QUIRK_ACTIVE(CRC_REG),
MCP251XFD_QUIRK_ACTIVE(CRC_RX),
MCP251XFD_QUIRK_ACTIVE(CRC_TX),
MCP251XFD_QUIRK_ACTIVE(ECC),
MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
+ clk_rate / 1000000,
+ clk_rate % 1000000 / 1000 / 10,
priv->can.clock.freq / 1000000,
priv->can.clock.freq % 1000000 / 1000 / 10,
priv->spi_max_speed_hz_orig / 1000000,
priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
- priv->spi->max_speed_hz / 1000000,
- priv->spi->max_speed_hz % 1000000 / 1000 / 10,
- effective_speed_hz / 1000000,
- effective_speed_hz % 1000000 / 1000 / 10);
+ priv->spi_max_speed_hz_slow / 1000000,
+ priv->spi_max_speed_hz_slow % 1000000 / 1000 / 10,
+ effective_speed_hz_slow / 1000000,
+ effective_speed_hz_slow % 1000000 / 1000 / 10,
+ priv->spi_max_speed_hz_fast / 1000000,
+ priv->spi_max_speed_hz_fast % 1000000 / 1000 / 10,
+ effective_speed_hz_fast / 1000000,
+ effective_speed_hz_fast % 1000000 / 1000 / 10);
return 0;
}
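
A few hunks above, the DEVID read switches from be32_to_cpup() to get_unaligned_le32(): the controller transfers the register little endian and the RX buffer is not guaranteed to be 32 bit aligned. A small sketch of the difference; the buffer contents are made up:

#include <asm/unaligned.h>

static void example_unaligned_read(void)
{
	/* Bytes as they arrive on the wire, lowest address first */
	const u8 buf[4] = { 0x12, 0x34, 0x56, 0x78 };

	/* Little-endian interpretation: 0x78563412 */
	u32 le = get_unaligned_le32(buf);
	/* Big-endian interpretation: 0x12345678 */
	u32 be = get_unaligned_be32(buf);

	(void)le;
	(void)be;
}
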
@@ -2768,19 +1880,27 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
if (err == -ENODEV)
goto out_runtime_disable;
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
+
+ err = mcp251xfd_chip_clock_init(priv);
+ if (err == -ENODEV)
+ goto out_runtime_disable;
+ if (err)
+ goto out_chip_sleep;
err = mcp251xfd_register_chip_detect(priv);
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
err = mcp251xfd_register_check_rx_int(priv);
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
+
+ mcp251xfd_ethtool_init(priv);
err = register_candev(ndev);
if (err)
- goto out_chip_set_mode_sleep;
+ goto out_chip_sleep;
err = mcp251xfd_register_done(priv);
if (err)
@@ -2790,7 +1910,7 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
* disable the clocks and vdd. If CONFIG_PM is not enabled,
* the clocks and vdd will stay powered.
*/
- err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ err = mcp251xfd_chip_sleep(priv);
if (err)
goto out_unregister_candev;
@@ -2800,8 +1920,8 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
out_unregister_candev:
unregister_candev(ndev);
- out_chip_set_mode_sleep:
- mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ out_chip_sleep:
+ mcp251xfd_chip_sleep(priv);
out_runtime_disable:
pm_runtime_disable(ndev->dev.parent);
out_runtime_put_noidle:
@@ -2817,10 +1937,10 @@ static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
unregister_candev(ndev);
- pm_runtime_get_sync(ndev->dev.parent);
- pm_runtime_put_noidle(ndev->dev.parent);
- mcp251xfd_clks_and_vdd_disable(priv);
- pm_runtime_disable(ndev->dev.parent);
+ if (pm_runtime_enabled(ndev->dev.parent))
+ pm_runtime_disable(ndev->dev.parent);
+ else
+ mcp251xfd_clks_and_vdd_disable(priv);
}
static const struct of_device_id mcp251xfd_of_match[] = {
@@ -2831,6 +1951,9 @@ static const struct of_device_id mcp251xfd_of_match[] = {
.compatible = "microchip,mcp2518fd",
.data = &mcp251xfd_devtype_data_mcp2518fd,
}, {
+ .compatible = "microchip,mcp251863",
+ .data = &mcp251xfd_devtype_data_mcp251863,
+ }, {
.compatible = "microchip,mcp251xfd",
.data = &mcp251xfd_devtype_data_mcp251xfd,
}, {
@@ -2847,6 +1970,9 @@ static const struct spi_device_id mcp251xfd_id_table[] = {
.name = "mcp2518fd",
.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
}, {
+ .name = "mcp251863",
+ .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251863,
+ }, {
.name = "mcp251xfd",
.driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
}, {
@@ -2863,6 +1989,7 @@ static int mcp251xfd_probe(struct spi_device *spi)
struct gpio_desc *rx_int;
struct regulator *reg_vdd, *reg_xceiver;
struct clk *clk;
+ bool pll_enable = false;
u32 freq = 0;
int err;
@@ -2913,12 +2040,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
return -ERANGE;
}
- if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
- dev_err(&spi->dev,
- "Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
- freq);
- return -ERANGE;
- }
+ if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER)
+ pll_enable = true;
ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
MCP251XFD_TX_OBJ_NUM_MAX);
@@ -2934,6 +2057,8 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv = netdev_priv(ndev);
spi_set_drvdata(spi, priv);
priv->can.clock.freq = freq;
+ if (pll_enable)
+ priv->can.clock.freq *= MCP251XFD_OSC_PLL_MULTIPLIER;
priv->can.do_set_mode = mcp251xfd_set_mode;
priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
priv->can.bittiming_const = &mcp251xfd_bittiming_const;
@@ -2942,10 +2067,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
CAN_CTRLMODE_CC_LEN8_DLC;
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
priv->ndev = ndev;
priv->spi = spi;
priv->rx_int = rx_int;
priv->clk = clk;
+ priv->pll_enable = pll_enable;
priv->reg_vdd = reg_vdd;
priv->reg_xceiver = reg_xceiver;
@@ -2983,7 +2110,16 @@ static int mcp251xfd_probe(struct spi_device *spi)
*
*/
priv->spi_max_speed_hz_orig = spi->max_speed_hz;
- spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
+ priv->spi_max_speed_hz_slow = min(spi->max_speed_hz,
+ freq / 2 / 1000 * 850);
+ if (priv->pll_enable)
+ priv->spi_max_speed_hz_fast = min(spi->max_speed_hz,
+ freq *
+ MCP251XFD_OSC_PLL_MULTIPLIER /
+ 2 / 1000 * 850);
+ else
+ priv->spi_max_speed_hz_fast = priv->spi_max_speed_hz_slow;
+ spi->max_speed_hz = priv->spi_max_speed_hz_slow;
spi->bits_per_word = 8;
spi->rt = true;
err = spi_setup(spi);
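
The probe code above derives two SPI clock limits: a slow one from the bare oscillator and, with the PLL enabled, a fast one from the multiplied SYSCLK, both capped at 85 % of half the clock. A hypothetical worked example of the integer math, assuming a 4 MHz crystal and a PLL multiplier of 10:

#include <linux/types.h>

static void example_spi_speed(void)
{
	const u32 freq = 4000000;	/* 4 MHz crystal (assumed) */
	const u32 pll_mult = 10;	/* assumed PLL multiplier */
	u32 hz_slow, hz_fast;

	/* 85 % of half the unmultiplied clock: 1,700,000 Hz */
	hz_slow = freq / 2 / 1000 * 850;
	/* 85 % of half the multiplied SYSCLK (40 MHz): 17,000,000 Hz */
	hz_fast = freq * pll_mult / 2 / 1000 * 850;

	(void)hz_slow;
	(void)hz_fast;
}
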
@@ -3000,8 +2136,11 @@ static int mcp251xfd_probe(struct spi_device *spi)
goto out_free_candev;
err = mcp251xfd_register(priv);
- if (err)
+ if (err) {
+ dev_err_probe(&spi->dev, err, "Failed to detect %s.\n",
+ mcp251xfd_get_model_str(priv));
goto out_can_rx_offload_del;
+ }
return 0;
@@ -3015,7 +2154,7 @@ static int mcp251xfd_probe(struct spi_device *spi)
return err;
}
-static int mcp251xfd_remove(struct spi_device *spi)
+static void mcp251xfd_remove(struct spi_device *spi)
{
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
struct net_device *ndev = priv->ndev;
@@ -3024,8 +2163,6 @@ static int mcp251xfd_remove(struct spi_device *spi)
mcp251xfd_unregister(priv);
spi->max_speed_hz = priv->spi_max_speed_hz_orig;
free_candev(ndev);
-
- return 0;
}
static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
index ffae8fdd3af0..004eaf96262b 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
@@ -207,10 +207,10 @@ static void mcp251xfd_dump_tx_ring(const struct mcp251xfd_priv *priv,
.val = tx->base,
}, {
.key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR,
- .val = 0,
+ .val = tx->nr,
}, {
.key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR,
- .val = MCP251XFD_TX_FIFO,
+ .val = tx->fifo_nr,
}, {
.key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM,
.val = tx->obj_num,
@@ -253,7 +253,7 @@ void mcp251xfd_dump(const struct mcp251xfd_priv *priv)
file_size += mcp251xfd_dump_reg_space[i].size / sizeof(u32) *
sizeof(struct mcp251xfd_dump_object_reg);
- /* TEF ring, RX ring, TX rings */
+ /* TEF ring, RX rings, TX ring */
rings_num = 1 + priv->rx_ring_num + 1;
obj_num += rings_num;
file_size += rings_num * __MCP251XFD_DUMP_OBJECT_RING_KEY_MAX *
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
new file mode 100644
index 000000000000..57eeb066a945
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2021, 2022 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include <linux/ethtool.h>
+
+#include "mcp251xfd.h"
+#include "mcp251xfd-ram.h"
+
+static void
+mcp251xfd_ring_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ const struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
+ ring->rx_max_pending = layout.max_rx;
+ ring->tx_max_pending = layout.max_tx;
+
+ ring->rx_pending = priv->rx_obj_num;
+ ring->tx_pending = priv->tx->obj_num;
+}
+
+static int
+mcp251xfd_ring_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, ring, NULL, fd_mode);
+ if ((layout.cur_rx != priv->rx_obj_num ||
+ layout.cur_tx != priv->tx->obj_num) &&
+ netif_running(ndev))
+ return -EBUSY;
+
+ priv->rx_obj_num = layout.cur_rx;
+ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+ priv->tx->obj_num = layout.cur_tx;
+ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
+
+ return 0;
+}
+
+static int mcp251xfd_ring_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ u32 rx_max_frames, tx_max_frames;
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+ */
+ if (priv->rx_obj_num_coalesce_irq == 0)
+ rx_max_frames = 1;
+ else
+ rx_max_frames = priv->rx_obj_num_coalesce_irq;
+
+ ec->rx_max_coalesced_frames_irq = rx_max_frames;
+ ec->rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq;
+
+ if (priv->tx_obj_num_coalesce_irq == 0)
+ tx_max_frames = 1;
+ else
+ tx_max_frames = priv->tx_obj_num_coalesce_irq;
+
+ ec->tx_max_coalesced_frames_irq = tx_max_frames;
+ ec->tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq;
+
+ return 0;
+}
+
+static int mcp251xfd_ring_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ const struct ethtool_ringparam ring = {
+ .rx_pending = priv->rx_obj_num,
+ .tx_pending = priv->tx->obj_num,
+ };
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, ec, fd_mode);
+
+ if ((layout.rx_coalesce != priv->rx_obj_num_coalesce_irq ||
+ ec->rx_coalesce_usecs_irq != priv->rx_coalesce_usecs_irq ||
+ layout.tx_coalesce != priv->tx_obj_num_coalesce_irq ||
+ ec->tx_coalesce_usecs_irq != priv->tx_coalesce_usecs_irq) &&
+ netif_running(ndev))
+ return -EBUSY;
+
+ priv->rx_obj_num = layout.cur_rx;
+ priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
+ priv->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+
+ priv->tx->obj_num = layout.cur_tx;
+ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
+ priv->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+
+ return 0;
+}
+
+static const struct ethtool_ops mcp251xfd_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ |
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ |
+ ETHTOOL_COALESCE_TX_USECS_IRQ |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
+ .get_ringparam = mcp251xfd_ring_get_ringparam,
+ .set_ringparam = mcp251xfd_ring_set_ringparam,
+ .get_coalesce = mcp251xfd_ring_get_coalesce,
+ .set_coalesce = mcp251xfd_ring_set_coalesce,
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
+void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv)
+{
+ struct can_ram_layout layout;
+
+ priv->ndev->ethtool_ops = &mcp251xfd_ethtool_ops;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, false);
+ priv->rx_obj_num = layout.default_rx;
+ priv->tx->obj_num = layout.default_tx;
+
+ priv->rx_obj_num_coalesce_irq = 0;
+ priv->tx_obj_num_coalesce_irq = 0;
+ priv->rx_coalesce_usecs_irq = 0;
+ priv->tx_coalesce_usecs_irq = 0;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
new file mode 100644
index 000000000000..9e8e82cdba46
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2021, 2022 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include "mcp251xfd-ram.h"
+
+static inline u8 can_ram_clamp(const struct can_ram_config *config,
+ const struct can_ram_obj_config *obj,
+ u8 val)
+{
+ u8 max;
+
+ max = min_t(u8, obj->max, obj->fifo_num * config->fifo_depth);
+ return clamp(val, obj->min, max);
+}
+
+static u8
+can_ram_rounddown_pow_of_two(const struct can_ram_config *config,
+ const struct can_ram_obj_config *obj,
+ const u8 coalesce, u8 val)
+{
+ u8 fifo_num = obj->fifo_num;
+ u8 ret = 0, i;
+
+ val = can_ram_clamp(config, obj, val);
+
+ if (coalesce) {
+ /* Use 1st FIFO for coalescing, if requested.
+ *
+ * Either use complete FIFO (and FIFO Full IRQ) for
+ * coalescing or only half of FIFO (FIFO Half Full
+ * IRQ) and use remaining half for normal objects.
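+ * For example, with a FIFO depth of 32 a requested coalesce
+ * of 16 reserves a 32 deep first FIFO (Half Full IRQ after 16
+ * frames), while a coalesce of 32 uses the complete FIFO
+ * together with the FIFO Full IRQ.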
+ */
+ ret = min_t(u8, coalesce * 2, config->fifo_depth);
+ val -= ret;
+ fifo_num--;
+ }
+
+ for (i = 0; i < fifo_num && val; i++) {
+ u8 n;
+
+ n = min_t(u8, rounddown_pow_of_two(val),
+ config->fifo_depth);
+
+ /* skip small FIFOs */
+ if (n < obj->fifo_depth_min)
+ return ret;
+
+ ret += n;
+ val -= n;
+ }
+
+ return ret;
+}
+
+void can_ram_get_layout(struct can_ram_layout *layout,
+ const struct can_ram_config *config,
+ const struct ethtool_ringparam *ring,
+ const struct ethtool_coalesce *ec,
+ const bool fd_mode)
+{
+ u8 num_rx, num_tx;
+ u16 ram_free;
+
+ /* default CAN */
+
+ num_tx = config->tx.def[fd_mode];
+ num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+ ram_free = config->size;
+ ram_free -= config->tx.size[fd_mode] * num_tx;
+
+ num_rx = ram_free / config->rx.size[fd_mode];
+
+ layout->default_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+ layout->default_tx = num_tx;
+
+ /* MAX CAN */
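+ /* The maximum for one direction assumes the minimum number of
+ * objects for the other direction.
+ */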
+
+ ram_free = config->size;
+ ram_free -= config->tx.size[fd_mode] * config->tx.min;
+ num_rx = ram_free / config->rx.size[fd_mode];
+
+ ram_free = config->size;
+ ram_free -= config->rx.size[fd_mode] * config->rx.min;
+ num_tx = ram_free / config->tx.size[fd_mode];
+
+ layout->max_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx);
+ layout->max_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+ /* cur CAN */
+
+ if (ring) {
+ u8 num_rx_coalesce = 0, num_tx_coalesce = 0;
+
+ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, ring->rx_pending);
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+ */
+ if (ec && !(ec->rx_coalesce_usecs_irq == 0 &&
+ ec->rx_max_coalesced_frames_irq == 1)) {
+ u8 max;
+
+ /* use at most half of the available objects for coalescing */
+ max = min_t(u8, num_rx / 2, config->fifo_depth);
+ num_rx_coalesce = clamp(ec->rx_max_coalesced_frames_irq,
+ (u32)config->rx.fifo_depth_coalesce_min,
+ (u32)max);
+ num_rx_coalesce = rounddown_pow_of_two(num_rx_coalesce);
+
+ num_rx = can_ram_rounddown_pow_of_two(config, &config->rx,
+ num_rx_coalesce, num_rx);
+ }
+
+ ram_free = config->size - config->rx.size[fd_mode] * num_rx;
+ num_tx = ram_free / config->tx.size[fd_mode];
+ num_tx = min_t(u8, ring->tx_pending, num_tx);
+ num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx);
+
+ /* The ethtool doc says:
+ * To disable coalescing, set usecs = 0 and max_frames = 1.
+ */
+ if (ec && !(ec->tx_coalesce_usecs_irq == 0 &&
+ ec->tx_max_coalesced_frames_irq == 1)) {
+ u8 max;
+
+ /* use at most half of the available objects for coalescing */
+ max = min_t(u8, num_tx / 2, config->fifo_depth);
+ num_tx_coalesce = clamp(ec->tx_max_coalesced_frames_irq,
+ (u32)config->tx.fifo_depth_coalesce_min,
+ (u32)max);
+ num_tx_coalesce = rounddown_pow_of_two(num_tx_coalesce);
+
+ num_tx = can_ram_rounddown_pow_of_two(config, &config->tx,
+ num_tx_coalesce, num_tx);
+ }
+
+ layout->cur_rx = num_rx;
+ layout->cur_tx = num_tx;
+ layout->rx_coalesce = num_rx_coalesce;
+ layout->tx_coalesce = num_tx_coalesce;
+ } else {
+ layout->cur_rx = layout->default_rx;
+ layout->cur_tx = layout->default_tx;
+ layout->rx_coalesce = 0;
+ layout->tx_coalesce = 0;
+ }
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h
new file mode 100644
index 000000000000..7558c1510cbf
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ *
+ * Copyright (c) 2021, 2022 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ */
+
+#ifndef _MCP251XFD_RAM_H
+#define _MCP251XFD_RAM_H
+
+#include <linux/ethtool.h>
+
+#define CAN_RAM_NUM_MAX (-1)
+
+enum can_ram_mode {
+ CAN_RAM_MODE_CAN,
+ CAN_RAM_MODE_CANFD,
+ __CAN_RAM_MODE_MAX
+};
+
+struct can_ram_obj_config {
+ u8 size[__CAN_RAM_MODE_MAX];
+
+ u8 def[__CAN_RAM_MODE_MAX];
+ u8 min;
+ u8 max;
+
+ u8 fifo_num;
+ u8 fifo_depth_min;
+ u8 fifo_depth_coalesce_min;
+};
+
+struct can_ram_config {
+ const struct can_ram_obj_config rx;
+ const struct can_ram_obj_config tx;
+
+ u16 size;
+ u8 fifo_depth;
+};
+
+struct can_ram_layout {
+ u8 default_rx;
+ u8 default_tx;
+
+ u8 max_rx;
+ u8 max_tx;
+
+ u8 cur_rx;
+ u8 cur_tx;
+
+ u8 rx_coalesce;
+ u8 tx_coalesce;
+};
+
+void can_ram_get_layout(struct can_ram_layout *layout,
+ const struct can_ram_config *config,
+ const struct ethtool_ringparam *ring,
+ const struct ethtool_coalesce *ec,
+ const bool fd_mode);
+
+#endif
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index 297491516a26..92b7bc7f14b9 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -2,8 +2,8 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020 Pengutronix,
-// Marc Kleine-Budde <kernel@pengutronix.de>
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
//
#include "mcp251xfd.h"
@@ -47,22 +47,32 @@ mcp251xfd_regmap_nocrc_gather_write(void *context,
return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
}
-static inline bool mcp251xfd_update_bits_read_reg(unsigned int reg)
+static inline bool
+mcp251xfd_update_bits_read_reg(const struct mcp251xfd_priv *priv,
+ unsigned int reg)
{
+ struct mcp251xfd_rx_ring *ring;
+ int n;
+
switch (reg) {
case MCP251XFD_REG_INT:
case MCP251XFD_REG_TEFCON:
- case MCP251XFD_REG_FIFOCON(MCP251XFD_RX_FIFO(0)):
case MCP251XFD_REG_FLTCON(0):
case MCP251XFD_REG_ECCSTAT:
case MCP251XFD_REG_CRC:
return false;
case MCP251XFD_REG_CON:
- case MCP251XFD_REG_FIFOSTA(MCP251XFD_RX_FIFO(0)):
case MCP251XFD_REG_OSC:
case MCP251XFD_REG_ECCCON:
return true;
default:
+ mcp251xfd_for_each_rx_ring(priv, ring, n) {
+ if (reg == MCP251XFD_REG_FIFOCON(ring->fifo_nr))
+ return false;
+ if (reg == MCP251XFD_REG_FIFOSTA(ring->fifo_nr))
+ return true;
+ }
+
WARN(1, "Status of reg 0x%04x unknown.\n", reg);
}
@@ -92,7 +102,7 @@ mcp251xfd_regmap_nocrc_update_bits(void *context, unsigned int reg,
last_byte = mcp251xfd_last_byte_set(mask);
len = last_byte - first_byte + 1;
- if (mcp251xfd_update_bits_read_reg(reg)) {
+ if (mcp251xfd_update_bits_read_reg(priv, reg)) {
struct spi_transfer xfer[2] = { };
struct spi_message msg;
@@ -250,7 +260,6 @@ mcp251xfd_regmap_crc_read_check_crc(const struct mcp251xfd_map_buf_crc * const b
return 0;
}
-
static int
mcp251xfd_regmap_crc_read_one(struct mcp251xfd_priv *priv,
struct spi_message *msg, unsigned int data_len)
@@ -325,19 +334,21 @@ mcp251xfd_regmap_crc_read(void *context,
* register. It increments once per SYS clock tick,
* which is 20 or 40 MHz.
*
- * Observation shows that if the lowest byte (which is
- * transferred first on the SPI bus) of that register
- * is 0x00 or 0x80 the calculated CRC doesn't always
- * match the transferred one.
+ * Observation on the mcp2518fd shows that if the
+ * lowest byte (which is transferred first on the SPI
+ * bus) of that register is 0x00 or 0x80 the
+ * calculated CRC doesn't always match the transferred
+ * one. On the mcp2517fd this problem is not limited
+ * to the first byte being 0x00 or 0x80.
*
* If the highest bit in the lowest byte is flipped
* the transferred CRC matches the calculated one. We
- * assume for now the CRC calculation in the chip
- * works on wrong data and the transferred data is
- * correct.
+ * assume for now the CRC operates on the correct
+ * data.
*/
if (reg == MCP251XFD_REG_TBC &&
- (buf_rx->data[0] == 0x0 || buf_rx->data[0] == 0x80)) {
+ ((buf_rx->data[0] & 0xf8) == 0x0 ||
+ (buf_rx->data[0] & 0xf8) == 0x80)) {
/* Flip highest bit in lowest byte of le32 */
buf_rx->data[0] ^= 0x80;
@@ -347,10 +358,8 @@ mcp251xfd_regmap_crc_read(void *context,
val_len);
if (!err) {
/* If CRC is now correct, assume
- * transferred data was OK, flip bit
- * back to original value.
+ * flipped data is OK.
*/
- buf_rx->data[0] ^= 0x80;
goto out;
}
}
@@ -369,7 +378,7 @@ mcp251xfd_regmap_crc_read(void *context,
* to the caller. It will take care of both cases.
*
*/
- if (reg == MCP251XFD_REG_OSC) {
+ if (reg == MCP251XFD_REG_OSC && val_len == sizeof(__le32)) {
err = 0;
goto out;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
new file mode 100644
index 000000000000..bfe4caa0c99d
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <asm/unaligned.h>
+
+#include "mcp251xfd.h"
+#include "mcp251xfd-ram.h"
+
+static inline u8
+mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
+ union mcp251xfd_write_reg_buf *write_reg_buf,
+ const u16 reg, const u32 mask, const u32 val)
+{
+ u8 first_byte, last_byte, len;
+ u8 *data;
+ __le32 val_le32;
+
+ first_byte = mcp251xfd_first_byte_set(mask);
+ last_byte = mcp251xfd_last_byte_set(mask);
+ len = last_byte - first_byte + 1;
+
+ data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte, len);
+ val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
+ memcpy(data, &val_le32, len);
+
+ if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) {
+ len += sizeof(write_reg_buf->nocrc.cmd);
+ } else if (len == 1) {
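+ /* Single byte register writes use the "safe write" command
+ * layout, which carries no length field, see
+ * mcp251xfd_spi_cmd_write().
+ */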
+ u16 crc;
+
+ /* CRC */
+ len += sizeof(write_reg_buf->safe.cmd);
+ crc = mcp251xfd_crc16_compute(&write_reg_buf->safe, len);
+ put_unaligned_be16(crc, (void *)write_reg_buf + len);
+
+ /* Total length */
+ len += sizeof(write_reg_buf->safe.crc);
+ } else {
+ u16 crc;
+
+ mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
+ len);
+ /* CRC */
+ len += sizeof(write_reg_buf->crc.cmd);
+ crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
+ put_unaligned_be16(crc, (void *)write_reg_buf + len);
+
+ /* Total length */
+ len += sizeof(write_reg_buf->crc.crc);
+ }
+
+ return len;
+}
+
+static void
+mcp251xfd_ring_init_tef(struct mcp251xfd_priv *priv, u16 *base)
+{
+ struct mcp251xfd_tef_ring *tef_ring;
+ struct spi_transfer *xfer;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i;
+
+ /* TEF */
+ tef_ring = priv->tef;
+ tef_ring->head = 0;
+ tef_ring->tail = 0;
+
+ /* TEF- and TX-FIFO have same number of objects */
+ *base = mcp251xfd_get_tef_obj_addr(priv->tx->obj_num);
+
+ /* FIFO IRQ enable */
+ addr = MCP251XFD_REG_TEFCON;
+ val = MCP251XFD_REG_TEFCON_TEFOVIE | MCP251XFD_REG_TEFCON_TEFNEIE;
+
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->irq_enable_buf,
+ addr, val, val);
+ tef_ring->irq_enable_xfer.tx_buf = &tef_ring->irq_enable_buf;
+ tef_ring->irq_enable_xfer.len = len;
+ spi_message_init_with_transfers(&tef_ring->irq_enable_msg,
+ &tef_ring->irq_enable_xfer, 1);
+
+ /* FIFO increment TEF tail pointer */
+ addr = MCP251XFD_REG_TEFCON;
+ val = MCP251XFD_REG_TEFCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf,
+ addr, val, val);
+
+ for (i = 0; i < ARRAY_SIZE(tef_ring->uinc_xfer); i++) {
+ xfer = &tef_ring->uinc_xfer[i];
+ xfer->tx_buf = &tef_ring->uinc_buf;
+ xfer->len = len;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ }
+
+ /* "cs_change == 1" on the last transfer results in an active
+ * chip select after the complete SPI message. This causes the
+ * controller to interpret the next register access as
+ * data. Set "cs_change" of the last transfer to "0" to
+ * properly deactivate the chip select at the end of the
+ * message.
+ */
+ xfer->cs_change = 0;
+
+ if (priv->tx_coalesce_usecs_irq || priv->tx_obj_num_coalesce_irq) {
+ val = MCP251XFD_REG_TEFCON_UINC |
+ MCP251XFD_REG_TEFCON_TEFOVIE |
+ MCP251XFD_REG_TEFCON_TEFHIE;
+
+ len = mcp251xfd_cmd_prepare_write_reg(priv,
+ &tef_ring->uinc_irq_disable_buf,
+ addr, val, val);
+ xfer->tx_buf = &tef_ring->uinc_irq_disable_buf;
+ xfer->len = len;
+ }
+}
+
+static void
+mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_tx_ring *ring,
+ struct mcp251xfd_tx_obj *tx_obj,
+ const u8 rts_buf_len,
+ const u8 n)
+{
+ struct spi_transfer *xfer;
+ u16 addr;
+
+ /* FIFO load */
+ addr = mcp251xfd_get_tx_obj_addr(ring, n);
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
+ mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
+ addr);
+ else
+ mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
+ addr);
+
+ xfer = &tx_obj->xfer[0];
+ xfer->tx_buf = &tx_obj->buf;
+ xfer->len = 0; /* actual len is assigned on the fly */
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ /* FIFO request to send */
+ xfer = &tx_obj->xfer[1];
+ xfer->tx_buf = &ring->rts_buf;
+ xfer->len = rts_buf_len;
+
+ /* SPI message */
+ spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
+ ARRAY_SIZE(tx_obj->xfer));
+}
+
+static void
+mcp251xfd_ring_init_tx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
+{
+ struct mcp251xfd_tx_ring *tx_ring;
+ struct mcp251xfd_tx_obj *tx_obj;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i;
+
+ tx_ring = priv->tx;
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+ tx_ring->base = *base;
+ tx_ring->nr = 0;
+ tx_ring->fifo_nr = *fifo_nr;
+
+ *base = mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num);
+ *fifo_nr += 1;
+
+ /* FIFO request to send */
+ addr = MCP251XFD_REG_FIFOCON(tx_ring->fifo_nr);
+ val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
+ addr, val, val);
+
+ mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
+ mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
+}
+
+static void
+mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
+{
+ struct mcp251xfd_rx_ring *rx_ring;
+ struct spi_transfer *xfer;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i, j;
+
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ rx_ring->head = 0;
+ rx_ring->tail = 0;
+ rx_ring->base = *base;
+ rx_ring->nr = i;
+ rx_ring->fifo_nr = *fifo_nr;
+
+ *base = mcp251xfd_get_rx_obj_addr(rx_ring, rx_ring->obj_num);
+ *fifo_nr += 1;
+
+ /* FIFO IRQ enable */
+ addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr);
+ val = MCP251XFD_REG_FIFOCON_RXOVIE |
+ MCP251XFD_REG_FIFOCON_TFNRFNIE;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->irq_enable_buf,
+ addr, val, val);
+ rx_ring->irq_enable_xfer.tx_buf = &rx_ring->irq_enable_buf;
+ rx_ring->irq_enable_xfer.len = len;
+ spi_message_init_with_transfers(&rx_ring->irq_enable_msg,
+ &rx_ring->irq_enable_xfer, 1);
+
+ /* FIFO increment RX tail pointer */
+ val = MCP251XFD_REG_FIFOCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf,
+ addr, val, val);
+
+ for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) {
+ xfer = &rx_ring->uinc_xfer[j];
+ xfer->tx_buf = &rx_ring->uinc_buf;
+ xfer->len = len;
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+ }
+
+ /* "cs_change == 1" on the last transfer results in an
+ * active chip select after the complete SPI
+ * message. This causes the controller to interpret
+ * the next register access as data. Set "cs_change"
+ * of the last transfer to "0" to properly deactivate
+ * the chip select at the end of the message.
+ */
+ xfer->cs_change = 0;
+
+ /* Use 1st RX-FIFO for IRQ coalescing. If enabled
+ * (rx_coalesce_usecs_irq or rx_max_coalesced_frames_irq
+ * is activated), use the last transfer to disable:
+ *
+ * - TFNRFNIE (Receive FIFO Not Empty Interrupt)
+ *
+ * and enable:
+ *
+ * - TFHRFHIE (Receive FIFO Half Full Interrupt)
+ * - or -
+ * - TFERFFIE (Receive FIFO Full Interrupt)
+ *
+ * depending on rx_max_coalesced_frames_irq.
+ *
+ * The RXOVIE (Overflow Interrupt) is always enabled.
+ */
+ if (rx_ring->nr == 0 && (priv->rx_coalesce_usecs_irq ||
+ priv->rx_obj_num_coalesce_irq)) {
+ val = MCP251XFD_REG_FIFOCON_UINC |
+ MCP251XFD_REG_FIFOCON_RXOVIE;
+
+ if (priv->rx_obj_num_coalesce_irq == rx_ring->obj_num)
+ val |= MCP251XFD_REG_FIFOCON_TFERFFIE;
+ else if (priv->rx_obj_num_coalesce_irq)
+ val |= MCP251XFD_REG_FIFOCON_TFHRFHIE;
+
+ len = mcp251xfd_cmd_prepare_write_reg(priv,
+ &rx_ring->uinc_irq_disable_buf,
+ addr, val, val);
+ xfer->tx_buf = &rx_ring->uinc_irq_disable_buf;
+ xfer->len = len;
+ }
+ }
+}
+
+int mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
+{
+ const struct mcp251xfd_rx_ring *rx_ring;
+ u16 base = 0, ram_used;
+ u8 fifo_nr = 1;
+ int i;
+
+ netdev_reset_queue(priv->ndev);
+
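+ /* The TEF is not a numbered FIFO; general purpose FIFO
+ * numbering starts at 1, the RX FIFOs are assigned first and
+ * the TX FIFO takes the next free number.
+ */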
+ mcp251xfd_ring_init_tef(priv, &base);
+ mcp251xfd_ring_init_rx(priv, &base, &fifo_nr);
+ mcp251xfd_ring_init_tx(priv, &base, &fifo_nr);
+
+ /* mcp251xfd_handle_rxif() will iterate over all RX rings.
+ * Rings with their corresponding bit set in
+ * priv->regs_status.rxif are read out.
+ *
+ * If the chip is configured for only 1 RX-FIFO, and if there
+ * is an RX interrupt pending (RXIF in INT register is set),
+ * it must be the 1st RX-FIFO.
+ *
+ * We mark the RXIF of the 1st FIFO as pending here, so that
+ * we can skip the read of the RXIF register in
+ * mcp251xfd_read_regs_status() for the 1 RX-FIFO only case.
+ *
+ * If we use more than 1 RX-FIFO, this value gets overwritten
+ * in mcp251xfd_read_regs_status(), so set it unconditionally
+ * here.
+ */
+ priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr);
+
+ if (priv->tx_obj_num_coalesce_irq) {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes (coalesce)\n",
+ mcp251xfd_get_tef_obj_addr(0),
+ priv->tx_obj_num_coalesce_irq,
+ sizeof(struct mcp251xfd_hw_tef_obj),
+ priv->tx_obj_num_coalesce_irq *
+ sizeof(struct mcp251xfd_hw_tef_obj));
+
+ netdev_dbg(priv->ndev,
+ " 0x%03x: %2d*%zu bytes = %4zu bytes\n",
+ mcp251xfd_get_tef_obj_addr(priv->tx_obj_num_coalesce_irq),
+ priv->tx->obj_num - priv->tx_obj_num_coalesce_irq,
+ sizeof(struct mcp251xfd_hw_tef_obj),
+ (priv->tx->obj_num - priv->tx_obj_num_coalesce_irq) *
+ sizeof(struct mcp251xfd_hw_tef_obj));
+ } else {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes\n",
+ mcp251xfd_get_tef_obj_addr(0),
+ priv->tx->obj_num, sizeof(struct mcp251xfd_hw_tef_obj),
+ priv->tx->obj_num * sizeof(struct mcp251xfd_hw_tef_obj));
+ }
+
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ if (rx_ring->nr == 0 && priv->rx_obj_num_coalesce_irq) {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes (coalesce)\n",
+ rx_ring->nr, rx_ring->fifo_nr,
+ mcp251xfd_get_rx_obj_addr(rx_ring, 0),
+ priv->rx_obj_num_coalesce_irq, rx_ring->obj_size,
+ priv->rx_obj_num_coalesce_irq * rx_ring->obj_size);
+
+ if (priv->rx_obj_num_coalesce_irq == MCP251XFD_FIFO_DEPTH)
+ continue;
+
+ netdev_dbg(priv->ndev,
+ " 0x%03x: %2u*%u bytes = %4u bytes\n",
+ mcp251xfd_get_rx_obj_addr(rx_ring,
+ priv->rx_obj_num_coalesce_irq),
+ rx_ring->obj_num - priv->rx_obj_num_coalesce_irq,
+ rx_ring->obj_size,
+ (rx_ring->obj_num - priv->rx_obj_num_coalesce_irq) *
+ rx_ring->obj_size);
+ } else {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
+ rx_ring->nr, rx_ring->fifo_nr,
+ mcp251xfd_get_rx_obj_addr(rx_ring, 0),
+ rx_ring->obj_num, rx_ring->obj_size,
+ rx_ring->obj_num * rx_ring->obj_size);
+ }
+ }
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TX: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n",
+ priv->tx->fifo_nr,
+ mcp251xfd_get_tx_obj_addr(priv->tx, 0),
+ priv->tx->obj_num, priv->tx->obj_size,
+ priv->tx->obj_num * priv->tx->obj_size);
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: free: %4d bytes\n",
+ MCP251XFD_RAM_SIZE - (base - MCP251XFD_RAM_START));
+
+ ram_used = base - MCP251XFD_RAM_START;
+ if (ram_used > MCP251XFD_RAM_SIZE) {
+ netdev_err(priv->ndev,
+ "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n",
+ ram_used, MCP251XFD_RAM_SIZE);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
+ kfree(priv->rx[i]);
+ priv->rx[i] = NULL;
+ }
+}
+
+static enum hrtimer_restart mcp251xfd_rx_irq_timer(struct hrtimer *t)
+{
+ struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
+ rx_irq_timer);
+ struct mcp251xfd_rx_ring *ring = priv->rx[0];
+
+ if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
+ return HRTIMER_NORESTART;
+
+ spi_async(priv->spi, &ring->irq_enable_msg);
+
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart mcp251xfd_tx_irq_timer(struct hrtimer *t)
+{
+ struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv,
+ tx_irq_timer);
+ struct mcp251xfd_tef_ring *ring = priv->tef;
+
+ if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags))
+ return HRTIMER_NORESTART;
+
+ spi_async(priv->spi, &ring->irq_enable_msg);
+
+ return HRTIMER_NORESTART;
+}
+
+const struct can_ram_config mcp251xfd_ram_config = {
+ .rx = {
+ .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_rx_obj_can),
+ .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_rx_obj_canfd),
+ .min = MCP251XFD_RX_OBJ_NUM_MIN,
+ .max = MCP251XFD_RX_OBJ_NUM_MAX,
+ .def[CAN_RAM_MODE_CAN] = CAN_RAM_NUM_MAX,
+ .def[CAN_RAM_MODE_CANFD] = CAN_RAM_NUM_MAX,
+ .fifo_num = MCP251XFD_FIFO_RX_NUM,
+ .fifo_depth_min = MCP251XFD_RX_FIFO_DEPTH_MIN,
+ .fifo_depth_coalesce_min = MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN,
+ },
+ .tx = {
+ .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_tef_obj) +
+ sizeof(struct mcp251xfd_hw_tx_obj_can),
+ .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_tef_obj) +
+ sizeof(struct mcp251xfd_hw_tx_obj_canfd),
+ .min = MCP251XFD_TX_OBJ_NUM_MIN,
+ .max = MCP251XFD_TX_OBJ_NUM_MAX,
+ .def[CAN_RAM_MODE_CAN] = MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT,
+ .def[CAN_RAM_MODE_CANFD] = MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT,
+ .fifo_num = MCP251XFD_FIFO_TX_NUM,
+ .fifo_depth_min = MCP251XFD_TX_FIFO_DEPTH_MIN,
+ .fifo_depth_coalesce_min = MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN,
+ },
+ .size = MCP251XFD_RAM_SIZE,
+ .fifo_depth = MCP251XFD_FIFO_DEPTH,
+};
+
+int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
+{
+ const bool fd_mode = mcp251xfd_is_fd_mode(priv);
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ struct mcp251xfd_rx_ring *rx_ring;
+ u8 tx_obj_size, rx_obj_size;
+ u8 rem, i;
+
+ /* switching from CAN-2.0 to CAN-FD mode or vice versa */
+ if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) {
+ struct can_ram_layout layout;
+
+ can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode);
+ priv->rx_obj_num = layout.default_rx;
+ tx_ring->obj_num = layout.default_tx;
+ }
+
+ if (fd_mode) {
+ tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
+ rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
+ set_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
+ } else {
+ tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
+ rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
+ clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
+ }
+
+ tx_ring->obj_size = tx_obj_size;
+
+ rem = priv->rx_obj_num;
+ for (i = 0; i < ARRAY_SIZE(priv->rx) && rem; i++) {
+ u8 rx_obj_num;
+
+ if (i == 0 && priv->rx_obj_num_coalesce_irq)
+ rx_obj_num = min_t(u8, priv->rx_obj_num_coalesce_irq * 2,
+ MCP251XFD_FIFO_DEPTH);
+ else
+ rx_obj_num = min_t(u8, rounddown_pow_of_two(rem),
+ MCP251XFD_FIFO_DEPTH);
+ rem -= rx_obj_num;
+
+ rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
+ GFP_KERNEL);
+ if (!rx_ring) {
+ mcp251xfd_ring_free(priv);
+ return -ENOMEM;
+ }
+
+ rx_ring->obj_num = rx_obj_num;
+ rx_ring->obj_size = rx_obj_size;
+ priv->rx[i] = rx_ring;
+ }
+ priv->rx_ring_num = i;
+
+ hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer;
+
+ hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer;
+
+ return 0;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
new file mode 100644
index 000000000000..ced8d9c81f8c
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/bitfield.h>
+
+#include "mcp251xfd.h"
+
+static inline int
+mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *rx_head, bool *fifo_empty)
+{
+ u32 fifo_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+ *fifo_empty = !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *rx_tail)
+{
+ u32 fifo_ua;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
+ &fifo_ua);
+ if (err)
+ return err;
+
+ fifo_ua -= ring->base - MCP251XFD_RAM_START;
+ *rx_tail = fifo_ua / ring->obj_size;
+
+ return 0;
+}
+
+static int
+mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u8 rx_tail_chip, rx_tail;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
+ return 0;
+
+ err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
+ if (err)
+ return err;
+
+ rx_tail = mcp251xfd_get_rx_tail(ring);
+ if (rx_tail_chip != rx_tail) {
+ netdev_err(priv->ndev,
+ "RX tail of chip (%d) and ours (%d) inconsistent.\n",
+ rx_tail_chip, rx_tail);
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static int
+mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring)
+{
+ u32 new_head;
+ u8 chip_rx_head;
+ bool fifo_empty;
+ int err;
+
+ err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head,
+ &fifo_empty);
+ if (err || fifo_empty)
+ return err;
+
+ /* chip_rx_head is the next RX-Object filled by the HW.
+ * The new RX head must be >= the old head.
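+ * Example: with obj_num = 32 and ring->head = 70, a chip head
+ * of 10 yields 64 + 10 = 74; a chip head of 4 means the
+ * hardware wrapped, so obj_num is added and the new head is
+ * 64 + 4 + 32 = 100.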
+ */
+ new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
+ if (new_head <= ring->head)
+ new_head += ring->obj_num;
+
+ ring->head = new_head;
+
+ return mcp251xfd_check_rx_tail(priv, ring);
+}
+
+static void
+mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
+ struct sk_buff *skb)
+{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ u8 dlc;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
+ u32 sid, eid;
+
+ eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
+ sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);
+
+ cfd->can_id = CAN_EFF_FLAG |
+ FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
+ FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
+ } else {
+ cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
+ hw_rx_obj->id);
+ }
+
+ dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags);
+
+ /* CANFD */
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
+ cfd->flags |= CANFD_ESI;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
+ cfd->flags |= CANFD_BRS;
+
+ cfd->len = can_fd_dlc2len(dlc);
+ } else {
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
+ cfd->can_id |= CAN_RTR_FLAG;
+
+ can_frame_set_cc_len((struct can_frame *)cfd, dlc,
+ priv->can.ctrlmode);
+ }
+
+ if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
+ memcpy(cfd->data, hw_rx_obj->data, cfd->len);
+
+ mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
+}
+
+static int
+mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring,
+ const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct sk_buff *skb;
+ struct canfd_frame *cfd;
+ int err;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
+ skb = alloc_canfd_skb(priv->ndev, &cfd);
+ else
+ skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
+
+ if (!skb) {
+ stats->rx_dropped++;
+ return 0;
+ }
+
+ mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
+ const u8 offset, const u8 len)
+{
+ const int val_bytes = regmap_get_val_bytes(priv->map_rx);
+ int err;
+
+ err = regmap_bulk_read(priv->map_rx,
+ mcp251xfd_get_rx_obj_addr(ring, offset),
+ hw_rx_obj,
+ len * ring->obj_size / val_bytes);
+
+ return err;
+}
+
+static int
+mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring)
+{
+ struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
+ u8 rx_tail, len;
+ int err, i;
+
+ err = mcp251xfd_rx_ring_update(priv, ring);
+ if (err)
+ return err;
+
+ while ((len = mcp251xfd_get_rx_linear_len(ring))) {
+ int offset;
+
+ rx_tail = mcp251xfd_get_rx_tail(ring);
+
+ err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
+ rx_tail, len);
+ if (err)
+ return err;
+
+ for (i = 0; i < len; i++) {
+ err = mcp251xfd_handle_rxif_one(priv, ring,
+ (void *)hw_rx_obj +
+ i * ring->obj_size);
+ if (err)
+ return err;
+ }
+
+ /* Increment the RX FIFO tail pointer 'len' times in a
+ * single SPI message.
+ *
+ * Note:
+ * Calculate offset, so that the SPI transfer ends on
+ * the last transfer of the uinc_xfer array, which has
+ * "cs_change == 0", to properly deactivate the chip
+ * select.
+ */
+ offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+ err = spi_sync_transfer(priv->spi,
+ ring->uinc_xfer + offset, len);
+ if (err)
+ return err;
+
+ ring->tail += len;
+ }
+
+ return 0;
+}
+
+int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_rx_ring *ring;
+ int err, n;
+
+ mcp251xfd_for_each_rx_ring(priv, ring, n) {
+ /* - if RX IRQ coalescing is active always handle ring 0
+ * - only handle rings if RX IRQ is active
+ */
+ if ((ring->nr > 0 || !priv->rx_obj_num_coalesce_irq) &&
+ !(priv->regs_status.rxif & BIT(ring->fifo_nr)))
+ continue;
+
+ err = mcp251xfd_handle_rxif_ring(priv, ring);
+ if (err)
+ return err;
+ }
+
+ if (priv->rx_coalesce_usecs_irq)
+ hrtimer_start(&priv->rx_irq_timer,
+ ns_to_ktime(priv->rx_coalesce_usecs_irq *
+ NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+
+ return 0;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
new file mode 100644
index 000000000000..237617b0c125
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/bitfield.h>
+
+#include "mcp251xfd.h"
+
+static inline int
+mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ u8 *tef_tail)
+{
+ u32 tef_ua;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
+ if (err)
+ return err;
+
+ *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
+
+ return 0;
+}
+
+static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
+{
+ u8 tef_tail_chip, tef_tail;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
+ return 0;
+
+ err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
+ if (err)
+ return err;
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ if (tef_tail_chip != tef_tail) {
+ netdev_err(priv->ndev,
+ "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
+ tef_tail_chip, tef_tail);
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static int
+mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ u32 tef_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
+ if (err)
+ return err;
+
+ if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
+ netdev_err(priv->ndev,
+ "Transmit Event FIFO buffer overflow.\n");
+ return -ENOBUFS;
+ }
+
+ netdev_info(priv->ndev,
+ "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
+ tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
+ "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
+ "not empty" : "empty",
+ seq, priv->tef->tail, priv->tef->head, tx_ring->head);
+
+ /* The Sequence Number in the TEF doesn't match our tef_tail. */
+ return -EAGAIN;
+}
+
+static int
+mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
+ unsigned int *frame_len_ptr)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct sk_buff *skb;
+ u32 seq, seq_masked, tef_tail_masked, tef_tail;
+
+ seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
+ hw_tef_obj->flags);
+
+ /* Use the MCP2517FD mask on the MCP2518FD, too. We only
+ * compare 7 bits; this should be enough to detect
+ * not-yet-completed, i.e. old TEF objects.
+ */
+ seq_masked = seq &
+ field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+ tef_tail_masked = priv->tef->tail &
+ field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+ if (seq_masked != tef_tail_masked)
+ return mcp251xfd_handle_tefif_recover(priv, seq);
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ skb = priv->can.echo_skb[tef_tail];
+ if (skb)
+ mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb(&priv->offload,
+ tef_tail, hw_tef_obj->ts,
+ frame_len_ptr);
+ stats->tx_packets++;
+ priv->tef->tail++;
+
+ return 0;
+}
+
+static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ unsigned int new_head;
+ u8 chip_tx_tail;
+ int err;
+
+ err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
+ if (err)
+ return err;
+
+ /* chip_tx_tail is the next TX-Object sent by the HW.
+ * The new TEF head must be >= the old head, ...
+ */
+ new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
+ if (new_head <= priv->tef->head)
+ new_head += tx_ring->obj_num;
+
+ /* ... but it cannot exceed the TX head. */
+ priv->tef->head = min(new_head, tx_ring->head);
+
+ return mcp251xfd_check_tef_tail(priv);
+}
+
+static inline int
+mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_hw_tef_obj *hw_tef_obj,
+ const u8 offset, const u8 len)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ const int val_bytes = regmap_get_val_bytes(priv->map_rx);
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ (offset > tx_ring->obj_num ||
+ len > tx_ring->obj_num ||
+ offset + len > tx_ring->obj_num)) {
+ netdev_err(priv->ndev,
+ "Trying to read too many TEF objects (max=%d, offset=%d, len=%d).\n",
+ tx_ring->obj_num, offset, len);
+ return -ERANGE;
+ }
+
+ return regmap_bulk_read(priv->map_rx,
+ mcp251xfd_get_tef_obj_addr(offset),
+ hw_tef_obj,
+ sizeof(*hw_tef_obj) / val_bytes * len);
+}
+
+static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_ecc *ecc = &priv->ecc;
+
+ ecc->ecc_stat = 0;
+}
+
+int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
+ unsigned int total_frame_len = 0;
+ u8 tef_tail, len, l;
+ int err, i;
+
+ err = mcp251xfd_tef_ring_update(priv);
+ if (err)
+ return err;
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ len = mcp251xfd_get_tef_len(priv);
+ l = mcp251xfd_get_tef_linear_len(priv);
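+ /* The TEF is a ring buffer: read the linear part first and,
+ * if the valid region wraps past the end of the buffer, fetch
+ * the remainder from the start in a second transfer.
+ */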
+ err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
+ if (err)
+ return err;
+
+ if (l < len) {
+ err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < len; i++) {
+ unsigned int frame_len = 0;
+
+ err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
+ /* -EAGAIN means the Sequence Number in the TEF
+ * doesn't match our tef_tail. This can happen if we
+ * read the TEF objects too early. Leave the loop and let the
+ * interrupt handler call us again.
+ */
+ if (err == -EAGAIN)
+ goto out_netif_wake_queue;
+ if (err)
+ return err;
+
+ total_frame_len += frame_len;
+ }
+
+ out_netif_wake_queue:
+ len = i; /* number of handled good TEFs */
+ if (len) {
+ struct mcp251xfd_tef_ring *ring = priv->tef;
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ int offset;
+
+ /* Increment the TEF FIFO tail pointer 'len' times in
+ * a single SPI message.
+ *
+ * Note:
+ * Calculate offset, so that the SPI transfer ends on
+ * the last transfer of the uinc_xfer array, which has
+ * "cs_change == 0", to properly deactivate the chip
+ * select.
+ */
+ offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+ err = spi_sync_transfer(priv->spi,
+ ring->uinc_xfer + offset, len);
+ if (err)
+ return err;
+
+ tx_ring->tail += len;
+ netdev_completed_queue(priv->ndev, len, total_frame_len);
+
+ err = mcp251xfd_check_tef_tail(priv);
+ if (err)
+ return err;
+ }
+
+ mcp251xfd_ecc_tefif_successful(priv);
+
+ if (mcp251xfd_get_tx_free(priv->tx)) {
+ /* Make sure that anybody stopping the queue after
+ * this sees the new tx_ring->tail.
+ */
+ smp_mb();
+ netif_wake_queue(priv->ndev);
+ }
+
+ if (priv->tx_coalesce_usecs_irq)
+ hrtimer_start(&priv->tx_irq_timer,
+ ns_to_ktime(priv->tx_coalesce_usecs_irq *
+ NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+
+ return 0;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
new file mode 100644
index 000000000000..160528d3cc26
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+
+#include "mcp251xfd.h"
+
+static inline struct
+mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
+{
+ u8 tx_head;
+
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+
+ return &tx_ring->obj[tx_head];
+}
+
+static void
+mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_obj *tx_obj,
+ const struct sk_buff *skb,
+ unsigned int seq)
+{
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
+ union mcp251xfd_tx_obj_load_buf *load_buf;
+ u8 dlc;
+ u32 id, flags;
+ int len_sanitized = 0, len;
+
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ u32 sid, eid;
+
+ sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
+ eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);
+
+ id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
+ FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);
+
+ flags = MCP251XFD_OBJ_FLAGS_IDE;
+ } else {
+ id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
+ flags = 0;
+ }
+
+ /* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
+ * harm, only the lower 7 bits will be transferred into the
+ * TEF object.
+ */
+ flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);
+
+ if (cfd->can_id & CAN_RTR_FLAG)
+ flags |= MCP251XFD_OBJ_FLAGS_RTR;
+ else
+ len_sanitized = canfd_sanitize_len(cfd->len);
+
+ /* CANFD */
+ if (can_is_canfd_skb(skb)) {
+ if (cfd->flags & CANFD_ESI)
+ flags |= MCP251XFD_OBJ_FLAGS_ESI;
+
+ flags |= MCP251XFD_OBJ_FLAGS_FDF;
+
+ if (cfd->flags & CANFD_BRS)
+ flags |= MCP251XFD_OBJ_FLAGS_BRS;
+
+ dlc = can_fd_len2dlc(cfd->len);
+ } else {
+ dlc = can_get_cc_dlc((struct can_frame *)cfd,
+ priv->can.ctrlmode);
+ }
+
+ flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);
+
+ load_buf = &tx_obj->buf;
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
+ hw_tx_obj = &load_buf->crc.hw_tx_obj;
+ else
+ hw_tx_obj = &load_buf->nocrc.hw_tx_obj;
+
+ put_unaligned_le32(id, &hw_tx_obj->id);
+ put_unaligned_le32(flags, &hw_tx_obj->flags);
+
+ /* Copy data */
+ memcpy(hw_tx_obj->data, cfd->data, cfd->len);
+
+ /* Clear unused data at end of CAN frame */
+ if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
+ int pad_len;
+
+ pad_len = len_sanitized - cfd->len;
+ if (pad_len)
+ memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
+ }
+
+ /* Number of bytes to be written into the RAM of the controller */
+ len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
+ if (MCP251XFD_SANITIZE_CAN)
+ len += round_up(len_sanitized, sizeof(u32));
+ else
+ len += round_up(cfd->len, sizeof(u32));
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
+ u16 crc;
+
+ mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
+ len);
+ /* CRC */
+ len += sizeof(load_buf->crc.cmd);
+ crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
+ put_unaligned_be16(crc, (void *)load_buf + len);
+
+ /* Total length */
+ len += sizeof(load_buf->crc.crc);
+ } else {
+ len += sizeof(load_buf->nocrc.cmd);
+ }
+
+ tx_obj->xfer[0].len = len;
+}
+
+static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_obj *tx_obj)
+{
+ return spi_async(priv->spi, &tx_obj->msg);
+}
+
+static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_ring *tx_ring)
+{
+ if (mcp251xfd_get_tx_free(tx_ring) > 0)
+ return false;
+
+ netif_stop_queue(priv->ndev);
+
+ /* Memory barrier before checking tx_free (head and tail) */
+ smp_mb();
+
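+ /* Re-check after stopping the queue: the TEF handler may have
+ * freed objects in the meantime and missed the stopped queue,
+ * so restart it below instead of stalling.
+ */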
+ if (mcp251xfd_get_tx_free(tx_ring) == 0) {
+ netdev_dbg(priv->ndev,
+ "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
+ tx_ring->head, tx_ring->tail,
+ tx_ring->head - tx_ring->tail);
+
+ return true;
+ }
+
+ netif_start_queue(priv->ndev);
+
+ return false;
+}
+
+netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ struct mcp251xfd_tx_obj *tx_obj;
+ unsigned int frame_len;
+ u8 tx_head;
+ int err;
+
+ if (can_dev_dropped_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (mcp251xfd_tx_busy(priv, tx_ring))
+ return NETDEV_TX_BUSY;
+
+ tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
+ mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);
+
+ /* Stop queue if we occupy the complete TX FIFO */
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+ tx_ring->head++;
+ if (mcp251xfd_get_tx_free(tx_ring) == 0)
+ netif_stop_queue(ndev);
+
+ frame_len = can_skb_get_frame_len(skb);
+ err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
+ if (!err)
+ netdev_sent_queue(priv->ndev, frame_len);
+
+ err = mcp251xfd_tx_obj_write(priv, tx_obj);
+ if (err)
+ goto out_err;
+
+ return NETDEV_TX_OK;
+
+ out_err:
+ netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+
+ return NETDEV_TX_OK;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 0f322dabaf65..7024ff0cc2c0 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -2,14 +2,15 @@
*
* mcp251xfd - Microchip MCP251xFD Family CAN controller driver
*
- * Copyright (c) 2019 Pengutronix,
- * Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2019, 2020, 2021 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
* Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
*/
#ifndef _MCP251XFD_H
#define _MCP251XFD_H
+#include <linux/bitfield.h>
#include <linux/can/core.h>
#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
@@ -366,25 +367,6 @@
#define MCP251XFD_REG_DEVID_ID_MASK GENMASK(7, 4)
#define MCP251XFD_REG_DEVID_REV_MASK GENMASK(3, 0)
-/* number of TX FIFO objects, depending on CAN mode
- *
- * FIFO setup: tef: 8*12 bytes = 96 bytes, tx: 8*16 bytes = 128 bytes
- * FIFO setup: tef: 4*12 bytes = 48 bytes, tx: 4*72 bytes = 288 bytes
- */
-#define MCP251XFD_RX_OBJ_NUM_MAX 32
-#define MCP251XFD_TX_OBJ_NUM_CAN 8
-#define MCP251XFD_TX_OBJ_NUM_CANFD 4
-
-#if MCP251XFD_TX_OBJ_NUM_CAN > MCP251XFD_TX_OBJ_NUM_CANFD
-#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CAN
-#else
-#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CANFD
-#endif
-
-#define MCP251XFD_NAPI_WEIGHT 32
-#define MCP251XFD_TX_FIFO 1
-#define MCP251XFD_RX_FIFO(x) (MCP251XFD_TX_FIFO + 1 + (x))
-
/* SPI commands */
#define MCP251XFD_SPI_INSTRUCTION_RESET 0x0000
#define MCP251XFD_SPI_INSTRUCTION_WRITE 0x2000
@@ -405,12 +387,38 @@ static_assert(MCP251XFD_TIMESTAMP_WORK_DELAY_SEC <
#define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US)
#define MCP251XFD_POLL_SLEEP_US (10)
#define MCP251XFD_POLL_TIMEOUT_US (USEC_PER_MSEC)
+
+/* Misc */
+#define MCP251XFD_NAPI_WEIGHT 32
#define MCP251XFD_SOFTRESET_RETRIES_MAX 3
#define MCP251XFD_READ_CRC_RETRIES_MAX 3
#define MCP251XFD_ECC_CNT_MAX 2
#define MCP251XFD_SANITIZE_SPI 1
#define MCP251XFD_SANITIZE_CAN 1
+/* FIFO and Ring */
+#define MCP251XFD_FIFO_TEF_NUM 1U
+#define MCP251XFD_FIFO_RX_NUM 3U
+#define MCP251XFD_FIFO_TX_NUM 1U
+
+#define MCP251XFD_FIFO_DEPTH 32U
+
+#define MCP251XFD_RX_OBJ_NUM_MIN 16U
+#define MCP251XFD_RX_OBJ_NUM_MAX (MCP251XFD_FIFO_RX_NUM * MCP251XFD_FIFO_DEPTH)
+#define MCP251XFD_RX_FIFO_DEPTH_MIN 4U
+#define MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN 8U
+
+#define MCP251XFD_TX_OBJ_NUM_MIN 2U
+#define MCP251XFD_TX_OBJ_NUM_MAX 16U
+#define MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT 8U
+#define MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT 4U
+#define MCP251XFD_TX_FIFO_DEPTH_MIN 2U
+#define MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN 2U
+
+static_assert(MCP251XFD_FIFO_TEF_NUM == 1U);
+static_assert(MCP251XFD_FIFO_TEF_NUM == MCP251XFD_FIFO_TX_NUM);
+static_assert(MCP251XFD_FIFO_RX_NUM <= 4U);
+
/* Silence TX MAB overflow warnings */
#define MCP251XFD_QUIRK_MAB_NO_WARN BIT(0)
/* Use CRC to access registers */
@@ -433,7 +441,7 @@ struct mcp251xfd_hw_tef_obj {
/* The tx_obj_raw version is used in spi async, i.e. without
* regmap. We have to take care of endianness ourselves.
*/
-struct mcp251xfd_hw_tx_obj_raw {
+struct __packed mcp251xfd_hw_tx_obj_raw {
__le32 id;
__le32 flags;
u8 data[sizeof_field(struct canfd_frame, data)];
@@ -496,6 +504,11 @@ union mcp251xfd_write_reg_buf {
u8 data[4];
__be16 crc;
} crc;
+ struct __packed {
+ struct mcp251xfd_buf_cmd cmd;
+ u8 data[1];
+ __be16 crc;
+ } safe;
} ____cacheline_aligned;
struct mcp251xfd_tx_obj {
@@ -511,7 +524,12 @@ struct mcp251xfd_tef_ring {
/* u8 obj_num equals tx_ring->obj_num */
/* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
+ union mcp251xfd_write_reg_buf irq_enable_buf;
+ struct spi_transfer irq_enable_xfer;
+ struct spi_message irq_enable_msg;
+
union mcp251xfd_write_reg_buf uinc_buf;
+ union mcp251xfd_write_reg_buf uinc_irq_disable_buf;
struct spi_transfer uinc_xfer[MCP251XFD_TX_OBJ_NUM_MAX];
};
@@ -520,6 +538,8 @@ struct mcp251xfd_tx_ring {
unsigned int tail;
u16 base;
+ u8 nr;
+ u8 fifo_nr;
u8 obj_num;
u8 obj_size;
@@ -537,8 +557,13 @@ struct mcp251xfd_rx_ring {
u8 obj_num;
u8 obj_size;
+ union mcp251xfd_write_reg_buf irq_enable_buf;
+ struct spi_transfer irq_enable_xfer;
+ struct spi_message irq_enable_msg;
+
union mcp251xfd_write_reg_buf uinc_buf;
- struct spi_transfer uinc_xfer[MCP251XFD_RX_OBJ_NUM_MAX];
+ union mcp251xfd_write_reg_buf uinc_irq_disable_buf;
+ struct spi_transfer uinc_xfer[MCP251XFD_FIFO_DEPTH];
struct mcp251xfd_hw_rx_obj_canfd obj[];
};
@@ -560,12 +585,14 @@ struct mcp251xfd_ecc {
struct mcp251xfd_regs_status {
u32 intf;
+ u32 rxif;
};
enum mcp251xfd_model {
MCP251XFD_MODEL_MCP2517FD = 0x2517,
MCP251XFD_MODEL_MCP2518FD = 0x2518,
- MCP251XFD_MODEL_MCP251XFD = 0xffff, /* autodetect model */
+ MCP251XFD_MODEL_MCP251863 = 0x251863,
+ MCP251XFD_MODEL_MCP251XFD = 0xffffffff, /* autodetect model */
};
struct mcp251xfd_devtype_data {
@@ -573,6 +600,13 @@ struct mcp251xfd_devtype_data {
u32 quirks;
};
+enum mcp251xfd_flags {
+ MCP251XFD_FLAGS_DOWN,
+ MCP251XFD_FLAGS_FD_MODE,
+
+ __MCP251XFD_FLAGS_SIZE__
+};
+
struct mcp251xfd_priv {
struct can_priv can;
struct can_rx_offload offload;
@@ -591,12 +625,24 @@ struct mcp251xfd_priv {
struct spi_device *spi;
u32 spi_max_speed_hz_orig;
+ u32 spi_max_speed_hz_fast;
+ u32 spi_max_speed_hz_slow;
+
+ struct mcp251xfd_tef_ring tef[MCP251XFD_FIFO_TEF_NUM];
+ struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM];
+ struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM];
- struct mcp251xfd_tef_ring tef[1];
- struct mcp251xfd_tx_ring tx[1];
- struct mcp251xfd_rx_ring *rx[1];
+ DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__);
u8 rx_ring_num;
+ u8 rx_obj_num;
+ u8 rx_obj_num_coalesce_irq;
+ u8 tx_obj_num_coalesce_irq;
+
+ u32 rx_coalesce_usecs_irq;
+ u32 tx_coalesce_usecs_irq;
+ struct hrtimer rx_irq_timer;
+ struct hrtimer tx_irq_timer;
struct mcp251xfd_ecc ecc;
struct mcp251xfd_regs_status regs_status;
@@ -607,6 +653,7 @@ struct mcp251xfd_priv {
struct gpio_desc *rx_int;
struct clk *clk;
+ bool pll_enable;
struct regulator *reg_vdd;
struct regulator *reg_xceiver;
@@ -618,12 +665,19 @@ struct mcp251xfd_priv {
static inline bool \
mcp251xfd_is_##_model(const struct mcp251xfd_priv *priv) \
{ \
- return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model##FD; \
+ return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model; \
}
-MCP251XFD_IS(2517);
-MCP251XFD_IS(2518);
-MCP251XFD_IS(251X);
+MCP251XFD_IS(2517FD);
+MCP251XFD_IS(2518FD);
+MCP251XFD_IS(251863);
+MCP251XFD_IS(251XFD);
+
+static inline bool mcp251xfd_is_fd_mode(const struct mcp251xfd_priv *priv)
+{
+ /* listen-only mode works like FD mode */
+ return priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD);
+}
static inline u8 mcp251xfd_first_byte_set(u32 mask)
{
@@ -710,6 +764,13 @@ mcp251xfd_spi_cmd_write_crc_set_addr(struct mcp251xfd_buf_cmd_crc *cmd,
}
static inline void
+mcp251xfd_spi_cmd_write_safe_set_addr(struct mcp251xfd_buf_cmd *cmd,
+ u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE_CRC_SAFE | addr);
+}
+
+static inline void
mcp251xfd_spi_cmd_write_crc(struct mcp251xfd_buf_cmd_crc *cmd,
u16 addr, u16 len)
{
@@ -720,14 +781,20 @@ mcp251xfd_spi_cmd_write_crc(struct mcp251xfd_buf_cmd_crc *cmd,
static inline u8 *
mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv,
union mcp251xfd_write_reg_buf *write_reg_buf,
- u16 addr)
+ u16 addr, u8 len)
{
u8 *data;
if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
- mcp251xfd_spi_cmd_write_crc_set_addr(&write_reg_buf->crc.cmd,
- addr);
- data = write_reg_buf->crc.data;
+ if (len == 1) {
+ mcp251xfd_spi_cmd_write_safe_set_addr(&write_reg_buf->safe.cmd,
+ addr);
+ data = write_reg_buf->safe.data;
+ } else {
+ mcp251xfd_spi_cmd_write_crc_set_addr(&write_reg_buf->crc.cmd,
+ addr);
+ data = write_reg_buf->crc.data;
+ }
} else {
mcp251xfd_spi_cmd_write_nocrc(&write_reg_buf->nocrc.cmd,
addr);
@@ -761,6 +828,24 @@ mcp251xfd_get_rx_obj_addr(const struct mcp251xfd_rx_ring *ring, u8 n)
return ring->base + ring->obj_size * n;
}
+static inline int
+mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ u8 *tx_tail)
+{
+ u32 fifo_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg,
+ MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ return 0;
+}
+
static inline u8 mcp251xfd_get_tef_head(const struct mcp251xfd_priv *priv)
{
return priv->tef->head & (priv->tx->obj_num - 1);
@@ -849,15 +934,26 @@ mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring)
(n) < (priv)->rx_ring_num; \
(n)++, (ring) = *((priv)->rx + (n)))
-int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
+int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv);
u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size,
const void *data, size_t data_size);
u16 mcp251xfd_crc16_compute(const void *data, size_t data_size);
+void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv);
+int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
+extern const struct can_ram_config mcp251xfd_ram_config;
+int mcp251xfd_ring_init(struct mcp251xfd_priv *priv);
+void mcp251xfd_ring_free(struct mcp251xfd_priv *priv);
+int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
+int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv);
+int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv);
void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
struct sk_buff *skb, u32 timestamp);
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
+netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev);
+
#if IS_ENABLED(CONFIG_DEV_COREDUMP)
void mcp251xfd_dump(const struct mcp251xfd_priv *priv);
#else
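A note on the ring bookkeeping visible in the mcp251xfd header above: the TEF/TX head and tail fields are free-running counters, and accessors such as mcp251xfd_get_tef_head() reduce them to a slot index by masking with (obj_num - 1), which only works for power-of-two ring sizes. A minimal standalone sketch of that scheme, with illustrative names that are not taken from the driver:

/*
 * Sketch only: a free-running head/tail counter pair mapped onto a
 * power-of-two ring.  The low bits of a counter select the slot, and
 * the unsigned difference of the counters gives the fill level.
 */
struct demo_ring {
	unsigned int head;	/* next slot to produce into */
	unsigned int tail;	/* next slot to consume from */
	unsigned int obj_num;	/* ring size, must be a power of two */
};

static inline unsigned int demo_ring_index(const struct demo_ring *r,
					   unsigned int counter)
{
	return counter & (r->obj_num - 1);
}

static inline unsigned int demo_ring_fill_level(const struct demo_ring *r)
{
	return r->head - r->tail;	/* unsigned wrap-around is well defined */
}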
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 54aa7c25c4de..2b78f9197681 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -51,9 +51,9 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -61,6 +61,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#define DRV_NAME "sun4i_can"
@@ -200,10 +201,20 @@
#define SUN4I_CAN_MAX_IRQ 20
#define SUN4I_MODE_MAX_RETRIES 100
+/**
+ * struct sun4ican_quirks - Differences between SoC variants.
+ *
+ * @has_reset: SoC needs reset deasserted.
+ */
+struct sun4ican_quirks {
+ bool has_reset;
+};
+
struct sun4ican_priv {
struct can_priv can;
void __iomem *base;
struct clk *clk;
+ struct reset_control *reset;
spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */
};
@@ -418,7 +429,7 @@ static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *d
canid_t id;
int i;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dev_dropped_skb(dev, skb))
return NETDEV_TX_OK;
netif_stop_queue(dev);
@@ -490,21 +501,21 @@ static void sun4i_can_rx(struct net_device *dev)
}
/* remote frame ? */
- if (fi & SUN4I_MSG_RTR_FLAG)
+ if (fi & SUN4I_MSG_RTR_FLAG) {
id |= CAN_RTR_FLAG;
- else
+ } else {
for (i = 0; i < cf->len; i++)
cf->data[i] = readl(priv->base + dreg + i * 4);
+ stats->rx_bytes += cf->len;
+ }
+ stats->rx_packets++;
+
cf->can_id = id;
sun4i_can_write_cmdreg(priv, SUN4I_CMD_RELEASE_RBUF);
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
-
- can_led_event(dev, CAN_LED_EVENT_RX);
}
static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
@@ -525,11 +536,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
rxerr = (errc >> 16) & 0xFF;
txerr = errc & 0xFF;
- if (skb) {
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
-
if (isrc & SUN4I_INT_DATA_OR) {
/* data overrun interrupt */
netdev_dbg(dev, "data overrun interrupt\n");
@@ -560,6 +566,11 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
else
state = CAN_STATE_ERROR_ACTIVE;
}
+ if (skb && state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
if (isrc & SUN4I_INT_BUS_ERR) {
/* bus error interrupt */
netdev_dbg(dev, "bus error interrupt\n");
@@ -622,13 +633,10 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
can_bus_off(dev);
}
- if (likely(skb)) {
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (likely(skb))
netif_rx(skb);
- } else {
+ else
return -ENOMEM;
- }
return 0;
}
@@ -651,13 +659,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
if (isrc & SUN4I_INT_TBUF_VLD) {
/* transmission complete interrupt */
- stats->tx_bytes +=
- readl(priv->base +
- SUN4I_REG_RBUF_RBACK_START_ADDR) & 0xf;
+ stats->tx_bytes += can_get_echo_skb(dev, 0, NULL);
stats->tx_packets++;
- can_get_echo_skb(dev, 0, NULL);
netif_wake_queue(dev);
- can_led_event(dev, CAN_LED_EVENT_TX);
}
if ((isrc & SUN4I_INT_RBUF_VLD) &&
!(isrc & SUN4I_INT_DATA_OR)) {
@@ -702,6 +706,13 @@ static int sun4ican_open(struct net_device *dev)
goto exit_irq;
}
+ /* software reset deassert */
+ err = reset_control_deassert(priv->reset);
+ if (err) {
+ netdev_err(dev, "could not deassert CAN reset\n");
+ goto exit_soft_reset;
+ }
+
/* turn on clocking for CAN peripheral block */
err = clk_prepare_enable(priv->clk);
if (err) {
@@ -715,7 +726,6 @@ static int sun4ican_open(struct net_device *dev)
goto exit_can_start;
}
- can_led_event(dev, CAN_LED_EVENT_OPEN);
netif_start_queue(dev);
return 0;
@@ -723,6 +733,8 @@ static int sun4ican_open(struct net_device *dev)
exit_can_start:
clk_disable_unprepare(priv->clk);
exit_clock:
+ reset_control_assert(priv->reset);
+exit_soft_reset:
free_irq(dev->irq, dev);
exit_irq:
close_candev(dev);
@@ -736,10 +748,10 @@ static int sun4ican_close(struct net_device *dev)
netif_stop_queue(dev);
sun4i_can_stop(dev);
clk_disable_unprepare(priv->clk);
+ reset_control_assert(priv->reset);
free_irq(dev->irq, dev);
close_candev(dev);
- can_led_event(dev, CAN_LED_EVENT_STOP);
return 0;
}
@@ -750,9 +762,31 @@ static const struct net_device_ops sun4ican_netdev_ops = {
.ndo_start_xmit = sun4ican_start_xmit,
};
+static const struct ethtool_ops sun4ican_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static const struct sun4ican_quirks sun4ican_quirks_a10 = {
+ .has_reset = false,
+};
+
+static const struct sun4ican_quirks sun4ican_quirks_r40 = {
+ .has_reset = true,
+};
+
static const struct of_device_id sun4ican_of_match[] = {
- {.compatible = "allwinner,sun4i-a10-can"},
- {},
+ {
+ .compatible = "allwinner,sun4i-a10-can",
+ .data = &sun4ican_quirks_a10
+ }, {
+ .compatible = "allwinner,sun7i-a20-can",
+ .data = &sun4ican_quirks_a10
+ }, {
+ .compatible = "allwinner,sun8i-r40-can",
+ .data = &sun4ican_quirks_r40
+ }, {
+ /* sentinel */
+ },
};
MODULE_DEVICE_TABLE(of, sun4ican_of_match);
@@ -771,10 +805,28 @@ static int sun4ican_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct clk *clk;
+ struct reset_control *reset = NULL;
void __iomem *addr;
int err, irq;
struct net_device *dev;
struct sun4ican_priv *priv;
+ const struct sun4ican_quirks *quirks;
+
+ quirks = of_device_get_match_data(&pdev->dev);
+ if (!quirks) {
+ dev_err(&pdev->dev, "failed to determine the quirks to use\n");
+ err = -ENODEV;
+ goto exit;
+ }
+
+ if (quirks->has_reset) {
+ reset = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(reset)) {
+ dev_err(&pdev->dev, "unable to request reset\n");
+ err = PTR_ERR(reset);
+ goto exit;
+ }
+ }
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
@@ -804,6 +856,7 @@ static int sun4ican_probe(struct platform_device *pdev)
}
dev->netdev_ops = &sun4ican_netdev_ops;
+ dev->ethtool_ops = &sun4ican_ethtool_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
@@ -818,6 +871,7 @@ static int sun4ican_probe(struct platform_device *pdev)
CAN_CTRLMODE_3_SAMPLES;
priv->base = addr;
priv->clk = clk;
+ priv->reset = reset;
spin_lock_init(&priv->cmdreg_lock);
platform_set_drvdata(pdev, dev);
@@ -829,7 +883,6 @@ static int sun4ican_probe(struct platform_device *pdev)
DRV_NAME, err);
goto exit_free;
}
- devm_can_led_init(dev);
dev_info(&pdev->dev, "device registered (base=%p, irq=%d)\n",
priv->base, dev->irq);
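The sun4i probe above selects per-SoC behaviour through of_device_get_match_data(): every compatible entry in sun4ican_of_match carries a pointer to a struct sun4ican_quirks, and probing fails with -ENODEV when no match data is found. A hedged, self-contained sketch of that match-data pattern (driver and compatible names here are made up for illustration):

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_quirks {
	bool has_reset;		/* variant needs its reset line deasserted */
};

static const struct demo_quirks demo_quirks_plain = { .has_reset = false };
static const struct demo_quirks demo_quirks_reset = { .has_reset = true };

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-can",       .data = &demo_quirks_plain },
	{ .compatible = "vendor,demo-can-reset", .data = &demo_quirks_reset },
	{ /* sentinel */ },
};

static int demo_probe(struct platform_device *pdev)
{
	const struct demo_quirks *quirks;

	/* returns the .data pointer of the matching of_device_id entry */
	quirks = of_device_get_match_data(&pdev->dev);
	if (!quirks)
		return -ENODEV;

	/* quirks->has_reset would now gate the optional reset handling */
	return 0;
}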
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 353062ead98f..27700f72eac2 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* TI HECC (CAN) device driver
*
@@ -6,16 +7,6 @@
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
* Copyright (C) 2019 Jeroen Hofstee <jhofstee@victronenergy.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed as is WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/module.h>
@@ -23,6 +14,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>
@@ -34,7 +26,6 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/can/rx-offload.h>
#define DRV_NAME "ti_hecc"
@@ -479,7 +470,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 mbxno, mbx_mask, data;
unsigned long flags;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
mbxno = get_tx_head_mb(priv);
@@ -633,7 +624,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
cf->data[3] = CAN_ERR_PROT_LOC_ACK;
timestamp = hecc_read(priv, HECC_CANLNT);
- err = can_rx_offload_queue_sorted(&priv->offload, skb,
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb,
timestamp);
if (err)
ndev->stats.rx_fifo_errors++;
@@ -663,12 +654,13 @@ static void ti_hecc_change_state(struct net_device *ndev,
can_change_state(priv->ndev, cf, tx_state, rx_state);
if (max(tx_state, rx_state) != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = hecc_read(priv, HECC_CANTEC);
cf->data[7] = hecc_read(priv, HECC_CANREC);
}
timestamp = hecc_read(priv, HECC_CANLNT);
- err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
if (err)
ndev->stats.rx_fifo_errors++;
}
@@ -759,7 +751,6 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
can_rx_offload_get_echo_skb(&priv->offload,
mbxno, stamp, NULL);
stats->tx_packets++;
- can_led_event(ndev, CAN_LED_EVENT_TX);
--priv->tx_tail;
}
@@ -814,8 +805,6 @@ static int ti_hecc_open(struct net_device *ndev)
return err;
}
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
-
ti_hecc_start(ndev);
can_rx_offload_enable(&priv->offload);
netif_start_queue(ndev);
@@ -834,8 +823,6 @@ static int ti_hecc_close(struct net_device *ndev)
close_candev(ndev);
ti_hecc_transceiver_switch(priv, 0);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
-
return 0;
}
@@ -846,6 +833,10 @@ static const struct net_device_ops ti_hecc_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ti_hecc_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct of_device_id ti_hecc_dt_ids[] = {
{
.compatible = "ti,am3517-hecc",
@@ -859,7 +850,6 @@ static int ti_hecc_probe(struct platform_device *pdev)
struct net_device *ndev = (struct net_device *)0;
struct ti_hecc_priv *priv;
struct device_node *np = pdev->dev.of_node;
- struct resource *irq;
struct regulator *reg_xceiver;
int err = -ENODEV;
@@ -904,9 +894,9 @@ static int ti_hecc_probe(struct platform_device *pdev)
goto probe_exit_candev;
}
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(&pdev->dev, "No irq resource\n");
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ err = ndev->irq;
goto probe_exit_candev;
}
@@ -920,11 +910,11 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
spin_lock_init(&priv->mbx_lock);
- ndev->irq = irq->start;
ndev->flags |= IFF_ECHO;
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &ti_hecc_netdev_ops;
+ ndev->ethtool_ops = &ti_hecc_ethtool_ops;
priv->clk = clk_get(&pdev->dev, "hecc_ck");
if (IS_ERR(priv->clk)) {
@@ -956,8 +946,6 @@ static int ti_hecc_probe(struct platform_device *pdev)
goto probe_exit_offload;
}
- devm_can_led_init(ndev);
-
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
priv->base, (u32)ndev->irq);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index f959215c9d53..445504ababce 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -14,15 +14,23 @@ config CAN_EMS_USB
This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
-config CAN_ESD_USB2
- tristate "ESD USB/2 CAN/USB interface"
+config CAN_ESD_USB
+ tristate "esd electronics gmbh CAN/USB interfaces"
help
- This driver supports the CAN-USB/2 interface
- from esd electronic system design gmbh (http://www.esd.eu).
+	  This driver adds support for several CAN/USB interfaces
+ from esd electronics gmbh (https://www.esd.eu).
+
+	  The driver supports the following devices:
+ - esd CAN-USB/2
+ - esd CAN-USB/Micro
+
+ To compile this driver as a module, choose M here: the module
+ will be called esd_usb.
config CAN_ETAS_ES58X
tristate "ETAS ES58X CAN/USB interfaces"
select CRC16
+ select NET_DEVLINK
help
This driver supports the ES581.4, ES582.1 and ES584.1 interfaces
from ETAS GmbH (https://www.etas.com/en/products/es58x.php).
@@ -31,10 +39,13 @@ config CAN_ETAS_ES58X
will be called etas_es58x.
config CAN_GS_USB
- tristate "Geschwister Schneider UG interfaces"
+ tristate "Geschwister Schneider UG and candleLight compatible interfaces"
help
- This driver supports the Geschwister Schneider and bytewerk.org
- candleLight USB CAN interfaces USB/CAN devices
+ This driver supports the Geschwister Schneider and
+ bytewerk.org candleLight compatible
+ (https://github.com/candle-usb/candleLight_fw) USB/CAN
+ interfaces.
+
If unsure choose N,
choose Y for built in support,
M to compile as module (module will be named: gs_usb).
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index 748cf31a0d53..1ea16be5743b 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
-obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
+obj-$(CONFIG_CAN_ESD_USB) += esd_usb.o
obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x/
obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb/
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 2b5302e72435..050c0b49938a 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2004-2009 EMS Dr. Thomas Wuensche
*/
+#include <linux/ethtool.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -194,7 +195,7 @@ struct __packed ems_cpc_msg {
__le32 ts_sec; /* timestamp in seconds */
__le32 ts_nsec; /* timestamp in nano seconds */
- union {
+ union __packed {
u8 generic[64];
struct cpc_can_msg can_msg;
struct cpc_can_params can_params;
@@ -230,7 +231,6 @@ struct ems_tx_urb_context {
struct ems_usb *dev;
u32 echo_index;
- u8 dlc;
};
struct ems_usb {
@@ -320,10 +320,11 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
} else {
for (i = 0; i < cf->len; i++)
cf->data[i] = msg->msg.can_msg.msg[i];
- }
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_rx(skb);
}
@@ -397,8 +398,6 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
stats->rx_errors++;
}
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -518,9 +517,8 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
/* transmission complete interrupt */
netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += context->dlc;
-
- can_get_echo_skb(netdev, context->echo_index, NULL);
+ netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index,
+ NULL);
/* Release context */
context->echo_index = MAX_TX_URBS;
@@ -749,7 +747,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
+ sizeof(struct cpc_can_msg);
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* create a URB, and a buffer for it, and copy the data to the URB */
@@ -806,7 +804,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
context->dev = dev;
context->echo_index = i;
- context->dlc = cf->len;
usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
size, ems_usb_write_bulk_callback, context);
@@ -823,7 +820,6 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
- dev_kfree_skb(skb);
atomic_dec(&dev->active_tx_urbs);
@@ -884,8 +880,12 @@ static const struct net_device_ops ems_usb_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ems_usb_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const ems_usb_bittiming_const = {
- .name = "ems_usb",
+ .name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
@@ -995,6 +995,7 @@ static int ems_usb_probe(struct usb_interface *intf,
dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
netdev->netdev_ops = &ems_usb_netdev_ops;
+ netdev->ethtool_ops = &ems_usb_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
@@ -1079,7 +1080,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver ems_usb_driver = {
- .name = "ems_usb",
+ .name = KBUILD_MODNAME,
.probe = ems_usb_probe,
.disconnect = ems_usb_disconnect,
.id_table = ems_usb_table,
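A pattern that recurs across the ems_usb, esd_usb, sun4i and es58x hunks in this series: rx_bytes is only incremented for data frames (RTR frames carry no payload), generated error frames no longer bump the RX counters at all, and tx_bytes is taken from the length returned by can_get_echo_skb() instead of a cached dlc. A small sketch of the RX side of that accounting, with an illustrative helper name:

#include <linux/can.h>
#include <linux/netdevice.h>

/* Sketch only, not driver code: account for one received frame. */
static void demo_can_rx_stats(struct net_device_stats *stats,
			      const struct can_frame *cf)
{
	/* remote frames carry no data payload */
	if (!(cf->can_id & CAN_RTR_FLAG))
		stats->rx_bytes += cf->len;

	stats->rx_packets++;
}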
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb.c
index c6068a251fbe..d33bac3a6c10 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * CAN driver for esd CAN-USB/2 and CAN-USB/Micro
+ * CAN driver for esd electronics gmbh CAN-USB/2 and CAN-USB/Micro
*
- * Copyright (C) 2010-2012 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ * Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <socketcan@esd.eu>
+ * Copyright (C) 2022 esd electronics gmbh, Frank Jungclaus <frank.jungclaus@esd.eu>
*/
+#include <linux/ethtool.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -14,20 +16,24 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
-MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd.eu>");
-MODULE_DESCRIPTION("CAN driver for esd CAN-USB/2 and CAN-USB/Micro interfaces");
+MODULE_AUTHOR("Matthias Fuchs <socketcan@esd.eu>");
+MODULE_AUTHOR("Frank Jungclaus <frank.jungclaus@esd.eu>");
+MODULE_DESCRIPTION("CAN driver for esd electronics gmbh CAN-USB/2 and CAN-USB/Micro interfaces");
MODULE_LICENSE("GPL v2");
-/* Define these values to match your devices */
+/* USB vendor and product ID */
#define USB_ESDGMBH_VENDOR_ID 0x0ab4
#define USB_CANUSB2_PRODUCT_ID 0x0010
#define USB_CANUSBM_PRODUCT_ID 0x0011
+/* CAN controller clock frequencies */
#define ESD_USB2_CAN_CLOCK 60000000
#define ESD_USBM_CAN_CLOCK 36000000
-#define ESD_USB2_MAX_NETS 2
-/* USB2 commands */
+/* Maximum number of CAN nets */
+#define ESD_USB_MAX_NETS 2
+
+/* USB commands */
#define CMD_VERSION 1 /* also used for VERSION_REPLY */
#define CMD_CAN_RX 2 /* device to host only */
#define CMD_CAN_TX 3 /* also used for TX_DONE */
@@ -43,13 +49,15 @@ MODULE_LICENSE("GPL v2");
#define ESD_EVENT 0x40000000
#define ESD_IDMASK 0x1fffffff
-/* esd CAN event ids used by this driver */
-#define ESD_EV_CAN_ERROR_EXT 2
+/* esd CAN event ids */
+#define ESD_EV_CAN_ERROR_EXT 2 /* CAN controller specific diagnostic data */
/* baudrate message flags */
-#define ESD_USB2_UBR 0x80000000
-#define ESD_USB2_LOM 0x40000000
-#define ESD_USB2_NO_BAUDRATE 0x7fffffff
+#define ESD_USB_UBR 0x80000000
+#define ESD_USB_LOM 0x40000000
+#define ESD_USB_NO_BAUDRATE 0x7fffffff
+
+/* bit timing CAN-USB/2 */
#define ESD_USB2_TSEG1_MIN 1
#define ESD_USB2_TSEG1_MAX 16
#define ESD_USB2_TSEG1_SHIFT 16
@@ -68,7 +76,7 @@ MODULE_LICENSE("GPL v2");
#define ESD_ID_ENABLE 0x80
#define ESD_MAX_ID_SEGMENT 64
-/* SJA1000 ECC register (emulated by usb2 firmware) */
+/* SJA1000 ECC register (emulated by usb firmware) */
#define SJA1000_ECC_SEG 0x1F
#define SJA1000_ECC_DIR 0x20
#define SJA1000_ECC_ERR 0x06
@@ -119,7 +127,15 @@ struct rx_msg {
u8 dlc;
__le32 ts;
__le32 id; /* upper 3 bits contain flags */
- u8 data[8];
+ union {
+ u8 data[8];
+ struct {
+ u8 status; /* CAN Controller Status */
+ u8 ecc; /* Error Capture Register */
+ u8 rec; /* RX Error Counter */
+ u8 tec; /* TX Error Counter */
+ } ev_can_err_ext; /* For ESD_EV_CAN_ERROR_EXT */
+ };
};
struct tx_msg {
@@ -158,37 +174,34 @@ struct set_baudrate_msg {
};
/* Main message type used between library and application */
-struct __attribute__ ((packed)) esd_usb2_msg {
- union {
- struct header_msg hdr;
- struct version_msg version;
- struct version_reply_msg version_reply;
- struct rx_msg rx;
- struct tx_msg tx;
- struct tx_done_msg txdone;
- struct set_baudrate_msg setbaud;
- struct id_filter_msg filter;
- } msg;
+union __packed esd_usb_msg {
+ struct header_msg hdr;
+ struct version_msg version;
+ struct version_reply_msg version_reply;
+ struct rx_msg rx;
+ struct tx_msg tx;
+ struct tx_done_msg txdone;
+ struct set_baudrate_msg setbaud;
+ struct id_filter_msg filter;
};
-static struct usb_device_id esd_usb2_table[] = {
+static struct usb_device_id esd_usb_table[] = {
{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSB2_PRODUCT_ID)},
{USB_DEVICE(USB_ESDGMBH_VENDOR_ID, USB_CANUSBM_PRODUCT_ID)},
{}
};
-MODULE_DEVICE_TABLE(usb, esd_usb2_table);
+MODULE_DEVICE_TABLE(usb, esd_usb_table);
-struct esd_usb2_net_priv;
+struct esd_usb_net_priv;
struct esd_tx_urb_context {
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
u32 echo_index;
- int len; /* CAN payload length */
};
-struct esd_usb2 {
+struct esd_usb {
struct usb_device *udev;
- struct esd_usb2_net_priv *nets[ESD_USB2_MAX_NETS];
+ struct esd_usb_net_priv *nets[ESD_USB_MAX_NETS];
struct usb_anchor rx_submitted;
@@ -199,63 +212,80 @@ struct esd_usb2 {
dma_addr_t rxbuf_dma[MAX_RX_URBS];
};
-struct esd_usb2_net_priv {
+struct esd_usb_net_priv {
struct can_priv can; /* must be the first member */
atomic_t active_tx_jobs;
struct usb_anchor tx_submitted;
struct esd_tx_urb_context tx_contexts[MAX_TX_URBS];
- struct esd_usb2 *usb2;
+ struct esd_usb *usb;
struct net_device *netdev;
int index;
u8 old_state;
struct can_berr_counter bec;
};
-static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
- struct esd_usb2_msg *msg)
+static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
+ union esd_usb_msg *msg)
{
struct net_device_stats *stats = &priv->netdev->stats;
struct can_frame *cf;
struct sk_buff *skb;
- u32 id = le32_to_cpu(msg->msg.rx.id) & ESD_IDMASK;
+ u32 id = le32_to_cpu(msg->rx.id) & ESD_IDMASK;
if (id == ESD_EV_CAN_ERROR_EXT) {
- u8 state = msg->msg.rx.data[0];
- u8 ecc = msg->msg.rx.data[1];
- u8 rxerr = msg->msg.rx.data[2];
- u8 txerr = msg->msg.rx.data[3];
+ u8 state = msg->rx.ev_can_err_ext.status;
+ u8 ecc = msg->rx.ev_can_err_ext.ecc;
+
+ priv->bec.rxerr = msg->rx.ev_can_err_ext.rec;
+ priv->bec.txerr = msg->rx.ev_can_err_ext.tec;
+
+ netdev_dbg(priv->netdev,
+ "CAN_ERR_EV_EXT: dlc=%#02x state=%02x ecc=%02x rec=%02x tec=%02x\n",
+ msg->rx.dlc, state, ecc,
+ priv->bec.rxerr, priv->bec.txerr);
+
+ /* if berr-reporting is off, only pass through on state change ... */
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+ state == priv->old_state)
+ return;
skb = alloc_can_err_skb(priv->netdev, &cf);
- if (skb == NULL) {
+ if (!skb)
stats->rx_dropped++;
- return;
- }
if (state != priv->old_state) {
+ enum can_state tx_state, rx_state;
+ enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
+
priv->old_state = state;
switch (state & ESD_BUSSTATE_MASK) {
case ESD_BUSSTATE_BUSOFF:
- priv->can.state = CAN_STATE_BUS_OFF;
- cf->can_id |= CAN_ERR_BUSOFF;
- priv->can.can_stats.bus_off++;
+ new_state = CAN_STATE_BUS_OFF;
can_bus_off(priv->netdev);
break;
case ESD_BUSSTATE_WARN:
- priv->can.state = CAN_STATE_ERROR_WARNING;
- priv->can.can_stats.error_warning++;
+ new_state = CAN_STATE_ERROR_WARNING;
break;
case ESD_BUSSTATE_ERRPASSIVE:
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- priv->can.can_stats.error_passive++;
+ new_state = CAN_STATE_ERROR_PASSIVE;
break;
default:
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ new_state = CAN_STATE_ERROR_ACTIVE;
+ priv->bec.txerr = 0;
+ priv->bec.rxerr = 0;
break;
}
- } else {
+
+ if (new_state != priv->can.state) {
+ tx_state = (priv->bec.txerr >= priv->bec.rxerr) ? new_state : 0;
+ rx_state = (priv->bec.txerr <= priv->bec.rxerr) ? new_state : 0;
+ can_change_state(priv->netdev, cf,
+ tx_state, rx_state);
+ }
+ } else if (skb) {
priv->can.can_stats.bus_error++;
stats->rx_errors++;
@@ -272,7 +302,6 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
default:
- cf->data[3] = ecc & SJA1000_ECC_SEG;
break;
}
@@ -280,27 +309,22 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
if (!(ecc & SJA1000_ECC_DIR))
cf->data[2] |= CAN_ERR_PROT_TX;
- if (priv->can.state == CAN_STATE_ERROR_WARNING ||
- priv->can.state == CAN_STATE_ERROR_PASSIVE) {
- cf->data[1] = (txerr > rxerr) ?
- CAN_ERR_CRTL_TX_PASSIVE :
- CAN_ERR_CRTL_RX_PASSIVE;
- }
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
+ /* Bit stream position in CAN frame as the error was detected */
+ cf->data[3] = ecc & SJA1000_ECC_SEG;
}
- priv->bec.txerr = txerr;
- priv->bec.rxerr = rxerr;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = priv->bec.txerr;
+ cf->data[7] = priv->bec.rxerr;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
- netif_rx(skb);
+ netif_rx(skb);
+ }
}
}
-static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
- struct esd_usb2_msg *msg)
+static void esd_usb_rx_can_msg(struct esd_usb_net_priv *priv,
+ union esd_usb_msg *msg)
{
struct net_device_stats *stats = &priv->netdev->stats;
struct can_frame *cf;
@@ -311,10 +335,10 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
if (!netif_device_present(priv->netdev))
return;
- id = le32_to_cpu(msg->msg.rx.id);
+ id = le32_to_cpu(msg->rx.id);
if (id & ESD_EVENT) {
- esd_usb2_rx_event(priv, msg);
+ esd_usb_rx_event(priv, msg);
} else {
skb = alloc_can_skb(priv->netdev, &cf);
if (skb == NULL) {
@@ -323,29 +347,28 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
}
cf->can_id = id & ESD_IDMASK;
- can_frame_set_cc_len(cf, msg->msg.rx.dlc & ~ESD_RTR,
+ can_frame_set_cc_len(cf, msg->rx.dlc & ~ESD_RTR,
priv->can.ctrlmode);
if (id & ESD_EXTID)
cf->can_id |= CAN_EFF_FLAG;
- if (msg->msg.rx.dlc & ESD_RTR) {
+ if (msg->rx.dlc & ESD_RTR) {
cf->can_id |= CAN_RTR_FLAG;
} else {
for (i = 0; i < cf->len; i++)
- cf->data[i] = msg->msg.rx.data[i];
- }
+ cf->data[i] = msg->rx.data[i];
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_rx(skb);
}
-
- return;
}
-static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
- struct esd_usb2_msg *msg)
+static void esd_usb_tx_done_msg(struct esd_usb_net_priv *priv,
+ union esd_usb_msg *msg)
{
struct net_device_stats *stats = &priv->netdev->stats;
struct net_device *netdev = priv->netdev;
@@ -354,12 +377,12 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
if (!netif_device_present(netdev))
return;
- context = &priv->tx_contexts[msg->msg.txdone.hnd & (MAX_TX_URBS - 1)];
+ context = &priv->tx_contexts[msg->txdone.hnd & (MAX_TX_URBS - 1)];
- if (!msg->msg.txdone.status) {
+ if (!msg->txdone.status) {
stats->tx_packets++;
- stats->tx_bytes += context->len;
- can_get_echo_skb(netdev, context->echo_index, NULL);
+ stats->tx_bytes += can_get_echo_skb(netdev, context->echo_index,
+ NULL);
} else {
stats->tx_errors++;
can_free_echo_skb(netdev, context->echo_index, NULL);
@@ -372,9 +395,9 @@ static void esd_usb2_tx_done_msg(struct esd_usb2_net_priv *priv,
netif_wake_queue(netdev);
}
-static void esd_usb2_read_bulk_callback(struct urb *urb)
+static void esd_usb_read_bulk_callback(struct urb *urb)
{
- struct esd_usb2 *dev = urb->context;
+ struct esd_usb *dev = urb->context;
int retval;
int pos = 0;
int i;
@@ -396,32 +419,32 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
}
while (pos < urb->actual_length) {
- struct esd_usb2_msg *msg;
+ union esd_usb_msg *msg;
- msg = (struct esd_usb2_msg *)(urb->transfer_buffer + pos);
+ msg = (union esd_usb_msg *)(urb->transfer_buffer + pos);
- switch (msg->msg.hdr.cmd) {
+ switch (msg->hdr.cmd) {
case CMD_CAN_RX:
- if (msg->msg.rx.net >= dev->net_count) {
+ if (msg->rx.net >= dev->net_count) {
dev_err(dev->udev->dev.parent, "format error\n");
break;
}
- esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg);
+ esd_usb_rx_can_msg(dev->nets[msg->rx.net], msg);
break;
case CMD_CAN_TX:
- if (msg->msg.txdone.net >= dev->net_count) {
+ if (msg->txdone.net >= dev->net_count) {
dev_err(dev->udev->dev.parent, "format error\n");
break;
}
- esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net],
- msg);
+ esd_usb_tx_done_msg(dev->nets[msg->txdone.net],
+ msg);
break;
}
- pos += msg->msg.hdr.len << 2;
+ pos += msg->hdr.len << 2;
if (pos > urb->actual_length) {
dev_err(dev->udev->dev.parent, "format error\n");
@@ -432,7 +455,7 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1),
urb->transfer_buffer, RX_BUFFER_SIZE,
- esd_usb2_read_bulk_callback, dev);
+ esd_usb_read_bulk_callback, dev);
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval == -ENODEV) {
@@ -444,19 +467,15 @@ resubmit_urb:
dev_err(dev->udev->dev.parent,
"failed resubmitting read bulk urb: %d\n", retval);
}
-
- return;
}
-/*
- * callback for bulk IN urb
- */
-static void esd_usb2_write_bulk_callback(struct urb *urb)
+/* callback for bulk IN urb */
+static void esd_usb_write_bulk_callback(struct urb *urb)
{
struct esd_tx_urb_context *context = urb->context;
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
struct net_device *netdev;
- size_t size = sizeof(struct esd_usb2_msg);
+ size_t size = sizeof(union esd_usb_msg);
WARN_ON(!context);
@@ -480,7 +499,7 @@ static ssize_t firmware_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
return sprintf(buf, "%d.%d.%d\n",
(dev->version >> 12) & 0xf,
@@ -493,7 +512,7 @@ static ssize_t hardware_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
return sprintf(buf, "%d.%d.%d\n",
(dev->version >> 28) & 0xf,
@@ -506,26 +525,26 @@ static ssize_t nets_show(struct device *d,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(d);
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
return sprintf(buf, "%d", dev->net_count);
}
static DEVICE_ATTR_RO(nets);
-static int esd_usb2_send_msg(struct esd_usb2 *dev, struct esd_usb2_msg *msg)
+static int esd_usb_send_msg(struct esd_usb *dev, union esd_usb_msg *msg)
{
int actual_length;
return usb_bulk_msg(dev->udev,
usb_sndbulkpipe(dev->udev, 2),
msg,
- msg->msg.hdr.len << 2,
+ msg->hdr.len << 2,
&actual_length,
1000);
}
-static int esd_usb2_wait_msg(struct esd_usb2 *dev,
- struct esd_usb2_msg *msg)
+static int esd_usb_wait_msg(struct esd_usb *dev,
+ union esd_usb_msg *msg)
{
int actual_length;
@@ -537,7 +556,7 @@ static int esd_usb2_wait_msg(struct esd_usb2 *dev,
1000);
}
-static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
+static int esd_usb_setup_rx_urbs(struct esd_usb *dev)
{
int i, err = 0;
@@ -570,7 +589,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
usb_fill_bulk_urb(urb, dev->udev,
usb_rcvbulkpipe(dev->udev, 1),
buf, RX_BUFFER_SIZE,
- esd_usb2_read_bulk_callback, dev);
+ esd_usb_read_bulk_callback, dev);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &dev->rx_submitted);
@@ -608,14 +627,12 @@ freeurb:
return 0;
}
-/*
- * Start interface
- */
-static int esd_usb2_start(struct esd_usb2_net_priv *priv)
+/* Start interface */
+static int esd_usb_start(struct esd_usb_net_priv *priv)
{
- struct esd_usb2 *dev = priv->usb2;
+ struct esd_usb *dev = priv->usb;
struct net_device *netdev = priv->netdev;
- struct esd_usb2_msg *msg;
+ union esd_usb_msg *msg;
int err, i;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
@@ -624,8 +641,7 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv)
goto out;
}
- /*
- * Enable all IDs
+ /* Enable all IDs
* The IDADD message takes up to 64 32 bit bitmasks (2048 bits).
* Each bit represents one 11 bit CAN identifier. A set bit
* enables reception of the corresponding CAN identifier. A cleared
@@ -637,20 +653,20 @@ static int esd_usb2_start(struct esd_usb2_net_priv *priv)
* the number of the starting bitmask (0..64) to the filter.option
* field followed by only some bitmasks.
*/
- msg->msg.hdr.cmd = CMD_IDADD;
- msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
- msg->msg.filter.net = priv->index;
- msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
+ msg->hdr.cmd = CMD_IDADD;
+ msg->hdr.len = 2 + ESD_MAX_ID_SEGMENT;
+ msg->filter.net = priv->index;
+ msg->filter.option = ESD_ID_ENABLE; /* start with segment 0 */
for (i = 0; i < ESD_MAX_ID_SEGMENT; i++)
- msg->msg.filter.mask[i] = cpu_to_le32(0xffffffff);
+ msg->filter.mask[i] = cpu_to_le32(0xffffffff);
/* enable 29bit extended IDs */
- msg->msg.filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);
+ msg->filter.mask[ESD_MAX_ID_SEGMENT] = cpu_to_le32(0x00000001);
- err = esd_usb2_send_msg(dev, msg);
+ err = esd_usb_send_msg(dev, msg);
if (err)
goto out;
- err = esd_usb2_setup_rx_urbs(dev);
+ err = esd_usb_setup_rx_urbs(dev);
if (err)
goto out;
@@ -666,9 +682,9 @@ out:
return err;
}
-static void unlink_all_urbs(struct esd_usb2 *dev)
+static void unlink_all_urbs(struct esd_usb *dev)
{
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
int i, j;
usb_kill_anchored_urbs(&dev->rx_submitted);
@@ -689,9 +705,9 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
}
}
-static int esd_usb2_open(struct net_device *netdev)
+static int esd_usb_open(struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
int err;
/* common open */
@@ -700,7 +716,7 @@ static int esd_usb2_open(struct net_device *netdev)
return err;
/* finally start device */
- err = esd_usb2_start(priv);
+ err = esd_usb_start(priv);
if (err) {
netdev_warn(netdev, "couldn't start device: %d\n", err);
close_candev(netdev);
@@ -712,22 +728,22 @@ static int esd_usb2_open(struct net_device *netdev)
return 0;
}
-static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
+static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
- struct esd_usb2 *dev = priv->usb2;
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb *dev = priv->usb;
struct esd_tx_urb_context *context = NULL;
struct net_device_stats *stats = &netdev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
- struct esd_usb2_msg *msg;
+ union esd_usb_msg *msg;
struct urb *urb;
u8 *buf;
int i, err;
int ret = NETDEV_TX_OK;
- size_t size = sizeof(struct esd_usb2_msg);
+ size_t size = sizeof(union esd_usb_msg);
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* create a URB, and a buffer for it, and copy the data to the URB */
@@ -747,24 +763,24 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
goto nobufmem;
}
- msg = (struct esd_usb2_msg *)buf;
+ msg = (union esd_usb_msg *)buf;
- msg->msg.hdr.len = 3; /* minimal length */
- msg->msg.hdr.cmd = CMD_CAN_TX;
- msg->msg.tx.net = priv->index;
- msg->msg.tx.dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
- msg->msg.tx.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);
+ msg->hdr.len = 3; /* minimal length */
+ msg->hdr.cmd = CMD_CAN_TX;
+ msg->tx.net = priv->index;
+ msg->tx.dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
+ msg->tx.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);
if (cf->can_id & CAN_RTR_FLAG)
- msg->msg.tx.dlc |= ESD_RTR;
+ msg->tx.dlc |= ESD_RTR;
if (cf->can_id & CAN_EFF_FLAG)
- msg->msg.tx.id |= cpu_to_le32(ESD_EXTID);
+ msg->tx.id |= cpu_to_le32(ESD_EXTID);
for (i = 0; i < cf->len; i++)
- msg->msg.tx.data[i] = cf->data[i];
+ msg->tx.data[i] = cf->data[i];
- msg->msg.hdr.len += (cf->len + 3) >> 2;
+ msg->hdr.len += (cf->len + 3) >> 2;
for (i = 0; i < MAX_TX_URBS; i++) {
if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
@@ -773,9 +789,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
}
}
- /*
- * This may never happen.
- */
+ /* This may never happen */
if (!context) {
netdev_warn(netdev, "couldn't find free context\n");
ret = NETDEV_TX_BUSY;
@@ -784,14 +798,13 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
context->priv = priv;
context->echo_index = i;
- context->len = cf->len;
/* hnd must not be 0 - MSB is stripped in txdone handling */
- msg->msg.tx.hnd = 0x80000000 | i; /* returned in TX done message */
+ msg->tx.hnd = 0x80000000 | i; /* returned in TX done message */
usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
- msg->msg.hdr.len << 2,
- esd_usb2_write_bulk_callback, context);
+ msg->hdr.len << 2,
+ esd_usb_write_bulk_callback, context);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -824,8 +837,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
netif_trans_update(netdev);
- /*
- * Release our reference to this URB, the USB core will eventually free
+ /* Release our reference to this URB, the USB core will eventually free
* it entirely.
*/
usb_free_urb(urb);
@@ -842,33 +854,33 @@ nourbmem:
return ret;
}
-static int esd_usb2_close(struct net_device *netdev)
+static int esd_usb_close(struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
- struct esd_usb2_msg *msg;
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
+ union esd_usb_msg *msg;
int i;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
if (!msg)
return -ENOMEM;
- /* Disable all IDs (see esd_usb2_start()) */
- msg->msg.hdr.cmd = CMD_IDADD;
- msg->msg.hdr.len = 2 + ESD_MAX_ID_SEGMENT;
- msg->msg.filter.net = priv->index;
- msg->msg.filter.option = ESD_ID_ENABLE; /* start with segment 0 */
+ /* Disable all IDs (see esd_usb_start()) */
+ msg->hdr.cmd = CMD_IDADD;
+ msg->hdr.len = 2 + ESD_MAX_ID_SEGMENT;
+ msg->filter.net = priv->index;
+ msg->filter.option = ESD_ID_ENABLE; /* start with segment 0 */
for (i = 0; i <= ESD_MAX_ID_SEGMENT; i++)
- msg->msg.filter.mask[i] = 0;
- if (esd_usb2_send_msg(priv->usb2, msg) < 0)
+ msg->filter.mask[i] = 0;
+ if (esd_usb_send_msg(priv->usb, msg) < 0)
netdev_err(netdev, "sending idadd message failed\n");
/* set CAN controller to reset mode */
- msg->msg.hdr.len = 2;
- msg->msg.hdr.cmd = CMD_SETBAUD;
- msg->msg.setbaud.net = priv->index;
- msg->msg.setbaud.rsvd = 0;
- msg->msg.setbaud.baud = cpu_to_le32(ESD_USB2_NO_BAUDRATE);
- if (esd_usb2_send_msg(priv->usb2, msg) < 0)
+ msg->hdr.len = 2;
+ msg->hdr.cmd = CMD_SETBAUD;
+ msg->setbaud.net = priv->index;
+ msg->setbaud.rsvd = 0;
+ msg->setbaud.baud = cpu_to_le32(ESD_USB_NO_BAUDRATE);
+ if (esd_usb_send_msg(priv->usb, msg) < 0)
netdev_err(netdev, "sending setbaud message failed\n");
priv->can.state = CAN_STATE_STOPPED;
@@ -882,13 +894,17 @@ static int esd_usb2_close(struct net_device *netdev)
return 0;
}
-static const struct net_device_ops esd_usb2_netdev_ops = {
- .ndo_open = esd_usb2_open,
- .ndo_stop = esd_usb2_close,
- .ndo_start_xmit = esd_usb2_start_xmit,
+static const struct net_device_ops esd_usb_netdev_ops = {
+ .ndo_open = esd_usb_open,
+ .ndo_stop = esd_usb_close,
+ .ndo_start_xmit = esd_usb_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops esd_usb_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const esd_usb2_bittiming_const = {
.name = "esd_usb2",
.tseg1_min = ESD_USB2_TSEG1_MIN,
@@ -903,20 +919,20 @@ static const struct can_bittiming_const esd_usb2_bittiming_const = {
static int esd_usb2_set_bittiming(struct net_device *netdev)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
struct can_bittiming *bt = &priv->can.bittiming;
- struct esd_usb2_msg *msg;
+ union esd_usb_msg *msg;
int err;
u32 canbtr;
int sjw_shift;
- canbtr = ESD_USB2_UBR;
+ canbtr = ESD_USB_UBR;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
- canbtr |= ESD_USB2_LOM;
+ canbtr |= ESD_USB_LOM;
canbtr |= (bt->brp - 1) & (ESD_USB2_BRP_MAX - 1);
- if (le16_to_cpu(priv->usb2->udev->descriptor.idProduct) ==
+ if (le16_to_cpu(priv->usb->udev->descriptor.idProduct) ==
USB_CANUSBM_PRODUCT_ID)
sjw_shift = ESD_USBM_SJW_SHIFT;
else
@@ -936,24 +952,24 @@ static int esd_usb2_set_bittiming(struct net_device *netdev)
if (!msg)
return -ENOMEM;
- msg->msg.hdr.len = 2;
- msg->msg.hdr.cmd = CMD_SETBAUD;
- msg->msg.setbaud.net = priv->index;
- msg->msg.setbaud.rsvd = 0;
- msg->msg.setbaud.baud = cpu_to_le32(canbtr);
+ msg->hdr.len = 2;
+ msg->hdr.cmd = CMD_SETBAUD;
+ msg->setbaud.net = priv->index;
+ msg->setbaud.rsvd = 0;
+ msg->setbaud.baud = cpu_to_le32(canbtr);
netdev_info(netdev, "setting BTR=%#x\n", canbtr);
- err = esd_usb2_send_msg(priv->usb2, msg);
+ err = esd_usb_send_msg(priv->usb, msg);
kfree(msg);
return err;
}
-static int esd_usb2_get_berr_counter(const struct net_device *netdev,
- struct can_berr_counter *bec)
+static int esd_usb_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
{
- struct esd_usb2_net_priv *priv = netdev_priv(netdev);
+ struct esd_usb_net_priv *priv = netdev_priv(netdev);
bec->txerr = priv->bec.txerr;
bec->rxerr = priv->bec.rxerr;
@@ -961,7 +977,7 @@ static int esd_usb2_get_berr_counter(const struct net_device *netdev,
return 0;
}
-static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode)
+static int esd_usb_set_mode(struct net_device *netdev, enum can_mode mode)
{
switch (mode) {
case CAN_MODE_START:
@@ -975,11 +991,11 @@ static int esd_usb2_set_mode(struct net_device *netdev, enum can_mode mode)
return 0;
}
-static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
+static int esd_usb_probe_one_net(struct usb_interface *intf, int index)
{
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
struct net_device *netdev;
- struct esd_usb2_net_priv *priv;
+ struct esd_usb_net_priv *priv;
int err = 0;
int i;
@@ -998,13 +1014,14 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
for (i = 0; i < MAX_TX_URBS; i++)
priv->tx_contexts[i].echo_index = MAX_TX_URBS;
- priv->usb2 = dev;
+ priv->usb = dev;
priv->netdev = netdev;
priv->index = index;
priv->can.state = CAN_STATE_STOPPED;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
- CAN_CTRLMODE_CC_LEN8_DLC;
+ CAN_CTRLMODE_CC_LEN8_DLC |
+ CAN_CTRLMODE_BERR_REPORTING;
if (le16_to_cpu(dev->udev->descriptor.idProduct) ==
USB_CANUSBM_PRODUCT_ID)
@@ -1016,12 +1033,13 @@ static int esd_usb2_probe_one_net(struct usb_interface *intf, int index)
priv->can.bittiming_const = &esd_usb2_bittiming_const;
priv->can.do_set_bittiming = esd_usb2_set_bittiming;
- priv->can.do_set_mode = esd_usb2_set_mode;
- priv->can.do_get_berr_counter = esd_usb2_get_berr_counter;
+ priv->can.do_set_mode = esd_usb_set_mode;
+ priv->can.do_get_berr_counter = esd_usb_get_berr_counter;
netdev->flags |= IFF_ECHO; /* we support local echo */
- netdev->netdev_ops = &esd_usb2_netdev_ops;
+ netdev->netdev_ops = &esd_usb_netdev_ops;
+ netdev->ethtool_ops = &esd_usb_ethtool_ops;
SET_NETDEV_DEV(netdev, &intf->dev);
netdev->dev_id = index;
@@ -1041,17 +1059,16 @@ done:
return err;
}
-/*
- * probe function for new USB2 devices
+/* probe function for new USB devices
*
* check version information and number of available
* CAN interfaces
*/
-static int esd_usb2_probe(struct usb_interface *intf,
+static int esd_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct esd_usb2 *dev;
- struct esd_usb2_msg *msg;
+ struct esd_usb *dev;
+ union esd_usb_msg *msg;
int i, err;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1073,26 +1090,26 @@ static int esd_usb2_probe(struct usb_interface *intf,
}
/* query number of CAN interfaces (nets) */
- msg->msg.hdr.cmd = CMD_VERSION;
- msg->msg.hdr.len = 2;
- msg->msg.version.rsvd = 0;
- msg->msg.version.flags = 0;
- msg->msg.version.drv_version = 0;
+ msg->hdr.cmd = CMD_VERSION;
+ msg->hdr.len = 2;
+ msg->version.rsvd = 0;
+ msg->version.flags = 0;
+ msg->version.drv_version = 0;
- err = esd_usb2_send_msg(dev, msg);
+ err = esd_usb_send_msg(dev, msg);
if (err < 0) {
dev_err(&intf->dev, "sending version message failed\n");
goto free_msg;
}
- err = esd_usb2_wait_msg(dev, msg);
+ err = esd_usb_wait_msg(dev, msg);
if (err < 0) {
dev_err(&intf->dev, "no version message answer\n");
goto free_msg;
}
- dev->net_count = (int)msg->msg.version_reply.nets;
- dev->version = le32_to_cpu(msg->msg.version_reply.version);
+ dev->net_count = (int)msg->version_reply.nets;
+ dev->version = le32_to_cpu(msg->version_reply.version);
if (device_create_file(&intf->dev, &dev_attr_firmware))
dev_err(&intf->dev,
@@ -1108,7 +1125,7 @@ static int esd_usb2_probe(struct usb_interface *intf,
/* do per device probing */
for (i = 0; i < dev->net_count; i++)
- esd_usb2_probe_one_net(intf, i);
+ esd_usb_probe_one_net(intf, i);
free_msg:
kfree(msg);
@@ -1118,12 +1135,10 @@ done:
return err;
}
-/*
- * called by the usb core when the device is removed from the system
- */
-static void esd_usb2_disconnect(struct usb_interface *intf)
+/* called by the usb core when the device is removed from the system */
+static void esd_usb_disconnect(struct usb_interface *intf)
{
- struct esd_usb2 *dev = usb_get_intfdata(intf);
+ struct esd_usb *dev = usb_get_intfdata(intf);
struct net_device *netdev;
int i;
@@ -1147,11 +1162,11 @@ static void esd_usb2_disconnect(struct usb_interface *intf)
}
/* usb specific object needed to register this driver with the usb subsystem */
-static struct usb_driver esd_usb2_driver = {
- .name = "esd_usb2",
- .probe = esd_usb2_probe,
- .disconnect = esd_usb2_disconnect,
- .id_table = esd_usb2_table,
+static struct usb_driver esd_usb_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = esd_usb_probe,
+ .disconnect = esd_usb_disconnect,
+ .id_table = esd_usb_table,
};
-module_usb_driver(esd_usb2_driver);
+module_usb_driver(esd_usb_driver);
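The CMD_IDADD handling in esd_usb_start() and esd_usb_close() above programs the acceptance filter as 64 little-endian 32-bit mask words (2048 bits), one bit per 11-bit CAN identifier, plus a further word whose bit 0 enables 29-bit extended IDs; the driver itself only ever enables all IDs at open and clears the masks at close. As a hedged illustration of the layout described in that comment, enabling a single standard ID would presumably come down to word id / 32, bit id % 32 (the helper below is made up, not part of the driver):

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define DEMO_ID_SEGMENTS	64	/* 64 x 32 bits = 2048 standard IDs */

/* Sketch only: set the filter bit for one 11-bit CAN identifier,
 * assuming bit N of the 2048-bit map corresponds to identifier N.
 */
static void demo_filter_enable_id(__le32 mask[DEMO_ID_SEGMENTS], u16 can_id)
{
	u32 word = le32_to_cpu(mask[can_id / 32]);

	word |= BIT(can_id % 32);
	mask[can_id / 32] = cpu_to_le32(word);
}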
diff --git a/drivers/net/can/usb/etas_es58x/Makefile b/drivers/net/can/usb/etas_es58x/Makefile
index a129b4aa0215..d6667ebe259f 100644
--- a/drivers/net/can/usb/etas_es58x/Makefile
+++ b/drivers/net/can/usb/etas_es58x/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CAN_ETAS_ES58X) += etas_es58x.o
-etas_es58x-y = es58x_core.o es581_4.o es58x_fd.o
+etas_es58x-y = es58x_core.o es58x_devlink.o es581_4.o es58x_fd.o
diff --git a/drivers/net/can/usb/etas_es58x/es581_4.c b/drivers/net/can/usb/etas_es58x/es581_4.c
index 14e360c9f2c9..4151b18fd045 100644
--- a/drivers/net/can/usb/etas_es58x/es581_4.c
+++ b/drivers/net/can/usb/etas_es58x/es581_4.c
@@ -6,11 +6,12 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
- * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
-#include <linux/kernel.h>
#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/units.h>
#include "es58x_core.h"
#include "es581_4.h"
@@ -469,8 +470,8 @@ const struct es58x_parameters es581_4_param = {
.bittiming_const = &es581_4_bittiming_const,
.data_bittiming_const = NULL,
.tdc_const = NULL,
- .bitrate_max = 1 * CAN_MBPS,
- .clock = {.freq = 50 * CAN_MHZ},
+ .bitrate_max = 1 * MEGA /* BPS */,
+ .clock = {.freq = 50 * MEGA /* Hz */},
.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC,
.tx_start_of_frame = 0xAFAF,
.rx_start_of_frame = 0xFAFA,
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index 24627ab14626..0c7f7505632c 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -7,25 +7,24 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
- * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
+#include <asm/unaligned.h>
+#include <linux/crc16.h>
+#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
-#include <linux/crc16.h>
-#include <asm/unaligned.h>
+#include <net/devlink.h>
#include "es58x_core.h"
-#define DRV_VERSION "1.00"
MODULE_AUTHOR("Vincent Mailhol <mailhol.vincent@wanadoo.fr>");
MODULE_AUTHOR("Arunachalam Santhanam <arunachalam.santhanam@in.bosch.com>");
MODULE_DESCRIPTION("Socket CAN driver for ETAS ES58X USB adapters");
-MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL v2");
-#define ES58X_MODULE_NAME "etas_es58x"
#define ES58X_VENDOR_ID 0x108C
#define ES581_4_PRODUCT_ID 0x0159
#define ES582_1_PRODUCT_ID 0x0168
@@ -59,11 +58,11 @@ MODULE_DEVICE_TABLE(usb, es58x_id_table);
#define es58x_print_hex_dump(buf, len) \
print_hex_dump(KERN_DEBUG, \
- ES58X_MODULE_NAME " " __stringify(buf) ": ", \
+ KBUILD_MODNAME " " __stringify(buf) ": ", \
DUMP_PREFIX_NONE, 16, 1, buf, len, false)
#define es58x_print_hex_dump_debug(buf, len) \
- print_hex_dump_debug(ES58X_MODULE_NAME " " __stringify(buf) ": ",\
+ print_hex_dump_debug(KBUILD_MODNAME " " __stringify(buf) ": ",\
DUMP_PREFIX_NONE, 16, 1, buf, len, false)
/* The last two bytes of an ES58X command is a CRC16. The first two
@@ -849,13 +848,6 @@ int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error,
break;
}
- /* driver/net/can/dev.c:can_restart() takes in account error
- * messages in the RX stats. Doing the same here for
- * consistency.
- */
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += CAN_ERR_DLC;
-
if (cf) {
if (cf->data[1])
cf->can_id |= CAN_ERR_CRTL;
@@ -1469,10 +1461,6 @@ static void es58x_read_bulk_callback(struct urb *urb)
}
resubmit_urb:
- usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->rx_pipe,
- urb->transfer_buffer, urb->transfer_buffer_length,
- es58x_read_bulk_callback, es58x_dev);
-
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret == -ENODEV) {
for (i = 0; i < es58x_dev->num_can_ch; i++)
@@ -1606,7 +1594,8 @@ static struct urb *es58x_get_tx_urb(struct es58x_device *es58x_dev)
return NULL;
usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe,
- buf, tx_buf_len, NULL, NULL);
+ buf, tx_buf_len, es58x_write_bulk_callback,
+ NULL);
return urb;
}
@@ -1639,9 +1628,7 @@ static int es58x_submit_urb(struct es58x_device *es58x_dev, struct urb *urb,
int ret;
es58x_set_crc(urb->transfer_buffer, urb->transfer_buffer_length);
- usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe,
- urb->transfer_buffer, urb->transfer_buffer_length,
- es58x_write_bulk_callback, netdev);
+ urb->context = netdev;
usb_anchor_urb(urb, &es58x_dev->tx_urbs_busy);
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret) {
@@ -1714,7 +1701,7 @@ static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev)
{
const struct device *dev = es58x_dev->dev;
const struct es58x_parameters *param = es58x_dev->param;
- size_t rx_buf_len = es58x_dev->rx_max_packet_size;
+ u16 rx_buf_len = usb_maxpacket(es58x_dev->udev, es58x_dev->rx_pipe);
struct urb *urb;
u8 *buf;
int i;
@@ -1746,7 +1733,7 @@ static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev)
dev_err(dev, "%s: Could not setup any rx URBs\n", __func__);
return ret;
}
- dev_dbg(dev, "%s: Allocated %d rx URBs each of size %zu\n",
+ dev_dbg(dev, "%s: Allocated %d rx URBs each of size %u\n",
__func__, i, rx_buf_len);
return ret;
@@ -1794,7 +1781,7 @@ static int es58x_open(struct net_device *netdev)
struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev;
int ret;
- if (atomic_inc_return(&es58x_dev->opened_channel_cnt) == 1) {
+ if (!es58x_dev->opened_channel_cnt) {
ret = es58x_alloc_rx_urbs(es58x_dev);
if (ret)
return ret;
@@ -1812,12 +1799,13 @@ static int es58x_open(struct net_device *netdev)
if (ret)
goto free_urbs;
+ es58x_dev->opened_channel_cnt++;
netif_start_queue(netdev);
return ret;
free_urbs:
- if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+ if (!es58x_dev->opened_channel_cnt)
es58x_free_urbs(es58x_dev);
netdev_err(netdev, "%s: Could not open the network device: %pe\n",
__func__, ERR_PTR(ret));
@@ -1852,7 +1840,8 @@ static int es58x_stop(struct net_device *netdev)
es58x_flush_pending_tx_msg(netdev);
- if (atomic_dec_and_test(&es58x_dev->opened_channel_cnt))
+ es58x_dev->opened_channel_cnt--;
+ if (!es58x_dev->opened_channel_cnt)
es58x_free_urbs(es58x_dev);
return 0;
@@ -1925,7 +1914,7 @@ static netdev_tx_t es58x_start_xmit(struct sk_buff *skb,
unsigned int frame_len;
int ret;
- if (can_dropped_invalid_skb(netdev, skb)) {
+ if (can_dev_dropped_skb(netdev, skb)) {
if (priv->tx_urb)
goto xmit_commit;
return NETDEV_TX_OK;
@@ -1986,7 +1975,12 @@ static netdev_tx_t es58x_start_xmit(struct sk_buff *skb,
static const struct net_device_ops es58x_netdev_ops = {
.ndo_open = es58x_open,
.ndo_stop = es58x_stop,
- .ndo_start_xmit = es58x_start_xmit
+ .ndo_start_xmit = es58x_start_xmit,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
+};
+
+static const struct ethtool_ops es58x_ethtool_ops = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
};
/**
@@ -2045,10 +2039,16 @@ static int es58x_set_mode(struct net_device *netdev, enum can_mode mode)
* @es58x_dev: ES58X device.
* @priv: ES58X private parameters related to the network device.
* @channel_idx: Index of the network device.
+ *
+ * Return: zero on success, errno if devlink port could not be
+ * properly registered.
*/
-static void es58x_init_priv(struct es58x_device *es58x_dev,
- struct es58x_priv *priv, int channel_idx)
+static int es58x_init_priv(struct es58x_device *es58x_dev,
+ struct es58x_priv *priv, int channel_idx)
{
+ struct devlink_port_attrs attrs = {
+ .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ };
const struct es58x_parameters *param = es58x_dev->param;
struct can_priv *can = &priv->can;
@@ -2067,6 +2067,10 @@ static void es58x_init_priv(struct es58x_device *es58x_dev,
can->state = CAN_STATE_STOPPED;
can->ctrlmode_supported = param->ctrlmode_supported;
can->do_set_mode = es58x_set_mode;
+
+ devlink_port_attrs_set(&priv->devlink_port, &attrs);
+ return devlink_port_register(priv_to_devlink(es58x_dev),
+ &priv->devlink_port, channel_idx);
}
/**
@@ -2090,19 +2094,31 @@ static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx)
}
SET_NETDEV_DEV(netdev, dev);
es58x_dev->netdev[channel_idx] = netdev;
- es58x_init_priv(es58x_dev, es58x_priv(netdev), channel_idx);
+ ret = es58x_init_priv(es58x_dev, es58x_priv(netdev), channel_idx);
+ if (ret)
+ goto free_candev;
+ SET_NETDEV_DEVLINK_PORT(netdev, &es58x_priv(netdev)->devlink_port);
netdev->netdev_ops = &es58x_netdev_ops;
+ netdev->ethtool_ops = &es58x_ethtool_ops;
netdev->flags |= IFF_ECHO; /* We support local echo */
+ netdev->dev_port = channel_idx;
ret = register_candev(netdev);
if (ret)
- return ret;
+ goto devlink_port_unregister;
netdev_queue_set_dql_min_limit(netdev_get_tx_queue(netdev, 0),
es58x_dev->param->dql_min_limit);
return ret;
+
+ devlink_port_unregister:
+ devlink_port_unregister(&es58x_priv(netdev)->devlink_port);
+ free_candev:
+ es58x_dev->netdev[channel_idx] = NULL;
+ free_candev(netdev);
+ return ret;
}
/**
@@ -2119,54 +2135,13 @@ static void es58x_free_netdevs(struct es58x_device *es58x_dev)
if (!netdev)
continue;
unregister_candev(netdev);
+ devlink_port_unregister(&es58x_priv(netdev)->devlink_port);
es58x_dev->netdev[i] = NULL;
free_candev(netdev);
}
}
/**
- * es58x_get_product_info() - Get the product information and print them.
- * @es58x_dev: ES58X device.
- *
- * Do a synchronous call to get the product information.
- *
- * Return: zero on success, errno when any error occurs.
- */
-static int es58x_get_product_info(struct es58x_device *es58x_dev)
-{
- struct usb_device *udev = es58x_dev->udev;
- const int es58x_prod_info_idx = 6;
- /* Empirical tests show a prod_info length of maximum 83,
- * below should be more than enough.
- */
- const size_t prod_info_len = 127;
- char *prod_info;
- int ret;
-
- prod_info = kmalloc(prod_info_len, GFP_KERNEL);
- if (!prod_info)
- return -ENOMEM;
-
- ret = usb_string(udev, es58x_prod_info_idx, prod_info, prod_info_len);
- if (ret < 0) {
- dev_err(es58x_dev->dev,
- "%s: Could not read the product info: %pe\n",
- __func__, ERR_PTR(ret));
- goto out_free;
- }
- if (ret >= prod_info_len - 1) {
- dev_warn(es58x_dev->dev,
- "%s: Buffer is too small, result might be truncated\n",
- __func__);
- }
- dev_info(es58x_dev->dev, "Product info: %s\n", prod_info);
-
- out_free:
- kfree(prod_info);
- return ret < 0 ? ret : 0;
-}
-
-/**
* es58x_init_es58x_dev() - Initialize the ES58X device.
* @intf: USB interface.
* @driver_info: Quirks of the device.
@@ -2179,15 +2154,15 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
{
struct device *dev = &intf->dev;
struct es58x_device *es58x_dev;
+ struct devlink *devlink;
const struct es58x_parameters *param;
const struct es58x_operators *ops;
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_endpoint_descriptor *ep_in, *ep_out;
int ret;
- dev_info(dev,
- "Starting %s %s (Serial Number %s) driver version %s\n",
- udev->manufacturer, udev->product, udev->serial, DRV_VERSION);
+ dev_info(dev, "Starting %s %s (Serial Number %s)\n",
+ udev->manufacturer, udev->product, udev->serial);
ret = usb_find_common_endpoints(intf->cur_altsetting, &ep_in, &ep_out,
NULL, NULL);
@@ -2202,11 +2177,12 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
ops = &es581_4_ops;
}
- es58x_dev = devm_kzalloc(dev, es58x_sizeof_es58x_device(param),
- GFP_KERNEL);
- if (!es58x_dev)
+ devlink = devlink_alloc(&es58x_dl_ops, es58x_sizeof_es58x_device(param),
+ dev);
+ if (!devlink)
return ERR_PTR(-ENOMEM);
+ es58x_dev = devlink_priv(devlink);
es58x_dev->param = param;
es58x_dev->ops = ops;
es58x_dev->dev = dev;
@@ -2221,14 +2197,12 @@ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf,
init_usb_anchor(&es58x_dev->tx_urbs_idle);
init_usb_anchor(&es58x_dev->tx_urbs_busy);
atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0);
- atomic_set(&es58x_dev->opened_channel_cnt, 0);
usb_set_intfdata(intf, es58x_dev);
es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev,
ep_in->bEndpointAddress);
es58x_dev->tx_pipe = usb_sndbulkpipe(es58x_dev->udev,
ep_out->bEndpointAddress);
- es58x_dev->rx_max_packet_size = le16_to_cpu(ep_in->wMaxPacketSize);
return es58x_dev;
}
@@ -2245,25 +2219,24 @@ static int es58x_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct es58x_device *es58x_dev;
- int ch_idx, ret;
+ int ch_idx;
es58x_dev = es58x_init_es58x_dev(intf, id->driver_info);
if (IS_ERR(es58x_dev))
return PTR_ERR(es58x_dev);
- ret = es58x_get_product_info(es58x_dev);
- if (ret)
- return ret;
+ es58x_parse_product_info(es58x_dev);
+ devlink_register(priv_to_devlink(es58x_dev));
for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) {
- ret = es58x_init_netdev(es58x_dev, ch_idx);
+ int ret = es58x_init_netdev(es58x_dev, ch_idx);
if (ret) {
es58x_free_netdevs(es58x_dev);
return ret;
}
}
- return ret;
+ return 0;
}
/**
@@ -2280,13 +2253,15 @@ static void es58x_disconnect(struct usb_interface *intf)
dev_info(&intf->dev, "Disconnecting %s %s\n",
es58x_dev->udev->manufacturer, es58x_dev->udev->product);
+ devlink_unregister(priv_to_devlink(es58x_dev));
es58x_free_netdevs(es58x_dev);
es58x_free_urbs(es58x_dev);
+ devlink_free(priv_to_devlink(es58x_dev));
usb_set_intfdata(intf, NULL);
}
static struct usb_driver es58x_driver = {
- .name = ES58X_MODULE_NAME,
+ .name = KBUILD_MODNAME,
.probe = es58x_probe,
.disconnect = es58x_disconnect,
.id_table = es58x_id_table
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
index 826a15871573..c1ba1a4e8857 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
@@ -6,17 +6,18 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
- * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
#ifndef __ES58X_COMMON_H__
#define __ES58X_COMMON_H__
-#include <linux/types.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+#include <net/devlink.h>
#include "es581_4.h"
#include "es58x_fd.h"
@@ -222,7 +223,7 @@ union es58x_urb_cmd {
u8 cmd_type;
u8 cmd_id;
} __packed;
- u8 raw_cmd[0];
+ DECLARE_FLEX_ARRAY(u8, raw_cmd);
};
/**
@@ -230,6 +231,7 @@ union es58x_urb_cmd {
* @can: struct can_priv must be the first member (Socket CAN relies
* on the fact that function netdev_priv() returns a pointer to
* a struct can_priv).
+ * @devlink_port: devlink instance for the network interface.
* @es58x_dev: pointer to the corresponding ES58X device.
* @tx_urb: Used as a buffer to concatenate the TX messages and to do
* a bulk send. Please refer to es58x_start_xmit() for more
@@ -255,6 +257,7 @@ union es58x_urb_cmd {
*/
struct es58x_priv {
struct can_priv can;
+ struct devlink_port devlink_port;
struct es58x_device *es58x_dev;
struct urb *tx_urb;
@@ -357,6 +360,39 @@ struct es58x_operators {
};
/**
+ * struct es58x_sw_version - Version number of the firmware or the
+ * bootloader.
+ * @major: Version major number, represented on two digits.
+ * @minor: Version minor number, represented on two digits.
+ * @revision: Version revision number, represented on two digits.
+ *
+ * The firmware and the bootloader share the same format: "xx.xx.xx"
+ * where 'x' is a digit. Both can be retrieved from the product
+ * information string.
+ */
+struct es58x_sw_version {
+ u8 major;
+ u8 minor;
+ u8 revision;
+};
+
+/**
+ * struct es58x_hw_revision - Hardware revision number.
+ * @letter: Revision letter.
+ * @major: Version major number, represented on three digits.
+ * @minor: Version minor number, represented on three digits.
+ *
+ * The hardware revision uses its own format: "axxx/xxx" where 'a' is
+ * a letter and 'x' a digit. It can be retrieved from the product
+ * information string.
+ */
+struct es58x_hw_revision {
+ char letter;
+ u16 major;
+ u16 minor;
+};
+
+/**
* struct es58x_device - All information specific to an ES58X device.
* @dev: Device information.
* @udev: USB device information.
@@ -373,8 +409,9 @@ struct es58x_operators {
* queue wake/stop logic should prevent this URB from getting
* empty. Please refer to es58x_get_tx_urb() for more details.
* @tx_urbs_idle_cnt: number of urbs in @tx_urbs_idle.
- * @opened_channel_cnt: number of channels opened (c.f. es58x_open()
- * and es58x_stop()).
+ * @firmware_version: The firmware version number.
+ * @bootloader_version: The bootloader version number.
+ * @hardware_revision: The hardware revision number.
* @ktime_req_ns: kernel timestamp when es58x_set_realtime_diff_ns()
* was called.
* @realtime_diff_ns: difference in nanoseconds between the clocks of
@@ -382,8 +419,11 @@ struct es58x_operators {
* @timestamps: a temporary buffer to store the time stamps before
* feeding them to es58x_can_get_echo_skb(). Can only be used
* in RX branches.
- * @rx_max_packet_size: Maximum length of bulk-in URB.
* @num_can_ch: Number of CAN channel (i.e. number of elements of @netdev).
+ * @opened_channel_cnt: number of channels opened. Free of race
+ * conditions because its two users (net_device_ops:ndo_open()
+ * and net_device_ops:ndo_close()) guarantee that the network
+ * stack big kernel lock (a.k.a. rtnl_mutex) is held.
* @rx_cmd_buf_len: Length of @rx_cmd_buf.
* @rx_cmd_buf: The device might split the URB commands in an
* arbitrary amount of pieces. This buffer is used to concatenate
@@ -399,22 +439,25 @@ struct es58x_device {
const struct es58x_parameters *param;
const struct es58x_operators *ops;
- int rx_pipe;
- int tx_pipe;
+ unsigned int rx_pipe;
+ unsigned int tx_pipe;
struct usb_anchor rx_urbs;
struct usb_anchor tx_urbs_busy;
struct usb_anchor tx_urbs_idle;
atomic_t tx_urbs_idle_cnt;
- atomic_t opened_channel_cnt;
+
+ struct es58x_sw_version firmware_version;
+ struct es58x_sw_version bootloader_version;
+ struct es58x_hw_revision hardware_revision;
u64 ktime_req_ns;
s64 realtime_diff_ns;
u64 timestamps[ES58X_ECHO_BULK_MAX];
- u16 rx_max_packet_size;
u8 num_can_ch;
+ u8 opened_channel_cnt;
u16 rx_cmd_buf_len;
union es58x_urb_cmd rx_cmd_buf;
@@ -674,6 +717,7 @@ static inline enum es58x_flag es58x_get_flags(const struct sk_buff *skb)
return es58x_flags;
}
+/* es58x_core.c. */
int es58x_can_get_echo_skb(struct net_device *netdev, u32 packet_idx,
u64 *tstamps, unsigned int pkts);
int es58x_tx_ack_msg(struct net_device *netdev, u16 tx_free_entries,
@@ -691,9 +735,15 @@ int es58x_rx_cmd_ret_u32(struct net_device *netdev,
int es58x_send_msg(struct es58x_device *es58x_dev, u8 cmd_type, u8 cmd_id,
const void *msg, u16 cmd_len, int channel_idx);
+/* es58x_devlink.c. */
+void es58x_parse_product_info(struct es58x_device *es58x_dev);
+extern const struct devlink_ops es58x_dl_ops;
+
+/* es581_4.c. */
extern const struct es58x_parameters es581_4_param;
extern const struct es58x_operators es581_4_ops;
+/* es58x_fd.c. */
extern const struct es58x_parameters es58x_fd_param;
extern const struct es58x_operators es58x_fd_ops;
diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
new file mode 100644
index 000000000000..9fba29e2f57c
--- /dev/null
+++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces.
+ *
+ * File es58x_devlink.c: report the product information using devlink.
+ *
+ * Copyright (c) 2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ */
+
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/usb.h>
+#include <net/devlink.h>
+
+#include "es58x_core.h"
+
+/* USB descriptor index containing the product information string. */
+#define ES58X_PROD_INFO_IDX 6
+
+/**
+ * es58x_parse_sw_version() - Extract boot loader or firmware version.
+ * @es58x_dev: ES58X device.
+ * @prod_info: USB custom string returned by the device.
+ * @prefix: Select which information should be parsed. Set it to "FW"
+ * to parse the firmware version or to "BL" to parse the
+ * bootloader version.
+ *
+ * The @prod_info string contains the firmware and the bootloader
+ * version number all prefixed by a magic string and concatenated with
+ * other numbers. Depending on the device, the firmware (bootloader)
+ * format is either "FW_Vxx.xx.xx" ("BL_Vxx.xx.xx") or "FW:xx.xx.xx"
+ * ("BL:xx.xx.xx") where 'x' represents a digit. @prod_info must
+ * contain the common part of those prefixes: "FW" or "BL".
+ *
+ * Parse @prod_info and store the version number in
+ * &es58x_dev.firmware_version or &es58x_dev.bootloader_version
+ * according to @prefix value.
+ *
+ * Return: zero on success, -EINVAL if @prefix contains an invalid
+ * value and -EBADMSG if @prod_info could not be parsed.
+ */
+static int es58x_parse_sw_version(struct es58x_device *es58x_dev,
+ const char *prod_info, const char *prefix)
+{
+ struct es58x_sw_version *version;
+ int major, minor, revision;
+
+ if (!strcmp(prefix, "FW"))
+ version = &es58x_dev->firmware_version;
+ else if (!strcmp(prefix, "BL"))
+ version = &es58x_dev->bootloader_version;
+ else
+ return -EINVAL;
+
+ /* Go to prefix */
+ prod_info = strstr(prod_info, prefix);
+ if (!prod_info)
+ return -EBADMSG;
+ /* Go to beginning of the version number */
+ while (!isdigit(*prod_info)) {
+ prod_info++;
+ if (!*prod_info)
+ return -EBADMSG;
+ }
+
+ if (sscanf(prod_info, "%2u.%2u.%2u", &major, &minor, &revision) != 3)
+ return -EBADMSG;
+
+ version->major = major;
+ version->minor = minor;
+ version->revision = revision;
+
+ return 0;
+}
+
+/**
+ * es58x_parse_hw_rev() - Extract hardware revision number.
+ * @es58x_dev: ES58X device.
+ * @prod_info: USB custom string returned by the device.
+ *
+ * @prod_info contains the hardware revision prefixed by a magic
+ * string and concatenated with other numbers. Depending on
+ * the device, the hardware revision format is either
+ * "HW_VER:axxx/xxx" or "HR:axxx/xxx" where 'a' represents a letter
+ * and 'x' a digit.
+ *
+ * Parse @prod_info and store the hardware revision number in
+ * &es58x_dev.hardware_revision.
+ *
+ * Return: zero on success, -EBADMSG if @prod_info could not be
+ * parsed.
+ */
+static int es58x_parse_hw_rev(struct es58x_device *es58x_dev,
+ const char *prod_info)
+{
+ char letter;
+ int major, minor;
+
+ /* The only occurrence of 'H' is in the hardware revision prefix. */
+ prod_info = strchr(prod_info, 'H');
+ if (!prod_info)
+ return -EBADMSG;
+ /* Go to beginning of the hardware revision */
+ prod_info = strchr(prod_info, ':');
+ if (!prod_info)
+ return -EBADMSG;
+ prod_info++;
+
+ if (sscanf(prod_info, "%c%3u/%3u", &letter, &major, &minor) != 3)
+ return -EBADMSG;
+
+ es58x_dev->hardware_revision.letter = letter;
+ es58x_dev->hardware_revision.major = major;
+ es58x_dev->hardware_revision.minor = minor;
+
+ return 0;
+}
+
+/**
+ * es58x_parse_product_info() - Parse the ES58x product information
+ * string.
+ * @es58x_dev: ES58X device.
+ *
+ * Retrieve the product information string and parse it to extract the
+ * firmware version, the bootloader version and the hardware
+ * revision.
+ *
+ * If the function fails, simply emit a log message and continue
+ * because product information is not critical for the driver to
+ * operate.
+ */
+void es58x_parse_product_info(struct es58x_device *es58x_dev)
+{
+ char *prod_info;
+
+ prod_info = usb_cache_string(es58x_dev->udev, ES58X_PROD_INFO_IDX);
+ if (!prod_info) {
+ dev_warn(es58x_dev->dev,
+ "could not retrieve the product info string\n");
+ return;
+ }
+
+ if (es58x_parse_sw_version(es58x_dev, prod_info, "FW") ||
+ es58x_parse_sw_version(es58x_dev, prod_info, "BL") ||
+ es58x_parse_hw_rev(es58x_dev, prod_info))
+ dev_info(es58x_dev->dev,
+ "could not parse product info: '%s'\n", prod_info);
+
+ kfree(prod_info);
+}
+
+/**
+ * es58x_sw_version_is_set() - Check if the version is a valid number.
+ * @sw_ver: Version number of either the firmware or the bootloader.
+ *
+ * If &es58x_sw_version.major, &es58x_sw_version.minor and
+ * &es58x_sw_version.revision are all zero, the product string could
+ * not be parsed and the version number is invalid.
+ */
+static inline bool es58x_sw_version_is_set(struct es58x_sw_version *sw_ver)
+{
+ return sw_ver->major || sw_ver->minor || sw_ver->revision;
+}
+
+/**
+ * es58x_hw_revision_is_set() - Check if the revision is a valid number.
+ * @hw_rev: Revision number of the hardware.
+ *
+ * If &es58x_hw_revision.letter is the null character, the product
+ * string could not be parsed and the hardware revision number is
+ * invalid.
+ */
+static inline bool es58x_hw_revision_is_set(struct es58x_hw_revision *hw_rev)
+{
+ return hw_rev->letter != '\0';
+}
+
+/**
+ * es58x_devlink_info_get() - Report the product information.
+ * @devlink: Devlink.
+ * @req: skb wrapper where to put requested information.
+ * @extack: Unused.
+ *
+ * Report the firmware version, the bootloader version, the hardware
+ * revision and the serial number through netlink.
+ *
+ * Return: zero on success, errno when any error occurs.
+ */
+static int es58x_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct es58x_device *es58x_dev = devlink_priv(devlink);
+ struct es58x_sw_version *fw_ver = &es58x_dev->firmware_version;
+ struct es58x_sw_version *bl_ver = &es58x_dev->bootloader_version;
+ struct es58x_hw_revision *hw_rev = &es58x_dev->hardware_revision;
+ char buf[max(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))];
+ int ret = 0;
+
+ if (es58x_sw_version_is_set(fw_ver)) {
+ snprintf(buf, sizeof(buf), "%02u.%02u.%02u",
+ fw_ver->major, fw_ver->minor, fw_ver->revision);
+ ret = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ if (es58x_sw_version_is_set(bl_ver)) {
+ snprintf(buf, sizeof(buf), "%02u.%02u.%02u",
+ bl_ver->major, bl_ver->minor, bl_ver->revision);
+ ret = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ if (es58x_hw_revision_is_set(hw_rev)) {
+ snprintf(buf, sizeof(buf), "%c%03u/%03u",
+ hw_rev->letter, hw_rev->major, hw_rev->minor);
+ ret = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_REV,
+ buf);
+ if (ret)
+ return ret;
+ }
+
+ return devlink_info_serial_number_put(req, es58x_dev->udev->serial);
+}
+
+const struct devlink_ops es58x_dl_ops = {
+ .info_get = es58x_devlink_info_get,
+};
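
The parsing scheme implemented above boils down to locating a prefix and feeding the remainder to sscanf() with fixed field widths. Below is a minimal standalone sketch of that scheme, assuming a hypothetical product-info string (the real string layout varies between ES58X models); it is an illustration, not part of the patch.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical sample; actual strings differ per device */
	const char *prod_info = "ES582.1 FW_V03.00.02 BL_V01.02.03 HW_VER:A010/001";
	unsigned int major, minor, revision;
	const char *p;
	char letter;

	/* firmware version: find the "FW" prefix, skip to the first digit */
	p = strstr(prod_info, "FW");
	while (p && *p && !isdigit((unsigned char)*p))
		p++;
	if (p && sscanf(p, "%2u.%2u.%2u", &major, &minor, &revision) == 3)
		printf("fw %02u.%02u.%02u\n", major, minor, revision);

	/* hardware revision: revision letter plus "xxx/xxx" after the ':' */
	p = strchr(prod_info, 'H');
	p = p ? strchr(p, ':') : NULL;
	if (p && sscanf(p + 1, "%c%3u/%3u", &letter, &major, &minor) == 3)
		printf("hw %c%03u/%03u\n", letter, major, minor);

	return 0;
}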
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index 4f0cae29f4d8..fa87b0b78e3e 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -8,11 +8,12 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
- * Copyright (c) 2020, 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
-#include <linux/kernel.h>
#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/units.h>
#include "es58x_core.h"
#include "es58x_fd.h"
@@ -68,7 +69,8 @@ static int es58x_fd_echo_msg(struct net_device *netdev,
int i, num_element;
u32 rcv_packet_idx;
- const u32 mask = GENMASK(31, sizeof(echo_msg->packet_idx) * 8);
+ const u32 mask = GENMASK(BITS_PER_TYPE(mask) - 1,
+ BITS_PER_TYPE(echo_msg->packet_idx));
num_element = es58x_msg_num_element(es58x_dev->dev,
es58x_fd_urb_cmd->echo_msg,
@@ -171,12 +173,11 @@ static int es58x_fd_rx_event_msg(struct net_device *netdev,
const struct es58x_fd_rx_event_msg *rx_event_msg;
int ret;
+ rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg;
ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len);
if (ret)
return ret;
- rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg;
-
return es58x_rx_err_msg(netdev, rx_event_msg->error_code,
rx_event_msg->event_code,
get_unaligned_le64(&rx_event_msg->timestamp));
@@ -522,8 +523,8 @@ const struct es58x_parameters es58x_fd_param = {
* Mbps work in an optimal environment but are not recommended
* for production environment.
*/
- .bitrate_max = 8 * CAN_MBPS,
- .clock = {.freq = 80 * CAN_MHZ},
+ .bitrate_max = 8 * MEGA /* BPS */,
+ .clock = {.freq = 80 * MEGA /* Hz */},
.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO,
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 1b400de00f51..d476c2884008 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -9,26 +9,45 @@
* Many thanks to all socketcan devs!
*/
+#include <linux/bitfield.h>
+#include <linux/clocksource.h>
#include <linux/ethtool.h>
#include <linux/init.h>
-#include <linux/signal.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/signal.h>
+#include <linux/timecounter.h>
+#include <linux/units.h>
#include <linux/usb.h>
+#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
/* Device specific constants */
-#define USB_GSUSB_1_VENDOR_ID 0x1d50
-#define USB_GSUSB_1_PRODUCT_ID 0x606f
+#define USB_GS_USB_1_VENDOR_ID 0x1d50
+#define USB_GS_USB_1_PRODUCT_ID 0x606f
-#define USB_CANDLELIGHT_VENDOR_ID 0x1209
+#define USB_CANDLELIGHT_VENDOR_ID 0x1209
#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
-#define GSUSB_ENDPOINT_IN 1
-#define GSUSB_ENDPOINT_OUT 2
+#define USB_CES_CANEXT_FD_VENDOR_ID 0x1cd2
+#define USB_CES_CANEXT_FD_PRODUCT_ID 0x606f
+
+#define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0
+#define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8
+
+#define GS_USB_ENDPOINT_IN 1
+#define GS_USB_ENDPOINT_OUT 2
+
+/* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts
+ * for timer overflow (will be after ~71 minutes)
+ */
+#define GS_USB_TIMESTAMP_TIMER_HZ (1 * HZ_PER_MHZ)
+#define GS_USB_TIMESTAMP_WORK_DELAY_SEC 1800
+static_assert(GS_USB_TIMESTAMP_WORK_DELAY_SEC <
+ CYCLECOUNTER_MASK(32) / GS_USB_TIMESTAMP_TIMER_HZ / 2);
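
For reference, the bound checked by this static_assert works out as follows: a 32 bit counter ticking at 1 MHz wraps after roughly 2^32 us, about 4295 s or 71.6 minutes, so refreshing the timecounter every 1800 s keeps at least two reads per wrap period. The arithmetic in a standalone sketch (not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t timer_hz = 1000000;			/* 1 us tick */
	const uint64_t wrap_s = ((1ULL << 32) - 1) / timer_hz;	/* ~4294 s */

	printf("wrap after ~%llu s (~%.1f min), worker period limit %llu s\n",
	       (unsigned long long)wrap_s, wrap_s / 60.0,
	       (unsigned long long)(wrap_s / 2));
	return 0;
}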
/* Device specific constants */
enum gs_usb_breq {
@@ -40,6 +59,14 @@ enum gs_usb_breq {
GS_USB_BREQ_DEVICE_CONFIG,
GS_USB_BREQ_TIMESTAMP,
GS_USB_BREQ_IDENTIFY,
+ GS_USB_BREQ_GET_USER_ID,
+ GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING = GS_USB_BREQ_GET_USER_ID,
+ GS_USB_BREQ_SET_USER_ID,
+ GS_USB_BREQ_DATA_BITTIMING,
+ GS_USB_BREQ_BT_CONST_EXT,
+ GS_USB_BREQ_SET_TERMINATION,
+ GS_USB_BREQ_GET_TERMINATION,
+ GS_USB_BREQ_GET_STATE,
};
enum gs_can_mode {
@@ -63,6 +90,14 @@ enum gs_can_identify_mode {
GS_CAN_IDENTIFY_ON
};
+enum gs_can_termination_state {
+ GS_CAN_TERMINATION_STATE_OFF = 0,
+ GS_CAN_TERMINATION_STATE_ON
+};
+
+#define GS_USB_TERMINATION_DISABLED CAN_TERMINATION_DISABLED
+#define GS_USB_TERMINATION_ENABLED 120
+
/* data types passed between host and device */
/* The firmware on the original USB2CAN by Geschwister Schneider
@@ -87,11 +122,21 @@ struct gs_device_config {
__le32 hw_version;
} __packed;
-#define GS_CAN_MODE_NORMAL 0
-#define GS_CAN_MODE_LISTEN_ONLY BIT(0)
-#define GS_CAN_MODE_LOOP_BACK BIT(1)
-#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2)
-#define GS_CAN_MODE_ONE_SHOT BIT(3)
+#define GS_CAN_MODE_NORMAL 0
+#define GS_CAN_MODE_LISTEN_ONLY BIT(0)
+#define GS_CAN_MODE_LOOP_BACK BIT(1)
+#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_MODE_ONE_SHOT BIT(3)
+#define GS_CAN_MODE_HW_TIMESTAMP BIT(4)
+/* GS_CAN_FEATURE_IDENTIFY BIT(5) */
+/* GS_CAN_FEATURE_USER_ID BIT(6) */
+#define GS_CAN_MODE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7)
+#define GS_CAN_MODE_FD BIT(8)
+/* GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9) */
+/* GS_CAN_FEATURE_BT_CONST_EXT BIT(10) */
+/* GS_CAN_FEATURE_TERMINATION BIT(11) */
+#define GS_CAN_MODE_BERR_REPORTING BIT(12)
+/* GS_CAN_FEATURE_GET_STATE BIT(13) */
struct gs_device_mode {
__le32 mode;
@@ -116,12 +161,32 @@ struct gs_identify_mode {
__le32 mode;
} __packed;
-#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
-#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
-#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
-#define GS_CAN_FEATURE_ONE_SHOT BIT(3)
-#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4)
-#define GS_CAN_FEATURE_IDENTIFY BIT(5)
+struct gs_device_termination_state {
+ __le32 state;
+} __packed;
+
+#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
+#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
+#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
+#define GS_CAN_FEATURE_ONE_SHOT BIT(3)
+#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4)
+#define GS_CAN_FEATURE_IDENTIFY BIT(5)
+#define GS_CAN_FEATURE_USER_ID BIT(6)
+#define GS_CAN_FEATURE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7)
+#define GS_CAN_FEATURE_FD BIT(8)
+#define GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9)
+#define GS_CAN_FEATURE_BT_CONST_EXT BIT(10)
+#define GS_CAN_FEATURE_TERMINATION BIT(11)
+#define GS_CAN_FEATURE_BERR_REPORTING BIT(12)
+#define GS_CAN_FEATURE_GET_STATE BIT(13)
+#define GS_CAN_FEATURE_MASK GENMASK(13, 0)
+
+/* internal quirks - keep in GS_CAN_FEATURE space for now */
+
+/* CANtact Pro original firmware:
+ * BREQ DATA_BITTIMING overlaps with GET_USER_ID
+ */
+#define GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO BIT(31)
struct gs_device_bt_const {
__le32 feature;
@@ -136,7 +201,60 @@ struct gs_device_bt_const {
__le32 brp_inc;
} __packed;
-#define GS_CAN_FLAG_OVERFLOW 1
+struct gs_device_bt_const_extended {
+ __le32 feature;
+ __le32 fclk_can;
+ __le32 tseg1_min;
+ __le32 tseg1_max;
+ __le32 tseg2_min;
+ __le32 tseg2_max;
+ __le32 sjw_max;
+ __le32 brp_min;
+ __le32 brp_max;
+ __le32 brp_inc;
+
+ __le32 dtseg1_min;
+ __le32 dtseg1_max;
+ __le32 dtseg2_min;
+ __le32 dtseg2_max;
+ __le32 dsjw_max;
+ __le32 dbrp_min;
+ __le32 dbrp_max;
+ __le32 dbrp_inc;
+} __packed;
+
+#define GS_CAN_FLAG_OVERFLOW BIT(0)
+#define GS_CAN_FLAG_FD BIT(1)
+#define GS_CAN_FLAG_BRS BIT(2)
+#define GS_CAN_FLAG_ESI BIT(3)
+
+struct classic_can {
+ u8 data[8];
+} __packed;
+
+struct classic_can_ts {
+ u8 data[8];
+ __le32 timestamp_us;
+} __packed;
+
+struct classic_can_quirk {
+ u8 data[8];
+ u8 quirk;
+} __packed;
+
+struct canfd {
+ u8 data[64];
+} __packed;
+
+struct canfd_ts {
+ u8 data[64];
+ __le32 timestamp_us;
+} __packed;
+
+struct canfd_quirk {
+ u8 data[64];
+ u8 quirk;
+} __packed;
struct gs_host_frame {
u32 echo_id;
@@ -147,7 +265,14 @@ struct gs_host_frame {
u8 flags;
u8 reserved;
- u8 data[8];
+ union {
+ DECLARE_FLEX_ARRAY(struct classic_can, classic_can);
+ DECLARE_FLEX_ARRAY(struct classic_can_ts, classic_can_ts);
+ DECLARE_FLEX_ARRAY(struct classic_can_quirk, classic_can_quirk);
+ DECLARE_FLEX_ARRAY(struct canfd, canfd);
+ DECLARE_FLEX_ARRAY(struct canfd_ts, canfd_ts);
+ DECLARE_FLEX_ARRAY(struct canfd_quirk, canfd_quirk);
+ };
} __packed;
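
The union above only describes the payload variants; the buffer length actually used on the wire is derived later with struct_size(), which, because the union members are flexible arrays, amounts to the 12 byte fixed header plus exactly one element of the selected member. A standalone sketch of that size calculation follows, using GCC-style zero-length arrays instead of DECLARE_FLEX_ARRAY() and simplified struct names; it is illustrative only.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct classic_can { uint8_t data[8]; } __attribute__((packed));
struct canfd_ts { uint8_t data[64]; uint32_t timestamp_us; } __attribute__((packed));

struct host_frame {
	uint32_t echo_id;
	uint32_t can_id;
	uint8_t can_dlc;
	uint8_t channel;
	uint8_t flags;
	uint8_t reserved;
	union {
		struct classic_can classic_can[0];
		struct canfd_ts canfd_ts[0];
	};
} __attribute__((packed));

int main(void)
{
	/* roughly what struct_size(hf, classic_can, 1) and
	 * struct_size(hf, canfd_ts, 1) evaluate to in the driver
	 */
	printf("classic CAN frame: %zu bytes\n",
	       sizeof(struct host_frame) + sizeof(struct classic_can));
	printf("CAN FD frame with timestamp: %zu bytes\n",
	       sizeof(struct host_frame) + sizeof(struct canfd_ts));
	return 0;
}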
/* The GS USB devices make use of the same flags and masks as in
* linux/can.h and linux/can/error.h, and no additional mapping is necessary.
@@ -158,9 +283,9 @@ struct gs_host_frame {
/* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */
#define GS_MAX_RX_URBS 30
/* Maximum number of interfaces the driver supports per device.
- * Current hardware only supports 2 interfaces. The future may vary.
+ * Current hardware only supports 3 interfaces. The future may vary.
*/
-#define GS_MAX_INTF 2
+#define GS_MAX_INTF 3
struct gs_tx_context {
struct gs_can *dev;
@@ -174,11 +299,19 @@ struct gs_can {
struct net_device *netdev;
struct usb_device *udev;
- struct usb_interface *iface;
- struct can_bittiming_const bt_const;
+ struct can_bittiming_const bt_const, data_bt_const;
unsigned int channel; /* channel number */
+ /* time counter for hardware timestamps */
+ struct cyclecounter cc;
+ struct timecounter tc;
+ spinlock_t tc_lock; /* spinlock to guard access to tc->cycle_last */
+ struct delayed_work timestamp;
+
+ u32 feature;
+ unsigned int hf_size_tx;
+
/* This lock prevents a race condition between xmit and receive. */
spinlock_t tx_ctx_lock;
struct gs_tx_context tx_context[GS_MAX_TX_URBS];
@@ -191,8 +324,9 @@ struct gs_can {
struct gs_usb {
struct gs_can *canch[GS_MAX_INTF];
struct usb_anchor rx_submitted;
- atomic_t active_channels;
struct usb_device *udev;
+ unsigned int hf_size_rx;
+ u8 active_channels;
};
/* 'allocate' a tx context.
@@ -242,31 +376,107 @@ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
return NULL;
}
-static int gs_cmd_reset(struct gs_can *gsdev)
+static int gs_cmd_reset(struct gs_can *dev)
+{
+ struct gs_device_mode dm = {
+ .mode = GS_CAN_MODE_RESET,
+ };
+
+ return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dm, sizeof(dm), 1000,
+ GFP_KERNEL);
+}
+
+static inline int gs_usb_get_timestamp(const struct gs_can *dev,
+ u32 *timestamp_p)
{
- struct gs_device_mode *dm;
- struct usb_interface *intf = gsdev->iface;
+ __le32 timestamp;
int rc;
- dm = kzalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return -ENOMEM;
+ rc = usb_control_msg_recv(dev->udev, 0, GS_USB_BREQ_TIMESTAMP,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &timestamp, sizeof(timestamp),
+ USB_CTRL_GET_TIMEOUT,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
- dm->mode = GS_CAN_MODE_RESET;
+ *timestamp_p = le32_to_cpu(timestamp);
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_MODE,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- gsdev->channel,
- 0,
- dm,
- sizeof(*dm),
- 1000);
+ return 0;
+}
- kfree(dm);
+static u64 gs_usb_timestamp_read(const struct cyclecounter *cc) __must_hold(&dev->tc_lock)
+{
+ struct gs_can *dev = container_of(cc, struct gs_can, cc);
+ u32 timestamp = 0;
+ int err;
+
+ lockdep_assert_held(&dev->tc_lock);
+
+ /* drop lock for synchronous USB transfer */
+ spin_unlock_bh(&dev->tc_lock);
+ err = gs_usb_get_timestamp(dev, &timestamp);
+ spin_lock_bh(&dev->tc_lock);
+ if (err)
+ netdev_err(dev->netdev,
+ "Error %d while reading timestamp. HW timestamps may be inaccurate.",
+ err);
+
+ return timestamp;
+}
- return rc;
+static void gs_usb_timestamp_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct gs_can *dev;
+
+ dev = container_of(delayed_work, struct gs_can, timestamp);
+ spin_lock_bh(&dev->tc_lock);
+ timecounter_read(&dev->tc);
+ spin_unlock_bh(&dev->tc_lock);
+
+ schedule_delayed_work(&dev->timestamp,
+ GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
+}
+
+static void gs_usb_skb_set_timestamp(struct gs_can *dev,
+ struct sk_buff *skb, u32 timestamp)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ u64 ns;
+
+ spin_lock_bh(&dev->tc_lock);
+ ns = timecounter_cyc2time(&dev->tc, timestamp);
+ spin_unlock_bh(&dev->tc_lock);
+
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static void gs_usb_timestamp_init(struct gs_can *dev)
+{
+ struct cyclecounter *cc = &dev->cc;
+
+ cc->read = gs_usb_timestamp_read;
+ cc->mask = CYCLECOUNTER_MASK(32);
+ cc->shift = 32 - bits_per(NSEC_PER_SEC / GS_USB_TIMESTAMP_TIMER_HZ);
+ cc->mult = clocksource_hz2mult(GS_USB_TIMESTAMP_TIMER_HZ, cc->shift);
+
+ spin_lock_init(&dev->tc_lock);
+ spin_lock_bh(&dev->tc_lock);
+ timecounter_init(&dev->tc, &dev->cc, ktime_get_real_ns());
+ spin_unlock_bh(&dev->tc_lock);
+
+ INIT_DELAYED_WORK(&dev->timestamp, gs_usb_timestamp_work);
+ schedule_delayed_work(&dev->timestamp,
+ GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
+}
+
+static void gs_usb_timestamp_stop(struct gs_can *dev)
+{
+ cancel_delayed_work_sync(&dev->timestamp);
}
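
The cyclecounter parameters chosen in gs_usb_timestamp_init() appear to make the cycles-to-nanoseconds conversion exact for the 1 MHz timer: one tick is 1000 ns, bits_per(1000) should be 10, so the shift works out to 22 and the multiplier to 1000 << 22. A standalone sketch of that conversion, with the values assumed rather than taken from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t shift = 22;	/* assumed: 32 - bits_per(1000 ns per tick) */
	const uint32_t mult = (uint32_t)(((uint64_t)1000000000 << shift) / 1000000);
	const uint64_t delta_cycles = 123456;	/* elapsed 1 us ticks */
	const uint64_t delta_ns = (delta_cycles * mult) >> shift;

	/* prints: mult=4194304000, 123456 cycles -> 123456000 ns */
	printf("mult=%u, %llu cycles -> %llu ns\n", mult,
	       (unsigned long long)delta_cycles,
	       (unsigned long long)delta_ns);
	return 0;
}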
static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
@@ -294,6 +504,24 @@ static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
}
}
+static void gs_usb_set_timestamp(struct gs_can *dev, struct sk_buff *skb,
+ const struct gs_host_frame *hf)
+{
+ u32 timestamp;
+
+ if (!(dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP))
+ return;
+
+ if (hf->flags & GS_CAN_FLAG_FD)
+ timestamp = le32_to_cpu(hf->canfd_ts->timestamp_us);
+ else
+ timestamp = le32_to_cpu(hf->classic_can_ts->timestamp_us);
+
+ gs_usb_skb_set_timestamp(dev, skb, timestamp);
+
+ return;
+}
+
static void gs_usb_receive_bulk_callback(struct urb *urb)
{
struct gs_usb *usbcan = urb->context;
@@ -304,6 +532,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
struct gs_host_frame *hf = urb->transfer_buffer;
struct gs_tx_context *txc;
struct can_frame *cf;
+ struct canfd_frame *cfd;
struct sk_buff *skb;
BUG_ON(!usbcan);
@@ -321,7 +550,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
/* device reports out of range channel id */
if (hf->channel >= GS_MAX_INTF)
- goto resubmit_urb;
+ goto device_detach;
dev = usbcan->canch[hf->channel];
@@ -332,18 +561,35 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
return;
if (hf->echo_id == -1) { /* normal rx */
- skb = alloc_can_skb(dev->netdev, &cf);
- if (!skb)
- return;
+ if (hf->flags & GS_CAN_FLAG_FD) {
+ skb = alloc_canfd_skb(dev->netdev, &cfd);
+ if (!skb)
+ return;
+
+ cfd->can_id = le32_to_cpu(hf->can_id);
+ cfd->len = can_fd_dlc2len(hf->can_dlc);
+ if (hf->flags & GS_CAN_FLAG_BRS)
+ cfd->flags |= CANFD_BRS;
+ if (hf->flags & GS_CAN_FLAG_ESI)
+ cfd->flags |= CANFD_ESI;
+
+ memcpy(cfd->data, hf->canfd->data, cfd->len);
+ } else {
+ skb = alloc_can_skb(dev->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id = le32_to_cpu(hf->can_id);
+ can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
- cf->can_id = le32_to_cpu(hf->can_id);
+ memcpy(cf->data, hf->classic_can->data, 8);
- can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
- memcpy(cf->data, hf->data, 8);
+ /* ERROR frames tell us information about the controller */
+ if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
+ gs_update_state(dev, cf);
+ }
- /* ERROR frames tell us information about the controller */
- if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
- gs_update_state(dev, cf);
+ gs_usb_set_timestamp(dev, skb, hf);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += hf->can_dlc;
@@ -357,9 +603,6 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += hf->can_dlc;
-
txc = gs_get_tx_context(dev, hf->echo_id);
/* bad devices send bad echo_ids. */
@@ -370,7 +613,12 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- can_get_echo_skb(netdev, hf->echo_id, NULL);
+ skb = dev->can.echo_skb[hf->echo_id];
+ gs_usb_set_timestamp(dev, skb, hf);
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += can_get_echo_skb(netdev, hf->echo_id,
+ NULL);
gs_free_tx_context(txc);
@@ -393,19 +641,16 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
}
resubmit_urb:
- usb_fill_bulk_urb(urb,
- usbcan->udev,
- usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
- hf,
- sizeof(struct gs_host_frame),
- gs_usb_receive_bulk_callback,
- usbcan
- );
+ usb_fill_bulk_urb(urb, usbcan->udev,
+ usb_rcvbulkpipe(usbcan->udev, GS_USB_ENDPOINT_IN),
+ hf, dev->parent->hf_size_rx,
+ gs_usb_receive_bulk_callback, usbcan);
rc = usb_submit_urb(urb, GFP_ATOMIC);
/* USB failure take down all interfaces */
if (rc == -ENODEV) {
+ device_detach:
for (rc = 0; rc < GS_MAX_INTF; rc++) {
if (usbcan->canch[rc])
netif_device_detach(usbcan->canch[rc]->netdev);
@@ -417,38 +662,42 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct can_bittiming *bt = &dev->can.bittiming;
- struct usb_interface *intf = dev->iface;
- int rc;
- struct gs_device_bittiming *dbt;
-
- dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
- if (!dbt)
- return -ENOMEM;
-
- dbt->prop_seg = cpu_to_le32(bt->prop_seg);
- dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
- dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
- dbt->sjw = cpu_to_le32(bt->sjw);
- dbt->brp = cpu_to_le32(bt->brp);
+ struct gs_device_bittiming dbt = {
+ .prop_seg = cpu_to_le32(bt->prop_seg),
+ .phase_seg1 = cpu_to_le32(bt->phase_seg1),
+ .phase_seg2 = cpu_to_le32(bt->phase_seg2),
+ .sjw = cpu_to_le32(bt->sjw),
+ .brp = cpu_to_le32(bt->brp),
+ };
/* request bit timings */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_BITTIMING,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel,
- 0,
- dbt,
- sizeof(*dbt),
- 1000);
-
- kfree(dbt);
-
- if (rc < 0)
- dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
- rc);
+ return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_BITTIMING,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dbt, sizeof(dbt), 1000,
+ GFP_KERNEL);
+}
- return (rc > 0) ? 0 : rc;
+static int gs_usb_set_data_bittiming(struct net_device *netdev)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct can_bittiming *bt = &dev->can.data_bittiming;
+ struct gs_device_bittiming dbt = {
+ .prop_seg = cpu_to_le32(bt->prop_seg),
+ .phase_seg1 = cpu_to_le32(bt->phase_seg1),
+ .phase_seg2 = cpu_to_le32(bt->phase_seg2),
+ .sjw = cpu_to_le32(bt->sjw),
+ .brp = cpu_to_le32(bt->brp),
+ };
+ u8 request = GS_USB_BREQ_DATA_BITTIMING;
+
+ if (dev->feature & GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO)
+ request = GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING;
+
+ /* request data bit timings */
+ return usb_control_msg_send(dev->udev, 0, request,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dbt, sizeof(dbt), 1000,
+ GFP_KERNEL);
}
static void gs_usb_xmit_callback(struct urb *urb)
@@ -459,11 +708,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
if (urb->status)
netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
-
- usb_free_coherent(urb->dev,
- urb->transfer_buffer_length,
- urb->transfer_buffer,
- urb->transfer_dma);
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
@@ -474,11 +718,12 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
struct urb *urb;
struct gs_host_frame *hf;
struct can_frame *cf;
+ struct canfd_frame *cfd;
int rc;
unsigned int idx;
struct gs_tx_context *txc;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* find an empty context to keep track of transmission */
@@ -491,8 +736,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
if (!urb)
goto nomem_urb;
- hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
- &urb->transfer_dma);
+ hf = kmalloc(dev->hf_size_tx, GFP_ATOMIC);
if (!hf) {
netdev_err(netdev, "No memory left for USB buffer\n");
goto nomem_hf;
@@ -507,22 +751,36 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
hf->echo_id = idx;
hf->channel = dev->channel;
+ hf->flags = 0;
+ hf->reserved = 0;
- cf = (struct can_frame *)skb->data;
+ if (can_is_canfd_skb(skb)) {
+ cfd = (struct canfd_frame *)skb->data;
- hf->can_id = cpu_to_le32(cf->can_id);
- hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode);
+ hf->can_id = cpu_to_le32(cfd->can_id);
+ hf->can_dlc = can_fd_len2dlc(cfd->len);
+ hf->flags |= GS_CAN_FLAG_FD;
+ if (cfd->flags & CANFD_BRS)
+ hf->flags |= GS_CAN_FLAG_BRS;
+ if (cfd->flags & CANFD_ESI)
+ hf->flags |= GS_CAN_FLAG_ESI;
- memcpy(hf->data, cf->data, cf->len);
+ memcpy(hf->canfd->data, cfd->data, cfd->len);
+ } else {
+ cf = (struct can_frame *)skb->data;
+
+ hf->can_id = cpu_to_le32(cf->can_id);
+ hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode);
+
+ memcpy(hf->classic_can->data, cf->data, cf->len);
+ }
usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
- hf,
- sizeof(*hf),
- gs_usb_xmit_callback,
- txc);
+ usb_sndbulkpipe(dev->udev, GS_USB_ENDPOINT_OUT),
+ hf, dev->hf_size_tx,
+ gs_usb_xmit_callback, txc);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &dev->tx_submitted);
can_put_echo_skb(skb, netdev, idx, 0);
@@ -537,10 +795,6 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
gs_free_tx_context(txc);
usb_unanchor_urb(urb);
- usb_free_coherent(dev->udev,
- sizeof(*hf),
- hf,
- urb->transfer_dma);
if (rc == -ENODEV) {
netif_device_detach(netdev);
@@ -560,10 +814,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
badidx:
- usb_free_coherent(dev->udev,
- sizeof(*hf),
- hf,
- urb->transfer_dma);
+ kfree(hf);
nomem_hf:
usb_free_urb(urb);
@@ -578,16 +829,32 @@ static int gs_can_open(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct gs_usb *parent = dev->parent;
- int rc, i;
- struct gs_device_mode *dm;
+ struct gs_device_mode dm = {
+ .mode = cpu_to_le32(GS_CAN_MODE_START),
+ };
+ struct gs_host_frame *hf;
u32 ctrlmode;
u32 flags = 0;
+ int rc, i;
rc = open_candev(netdev);
if (rc)
return rc;
- if (atomic_add_return(1, &parent->active_channels) == 1) {
+ ctrlmode = dev->can.ctrlmode;
+ if (ctrlmode & CAN_CTRLMODE_FD) {
+ if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX)
+ dev->hf_size_tx = struct_size(hf, canfd_quirk, 1);
+ else
+ dev->hf_size_tx = struct_size(hf, canfd, 1);
+ } else {
+ if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX)
+ dev->hf_size_tx = struct_size(hf, classic_can_quirk, 1);
+ else
+ dev->hf_size_tx = struct_size(hf, classic_can, 1);
+ }
+
+ if (!parent->active_channels) {
for (i = 0; i < GS_MAX_RX_URBS; i++) {
struct urb *urb;
u8 *buf;
@@ -598,10 +865,8 @@ static int gs_can_open(struct net_device *netdev)
return -ENOMEM;
/* alloc rx buffer */
- buf = usb_alloc_coherent(dev->udev,
- sizeof(struct gs_host_frame),
- GFP_KERNEL,
- &urb->transfer_dma);
+ buf = kmalloc(dev->parent->hf_size_rx,
+ GFP_KERNEL);
if (!buf) {
netdev_err(netdev,
"No memory left for USB buffer\n");
@@ -613,12 +878,11 @@ static int gs_can_open(struct net_device *netdev)
usb_fill_bulk_urb(urb,
dev->udev,
usb_rcvbulkpipe(dev->udev,
- GSUSB_ENDPOINT_IN),
+ GS_USB_ENDPOINT_IN),
buf,
- sizeof(struct gs_host_frame),
- gs_usb_receive_bulk_callback,
- parent);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ dev->parent->hf_size_rx,
+ gs_usb_receive_bulk_callback, parent);
+ urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &parent->rx_submitted);
@@ -628,8 +892,7 @@ static int gs_can_open(struct net_device *netdev)
netif_device_detach(dev->netdev);
netdev_err(netdev,
- "usb_submit failed (err=%d)\n",
- rc);
+ "usb_submit failed (err=%d)\n", rc);
usb_unanchor_urb(urb);
usb_free_urb(urb);
@@ -643,57 +906,90 @@ static int gs_can_open(struct net_device *netdev)
}
}
- dm = kmalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return -ENOMEM;
-
/* flags */
- ctrlmode = dev->can.ctrlmode;
-
if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
flags |= GS_CAN_MODE_LOOP_BACK;
- else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
+
+ if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
flags |= GS_CAN_MODE_LISTEN_ONLY;
- /* Controller is not allowed to retry TX
- * this mode is unavailable on atmels uc3c hardware
- */
+ if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+
if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
flags |= GS_CAN_MODE_ONE_SHOT;
- if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+ if (ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ flags |= GS_CAN_MODE_BERR_REPORTING;
+
+ if (ctrlmode & CAN_CTRLMODE_FD)
+ flags |= GS_CAN_MODE_FD;
+
+ /* if hardware supports timestamps, enable it */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) {
+ flags |= GS_CAN_MODE_HW_TIMESTAMP;
+
+ /* start polling timestamp */
+ gs_usb_timestamp_init(dev);
+ }
/* finally start device */
- dm->mode = cpu_to_le32(GS_CAN_MODE_START);
- dm->flags = cpu_to_le32(flags);
- rc = usb_control_msg(interface_to_usbdev(dev->iface),
- usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
- GS_USB_BREQ_MODE,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_INTERFACE,
- dev->channel,
- 0,
- dm,
- sizeof(*dm),
- 1000);
-
- if (rc < 0) {
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ dm.flags = cpu_to_le32(flags);
+ rc = usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dm, sizeof(dm), 1000,
+ GFP_KERNEL);
+ if (rc) {
netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
- kfree(dm);
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_stop(dev);
+ dev->can.state = CAN_STATE_STOPPED;
return rc;
}
- kfree(dm);
-
- dev->can.state = CAN_STATE_ERROR_ACTIVE;
-
+ parent->active_channels++;
if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(netdev);
return 0;
}
+static int gs_usb_get_state(const struct net_device *netdev,
+ struct can_berr_counter *bec,
+ enum can_state *state)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_device_state ds;
+ int rc;
+
+ rc = usb_control_msg_recv(dev->udev, 0, GS_USB_BREQ_GET_STATE,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &ds, sizeof(ds),
+ USB_CTRL_GET_TIMEOUT,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ if (le32_to_cpu(ds.state) >= CAN_STATE_MAX)
+ return -EOPNOTSUPP;
+
+ *state = le32_to_cpu(ds.state);
+ bec->txerr = le32_to_cpu(ds.txerr);
+ bec->rxerr = le32_to_cpu(ds.rxerr);
+
+ return 0;
+}
+
+static int gs_usb_can_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ enum can_state state;
+
+ return gs_usb_get_state(netdev, bec, &state);
+}
+
static int gs_can_close(struct net_device *netdev)
{
int rc;
@@ -702,9 +998,15 @@ static int gs_can_close(struct net_device *netdev)
netif_stop_queue(netdev);
+ /* stop polling timestamp */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_stop(dev);
+
/* Stop polling */
- if (atomic_dec_and_test(&parent->active_channels))
+ parent->active_channels--;
+ if (!parent->active_channels) {
usb_kill_anchored_urbs(&parent->rx_submitted);
+ }
/* Stop sending URBs */
usb_kill_anchored_urbs(&dev->tx_submitted);
@@ -727,58 +1029,56 @@ static int gs_can_close(struct net_device *netdev)
return 0;
}
+static int gs_can_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ const struct gs_can *dev = netdev_priv(netdev);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ return can_eth_ioctl_hwts(netdev, ifr, cmd);
+
+ return -EOPNOTSUPP;
+}
+
static const struct net_device_ops gs_usb_netdev_ops = {
.ndo_open = gs_can_open,
.ndo_stop = gs_can_close,
.ndo_start_xmit = gs_can_start_xmit,
.ndo_change_mtu = can_change_mtu,
+ .ndo_eth_ioctl = gs_can_eth_ioctl,
};
static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
{
struct gs_can *dev = netdev_priv(netdev);
- struct gs_identify_mode *imode;
- int rc;
-
- imode = kmalloc(sizeof(*imode), GFP_KERNEL);
-
- if (!imode)
- return -ENOMEM;
+ struct gs_identify_mode imode;
if (do_identify)
- imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
+ imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
else
- imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
-
- rc = usb_control_msg(interface_to_usbdev(dev->iface),
- usb_sndctrlpipe(interface_to_usbdev(dev->iface),
- 0),
- GS_USB_BREQ_IDENTIFY,
- USB_DIR_OUT | USB_TYPE_VENDOR |
- USB_RECIP_INTERFACE,
- dev->channel,
- 0,
- imode,
- sizeof(*imode),
- 100);
-
- kfree(imode);
-
- return (rc > 0) ? 0 : rc;
+ imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
+
+ return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_IDENTIFY,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &imode, sizeof(imode), 100,
+ GFP_KERNEL);
}
/* blink LED's for finding the this interface */
-static int gs_usb_set_phys_id(struct net_device *dev,
+static int gs_usb_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
+ const struct gs_can *dev = netdev_priv(netdev);
int rc = 0;
+ if (!(dev->feature & GS_CAN_FEATURE_IDENTIFY))
+ return -EOPNOTSUPP;
+
switch (state) {
case ETHTOOL_ID_ACTIVE:
- rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON);
+ rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_ON);
break;
case ETHTOOL_ID_INACTIVE:
- rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF);
+ rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_OFF);
break;
default:
break;
@@ -787,8 +1087,65 @@ static int gs_usb_set_phys_id(struct net_device *dev,
return rc;
}
+static int gs_usb_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+
+ /* report if device supports HW timestamps */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ return can_ethtool_op_get_ts_info_hwts(netdev, info);
+
+ return ethtool_op_get_ts_info(netdev, info);
+}
+
static const struct ethtool_ops gs_usb_ethtool_ops = {
.set_phys_id = gs_usb_set_phys_id,
+ .get_ts_info = gs_usb_get_ts_info,
+};
+
+static int gs_usb_get_termination(struct net_device *netdev, u16 *term)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_device_termination_state term_state;
+ int rc;
+
+ rc = usb_control_msg_recv(dev->udev, 0, GS_USB_BREQ_GET_TERMINATION,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &term_state, sizeof(term_state), 1000,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ if (term_state.state == cpu_to_le32(GS_CAN_TERMINATION_STATE_ON))
+ *term = GS_USB_TERMINATION_ENABLED;
+ else
+ *term = GS_USB_TERMINATION_DISABLED;
+
+ return 0;
+}
+
+static int gs_usb_set_termination(struct net_device *netdev, u16 term)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_device_termination_state term_state;
+
+ if (term == GS_USB_TERMINATION_ENABLED)
+ term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_ON);
+ else
+ term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_OFF);
+
+ return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_SET_TERMINATION,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &term_state, sizeof(term_state), 1000,
+ GFP_KERNEL);
+}
+
+static const u16 gs_usb_termination_const[] = {
+ GS_USB_TERMINATION_DISABLED,
+ GS_USB_TERMINATION_ENABLED
};
static struct gs_can *gs_make_candev(unsigned int channel,
@@ -798,29 +1155,21 @@ static struct gs_can *gs_make_candev(unsigned int channel,
struct gs_can *dev;
struct net_device *netdev;
int rc;
- struct gs_device_bt_const *bt_const;
+ struct gs_device_bt_const_extended bt_const_extended;
+ struct gs_device_bt_const bt_const;
u32 feature;
- bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
- if (!bt_const)
- return ERR_PTR(-ENOMEM);
-
/* fetch bit timing constants */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_BT_CONST,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- channel,
- 0,
- bt_const,
- sizeof(*bt_const),
- 1000);
-
- if (rc < 0) {
+ rc = usb_control_msg_recv(interface_to_usbdev(intf), 0,
+ GS_USB_BREQ_BT_CONST,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ channel, 0, &bt_const, sizeof(bt_const), 1000,
+ GFP_KERNEL);
+
+ if (rc) {
dev_err(&intf->dev,
- "Couldn't get bit timing const for channel (err=%d)\n",
- rc);
- kfree(bt_const);
+ "Couldn't get bit timing const for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
return ERR_PTR(rc);
}
@@ -828,29 +1177,29 @@ static struct gs_can *gs_make_candev(unsigned int channel,
netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS);
if (!netdev) {
dev_err(&intf->dev, "Couldn't allocate candev\n");
- kfree(bt_const);
return ERR_PTR(-ENOMEM);
}
dev = netdev_priv(netdev);
netdev->netdev_ops = &gs_usb_netdev_ops;
+ netdev->ethtool_ops = &gs_usb_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
+ netdev->dev_id = channel;
/* dev setup */
- strcpy(dev->bt_const.name, "gs_usb");
- dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min);
- dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max);
- dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min);
- dev->bt_const.tseg2_max = le32_to_cpu(bt_const->tseg2_max);
- dev->bt_const.sjw_max = le32_to_cpu(bt_const->sjw_max);
- dev->bt_const.brp_min = le32_to_cpu(bt_const->brp_min);
- dev->bt_const.brp_max = le32_to_cpu(bt_const->brp_max);
- dev->bt_const.brp_inc = le32_to_cpu(bt_const->brp_inc);
+ strcpy(dev->bt_const.name, KBUILD_MODNAME);
+ dev->bt_const.tseg1_min = le32_to_cpu(bt_const.tseg1_min);
+ dev->bt_const.tseg1_max = le32_to_cpu(bt_const.tseg1_max);
+ dev->bt_const.tseg2_min = le32_to_cpu(bt_const.tseg2_min);
+ dev->bt_const.tseg2_max = le32_to_cpu(bt_const.tseg2_max);
+ dev->bt_const.sjw_max = le32_to_cpu(bt_const.sjw_max);
+ dev->bt_const.brp_min = le32_to_cpu(bt_const.brp_min);
+ dev->bt_const.brp_max = le32_to_cpu(bt_const.brp_max);
+ dev->bt_const.brp_inc = le32_to_cpu(bt_const.brp_inc);
dev->udev = interface_to_usbdev(intf);
- dev->iface = intf;
dev->netdev = netdev;
dev->channel = channel;
@@ -864,13 +1213,14 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* can setup */
dev->can.state = CAN_STATE_STOPPED;
- dev->can.clock.freq = le32_to_cpu(bt_const->fclk_can);
+ dev->can.clock.freq = le32_to_cpu(bt_const.fclk_can);
dev->can.bittiming_const = &dev->bt_const;
dev->can.do_set_bittiming = gs_usb_set_bittiming;
dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
- feature = le32_to_cpu(bt_const->feature);
+ feature = le32_to_cpu(bt_const.feature);
+ dev->feature = FIELD_GET(GS_CAN_FEATURE_MASK, feature);
if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
@@ -883,22 +1233,110 @@ static struct gs_can *gs_make_candev(unsigned int channel,
if (feature & GS_CAN_FEATURE_ONE_SHOT)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
- SET_NETDEV_DEV(netdev, &intf->dev);
+ if (feature & GS_CAN_FEATURE_FD) {
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ /* The data bit timing will be overwritten if
+ * GS_CAN_FEATURE_BT_CONST_EXT is set.
+ */
+ dev->can.data_bittiming_const = &dev->bt_const;
+ dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming;
+ }
- if (le32_to_cpu(dconf->sw_version) > 1)
- if (feature & GS_CAN_FEATURE_IDENTIFY)
- netdev->ethtool_ops = &gs_usb_ethtool_ops;
+ if (feature & GS_CAN_FEATURE_TERMINATION) {
+ rc = gs_usb_get_termination(netdev, &dev->can.termination);
+ if (rc) {
+ dev->feature &= ~GS_CAN_FEATURE_TERMINATION;
- kfree(bt_const);
+ dev_info(&intf->dev,
+ "Disabling termination support for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ } else {
+ dev->can.termination_const = gs_usb_termination_const;
+ dev->can.termination_const_cnt = ARRAY_SIZE(gs_usb_termination_const);
+ dev->can.do_set_termination = gs_usb_set_termination;
+ }
+ }
+
+ if (feature & GS_CAN_FEATURE_BERR_REPORTING)
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_BERR_REPORTING;
+
+ if (feature & GS_CAN_FEATURE_GET_STATE)
+ dev->can.do_get_berr_counter = gs_usb_can_get_berr_counter;
+
+ /* The CANtact Pro from LinkLayer Labs is based on the
+ * LPC54616 µC, which is affected by the NXP LPC USB transfer
+ * erratum. However, the current firmware (version 2) doesn't
+ * set the GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX bit. Set the
+ * feature GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX to work around
+ * this issue.
+ *
+ * For the GS_USB_BREQ_DATA_BITTIMING USB control message the
+ * CANtact Pro firmware uses a request value, which is already
+ * used by the candleLight firmware for a different purpose
+ * (GS_USB_BREQ_GET_USER_ID). Set the feature
+ * GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO to workaround this
+ * issue.
+ */
+ if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GS_USB_1_VENDOR_ID) &&
+ dev->udev->descriptor.idProduct == cpu_to_le16(USB_GS_USB_1_PRODUCT_ID) &&
+ dev->udev->manufacturer && dev->udev->product &&
+ !strcmp(dev->udev->manufacturer, "LinkLayer Labs") &&
+ !strcmp(dev->udev->product, "CANtact Pro") &&
+ (le32_to_cpu(dconf->sw_version) <= 2))
+ dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX |
+ GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO;
+
+ /* GS_CAN_FEATURE_IDENTIFY is only supported for sw_version > 1 */
+ if (!(le32_to_cpu(dconf->sw_version) > 1 &&
+ feature & GS_CAN_FEATURE_IDENTIFY))
+ dev->feature &= ~GS_CAN_FEATURE_IDENTIFY;
+
+ /* fetch extended bit timing constants if device has feature
+ * GS_CAN_FEATURE_FD and GS_CAN_FEATURE_BT_CONST_EXT
+ */
+ if (feature & GS_CAN_FEATURE_FD &&
+ feature & GS_CAN_FEATURE_BT_CONST_EXT) {
+ rc = usb_control_msg_recv(interface_to_usbdev(intf), 0,
+ GS_USB_BREQ_BT_CONST_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ channel, 0, &bt_const_extended,
+ sizeof(bt_const_extended),
+ 1000, GFP_KERNEL);
+ if (rc) {
+ dev_err(&intf->dev,
+ "Couldn't get extended bit timing const for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ goto out_free_candev;
+ }
+
+ strcpy(dev->data_bt_const.name, KBUILD_MODNAME);
+ dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended.dtseg1_min);
+ dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended.dtseg1_max);
+ dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended.dtseg2_min);
+ dev->data_bt_const.tseg2_max = le32_to_cpu(bt_const_extended.dtseg2_max);
+ dev->data_bt_const.sjw_max = le32_to_cpu(bt_const_extended.dsjw_max);
+ dev->data_bt_const.brp_min = le32_to_cpu(bt_const_extended.dbrp_min);
+ dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended.dbrp_max);
+ dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended.dbrp_inc);
+
+ dev->can.data_bittiming_const = &dev->data_bt_const;
+ }
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
rc = register_candev(dev->netdev);
if (rc) {
- free_candev(dev->netdev);
- dev_err(&intf->dev, "Couldn't register candev (err=%d)\n", rc);
- return ERR_PTR(rc);
+ dev_err(&intf->dev,
+ "Couldn't register candev for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ goto out_free_candev;
}
return dev;
+
+ out_free_candev:
+ free_candev(dev->netdev);
+ return ERR_PTR(rc);
}
static void gs_destroy_candev(struct gs_can *dev)
@@ -911,84 +1349,64 @@ static void gs_destroy_candev(struct gs_can *dev)
static int gs_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct gs_host_frame *hf;
struct gs_usb *dev;
- int rc = -ENOMEM;
+ struct gs_host_config hconf = {
+ .byte_order = cpu_to_le32(0x0000beef),
+ };
+ struct gs_device_config dconf;
unsigned int icount, i;
- struct gs_host_config *hconf;
- struct gs_device_config *dconf;
-
- hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
- if (!hconf)
- return -ENOMEM;
-
- hconf->byte_order = cpu_to_le32(0x0000beef);
+ int rc;
/* send host config */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_HOST_FORMAT,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1,
- intf->cur_altsetting->desc.bInterfaceNumber,
- hconf,
- sizeof(*hconf),
- 1000);
-
- kfree(hconf);
-
- if (rc < 0) {
- dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
- rc);
+ rc = usb_control_msg_send(udev, 0,
+ GS_USB_BREQ_HOST_FORMAT,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ &hconf, sizeof(hconf), 1000,
+ GFP_KERNEL);
+ if (rc) {
+ dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc);
return rc;
}
- dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
- if (!dconf)
- return -ENOMEM;
-
/* read device config */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_DEVICE_CONFIG,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1,
- intf->cur_altsetting->desc.bInterfaceNumber,
- dconf,
- sizeof(*dconf),
- 1000);
- if (rc < 0) {
+ rc = usb_control_msg_recv(udev, 0,
+ GS_USB_BREQ_DEVICE_CONFIG,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ &dconf, sizeof(dconf), 1000,
+ GFP_KERNEL);
+ if (rc) {
dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
rc);
- kfree(dconf);
return rc;
}
- icount = dconf->icount + 1;
+ icount = dconf.icount + 1;
dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
if (icount > GS_MAX_INTF) {
dev_err(&intf->dev,
"Driver cannot handle more that %u CAN interfaces\n",
GS_MAX_INTF);
- kfree(dconf);
return -EINVAL;
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- kfree(dconf);
+ if (!dev)
return -ENOMEM;
- }
init_usb_anchor(&dev->rx_submitted);
- atomic_set(&dev->active_channels, 0);
-
usb_set_intfdata(intf, dev);
- dev->udev = interface_to_usbdev(intf);
+ dev->udev = udev;
for (i = 0; i < icount; i++) {
- dev->canch[i] = gs_make_candev(i, intf, dconf);
+ unsigned int hf_size_rx = 0;
+
+ dev->canch[i] = gs_make_candev(i, intf, &dconf);
if (IS_ERR_OR_NULL(dev->canch[i])) {
/* save error code to return later */
rc = PTR_ERR(dev->canch[i]);
@@ -999,22 +1417,36 @@ static int gs_usb_probe(struct usb_interface *intf,
gs_destroy_candev(dev->canch[i]);
usb_kill_anchored_urbs(&dev->rx_submitted);
- kfree(dconf);
kfree(dev);
return rc;
}
dev->canch[i]->parent = dev;
- }
- kfree(dconf);
+ /* set RX packet size based on FD and if hardware
+ * timestamps are supported.
+ */
+ if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
+ if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ hf_size_rx = struct_size(hf, canfd_ts, 1);
+ else
+ hf_size_rx = struct_size(hf, canfd, 1);
+ } else {
+ if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ hf_size_rx = struct_size(hf, classic_can_ts, 1);
+ else
+ hf_size_rx = struct_size(hf, classic_can, 1);
+ }
+ dev->hf_size_rx = max(dev->hf_size_rx, hf_size_rx);
+ }
return 0;
}
static void gs_usb_disconnect(struct usb_interface *intf)
{
- unsigned i;
struct gs_usb *dev = usb_get_intfdata(intf);
+ unsigned int i;
+
usb_set_intfdata(intf, NULL);
if (!dev) {
@@ -1031,20 +1463,24 @@ static void gs_usb_disconnect(struct usb_interface *intf)
}
static const struct usb_device_id gs_usb_table[] = {
- { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
- USB_GSUSB_1_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_GS_USB_1_VENDOR_ID,
+ USB_GS_USB_1_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
USB_CANDLELIGHT_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_CES_CANEXT_FD_VENDOR_ID,
+ USB_CES_CANEXT_FD_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_ABE_CANDEBUGGER_FD_VENDOR_ID,
+ USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, gs_usb_table);
static struct usb_driver gs_usb_driver = {
- .name = "gs_usb",
- .probe = gs_usb_probe,
+ .name = KBUILD_MODNAME,
+ .probe = gs_usb_probe,
.disconnect = gs_usb_disconnect,
- .id_table = gs_usb_table,
+ .id_table = gs_usb_table,
};
module_usb_driver(gs_usb_driver);
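
The gs_usb conversion above drops the kmalloc()'d transfer buffers and raw usb_control_msg() calls in favour of usb_control_msg_send()/usb_control_msg_recv(). These helpers take the payload by pointer (an on-stack struct is fine), bounce it through a DMA-able buffer internally, and return 0 on success or a negative errno, so the separate length checks and kfree() paths go away. A minimal sketch of the receive side, with the struct and request constant named only for illustration:

	struct demo_device_config dconf;	/* hypothetical wire-format struct */
	int rc;

	/* vendor read on ep0 straight into the on-stack struct */
	rc = usb_control_msg_recv(udev, 0, DEMO_BREQ_DEVICE_CONFIG,
				  USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
				  1, intf->cur_altsetting->desc.bInterfaceNumber,
				  &dconf, sizeof(dconf), 1000, GFP_KERNEL);
	if (rc)
		return rc;	/* 0 on success, negative errno on failure */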
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
index 390b6bde883c..ff10b3790d84 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h
@@ -35,9 +35,11 @@
#define KVASER_USB_RX_BUFFER_SIZE 3072
#define KVASER_USB_MAX_NET_DEVICES 5
-/* USB devices features */
-#define KVASER_USB_HAS_SILENT_MODE BIT(0)
-#define KVASER_USB_HAS_TXRX_ERRORS BIT(1)
+/* Kvaser USB device quirks */
+#define KVASER_USB_QUIRK_HAS_SILENT_MODE BIT(0)
+#define KVASER_USB_QUIRK_HAS_TXRX_ERRORS BIT(1)
+#define KVASER_USB_QUIRK_IGNORE_CLK_FREQ BIT(2)
+#define KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP BIT(3)
/* Device capabilities */
#define KVASER_USB_CAP_BERR_CAP 0x01
@@ -65,26 +67,28 @@ struct kvaser_usb_dev_card_data_hydra {
struct kvaser_usb_dev_card_data {
u32 ctrlmode_supported;
u32 capabilities;
- union {
- struct {
- enum kvaser_usb_leaf_family family;
- } leaf;
- struct kvaser_usb_dev_card_data_hydra hydra;
- };
+ struct kvaser_usb_dev_card_data_hydra hydra;
};
/* Context for an outstanding, not yet ACKed, transmission */
struct kvaser_usb_tx_urb_context {
struct kvaser_usb_net_priv *priv;
u32 echo_index;
- int dlc;
};
+struct kvaser_usb_busparams {
+ __le32 bitrate;
+ u8 tseg1;
+ u8 tseg2;
+ u8 sjw;
+ u8 nsamples;
+} __packed;
+
struct kvaser_usb {
struct usb_device *udev;
struct usb_interface *intf;
struct kvaser_usb_net_priv *nets[KVASER_USB_MAX_NET_DEVICES];
- const struct kvaser_usb_dev_ops *ops;
+ const struct kvaser_usb_driver_info *driver_info;
const struct kvaser_usb_dev_cfg *cfg;
struct usb_endpoint_descriptor *bulk_in, *bulk_out;
@@ -108,13 +112,19 @@ struct kvaser_usb_net_priv {
struct can_priv can;
struct can_berr_counter bec;
+ /* subdriver-specific data */
+ void *sub_priv;
+
struct kvaser_usb *dev;
struct net_device *netdev;
int channel;
- struct completion start_comp, stop_comp, flush_comp;
+ struct completion start_comp, stop_comp, flush_comp,
+ get_busparams_comp;
struct usb_anchor tx_submitted;
+ struct kvaser_usb_busparams busparams_nominal, busparams_data;
+
spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */
int active_tx_contexts;
struct kvaser_usb_tx_urb_context tx_contexts[];
@@ -124,11 +134,15 @@ struct kvaser_usb_net_priv {
* struct kvaser_usb_dev_ops - Device specific functions
* @dev_set_mode: used for can.do_set_mode
* @dev_set_bittiming: used for can.do_set_bittiming
+ * @dev_get_busparams: readback arbitration busparams
* @dev_set_data_bittiming: used for can.do_set_data_bittiming
+ * @dev_get_data_busparams: readback data busparams
* @dev_get_berr_counter: used for can.do_get_berr_counter
*
* @dev_setup_endpoints: setup USB in and out endpoints
* @dev_init_card: initialize card
+ * @dev_init_channel: initialize channel
+ * @dev_remove_channel: uninitialize channel
* @dev_get_software_info: get software info
* @dev_get_software_details: get software details
* @dev_get_card_info: get card info
@@ -144,12 +158,18 @@ struct kvaser_usb_net_priv {
*/
struct kvaser_usb_dev_ops {
int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode);
- int (*dev_set_bittiming)(struct net_device *netdev);
- int (*dev_set_data_bittiming)(struct net_device *netdev);
+ int (*dev_set_bittiming)(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams);
+ int (*dev_get_busparams)(struct kvaser_usb_net_priv *priv);
+ int (*dev_set_data_bittiming)(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams);
+ int (*dev_get_data_busparams)(struct kvaser_usb_net_priv *priv);
int (*dev_get_berr_counter)(const struct net_device *netdev,
struct can_berr_counter *bec);
int (*dev_setup_endpoints)(struct kvaser_usb *dev);
int (*dev_init_card)(struct kvaser_usb *dev);
+ int (*dev_init_channel)(struct kvaser_usb_net_priv *priv);
+ void (*dev_remove_channel)(struct kvaser_usb_net_priv *priv);
int (*dev_get_software_info)(struct kvaser_usb *dev);
int (*dev_get_software_details)(struct kvaser_usb *dev);
int (*dev_get_card_info)(struct kvaser_usb *dev);
@@ -162,8 +182,14 @@ struct kvaser_usb_dev_ops {
void (*dev_read_bulk_callback)(struct kvaser_usb *dev, void *buf,
int len);
void *(*dev_frame_to_cmd)(const struct kvaser_usb_net_priv *priv,
- const struct sk_buff *skb, int *frame_len,
- int *cmd_len, u16 transid);
+ const struct sk_buff *skb, int *cmd_len,
+ u16 transid);
+};
+
+struct kvaser_usb_driver_info {
+ u32 quirks;
+ enum kvaser_usb_leaf_family family;
+ const struct kvaser_usb_dev_ops *ops;
};
struct kvaser_usb_dev_cfg {
@@ -176,6 +202,8 @@ struct kvaser_usb_dev_cfg {
extern const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops;
extern const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops;
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv);
+
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
int *actual_len);
@@ -185,4 +213,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
int len);
int kvaser_usb_can_rx_over_error(struct net_device *netdev);
+
+extern const struct can_bittiming_const kvaser_usb_flexc_bittiming_const;
+
#endif /* KVASER_USB_H */
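
With kvaser_usb.h reworked, the per-family configuration (quirks, leaf family and the dev_ops vtable) moves into const struct kvaser_usb_driver_info objects referenced from the USB device table, replacing the old product-ID range checks. Each table entry stores the pointer in .driver_info through a kernel_ulong_t cast, and probe() casts it back. A minimal sketch of the pattern, with the table name and product ID chosen for illustration:

	static const struct kvaser_usb_driver_info demo_info = {
		.quirks = 0,
		.family = KVASER_LEAF,
		.ops = &kvaser_usb_leaf_dev_ops,
	};

	static const struct usb_device_id demo_table[] = {
		{ USB_DEVICE(KVASER_VENDOR_ID, 0x0120),
		  .driver_info = (kernel_ulong_t)&demo_info },
		{ }	/* terminating entry */
	};

	/* in probe(): recover the matched entry's data */
	const struct kvaser_usb_driver_info *info =
		(const struct kvaser_usb_driver_info *)id->driver_info;
	if (!info)
		return -ENODEV;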
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 0cc0fc866a2a..7135ec851341 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -13,6 +13,7 @@
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/if.h>
#include <linux/kernel.h>
@@ -30,187 +31,220 @@
#include "kvaser_usb.h"
/* Kvaser USB vendor id. */
-#define KVASER_VENDOR_ID 0x0bfd
+#define KVASER_VENDOR_ID 0x0bfd
/* Kvaser Leaf USB devices product ids */
-#define USB_LEAF_DEVEL_PRODUCT_ID 10
-#define USB_LEAF_LITE_PRODUCT_ID 11
-#define USB_LEAF_PRO_PRODUCT_ID 12
-#define USB_LEAF_SPRO_PRODUCT_ID 14
-#define USB_LEAF_PRO_LS_PRODUCT_ID 15
-#define USB_LEAF_PRO_SWC_PRODUCT_ID 16
-#define USB_LEAF_PRO_LIN_PRODUCT_ID 17
-#define USB_LEAF_SPRO_LS_PRODUCT_ID 18
-#define USB_LEAF_SPRO_SWC_PRODUCT_ID 19
-#define USB_MEMO2_DEVEL_PRODUCT_ID 22
-#define USB_MEMO2_HSHS_PRODUCT_ID 23
-#define USB_UPRO_HSHS_PRODUCT_ID 24
-#define USB_LEAF_LITE_GI_PRODUCT_ID 25
-#define USB_LEAF_PRO_OBDII_PRODUCT_ID 26
-#define USB_MEMO2_HSLS_PRODUCT_ID 27
-#define USB_LEAF_LITE_CH_PRODUCT_ID 28
-#define USB_BLACKBIRD_SPRO_PRODUCT_ID 29
-#define USB_OEM_MERCURY_PRODUCT_ID 34
-#define USB_OEM_LEAF_PRODUCT_ID 35
-#define USB_CAN_R_PRODUCT_ID 39
-#define USB_LEAF_LITE_V2_PRODUCT_ID 288
-#define USB_MINI_PCIE_HS_PRODUCT_ID 289
-#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
-#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
-#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
-#define USB_USBCAN_R_V2_PRODUCT_ID 294
-#define USB_LEAF_LIGHT_R_V2_PRODUCT_ID 295
-#define USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID 296
-#define USB_LEAF_PRODUCT_ID_END \
- USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID
+#define USB_LEAF_DEVEL_PRODUCT_ID 0x000a
+#define USB_LEAF_LITE_PRODUCT_ID 0x000b
+#define USB_LEAF_PRO_PRODUCT_ID 0x000c
+#define USB_LEAF_SPRO_PRODUCT_ID 0x000e
+#define USB_LEAF_PRO_LS_PRODUCT_ID 0x000f
+#define USB_LEAF_PRO_SWC_PRODUCT_ID 0x0010
+#define USB_LEAF_PRO_LIN_PRODUCT_ID 0x0011
+#define USB_LEAF_SPRO_LS_PRODUCT_ID 0x0012
+#define USB_LEAF_SPRO_SWC_PRODUCT_ID 0x0013
+#define USB_MEMO2_DEVEL_PRODUCT_ID 0x0016
+#define USB_MEMO2_HSHS_PRODUCT_ID 0x0017
+#define USB_UPRO_HSHS_PRODUCT_ID 0x0018
+#define USB_LEAF_LITE_GI_PRODUCT_ID 0x0019
+#define USB_LEAF_PRO_OBDII_PRODUCT_ID 0x001a
+#define USB_MEMO2_HSLS_PRODUCT_ID 0x001b
+#define USB_LEAF_LITE_CH_PRODUCT_ID 0x001c
+#define USB_BLACKBIRD_SPRO_PRODUCT_ID 0x001d
+#define USB_OEM_MERCURY_PRODUCT_ID 0x0022
+#define USB_OEM_LEAF_PRODUCT_ID 0x0023
+#define USB_CAN_R_PRODUCT_ID 0x0027
+#define USB_LEAF_LITE_V2_PRODUCT_ID 0x0120
+#define USB_MINI_PCIE_HS_PRODUCT_ID 0x0121
+#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 0x0122
+#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 0x0123
+#define USB_MINI_PCIE_2HS_PRODUCT_ID 0x0124
+#define USB_USBCAN_R_V2_PRODUCT_ID 0x0126
+#define USB_LEAF_LIGHT_R_V2_PRODUCT_ID 0x0127
+#define USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID 0x0128
/* Kvaser USBCan-II devices product ids */
-#define USB_USBCAN_REVB_PRODUCT_ID 2
-#define USB_VCI2_PRODUCT_ID 3
-#define USB_USBCAN2_PRODUCT_ID 4
-#define USB_MEMORATOR_PRODUCT_ID 5
+#define USB_USBCAN_REVB_PRODUCT_ID 0x0002
+#define USB_VCI2_PRODUCT_ID 0x0003
+#define USB_USBCAN2_PRODUCT_ID 0x0004
+#define USB_MEMORATOR_PRODUCT_ID 0x0005
/* Kvaser Minihydra USB devices product ids */
-#define USB_BLACKBIRD_V2_PRODUCT_ID 258
-#define USB_MEMO_PRO_5HS_PRODUCT_ID 260
-#define USB_USBCAN_PRO_5HS_PRODUCT_ID 261
-#define USB_USBCAN_LIGHT_4HS_PRODUCT_ID 262
-#define USB_LEAF_PRO_HS_V2_PRODUCT_ID 263
-#define USB_USBCAN_PRO_2HS_V2_PRODUCT_ID 264
-#define USB_MEMO_2HS_PRODUCT_ID 265
-#define USB_MEMO_PRO_2HS_V2_PRODUCT_ID 266
-#define USB_HYBRID_2CANLIN_PRODUCT_ID 267
-#define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 268
-#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 269
-#define USB_HYBRID_PRO_2CANLIN_PRODUCT_ID 270
-#define USB_U100_PRODUCT_ID 273
-#define USB_U100P_PRODUCT_ID 274
-#define USB_U100S_PRODUCT_ID 275
-#define USB_USBCAN_PRO_4HS_PRODUCT_ID 276
-#define USB_HYBRID_CANLIN_PRODUCT_ID 277
-#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 278
-#define USB_HYDRA_PRODUCT_ID_END \
- USB_HYBRID_PRO_CANLIN_PRODUCT_ID
-
-static inline bool kvaser_is_leaf(const struct usb_device_id *id)
-{
- return (id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
- id->idProduct <= USB_CAN_R_PRODUCT_ID) ||
- (id->idProduct >= USB_LEAF_LITE_V2_PRODUCT_ID &&
- id->idProduct <= USB_LEAF_PRODUCT_ID_END);
-}
+#define USB_BLACKBIRD_V2_PRODUCT_ID 0x0102
+#define USB_MEMO_PRO_5HS_PRODUCT_ID 0x0104
+#define USB_USBCAN_PRO_5HS_PRODUCT_ID 0x0105
+#define USB_USBCAN_LIGHT_4HS_PRODUCT_ID 0x0106
+#define USB_LEAF_PRO_HS_V2_PRODUCT_ID 0x0107
+#define USB_USBCAN_PRO_2HS_V2_PRODUCT_ID 0x0108
+#define USB_MEMO_2HS_PRODUCT_ID 0x0109
+#define USB_MEMO_PRO_2HS_V2_PRODUCT_ID 0x010a
+#define USB_HYBRID_2CANLIN_PRODUCT_ID 0x010b
+#define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 0x010c
+#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 0x010d
+#define USB_HYBRID_PRO_2CANLIN_PRODUCT_ID 0x010e
+#define USB_U100_PRODUCT_ID 0x0111
+#define USB_U100P_PRODUCT_ID 0x0112
+#define USB_U100S_PRODUCT_ID 0x0113
+#define USB_USBCAN_PRO_4HS_PRODUCT_ID 0x0114
+#define USB_HYBRID_CANLIN_PRODUCT_ID 0x0115
+#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 0x0116
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
+ .quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP,
+ .ops = &kvaser_usb_hydra_dev_ops,
+};
-static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_USBCAN_REVB_PRODUCT_ID &&
- id->idProduct <= USB_MEMORATOR_PRODUCT_ID;
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_usbcan = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_HAS_SILENT_MODE,
+ .family = KVASER_USBCAN,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
-static inline bool kvaser_is_hydra(const struct usb_device_id *id)
-{
- return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID &&
- id->idProduct <= USB_HYDRA_PRODUCT_ID_END;
-}
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf = {
+ .quirks = KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_listen = {
+ .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS |
+ KVASER_USB_QUIRK_HAS_SILENT_MODE |
+ KVASER_USB_QUIRK_IGNORE_CLK_FREQ,
+ .family = KVASER_LEAF,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
+
+static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
+ .quirks = 0,
+ .ops = &kvaser_usb_leaf_dev_ops,
+};
static const struct usb_device_id kvaser_usb_table[] = {
- /* Leaf USB product IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID) },
+ /* Leaf M32C USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID) },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS |
- KVASER_USB_HAS_SILENT_MODE },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID) },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err },
+
+ /* Leaf i.MX28 USB product IDs */
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx },
/* USBCANII USB product IDs */
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID),
- .driver_info = KVASER_USB_HAS_TXRX_ERRORS },
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan },
/* Minihydra USB product IDs */
- { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len)
{
- int actual_len; /* Not used */
-
return usb_bulk_msg(dev->udev,
usb_sndbulkpipe(dev->udev,
dev->bulk_out->bEndpointAddress),
- cmd, len, &actual_len, KVASER_USB_TIMEOUT);
+ cmd, len, NULL, KVASER_USB_TIMEOUT);
}
int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len,
@@ -279,8 +313,6 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev)
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -289,6 +321,7 @@ int kvaser_usb_can_rx_over_error(struct net_device *netdev)
static void kvaser_usb_read_bulk_callback(struct urb *urb)
{
struct kvaser_usb *dev = urb->context;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
unsigned int i;
@@ -305,8 +338,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
- dev->ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
- urb->actual_length);
+ ops->dev_read_bulk_callback(dev, urb->transfer_buffer,
+ urb->actual_length);
resubmit_urb:
usb_fill_bulk_urb(urb, dev->udev,
@@ -400,21 +433,18 @@ static int kvaser_usb_open(struct net_device *netdev)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
err = open_candev(netdev);
if (err)
return err;
- err = kvaser_usb_setup_rx_urbs(dev);
+ err = ops->dev_set_opt_mode(priv);
if (err)
goto error;
- err = dev->ops->dev_set_opt_mode(priv);
- if (err)
- goto error;
-
- err = dev->ops->dev_start_chip(priv);
+ err = ops->dev_start_chip(priv);
if (err) {
netdev_warn(netdev, "Cannot start device, error %d\n", err);
goto error;
@@ -443,7 +473,7 @@ static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
/* This method might sleep. Do not call it in the atomic context
* of URB completions.
*/
-static void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
+void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv)
{
usb_kill_anchored_urbs(&priv->tx_submitted);
kvaser_usb_reset_tx_urb_contexts(priv);
@@ -471,22 +501,23 @@ static int kvaser_usb_close(struct net_device *netdev)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int err;
netif_stop_queue(netdev);
- err = dev->ops->dev_flush_queue(priv);
+ err = ops->dev_flush_queue(priv);
if (err)
netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
- if (dev->ops->dev_reset_chip) {
- err = dev->ops->dev_reset_chip(dev, priv->channel);
+ if (ops->dev_reset_chip) {
+ err = ops->dev_reset_chip(dev, priv->channel);
if (err)
netdev_warn(netdev, "Cannot reset card, error %d\n",
err);
}
- err = dev->ops->dev_stop_chip(priv);
+ err = ops->dev_stop_chip(priv);
if (err)
netdev_warn(netdev, "Cannot stop device, error %d\n", err);
@@ -499,6 +530,91 @@ static int kvaser_usb_close(struct net_device *netdev)
return 0;
}
+static int kvaser_usb_set_bittiming(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
+ struct can_bittiming *bt = &priv->can.bittiming;
+ struct kvaser_usb_busparams busparams;
+ int tseg1 = bt->prop_seg + bt->phase_seg1;
+ int tseg2 = bt->phase_seg2;
+ int sjw = bt->sjw;
+ int err;
+
+ busparams.bitrate = cpu_to_le32(bt->bitrate);
+ busparams.sjw = (u8)sjw;
+ busparams.tseg1 = (u8)tseg1;
+ busparams.tseg2 = (u8)tseg2;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ busparams.nsamples = 3;
+ else
+ busparams.nsamples = 1;
+
+ err = ops->dev_set_bittiming(netdev, &busparams);
+ if (err)
+ return err;
+
+ err = kvaser_usb_setup_rx_urbs(priv->dev);
+ if (err)
+ return err;
+
+ err = ops->dev_get_busparams(priv);
+ if (err) {
+ /* Treat EOPNOTSUPP as success */
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ return err;
+ }
+
+ if (memcmp(&busparams, &priv->busparams_nominal,
+ sizeof(priv->busparams_nominal)) != 0)
+ err = -EINVAL;
+
+ return err;
+}
+
+static int kvaser_usb_set_data_bittiming(struct net_device *netdev)
+{
+ struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct kvaser_usb_busparams busparams;
+ int tseg1 = dbt->prop_seg + dbt->phase_seg1;
+ int tseg2 = dbt->phase_seg2;
+ int sjw = dbt->sjw;
+ int err;
+
+ if (!ops->dev_set_data_bittiming ||
+ !ops->dev_get_data_busparams)
+ return -EOPNOTSUPP;
+
+ busparams.bitrate = cpu_to_le32(dbt->bitrate);
+ busparams.sjw = (u8)sjw;
+ busparams.tseg1 = (u8)tseg1;
+ busparams.tseg2 = (u8)tseg2;
+ busparams.nsamples = 1;
+
+ err = ops->dev_set_data_bittiming(netdev, &busparams);
+ if (err)
+ return err;
+
+ err = kvaser_usb_setup_rx_urbs(priv->dev);
+ if (err)
+ return err;
+
+ err = ops->dev_get_data_busparams(priv);
+ if (err)
+ return err;
+
+ if (memcmp(&busparams, &priv->busparams_data,
+ sizeof(priv->busparams_data)) != 0)
+ err = -EINVAL;
+
+ return err;
+}
+
static void kvaser_usb_write_bulk_callback(struct urb *urb)
{
struct kvaser_usb_tx_urb_context *context = urb->context;
@@ -525,6 +641,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
struct kvaser_usb *dev = priv->dev;
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
struct net_device_stats *stats = &netdev->stats;
struct kvaser_usb_tx_urb_context *context = NULL;
struct urb *urb;
@@ -534,7 +651,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
unsigned int i;
unsigned long flags;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -567,8 +684,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
goto freeurb;
}
- buf = dev->ops->dev_frame_to_cmd(priv, skb, &context->dlc, &cmd_len,
- context->echo_index);
+ buf = ops->dev_frame_to_cmd(priv, skb, &cmd_len, context->echo_index);
if (!buf) {
stats->tx_dropped++;
dev_kfree_skb(skb);
@@ -631,8 +747,25 @@ static const struct net_device_ops kvaser_usb_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct net_device_ops kvaser_usb_netdev_ops_hwts = {
+ .ndo_open = kvaser_usb_open,
+ .ndo_stop = kvaser_usb_close,
+ .ndo_eth_ioctl = can_eth_ioctl_hwts,
+ .ndo_start_xmit = kvaser_usb_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static const struct ethtool_ops kvaser_usb_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+static const struct ethtool_ops kvaser_usb_ethtool_ops_hwts = {
+ .get_ts_info = can_ethtool_op_get_ts_info_hwts,
+};
+
static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
{
+ const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops;
int i;
for (i = 0; i < dev->nchannels; i++) {
@@ -648,19 +781,23 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev)
if (!dev->nets[i])
continue;
+ if (ops->dev_remove_channel)
+ ops->dev_remove_channel(dev->nets[i]);
+
free_candev(dev->nets[i]->netdev);
}
}
-static int kvaser_usb_init_one(struct kvaser_usb *dev,
- const struct usb_device_id *id, int channel)
+static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel)
{
struct net_device *netdev;
struct kvaser_usb_net_priv *priv;
+ const struct kvaser_usb_driver_info *driver_info = dev->driver_info;
+ const struct kvaser_usb_dev_ops *ops = driver_info->ops;
int err;
- if (dev->ops->dev_reset_chip) {
- err = dev->ops->dev_reset_chip(dev, channel);
+ if (ops->dev_reset_chip) {
+ err = ops->dev_reset_chip(dev, channel);
if (err)
return err;
}
@@ -677,6 +814,8 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev,
init_usb_anchor(&priv->tx_submitted);
init_completion(&priv->start_comp);
init_completion(&priv->stop_comp);
+ init_completion(&priv->flush_comp);
+ init_completion(&priv->get_busparams_comp);
priv->can.ctrlmode_supported = 0;
priv->dev = dev;
@@ -689,42 +828,56 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev,
priv->can.state = CAN_STATE_STOPPED;
priv->can.clock.freq = dev->cfg->clock.freq;
priv->can.bittiming_const = dev->cfg->bittiming_const;
- priv->can.do_set_bittiming = dev->ops->dev_set_bittiming;
- priv->can.do_set_mode = dev->ops->dev_set_mode;
- if ((id->driver_info & KVASER_USB_HAS_TXRX_ERRORS) ||
+ priv->can.do_set_bittiming = kvaser_usb_set_bittiming;
+ priv->can.do_set_mode = ops->dev_set_mode;
+ if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) ||
(priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP))
- priv->can.do_get_berr_counter = dev->ops->dev_get_berr_counter;
- if (id->driver_info & KVASER_USB_HAS_SILENT_MODE)
+ priv->can.do_get_berr_counter = ops->dev_get_berr_counter;
+ if (driver_info->quirks & KVASER_USB_QUIRK_HAS_SILENT_MODE)
priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported;
if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
priv->can.data_bittiming_const = dev->cfg->data_bittiming_const;
- priv->can.do_set_data_bittiming =
- dev->ops->dev_set_data_bittiming;
+ priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming;
}
netdev->flags |= IFF_ECHO;
netdev->netdev_ops = &kvaser_usb_netdev_ops;
-
+ if (driver_info->quirks & KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP) {
+ netdev->netdev_ops = &kvaser_usb_netdev_ops_hwts;
+ netdev->ethtool_ops = &kvaser_usb_ethtool_ops_hwts;
+ } else {
+ netdev->netdev_ops = &kvaser_usb_netdev_ops;
+ netdev->ethtool_ops = &kvaser_usb_ethtool_ops;
+ }
SET_NETDEV_DEV(netdev, &dev->intf->dev);
netdev->dev_id = channel;
dev->nets[channel] = priv;
+ if (ops->dev_init_channel) {
+ err = ops->dev_init_channel(priv);
+ if (err)
+ goto err;
+ }
+
err = register_candev(netdev);
if (err) {
dev_err(&dev->intf->dev, "Failed to register CAN device\n");
- free_candev(netdev);
- dev->nets[channel] = NULL;
- return err;
+ goto err;
}
netdev_dbg(netdev, "device registered\n");
return 0;
+
+err:
+ free_candev(netdev);
+ dev->nets[channel] = NULL;
+ return err;
}
static int kvaser_usb_probe(struct usb_interface *intf,
@@ -733,29 +886,22 @@ static int kvaser_usb_probe(struct usb_interface *intf,
struct kvaser_usb *dev;
int err;
int i;
+ const struct kvaser_usb_driver_info *driver_info;
+ const struct kvaser_usb_dev_ops *ops;
+
+ driver_info = (const struct kvaser_usb_driver_info *)id->driver_info;
+ if (!driver_info)
+ return -ENODEV;
dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
- if (kvaser_is_leaf(id)) {
- dev->card_data.leaf.family = KVASER_LEAF;
- dev->ops = &kvaser_usb_leaf_dev_ops;
- } else if (kvaser_is_usbcan(id)) {
- dev->card_data.leaf.family = KVASER_USBCAN;
- dev->ops = &kvaser_usb_leaf_dev_ops;
- } else if (kvaser_is_hydra(id)) {
- dev->ops = &kvaser_usb_hydra_dev_ops;
- } else {
- dev_err(&intf->dev,
- "Product ID (%d) is not a supported Kvaser USB device\n",
- id->idProduct);
- return -ENODEV;
- }
-
dev->intf = intf;
+ dev->driver_info = driver_info;
+ ops = driver_info->ops;
- err = dev->ops->dev_setup_endpoints(dev);
+ err = ops->dev_setup_endpoints(dev);
if (err) {
dev_err(&intf->dev, "Cannot get usb endpoint(s)");
return err;
@@ -769,22 +915,22 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev->card_data.ctrlmode_supported = 0;
dev->card_data.capabilities = 0;
- err = dev->ops->dev_init_card(dev);
+ err = ops->dev_init_card(dev);
if (err) {
dev_err(&intf->dev,
"Failed to initialize card, error %d\n", err);
return err;
}
- err = dev->ops->dev_get_software_info(dev);
+ err = ops->dev_get_software_info(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get software info, error %d\n", err);
return err;
}
- if (dev->ops->dev_get_software_details) {
- err = dev->ops->dev_get_software_details(dev);
+ if (ops->dev_get_software_details) {
+ err = ops->dev_get_software_details(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get software details, error %d\n", err);
@@ -802,14 +948,14 @@ static int kvaser_usb_probe(struct usb_interface *intf,
dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs);
- err = dev->ops->dev_get_card_info(dev);
+ err = ops->dev_get_card_info(dev);
if (err) {
dev_err(&intf->dev, "Cannot get card info, error %d\n", err);
return err;
}
- if (dev->ops->dev_get_capabilities) {
- err = dev->ops->dev_get_capabilities(dev);
+ if (ops->dev_get_capabilities) {
+ err = ops->dev_get_capabilities(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get capabilities, error %d\n", err);
@@ -819,7 +965,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
}
for (i = 0; i < dev->nchannels; i++) {
- err = kvaser_usb_init_one(dev, id, i);
+ err = kvaser_usb_init_one(dev, i);
if (err) {
kvaser_usb_remove_interfaces(dev);
return err;
@@ -842,7 +988,7 @@ static void kvaser_usb_disconnect(struct usb_interface *intf)
}
static struct usb_driver kvaser_usb_driver = {
- .name = "kvaser_usb",
+ .name = KBUILD_MODNAME,
.probe = kvaser_usb_probe,
.disconnect = kvaser_usb_disconnect,
.id_table = kvaser_usb_table,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index dcee8dc828ec..ef341c4254fc 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/units.h>
#include <linux/usb.h>
#include <linux/can.h>
@@ -44,6 +45,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt;
/* Minihydra command IDs */
#define CMD_SET_BUSPARAMS_REQ 16
+#define CMD_GET_BUSPARAMS_REQ 17
+#define CMD_GET_BUSPARAMS_RESP 18
#define CMD_GET_CHIP_STATE_REQ 19
#define CMD_CHIP_STATE_EVENT 20
#define CMD_SET_DRIVERMODE_REQ 21
@@ -195,21 +198,26 @@ struct kvaser_cmd_chip_state_event {
#define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01
#define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02
struct kvaser_cmd_set_busparams {
- __le32 bitrate;
- u8 tseg1;
- u8 tseg2;
- u8 sjw;
- u8 nsamples;
+ struct kvaser_usb_busparams busparams_nominal;
u8 reserved0[4];
- __le32 bitrate_d;
- u8 tseg1_d;
- u8 tseg2_d;
- u8 sjw_d;
- u8 nsamples_d;
+ struct kvaser_usb_busparams busparams_data;
u8 canfd_mode;
u8 reserved1[7];
} __packed;
+/* Busparam type */
+#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN 0x00
+#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD 0x01
+struct kvaser_cmd_get_busparams_req {
+ u8 type;
+ u8 reserved[27];
+} __packed;
+
+struct kvaser_cmd_get_busparams_res {
+ struct kvaser_usb_busparams busparams;
+ u8 reserved[20];
+} __packed;
+
/* Ctrl modes */
#define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01
#define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02
@@ -280,6 +288,8 @@ struct kvaser_cmd {
struct kvaser_cmd_error_event error_event;
struct kvaser_cmd_set_busparams set_busparams_req;
+ struct kvaser_cmd_get_busparams_req get_busparams_req;
+ struct kvaser_cmd_get_busparams_res get_busparams_res;
struct kvaser_cmd_chip_state_event chip_state_event;
@@ -295,6 +305,7 @@ struct kvaser_cmd {
#define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1)
#define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4)
#define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5)
+#define KVASER_USB_HYDRA_CF_FLAG_TX_ACK BIT(6)
/* CAN frame flags. Used in ext_rx_can and ext_tx_can */
#define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12)
#define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13)
@@ -361,6 +372,10 @@ struct kvaser_cmd_ext {
} __packed;
} __packed;
+struct kvaser_usb_net_hydra_priv {
+ int pending_get_busparams_type;
+};
+
static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
.name = "kvaser_usb_kcan",
.tseg1_min = 1,
@@ -373,7 +388,7 @@ static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = {
.brp_inc = 1,
};
-static const struct can_bittiming_const kvaser_usb_hydra_flexc_bittiming_c = {
+const struct can_bittiming_const kvaser_usb_flexc_bittiming_const = {
.name = "kvaser_usb_flex",
.tseg1_min = 4,
.tseg1_max = 16,
@@ -530,13 +545,15 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
u8 cmd_no, int channel)
{
struct kvaser_cmd *cmd;
+ size_t cmd_len;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->header.cmd_no = cmd_no;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
if (channel < 0) {
kvaser_usb_hydra_set_cmd_dest_he
(cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
@@ -553,7 +570,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err)
goto end;
@@ -569,21 +586,22 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
{
struct kvaser_cmd *cmd;
struct kvaser_usb *dev = priv->dev;
+ size_t cmd_len;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return -ENOMEM;
cmd->header.cmd_no = cmd_no;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd_async(priv, cmd,
- kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd_async(priv, cmd, cmd_len);
if (err)
kfree(cmd);
@@ -692,7 +710,7 @@ static int kvaser_usb_hydra_map_channel(struct kvaser_usb *dev, u16 transid,
struct kvaser_cmd *cmd;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -727,24 +745,26 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
{
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
struct kvaser_cmd *cmd;
+ size_t cmd_len;
u32 value = 0;
u32 mask = 0;
u16 cap_cmd_res;
int err;
int i;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he);
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err)
goto end;
@@ -838,6 +858,39 @@ static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev,
complete(&priv->flush_comp);
}
+static void kvaser_usb_hydra_get_busparams_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ struct kvaser_usb_net_hydra_priv *hydra;
+
+ priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
+ if (!priv)
+ return;
+
+ hydra = priv->sub_priv;
+ if (!hydra)
+ return;
+
+ switch (hydra->pending_get_busparams_type) {
+ case KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN:
+ memcpy(&priv->busparams_nominal, &cmd->get_busparams_res.busparams,
+ sizeof(priv->busparams_nominal));
+ break;
+ case KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD:
+ memcpy(&priv->busparams_data, &cmd->get_busparams_res.busparams,
+ sizeof(priv->busparams_nominal));
+ break;
+ default:
+ dev_warn(&dev->intf->dev, "Unknown get_busparams_type %d\n",
+ hydra->pending_get_busparams_type);
+ break;
+ }
+ hydra->pending_get_busparams_type = -1;
+
+ complete(&priv->get_busparams_comp);
+}
+
static void
kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv,
u8 bus_status,
@@ -869,7 +922,6 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
struct net_device *netdev = priv->netdev;
struct can_frame *cf;
struct sk_buff *skb;
- struct net_device_stats *stats;
enum can_state new_state, old_state;
old_state = priv->can.state;
@@ -916,12 +968,12 @@ static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv,
new_state < CAN_STATE_BUS_OFF)
priv->can.can_stats.restarts++;
- cf->data[6] = bec->txerr;
- cf->data[7] = bec->rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec->txerr;
+ cf->data[7] = bec->rxerr;
+ }
- stats = &netdev->stats;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -1071,11 +1123,12 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
shhwtstamps->hwtstamp = hwtstamp;
cf->can_id |= CAN_ERR_BUSERROR;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
priv->bec.txerr = bec.txerr;
@@ -1109,8 +1162,6 @@ static void kvaser_usb_hydra_one_shot_fail(struct kvaser_usb_net_priv *priv,
}
stats->tx_errors++;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -1120,7 +1171,9 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
struct kvaser_usb_tx_urb_context *context;
struct kvaser_usb_net_priv *priv;
unsigned long irq_flags;
+ unsigned int len;
bool one_shot_fail = false;
+ bool is_err_frame = false;
u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd);
priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd);
@@ -1139,24 +1192,28 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev,
kvaser_usb_hydra_one_shot_fail(priv, cmd_ext);
one_shot_fail = true;
}
+
+ is_err_frame = flags & KVASER_USB_HYDRA_CF_FLAG_TX_ACK &&
+ flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME;
}
context = &priv->tx_contexts[transid % dev->max_tx_urbs];
- if (!one_shot_fail) {
- struct net_device_stats *stats = &priv->netdev->stats;
-
- stats->tx_packets++;
- stats->tx_bytes += can_fd_dlc2len(context->dlc);
- }
spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags);
- can_get_echo_skb(priv->netdev, context->echo_index, NULL);
+ len = can_get_echo_skb(priv->netdev, context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
spin_unlock_irqrestore(&priv->tx_contexts_lock, irq_flags);
+
+ if (!one_shot_fail && !is_err_frame) {
+ struct net_device_stats *stats = &priv->netdev->stats;
+
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ }
}
static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
@@ -1208,13 +1265,15 @@ static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev,
cf->len = can_cc_dlc2len(cmd->rx_can.dlc);
- if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) {
cf->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cf->data, cmd->rx_can.data, cf->len);
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_rx(skb);
}
@@ -1286,13 +1345,15 @@ static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev,
cf->len = can_cc_dlc2len(dlc);
}
- if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME)
+ if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) {
cf->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cf->data, cmd->rx_can.kcan_payload, cf->len);
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+
netif_rx(skb);
}
@@ -1316,6 +1377,10 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev,
kvaser_usb_hydra_state_event(dev, cmd);
break;
+ case CMD_GET_BUSPARAMS_RESP:
+ kvaser_usb_hydra_get_busparams_reply(dev, cmd);
+ break;
+
case CMD_ERROR_EVENT:
kvaser_usb_hydra_error_event(dev, cmd);
break;
@@ -1371,8 +1436,8 @@ static void kvaser_usb_hydra_handle_cmd(const struct kvaser_usb *dev,
static void *
kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
- const struct sk_buff *skb, int *frame_len,
- int *cmd_len, u16 transid)
+ const struct sk_buff *skb, int *cmd_len,
+ u16 transid)
{
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd_ext *cmd;
@@ -1384,9 +1449,7 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
u32 kcan_id;
u32 kcan_header;
- *frame_len = nbr_of_bytes;
-
- cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return NULL;
@@ -1451,8 +1514,8 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
static void *
kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
- const struct sk_buff *skb, int *frame_len,
- int *cmd_len, u16 transid)
+ const struct sk_buff *skb, int *cmd_len,
+ u16 transid)
{
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
@@ -1460,9 +1523,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
u32 flags;
u32 id;
- *frame_len = cf->len;
-
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return NULL;
@@ -1495,7 +1556,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
cmd->tx_can.id = cpu_to_le32(id);
cmd->tx_can.flags = flags;
- memcpy(cmd->tx_can.data, cf->data, *frame_len);
+ memcpy(cmd->tx_can.data, cf->data, cf->len);
return cmd;
}
@@ -1516,61 +1577,101 @@ static int kvaser_usb_hydra_set_mode(struct net_device *netdev,
return err;
}
-static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
+static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv,
+ int busparams_type)
+{
+ struct kvaser_usb *dev = priv->dev;
+ struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv;
+ struct kvaser_cmd *cmd;
+ size_t cmd_len;
+ int err;
+
+ if (!hydra)
+ return -EINVAL;
+
+ cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ kvaser_usb_hydra_set_cmd_dest_he
+ (cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
+ kvaser_usb_hydra_set_cmd_transid
+ (cmd, kvaser_usb_hydra_get_next_transid(dev));
+ cmd->get_busparams_req.type = busparams_type;
+ hydra->pending_get_busparams_type = busparams_type;
+
+ reinit_completion(&priv->get_busparams_comp);
+
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->get_busparams_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return err;
+}
+
+static int kvaser_usb_hydra_get_nominal_busparams(struct kvaser_usb_net_priv *priv)
+{
+ return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN);
+}
+
+static int kvaser_usb_hydra_get_data_busparams(struct kvaser_usb_net_priv *priv)
+{
+ return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD);
+}
+
+static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams)
{
struct kvaser_cmd *cmd;
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *bt = &priv->can.bittiming;
struct kvaser_usb *dev = priv->dev;
- int tseg1 = bt->prop_seg + bt->phase_seg1;
- int tseg2 = bt->phase_seg2;
- int sjw = bt->sjw;
+ size_t cmd_len;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ;
- cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate);
- cmd->set_busparams_req.sjw = (u8)sjw;
- cmd->set_busparams_req.tseg1 = (u8)tseg1;
- cmd->set_busparams_req.tseg2 = (u8)tseg2;
- cmd->set_busparams_req.nsamples = 1;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ memcpy(&cmd->set_busparams_req.busparams_nominal, busparams,
+ sizeof(cmd->set_busparams_req.busparams_nominal));
kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
kfree(cmd);
return err;
}
-static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
+static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams)
{
struct kvaser_cmd *cmd;
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *dbt = &priv->can.data_bittiming;
struct kvaser_usb *dev = priv->dev;
- int tseg1 = dbt->prop_seg + dbt->phase_seg1;
- int tseg2 = dbt->phase_seg2;
- int sjw = dbt->sjw;
+ size_t cmd_len;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ;
- cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate);
- cmd->set_busparams_req.sjw_d = (u8)sjw;
- cmd->set_busparams_req.tseg1_d = (u8)tseg1;
- cmd->set_busparams_req.tseg2_d = (u8)tseg2;
- cmd->set_busparams_req.nsamples_d = 1;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
+ memcpy(&cmd->set_busparams_req.busparams_data, busparams,
+ sizeof(cmd->set_busparams_req.busparams_data));
if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1586,7 +1687,7 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
kfree(cmd);
@@ -1677,6 +1778,19 @@ static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev)
return 0;
}
+static int kvaser_usb_hydra_init_channel(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_usb_net_hydra_priv *hydra;
+
+ hydra = devm_kzalloc(&priv->dev->intf->dev, sizeof(*hydra), GFP_KERNEL);
+ if (!hydra)
+ return -ENOMEM;
+
+ priv->sub_priv = hydra;
+
+ return 0;
+}
+
static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
{
struct kvaser_cmd cmd;
@@ -1701,15 +1815,17 @@ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev)
static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
{
struct kvaser_cmd *cmd;
+ size_t cmd_len;
int err;
u32 flags;
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
cmd->sw_detail_req.use_ext_cmd = 1;
kvaser_usb_hydra_set_cmd_dest_he
(cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL);
@@ -1717,7 +1833,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
kvaser_usb_hydra_set_cmd_transid
(cmd, kvaser_usb_hydra_get_next_transid(dev));
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
if (err)
goto end;
@@ -1835,6 +1951,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
{
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
+ size_t cmd_len;
int err;
if ((priv->can.ctrlmode &
@@ -1845,11 +1962,12 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
return -EINVAL;
}
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ;
+ cmd_len = kvaser_usb_hydra_cmd_size(cmd);
kvaser_usb_hydra_set_cmd_dest_he
(cmd, dev->card_data.hydra.channel_to_he[priv->channel]);
kvaser_usb_hydra_set_cmd_transid
@@ -1859,7 +1977,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
else
cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL;
- err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd));
+ err = kvaser_usb_send_cmd(dev, cmd, cmd_len);
kfree(cmd);
return err;
@@ -1869,7 +1987,7 @@ static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->start_comp);
+ reinit_completion(&priv->start_comp);
err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ,
priv->channel);
@@ -1887,7 +2005,7 @@ static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->stop_comp);
+ reinit_completion(&priv->stop_comp);
/* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT
* see comment in kvaser_usb_hydra_update_state()
@@ -1910,7 +2028,7 @@ static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv)
{
int err;
- init_completion(&priv->flush_comp);
+ reinit_completion(&priv->flush_comp);
err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE,
priv->channel);
@@ -2003,17 +2121,17 @@ static void kvaser_usb_hydra_read_bulk_callback(struct kvaser_usb *dev,
static void *
kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
- const struct sk_buff *skb, int *frame_len,
- int *cmd_len, u16 transid)
+ const struct sk_buff *skb, int *cmd_len,
+ u16 transid)
{
void *buf;
if (priv->dev->card_data.capabilities & KVASER_USB_HYDRA_CAP_EXT_CMD)
- buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, frame_len,
- cmd_len, transid);
+ buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, cmd_len,
+ transid);
else
- buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, frame_len,
- cmd_len, transid);
+ buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, cmd_len,
+ transid);
return buf;
}
@@ -2021,10 +2139,13 @@ kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
.dev_set_mode = kvaser_usb_hydra_set_mode,
.dev_set_bittiming = kvaser_usb_hydra_set_bittiming,
+ .dev_get_busparams = kvaser_usb_hydra_get_nominal_busparams,
.dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming,
+ .dev_get_data_busparams = kvaser_usb_hydra_get_data_busparams,
.dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter,
.dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints,
.dev_init_card = kvaser_usb_hydra_init_card,
+ .dev_init_channel = kvaser_usb_hydra_init_channel,
.dev_get_software_info = kvaser_usb_hydra_get_software_info,
.dev_get_software_details = kvaser_usb_hydra_get_software_details,
.dev_get_card_info = kvaser_usb_hydra_get_card_info,
@@ -2040,7 +2161,7 @@ const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = {
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = {
.clock = {
- .freq = 80000000,
+ .freq = 80 * MEGA /* Hz */,
},
.timestamp_freq = 80,
.bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c,
@@ -2049,15 +2170,15 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = {
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = {
.clock = {
- .freq = 24000000,
+ .freq = 24 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_hydra_flexc_bittiming_c,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = {
.clock = {
- .freq = 80000000,
+ .freq = 80 * MEGA /* Hz */,
},
.timestamp_freq = 24,
.bittiming_const = &kvaser_usb_hydra_rt_bittiming_c,
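Note on the freq conversions above: MEGA comes from <linux/units.h> and expands to 1000000UL, so 80 * MEGA is still 80000000 Hz and 24 * MEGA is still 24000000 Hz; only the spelling of the constants changes.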
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
index f7af1bf5ab46..1c2f99ce4c6c 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
@@ -19,7 +19,9 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/units.h>
#include <linux/usb.h>
+#include <linux/workqueue.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -55,6 +57,9 @@
#define CMD_RX_EXT_MESSAGE 14
#define CMD_TX_EXT_MESSAGE 15
#define CMD_SET_BUS_PARAMS 16
+#define CMD_GET_BUS_PARAMS 17
+#define CMD_GET_BUS_PARAMS_REPLY 18
+#define CMD_GET_CHIP_STATE 19
#define CMD_CHIP_STATE_EVENT 20
#define CMD_SET_CTRL_MODE 21
#define CMD_RESET_CHIP 24
@@ -69,10 +74,13 @@
#define CMD_GET_CARD_INFO_REPLY 35
#define CMD_GET_SOFTWARE_INFO 38
#define CMD_GET_SOFTWARE_INFO_REPLY 39
+#define CMD_ERROR_EVENT 45
#define CMD_FLUSH_QUEUE 48
#define CMD_TX_ACKNOWLEDGE 50
#define CMD_CAN_ERROR_EVENT 51
#define CMD_FLUSH_QUEUE_REPLY 68
+#define CMD_GET_CAPABILITIES_REQ 95
+#define CMD_GET_CAPABILITIES_RESP 96
#define CMD_LEAF_LOG_MESSAGE 106
@@ -82,6 +90,8 @@
#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)
+#define KVASER_USB_LEAF_SWOPTION_EXT_CAP BIT(12)
+
/* error factors */
#define M16C_EF_ACKE BIT(0)
#define M16C_EF_CRCE BIT(1)
@@ -100,16 +110,6 @@
#define USBCAN_ERROR_STATE_RX_ERROR BIT(1)
#define USBCAN_ERROR_STATE_BUSERROR BIT(2)
-/* bittiming parameters */
-#define KVASER_USB_TSEG1_MIN 1
-#define KVASER_USB_TSEG1_MAX 16
-#define KVASER_USB_TSEG2_MIN 1
-#define KVASER_USB_TSEG2_MAX 8
-#define KVASER_USB_SJW_MAX 4
-#define KVASER_USB_BRP_MIN 1
-#define KVASER_USB_BRP_MAX 64
-#define KVASER_USB_BRP_INC 1
-
/* ctrl modes */
#define KVASER_CTRL_MODE_NORMAL 1
#define KVASER_CTRL_MODE_SILENT 2
@@ -166,11 +166,7 @@ struct usbcan_cmd_softinfo {
struct kvaser_cmd_busparams {
u8 tid;
u8 channel;
- __le32 bitrate;
- u8 tseg1;
- u8 tseg2;
- u8 sjw;
- u8 no_samp;
+ struct kvaser_usb_busparams busparams;
} __packed;
struct kvaser_cmd_tx_can {
@@ -239,7 +235,7 @@ struct kvaser_cmd_tx_acknowledge_header {
u8 tid;
} __packed;
-struct leaf_cmd_error_event {
+struct leaf_cmd_can_error_event {
u8 tid;
u8 flags;
__le16 time[3];
@@ -251,7 +247,7 @@ struct leaf_cmd_error_event {
u8 error_factor;
} __packed;
-struct usbcan_cmd_error_event {
+struct usbcan_cmd_can_error_event {
u8 tid;
u8 padding;
u8 tx_errors_count_ch0;
@@ -263,6 +259,28 @@ struct usbcan_cmd_error_event {
__le16 time;
} __packed;
+/* CMD_ERROR_EVENT error codes */
+#define KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL 0x8
+#define KVASER_USB_LEAF_ERROR_EVENT_PARAM 0x9
+
+struct leaf_cmd_error_event {
+ u8 tid;
+ u8 error_code;
+ __le16 timestamp[3];
+ __le16 padding;
+ __le16 info1;
+ __le16 info2;
+} __packed;
+
+struct usbcan_cmd_error_event {
+ u8 tid;
+ u8 error_code;
+ __le16 info1;
+ __le16 info2;
+ __le16 timestamp;
+ __le16 padding;
+} __packed;
+
struct kvaser_cmd_ctrl_mode {
u8 tid;
u8 channel;
@@ -287,6 +305,28 @@ struct leaf_cmd_log_message {
u8 data[8];
} __packed;
+/* Sub commands for cap_req and cap_res */
+#define KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE 0x02
+#define KVASER_USB_LEAF_CAP_CMD_ERR_REPORT 0x05
+struct kvaser_cmd_cap_req {
+ __le16 padding0;
+ __le16 cap_cmd;
+ __le16 padding1;
+ __le16 channel;
+} __packed;
+
+/* Status codes for cap_res */
+#define KVASER_USB_LEAF_CAP_STAT_OK 0x00
+#define KVASER_USB_LEAF_CAP_STAT_NOT_IMPL 0x01
+#define KVASER_USB_LEAF_CAP_STAT_UNAVAIL 0x02
+struct kvaser_cmd_cap_res {
+ __le16 padding;
+ __le16 cap_cmd;
+ __le16 status;
+ __le32 mask;
+ __le32 value;
+} __packed;
+
struct kvaser_cmd {
u8 len;
u8 id;
@@ -302,14 +342,18 @@ struct kvaser_cmd {
struct leaf_cmd_softinfo softinfo;
struct leaf_cmd_rx_can rx_can;
struct leaf_cmd_chip_state_event chip_state_event;
- struct leaf_cmd_error_event error_event;
+ struct leaf_cmd_can_error_event can_error_event;
struct leaf_cmd_log_message log_message;
+ struct leaf_cmd_error_event error_event;
+ struct kvaser_cmd_cap_req cap_req;
+ struct kvaser_cmd_cap_res cap_res;
} __packed leaf;
union {
struct usbcan_cmd_softinfo softinfo;
struct usbcan_cmd_rx_can rx_can;
struct usbcan_cmd_chip_state_event chip_state_event;
+ struct usbcan_cmd_can_error_event can_error_event;
struct usbcan_cmd_error_event error_event;
} __packed usbcan;
@@ -319,6 +363,42 @@ struct kvaser_cmd {
} u;
} __packed;
+#define CMD_SIZE_ANY 0xff
+#define kvaser_fsize(field) sizeof_field(struct kvaser_cmd, field)
+
+static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = {
+ [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo),
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can),
+ [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.can_error_event),
+ [CMD_GET_CAPABILITIES_RESP] = kvaser_fsize(u.leaf.cap_res),
+ [CMD_GET_BUS_PARAMS_REPLY] = kvaser_fsize(u.busparams),
+ [CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event),
+ /* ignored events: */
+ [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY,
+};
+
+static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = {
+ [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple),
+ [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo),
+ [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header),
+ [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo),
+ [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can),
+ [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event),
+ [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event),
+ [CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event),
+ /* ignored events: */
+ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY,
+};
+
/* Summary of a kvaser error event, for a unified Leaf/Usbcan error
* handling. Some discrepancies between the two families exist:
*
@@ -342,62 +422,126 @@ struct kvaser_usb_err_summary {
};
};
-static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
- .name = "kvaser_usb",
- .tseg1_min = KVASER_USB_TSEG1_MIN,
- .tseg1_max = KVASER_USB_TSEG1_MAX,
- .tseg2_min = KVASER_USB_TSEG2_MIN,
- .tseg2_max = KVASER_USB_TSEG2_MAX,
- .sjw_max = KVASER_USB_SJW_MAX,
- .brp_min = KVASER_USB_BRP_MIN,
- .brp_max = KVASER_USB_BRP_MAX,
- .brp_inc = KVASER_USB_BRP_INC,
+struct kvaser_usb_net_leaf_priv {
+ struct kvaser_usb_net_priv *net;
+
+ struct delayed_work chip_state_req_work;
+
+ /* started but not reported as bus-on yet */
+ bool joining_bus;
+};
+
+static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = {
+ .name = "kvaser_usb_ucii",
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 16,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const kvaser_usb_leaf_m32c_bittiming_const = {
+ .name = "kvaser_usb_leaf",
+ .tseg1_min = 3,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 2,
+ .brp_max = 128,
+ .brp_inc = 2,
+};
+
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = {
+ .clock = {
+ .freq = 8 * MEGA /* Hz */,
+ },
+ .timestamp_freq = 1,
+ .bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = {
.clock = {
- .freq = 8000000,
+ .freq = 16 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = {
.clock = {
- .freq = 16000000,
+ .freq = 16 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = {
.clock = {
- .freq = 24000000,
+ .freq = 24 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
-static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
+static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = {
.clock = {
- .freq = 32000000,
+ .freq = 32 * MEGA /* Hz */,
},
.timestamp_freq = 1,
- .bittiming_const = &kvaser_usb_leaf_bittiming_const,
+ .bittiming_const = &kvaser_usb_flexc_bittiming_const,
};
+static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ /* buffer size >= cmd->len ensured by caller */
+ u8 min_size = 0;
+
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_leaf))
+ min_size = kvaser_usb_leaf_cmd_sizes_leaf[cmd->id];
+ break;
+ case KVASER_USBCAN:
+ if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_usbcan))
+ min_size = kvaser_usb_leaf_cmd_sizes_usbcan[cmd->id];
+ break;
+ }
+
+ if (min_size == CMD_SIZE_ANY)
+ return 0;
+
+ if (min_size) {
+ min_size += CMD_HEADER_LEN;
+ if (cmd->len >= min_size)
+ return 0;
+
+ dev_err_ratelimited(&dev->intf->dev,
+ "Received command %u too short (size %u, needed %u)",
+ cmd->id, cmd->len, min_size);
+ return -EIO;
+ }
+
+ dev_warn_ratelimited(&dev->intf->dev,
+ "Unhandled command (%d, size %d)\n",
+ cmd->id, cmd->len);
+ return -EINVAL;
+}
+
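The per-command tables above record the minimum payload length for each reply the firmware may send; kvaser_usb_leaf_verify_size() then adds the header length and rejects anything shorter. The same lookup-table idea in isolation, with made-up message layouts rather than the driver's own, might look like this:

        #include <linux/kernel.h>
        #include <linux/stddef.h>       /* sizeof_field(), offsetof() */
        #include <linux/types.h>

        struct demo_msg {
                u8 len;                 /* total length claimed by the device */
                u8 id;
                union {
                        u8 raw[32];
                        struct { u8 a, b; } short_reply;
                        struct { u32 x, y, z; } long_reply;
                } u;
        };

        static const u8 demo_min_sizes[] = {
                [1] = sizeof_field(struct demo_msg, u.short_reply),
                [2] = sizeof_field(struct demo_msg, u.long_reply),
        };

        static bool demo_msg_ok(const struct demo_msg *msg)
        {
                u8 min = 0;

                if (msg->id < ARRAY_SIZE(demo_min_sizes))
                        min = demo_min_sizes[msg->id];

                /* unknown id or too-short payload: drop it */
                return min && msg->len >= min + offsetof(struct demo_msg, u);
        }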
static void *
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
- const struct sk_buff *skb, int *frame_len,
- int *cmd_len, u16 transid)
+ const struct sk_buff *skb, int *cmd_len,
+ u16 transid)
{
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
u8 *cmd_tx_can_flags = NULL; /* GCC */
struct can_frame *cf = (struct can_frame *)skb->data;
- *frame_len = cf->len;
-
cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
if (cmd) {
cmd->u.tx_can.tid = transid & 0xff;
@@ -405,7 +549,7 @@ kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
sizeof(struct kvaser_cmd_tx_can);
cmd->u.tx_can.channel = priv->channel;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags;
break;
@@ -493,6 +637,9 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
end:
kfree(buf);
+ if (err == 0)
+ err = kvaser_usb_leaf_verify_size(dev, cmd);
+
return err;
}
@@ -525,16 +672,26 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
dev->fw_version = le32_to_cpu(softinfo->fw_version);
dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);
- switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
- case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
- dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz;
- break;
- case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
- dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz;
- break;
- case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
- dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz;
- break;
+ if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP)
+ dev->card_data.capabilities |= KVASER_USB_CAP_EXT_CAP;
+
+ if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) {
+ /* Firmware expects bittiming parameters calculated for 16MHz
+ * clock, regardless of the actual clock
+ */
+ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg;
+ } else {
+ switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
+ case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_16mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_24mhz;
+ break;
+ case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
+ dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_32mhz;
+ break;
+ }
}
}
@@ -551,7 +708,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
if (err)
return err;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
break;
@@ -559,7 +716,7 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
- dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz;
+ dev->cfg = &kvaser_usb_leaf_usbcan_dev_cfg;
break;
}
@@ -598,13 +755,123 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev)
dev->nchannels = cmd.u.cardinfo.nchannels;
if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES ||
- (dev->card_data.leaf.family == KVASER_USBCAN &&
+ (dev->driver_info->family == KVASER_USBCAN &&
dev->nchannels > MAX_USBCAN_NET_DEVICES))
return -EINVAL;
return 0;
}
+static int kvaser_usb_leaf_get_single_capability(struct kvaser_usb *dev,
+ u16 cap_cmd_req, u16 *status)
+{
+ struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
+ struct kvaser_cmd *cmd;
+ u32 value = 0;
+ u32 mask = 0;
+ u16 cap_cmd_res;
+ int err;
+ int i;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->id = CMD_GET_CAPABILITIES_REQ;
+ cmd->u.leaf.cap_req.cap_cmd = cpu_to_le16(cap_cmd_req);
+ cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_cap_req);
+
+ err = kvaser_usb_send_cmd(dev, cmd, cmd->len);
+ if (err)
+ goto end;
+
+ err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd);
+ if (err)
+ goto end;
+
+ *status = le16_to_cpu(cmd->u.leaf.cap_res.status);
+
+ if (*status != KVASER_USB_LEAF_CAP_STAT_OK)
+ goto end;
+
+ cap_cmd_res = le16_to_cpu(cmd->u.leaf.cap_res.cap_cmd);
+ switch (cap_cmd_res) {
+ case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
+ case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
+ value = le32_to_cpu(cmd->u.leaf.cap_res.value);
+ mask = le32_to_cpu(cmd->u.leaf.cap_res.mask);
+ break;
+ default:
+ dev_warn(&dev->intf->dev, "Unknown capability command %u\n",
+ cap_cmd_res);
+ break;
+ }
+
+ for (i = 0; i < dev->nchannels; i++) {
+ if (BIT(i) & (value & mask)) {
+ switch (cap_cmd_res) {
+ case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE:
+ card_data->ctrlmode_supported |=
+ CAN_CTRLMODE_LISTENONLY;
+ break;
+ case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT:
+ card_data->capabilities |=
+ KVASER_USB_CAP_BERR_CAP;
+ break;
+ }
+ }
+ }
+
+end:
+ kfree(cmd);
+
+ return err;
+}
+
+static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev)
+{
+ int err;
+ u16 status;
+
+ if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) {
+ dev_info(&dev->intf->dev,
+ "No extended capability support. Upgrade device firmware.\n");
+ return 0;
+ }
+
+ err = kvaser_usb_leaf_get_single_capability(dev,
+ KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE failed %u\n",
+ status);
+
+ err = kvaser_usb_leaf_get_single_capability(dev,
+ KVASER_USB_LEAF_CAP_CMD_ERR_REPORT,
+ &status);
+ if (err)
+ return err;
+ if (status)
+ dev_info(&dev->intf->dev,
+ "KVASER_USB_LEAF_CAP_CMD_ERR_REPORT failed %u\n",
+ status);
+
+ return 0;
+}
+
+static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev)
+{
+ int err = 0;
+
+ if (dev->driver_info->family == KVASER_LEAF)
+ err = kvaser_usb_leaf_get_capabilities_leaf(dev);
+
+ return err;
+}
+
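For the capability decoding above: the cap_res reply carries a mask/value pair per feature, where mask marks the channels the answer is valid for and value marks the channels that actually support it, which is what the BIT(i) & (value & mask) test checks. As a worked example, on a two-channel device mask = 0x3 and value = 0x1 means both channels answered but only channel 0 gets the capability flag.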
static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
@@ -633,7 +900,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
context = &priv->tx_contexts[tid % dev->max_tx_urbs];
/* Sometimes the state change doesn't come after a bus-off event */
- if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) {
+ if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) {
struct sk_buff *skb;
struct can_frame *cf;
@@ -641,8 +908,6 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
if (skb) {
cf->can_id |= CAN_ERR_RESTARTED;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
} else {
netdev_err(priv->netdev,
@@ -655,12 +920,11 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev,
priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
- stats->tx_packets++;
- stats->tx_bytes += context->dlc;
-
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
- can_get_echo_skb(priv->netdev, context->echo_index, NULL);
+ stats->tx_packets++;
+ stats->tx_bytes += can_get_echo_skb(priv->netdev,
+ context->echo_index, NULL);
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
@@ -689,11 +953,22 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv,
return err;
}
+static void kvaser_usb_leaf_chip_state_req_work(struct work_struct *work)
+{
+ struct kvaser_usb_net_leaf_priv *leaf =
+ container_of(work, struct kvaser_usb_net_leaf_priv,
+ chip_state_req_work.work);
+ struct kvaser_usb_net_priv *priv = leaf->net;
+
+ kvaser_usb_leaf_simple_cmd_async(priv, CMD_GET_CHIP_STATE);
+}
+
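chip_state_req_work is a plain delayed work item: it is initialised once per channel (see kvaser_usb_leaf_init_channel() further down), re-armed from the error path while the controller still reports problems, and cancelled when the channel stops. The bare pattern, with illustrative names, is sketched here:

        #include <linux/jiffies.h>
        #include <linux/workqueue.h>

        struct demo_chan {
                struct delayed_work poll_work;
        };

        static void demo_poll_fn(struct work_struct *work)
        {
                struct demo_chan *ch = container_of(work, struct demo_chan,
                                                    poll_work.work);

                /* ... query the device, possibly re-arm ... */
                (void)ch;
        }

        static void demo_chan_init(struct demo_chan *ch)
        {
                INIT_DELAYED_WORK(&ch->poll_work, demo_poll_fn);
        }

        static void demo_chan_arm(struct demo_chan *ch)
        {
                /* run ~500 ms from now on the system workqueue */
                schedule_delayed_work(&ch->poll_work, msecs_to_jiffies(500));
        }

        static void demo_chan_stop(struct demo_chan *ch)
        {
                /* wait for a possibly running instance to finish */
                cancel_delayed_work_sync(&ch->poll_work);
        }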
static void
kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
const struct kvaser_usb_err_summary *es,
struct can_frame *cf)
{
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
struct kvaser_usb *dev = priv->dev;
struct net_device_stats *stats = &priv->netdev->stats;
enum can_state cur_state, new_state, tx_state, rx_state;
@@ -707,20 +982,32 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
new_state = CAN_STATE_BUS_OFF;
} else if (es->status & M16C_STATE_BUS_PASSIVE) {
new_state = CAN_STATE_ERROR_PASSIVE;
- } else if (es->status & M16C_STATE_BUS_ERROR) {
+ } else if ((es->status & M16C_STATE_BUS_ERROR) &&
+ cur_state >= CAN_STATE_BUS_OFF) {
/* Guard against spurious error events after a busoff */
- if (cur_state < CAN_STATE_BUS_OFF) {
- if (es->txerr >= 128 || es->rxerr >= 128)
- new_state = CAN_STATE_ERROR_PASSIVE;
- else if (es->txerr >= 96 || es->rxerr >= 96)
- new_state = CAN_STATE_ERROR_WARNING;
- else if (cur_state > CAN_STATE_ERROR_ACTIVE)
- new_state = CAN_STATE_ERROR_ACTIVE;
- }
+ } else if (es->txerr >= 128 || es->rxerr >= 128) {
+ new_state = CAN_STATE_ERROR_PASSIVE;
+ } else if (es->txerr >= 96 || es->rxerr >= 96) {
+ new_state = CAN_STATE_ERROR_WARNING;
+ } else {
+ new_state = CAN_STATE_ERROR_ACTIVE;
}
- if (!es->status)
- new_state = CAN_STATE_ERROR_ACTIVE;
+ /* 0bfd:0124 FW 4.18.778 was observed to send the initial
+ * CMD_CHIP_STATE_EVENT after CMD_START_CHIP with M16C_STATE_BUS_OFF
+ * bit set if the channel was bus-off when it was last stopped (even
+ * across chip resets). This bit will clear shortly afterwards, without
+ * triggering a second unsolicited chip state event.
+ * Ignore this initial bus-off.
+ */
+ if (leaf->joining_bus) {
+ if (new_state == CAN_STATE_BUS_OFF) {
+ netdev_dbg(priv->netdev, "ignoring bus-off during startup");
+ new_state = cur_state;
+ } else {
+ leaf->joining_bus = false;
+ }
+ }
if (new_state != cur_state) {
tx_state = (es->txerr >= es->rxerr) ? new_state : 0;
@@ -730,11 +1017,11 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv,
}
if (priv->can.restart_ms &&
- cur_state >= CAN_STATE_BUS_OFF &&
+ cur_state == CAN_STATE_BUS_OFF &&
new_state < CAN_STATE_BUS_OFF)
priv->can.can_stats.restarts++;
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
priv->can.can_stats.bus_error++;
@@ -764,6 +1051,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
struct sk_buff *skb;
struct net_device_stats *stats;
struct kvaser_usb_net_priv *priv;
+ struct kvaser_usb_net_leaf_priv *leaf;
enum can_state old_state, new_state;
if (es->channel >= dev->nchannels) {
@@ -773,8 +1061,13 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
}
priv = dev->nets[es->channel];
+ leaf = priv->sub_priv;
stats = &priv->netdev->stats;
+ /* Ignore e.g. state change to bus-off reported just after stopping */
+ if (!netif_running(priv->netdev))
+ return;
+
/* Update all of the CAN interface's state and error counters before
* trying any memory allocation that can actually fail with -ENOMEM.
*
@@ -789,6 +1082,17 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf);
new_state = priv->can.state;
+ /* If there are errors, request status updates periodically as we do
+ * not get automatic notifications of improved state.
+ * Also request updates if we saw a stale BUS_OFF during startup
+ * (joining_bus).
+ */
+ if (new_state < CAN_STATE_BUS_OFF &&
+ (es->rxerr || es->txerr || new_state == CAN_STATE_ERROR_PASSIVE ||
+ leaf->joining_bus))
+ schedule_delayed_work(&leaf->chip_state_req_work,
+ msecs_to_jiffies(500));
+
skb = alloc_can_err_skb(priv->netdev, &cf);
if (!skb) {
stats->rx_dropped++;
@@ -806,14 +1110,14 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
}
if (priv->can.restart_ms &&
- old_state >= CAN_STATE_BUS_OFF &&
+ old_state == CAN_STATE_BUS_OFF &&
new_state < CAN_STATE_BUS_OFF) {
cf->can_id |= CAN_ERR_RESTARTED;
netif_carrier_on(priv->netdev);
}
}
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
if (es->leaf.error_factor) {
cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
@@ -840,11 +1144,12 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev,
break;
}
- cf->data[6] = es->txerr;
- cf->data[7] = es->rxerr;
+ if (new_state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = es->txerr;
+ cf->data[7] = es->rxerr;
+ }
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -904,11 +1209,11 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
case CMD_CAN_ERROR_EVENT:
es.channel = 0;
- es.status = cmd->u.usbcan.error_event.status_ch0;
- es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0;
- es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0;
+ es.status = cmd->u.usbcan.can_error_event.status_ch0;
+ es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch0;
+ es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch0;
es.usbcan.other_ch_status =
- cmd->u.usbcan.error_event.status_ch1;
+ cmd->u.usbcan.can_error_event.status_ch1;
kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
/* The USBCAN firmware supports up to 2 channels.
@@ -916,13 +1221,13 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev,
*/
if (dev->nchannels == MAX_USBCAN_NET_DEVICES) {
es.channel = 1;
- es.status = cmd->u.usbcan.error_event.status_ch1;
+ es.status = cmd->u.usbcan.can_error_event.status_ch1;
es.txerr =
- cmd->u.usbcan.error_event.tx_errors_count_ch1;
+ cmd->u.usbcan.can_error_event.tx_errors_count_ch1;
es.rxerr =
- cmd->u.usbcan.error_event.rx_errors_count_ch1;
+ cmd->u.usbcan.can_error_event.rx_errors_count_ch1;
es.usbcan.other_ch_status =
- cmd->u.usbcan.error_event.status_ch0;
+ cmd->u.usbcan.can_error_event.status_ch0;
kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es);
}
break;
@@ -939,11 +1244,11 @@ static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev,
switch (cmd->id) {
case CMD_CAN_ERROR_EVENT:
- es.channel = cmd->u.leaf.error_event.channel;
- es.status = cmd->u.leaf.error_event.status;
- es.txerr = cmd->u.leaf.error_event.tx_errors_count;
- es.rxerr = cmd->u.leaf.error_event.rx_errors_count;
- es.leaf.error_factor = cmd->u.leaf.error_event.error_factor;
+ es.channel = cmd->u.leaf.can_error_event.channel;
+ es.status = cmd->u.leaf.can_error_event.status;
+ es.txerr = cmd->u.leaf.can_error_event.tx_errors_count;
+ es.rxerr = cmd->u.leaf.can_error_event.rx_errors_count;
+ es.leaf.error_factor = cmd->u.leaf.can_error_event.error_factor;
break;
case CMD_LEAF_LOG_MESSAGE:
es.channel = cmd->u.leaf.log_message.channel;
@@ -1005,7 +1310,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
stats = &priv->netdev->stats;
if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) &&
- (dev->card_data.leaf.family == KVASER_LEAF &&
+ (dev->driver_info->family == KVASER_LEAF &&
cmd->id == CMD_LEAF_LOG_MESSAGE)) {
kvaser_usb_leaf_leaf_rx_error(dev, cmd);
return;
@@ -1021,7 +1326,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
return;
}
- switch (dev->card_data.leaf.family) {
+ switch (dev->driver_info->family) {
case KVASER_LEAF:
rx_data = cmd->u.leaf.rx_can.data;
break;
@@ -1036,7 +1341,7 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
return;
}
- if (dev->card_data.leaf.family == KVASER_LEAF && cmd->id ==
+ if (dev->driver_info->family == KVASER_LEAF && cmd->id ==
CMD_LEAF_LOG_MESSAGE) {
cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id);
if (cf->can_id & KVASER_EXTENDED_FRAME)
@@ -1071,10 +1376,79 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev,
}
stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
netif_rx(skb);
}
+static void kvaser_usb_leaf_error_event_parameter(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ u16 info1 = 0;
+
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ info1 = le16_to_cpu(cmd->u.leaf.error_event.info1);
+ break;
+ case KVASER_USBCAN:
+ info1 = le16_to_cpu(cmd->u.usbcan.error_event.info1);
+ break;
+ }
+
+ /* info1 will contain the offending cmd_no */
+ switch (info1) {
+ case CMD_SET_CTRL_MODE:
+ dev_warn(&dev->intf->dev,
+ "CMD_SET_CTRL_MODE error in parameter\n");
+ break;
+
+ case CMD_SET_BUS_PARAMS:
+ dev_warn(&dev->intf->dev,
+ "CMD_SET_BUS_PARAMS error in parameter\n");
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unhandled parameter error event cmd_no (%u)\n",
+ info1);
+ break;
+ }
+}
+
+static void kvaser_usb_leaf_error_event(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ u8 error_code = 0;
+
+ switch (dev->driver_info->family) {
+ case KVASER_LEAF:
+ error_code = cmd->u.leaf.error_event.error_code;
+ break;
+ case KVASER_USBCAN:
+ error_code = cmd->u.usbcan.error_event.error_code;
+ break;
+ }
+
+ switch (error_code) {
+ case KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL:
+ /* Received additional CAN message, when firmware TX queue is
+ * already full. Something is wrong with the driver.
+ * This should never happen!
+ */
+ dev_err(&dev->intf->dev,
+ "Received error event TX_QUEUE_FULL\n");
+ break;
+ case KVASER_USB_LEAF_ERROR_EVENT_PARAM:
+ kvaser_usb_leaf_error_event_parameter(dev, cmd);
+ break;
+
+ default:
+ dev_warn(&dev->intf->dev,
+ "Unhandled error event (%d)\n", error_code);
+ break;
+ }
+}
+
static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
@@ -1115,9 +1489,31 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev,
complete(&priv->stop_comp);
}
+static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev,
+ const struct kvaser_cmd *cmd)
+{
+ struct kvaser_usb_net_priv *priv;
+ u8 channel = cmd->u.busparams.channel;
+
+ if (channel >= dev->nchannels) {
+ dev_err(&dev->intf->dev,
+ "Invalid channel number (%d)\n", channel);
+ return;
+ }
+
+ priv = dev->nets[channel];
+ memcpy(&priv->busparams_nominal, &cmd->u.busparams.busparams,
+ sizeof(priv->busparams_nominal));
+
+ complete(&priv->get_busparams_comp);
+}
+
static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
const struct kvaser_cmd *cmd)
{
+ if (kvaser_usb_leaf_verify_size(dev, cmd) < 0)
+ return;
+
switch (cmd->id) {
case CMD_START_CHIP_REPLY:
kvaser_usb_leaf_start_chip_reply(dev, cmd);
@@ -1133,14 +1529,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
break;
case CMD_LEAF_LOG_MESSAGE:
- if (dev->card_data.leaf.family != KVASER_LEAF)
+ if (dev->driver_info->family != KVASER_LEAF)
goto warn;
kvaser_usb_leaf_rx_can_msg(dev, cmd);
break;
case CMD_CHIP_STATE_EVENT:
case CMD_CAN_ERROR_EVENT:
- if (dev->card_data.leaf.family == KVASER_LEAF)
+ if (dev->driver_info->family == KVASER_LEAF)
kvaser_usb_leaf_leaf_rx_error(dev, cmd);
else
kvaser_usb_leaf_usbcan_rx_error(dev, cmd);
@@ -1150,14 +1546,22 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev,
kvaser_usb_leaf_tx_acknowledge(dev, cmd);
break;
+ case CMD_ERROR_EVENT:
+ kvaser_usb_leaf_error_event(dev, cmd);
+ break;
+
+ case CMD_GET_BUS_PARAMS_REPLY:
+ kvaser_usb_leaf_get_busparams_reply(dev, cmd);
+ break;
+
/* Ignored commands */
case CMD_USBCAN_CLOCK_OVERFLOW_EVENT:
- if (dev->card_data.leaf.family != KVASER_USBCAN)
+ if (dev->driver_info->family != KVASER_USBCAN)
goto warn;
break;
case CMD_FLUSH_QUEUE_REPLY:
- if (dev->card_data.leaf.family != KVASER_LEAF)
+ if (dev->driver_info->family != KVASER_LEAF)
goto warn;
break;
@@ -1228,9 +1632,12 @@ static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv)
static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
{
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
int err;
- init_completion(&priv->start_comp);
+ leaf->joining_bus = true;
+
+ reinit_completion(&priv->start_comp);
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP,
priv->channel);
@@ -1246,9 +1653,12 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv)
static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv)
{
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
int err;
- init_completion(&priv->stop_comp);
+ reinit_completion(&priv->stop_comp);
+
+ cancel_delayed_work(&leaf->chip_state_req_work);
err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP,
priv->channel);
@@ -1296,10 +1706,35 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
return 0;
}
-static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
+static int kvaser_usb_leaf_init_channel(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_usb_net_leaf_priv *leaf;
+
+ leaf = devm_kzalloc(&priv->dev->intf->dev, sizeof(*leaf), GFP_KERNEL);
+ if (!leaf)
+ return -ENOMEM;
+
+ leaf->net = priv;
+ INIT_DELAYED_WORK(&leaf->chip_state_req_work,
+ kvaser_usb_leaf_chip_state_req_work);
+
+ priv->sub_priv = leaf;
+
+ return 0;
+}
+
+static void kvaser_usb_leaf_remove_channel(struct kvaser_usb_net_priv *priv)
+{
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
+
+ if (leaf)
+ cancel_delayed_work_sync(&leaf->chip_state_req_work);
+}
+
+static int kvaser_usb_leaf_set_bittiming(const struct net_device *netdev,
+ const struct kvaser_usb_busparams *busparams)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
- struct can_bittiming *bt = &priv->can.bittiming;
struct kvaser_usb *dev = priv->dev;
struct kvaser_cmd *cmd;
int rc;
@@ -1312,15 +1747,8 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams);
cmd->u.busparams.channel = priv->channel;
cmd->u.busparams.tid = 0xff;
- cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate);
- cmd->u.busparams.sjw = bt->sjw;
- cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1;
- cmd->u.busparams.tseg2 = bt->phase_seg2;
-
- if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
- cmd->u.busparams.no_samp = 3;
- else
- cmd->u.busparams.no_samp = 1;
+ memcpy(&cmd->u.busparams.busparams, busparams,
+ sizeof(cmd->u.busparams.busparams));
rc = kvaser_usb_send_cmd(dev, cmd, cmd->len);
@@ -1328,17 +1756,45 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
return rc;
}
+static int kvaser_usb_leaf_get_busparams(struct kvaser_usb_net_priv *priv)
+{
+ int err;
+
+ if (priv->dev->driver_info->family == KVASER_USBCAN)
+ return -EOPNOTSUPP;
+
+ reinit_completion(&priv->get_busparams_comp);
+
+ err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_GET_BUS_PARAMS,
+ priv->channel);
+ if (err)
+ return err;
+
+ if (!wait_for_completion_timeout(&priv->get_busparams_comp,
+ msecs_to_jiffies(KVASER_USB_TIMEOUT)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static int kvaser_usb_leaf_set_mode(struct net_device *netdev,
enum can_mode mode)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
+ struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv;
int err;
switch (mode) {
case CAN_MODE_START:
+ kvaser_usb_unlink_tx_urbs(priv);
+
+ leaf->joining_bus = true;
+
err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP);
if (err)
return err;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
break;
default:
return -EOPNOTSUPP;
@@ -1385,14 +1841,18 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
.dev_set_mode = kvaser_usb_leaf_set_mode,
.dev_set_bittiming = kvaser_usb_leaf_set_bittiming,
+ .dev_get_busparams = kvaser_usb_leaf_get_busparams,
.dev_set_data_bittiming = NULL,
+ .dev_get_data_busparams = NULL,
.dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter,
.dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints,
.dev_init_card = kvaser_usb_leaf_init_card,
+ .dev_init_channel = kvaser_usb_leaf_init_channel,
+ .dev_remove_channel = kvaser_usb_leaf_remove_channel,
.dev_get_software_info = kvaser_usb_leaf_get_software_info,
.dev_get_software_details = NULL,
.dev_get_card_info = kvaser_usb_leaf_get_card_info,
- .dev_get_capabilities = NULL,
+ .dev_get_capabilities = kvaser_usb_leaf_get_capabilities,
.dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode,
.dev_start_chip = kvaser_usb_leaf_start_chip,
.dev_stop_chip = kvaser_usb_leaf_stop_chip,
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index a1a154c08b7f..47619e9cb005 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -10,7 +10,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
@@ -33,10 +33,6 @@
#define MCBA_USB_RX_BUFF_SIZE 64
#define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg))
-/* MCBA endpoint numbers */
-#define MCBA_USB_EP_IN 1
-#define MCBA_USB_EP_OUT 1
-
/* Microchip command id */
#define MBCA_CMD_RECEIVE_MESSAGE 0xE3
#define MBCA_CMD_I_AM_ALIVE_FROM_CAN 0xF5
@@ -51,6 +47,10 @@
#define MCBA_VER_REQ_USB 1
#define MCBA_VER_REQ_CAN 2
+/* Drive the CAN_RES signal LOW "0" to activate R24 and R25 */
+#define MCBA_VER_TERMINATION_ON 0
+#define MCBA_VER_TERMINATION_OFF 1
+
#define MCBA_SIDL_EXID_MASK 0x8
#define MCBA_DLC_MASK 0xf
#define MCBA_DLC_RTR_MASK 0x40
@@ -64,7 +64,6 @@
struct mcba_usb_ctx {
struct mcba_priv *priv;
u32 ndx;
- u8 dlc;
bool can;
};
@@ -84,6 +83,8 @@ struct mcba_priv {
atomic_t free_ctx_cnt;
void *rxbuf[MCBA_MAX_RX_URBS];
dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
+ int rx_pipe;
+ int tx_pipe;
};
/* CAN frame */
@@ -184,13 +185,10 @@ static inline struct mcba_usb_ctx *mcba_usb_get_free_ctx(struct mcba_priv *priv,
ctx = &priv->tx_context[i];
ctx->ndx = i;
- if (cf) {
+ if (cf)
ctx->can = true;
- ctx->dlc = cf->len;
- } else {
+ else
ctx->can = false;
- ctx->dlc = 0;
- }
atomic_dec(&priv->free_ctx_cnt);
break;
@@ -236,10 +234,8 @@ static void mcba_usb_write_bulk_callback(struct urb *urb)
return;
netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += ctx->dlc;
-
- can_led_event(netdev, CAN_LED_EVENT_TX);
- can_get_echo_skb(netdev, ctx->ndx, NULL);
+ netdev->stats.tx_bytes += can_get_echo_skb(netdev, ctx->ndx,
+ NULL);
}
if (urb->status)
@@ -272,10 +268,8 @@ static netdev_tx_t mcba_usb_xmit(struct mcba_priv *priv,
memcpy(buf, usb_msg, MCBA_USB_TX_BUFF_SIZE);
- usb_fill_bulk_urb(urb, priv->udev,
- usb_sndbulkpipe(priv->udev, MCBA_USB_EP_OUT), buf,
- MCBA_USB_TX_BUFF_SIZE, mcba_usb_write_bulk_callback,
- ctx);
+ usb_fill_bulk_urb(urb, priv->udev, priv->tx_pipe, buf, MCBA_USB_TX_BUFF_SIZE,
+ mcba_usb_write_bulk_callback, ctx);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
usb_anchor_urb(urb, &priv->tx_submitted);
@@ -321,7 +315,7 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
.cmd_id = MBCA_CMD_TRANSMIT_MESSAGE_EV
};
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
ctx = mcba_usb_get_free_ctx(priv, cf);
@@ -368,7 +362,6 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
xmit_failed:
can_free_echo_skb(priv->netdev, ctx->ndx, NULL);
mcba_usb_free_ctx(ctx);
- dev_kfree_skb(skb);
stats->tx_dropped++;
return NETDEV_TX_OK;
@@ -450,17 +443,17 @@ static void mcba_usb_process_can(struct mcba_priv *priv,
cf->can_id = (sid & 0xffe0) >> 5;
}
- if (msg->dlc & MCBA_DLC_RTR_MASK)
- cf->can_id |= CAN_RTR_FLAG;
-
cf->len = can_cc_dlc2len(msg->dlc & MCBA_DLC_MASK);
- memcpy(cf->data, msg->data, cf->len);
+ if (msg->dlc & MCBA_DLC_RTR_MASK) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ memcpy(cf->data, msg->data, cf->len);
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
- can_led_event(priv->netdev, CAN_LED_EVENT_RX);
netif_rx(skb);
}
@@ -474,7 +467,7 @@ static void mcba_usb_process_ka_usb(struct mcba_priv *priv,
priv->usb_ka_first_pass = false;
}
- if (msg->termination_state)
+ if (msg->termination_state == MCBA_VER_TERMINATION_ON)
priv->can.termination = MCBA_TERMINATION_ENABLED;
else
priv->can.termination = MCBA_TERMINATION_DISABLED;
@@ -611,7 +604,7 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, priv->udev,
- usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_OUT),
+ priv->rx_pipe,
urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE,
mcba_usb_read_bulk_callback, priv);
@@ -656,7 +649,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
urb->transfer_dma = buf_dma;
usb_fill_bulk_urb(urb, priv->udev,
- usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
+ priv->rx_pipe,
buf, MCBA_USB_RX_BUFF_SIZE,
mcba_usb_read_bulk_callback, priv);
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
@@ -708,7 +701,6 @@ static int mcba_usb_open(struct net_device *netdev)
priv->can_speed_check = true;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
- can_led_event(netdev, CAN_LED_EVENT_OPEN);
netif_start_queue(netdev);
return 0;
@@ -740,7 +732,6 @@ static int mcba_usb_close(struct net_device *netdev)
mcba_urb_unlink(priv);
close_candev(netdev);
- can_led_event(netdev, CAN_LED_EVENT_STOP);
return 0;
}
@@ -772,6 +763,10 @@ static const struct net_device_ops mcba_netdev_ops = {
.ndo_start_xmit = mcba_usb_start_xmit,
};
+static const struct ethtool_ops mcba_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/* Microchip CANBUS has hardcoded bittiming values by default.
* This function sends request via USB to change the speed and align bittiming
* values for presentation purposes only
@@ -794,9 +789,9 @@ static int mcba_set_termination(struct net_device *netdev, u16 term)
};
if (term == MCBA_TERMINATION_ENABLED)
- usb_msg.termination = 1;
+ usb_msg.termination = MCBA_VER_TERMINATION_ON;
else
- usb_msg.termination = 0;
+ usb_msg.termination = MCBA_VER_TERMINATION_OFF;
mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg);
@@ -810,6 +805,13 @@ static int mcba_usb_probe(struct usb_interface *intf,
struct mcba_priv *priv;
int err;
struct usb_device *usbdev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *in, *out;
+
+ err = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, NULL);
+ if (err) {
+ dev_err(&intf->dev, "Can't find endpoints\n");
+ return err;
+ }
netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS);
if (!netdev) {
@@ -843,6 +845,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
priv->can.do_set_bittiming = mcba_net_set_bittiming;
netdev->netdev_ops = &mcba_netdev_ops;
+ netdev->ethtool_ops = &mcba_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
@@ -855,7 +858,8 @@ static int mcba_usb_probe(struct usb_interface *intf,
goto cleanup_free_candev;
}
- devm_can_led_init(netdev);
+ priv->rx_pipe = usb_rcvbulkpipe(priv->udev, in->bEndpointAddress);
+ priv->tx_pipe = usb_sndbulkpipe(priv->udev, out->bEndpointAddress);
/* Start USB dev only if we have successfully registered CAN device */
err = mcba_usb_start(priv);
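The mcba_usb changes above drop the hard-coded endpoint numbers and instead discover the first bulk-in/bulk-out pair at probe time with usb_find_common_endpoints(), then build the pipes once and reuse them. The call in isolation, with illustrative names and error handling trimmed, is roughly:

        #include <linux/usb.h>

        static int demo_probe_endpoints(struct usb_interface *intf,
                                        unsigned int *rx_pipe, unsigned int *tx_pipe)
        {
                struct usb_device *udev = interface_to_usbdev(intf);
                struct usb_endpoint_descriptor *in, *out;
                int err;

                /* first bulk-in and bulk-out endpoints of the current altsetting */
                err = usb_find_common_endpoints(intf->cur_altsetting, &in, &out,
                                                NULL, NULL);
                if (err)
                        return err;

                *rx_pipe = usb_rcvbulkpipe(udev, in->bEndpointAddress);
                *tx_pipe = usb_sndbulkpipe(udev, out->bEndpointAddress);

                return 0;
        }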
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 876218752766..b211b6e283a2 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -8,10 +8,12 @@
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
+#include <asm/unaligned.h>
+
+#include <linux/ethtool.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -380,23 +382,42 @@ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number)
}
/*
- * read device id from device
+ * read can channel id from device
*/
-static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id)
+static int pcan_usb_get_can_channel_id(struct peak_usb_device *dev, u32 *can_ch_id)
{
u8 args[PCAN_USB_CMD_ARGS_LEN];
int err;
err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_DEVID, PCAN_USB_GET, args);
if (err)
- netdev_err(dev->netdev, "getting device id failure: %d\n", err);
+ netdev_err(dev->netdev, "getting can channel id failure: %d\n", err);
else
- *device_id = args[0];
+ *can_ch_id = args[0];
return err;
}
+/* set a new CAN channel id in the flash memory of the device */
+static int pcan_usb_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN];
+
+ /* this kind of device supports 8-bit values only */
+ if (can_ch_id > U8_MAX)
+ return -EINVAL;
+
+ /* during the flash process the device disconnects for ~1.25 s.:
+ * prohibit access when interface is UP
+ */
+ if (dev->netdev->flags & IFF_UP)
+ return -EBUSY;
+
+ args[0] = can_ch_id;
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_DEVID, PCAN_USB_SET, args);
+}
+
/*
* update current time ref with received timestamp
*/
@@ -505,6 +526,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
/* Supply TX/RX error counters in case of
* controller error.
*/
+ cf->can_id = CAN_ERR_CNT;
cf->data[6] = mc->pdev->bec.txerr;
cf->data[7] = mc->pdev->bec.rxerr;
}
@@ -520,8 +542,6 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
&hwts->hwtstamp);
}
- mc->netdev->stats.rx_packets++;
- mc->netdev->stats.rx_bytes += cf->len;
netif_rx(skb);
return 0;
@@ -534,7 +554,7 @@ static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir)
{
struct pcan_usb *pdev = mc->pdev;
- /* acccording to the content of the packet */
+ /* according to the content of the packet */
switch (ir) {
case PCAN_USB_ERR_CNT_DEC:
case PCAN_USB_ERR_CNT_INC:
@@ -678,15 +698,16 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
/* Ignore next byte (client private id) if SRR bit is set */
if (can_id_flags & PCAN_USB_TX_SRR)
mc->ptr++;
+
+ /* update statistics */
+ mc->netdev->stats.rx_bytes += cf->len;
}
+ mc->netdev->stats.rx_packets++;
/* convert timestamp into kernel time */
hwts = skb_hwtstamps(skb);
peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, &hwts->hwtstamp);
- /* update statistics */
- mc->netdev->stats.rx_packets++;
- mc->netdev->stats.rx_bytes += cf->len;
/* push the skb */
netif_rx(skb);
@@ -962,8 +983,18 @@ static int pcan_usb_set_phys_id(struct net_device *netdev,
return err;
}
+/* This device only handles 8-bit CAN channel id. */
+static int pcan_usb_get_eeprom_len(struct net_device *netdev)
+{
+ return sizeof(u8);
+}
+
static const struct ethtool_ops pcan_usb_ethtool_ops = {
.set_phys_id = pcan_usb_set_phys_id,
+ .get_ts_info = pcan_get_ts_info,
+ .get_eeprom_len = pcan_usb_get_eeprom_len,
+ .get_eeprom = peak_usb_get_eeprom,
+ .set_eeprom = peak_usb_set_eeprom,
};
/*
@@ -1015,7 +1046,8 @@ const struct peak_usb_adapter pcan_usb = {
.dev_init = pcan_usb_init,
.dev_set_bus = pcan_usb_write_mode,
.dev_set_bittiming = pcan_usb_set_bittiming,
- .dev_get_device_id = pcan_usb_get_device_id,
+ .dev_get_can_channel_id = pcan_usb_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_set_can_channel_id,
.dev_decode_buf = pcan_usb_decode_buf,
.dev_encode_msg = pcan_usb_encode_msg,
.dev_start = pcan_usb_start,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 6107fef9f4a0..d881e1d30183 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -8,13 +8,15 @@
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
+#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/init.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
#include <linux/usb.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -53,11 +55,31 @@ static const struct usb_device_id peak_usb_table[] = {
MODULE_DEVICE_TABLE(usb, peak_usb_table);
+static ssize_t can_channel_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct peak_usb_device *peak_dev = netdev_priv(netdev);
+
+ return sysfs_emit(buf, "%08X\n", peak_dev->can_channel_id);
+}
+static DEVICE_ATTR_RO(can_channel_id);
+
+/* mutable to avoid cast in attribute_group */
+static struct attribute *peak_usb_sysfs_attrs[] = {
+ &dev_attr_can_channel_id.attr,
+ NULL,
+};
+
+static const struct attribute_group peak_usb_sysfs_group = {
+ .name = "peak_usb",
+ .attrs = peak_usb_sysfs_attrs,
+};
+
/*
* dump memory
*/
#define DUMP_WIDTH 16
-void pcan_dump_mem(char *prompt, void *p, int l)
+void pcan_dump_mem(const char *prompt, const void *p, int l)
{
pr_info("%s dumping %s (%d bytes):\n",
PCAN_USB_DRIVER_NAME, prompt ? prompt : "memory", l);
@@ -291,6 +313,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
struct peak_tx_urb_context *context = urb->context;
struct peak_usb_device *dev;
struct net_device *netdev;
+ int tx_bytes;
BUG_ON(!context);
@@ -305,10 +328,6 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
/* check tx status */
switch (urb->status) {
case 0:
- /* transmission complete */
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += context->data_len;
-
/* prevent tx timeout */
netif_trans_update(netdev);
break;
@@ -327,12 +346,17 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
}
/* should always release echo skb and corresponding context */
- can_get_echo_skb(netdev, context->echo_index, NULL);
+ tx_bytes = can_get_echo_skb(netdev, context->echo_index, NULL);
context->echo_index = PCAN_USB_MAX_TX_URBS;
- /* do wakeup tx queue in case of success only */
- if (!urb->status)
+ if (!urb->status) {
+ /* transmission complete */
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += tx_bytes;
+
+ /* do wakeup tx queue in case of success only */
netif_wake_queue(netdev);
+ }
}
/*
@@ -344,13 +368,12 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
struct peak_usb_device *dev = netdev_priv(netdev);
struct peak_tx_urb_context *context = NULL;
struct net_device_stats *stats = &netdev->stats;
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct urb *urb;
u8 *obuf;
int i, err;
size_t size = dev->adapter->tx_buffer_size;
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++)
@@ -378,9 +401,6 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
context->echo_index = i;
- /* Note: this works with CANFD frames too */
- context->data_len = cfd->len;
-
usb_anchor_urb(urb, &dev->tx_submitted);
can_put_echo_skb(skb, netdev, context->echo_index, 0);
@@ -777,13 +797,134 @@ static int peak_usb_set_data_bittiming(struct net_device *netdev)
return 0;
}
+static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct hwtstamp_config hwts_cfg = { 0 };
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP: /* set */
+ if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
+ return -EFAULT;
+ if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF &&
+ hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
+ return 0;
+ return -ERANGE;
+
+ case SIOCGHWTSTAMP: /* get */
+ hwts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
+ return -EFAULT;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
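The ioctl handler above only advertises one fixed configuration (TX timestamping off, all RX frames timestamped). From user space that can be confirmed with a plain SIOCGHWTSTAMP query; a minimal sketch follows, assuming the interface is named can0:

        #include <linux/net_tstamp.h>
        #include <linux/sockios.h>
        #include <net/if.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <sys/socket.h>
        #include <unistd.h>

        int main(void)
        {
                struct hwtstamp_config cfg = { 0 };
                struct ifreq ifr = { 0 };
                int fd = socket(AF_INET, SOCK_DGRAM, 0);

                if (fd < 0)
                        return 1;

                strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);    /* assumed name */
                ifr.ifr_data = (void *)&cfg;

                if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
                        printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

                close(fd);
                return 0;
        }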
static const struct net_device_ops peak_usb_netdev_ops = {
.ndo_open = peak_usb_ndo_open,
.ndo_stop = peak_usb_ndo_stop,
+ .ndo_eth_ioctl = peak_eth_ioctl,
.ndo_start_xmit = peak_usb_ndo_start_xmit,
.ndo_change_mtu = can_change_mtu,
};
+/* CAN-USB devices generally handle 32-bit CAN channel IDs.
+ * In case one doesn't, it has to overload this function.
+ */
+int peak_usb_get_eeprom_len(struct net_device *netdev)
+{
+ return sizeof(u32);
+}
+
+/* Every CAN-USB device exports the dev_get_can_channel_id() operation. It is used
+ * here to fill the data buffer with the user defined CAN channel ID.
+ */
+int peak_usb_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ u32 ch_id;
+ __le32 ch_id_le;
+ int err;
+
+ err = dev->adapter->dev_get_can_channel_id(dev, &ch_id);
+ if (err)
+ return err;
+
+ /* ethtool operates on individual bytes. The byte order of the CAN
+ * channel id in memory depends on the kernel architecture. We
+ * convert the CAN channel id back to the native byte order of the PEAK
+ * device itself to ensure that the order is consistent for all
+ * host architectures.
+ */
+ ch_id_le = cpu_to_le32(ch_id);
+ memcpy(data, (u8 *)&ch_id_le + eeprom->offset, eeprom->len);
+
+ /* update cached value */
+ dev->can_channel_id = ch_id;
+ return err;
+}
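As a usage illustration (not part of this patch), the 4-byte channel ID exposed through these eeprom hooks can be read from userspace with the standard ETHTOOL_GEEPROM ioctl (or "ethtool -e can0" from the shell); the interface name "can0" is an assumption and error handling is omitted:

#include <endian.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

int main(void)
{
	struct ethtool_eeprom *ee = calloc(1, sizeof(*ee) + sizeof(uint32_t));
	struct ifreq ifr = { 0 };
	uint32_t id_le;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	ee->cmd = ETHTOOL_GEEPROM;
	ee->offset = 0;
	ee->len = sizeof(uint32_t);

	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);	/* assumed interface name */
	ifr.ifr_data = (void *)ee;

	/* the driver fills ee->data with the channel ID as little-endian bytes */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
		memcpy(&id_le, ee->data, sizeof(id_le));
		printf("CAN channel ID: 0x%08x\n", le32toh(id_le));
	}

	free(ee);
	return 0;
}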
+
+/* Every CAN-USB device exports the dev_get_can_channel_id()/dev_set_can_channel_id()
+ * operations. They are used here to set the new user-defined CAN channel ID.
+ */
+int peak_usb_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ u32 ch_id;
+ __le32 ch_id_le;
+ int err;
+
+ /* first, read the current user defined CAN channel ID */
+ err = dev->adapter->dev_get_can_channel_id(dev, &ch_id);
+ if (err) {
+ netdev_err(netdev, "Failed to init CAN channel id (err %d)\n", err);
+ return err;
+ }
+
+ /* update the value with the user-given bytes.
+ * ethtool operates on individual bytes. The byte order of the CAN
+ * channel ID in memory depends on the kernel architecture. We
+ * convert the CAN channel ID back to the native byte order of the PEAK
+ * device itself to ensure that the order is consistent for all
+ * host architectures.
+ */
+ ch_id_le = cpu_to_le32(ch_id);
+ memcpy((u8 *)&ch_id_le + eeprom->offset, data, eeprom->len);
+ ch_id = le32_to_cpu(ch_id_le);
+
+ /* flash the new value now */
+ err = dev->adapter->dev_set_can_channel_id(dev, ch_id);
+ if (err) {
+ netdev_err(netdev, "Failed to write new CAN channel id (err %d)\n",
+ err);
+ return err;
+ }
+
+ /* update cached value with the new one */
+ dev->can_channel_id = ch_id;
+
+ return 0;
+}
+
+int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
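The capabilities advertised here map onto the SO_TIMESTAMPING socket API. A hedged userspace sketch (not part of this patch) of how a receiver might request the RX timestamps these devices deliver; "can0" and the chosen flag set are illustrative and error handling is omitted:

#include <linux/can.h>
#include <linux/net_tstamp.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

int open_timestamping_socket(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct ifreq ifr = { 0 };
	int flags = SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE |
		    SOF_TIMESTAMPING_SOFTWARE;
	int fd = socket(AF_CAN, SOCK_RAW, CAN_RAW);

	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);	/* assumed interface name */
	ioctl(fd, SIOCGIFINDEX, &ifr);
	addr.can_ifindex = ifr.ifr_ifindex;
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	/* timestamps then arrive as SCM_TIMESTAMPING control messages on recvmsg() */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));

	return fd;
}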
+
/*
* create one device which is attached to CAN controller #ctrl_idx of the
* usb adapter.
@@ -842,6 +983,9 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
/* add ethtool support */
netdev->ethtool_ops = peak_usb_adapter->ethtool_ops;
+ /* register peak_usb sysfs files */
+ netdev->sysfs_groups[0] = &peak_usb_sysfs_group;
+
init_usb_anchor(&dev->rx_submitted);
init_usb_anchor(&dev->tx_submitted);
@@ -882,12 +1026,11 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
goto adap_dev_free;
}
- /* get device number early */
- if (dev->adapter->dev_get_device_id)
- dev->adapter->dev_get_device_id(dev, &dev->device_number);
+ /* get CAN channel id early */
+ dev->adapter->dev_get_can_channel_id(dev, &dev->can_channel_id);
- netdev_info(netdev, "attached to %s channel %u (device %u)\n",
- peak_usb_adapter->name, ctrl_idx, dev->device_number);
+ netdev_info(netdev, "attached to %s channel %u (device 0x%08X)\n",
+ peak_usb_adapter->name, ctrl_idx, dev->can_channel_id);
return 0;
@@ -923,9 +1066,9 @@ static void peak_usb_disconnect(struct usb_interface *intf)
dev_prev_siblings = dev->prev_siblings;
dev->state &= ~PCAN_USB_STATE_CONNECTED;
- strlcpy(name, netdev->name, IFNAMSIZ);
+ strscpy(name, netdev->name, IFNAMSIZ);
- unregister_netdev(netdev);
+ unregister_candev(netdev);
kfree(dev->cmd_buf);
dev->next_siblings = NULL;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index daa19f57e742..980e315186cf 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -60,7 +60,8 @@ struct peak_usb_adapter {
int (*dev_set_data_bittiming)(struct peak_usb_device *dev,
struct can_bittiming *bt);
int (*dev_set_bus)(struct peak_usb_device *dev, u8 onoff);
- int (*dev_get_device_id)(struct peak_usb_device *dev, u32 *device_id);
+ int (*dev_get_can_channel_id)(struct peak_usb_device *dev, u32 *can_ch_id);
+ int (*dev_set_can_channel_id)(struct peak_usb_device *dev, u32 can_ch_id);
int (*dev_decode_buf)(struct peak_usb_device *dev, struct urb *urb);
int (*dev_encode_msg)(struct peak_usb_device *dev, struct sk_buff *skb,
u8 *obuf, size_t *size);
@@ -99,7 +100,6 @@ struct peak_time_ref {
struct peak_tx_urb_context {
struct peak_usb_device *dev;
u32 echo_index;
- u8 data_len;
struct urb *urb;
};
@@ -123,7 +123,8 @@ struct peak_usb_device {
u8 *cmd_buf;
struct usb_anchor rx_submitted;
- u32 device_number;
+ /* equivalent to the device ID in the Windows API */
+ u32 can_channel_id;
u8 device_rev;
u8 ep_msg_in;
@@ -133,7 +134,7 @@ struct peak_usb_device {
struct peak_usb_device *next_siblings;
};
-void pcan_dump_mem(char *prompt, void *p, int l);
+void pcan_dump_mem(const char *prompt, const void *p, int l);
/* common timestamp management */
void peak_usb_init_time_ref(struct peak_time_ref *time_ref,
@@ -146,5 +147,12 @@ int peak_usb_netif_rx(struct sk_buff *skb,
int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high);
void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
-
+int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
+
+/* common 32-bit CAN channel ID ethtool management */
+int peak_usb_get_eeprom_len(struct net_device *netdev);
+int peak_usb_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data);
+int peak_usb_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data);
#endif
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 6bd12549f101..4d85b29a17b7 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -4,10 +4,10 @@
*
* Copyright (C) 2013-2014 Stephane Grosjean <s.grosjean@peak-system.com>
*/
+#include <linux/ethtool.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -33,6 +33,10 @@
#define PCAN_UFD_RX_BUFFER_SIZE 2048
#define PCAN_UFD_TX_BUFFER_SIZE 512
+/* struct pcan_ufd_fw_info::type */
+#define PCAN_USBFD_TYPE_STD 1
+#define PCAN_USBFD_TYPE_EXT 2 /* includes EP numbers */
+
/* read some versions info from the hw device */
struct __packed pcan_ufd_fw_info {
__le16 size_of; /* sizeof this */
@@ -44,6 +48,13 @@ struct __packed pcan_ufd_fw_info {
__le32 dev_id[2]; /* "device id" per CAN */
__le32 ser_no; /* S/N */
__le32 flags; /* special functions */
+
+ /* extended data when type == PCAN_USBFD_TYPE_EXT */
+ u8 cmd_out_ep; /* ep for cmd */
+ u8 cmd_in_ep; /* ep for replies */
+ u8 data_out_ep[2]; /* ep for CANx TX */
+ u8 data_in_ep; /* ep for CAN RX */
+ u8 dummy[3];
};
/* handle device specific info used by the netdevices */
@@ -136,6 +147,15 @@ struct __packed pcan_ufd_ovr_msg {
u8 unused[3];
};
+#define PCAN_UFD_CMD_DEVID_SET 0x81
+
+struct __packed pcan_ufd_device_id {
+ __le16 opcode_channel;
+
+ u16 unused;
+ __le32 device_id;
+};
+
static inline int pufd_omsg_get_channel(struct pcan_ufd_ovr_msg *om)
{
return om->channel & 0xf;
@@ -171,6 +191,9 @@ static inline void *pcan_usb_fd_cmd_buffer(struct peak_usb_device *dev)
/* send PCAN-USB Pro FD commands synchronously */
static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
{
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_ufd_fw_info *fw_info = &pdev->usb_if->fw_info;
void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
int err = 0;
u8 *packet_ptr;
@@ -200,7 +223,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
do {
err = usb_bulk_msg(dev->udev,
usb_sndbulkpipe(dev->udev,
- PCAN_USBPRO_EP_CMDOUT),
+ fw_info->cmd_out_ep),
packet_ptr, packet_len,
NULL, PCAN_UFD_CMD_TIMEOUT_MS);
if (err) {
@@ -220,6 +243,15 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
return err;
}
+static int pcan_usb_fd_read_fwinfo(struct peak_usb_device *dev,
+ struct pcan_ufd_fw_info *fw_info)
+{
+ return pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
+ PCAN_USBPRO_INFO_FW,
+ fw_info,
+ sizeof(*fw_info));
+}
+
/* build the commands list in the given buffer, to enter operational mode */
static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
{
@@ -420,12 +452,43 @@ static int pcan_usb_fd_set_bittiming_fast(struct peak_usb_device *dev,
return pcan_usb_fd_send_cmd(dev, ++cmd);
}
+/* read user CAN channel id from device */
+static int pcan_usb_fd_get_can_channel_id(struct peak_usb_device *dev,
+ u32 *can_ch_id)
+{
+ int err;
+ struct pcan_usb_fd_if *usb_if = pcan_usb_fd_dev_if(dev);
+
+ err = pcan_usb_fd_read_fwinfo(dev, &usb_if->fw_info);
+ if (err)
+ return err;
+
+ *can_ch_id = le32_to_cpu(usb_if->fw_info.dev_id[dev->ctrl_idx]);
+ return err;
+}
+
+/* set a new CAN channel id in the flash memory of the device */
+static int pcan_usb_fd_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id)
+{
+ struct pcan_ufd_device_id *cmd = pcan_usb_fd_cmd_buffer(dev);
+
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx,
+ PCAN_UFD_CMD_DEVID_SET);
+ cmd->device_id = cpu_to_le32(can_ch_id);
+
+ /* send the command */
+ return pcan_usb_fd_send_cmd(dev, ++cmd);
+}
+
/* handle restart but in asynchronously way
* (uses PCAN-USB Pro code to complete asynchronous request)
*/
static int pcan_usb_fd_restart_async(struct peak_usb_device *dev,
struct urb *urb, u8 *buf)
{
+ struct pcan_usb_fd_device *pdev =
+ container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_ufd_fw_info *fw_info = &pdev->usb_if->fw_info;
u8 *pc = buf;
/* build the entire cmds list in the provided buffer, to go back into
@@ -439,7 +502,7 @@ static int pcan_usb_fd_restart_async(struct peak_usb_device *dev,
/* complete the URB */
usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT),
+ usb_sndbulkpipe(dev->udev, fw_info->cmd_out_ep),
buf, pc - buf,
pcan_usb_pro_restart_complete, dev);
@@ -507,13 +570,13 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
if (rx_msg_flags & PUCAN_MSG_EXT_ID)
cfd->can_id |= CAN_EFF_FLAG;
- if (rx_msg_flags & PUCAN_MSG_RTR)
+ if (rx_msg_flags & PUCAN_MSG_RTR) {
cfd->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cfd->data, rm->d, cfd->len);
-
+ netdev->stats.rx_bytes += cfd->len;
+ }
netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += cfd->len;
peak_usb_netif_rx_64(skb, le32_to_cpu(rm->ts_low),
le32_to_cpu(rm->ts_high));
@@ -577,9 +640,6 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
if (!skb)
return -ENOMEM;
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += cf->len;
-
peak_usb_netif_rx_64(skb, le32_to_cpu(sm->ts_low),
le32_to_cpu(sm->ts_high));
@@ -842,6 +902,15 @@ static int pcan_usb_fd_get_berr_counter(const struct net_device *netdev,
return 0;
}
+/* probe function for all PCAN-USB FD family usb interfaces */
+static int pcan_usb_fd_probe(struct usb_interface *intf)
+{
+ struct usb_host_interface *iface_desc = &intf->altsetting[0];
+
+ /* CAN interface is always interface #0 */
+ return iface_desc->desc.bInterfaceNumber;
+}
+
/* stop interface (last chance before set bus off) */
static int pcan_usb_fd_stop(struct peak_usb_device *dev)
{
@@ -863,6 +932,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
{
struct pcan_usb_fd_device *pdev =
container_of(dev, struct pcan_usb_fd_device, dev);
+ struct pcan_ufd_fw_info *fw_info;
int i, err = -ENOMEM;
/* do this for 1st channel only */
@@ -881,10 +951,9 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
/* number of ts msgs to ignore before taking one into account */
pdev->usb_if->cm_ignore_count = 5;
- err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
- PCAN_USBPRO_INFO_FW,
- &pdev->usb_if->fw_info,
- sizeof(pdev->usb_if->fw_info));
+ fw_info = &pdev->usb_if->fw_info;
+
+ err = pcan_usb_fd_read_fwinfo(dev, fw_info);
if (err) {
dev_err(dev->netdev->dev.parent,
"unable to read %s firmware info (err %d)\n",
@@ -898,14 +967,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
*/
dev_info(dev->netdev->dev.parent,
"PEAK-System %s v%u fw v%u.%u.%u (%u channels)\n",
- dev->adapter->name, pdev->usb_if->fw_info.hw_version,
- pdev->usb_if->fw_info.fw_version[0],
- pdev->usb_if->fw_info.fw_version[1],
- pdev->usb_if->fw_info.fw_version[2],
+ dev->adapter->name, fw_info->hw_version,
+ fw_info->fw_version[0],
+ fw_info->fw_version[1],
+ fw_info->fw_version[2],
dev->adapter->ctrl_count);
/* check for ability to switch between ISO/non-ISO modes */
- if (pdev->usb_if->fw_info.fw_version[0] >= 2) {
+ if (fw_info->fw_version[0] >= 2) {
/* firmware >= 2.x supports ISO/non-ISO switching */
dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
} else {
@@ -913,6 +982,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
}
+ /* If the vendor response is of type 2, it carries the endpoint numbers
+ * to use for the command pipes. Otherwise, the default endpoints are used.
+ */
+ if (fw_info->type != cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
+ fw_info->cmd_out_ep = PCAN_USBPRO_EP_CMDOUT;
+ fw_info->cmd_in_ep = PCAN_USBPRO_EP_CMDIN;
+ }
+
/* tell the hardware the can driver is running */
err = pcan_usb_fd_drv_loaded(dev, 1);
if (err) {
@@ -933,12 +1010,23 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
/* do a copy of the ctrlmode[_supported] too */
dev->can.ctrlmode = ppdev->dev.can.ctrlmode;
dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported;
+
+ fw_info = &pdev->usb_if->fw_info;
}
pdev->usb_if->dev[dev->ctrl_idx] = dev;
- dev->device_number =
+ dev->can_channel_id =
le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]);
+ /* If the vendor response is of type 2, it carries the endpoint numbers
+ * to use for the data pipes. Otherwise, the statically defined endpoints
+ * are used (see peak_usb_create_dev()).
+ */
+ if (fw_info->type == cpu_to_le16(PCAN_USBFD_TYPE_EXT)) {
+ dev->ep_msg_in = fw_info->data_in_ep;
+ dev->ep_msg_out = fw_info->data_out_ep[dev->ctrl_idx];
+ }
+
/* set clock domain */
for (i = 0; i < ARRAY_SIZE(pcan_usb_fd_clk_freq); i++)
if (dev->adapter->clock.freq == pcan_usb_fd_clk_freq[i])
@@ -1035,6 +1123,10 @@ static int pcan_usb_fd_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_fd_ethtool_ops = {
.set_phys_id = pcan_usb_fd_set_phys_id,
+ .get_ts_info = pcan_get_ts_info,
+ .get_eeprom_len = peak_usb_get_eeprom_len,
+ .get_eeprom = peak_usb_get_eeprom,
+ .set_eeprom = peak_usb_set_eeprom,
};
/* describes the PCAN-USB FD adapter */
@@ -1094,7 +1186,7 @@ const struct peak_usb_adapter pcan_usb_fd = {
.tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE,
/* device callbacks */
- .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */
+ .intf_probe = pcan_usb_fd_probe,
.dev_init = pcan_usb_fd_init,
.dev_exit = pcan_usb_fd_exit,
@@ -1102,6 +1194,8 @@ const struct peak_usb_adapter pcan_usb_fd = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
@@ -1176,6 +1270,8 @@ const struct peak_usb_adapter pcan_usb_chip = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
@@ -1250,6 +1346,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
@@ -1324,6 +1422,8 @@ const struct peak_usb_adapter pcan_usb_x6 = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 858ab22708fc..f736196383ac 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -6,10 +6,10 @@
* Copyright (C) 2003-2011 PEAK System-Technik GmbH
* Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
*/
+#include <linux/ethtool.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -76,6 +76,7 @@ static u16 pcan_usb_pro_sizeof_rec[256] = {
[PCAN_USBPRO_SETFILTR] = sizeof(struct pcan_usb_pro_filter),
[PCAN_USBPRO_SETTS] = sizeof(struct pcan_usb_pro_setts),
[PCAN_USBPRO_GETDEVID] = sizeof(struct pcan_usb_pro_devid),
+ [PCAN_USBPRO_SETDEVID] = sizeof(struct pcan_usb_pro_devid),
[PCAN_USBPRO_SETLED] = sizeof(struct pcan_usb_pro_setled),
[PCAN_USBPRO_RXMSG8] = sizeof(struct pcan_usb_pro_rxmsg),
[PCAN_USBPRO_RXMSG4] = sizeof(struct pcan_usb_pro_rxmsg) - 4,
@@ -149,6 +150,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, int id, ...)
case PCAN_USBPRO_SETBTR:
case PCAN_USBPRO_GETDEVID:
+ case PCAN_USBPRO_SETDEVID:
*pc++ = va_arg(ap, int);
pc += 2;
*(__le32 *)pc = cpu_to_le32(va_arg(ap, u32));
@@ -419,8 +421,8 @@ static int pcan_usb_pro_set_led(struct peak_usb_device *dev, u8 mode,
return pcan_usb_pro_send_cmd(dev, &um);
}
-static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev,
- u32 *device_id)
+static int pcan_usb_pro_get_can_channel_id(struct peak_usb_device *dev,
+ u32 *can_ch_id)
{
struct pcan_usb_pro_devid *pdn;
struct pcan_usb_pro_msg um;
@@ -439,11 +441,23 @@ static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev,
return err;
pdn = (struct pcan_usb_pro_devid *)pc;
- *device_id = le32_to_cpu(pdn->serial_num);
+ *can_ch_id = le32_to_cpu(pdn->dev_num);
return err;
}
+static int pcan_usb_pro_set_can_channel_id(struct peak_usb_device *dev,
+ u32 can_ch_id)
+{
+ struct pcan_usb_pro_msg um;
+
+ pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN);
+ pcan_msg_add_rec(&um, PCAN_USBPRO_SETDEVID, dev->ctrl_idx,
+ can_ch_id);
+
+ return pcan_usb_pro_send_cmd(dev, &um);
+}
+
static int pcan_usb_pro_set_bittiming(struct peak_usb_device *dev,
struct can_bittiming *bt)
{
@@ -536,17 +550,19 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
if (rx->flags & PCAN_USBPRO_EXT)
can_frame->can_id |= CAN_EFF_FLAG;
- if (rx->flags & PCAN_USBPRO_RTR)
+ if (rx->flags & PCAN_USBPRO_RTR) {
can_frame->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(can_frame->data, rx->data, can_frame->len);
+ netdev->stats.rx_bytes += can_frame->len;
+ }
+ netdev->stats.rx_packets++;
+
hwts = skb_hwtstamps(skb);
peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(rx->ts32),
&hwts->hwtstamp);
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += can_frame->len;
netif_rx(skb);
return 0;
@@ -660,8 +676,6 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
hwts = skb_hwtstamps(skb);
peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(er->ts32), &hwts->hwtstamp);
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += can_frame->len;
netif_rx(skb);
return 0;
@@ -1022,6 +1036,10 @@ static int pcan_usb_pro_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_pro_ethtool_ops = {
.set_phys_id = pcan_usb_pro_set_phys_id,
+ .get_ts_info = pcan_get_ts_info,
+ .get_eeprom_len = peak_usb_get_eeprom_len,
+ .get_eeprom = peak_usb_get_eeprom,
+ .set_eeprom = peak_usb_set_eeprom,
};
/*
@@ -1075,7 +1093,8 @@ const struct peak_usb_adapter pcan_usb_pro = {
.dev_free = pcan_usb_pro_free,
.dev_set_bus = pcan_usb_pro_set_bus,
.dev_set_bittiming = pcan_usb_pro_set_bittiming,
- .dev_get_device_id = pcan_usb_pro_get_device_id,
+ .dev_get_can_channel_id = pcan_usb_pro_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_pro_set_can_channel_id,
.dev_decode_buf = pcan_usb_pro_decode_buf,
.dev_encode_msg = pcan_usb_pro_encode_msg,
.dev_start = pcan_usb_pro_start,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index 5d4cf14eb9d9..28e740af905d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -62,6 +62,7 @@ struct __packed pcan_usb_pro_fwinfo {
#define PCAN_USBPRO_SETBTR 0x02
#define PCAN_USBPRO_SETBUSACT 0x04
#define PCAN_USBPRO_SETSILENT 0x05
+#define PCAN_USBPRO_SETDEVID 0x06
#define PCAN_USBPRO_SETFILTR 0x0a
#define PCAN_USBPRO_SETTS 0x10
#define PCAN_USBPRO_GETDEVID 0x12
@@ -112,7 +113,7 @@ struct __packed pcan_usb_pro_devid {
u8 data_type;
u8 channel;
__le16 dummy;
- __le32 serial_num;
+ __le32 dev_num;
};
#define PCAN_USBPRO_LED_DEVICE 0x00
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 1679cbe45ded..a0f7bcec719c 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -28,6 +28,7 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
@@ -244,7 +245,8 @@ struct ucan_message_in {
/* CAN transmission complete
* (type == UCAN_IN_TX_COMPLETE)
*/
- struct ucan_tx_complete_entry_t can_tx_complete_msg[0];
+ DECLARE_FLEX_ARRAY(struct ucan_tx_complete_entry_t,
+ can_tx_complete_msg);
} __aligned(0x4) msg;
} __packed __aligned(0x4);
@@ -259,7 +261,6 @@ struct ucan_priv;
/* Context Information for transmission URBs */
struct ucan_urb_context {
struct ucan_priv *up;
- u8 dlc;
bool allocated;
};
@@ -276,7 +277,6 @@ struct ucan_priv {
/* linux USB device structures */
struct usb_device *udev;
- struct usb_interface *intf;
struct net_device *netdev;
/* lock for can->echo_skb (used around
@@ -621,8 +621,11 @@ static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m)
memcpy(cf->data, m->msg.can_msg.data, cf->len);
/* don't count error frames as real packets */
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (!(cf->can_id & CAN_ERR_FLAG)) {
+ stats->rx_packets++;
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
+ }
/* pass it to Linux */
netif_rx(skb);
@@ -634,7 +637,7 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
{
unsigned long flags;
u16 count, i;
- u8 echo_index, dlc;
+ u8 echo_index;
u16 len = le16_to_cpu(m->len);
struct ucan_urb_context *context;
@@ -658,7 +661,6 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
/* gather information from the context */
context = &up->context_array[echo_index];
- dlc = READ_ONCE(context->dlc);
/* Release context and restart queue if necessary.
* Also check if the context was allocated
@@ -671,8 +673,8 @@ static void ucan_tx_complete_msg(struct ucan_priv *up,
UCAN_TX_COMPLETE_SUCCESS) {
/* update statistics */
up->netdev->stats.tx_packets++;
- up->netdev->stats.tx_bytes += dlc;
- can_get_echo_skb(up->netdev, echo_index, NULL);
+ up->netdev->stats.tx_bytes +=
+ can_get_echo_skb(up->netdev, echo_index, NULL);
} else {
up->netdev->stats.tx_dropped++;
can_free_echo_skb(up->netdev, echo_index, NULL);
@@ -1086,8 +1088,6 @@ static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up,
}
m->len = cpu_to_le16(mlen);
- context->dlc = cf->len;
-
m->subtype = echo_index;
/* build the urb */
@@ -1120,7 +1120,7 @@ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb,
struct can_frame *cf = (struct can_frame *)skb->data;
/* check skb */
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* allocate a context and slow down tx path, if fifo state is low */
@@ -1234,6 +1234,10 @@ static const struct net_device_ops ucan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops ucan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/* Request to set bittiming
*
* This function generates an USB set bittiming message and transmits
@@ -1393,7 +1397,7 @@ static int ucan_probe(struct usb_interface *intf,
* Stage 3 for the final driver initialisation.
*/
- /* Prepare Memory for control transferes */
+ /* Prepare Memory for control transfers */
ctl_msg_buffer = devm_kzalloc(&udev->dev,
sizeof(union ucan_ctl_payload),
GFP_KERNEL);
@@ -1496,7 +1500,6 @@ static int ucan_probe(struct usb_interface *intf,
/* initialize data */
up->udev = udev;
- up->intf = intf;
up->netdev = netdev;
up->intf_index = iface_desc->desc.bInterfaceNumber;
up->in_ep_addr = in_ep_addr;
@@ -1513,6 +1516,7 @@ static int ucan_probe(struct usb_interface *intf,
spin_lock_init(&up->context_lock);
spin_lock_init(&up->echo_skb_lock);
netdev->netdev_ops = &ucan_netdev_ops;
+ netdev->ethtool_ops = &ucan_ethtool_ops;
usb_set_intfdata(intf, up);
SET_NETDEV_DEV(netdev, &intf->dev);
@@ -1527,10 +1531,9 @@ static int ucan_probe(struct usb_interface *intf,
ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
sizeof(union ucan_ctl_payload));
if (ret > 0) {
- /* copy string while ensuring zero terminiation */
- strncpy(firmware_str, up->ctl_msg_buffer->raw,
- sizeof(union ucan_ctl_payload));
- firmware_str[sizeof(union ucan_ctl_payload)] = '\0';
+ /* copy string while ensuring zero termination */
+ strscpy(firmware_str, up->ctl_msg_buffer->raw,
+ sizeof(union ucan_ctl_payload) + 1);
} else {
strcpy(firmware_str, "unknown");
}
@@ -1576,7 +1579,7 @@ static void ucan_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (up) {
- unregister_netdev(up->netdev);
+ unregister_candev(up->netdev);
free_candev(up->netdev);
}
}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index d1b83bd1b3cb..8a5596ce4e46 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -12,6 +12,7 @@
* who were very cooperative and answered my questions.
*/
+#include <linux/ethtool.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -21,7 +22,6 @@
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
/* driver constants */
#define MAX_RX_URBS 20
@@ -114,15 +114,12 @@ struct usb_8dev_tx_urb_context {
struct usb_8dev_priv *priv;
u32 echo_index;
- u8 dlc;
};
/* Structure to hold all of our device specific stuff */
struct usb_8dev_priv {
struct can_priv can; /* must be the first member */
- struct sk_buff *echo_skb[MAX_TX_URBS];
-
struct usb_device *udev;
struct net_device *netdev;
@@ -442,15 +439,15 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
if (rx_errors)
stats->rx_errors++;
-
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
+ if (priv->can.state != CAN_STATE_BUS_OFF) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
netif_rx(skb);
}
@@ -476,16 +473,15 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
if (msg->flags & USB_8DEV_EXTID)
cf->can_id |= CAN_EFF_FLAG;
- if (msg->flags & USB_8DEV_RTR)
+ if (msg->flags & USB_8DEV_RTR) {
cf->can_id |= CAN_RTR_FLAG;
- else
+ } else {
memcpy(cf->data, msg->data, cf->len);
-
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
- stats->rx_bytes += cf->len;
- netif_rx(skb);
- can_led_event(priv->netdev, CAN_LED_EVENT_RX);
+ netif_rx(skb);
} else {
netdev_warn(priv->netdev, "frame type %d unknown",
msg->type);
@@ -584,11 +580,7 @@ static void usb_8dev_write_bulk_callback(struct urb *urb)
urb->status);
netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += context->dlc;
-
- can_get_echo_skb(netdev, context->echo_index, NULL);
-
- can_led_event(netdev, CAN_LED_EVENT_TX);
+ netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index, NULL);
/* Release context */
context->echo_index = MAX_TX_URBS;
@@ -610,7 +602,7 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
int i, err;
size_t size = sizeof(struct usb_8dev_tx_msg);
- if (can_dropped_invalid_skb(netdev, skb))
+ if (can_dev_dropped_skb(netdev, skb))
return NETDEV_TX_OK;
/* create a URB, and a buffer for it, and copy the data to the URB */
@@ -657,7 +649,6 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
context->priv = priv;
context->echo_index = i;
- context->dlc = cf->len;
usb_fill_bulk_urb(urb, priv->udev,
usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_TX),
@@ -670,9 +661,20 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
atomic_inc(&priv->active_tx_urbs);
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(err))
- goto failed;
- else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
+ if (unlikely(err)) {
+ can_free_echo_skb(netdev, context->echo_index, NULL);
+
+ usb_unanchor_urb(urb);
+ usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
+
+ atomic_dec(&priv->active_tx_urbs);
+
+ if (err == -ENODEV)
+ netif_device_detach(netdev);
+ else
+ netdev_warn(netdev, "failed tx_urb %d\n", err);
+ stats->tx_dropped++;
+ } else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
/* Slow down tx path */
netif_stop_queue(netdev);
@@ -691,19 +693,6 @@ nofreecontext:
return NETDEV_TX_BUSY;
-failed:
- can_free_echo_skb(netdev, context->echo_index, NULL);
-
- usb_unanchor_urb(urb);
- usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
-
- atomic_dec(&priv->active_tx_urbs);
-
- if (err == -ENODEV)
- netif_device_detach(netdev);
- else
- netdev_warn(netdev, "failed tx_urb %d\n", err);
-
nomembuf:
usb_free_urb(urb);
@@ -816,8 +805,6 @@ static int usb_8dev_open(struct net_device *netdev)
if (err)
return err;
- can_led_event(netdev, CAN_LED_EVENT_OPEN);
-
/* finally start device */
err = usb_8dev_start(priv);
if (err) {
@@ -874,8 +861,6 @@ static int usb_8dev_close(struct net_device *netdev)
close_candev(netdev);
- can_led_event(netdev, CAN_LED_EVENT_STOP);
-
return err;
}
@@ -886,8 +871,12 @@ static const struct net_device_ops usb_8dev_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops usb_8dev_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct can_bittiming_const usb_8dev_bittiming_const = {
- .name = "usb_8dev",
+ .name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
.tseg2_min = 1,
@@ -943,6 +932,7 @@ static int usb_8dev_probe(struct usb_interface *intf,
CAN_CTRLMODE_CC_LEN8_DLC;
netdev->netdev_ops = &usb_8dev_netdev_ops;
+ netdev->ethtool_ops = &usb_8dev_ethtool_ops;
netdev->flags |= IFF_ECHO; /* we support local echo */
@@ -983,8 +973,6 @@ static int usb_8dev_probe(struct usb_interface *intf,
(version>>8) & 0xff, version & 0xff);
}
- devm_can_led_init(netdev);
-
return 0;
cleanup_unregister_candev:
@@ -1015,7 +1003,7 @@ static void usb_8dev_disconnect(struct usb_interface *intf)
}
static struct usb_driver usb_8dev_driver = {
- .name = "usb_8dev",
+ .name = KBUILD_MODNAME,
.probe = usb_8dev_probe,
.disconnect = usb_8dev_disconnect,
.id_table = usb_8dev_table,
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 067705e2850b..285635c23443 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -40,6 +40,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -70,34 +71,36 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
stats->rx_packets++;
- stats->rx_bytes += cfd->len;
+ stats->rx_bytes += can_skb_get_data_len(skb);
skb->pkt_type = PACKET_BROADCAST;
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_rx_ni(skb);
+ netif_rx(skb);
}
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
+ unsigned int len;
int loop;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
+ len = can_skb_get_data_len(skb);
stats->tx_packets++;
- stats->tx_bytes += cfd->len;
+ stats->tx_bytes += len;
/* set flag whether this packet has to be looped back */
loop = skb->pkt_type == PACKET_LOOPBACK;
+ skb_tx_timestamp(skb);
+
if (!echo) {
/* no echo handling available inside this driver */
if (loop) {
@@ -105,7 +108,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
* CAN core already did the echo for us
*/
stats->rx_packets++;
- stats->rx_bytes += cfd->len;
+ stats->rx_bytes += len;
}
consume_skb(skb);
return NETDEV_TX_OK;
@@ -133,7 +136,8 @@ static int vcan_change_mtu(struct net_device *dev, int new_mtu)
if (dev->flags & IFF_UP)
return -EBUSY;
- if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
+ !can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
dev->mtu = new_mtu;
@@ -145,6 +149,10 @@ static const struct net_device_ops vcan_netdev_ops = {
.ndo_change_mtu = vcan_change_mtu,
};
+static const struct ethtool_ops vcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void vcan_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
@@ -160,6 +168,7 @@ static void vcan_setup(struct net_device *dev)
dev->flags |= IFF_ECHO;
dev->netdev_ops = &vcan_netdev_ops;
+ dev->ethtool_ops = &vcan_ethtool_ops;
dev->needs_free_netdev = true;
}
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index 8861a7d875e7..4068d962203d 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -9,6 +9,7 @@
* Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
*/
+#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -33,28 +34,34 @@ struct vxcan_priv {
struct net_device __rcu *peer;
};
-static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
{
struct vxcan_priv *priv = netdev_priv(dev);
struct net_device *peer;
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *peerstats, *srcstats = &dev->stats;
- u8 len;
+ struct sk_buff *skb;
+ unsigned int len;
- if (can_dropped_invalid_skb(dev, skb))
+ if (can_dropped_invalid_skb(dev, oskb))
return NETDEV_TX_OK;
rcu_read_lock();
peer = rcu_dereference(priv->peer);
if (unlikely(!peer)) {
- kfree_skb(skb);
+ kfree_skb(oskb);
dev->stats.tx_dropped++;
goto out_unlock;
}
- skb = can_create_echo_skb(skb);
- if (!skb)
+ skb_tx_timestamp(oskb);
+
+ skb = skb_clone(oskb, GFP_ATOMIC);
+ if (skb) {
+ consume_skb(oskb);
+ } else {
+ kfree_skb(oskb);
goto out_unlock;
+ }
/* reset CAN GW hop counter */
skb->csum_start = 0;
@@ -62,8 +69,8 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
skb->dev = peer;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- len = cfd->len;
- if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
+ len = can_skb_get_data_len(skb);
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
srcstats->tx_bytes += len;
peerstats = &peer->stats;
@@ -124,7 +131,8 @@ static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
if (dev->flags & IFF_UP)
return -EBUSY;
- if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
+ !can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
dev->mtu = new_mtu;
@@ -139,6 +147,10 @@ static const struct net_device_ops vxcan_netdev_ops = {
.ndo_change_mtu = vxcan_change_mtu,
};
+static const struct ethtool_ops vxcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static void vxcan_setup(struct net_device *dev)
{
struct can_ml_priv *can_ml;
@@ -148,8 +160,9 @@ static void vxcan_setup(struct net_device *dev)
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
- dev->flags = (IFF_NOARP|IFF_ECHO);
+ dev->flags = IFF_NOARP;
dev->netdev_ops = &vxcan_netdev_ops;
+ dev->ethtool_ops = &vxcan_ethtool_ops;
dev->needs_free_netdev = true;
can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
@@ -223,7 +236,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
netif_carrier_off(peer);
- err = rtnl_configure_link(peer, ifmp);
+ err = rtnl_configure_link(peer, ifmp, 0, NULL);
if (err < 0)
goto unregister_network_device;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index e2b15d29d15e..43c812ea1de0 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Xilinx CAN device driver
*
- * Copyright (C) 2012 - 2014 Xilinx, Inc.
+ * Copyright (C) 2012 - 2022 Xilinx, Inc.
* Copyright (C) 2009 PetaLogix. All rights reserved.
* Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
*
@@ -9,8 +9,10 @@
* This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/errno.h>
+#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -26,7 +28,6 @@
#include <linux/types.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
-#include <linux/can/led.h>
#include <linux/pm_runtime.h>
#define DRIVER_NAME "xilinx_can"
@@ -51,7 +52,7 @@ enum xcan_reg {
/* only on CAN FD cores */
XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate
- * Prescalar
+ * Prescaler
*/
XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */
XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */
@@ -87,6 +88,8 @@ enum xcan_reg {
#define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */
+#define XCAN_BRPR_TDCO_MASK GENMASK(12, 8) /* TDCO */
+#define XCAN_2_BRPR_TDCO_MASK GENMASK(13, 8) /* TDCO for CANFD 2.0 */
#define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */
@@ -100,6 +103,7 @@ enum xcan_reg {
#define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */
+#define XCAN_SR_TDCV_MASK GENMASK(22, 16) /* TDCV Value */
#define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */
@@ -133,6 +137,7 @@ enum xcan_reg {
#define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
+#define XCAN_BRPR_TDC_ENABLE BIT(16) /* Transmitter Delay Compensation (TDC) Enable */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */
#define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */
@@ -239,7 +244,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = {
};
/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
@@ -259,24 +264,44 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
.tseg2_min = 1,
.tseg2_max = 128,
.sjw_max = 128,
- .brp_min = 2,
+ .brp_min = 1,
.brp_max = 256,
.brp_inc = 1,
};
/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
-static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
+static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
.name = DRIVER_NAME,
.tseg1_min = 1,
.tseg1_max = 32,
.tseg2_min = 1,
.tseg2_max = 16,
.sjw_max = 16,
- .brp_min = 2,
+ .brp_min = 1,
.brp_max = 256,
.brp_inc = 1,
};
+/* Transmission Delay Compensation constants for CANFD 1.0 */
+static const struct can_tdc_const xcan_tdc_const_canfd = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 32,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
+/* Transmission Delay Compensation constants for CANFD 2.0 */
+static const struct can_tdc_const xcan_tdc_const_canfd2 = {
+ .tdcv_min = 0,
+ .tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
+ .tdco_max = 64,
+ .tdcf_min = 0, /* Filter window not supported */
+ .tdcf_max = 0,
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -406,7 +431,7 @@ static int xcan_set_bittiming(struct net_device *ndev)
return -EPERM;
}
- /* Setting Baud Rate prescalar value in BRPR Register */
+ /* Setting Baud Rate prescaler value in BRPR Register */
btr0 = (bt->brp - 1);
/* Setting Time Segment 1 in BTR Register */
@@ -423,8 +448,16 @@ static int xcan_set_bittiming(struct net_device *ndev)
if (priv->devtype.cantype == XAXI_CANFD ||
priv->devtype.cantype == XAXI_CANFD_2_0) {
- /* Setting Baud Rate prescalar value in F_BRPR Register */
+ /* Setting Baud Rate prescaler value in F_BRPR Register */
btr0 = dbt->brp - 1;
+ if (can_tdc_is_enabled(&priv->can)) {
+ if (priv->devtype.cantype == XAXI_CANFD)
+ btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ XCAN_BRPR_TDC_ENABLE;
+ else
+ btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) |
+ XCAN_BRPR_TDC_ENABLE;
+ }
/* Setting Time Segment 1 in BTR Register */
btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
@@ -710,7 +743,7 @@ static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct xcan_priv *priv = netdev_priv(ndev);
int ret;
- if (can_dropped_invalid_skb(ndev, skb))
+ if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
@@ -787,10 +820,11 @@ static int xcan_rx(struct net_device *ndev, int frame_base)
*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
if (cf->len > 4)
*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
- }
- stats->rx_bytes += cf->len;
+ stats->rx_bytes += cf->len;
+ }
stats->rx_packets++;
+
netif_receive_skb(skb);
return 1;
@@ -871,8 +905,11 @@ static int xcanfd_rx(struct net_device *ndev, int frame_base)
*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
}
}
- stats->rx_bytes += cf->len;
+
+ if (!(cf->can_id & CAN_RTR_FLAG))
+ stats->rx_bytes += cf->len;
stats->rx_packets++;
+
netif_receive_skb(skb);
return 1;
@@ -929,6 +966,7 @@ static void xcan_set_error_state(struct net_device *ndev,
can_change_state(ndev, cf, tx_state, rx_state);
if (cf) {
+ cf->can_id |= CAN_ERR_CNT;
cf->data[6] = txerr;
cf->data[7] = rxerr;
}
@@ -965,13 +1003,8 @@ static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
- if (skb) {
- struct net_device_stats *stats = &ndev->stats;
-
- stats->rx_packets++;
- stats->rx_bytes += cf->len;
+ if (skb)
netif_rx(skb);
- }
}
}
@@ -1095,8 +1128,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
if (skb) {
skb_cf->can_id |= cf.can_id;
memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
- stats->rx_packets++;
- stats->rx_bytes += CAN_ERR_DLC;
netif_rx(skb);
}
}
@@ -1212,16 +1243,15 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
XCAN_IXR_RXNEMP_MASK);
}
- if (work_done) {
- can_led_event(ndev, CAN_LED_EVENT_RX);
+ if (work_done)
xcan_update_error_state_after_rxtx(ndev);
- }
if (work_done < quota) {
- napi_complete_done(napi, work_done);
- ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier |= xcan_rx_int_mask(priv);
- priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ if (napi_complete_done(napi, work_done)) {
+ ier = priv->read_reg(priv, XCAN_IER_OFFSET);
+ ier |= xcan_rx_int_mask(priv);
+ priv->write_reg(priv, XCAN_IER_OFFSET, ier);
+ }
}
return work_done;
}
@@ -1300,7 +1330,6 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
spin_unlock_irqrestore(&priv->tx_lock, flags);
- can_led_event(ndev, CAN_LED_EVENT_TX);
xcan_update_error_state_after_rxtx(ndev);
}
@@ -1422,7 +1451,6 @@ static int xcan_open(struct net_device *ndev)
goto err_candev;
}
- can_led_event(ndev, CAN_LED_EVENT_OPEN);
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -1454,7 +1482,6 @@ static int xcan_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
close_candev(ndev);
- can_led_event(ndev, CAN_LED_EVENT_STOP);
pm_runtime_put(priv->dev);
return 0;
@@ -1491,6 +1518,22 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
return 0;
}
+/**
+ * xcan_get_auto_tdcv - Get Transmitter Delay Compensation Value
+ * @ndev: Pointer to net_device structure
+ * @tdcv: Pointer to TDCV value
+ *
+ * Return: 0 on success
+ */
+static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+
+ *tdcv = FIELD_GET(XCAN_SR_TDCV_MASK, priv->read_reg(priv, XCAN_SR_OFFSET));
+
+ return 0;
+}
+
static const struct net_device_ops xcan_netdev_ops = {
.ndo_open = xcan_open,
.ndo_stop = xcan_close,
@@ -1498,6 +1541,10 @@ static const struct net_device_ops xcan_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+static const struct ethtool_ops xcan_ethtool_ops = {
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
/**
* xcan_suspend - Suspend method for the driver
* @dev: Address of the device structure
@@ -1743,17 +1790,24 @@ static int xcan_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_BERR_REPORTING;
- if (devtype->cantype == XAXI_CANFD)
+ if (devtype->cantype == XAXI_CANFD) {
priv->can.data_bittiming_const =
&xcan_data_bittiming_const_canfd;
+ priv->can.tdc_const = &xcan_tdc_const_canfd;
+ }
- if (devtype->cantype == XAXI_CANFD_2_0)
+ if (devtype->cantype == XAXI_CANFD_2_0) {
priv->can.data_bittiming_const =
&xcan_data_bittiming_const_canfd2;
+ priv->can.tdc_const = &xcan_tdc_const_canfd2;
+ }
if (devtype->cantype == XAXI_CANFD ||
- devtype->cantype == XAXI_CANFD_2_0)
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ devtype->cantype == XAXI_CANFD_2_0) {
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_TDC_AUTO;
+ priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv;
+ }
priv->reg_base = addr;
priv->tx_max = tx_max;
@@ -1761,12 +1815,18 @@ static int xcan_probe(struct platform_device *pdev)
spin_lock_init(&priv->tx_lock);
/* Get IRQ for the device */
- ndev->irq = platform_get_irq(pdev, 0);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_free;
+
+ ndev->irq = ret;
+
ndev->flags |= IFF_ECHO; /* We support local echo */
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->netdev_ops = &xcan_netdev_ops;
+ ndev->ethtool_ops = &xcan_ethtool_ops;
/* Getting the CAN can_clk info */
priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
@@ -1801,7 +1861,7 @@ static int xcan_probe(struct platform_device *pdev)
priv->can.clock.freq = clk_get_rate(priv->can_clk);
- netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
+ netif_napi_add_weight(ndev, &priv->napi, xcan_rx_poll, rx_max);
ret = register_candev(ndev);
if (ret) {
@@ -1809,8 +1869,6 @@ static int xcan_probe(struct platform_device *pdev)
goto err_disableclks;
}
- devm_can_led_init(ndev);
-
pm_runtime_put(&pdev->dev);
if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 7b1457a6e327..3ed5391bb18d 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -18,6 +18,7 @@ config NET_DSA_BCM_SF2
config NET_DSA_LOOP
tristate "DSA mock-up Ethernet switch chip support"
+ select NET_DSA_TAG_NONE
select FIXED_PHY
help
This enables support for a fake mock-up switch chip which
@@ -34,11 +35,37 @@ config NET_DSA_LANTIQ_GSWIP
the xrx200 / VR9 SoC.
config NET_DSA_MT7530
- tristate "MediaTek MT753x and MT7621 Ethernet switch support"
+ tristate "MediaTek MT7530 and MT7531 Ethernet switch support"
select NET_DSA_TAG_MTK
+ select MEDIATEK_GE_PHY
+ imply NET_DSA_MT7530_MDIO
+ imply NET_DSA_MT7530_MMIO
help
- This enables support for the MediaTek MT7530, MT7531, and MT7621
- Ethernet switch chips.
+ This enables support for the MediaTek MT7530 and MT7531 Ethernet
+ switch chips. Multi-chip module MT7530 in MT7621AT, MT7621DAT,
+ MT7621ST and MT7623AI SoCs, and built-in switch in MT7988 SoC are
+ supported as well.
+
+config NET_DSA_MT7530_MDIO
+ tristate "MediaTek MT7530 MDIO interface driver"
+ depends on NET_DSA_MT7530
+ select PCS_MTK_LYNXI
+ help
+ This enables support for the MediaTek MT7530 and MT7531 switch
+ chips which are connected via MDIO, as well as multi-chip
+ module MT7530 which can be found in the MT7621AT, MT7621DAT,
+ MT7621ST and MT7623AI SoCs.
+
+config NET_DSA_MT7530_MMIO
+ tristate "MediaTek MT7530 MMIO interface driver"
+ depends on NET_DSA_MT7530
+ depends on HAS_IOMEM
+ help
+ This enables support for the built-in Ethernet switch found
+ in the MediaTek MT7988 SoC.
+ The switch is a design similar to the MT7531, but the switch registers
+ are directly mapped into the SoC's register space rather than being
+ accessible via MDIO.
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
@@ -59,37 +86,29 @@ source "drivers/net/dsa/sja1105/Kconfig"
source "drivers/net/dsa/xrs700x/Kconfig"
-config NET_DSA_QCA8K
- tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
- select NET_DSA_TAG_QCA
- select REGMAP
- help
- This enables support for the Qualcomm Atheros QCA8K Ethernet
- switch chips.
+source "drivers/net/dsa/realtek/Kconfig"
-config NET_DSA_REALTEK_SMI
- tristate "Realtek SMI Ethernet switch family support"
- select NET_DSA_TAG_RTL4_A
- select NET_DSA_TAG_RTL8_4
- select FIXED_PHY
- select IRQ_DOMAIN
- select REALTEK_PHY
- select REGMAP
+config NET_DSA_RZN1_A5PSW
+ tristate "Renesas RZ/N1 A5PSW Ethernet switch support"
+ depends on OF && ARCH_RZN1
+ select NET_DSA_TAG_RZN1_A5PSW
+ select PCS_RZN1_MIIC
help
- This enables support for the Realtek SMI-based switch
- chips, currently only RTL8366RB.
+ This driver supports the A5PSW switch, which is embedded in the
+ Renesas RZ/N1 SoC.
config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
select REGMAP
help
- This enables support for the SMSC/Microchip LAN9303 3 port ethernet
+ This enables support for the Microchip LAN9303/LAN9354 3 port ethernet
switch chips.
config NET_DSA_SMSC_LAN9303_I2C
tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in I2C managed mode"
depends on I2C
+ depends on VLAN_8021Q || VLAN_8021Q=n
select NET_DSA_SMSC_LAN9303
select REGMAP_I2C
help
@@ -97,14 +116,16 @@ config NET_DSA_SMSC_LAN9303_I2C
for I2C managed mode.
config NET_DSA_SMSC_LAN9303_MDIO
- tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in MDIO managed mode"
+ tristate "Microchip LAN9303/LAN9354 3-ports 10/100 ethernet switch in MDIO managed mode"
select NET_DSA_SMSC_LAN9303
+ depends on VLAN_8021Q || VLAN_8021Q=n
help
- Enable access functions if the SMSC/Microchip LAN9303 is configured
+ Enable access functions if the Microchip LAN9303/LAN9354 is configured
for MDIO managed mode.
config NET_DSA_VITESSE_VSC73XX
tristate
+ select NET_DSA_TAG_NONE
select FIXED_PHY
select VITESSE_PHY
select GPIOLIB
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 8da1569a34e6..cb9a97340e58 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -7,10 +7,10 @@ obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
endif
obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
+obj-$(CONFIG_NET_DSA_MT7530_MDIO) += mt7530-mdio.o
+obj-$(CONFIG_NET_DSA_MT7530_MMIO) += mt7530-mmio.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
-obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
-realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o rtl8365mb.o
+obj-$(CONFIG_NET_DSA_RZN1_A5PSW) += rzn1_a5psw.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
@@ -23,5 +23,6 @@ obj-y += microchip/
obj-y += mv88e6xxx/
obj-y += ocelot/
obj-y += qca/
+obj-y += realtek/
obj-y += sja1105/
obj-y += xrs700x/
diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig
index 90b525160b71..ebaa4a80d544 100644
--- a/drivers/net/dsa/b53/Kconfig
+++ b/drivers/net/dsa/b53/Kconfig
@@ -2,6 +2,7 @@
menuconfig B53
tristate "Broadcom BCM53xx managed switch support"
depends on NET_DSA
+ select NET_DSA_TAG_NONE
select NET_DSA_TAG_BRCM
select NET_DSA_TAG_BRCM_LEGACY
select NET_DSA_TAG_BRCM_PREPEND
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index af4761968733..3464ce5e7470 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -972,7 +972,7 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
if (stringset == ETH_SS_STATS) {
for (i = 0; i < mib_size; i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
mibs[i].name, ETH_GSTRING_LEN);
} else if (stringset == ETH_SS_PHY_STATS) {
phydev = b53_get_phy_device(ds, port);
@@ -1209,6 +1209,50 @@ static void b53_force_port_config(struct b53_device *dev, int port,
b53_write8(dev, B53_CTRL_PAGE, off, reg);
}
+static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct b53_device *dev = ds->priv;
+ u8 rgmii_ctrl = 0, off;
+
+ if (port == dev->imp_port)
+ off = B53_RGMII_CTRL_IMP;
+ else
+ off = B53_RGMII_CTRL_P(port);
+
+ b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ rgmii_ctrl |= (RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_TXC);
+ rgmii_ctrl |= RGMII_CTRL_DLL_RXC;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC);
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ default:
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC);
+ break;
+ }
+
+ if (port != dev->imp_port) {
+ if (is63268(dev))
+ rgmii_ctrl |= RGMII_CTRL_MII_OVERRIDE;
+
+ rgmii_ctrl |= RGMII_CTRL_ENABLE_GMII;
+ }
+
+ b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
+
+ dev_dbg(ds->dev, "Configured port %d for %s\n", port,
+ phy_modes(interface));
+}
+
static void b53_adjust_link(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
@@ -1235,6 +1279,9 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
tx_pause, rx_pause);
b53_force_link(dev, port, phydev->link);
+ if (is63xx(dev) && port >= B53_63XX_RGMII0)
+ b53_adjust_63xx_rgmii(ds, port, phydev->interface);
+
if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
if (port == dev->imp_port)
off = B53_RGMII_CTRL_IMP;
@@ -1309,87 +1356,70 @@ void b53_port_event(struct dsa_switch *ds, int port)
}
EXPORT_SYMBOL(b53_port_event);
-void b53_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct b53_device *dev = ds->priv;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- if (dev->ops->serdes_phylink_validate)
- dev->ops->serdes_phylink_validate(dev, port, mask, state);
+ /* Internal ports need GMII for PHYLIB */
+ __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
+
+ /* These switches appear to support MII and RevMII too, but beyond
+ * this, the code gives very few clues. FIXME: We probably need more
+ * interface modes here.
+ *
+ * According to b53_srab_mux_init(), ports 3..5 can support:
+ * SGMII, MII, GMII, RGMII or INTERNAL depending on the MUX setting.
+ * However, the interface mode read from the MUX configuration is
+ * not passed back to DSA, so phylink uses NA.
+ * DT can specify RGMII for ports 0, 1.
+ * For MDIO, port 8 can be RGMII_TXID.
+ */
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII, config->supported_interfaces);
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
- /* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we
- * support Gigabit, including Half duplex.
+ /* 5325/5365 are not capable of gigabit speeds, everything else is.
+ * Note: the original code also excluded Gigabit for MII, RevMII
+ * and 802.3z modes. MII and RevMII are not able to work above 100M,
+ * so they will be excluded by the generic validator implementation.
+ * However, the exclusion of Gigabit for 802.3z just seems wrong.
*/
- if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- !phy_interface_mode_is_8023z(state->interface) &&
- !(is5325(dev) || is5365(dev))) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- }
+ if (!(is5325(dev) || is5365(dev)))
+ config->mac_capabilities |= MAC_1000;
- if (!phy_interface_mode_is_8023z(state->interface)) {
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- }
+ /* Get the implementation specific capabilities */
+ if (dev->ops->phylink_get_caps)
+ dev->ops->phylink_get_caps(dev, port, config);
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- phylink_helper_basex_speed(state);
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
}
-EXPORT_SYMBOL(b53_phylink_validate);
-int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static struct phylink_pcs *b53_phylink_mac_select_pcs(struct dsa_switch *ds,
+ int port,
+ phy_interface_t interface)
{
struct b53_device *dev = ds->priv;
- int ret = -EOPNOTSUPP;
- if ((phy_interface_mode_is_8023z(state->interface) ||
- state->interface == PHY_INTERFACE_MODE_SGMII) &&
- dev->ops->serdes_link_state)
- ret = dev->ops->serdes_link_state(dev, port, state);
+ if (!dev->ops->phylink_mac_select_pcs)
+ return NULL;
- return ret;
+ return dev->ops->phylink_mac_select_pcs(dev, port, interface);
}
-EXPORT_SYMBOL(b53_phylink_mac_link_state);
void b53_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct b53_device *dev = ds->priv;
-
- if (mode == MLO_AN_PHY || mode == MLO_AN_FIXED)
- return;
-
- if ((phy_interface_mode_is_8023z(state->interface) ||
- state->interface == PHY_INTERFACE_MODE_SGMII) &&
- dev->ops->serdes_config)
- dev->ops->serdes_config(dev, port, mode, state);
}
EXPORT_SYMBOL(b53_phylink_mac_config);
-void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port)
-{
- struct b53_device *dev = ds->priv;
-
- if (dev->ops->serdes_an_restart)
- dev->ops->serdes_an_restart(dev, port);
-}
-EXPORT_SYMBOL(b53_phylink_mac_an_restart);
-
void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
@@ -1419,6 +1449,9 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
{
struct b53_device *dev = ds->priv;
+ if (is63xx(dev) && port >= B53_63XX_RGMII0)
+ b53_adjust_63xx_rgmii(ds, port, interface);
+
if (mode == MLO_AN_PHY)
return;
@@ -1620,12 +1653,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
return 0;
}
- if (bitmap_weight(free_bins, dev->num_arl_bins) == 0)
- return -ENOSPC;
-
*idx = find_first_bit(free_bins, dev->num_arl_bins);
-
- return -ENOENT;
+ return *idx >= dev->num_arl_bins ? -ENOSPC : -ENOENT;
}
static int b53_arl_op(struct b53_device *dev, int op, int port,
@@ -1704,7 +1733,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
}
int b53_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1724,7 +1754,8 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_fdb_add);
int b53_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1825,7 +1856,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_fdb_dump);
int b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1845,7 +1877,8 @@ int b53_mdb_add(struct dsa_switch *ds, int port,
EXPORT_SYMBOL(b53_mdb_add);
int b53_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct b53_device *priv = ds->priv;
int ret;
@@ -1860,7 +1893,8 @@ int b53_mdb_del(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_mdb_del);
-int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
+int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ bool *tx_fwd_offload, struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
@@ -1887,7 +1921,7 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
b53_for_each_port(dev, i) {
- if (dsa_to_port(ds, i)->bridge_dev != br)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Add this local port to the remote port VLAN control
@@ -1911,7 +1945,7 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
}
EXPORT_SYMBOL(b53_br_join);
-void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
+void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
{
struct b53_device *dev = ds->priv;
struct b53_vlan *vl = &dev->vlans[0];
@@ -1923,7 +1957,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
b53_for_each_port(dev, i) {
/* Don't touch the remaining ports */
- if (dsa_to_port(ds, i)->bridge_dev != br)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
@@ -2101,7 +2135,8 @@ out:
EXPORT_SYMBOL(b53_get_tag_protocol);
int b53_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
+ struct netlink_ext_ack *extack)
{
struct b53_device *dev = ds->priv;
u16 reg, loc;
@@ -2185,7 +2220,7 @@ int b53_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy)
{
int ret;
- ret = phy_init_eee(phy, 0);
+ ret = phy_init_eee(phy, false);
if (ret)
return 0;
@@ -2258,10 +2293,9 @@ static const struct dsa_switch_ops b53_switch_ops = {
.phy_read = b53_phy_read16,
.phy_write = b53_phy_write16,
.adjust_link = b53_adjust_link,
- .phylink_validate = b53_phylink_validate,
- .phylink_mac_link_state = b53_phylink_mac_link_state,
+ .phylink_get_caps = b53_phylink_get_caps,
+ .phylink_mac_select_pcs = b53_phylink_mac_select_pcs,
.phylink_mac_config = b53_phylink_mac_config,
- .phylink_mac_an_restart = b53_phylink_mac_an_restart,
.phylink_mac_link_down = b53_phylink_mac_link_down,
.phylink_mac_link_up = b53_phylink_mac_link_up,
.port_enable = b53_enable_port,
@@ -2436,6 +2470,19 @@ static const struct b53_chip_data b53_switch_chips[] = {
.jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
},
{
+ .chip_id = BCM63268_DEVICE_ID,
+ .dev_name = "BCM63268",
+ .vlans = 4096,
+ .enabled_ports = 0, /* pdata must provide them */
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .imp_port = 8,
+ .vta_regs = B53_VTA_REGS_63XX,
+ .duplex_reg = B53_DUPLEX_STAT_63XX,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX,
+ },
+ {
.chip_id = BCM53010_DEVICE_ID,
.dev_name = "BCM53010",
.vlans = 4096,
@@ -2566,6 +2613,20 @@ static const struct b53_chip_data b53_switch_chips[] = {
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
+ {
+ .chip_id = BCM53134_DEVICE_ID,
+ .dev_name = "BCM53134",
+ .vlans = 4096,
+ .enabled_ports = 0x12f,
+ .imp_port = 8,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .arl_bins = 4,
+ .arl_buckets = 1024,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
};
static int b53_switch_init(struct b53_device *dev)
@@ -2743,6 +2804,7 @@ int b53_switch_detect(struct b53_device *dev)
case BCM53012_DEVICE_ID:
case BCM53018_DEVICE_ID:
case BCM53019_DEVICE_ID:
+ case BCM53134_DEVICE_ID:
dev->chip_id = id32;
break;
default:
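The b53_arl_read() hunk above works because find_first_bit() returns the bitmap size when no bit is set, so a single comparison now distinguishes "no free bin" (-ENOSPC) from "address not found, free bin available" (-ENOENT). A minimal userspace sketch of that semantics; the one-word find_first_bit() below only mimics the kernel helper and is not its implementation:

#include <errno.h>
#include <stdio.h>

/* Toy stand-in for the kernel's find_first_bit(): returns the index of
 * the lowest set bit, or nbits when no bit is set at all.
 */
static unsigned int find_first_bit(unsigned long map, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++)
		if (map & (1UL << i))
			return i;
	return nbits;
}

static int arl_pick_bin(unsigned long free_bins, unsigned int num_bins,
			unsigned int *idx)
{
	*idx = find_first_bit(free_bins, num_bins);
	return *idx >= num_bins ? -ENOSPC : -ENOENT;
}

int main(void)
{
	unsigned int idx;

	printf("%d\n", arl_pick_bin(0x0, 4, &idx));	/* -ENOSPC: every bin is taken */
	printf("%d\n", arl_pick_bin(0x4, 4, &idx));	/* -ENOENT: bin 2 is free */
	return 0;
}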
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index a7aeb3c132c9..8b422b298cd5 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -286,6 +286,7 @@ static const struct b53_io_ops b53_mdio_ops = {
#define B53_BRCM_OUI_2 0x03625c00
#define B53_BRCM_OUI_3 0x00406000
#define B53_BRCM_OUI_4 0x01410c00
+#define B53_BRCM_OUI_5 0xae025000
static int b53_mdio_probe(struct mdio_device *mdiodev)
{
@@ -313,7 +314,8 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
(phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
(phy_id & 0xfffffc00) != B53_BRCM_OUI_3 &&
- (phy_id & 0xfffffc00) != B53_BRCM_OUI_4) {
+ (phy_id & 0xfffffc00) != B53_BRCM_OUI_4 &&
+ (phy_id & 0xfffffc00) != B53_BRCM_OUI_5) {
dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
return -ENODEV;
}
@@ -356,8 +358,6 @@ static void b53_mdio_remove(struct mdio_device *mdiodev)
return;
b53_switch_remove(dev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void b53_mdio_shutdown(struct mdio_device *mdiodev)
@@ -377,6 +377,7 @@ static const struct of_device_id b53_of_match[] = {
{ .compatible = "brcm,bcm53115" },
{ .compatible = "brcm,bcm53125" },
{ .compatible = "brcm,bcm53128" },
+ { .compatible = "brcm,bcm53134" },
{ .compatible = "brcm,bcm5365" },
{ .compatible = "brcm,bcm5389" },
{ .compatible = "brcm,bcm5395" },
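The extra OUI accepted in b53_mdio_probe() above relies on the usual PHY ID layout, where the low ten bits carry model and revision; masking them off leaves the vendor part that is compared against the B53_BRCM_OUI_* constants. A small userspace sketch, with the PHY ID value made up purely for illustration:

#include <stdio.h>

#define B53_BRCM_OUI_5	0xae025000

int main(void)
{
	/* Hypothetical ID: OUI part 0xae025000, model 3, revision 2 */
	unsigned int phy_id = 0xae025032;

	/* Drop model/revision bits [9:0], keep only the vendor part */
	if ((phy_id & 0xfffffc00) == B53_BRCM_OUI_5)
		printf("recognised Broadcom switch PHY ID 0x%08x\n", phy_id);
	return 0;
}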
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index ae4c79d39bc0..5db1ed26f03a 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -216,6 +216,18 @@ static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg,
return 0;
}
+static int b53_mmap_phy_read16(struct b53_device *dev, int addr, int reg,
+ u16 *value)
+{
+ return -EIO;
+}
+
+static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg,
+ u16 value)
+{
+ return -EIO;
+}
+
static const struct b53_io_ops b53_mmap_ops = {
.read8 = b53_mmap_read8,
.read16 = b53_mmap_read16,
@@ -227,6 +239,8 @@ static const struct b53_io_ops b53_mmap_ops = {
.write32 = b53_mmap_write32,
.write48 = b53_mmap_write48,
.write64 = b53_mmap_write64,
+ .phy_read16 = b53_mmap_phy_read16,
+ .phy_write16 = b53_mmap_phy_write16,
};
static int b53_mmap_probe_of(struct platform_device *pdev,
@@ -248,7 +262,7 @@ static int b53_mmap_probe_of(struct platform_device *pdev,
return -ENOMEM;
pdata->regs = mem;
- pdata->chip_id = BCM63XX_DEVICE_ID;
+ pdata->chip_id = (u32)(unsigned long)device_get_match_data(dev);
pdata->big_endian = of_property_read_bool(np, "big-endian");
of_ports = of_get_child_by_name(np, "ports");
@@ -263,7 +277,7 @@ static int b53_mmap_probe_of(struct platform_device *pdev,
if (of_property_read_u32(of_port, "reg", &reg))
continue;
- if (reg < B53_CPU_PORT)
+ if (reg < B53_N_PORTS)
pdata->enabled_ports |= BIT(reg);
}
@@ -316,8 +330,6 @@ static int b53_mmap_remove(struct platform_device *pdev)
if (dev)
b53_switch_remove(dev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
@@ -332,11 +344,28 @@ static void b53_mmap_shutdown(struct platform_device *pdev)
}
static const struct of_device_id b53_mmap_of_table[] = {
- { .compatible = "brcm,bcm3384-switch" },
- { .compatible = "brcm,bcm6328-switch" },
- { .compatible = "brcm,bcm6368-switch" },
- { .compatible = "brcm,bcm63xx-switch" },
- { /* sentinel */ },
+ {
+ .compatible = "brcm,bcm3384-switch",
+ .data = (void *)BCM63XX_DEVICE_ID,
+ }, {
+ .compatible = "brcm,bcm6318-switch",
+ .data = (void *)BCM63268_DEVICE_ID,
+ }, {
+ .compatible = "brcm,bcm6328-switch",
+ .data = (void *)BCM63XX_DEVICE_ID,
+ }, {
+ .compatible = "brcm,bcm6362-switch",
+ .data = (void *)BCM63XX_DEVICE_ID,
+ }, {
+ .compatible = "brcm,bcm6368-switch",
+ .data = (void *)BCM63XX_DEVICE_ID,
+ }, {
+ .compatible = "brcm,bcm63268-switch",
+ .data = (void *)BCM63268_DEVICE_ID,
+ }, {
+ .compatible = "brcm,bcm63xx-switch",
+ .data = (void *)BCM63XX_DEVICE_ID,
+ }, { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
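The b53_mmap of_device_id table above stores a plain chip ID in the .data pointer and reads it back with device_get_match_data(). A hedged sketch of that pattern; the compatible strings and all example_* names are placeholders rather than anything from this driver:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

/* Hypothetical chip IDs, for illustration only */
enum { EXAMPLE_CHIP_A = 0x6300, EXAMPLE_CHIP_B = 0x63268 };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,chip-a", .data = (void *)EXAMPLE_CHIP_A },
	{ .compatible = "vendor,chip-b", .data = (void *)EXAMPLE_CHIP_B },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static int example_probe(struct platform_device *pdev)
{
	/* Double cast keeps 64-bit builds free of truncation warnings */
	u32 chip_id = (u32)(unsigned long)device_get_match_data(&pdev->dev);

	dev_info(&pdev->dev, "probed chip 0x%x\n", chip_id);
	return 0;
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example-switch",
		.of_match_table = example_of_match,
	},
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");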
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 579da74ada64..fdcfd5081c28 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -21,7 +21,7 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <net/dsa.h>
@@ -29,7 +29,6 @@
struct b53_device;
struct net_device;
-struct phylink_link_state;
struct b53_io_ops {
int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value);
@@ -46,19 +45,15 @@ struct b53_io_ops {
int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value);
int (*irq_enable)(struct b53_device *dev, int port);
void (*irq_disable)(struct b53_device *dev, int port);
+ void (*phylink_get_caps)(struct b53_device *dev, int port,
+ struct phylink_config *config);
+ struct phylink_pcs *(*phylink_mac_select_pcs)(struct b53_device *dev,
+ int port,
+ phy_interface_t interface);
u8 (*serdes_map_lane)(struct b53_device *dev, int port);
- int (*serdes_link_state)(struct b53_device *dev, int port,
- struct phylink_link_state *state);
- void (*serdes_config)(struct b53_device *dev, int port,
- unsigned int mode,
- const struct phylink_link_state *state);
- void (*serdes_an_restart)(struct b53_device *dev, int port);
void (*serdes_link_set)(struct b53_device *dev, int port,
unsigned int mode, phy_interface_t interface,
bool link_up);
- void (*serdes_phylink_validate)(struct b53_device *dev, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
};
#define B53_INVALID_LANE 0xff
@@ -75,6 +70,7 @@ enum {
BCM53125_DEVICE_ID = 0x53125,
BCM53128_DEVICE_ID = 0x53128,
BCM63XX_DEVICE_ID = 0x6300,
+ BCM63268_DEVICE_ID = 0x63268,
BCM53010_DEVICE_ID = 0x53010,
BCM53011_DEVICE_ID = 0x53011,
BCM53012_DEVICE_ID = 0x53012,
@@ -84,10 +80,18 @@ enum {
BCM583XX_DEVICE_ID = 0x58300,
BCM7445_DEVICE_ID = 0x7445,
BCM7278_DEVICE_ID = 0x7278,
+ BCM53134_DEVICE_ID = 0x5075,
+};
+
+struct b53_pcs {
+ struct phylink_pcs pcs;
+ struct b53_device *dev;
+ u8 lane;
};
#define B53_N_PORTS 9
#define B53_N_PORTS_25 6
+#define B53_N_PCS 2
struct b53_port {
u16 vlan_ctl_mask;
@@ -144,6 +148,8 @@ struct b53_device {
bool vlan_enabled;
unsigned int num_ports;
struct b53_port *ports;
+
+ struct b53_pcs pcs[B53_N_PCS];
};
#define b53_for_each_port(dev, i) \
@@ -182,12 +188,19 @@ static inline int is531x5(struct b53_device *dev)
{
return dev->chip_id == BCM53115_DEVICE_ID ||
dev->chip_id == BCM53125_DEVICE_ID ||
- dev->chip_id == BCM53128_DEVICE_ID;
+ dev->chip_id == BCM53128_DEVICE_ID ||
+ dev->chip_id == BCM53134_DEVICE_ID;
}
static inline int is63xx(struct b53_device *dev)
{
- return dev->chip_id == BCM63XX_DEVICE_ID;
+ return dev->chip_id == BCM63XX_DEVICE_ID ||
+ dev->chip_id == BCM63268_DEVICE_ID;
+}
+
+static inline int is63268(struct b53_device *dev)
+{
+ return dev->chip_id == BCM63268_DEVICE_ID;
}
static inline int is5301x(struct b53_device *dev)
@@ -204,9 +217,11 @@ static inline int is58xx(struct b53_device *dev)
return dev->chip_id == BCM58XX_DEVICE_ID ||
dev->chip_id == BCM583XX_DEVICE_ID ||
dev->chip_id == BCM7445_DEVICE_ID ||
- dev->chip_id == BCM7278_DEVICE_ID;
+ dev->chip_id == BCM7278_DEVICE_ID ||
+ dev->chip_id == BCM53134_DEVICE_ID;
}
+#define B53_63XX_RGMII0 4
#define B53_CPU_PORT_25 5
#define B53_CPU_PORT 8
@@ -324,8 +339,9 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_get_sset_count(struct dsa_switch *ds, int port, int sset);
void b53_get_ethtool_phy_stats(struct dsa_switch *ds, int port, uint64_t *data);
-int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
-void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
+int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ bool *tx_fwd_offload, struct netlink_ext_ack *extack);
+void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge);
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
int b53_br_flags_pre(struct dsa_switch *ds, int port,
@@ -336,15 +352,9 @@ int b53_br_flags(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
int b53_setup_devlink_resources(struct dsa_switch *ds);
void b53_port_event(struct dsa_switch *ds, int port);
-void b53_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
-int b53_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state);
void b53_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state);
-void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port);
void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface);
@@ -362,17 +372,22 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
int b53_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
int b53_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
int b53_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid);
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
int b53_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
int b53_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int b53_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
int b53_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress,
+ struct netlink_ext_ack *extack);
enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mprot);
void b53_mirror_del(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index b2c539a42154..bfbcb66bef66 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -138,6 +138,7 @@
#define B53_RGMII_CTRL_IMP 0x60
#define RGMII_CTRL_ENABLE_GMII BIT(7)
+#define RGMII_CTRL_MII_OVERRIDE BIT(6)
#define RGMII_CTRL_TIMING_SEL BIT(2)
#define RGMII_CTRL_DLL_RXC BIT(1)
#define RGMII_CTRL_DLL_TXC BIT(0)
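The DLL_RXC/DLL_TXC bits defined above are what b53_adjust_63xx_rgmii() earlier in this patch toggles per RGMII variant: the -id/-rxid/-txid phy-mode suffixes decide which clock delay the switch itself inserts. A standalone userspace sketch of that mapping, with the bit values copied from the hunk above:

#include <stdio.h>

#define RGMII_CTRL_DLL_RXC	(1 << 1)
#define RGMII_CTRL_DLL_TXC	(1 << 0)

enum rgmii_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

static unsigned int rgmii_delay_bits(enum rgmii_mode mode)
{
	switch (mode) {
	case RGMII_ID:
		return RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC;	/* delay both */
	case RGMII_RXID:
		return RGMII_CTRL_DLL_RXC;			/* delay RX clock only */
	case RGMII_TXID:
		return RGMII_CTRL_DLL_TXC;			/* delay TX clock only */
	case RGMII:
	default:
		return 0;					/* delays added elsewhere */
	}
}

int main(void)
{
	printf("rgmii-id -> ctrl bits 0x%x\n", rgmii_delay_bits(RGMII_ID));
	return 0;
}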
diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c
index 5ae3d9783b68..0690210770ff 100644
--- a/drivers/net/dsa/b53/b53_serdes.c
+++ b/drivers/net/dsa/b53/b53_serdes.c
@@ -17,6 +17,11 @@
#include "b53_serdes.h"
#include "b53_regs.h"
+static inline struct b53_pcs *pcs_to_b53_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct b53_pcs, pcs);
+}
+
static void b53_serdes_write_blk(struct b53_device *dev, u8 offset, u16 block,
u16 value)
{
@@ -60,51 +65,47 @@ static u16 b53_serdes_read(struct b53_device *dev, u8 lane,
return b53_serdes_read_blk(dev, offset, block);
}
-void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode,
- const struct phylink_link_state *state)
+static int b53_serdes_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
+ u8 lane = pcs_to_b53_pcs(pcs)->lane;
u16 reg;
- if (lane == B53_INVALID_LANE)
- return;
-
reg = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
SERDES_DIGITAL_BLK);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ if (interface == PHY_INTERFACE_MODE_1000BASEX)
reg |= FIBER_MODE_1000X;
else
reg &= ~FIBER_MODE_1000X;
b53_serdes_write(dev, lane, B53_SERDES_DIGITAL_CONTROL(1),
SERDES_DIGITAL_BLK, reg);
+
+ return 0;
}
-EXPORT_SYMBOL(b53_serdes_config);
-void b53_serdes_an_restart(struct b53_device *dev, int port)
+static void b53_serdes_an_restart(struct phylink_pcs *pcs)
{
- u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
+ u8 lane = pcs_to_b53_pcs(pcs)->lane;
u16 reg;
- if (lane == B53_INVALID_LANE)
- return;
-
reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
SERDES_MII_BLK);
reg |= BMCR_ANRESTART;
b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR),
SERDES_MII_BLK, reg);
}
-EXPORT_SYMBOL(b53_serdes_an_restart);
-int b53_serdes_link_state(struct b53_device *dev, int port,
- struct phylink_link_state *state)
+static void b53_serdes_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_device *dev = pcs_to_b53_pcs(pcs)->dev;
+ u8 lane = pcs_to_b53_pcs(pcs)->lane;
u16 dig, bmsr;
- if (lane == B53_INVALID_LANE)
- return 1;
-
dig = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_STATUS,
SERDES_DIGITAL_BLK);
bmsr = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMSR),
@@ -133,10 +134,7 @@ int b53_serdes_link_state(struct b53_device *dev, int port,
state->pause |= MLO_PAUSE_RX;
if (dig & PAUSE_RESOLUTION_TX_SIDE)
state->pause |= MLO_PAUSE_TX;
-
- return 0;
}
-EXPORT_SYMBOL(b53_serdes_link_state);
void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
phy_interface_t interface, bool link_up)
@@ -158,9 +156,14 @@ void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
}
EXPORT_SYMBOL(b53_serdes_link_set);
-void b53_serdes_phylink_validate(struct b53_device *dev, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static const struct phylink_pcs_ops b53_pcs_ops = {
+ .pcs_get_state = b53_serdes_get_state,
+ .pcs_config = b53_serdes_config,
+ .pcs_an_restart = b53_serdes_an_restart,
+};
+
+void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config)
{
u8 lane = b53_serdes_map_lane(dev, port);
@@ -169,20 +172,47 @@ void b53_serdes_phylink_validate(struct b53_device *dev, int port,
switch (lane) {
case 0:
- phylink_set(supported, 2500baseX_Full);
+ /* It appears lane 0 supports 2500base-X and 1000base-X */
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_2500FD;
fallthrough;
case 1:
- phylink_set(supported, 1000baseX_Full);
+ /* It appears lane 1 only supports 1000base-X and SGMII */
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_1000FD;
break;
default:
break;
}
}
-EXPORT_SYMBOL(b53_serdes_phylink_validate);
+EXPORT_SYMBOL(b53_serdes_phylink_get_caps);
+
+struct phylink_pcs *b53_serdes_phylink_mac_select_pcs(struct b53_device *dev,
+ int port,
+ phy_interface_t interface)
+{
+ u8 lane = b53_serdes_map_lane(dev, port);
+
+ if (lane == B53_INVALID_LANE || lane >= B53_N_PCS ||
+ !dev->pcs[lane].dev)
+ return NULL;
+
+ if (!phy_interface_mode_is_8023z(interface) &&
+ interface != PHY_INTERFACE_MODE_SGMII)
+ return NULL;
+
+ return &dev->pcs[lane].pcs;
+}
+EXPORT_SYMBOL(b53_serdes_phylink_mac_select_pcs);
int b53_serdes_init(struct b53_device *dev, int port)
{
u8 lane = b53_serdes_map_lane(dev, port);
+ struct b53_pcs *pcs;
u16 id0, msb, lsb;
if (lane == B53_INVALID_LANE)
@@ -205,6 +235,11 @@ int b53_serdes_init(struct b53_device *dev, int port)
(id0 >> SERDES_ID0_REV_NUM_SHIFT) & SERDES_ID0_REV_NUM_MASK,
(u32)msb << 16 | lsb);
+ pcs = &dev->pcs[lane];
+ pcs->dev = dev;
+ pcs->lane = lane;
+ pcs->pcs.ops = &b53_pcs_ops;
+
return 0;
}
EXPORT_SYMBOL(b53_serdes_init);
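The PCS conversion above hands phylink only the embedded struct phylink_pcs and recovers struct b53_pcs with container_of() in pcs_to_b53_pcs(). A self-contained userspace illustration of that recovery, using stand-in structs rather than the driver's real types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pcs {			/* stand-in for struct phylink_pcs */
	int dummy;
};

struct wrapped_pcs {		/* stand-in for struct b53_pcs */
	struct pcs pcs;
	int lane;
};

int main(void)
{
	struct wrapped_pcs b53 = { .lane = 1 };
	struct pcs *handle = &b53.pcs;	/* what phylink passes back to the ops */

	/* Recover the wrapper (and the lane) from the embedded member */
	printf("lane %d\n", container_of(handle, struct wrapped_pcs, pcs)->lane);
	return 0;
}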
diff --git a/drivers/net/dsa/b53/b53_serdes.h b/drivers/net/dsa/b53/b53_serdes.h
index 55d280fe38e4..ef81f5da5f81 100644
--- a/drivers/net/dsa/b53/b53_serdes.h
+++ b/drivers/net/dsa/b53/b53_serdes.h
@@ -107,17 +107,13 @@ static inline u8 b53_serdes_map_lane(struct b53_device *dev, int port)
return dev->ops->serdes_map_lane(dev, port);
}
-int b53_serdes_get_link(struct b53_device *dev, int port);
-int b53_serdes_link_state(struct b53_device *dev, int port,
- struct phylink_link_state *state);
-void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode,
- const struct phylink_link_state *state);
-void b53_serdes_an_restart(struct b53_device *dev, int port);
void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode,
phy_interface_t interface, bool link_up);
-void b53_serdes_phylink_validate(struct b53_device *dev, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
+struct phylink_pcs *b53_serdes_phylink_mac_select_pcs(struct b53_device *dev,
+ int port,
+ phy_interface_t interface);
+void b53_serdes_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config);
#if IS_ENABLED(CONFIG_B53_SERDES)
int b53_serdes_init(struct b53_device *dev, int port);
#else
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index 2b88f03e5252..308f15d3832e 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -314,16 +314,12 @@ static int b53_spi_probe(struct spi_device *spi)
return 0;
}
-static int b53_spi_remove(struct spi_device *spi)
+static void b53_spi_remove(struct spi_device *spi)
{
struct b53_device *dev = spi_get_drvdata(spi);
if (dev)
b53_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void b53_spi_shutdown(struct spi_device *spi)
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 4591bb1c05d2..bcb44034404d 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -443,6 +443,39 @@ static void b53_srab_irq_disable(struct b53_device *dev, int port)
}
}
+static void b53_srab_phylink_get_caps(struct b53_device *dev, int port,
+ struct phylink_config *config)
+{
+ struct b53_srab_priv *priv = dev->priv;
+ struct b53_srab_port_priv *p = &priv->port_intrs[port];
+
+ switch (p->mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+#if IS_ENABLED(CONFIG_B53_SERDES)
+ /* If p->mode indicates SGMII mode, that essentially means we
+ * are using a serdes. Ask the serdes for the capabilities.
+ */
+ b53_serdes_phylink_get_caps(dev, port, config);
+#endif
+ break;
+
+ case PHY_INTERFACE_MODE_NA:
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ /* If we support RGMII, support all RGMII modes, since
+ * that dictates the PHY delay settings.
+ */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+
+ default:
+ /* Some other mode (e.g. MII, GMII etc) */
+ __set_bit(p->mode, config->supported_interfaces);
+ break;
+ }
+}
+
static const struct b53_io_ops b53_srab_ops = {
.read8 = b53_srab_read8,
.read16 = b53_srab_read16,
@@ -456,13 +489,11 @@ static const struct b53_io_ops b53_srab_ops = {
.write64 = b53_srab_write64,
.irq_enable = b53_srab_irq_enable,
.irq_disable = b53_srab_irq_disable,
+ .phylink_get_caps = b53_srab_phylink_get_caps,
#if IS_ENABLED(CONFIG_B53_SERDES)
+ .phylink_mac_select_pcs = b53_serdes_phylink_mac_select_pcs,
.serdes_map_lane = b53_srab_serdes_map_lane,
- .serdes_link_state = b53_serdes_link_state,
- .serdes_config = b53_serdes_config,
- .serdes_an_restart = b53_serdes_an_restart,
.serdes_link_set = b53_serdes_link_set,
- .serdes_phylink_validate = b53_serdes_phylink_validate,
#endif
};
@@ -636,8 +667,6 @@ static int b53_srab_remove(struct platform_device *pdev)
b53_srab_intr_set(dev->priv, false);
b53_switch_remove(dev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 13aa43b5cffd..cde253d27bd0 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -62,6 +62,56 @@ static u16 bcm_sf2_reg_rgmii_cntrl(struct bcm_sf2_priv *priv, int port)
return REG_SWITCH_STATUS;
}
+static u16 bcm_sf2_reg_led_base(struct bcm_sf2_priv *priv, int port)
+{
+ switch (port) {
+ case 0:
+ return REG_LED_0_CNTRL;
+ case 1:
+ return REG_LED_1_CNTRL;
+ case 2:
+ return REG_LED_2_CNTRL;
+ }
+
+ switch (priv->type) {
+ case BCM4908_DEVICE_ID:
+ switch (port) {
+ case 3:
+ return REG_LED_3_CNTRL;
+ case 7:
+ return REG_LED_4_CNTRL;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ WARN_ONCE(1, "Unsupported port %d\n", port);
+
+ /* RO fallback reg */
+ return REG_SWITCH_STATUS;
+}
+
+static u32 bcm_sf2_port_override_offset(struct bcm_sf2_priv *priv, int port)
+{
+ switch (priv->type) {
+ case BCM4908_DEVICE_ID:
+ case BCM7445_DEVICE_ID:
+ return port == 8 ? CORE_STS_OVERRIDE_IMP :
+ CORE_STS_OVERRIDE_GMIIP_PORT(port);
+ case BCM7278_DEVICE_ID:
+ return port == 8 ? CORE_STS_OVERRIDE_IMP2 :
+ CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+ default:
+ WARN_ONCE(1, "Unsupported device: %d\n", priv->type);
+ }
+
+ /* RO fallback register */
+ return REG_SWITCH_STATUS;
+}
+
/* Return the number of active ports, not counting the IMP (CPU) port */
static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
{
@@ -109,7 +159,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int i;
- u32 reg, offset;
+ u32 reg;
/* Enable the port memories */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
@@ -135,21 +185,6 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
b53_brcm_hdr_setup(ds, port);
if (port == 8) {
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_IMP;
- else
- offset = CORE_STS_OVERRIDE_IMP2;
-
- /* Force link status for IMP port */
- reg = core_readl(priv, offset);
- reg |= (MII_SW_OR | LINK_STS);
- if (priv->type == BCM4908_DEVICE_ID)
- reg |= GMII_SPEED_UP_2G;
- else
- reg &= ~GMII_SPEED_UP_2G;
- core_writel(priv, reg, offset);
-
/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
reg = core_readl(priv, CORE_IMP_CTL);
reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
@@ -187,9 +222,14 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
/* Use PHY-driven LED signaling */
if (!enable) {
- reg = reg_readl(priv, REG_LED_CNTRL(0));
- reg |= SPDLNK_SRC_SEL;
- reg_writel(priv, reg, REG_LED_CNTRL(0));
+ u16 led_ctrl = bcm_sf2_reg_led_base(priv, 0);
+
+ if (priv->type == BCM7278_DEVICE_ID ||
+ priv->type == BCM7445_DEVICE_ID) {
+ reg = reg_led_readl(priv, led_ctrl, 0);
+ reg |= LED_CNTRL_SPDLNK_SRC_SEL;
+ reg_led_writel(priv, reg, led_ctrl, 0);
+ }
}
}
@@ -584,7 +624,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
- priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+ priv->slave_mii_bus = mdiobus_alloc();
if (!priv->slave_mii_bus) {
of_node_put(dn);
return -ENOMEM;
@@ -644,8 +684,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
}
err = mdiobus_register(priv->slave_mii_bus);
- if (err && dn)
+ if (err && dn) {
+ mdiobus_free(priv->slave_mii_bus);
of_node_put(dn);
+ }
return err;
}
@@ -653,6 +695,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
mdiobus_unregister(priv->slave_mii_bus);
+ mdiobus_free(priv->slave_mii_bus);
of_node_put(priv->master_mii_dn);
}
@@ -672,49 +715,25 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
PHY_BRCM_IDDQ_SUSPEND;
}
-static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void bcm_sf2_sw_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
+ unsigned long *interfaces = config->supported_interfaces;
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_MOCA) {
- linkmode_zero(supported);
- if (port != core_readl(priv, CORE_IMP0_PRT_ID))
- dev_err(ds->dev,
- "Unsupported interface: %d for port %d\n",
- state->interface, port);
- return;
- }
-
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- /* With the exclusion of MII and Reverse MII, we support Gigabit,
- * including Half duplex
- */
- if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
+ if (priv->int_phy_mask & BIT(port)) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
+ } else if (priv->moca_port == port) {
+ __set_bit(PHY_INTERFACE_MODE_MOCA, interfaces);
+ } else {
+ __set_bit(PHY_INTERFACE_MODE_MII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII, interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
+ phy_interface_set_rgmii(interfaces);
}
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
}
static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
@@ -793,17 +812,13 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg, offset;
- if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
- else
- offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+ if (priv->wol_ports_mask & BIT(port))
+ return;
- reg = core_readl(priv, offset);
- reg &= ~LINK_STS;
- core_writel(priv, reg, offset);
- }
+ offset = bcm_sf2_port_override_offset(priv, port);
+ reg = core_readl(priv, offset);
+ reg &= ~LINK_STS;
+ core_writel(priv, reg, offset);
bcm_sf2_sw_mac_link_set(ds, port, interface, false);
}
@@ -817,51 +832,56 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_eee *p = &priv->dev->ports[port].eee;
+ u32 reg_rgmii_ctrl = 0;
+ u32 reg, offset;
bcm_sf2_sw_mac_link_set(ds, port, interface, true);
- if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- u32 reg_rgmii_ctrl = 0;
- u32 reg, offset;
+ offset = bcm_sf2_port_override_offset(priv, port);
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
- else
- offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
-
- if (interface == PHY_INTERFACE_MODE_RGMII ||
- interface == PHY_INTERFACE_MODE_RGMII_TXID ||
- interface == PHY_INTERFACE_MODE_MII ||
- interface == PHY_INTERFACE_MODE_REVMII) {
- reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
- reg = reg_readl(priv, reg_rgmii_ctrl);
- reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
-
- if (tx_pause)
- reg |= TX_PAUSE_EN;
- if (rx_pause)
- reg |= RX_PAUSE_EN;
-
- reg_writel(priv, reg, reg_rgmii_ctrl);
- }
+ if (phy_interface_mode_is_rgmii(interface) ||
+ interface == PHY_INTERFACE_MODE_MII ||
+ interface == PHY_INTERFACE_MODE_REVMII) {
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+ reg = reg_readl(priv, reg_rgmii_ctrl);
+ reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
- reg = SW_OVERRIDE | LINK_STS;
- switch (speed) {
- case SPEED_1000:
- reg |= SPDSTS_1000 << SPEED_SHIFT;
- break;
- case SPEED_100:
- reg |= SPDSTS_100 << SPEED_SHIFT;
- break;
- }
+ if (tx_pause)
+ reg |= TX_PAUSE_EN;
+ if (rx_pause)
+ reg |= RX_PAUSE_EN;
+
+ reg_writel(priv, reg, reg_rgmii_ctrl);
+ }
- if (duplex == DUPLEX_FULL)
- reg |= DUPLX_MODE;
+ reg = LINK_STS;
+ if (port == 8) {
+ if (priv->type == BCM4908_DEVICE_ID)
+ reg |= GMII_SPEED_UP_2G;
+ reg |= MII_SW_OR;
+ } else {
+ reg |= SW_OVERRIDE;
+ }
- core_writel(priv, reg, offset);
+ switch (speed) {
+ case SPEED_1000:
+ reg |= SPDSTS_1000 << SPEED_SHIFT;
+ break;
+ case SPEED_100:
+ reg |= SPDSTS_100 << SPEED_SHIFT;
+ break;
}
+ if (duplex == DUPLEX_FULL)
+ reg |= DUPLX_MODE;
+
+ if (tx_pause)
+ reg |= TXFLOW_CNTL;
+ if (rx_pause)
+ reg |= RXFLOW_CNTL;
+
+ core_writel(priv, reg, offset);
+
if (mode == MLO_AN_PHY && phydev)
p->eee_enabled = b53_eee_init(ds, port, phydev);
}
@@ -963,7 +983,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_wolinfo pwol = { };
@@ -987,7 +1007,7 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
struct ethtool_wolinfo pwol = { };
@@ -1181,7 +1201,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.get_sset_count = bcm_sf2_sw_get_sset_count,
.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
- .phylink_validate = bcm_sf2_sw_validate,
+ .phylink_get_caps = bcm_sf2_sw_get_caps,
.phylink_mac_config = bcm_sf2_sw_mac_config,
.phylink_mac_link_down = bcm_sf2_sw_mac_link_down,
.phylink_mac_link_up = bcm_sf2_sw_mac_link_up,
@@ -1232,9 +1252,14 @@ static const u16 bcm_sf2_4908_reg_offsets[] = {
[REG_SPHY_CNTRL] = 0x24,
[REG_CROSSBAR] = 0xc8,
[REG_RGMII_11_CNTRL] = 0x014c,
- [REG_LED_0_CNTRL] = 0x40,
- [REG_LED_1_CNTRL] = 0x4c,
- [REG_LED_2_CNTRL] = 0x58,
+ [REG_LED_0_CNTRL] = 0x40,
+ [REG_LED_1_CNTRL] = 0x4c,
+ [REG_LED_2_CNTRL] = 0x58,
+ [REG_LED_3_CNTRL] = 0x64,
+ [REG_LED_4_CNTRL] = 0x88,
+ [REG_LED_5_CNTRL] = 0xa0,
+ [REG_LED_AGGREGATE_CTRL] = 0xb8,
+
};
static const struct bcm_sf2_of_data bcm_sf2_4908_data = {
@@ -1526,8 +1551,6 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
if (priv->type == BCM7278_DEVICE_ID)
reset_control_assert(priv->rcdev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
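The bcm_sf2_mdio_register() change above moves from a devm-managed bus to mdiobus_alloc(), so both the registration failure path and bcm_sf2_mdio_unregister() must free the bus explicitly. A hedged, abbreviated sketch of that pairing; the example_* names are placeholders and the bus read/write/priv setup is elided:

#include <linux/err.h>
#include <linux/phy.h>

static struct mii_bus *example_mdio_register(struct device *dev)
{
	struct mii_bus *bus;
	int err;

	bus = mdiobus_alloc();
	if (!bus)
		return ERR_PTR(-ENOMEM);

	bus->name = "example slave MII bus";
	bus->parent = dev;
	bus->phy_mask = ~0;	/* nothing to scan in this sketch */
	snprintf(bus->id, MII_BUS_ID_SIZE, "example-%d", 0);

	err = mdiobus_register(bus);
	if (err) {
		mdiobus_free(bus);	/* must not leak on failure */
		return ERR_PTR(err);
	}

	return bus;
}

static void example_mdio_unregister(struct mii_bus *bus)
{
	mdiobus_unregister(bus);
	mdiobus_free(bus);		/* pairs with mdiobus_alloc() */
}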
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 0d48402068d3..00afc94ce522 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -210,6 +210,16 @@ SF2_IO_MACRO(acb);
SWITCH_INTR_L2(0);
SWITCH_INTR_L2(1);
+static inline u32 reg_led_readl(struct bcm_sf2_priv *priv, u16 off, u16 reg)
+{
+ return readl_relaxed(priv->reg + priv->reg_offsets[off] + reg);
+}
+
+static inline void reg_led_writel(struct bcm_sf2_priv *priv, u32 val, u16 off, u16 reg)
+{
+ writel_relaxed(val, priv->reg + priv->reg_offsets[off] + reg);
+}
+
/* RXNFC */
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs);
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index a7e2fcf2df2c..c4010b7bf089 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -567,14 +567,14 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
int port, u32 location)
{
- struct cfp_rule *rule = NULL;
+ struct cfp_rule *rule;
list_for_each_entry(rule, &priv->cfp.rules_list, next) {
if (rule->port == port && rule->fs.location == location)
- break;
+ return rule;
}
- return rule;
+ return NULL;
}
static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
@@ -1102,7 +1102,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1145,7 +1145,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1296,7 +1296,7 @@ void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
"CFP%03d_%sCntr",
i, bcm_sf2_cfp_stats[j].name);
iter = (i - 1) * s + j;
- strlcpy(data + iter * ETH_GSTRING_LEN,
+ strscpy(data + iter * ETH_GSTRING_LEN,
buf, ETH_GSTRING_LEN);
}
}
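bcm_sf2_cfp_rule_find() above now returns from inside the list walk because, after list_for_each_entry() runs to completion, the cursor aliases the list head rather than becoming NULL. A userspace sketch with a hand-rolled list showing why the explicit NULL return is the safe "not found" result:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

struct rule {
	int location;
	struct list_head node;
};

/* Walks like list_for_each_entry(): if it ran off the end, the cursor
 * would be the container of the list head itself, not a valid entry and
 * not NULL, so "not found" has to be reported explicitly.
 */
static struct rule *find_rule(struct list_head *head, int location)
{
	struct list_head *pos;

	for (pos = head->next; pos != head; pos = pos->next) {
		struct rule *r = container_of(pos, struct rule, node);

		if (r->location == location)
			return r;	/* return from inside the walk */
	}
	return NULL;			/* explicit "not found" */
}

int main(void)
{
	struct list_head head = { &head, &head };	/* empty list */

	printf("%p\n", (void *)find_rule(&head, 5));	/* prints (nil) */
	return 0;
}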
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 7bffc80f241f..da0dedbd6555 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -25,6 +25,10 @@ enum bcm_sf2_reg_offs {
REG_LED_0_CNTRL,
REG_LED_1_CNTRL,
REG_LED_2_CNTRL,
+ REG_LED_3_CNTRL,
+ REG_LED_4_CNTRL,
+ REG_LED_5_CNTRL,
+ REG_LED_AGGREGATE_CTRL,
REG_SWITCH_REG_MAX,
};
@@ -56,6 +60,63 @@ enum bcm_sf2_reg_offs {
#define CROSSBAR_BCM4908_EXT_GPHY4 1
#define CROSSBAR_BCM4908_EXT_RGMII 2
+/* Relative to REG_LED_*_CNTRL (BCM7278, BCM7445) */
+#define LED_CNTRL_NO_LINK_ENCODE_SHIFT 0
+#define LED_CNTRL_M10_ENCODE_SHIFT 2
+#define LED_CNTRL_M100_ENCODE_SHIFT 4
+#define LED_CNTRL_M1000_ENCODE_SHIFT 6
+#define LED_CNTRL_SEL_NO_LINK_ENCODE_SHIFT 8
+#define LED_CNTRL_SEL_10M_ENCODE_SHIFT 10
+#define LED_CNTRL_SEL_100M_ENCODE_SHIFT 12
+#define LED_CNTRL_SEL_1000M_ENCODE_SHIFT 14
+#define LED_CNTRL_RX_DV_EN (1 << 16)
+#define LED_CNTRL_TX_EN_EN (1 << 17)
+#define LED_CNTRL_SPDLNK_LED0_ACT_SEL_SHIFT 18
+#define LED_CNTRL_SPDLNK_LED1_ACT_SEL_SHIFT 20
+#define LED_CNTRL_ACT_LED_ACT_SEL_SHIFT 22
+#define LED_CNTRL_SPDLNK_SRC_SEL (1 << 24)
+#define LED_CNTRL_SPDLNK_LED0_ACT_POL_SEL (1 << 25)
+#define LED_CNTRL_SPDLNK_LED1_ACT_POL_SEL (1 << 26)
+#define LED_CNTRL_ACT_LED_POL_SEL (1 << 27)
+#define LED_CNTRL_MASK 0x3
+
+/* Register relative to REG_LED_*_CNTRL (BCM4908) */
+#define REG_LED_CTRL 0x0
+#define LED_CTRL_RX_ACT_EN 0x00000001
+#define LED_CTRL_TX_ACT_EN 0x00000002
+#define LED_CTRL_SPDLNK_LED0_ACT_SEL 0x00000004
+#define LED_CTRL_SPDLNK_LED1_ACT_SEL 0x00000008
+#define LED_CTRL_SPDLNK_LED2_ACT_SEL 0x00000010
+#define LED_CTRL_ACT_LED_ACT_SEL 0x00000020
+#define LED_CTRL_SPDLNK_LED0_ACT_POL_SEL 0x00000040
+#define LED_CTRL_SPDLNK_LED1_ACT_POL_SEL 0x00000080
+#define LED_CTRL_SPDLNK_LED2_ACT_POL_SEL 0x00000100
+#define LED_CTRL_ACT_LED_POL_SEL 0x00000200
+#define LED_CTRL_LED_SPD_OVRD 0x00001c00
+#define LED_CTRL_LNK_STATUS_OVRD 0x00002000
+#define LED_CTRL_SPD_OVRD_EN 0x00004000
+#define LED_CTRL_LNK_OVRD_EN 0x00008000
+
+/* Register relative to REG_LED_*_CNTRL (BCM4908) */
+#define REG_LED_LINK_SPEED_ENC_SEL 0x4
+#define LED_LINK_SPEED_ENC_SEL_NO_LINK_SHIFT 0
+#define LED_LINK_SPEED_ENC_SEL_10M_SHIFT 3
+#define LED_LINK_SPEED_ENC_SEL_100M_SHIFT 6
+#define LED_LINK_SPEED_ENC_SEL_1000M_SHIFT 9
+#define LED_LINK_SPEED_ENC_SEL_2500M_SHIFT 12
+#define LED_LINK_SPEED_ENC_SEL_10G_SHIFT 15
+#define LED_LINK_SPEED_ENC_SEL_MASK 0x7
+
+/* Register relative to REG_LED_*_CNTRL (BCM4908) */
+#define REG_LED_LINK_SPEED_ENC 0x8
+#define LED_LINK_SPEED_ENC_NO_LINK_SHIFT 0
+#define LED_LINK_SPEED_ENC_M10_SHIFT 3
+#define LED_LINK_SPEED_ENC_M100_SHIFT 6
+#define LED_LINK_SPEED_ENC_M1000_SHIFT 9
+#define LED_LINK_SPEED_ENC_M2500_SHIFT 12
+#define LED_LINK_SPEED_ENC_M10G_SHIFT 15
+#define LED_LINK_SPEED_ENC_MASK 0x7
+
/* Relative to REG_RGMII_CNTRL */
#define RGMII_MODE_EN (1 << 0)
#define ID_MODE_DIS (1 << 1)
@@ -73,10 +134,6 @@ enum bcm_sf2_reg_offs {
#define LPI_COUNT_SHIFT 9
#define LPI_COUNT_MASK 0x3F
-#define REG_LED_CNTRL(x) (REG_LED_0_CNTRL + (x))
-
-#define SPDLNK_SRC_SEL (1 << 24)
-
/* Register set relative to 'INTRL2_0' and 'INTRL2_1' */
#define INTRL2_CPU_STATUS 0x00
#define INTRL2_CPU_SET 0x04
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index e638e3eea911..5b139f2206b6 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -167,19 +167,21 @@ static int dsa_loop_phy_write(struct dsa_switch *ds, int port,
}
static int dsa_loop_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
- __func__, port, bridge->name);
+ __func__, port, bridge.dev->name);
return 0;
}
static void dsa_loop_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge)
{
dev_dbg(ds->dev, "%s: port: %d, bridge: %s\n",
- __func__, port, bridge->name);
+ __func__, port, bridge.dev->name);
}
static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port,
@@ -349,8 +351,6 @@ static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(ds);
dev_put(ps->netdev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void dsa_loop_drv_shutdown(struct mdio_device *mdiodev)
@@ -376,6 +376,17 @@ static struct mdio_driver dsa_loop_drv = {
#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
+static void dsa_loop_phydevs_unregister(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_FIXED_PHYS; i++)
+ if (!IS_ERR(phydevs[i])) {
+ fixed_phy_unregister(phydevs[i]);
+ phy_device_free(phydevs[i]);
+ }
+}
+
static int __init dsa_loop_init(void)
{
struct fixed_phy_status status = {
@@ -383,23 +394,23 @@ static int __init dsa_loop_init(void)
.speed = SPEED_100,
.duplex = DUPLEX_FULL,
};
- unsigned int i;
+ unsigned int i, ret;
for (i = 0; i < NUM_FIXED_PHYS; i++)
phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL);
- return mdio_driver_register(&dsa_loop_drv);
+ ret = mdio_driver_register(&dsa_loop_drv);
+ if (ret)
+ dsa_loop_phydevs_unregister();
+
+ return ret;
}
module_init(dsa_loop_init);
static void __exit dsa_loop_exit(void)
{
- unsigned int i;
-
mdio_driver_unregister(&dsa_loop_drv);
- for (i = 0; i < NUM_FIXED_PHYS; i++)
- if (!IS_ERR(phydevs[i]))
- fixed_phy_unregister(phydevs[i]);
+ dsa_loop_phydevs_unregister();
}
module_exit(dsa_loop_exit);
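The dsa_loop_init() change above tears the fixed PHYs back down when mdio_driver_register() fails, since module_exit() is never invoked after a failed init. A hedged module-skeleton sketch of that pattern; every example_* symbol is a stand-in, not part of this driver:

#include <linux/init.h>
#include <linux/module.h>

/* Stand-ins for the fixed-PHY registration and mdio_driver_register() */
static void example_resources_create(void)  { pr_info("resources up\n"); }
static void example_resources_destroy(void) { pr_info("resources down\n"); }
static int  example_driver_register(void)   { return 0; }
static void example_driver_unregister(void) { }

static int __init example_init(void)
{
	int ret;

	example_resources_create();

	ret = example_driver_register();
	if (ret)
		example_resources_destroy();	/* __exit never runs after a failed init */

	return ret;
}

static void __exit example_exit(void)
{
	example_driver_unregister();
	example_resources_destroy();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");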
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 4e0b53d94b52..595a548bb0a8 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -128,6 +128,16 @@ static void hellcreek_select_prio(struct hellcreek *hellcreek, int prio)
hellcreek_write(hellcreek, val, HR_PSEL);
}
+static void hellcreek_select_port_prio(struct hellcreek *hellcreek, int port,
+ int prio)
+{
+ u16 val = port << HR_PSEL_PTWSEL_SHIFT;
+
+ val |= prio << HR_PSEL_PRTCWSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PSEL);
+}
+
static void hellcreek_select_counter(struct hellcreek *hellcreek, int counter)
{
u16 val = counter << HR_CSEL_SHIFT;
@@ -288,7 +298,7 @@ static void hellcreek_get_strings(struct dsa_switch *ds, int port,
for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
const struct hellcreek_counter *counter = &hellcreek_counter[i];
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
counter->name, ETH_GSTRING_LEN);
}
}
@@ -674,7 +684,9 @@ static int hellcreek_bridge_flags(struct dsa_switch *ds, int port,
}
static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
@@ -691,7 +703,7 @@ static int hellcreek_port_bridge_join(struct dsa_switch *ds, int port,
}
static void hellcreek_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct hellcreek *hellcreek = ds->priv;
@@ -710,8 +722,9 @@ static int __hellcreek_fdb_add(struct hellcreek *hellcreek,
u16 meta = 0;
dev_dbg(hellcreek->dev, "Add static FDB entry: MAC=%pM, MASK=0x%02x, "
- "OBT=%d, REPRIO_EN=%d, PRIO=%d\n", entry->mac, entry->portmask,
- entry->is_obt, entry->reprio_en, entry->reprio_tc);
+ "OBT=%d, PASS_BLOCKED=%d, REPRIO_EN=%d, PRIO=%d\n", entry->mac,
+ entry->portmask, entry->is_obt, entry->pass_blocked,
+ entry->reprio_en, entry->reprio_tc);
/* Add mac address */
hellcreek_write(hellcreek, entry->mac[1] | (entry->mac[0] << 8), HR_FDBWDH);
@@ -722,6 +735,8 @@ static int __hellcreek_fdb_add(struct hellcreek *hellcreek,
meta |= entry->portmask << HR_FDBWRM0_PORTMASK_SHIFT;
if (entry->is_obt)
meta |= HR_FDBWRM0_OBT;
+ if (entry->pass_blocked)
+ meta |= HR_FDBWRM0_PASS_BLOCKED;
if (entry->reprio_en) {
meta |= HR_FDBWRM0_REPRIO_EN;
meta |= entry->reprio_tc << HR_FDBWRM0_REPRIO_TC_SHIFT;
@@ -823,7 +838,8 @@ static int hellcreek_fdb_get(struct hellcreek *hellcreek,
}
static int hellcreek_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct hellcreek_fdb_entry entry = { 0 };
struct hellcreek *hellcreek = ds->priv;
@@ -868,7 +884,8 @@ out:
}
static int hellcreek_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct hellcreek_fdb_entry entry = { 0 };
struct hellcreek *hellcreek = ds->priv;
@@ -1049,7 +1066,7 @@ static void hellcreek_setup_tc_identity_mapping(struct hellcreek *hellcreek)
static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
{
- static struct hellcreek_fdb_entry ptp = {
+ static struct hellcreek_fdb_entry l2_ptp = {
/* MAC: 01-1B-19-00-00-00 */
.mac = { 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 },
.portmask = 0x03, /* Management ports */
@@ -1060,24 +1077,94 @@ static int hellcreek_setup_fdb(struct hellcreek *hellcreek)
.reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
.reprio_en = 1,
};
- static struct hellcreek_fdb_entry p2p = {
+ static struct hellcreek_fdb_entry udp4_ptp = {
+ /* MAC: 01-00-5E-00-01-81 */
+ .mac = { 0x01, 0x00, 0x5e, 0x00, 0x01, 0x81 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 0,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry udp6_ptp = {
+ /* MAC: 33-33-00-00-01-81 */
+ .mac = { 0x33, 0x33, 0x00, 0x00, 0x01, 0x81 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 0,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry l2_p2p = {
/* MAC: 01-80-C2-00-00-0E */
.mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e },
.portmask = 0x03, /* Management ports */
.age = 0,
.is_obt = 0,
- .pass_blocked = 0,
+ .pass_blocked = 1,
.is_static = 1,
.reprio_tc = 6, /* TC: 6 as per IEEE 802.1AS */
.reprio_en = 1,
};
+ static struct hellcreek_fdb_entry udp4_p2p = {
+ /* MAC: 01-00-5E-00-00-6B */
+ .mac = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x6b },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 1,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry udp6_p2p = {
+ /* MAC: 33-33-00-00-00-6B */
+ .mac = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x6b },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 1,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
+ static struct hellcreek_fdb_entry stp = {
+ /* MAC: 01-80-C2-00-00-00 */
+ .mac = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 },
+ .portmask = 0x03, /* Management ports */
+ .age = 0,
+ .is_obt = 0,
+ .pass_blocked = 1,
+ .is_static = 1,
+ .reprio_tc = 6,
+ .reprio_en = 1,
+ };
int ret;
mutex_lock(&hellcreek->reg_lock);
- ret = __hellcreek_fdb_add(hellcreek, &ptp);
+ ret = __hellcreek_fdb_add(hellcreek, &l2_ptp);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &udp4_ptp);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &udp6_ptp);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &l2_p2p);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &udp4_p2p);
if (ret)
goto out;
- ret = __hellcreek_fdb_add(hellcreek, &p2p);
+ ret = __hellcreek_fdb_add(hellcreek, &udp6_p2p);
+ if (ret)
+ goto out;
+ ret = __hellcreek_fdb_add(hellcreek, &stp);
out:
mutex_unlock(&hellcreek->reg_lock);
@@ -1089,11 +1176,6 @@ static int hellcreek_devlink_info_get(struct dsa_switch *ds,
struct netlink_ext_ack *extack)
{
struct hellcreek *hellcreek = ds->priv;
- int ret;
-
- ret = devlink_info_driver_name_put(req, "hellcreek");
- if (ret)
- return ret;
return devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
@@ -1384,14 +1466,19 @@ static void hellcreek_teardown(struct dsa_switch *ds)
dsa_devlink_resources_unregister(ds);
}
-static void hellcreek_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void hellcreek_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct hellcreek *hellcreek = ds->priv;
- dev_dbg(hellcreek->dev, "Phylink validate for port %d\n", port);
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces);
+
+ /* Include GMII - the hardware does not support this interface
+ * mode, but it's the default interface mode for phylib, so we
+ * need it for compatibility with existing DT.
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII, config->supported_interfaces);
/* The MAC settings are a hardware configuration option and cannot be
* changed at run time or by strapping. Therefore the attached PHYs
@@ -1399,12 +1486,9 @@ static void hellcreek_phylink_validate(struct dsa_switch *ds, int port,
* by the hardware.
*/
if (hellcreek->pdata->is_100_mbits)
- phylink_set(mask, 100baseT_Full);
+ config->mac_capabilities = MAC_100FD;
else
- phylink_set(mask, 1000baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ config->mac_capabilities = MAC_1000FD;
}
static int
@@ -1458,6 +1542,45 @@ out:
return ret;
}
+static void hellcreek_setup_maxsdu(struct hellcreek *hellcreek, int port,
+ const struct tc_taprio_qopt_offload *schedule)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; ++tc) {
+ u32 max_sdu = schedule->max_sdu[tc] + VLAN_ETH_HLEN - ETH_FCS_LEN;
+ u16 val;
+
+ if (!schedule->max_sdu[tc])
+ continue;
+
+ dev_dbg(hellcreek->dev, "Configure max-sdu %u for tc %d on port %d\n",
+ max_sdu, tc, port);
+
+ hellcreek_select_port_prio(hellcreek, port, tc);
+
+ val = (max_sdu & HR_PTPRTCCFG_MAXSDU_MASK) << HR_PTPRTCCFG_MAXSDU_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
+ }
+}
+
+static void hellcreek_reset_maxsdu(struct hellcreek *hellcreek, int port)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; ++tc) {
+ u16 val;
+
+ hellcreek_select_port_prio(hellcreek, port, tc);
+
+ val = (HELLCREEK_DEFAULT_MAX_SDU & HR_PTPRTCCFG_MAXSDU_MASK)
+ << HR_PTPRTCCFG_MAXSDU_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
+ }
+}
+
static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
const struct tc_taprio_qopt_offload *schedule)
{
@@ -1641,7 +1764,10 @@ static int hellcreek_port_set_schedule(struct dsa_switch *ds, int port,
}
hellcreek_port->current_schedule = taprio_offload_get(taprio);
- /* Then select port */
+ /* Configure max sdu */
+ hellcreek_setup_maxsdu(hellcreek, port, hellcreek_port->current_schedule);
+
+ /* Select tgd */
hellcreek_select_tgd(hellcreek, port);
/* Enable gating and keep defaults */
@@ -1693,7 +1819,10 @@ static int hellcreek_port_del_schedule(struct dsa_switch *ds, int port)
hellcreek_port->current_schedule = NULL;
}
- /* Then select port */
+ /* Reset max sdu */
+ hellcreek_reset_maxsdu(hellcreek, port);
+
+ /* Select tgd */
hellcreek_select_tgd(hellcreek, port);
/* Disable gating and return to regular switching flow */
@@ -1730,22 +1859,43 @@ static bool hellcreek_validate_schedule(struct hellcreek *hellcreek,
return true;
}
+static int hellcreek_tc_query_caps(struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data)
{
- struct tc_taprio_qopt_offload *taprio = type_data;
struct hellcreek *hellcreek = ds->priv;
- if (type != TC_SETUP_QDISC_TAPRIO)
- return -EOPNOTSUPP;
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return hellcreek_tc_query_caps(type_data);
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_qopt_offload *taprio = type_data;
- if (!hellcreek_validate_schedule(hellcreek, taprio))
- return -EOPNOTSUPP;
+ if (!hellcreek_validate_schedule(hellcreek, taprio))
+ return -EOPNOTSUPP;
- if (taprio->enable)
- return hellcreek_port_set_schedule(ds, port, taprio);
+ if (taprio->enable)
+ return hellcreek_port_set_schedule(ds, port, taprio);
- return hellcreek_port_del_schedule(ds, port);
+ return hellcreek_port_del_schedule(ds, port);
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
}
static const struct dsa_switch_ops hellcreek_ds_ops = {
@@ -1755,7 +1905,7 @@ static const struct dsa_switch_ops hellcreek_ds_ops = {
.get_strings = hellcreek_get_strings,
.get_tag_protocol = hellcreek_get_tag_protocol,
.get_ts_info = hellcreek_get_ts_info,
- .phylink_validate = hellcreek_phylink_validate,
+ .phylink_get_caps = hellcreek_phylink_get_caps,
.port_bridge_flags = hellcreek_bridge_flags,
.port_bridge_join = hellcreek_port_bridge_join,
.port_bridge_leave = hellcreek_port_bridge_leave,
@@ -1815,11 +1965,8 @@ static int hellcreek_probe(struct platform_device *pdev)
if (!port->counter_values)
return -ENOMEM;
- port->vlan_dev_bitmap =
- devm_kcalloc(dev,
- BITS_TO_LONGS(VLAN_N_VID),
- sizeof(unsigned long),
- GFP_KERNEL);
+ port->vlan_dev_bitmap = devm_bitmap_zalloc(dev, VLAN_N_VID,
+ GFP_KERNEL);
if (!port->vlan_dev_bitmap)
return -ENOMEM;
@@ -1920,7 +2067,6 @@ static int hellcreek_remove(struct platform_device *pdev)
hellcreek_hwtstamp_free(hellcreek);
hellcreek_ptp_free(hellcreek);
dsa_unregister_switch(hellcreek->ds);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
index 9e303b8ab13c..4a678f7d61ae 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.h
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -37,6 +37,7 @@
#define HELLCREEK_VLAN_UNTAGGED_MEMBER 0x1
#define HELLCREEK_VLAN_TAGGED_MEMBER 0x3
#define HELLCREEK_NUM_EGRESS_QUEUES 8
+#define HELLCREEK_DEFAULT_MAX_SDU 1536
/* Register definitions */
#define HR_MODID_C (0 * 2)
@@ -72,6 +73,12 @@
#define HR_PRTCCFG_PCP_TC_MAP_SHIFT 0
#define HR_PRTCCFG_PCP_TC_MAP_MASK GENMASK(2, 0)
+#define HR_PTPRTCCFG (0xa9 * 2)
+#define HR_PTPRTCCFG_SET_QTRACK BIT(15)
+#define HR_PTPRTCCFG_REJECT BIT(14)
+#define HR_PTPRTCCFG_MAXSDU_SHIFT 0
+#define HR_PTPRTCCFG_MAXSDU_MASK GENMASK(10, 0)
+
#define HR_CSEL (0x8d * 2)
#define HR_CSEL_SHIFT 0
#define HR_CSEL_MASK GENMASK(7, 0)
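For reference, a minimal standalone sketch (not part of the patch) of how the per-traffic-class max SDU from taprio ends up in the new HR_PTPRTCCFG register: hellcreek_setup_maxsdu() above converts the SDU to a frame length and masks it into the 11-bit MAXSDU field, while hellcreek_reset_maxsdu() restores HELLCREEK_DEFAULT_MAX_SDU. The constants below mirror the definitions in this hunk; pack_maxsdu() is an illustrative helper, not driver code.

#include <stdint.h>
#include <stdio.h>

#define MAXSDU_MASK     0x7ffu  /* mirrors HR_PTPRTCCFG_MAXSDU_MASK, GENMASK(10, 0) */
#define MAXSDU_SHIFT    0       /* mirrors HR_PTPRTCCFG_MAXSDU_SHIFT */
#define DEFAULT_MAX_SDU 1536    /* mirrors HELLCREEK_DEFAULT_MAX_SDU */
#define VLAN_ETH_HLEN   18
#define ETH_FCS_LEN     4

/* Convert a taprio per-TC max SDU (payload bytes) to the register value,
 * following the arithmetic in hellcreek_setup_maxsdu() above.
 */
static uint16_t pack_maxsdu(uint32_t max_sdu)
{
        uint32_t frame = max_sdu + VLAN_ETH_HLEN - ETH_FCS_LEN;

        return (uint16_t)((frame & MAXSDU_MASK) << MAXSDU_SHIFT);
}

int main(void)
{
        printf("128 byte SDU -> HR_PTPRTCCFG 0x%04x\n", pack_maxsdu(128));
        printf("reset value  -> HR_PTPRTCCFG 0x%04x\n",
               (uint16_t)((DEFAULT_MAX_SDU & MAXSDU_MASK) << MAXSDU_SHIFT));
        return 0;
}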
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
index 40b41c794dfa..ffd06cf8c44f 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -52,10 +52,6 @@ static int hellcreek_set_hwtstamp_config(struct hellcreek *hellcreek, int port,
*/
clear_bit_unlock(HELLCREEK_HWTSTAMP_ENABLED, &ps->state);
- /* Reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_ON:
tx_tstamp_enable = true;
@@ -335,7 +331,7 @@ static void hellcreek_get_rxts(struct hellcreek *hellcreek,
shwt = skb_hwtstamps(skb);
memset(shwt, 0, sizeof(*shwt));
shwt->hwtstamp = ns_to_ktime(ns);
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
index 2572c6087bb5..3e44ccb7db84 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
@@ -297,9 +297,11 @@ static enum led_brightness hellcreek_led_is_gm_get(struct led_classdev *ldev)
static int hellcreek_led_setup(struct hellcreek *hellcreek)
{
struct device_node *leds, *led = NULL;
- const char *label, *state;
+ enum led_default_state state;
+ const char *label;
int ret = -EINVAL;
+ of_node_get(hellcreek->dev->of_node);
leds = of_find_node_by_name(hellcreek->dev->of_node, "leds");
if (!leds) {
dev_err(hellcreek->dev, "No LEDs specified in device tree!\n");
@@ -317,16 +319,17 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek)
ret = of_property_read_string(led, "label", &label);
hellcreek->led_sync_good.name = ret ? "sync_good" : label;
- ret = of_property_read_string(led, "default-state", &state);
- if (!ret) {
- if (!strcmp(state, "on"))
- hellcreek->led_sync_good.brightness = 1;
- else if (!strcmp(state, "off"))
- hellcreek->led_sync_good.brightness = 0;
- else if (!strcmp(state, "keep"))
- hellcreek->led_sync_good.brightness =
- hellcreek_get_brightness(hellcreek,
- STATUS_OUT_SYNC_GOOD);
+ state = led_init_default_state_get(of_fwnode_handle(led));
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ hellcreek->led_sync_good.brightness = 1;
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ hellcreek->led_sync_good.brightness =
+ hellcreek_get_brightness(hellcreek, STATUS_OUT_SYNC_GOOD);
+ break;
+ default:
+ hellcreek->led_sync_good.brightness = 0;
}
hellcreek->led_sync_good.max_brightness = 1;
@@ -343,16 +346,17 @@ static int hellcreek_led_setup(struct hellcreek *hellcreek)
ret = of_property_read_string(led, "label", &label);
hellcreek->led_is_gm.name = ret ? "is_gm" : label;
- ret = of_property_read_string(led, "default-state", &state);
- if (!ret) {
- if (!strcmp(state, "on"))
- hellcreek->led_is_gm.brightness = 1;
- else if (!strcmp(state, "off"))
- hellcreek->led_is_gm.brightness = 0;
- else if (!strcmp(state, "keep"))
- hellcreek->led_is_gm.brightness =
- hellcreek_get_brightness(hellcreek,
- STATUS_OUT_IS_GM);
+ state = led_init_default_state_get(of_fwnode_handle(led));
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ hellcreek->led_is_gm.brightness = 1;
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ hellcreek->led_is_gm.brightness =
+ hellcreek_get_brightness(hellcreek, STATUS_OUT_IS_GM);
+ break;
+ default:
+ hellcreek->led_is_gm.brightness = 0;
}
hellcreek->led_is_gm.max_brightness = 1;
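The two switch statements above replace open-coded parsing of the "default-state" DT property with led_init_default_state_get(), which hands the driver an enum instead of a string. Below is a standalone mock of that mapping, assuming only the documented "on"/"off"/"keep" values; the identifiers are local stand-ins, not the kernel definitions.

#include <stdio.h>
#include <string.h>

enum default_state { DEFSTATE_OFF, DEFSTATE_ON, DEFSTATE_KEEP };

/* Mimics the property-to-enum mapping the LED core now provides, so drivers
 * such as hellcreek_ptp only switch on the returned enum value.
 */
static enum default_state default_state_get(const char *prop)
{
        if (prop && !strcmp(prop, "on"))
                return DEFSTATE_ON;
        if (prop && !strcmp(prop, "keep"))
                return DEFSTATE_KEEP;
        return DEFSTATE_OFF;    /* property absent or "off" */
}

int main(void)
{
        printf("\"keep\" -> %d\n", default_state_get("keep"));
        printf("absent -> %d\n", default_state_get(NULL));
        return 0;
}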
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 89f920289ae2..cbe831875347 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -10,10 +10,14 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include "lan9303.h"
+/* For the LAN9303 and LAN9354, only port 0 is an XMII port. */
+#define IS_PORT_XMII(port) ((port) == 0)
+
#define LAN9303_NUM_PORTS 3
/* 13.2 System Control and Status Registers
@@ -21,6 +25,10 @@
*/
#define LAN9303_CHIP_REV 0x14
# define LAN9303_CHIP_ID 0x9303
+# define LAN9352_CHIP_ID 0x9352
+# define LAN9353_CHIP_ID 0x9353
+# define LAN9354_CHIP_ID 0x9354
+# define LAN9355_CHIP_ID 0x9355
#define LAN9303_IRQ_CFG 0x15
# define LAN9303_IRQ_CFG_IRQ_ENABLE BIT(8)
# define LAN9303_IRQ_CFG_IRQ_POL BIT(4)
@@ -31,6 +39,7 @@
#define LAN9303_INT_EN 0x17
# define LAN9303_INT_EN_PHY_INT2_EN BIT(27)
# define LAN9303_INT_EN_PHY_INT1_EN BIT(26)
+#define LAN9303_BYTE_ORDER 0x19
#define LAN9303_HW_CFG 0x1D
# define LAN9303_HW_CFG_READY BIT(27)
# define LAN9303_HW_CFG_AMDX_EN_PORT2 BIT(26)
@@ -44,6 +53,9 @@
#define LAN9303_MANUAL_FC_1 0x68
#define LAN9303_MANUAL_FC_2 0x69
#define LAN9303_MANUAL_FC_0 0x6a
+# define LAN9303_BP_EN BIT(6)
+# define LAN9303_RX_FC_EN BIT(2)
+# define LAN9303_TX_FC_EN BIT(1)
#define LAN9303_SWITCH_CSR_DATA 0x6b
#define LAN9303_SWITCH_CSR_CMD 0x6c
#define LAN9303_SWITCH_CSR_CMD_BUSY BIT(31)
@@ -219,6 +231,13 @@ const struct regmap_access_table lan9303_register_set = {
};
EXPORT_SYMBOL(lan9303_register_set);
+/* Flow Control registers indexed by port number */
+static unsigned int flow_ctl_reg[] = {
+ LAN9303_MANUAL_FC_0,
+ LAN9303_MANUAL_FC_1,
+ LAN9303_MANUAL_FC_2
+};
+
static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg)
{
int ret, i;
@@ -850,15 +869,12 @@ static int lan9303_check_device(struct lan9303 *chip)
if (ret) {
dev_err(chip->dev, "failed to read chip revision register: %d\n",
ret);
- if (!chip->reset_gpio) {
- dev_dbg(chip->dev,
- "hint: maybe failed due to missing reset GPIO\n");
- }
return ret;
}
- if ((reg >> 16) != LAN9303_CHIP_ID) {
- dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n",
+ if (((reg >> 16) != LAN9303_CHIP_ID) &&
+ ((reg >> 16) != LAN9354_CHIP_ID)) {
+ dev_err(chip->dev, "unexpected device found: LAN%4.4X\n",
reg >> 16);
return -ENODEV;
}
@@ -874,7 +890,7 @@ static int lan9303_check_device(struct lan9303 *chip)
if (ret)
dev_warn(chip->dev, "failed to disable switching %d\n", ret);
- dev_info(chip->dev, "Found LAN9303 rev. %u\n", reg & 0xffff);
+ dev_info(chip->dev, "Found LAN%4.4X rev. %u\n", (reg >> 16), reg & 0xffff);
ret = lan9303_detect_phy_setup(chip);
if (ret) {
@@ -899,6 +915,7 @@ static int lan9303_setup(struct dsa_switch *ds)
{
struct lan9303 *chip = ds->priv;
int ret;
+ u32 reg;
/* Make sure that port 0 is the cpu port */
if (!dsa_is_cpu_port(ds, 0)) {
@@ -906,6 +923,17 @@ static int lan9303_setup(struct dsa_switch *ds)
return -EINVAL;
}
+ /* Virtual Phy: Remove Turbo 200Mbit mode */
+ ret = lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &reg);
+ if (ret)
+ return ret;
+
+ /* Clear the TURBO Mode bit if it was set. */
+ if (reg & LAN9303_VIRT_SPECIAL_TURBO) {
+ reg &= ~LAN9303_VIRT_SPECIAL_TURBO;
+ regmap_write(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, reg);
+ }
+
ret = lan9303_setup_tagging(chip);
if (ret)
dev_err(chip->dev, "failed to setup port tagging %d\n", ret);
@@ -958,7 +986,7 @@ static const struct lan9303_mib_desc lan9303_mib[] = {
{ .offset = LAN9303_MAC_TX_BRDCST_CNT_0, .name = "TxBroad", },
{ .offset = LAN9303_MAC_TX_PAUSE_CNT_0, .name = "TxPause", },
{ .offset = LAN9303_MAC_TX_MULCST_CNT_0, .name = "TxMulti", },
- { .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "TxUnderRun", },
+ { .offset = LAN9303_MAC_RX_UNDSZE_CNT_0, .name = "RxShort", },
{ .offset = LAN9303_MAC_TX_64_CNT_0, .name = "Tx64Byte", },
{ .offset = LAN9303_MAC_TX_127_CNT_0, .name = "Tx128Byte", },
{ .offset = LAN9303_MAC_TX_255_CNT_0, .name = "Tx256Byte", },
@@ -1002,9 +1030,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
ret = lan9303_read_switch_port(
chip, port, lan9303_mib[u].offset, &reg);
- if (ret)
+ if (ret) {
dev_warn(chip->dev, "Reading status port %d reg %u failed\n",
port, lan9303_mib[u].offset);
+ reg = 0;
+ }
data[u] = reg;
}
}
@@ -1044,71 +1074,43 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
return chip->ops->phy_write(chip, phy, regnum, val);
}
-static void lan9303_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
-{
- struct lan9303 *chip = ds->priv;
- int ctl;
-
- if (!phy_is_pseudo_fixed_link(phydev))
- return;
-
- ctl = lan9303_phy_read(ds, port, MII_BMCR);
-
- ctl &= ~BMCR_ANENABLE;
-
- if (phydev->speed == SPEED_100)
- ctl |= BMCR_SPEED100;
- else if (phydev->speed == SPEED_10)
- ctl &= ~BMCR_SPEED100;
- else
- dev_err(ds->dev, "unsupported speed: %d\n", phydev->speed);
-
- if (phydev->duplex == DUPLEX_FULL)
- ctl |= BMCR_FULLDPLX;
- else
- ctl &= ~BMCR_FULLDPLX;
-
- lan9303_phy_write(ds, port, MII_BMCR, ctl);
-
- if (port == chip->phy_addr_base) {
- /* Virtual Phy: Remove Turbo 200Mbit mode */
- lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl);
-
- ctl &= ~LAN9303_VIRT_SPECIAL_TURBO;
- regmap_write(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, ctl);
- }
-}
-
static int lan9303_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return 0;
+ vlan_vid_add(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
+
return lan9303_enable_processing_port(chip, port);
}
static void lan9303_port_disable(struct dsa_switch *ds, int port)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return;
+ vlan_vid_del(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
+
lan9303_disable_processing_port(chip, port);
lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
}
static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(port %d)\n", __func__, port);
- if (dsa_to_port(ds, 1)->bridge_dev == dsa_to_port(ds, 2)->bridge_dev) {
+ if (dsa_port_bridge_same(dsa_to_port(ds, 1), dsa_to_port(ds, 2))) {
lan9303_bridge_ports(chip);
chip->is_bridged = true; /* unleash stp_state_set() */
}
@@ -1117,7 +1119,7 @@ static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
}
static void lan9303_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct lan9303 *chip = ds->priv;
@@ -1180,7 +1182,8 @@ static void lan9303_port_fast_age(struct dsa_switch *ds, int port)
}
static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
@@ -1192,8 +1195,8 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
}
static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
@@ -1237,7 +1240,8 @@ static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port,
}
static int lan9303_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
int err;
@@ -1252,7 +1256,8 @@ static int lan9303_port_mdb_add(struct dsa_switch *ds, int port,
}
static int lan9303_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct lan9303 *chip = ds->priv;
@@ -1265,26 +1270,96 @@ static int lan9303_port_mdb_del(struct dsa_switch *ds, int port,
return 0;
}
+static void lan9303_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(%d) entered.\n", __func__, port);
+
+ config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE;
+
+ if (port == 0) {
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ } else {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ /* Compatibility for phylib's default interface type when the
+ * phy-mode property is absent
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
+
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
+}
+
+static void lan9303_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ struct lan9303 *chip = ds->priv;
+ u32 ctl;
+ u32 reg;
+
+ /* On this device, we are only interested in doing something here if
+ * this is the xMII port. All other ports are 10/100 PHYs using MDIO
+ * to control their link settings.
+ */
+ if (!IS_PORT_XMII(port))
+ return;
+
+ /* Disable auto-negotiation and force the speed/duplex settings. */
+ ctl = lan9303_phy_read(ds, port, MII_BMCR);
+ ctl &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
+ if (speed == SPEED_100)
+ ctl |= BMCR_SPEED100;
+ if (duplex == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ lan9303_phy_write(ds, port, MII_BMCR, ctl);
+
+ /* Force the flow control settings. */
+ lan9303_read(chip->regmap, flow_ctl_reg[port], &reg);
+ reg &= ~(LAN9303_BP_EN | LAN9303_RX_FC_EN | LAN9303_TX_FC_EN);
+ if (rx_pause)
+ reg |= (LAN9303_RX_FC_EN | LAN9303_BP_EN);
+ if (tx_pause)
+ reg |= LAN9303_TX_FC_EN;
+ regmap_write(chip->regmap, flow_ctl_reg[port], reg);
+}
+
static const struct dsa_switch_ops lan9303_switch_ops = {
- .get_tag_protocol = lan9303_get_tag_protocol,
- .setup = lan9303_setup,
- .get_strings = lan9303_get_strings,
- .phy_read = lan9303_phy_read,
- .phy_write = lan9303_phy_write,
- .adjust_link = lan9303_adjust_link,
- .get_ethtool_stats = lan9303_get_ethtool_stats,
- .get_sset_count = lan9303_get_sset_count,
- .port_enable = lan9303_port_enable,
- .port_disable = lan9303_port_disable,
- .port_bridge_join = lan9303_port_bridge_join,
- .port_bridge_leave = lan9303_port_bridge_leave,
- .port_stp_state_set = lan9303_port_stp_state_set,
- .port_fast_age = lan9303_port_fast_age,
- .port_fdb_add = lan9303_port_fdb_add,
- .port_fdb_del = lan9303_port_fdb_del,
- .port_fdb_dump = lan9303_port_fdb_dump,
- .port_mdb_add = lan9303_port_mdb_add,
- .port_mdb_del = lan9303_port_mdb_del,
+ .get_tag_protocol = lan9303_get_tag_protocol,
+ .setup = lan9303_setup,
+ .get_strings = lan9303_get_strings,
+ .phy_read = lan9303_phy_read,
+ .phy_write = lan9303_phy_write,
+ .phylink_get_caps = lan9303_phylink_get_caps,
+ .phylink_mac_link_up = lan9303_phylink_mac_link_up,
+ .get_ethtool_stats = lan9303_get_ethtool_stats,
+ .get_sset_count = lan9303_get_sset_count,
+ .port_enable = lan9303_port_enable,
+ .port_disable = lan9303_port_disable,
+ .port_bridge_join = lan9303_port_bridge_join,
+ .port_bridge_leave = lan9303_port_bridge_leave,
+ .port_stp_state_set = lan9303_port_stp_state_set,
+ .port_fast_age = lan9303_port_fast_age,
+ .port_fdb_add = lan9303_port_fdb_add,
+ .port_fdb_del = lan9303_port_fdb_del,
+ .port_fdb_dump = lan9303_port_fdb_dump,
+ .port_mdb_add = lan9303_port_mdb_add,
+ .port_mdb_del = lan9303_port_mdb_del,
};
static int lan9303_register_switch(struct lan9303 *chip)
@@ -1309,7 +1384,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
struct device_node *np)
{
chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
- GPIOD_OUT_LOW);
+ GPIOD_OUT_HIGH);
if (IS_ERR(chip->reset_gpio))
return PTR_ERR(chip->reset_gpio);
@@ -1337,6 +1412,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
int lan9303_probe(struct lan9303 *chip, struct device_node *np)
{
int ret;
+ u32 reg;
mutex_init(&chip->indirect_mutex);
mutex_init(&chip->alr_mutex);
@@ -1347,6 +1423,19 @@ int lan9303_probe(struct lan9303 *chip, struct device_node *np)
lan9303_handle_reset(chip);
+ /* First read of the device: a dummy read to ensure MDIO access is
+ * in 32-bit sync.
+ */
+ ret = lan9303_read(chip->regmap, LAN9303_BYTE_ORDER, &reg);
+ if (ret) {
+ dev_err(chip->dev, "failed to access the device: %d\n",
+ ret);
+ if (!chip->reset_gpio) {
+ dev_dbg(chip->dev,
+ "hint: maybe failed due to missing reset GPIO\n");
+ }
+ return ret;
+ }
+
ret = lan9303_check_device(chip);
if (ret)
return ret;
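The new lan9303_phylink_mac_link_up() above forces the xMII port's flow control by mapping phylink's resolved rx_pause/tx_pause flags onto the per-port manual flow-control register selected via flow_ctl_reg[]. A minimal standalone sketch of that mapping; the bit values mirror LAN9303_BP_EN, LAN9303_RX_FC_EN and LAN9303_TX_FC_EN from this file, and fc_bits() is illustrative only, not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BP_EN     (1u << 6)   /* mirrors LAN9303_BP_EN */
#define RX_FC_EN  (1u << 2)   /* mirrors LAN9303_RX_FC_EN */
#define TX_FC_EN  (1u << 1)   /* mirrors LAN9303_TX_FC_EN */

static uint32_t fc_bits(bool rx_pause, bool tx_pause)
{
        uint32_t reg = 0;

        /* rx pause enables receive flow control plus back-pressure,
         * matching what the driver writes in mac_link_up.
         */
        if (rx_pause)
                reg |= RX_FC_EN | BP_EN;
        if (tx_pause)
                reg |= TX_FC_EN;

        return reg;
}

int main(void)
{
        printf("rx+tx pause -> 0x%02x\n", fc_bits(true, true));
        printf("no pause    -> 0x%02x\n", fc_bits(false, false));
        return 0;
}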
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
index 8ca4713310fa..e8844820c3a9 100644
--- a/drivers/net/dsa/lan9303_i2c.c
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -29,8 +29,7 @@ static const struct regmap_config lan9303_i2c_regmap_config = {
.cache_type = REGCACHE_NONE,
};
-static int lan9303_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lan9303_i2c_probe(struct i2c_client *client)
{
struct lan9303_i2c *sw_dev;
int ret;
@@ -65,18 +64,14 @@ static int lan9303_i2c_probe(struct i2c_client *client,
return 0;
}
-static int lan9303_i2c_remove(struct i2c_client *client)
+static void lan9303_i2c_remove(struct i2c_client *client)
{
struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
if (!sw_dev)
- return 0;
+ return;
lan9303_remove(&sw_dev->chip);
-
- i2c_set_clientdata(client, NULL);
-
- return 0;
}
static void lan9303_i2c_shutdown(struct i2c_client *client)
@@ -108,9 +103,9 @@ MODULE_DEVICE_TABLE(of, lan9303_i2c_of_match);
static struct i2c_driver lan9303_i2c_driver = {
.driver = {
.name = "LAN9303_I2C",
- .of_match_table = of_match_ptr(lan9303_i2c_of_match),
+ .of_match_table = lan9303_i2c_of_match,
},
- .probe = lan9303_i2c_probe,
+ .probe_new = lan9303_i2c_probe,
.remove = lan9303_i2c_remove,
.shutdown = lan9303_i2c_shutdown,
.id_table = lan9303_i2c_id,
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
index bbb7032409ba..d8ab2b77d201 100644
--- a/drivers/net/dsa/lan9303_mdio.c
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -138,8 +138,6 @@ static void lan9303_mdio_remove(struct mdio_device *mdiodev)
return;
lan9303_remove(&sw_dev->chip);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
@@ -158,6 +156,7 @@ static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
static const struct of_device_id lan9303_mdio_of_match[] = {
{ .compatible = "smsc,lan9303-mdio" },
+ { .compatible = "microchip,lan9354-mdio" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lan9303_mdio_of_match);
@@ -165,7 +164,7 @@ MODULE_DEVICE_TABLE(of, lan9303_mdio_of_match);
static struct mdio_driver lan9303_mdio_driver = {
.mdiodrv.driver = {
.name = "LAN9303_MDIO",
- .of_match_table = of_match_ptr(lan9303_mdio_of_match),
+ .of_match_table = lan9303_mdio_of_match,
},
.probe = lan9303_mdio_probe,
.remove = lan9303_mdio_remove,
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 7056d98d8177..3c76a1a14aee 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -213,6 +213,7 @@
#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
+#define GSWIP_MAC_CTRL_2_LCHKL BIT(2) /* Frame Length Check Long Enable */
#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */
/* Ethernet Switch Fetch DMA Port Control Register */
@@ -239,6 +240,15 @@
#define XRX200_GPHY_FW_ALIGN (16 * 1024)
+/* Maximum packet size supported by the switch. In theory this should be 10240,
+ * but long packets currently cause lock-ups with an MTU of over 2526. Medium
+ * packets are sometimes dropped (e.g. TCP over 2477, UDP over 2516-2519, ICMP
+ * over 2526), hence an MTU value of 2400 seems safe. This issue only affects
+ * packet reception. This is probably caused by the PPA engine, which is on the
+ * RX part of the device. Packet transmission works properly up to 10240.
+ */
+#define GSWIP_MAX_PACKET_LENGTH 2400
+
struct gswip_hw_info {
int max_ports;
int cpu_port;
@@ -498,8 +508,9 @@ static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
struct dsa_switch *ds = priv->ds;
+ int err;
- ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
+ ds->slave_mii_bus = mdiobus_alloc();
if (!ds->slave_mii_bus)
return -ENOMEM;
@@ -512,7 +523,11 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
ds->slave_mii_bus->parent = priv->dev;
ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
- return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+ err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+ if (err)
+ mdiobus_free(ds->slave_mii_bus);
+
+ return err;
}
static int gswip_pce_table_entry_read(struct gswip_priv *priv,
@@ -759,7 +774,7 @@ static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
{
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
/* Do not allow changing the VLAN filtering options while in bridge */
@@ -858,10 +873,6 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
GSWIP_PCE_PCTRL_0p(cpu_port));
- gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
- GSWIP_MAC_CTRL_2p(cpu_port));
- gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8 + ETH_FCS_LEN,
- GSWIP_MAC_FLEN);
gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD,
GSWIP_BM_QUEUE_GCTRL);
@@ -878,6 +889,8 @@ static int gswip_setup(struct dsa_switch *ds)
return err;
}
+ ds->mtu_enforcement_ingress = true;
+
gswip_port_enable(ds, cpu_port, NULL);
ds->configure_vlan_while_not_filtering = false;
@@ -1146,16 +1159,19 @@ static int gswip_vlan_remove(struct gswip_priv *priv,
}
static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
+ struct net_device *br = bridge.dev;
struct gswip_priv *priv = ds->priv;
int err;
/* When the bridge uses VLAN filtering we have to configure VLAN
* specific bridges. No bridge is configured here.
*/
- if (!br_vlan_enabled(bridge)) {
- err = gswip_vlan_add_unaware(priv, bridge, port);
+ if (!br_vlan_enabled(br)) {
+ err = gswip_vlan_add_unaware(priv, br, port);
if (err)
return err;
priv->port_vlan_filter &= ~BIT(port);
@@ -1166,8 +1182,9 @@ static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
}
static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge)
{
+ struct net_device *br = bridge.dev;
struct gswip_priv *priv = ds->priv;
gswip_add_single_port_br(priv, port, true);
@@ -1175,16 +1192,16 @@ static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
/* When the bridge uses VLAN filtering we have to configure VLAN
* specific bridges. No bridge is configured here.
*/
- if (!br_vlan_enabled(bridge))
- gswip_vlan_remove(priv, bridge, port, 0, true, false);
+ if (!br_vlan_enabled(br))
+ gswip_vlan_remove(priv, br, port, 0, true, false);
}
static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
unsigned int max_ports = priv->hw_info->max_ports;
int pos = max_ports;
int i, idx = -1;
@@ -1229,8 +1246,8 @@ static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
int err;
@@ -1254,8 +1271,8 @@ static int gswip_port_vlan_add(struct dsa_switch *ds, int port,
static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
/* We have to receive all packets on the CPU port and should not
@@ -1340,10 +1357,10 @@ static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
static int gswip_port_fdb(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid, bool add)
{
+ struct net_device *bridge = dsa_port_bridge_dev_get(dsa_to_port(ds, port));
struct gswip_priv *priv = ds->priv;
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
struct gswip_pce_table_entry mac_bridge = {0,};
- unsigned int cpu_port = priv->hw_info->cpu_port;
+ unsigned int max_ports = priv->hw_info->max_ports;
int fid = -1;
int i;
int err;
@@ -1351,7 +1368,7 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
if (!bridge)
return -EINVAL;
- for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) {
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge) {
fid = priv->vlans[i].fid;
break;
@@ -1381,13 +1398,15 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
}
static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
return gswip_port_fdb(ds, port, addr, vid, true);
}
static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
return gswip_port_fdb(ds, port, addr, vid, false);
}
@@ -1407,8 +1426,9 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
err = gswip_pce_table_entry_read(priv, &mac_bridge);
if (err) {
- dev_err(priv->dev, "failed to write mac bridge: %d\n",
- err);
+ dev_err(priv->dev,
+ "failed to read mac bridge entry %d: %d\n",
+ i, err);
return err;
}
@@ -1438,114 +1458,103 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
return 0;
}
-static void gswip_phylink_set_capab(unsigned long *supported,
- struct phylink_link_state *state)
+static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ /* Includes 8 bytes for special header. */
+ return GSWIP_MAX_PACKET_LENGTH - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
+static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct gswip_priv *priv = ds->priv;
+ int cpu_port = priv->hw_info->cpu_port;
- /* With the exclusion of MII, Reverse MII and Reduced MII, we
- * support Gigabit, including Half duplex
+ /* The CPU port always carries the maximum MTU of the user ports, so use
+ * it to set the switch frame size, including the 8 byte special header.
+ */
- if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- state->interface != PHY_INTERFACE_MODE_RMII) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
+ if (port == cpu_port) {
+ new_mtu += 8;
+ gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
+ GSWIP_MAC_FLEN);
}
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
+ /* Enable MLEN for ports with non-standard MTUs, including the special
+ * header on the CPU port added above.
+ */
+ if (new_mtu != ETH_DATA_LEN)
+ gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
+ GSWIP_MAC_CTRL_2p(port));
+ else
+ gswip_switch_mask(priv, GSWIP_MAC_CTRL_2_MLEN, 0,
+ GSWIP_MAC_CTRL_2p(port));
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ return 0;
}
-static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
switch (port) {
case 0:
case 1:
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII &&
- state->interface != PHY_INTERFACE_MODE_RMII)
- goto unsupported;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_REVMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
break;
+
case 2:
case 3:
case 4:
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
break;
+
case 5:
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
break;
- default:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
}
- gswip_phylink_set_capab(supported, state);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
- phy_modes(state->interface), port);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
}
-static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
switch (port) {
case 0:
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_RMII)
- goto unsupported;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
break;
+
case 1:
case 2:
case 3:
case 4:
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
break;
+
case 5:
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_RMII)
- goto unsupported;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
break;
- default:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
}
- gswip_phylink_set_capab(supported, state);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
- phy_modes(state->interface), port);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000;
}
static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
@@ -1673,9 +1682,6 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
break;
case PHY_INTERFACE_MODE_RMII:
miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
-
- /* Configure the RMII clock as output: */
- miicfg |= GSWIP_MII_CFG_RMII_CLK;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
@@ -1827,7 +1833,9 @@ static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
.port_fdb_add = gswip_port_fdb_add,
.port_fdb_del = gswip_port_fdb_del,
.port_fdb_dump = gswip_port_fdb_dump,
- .phylink_validate = gswip_xrx200_phylink_validate,
+ .port_change_mtu = gswip_port_change_mtu,
+ .port_max_mtu = gswip_port_max_mtu,
+ .phylink_get_caps = gswip_xrx200_phylink_get_caps,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
.phylink_mac_link_up = gswip_phylink_mac_link_up,
@@ -1851,7 +1859,9 @@ static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
.port_fdb_add = gswip_port_fdb_add,
.port_fdb_del = gswip_port_fdb_del,
.port_fdb_dump = gswip_port_fdb_dump,
- .phylink_validate = gswip_xrx300_phylink_validate,
+ .port_change_mtu = gswip_port_change_mtu,
+ .port_max_mtu = gswip_port_max_mtu,
+ .phylink_get_caps = gswip_xrx300_phylink_get_caps,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
.phylink_mac_link_up = gswip_phylink_mac_link_up,
@@ -1875,7 +1885,7 @@ static const struct xway_gphy_match_data xrx300_gphy_data = {
.ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin",
};
-static const struct of_device_id xway_gphy_match[] = {
+static const struct of_device_id xway_gphy_match[] __maybe_unused = {
{ .compatible = "lantiq,xrx200-gphy-fw", .data = NULL },
{ .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data },
{ .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data },
@@ -1979,11 +1989,9 @@ static int gswip_gphy_fw_probe(struct gswip_priv *priv,
}
gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
- if (IS_ERR(gphy_fw->reset)) {
- if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
- dev_err(dev, "Failed to lookup gphy reset\n");
- return PTR_ERR(gphy_fw->reset);
- }
+ if (IS_ERR(gphy_fw->reset))
+ return dev_err_probe(dev, PTR_ERR(gphy_fw->reset),
+ "Failed to lookup gphy reset\n");
return gswip_gphy_fw_load(priv, gphy_fw);
}
@@ -2060,8 +2068,10 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
gphy_fw_np, i);
- if (err)
+ if (err) {
+ of_node_put(gphy_fw_np);
goto remove_gphy;
+ }
i++;
}
@@ -2186,8 +2196,10 @@ disable_switch:
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
dsa_unregister_switch(priv->ds);
mdio_bus:
- if (mdio_np)
+ if (mdio_np) {
mdiobus_unregister(priv->ds->slave_mii_bus);
+ mdiobus_free(priv->ds->slave_mii_bus);
+ }
put_mdio_node:
of_node_put(mdio_np);
for (i = 0; i < priv->num_gphy_fw; i++)
@@ -2211,13 +2223,12 @@ static int gswip_remove(struct platform_device *pdev)
if (priv->ds->slave_mii_bus) {
mdiobus_unregister(priv->ds->slave_mii_bus);
of_node_put(priv->ds->slave_mii_bus->dev.of_node);
+ mdiobus_free(priv->ds->slave_mii_bus);
}
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
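A worked example of the frame length that the new gswip_port_change_mtu() writes to GSWIP_MAC_FLEN: only the CPU port programs the register, and it first adds 8 bytes for the DSA special tag before the VLAN Ethernet header and FCS are accounted for. The constants below mirror the kernel values; this is an illustration, not driver code.

#include <stdio.h>

#define VLAN_ETH_HLEN  18
#define ETH_FCS_LEN    4
#define SPECIAL_TAG    8    /* DSA special header added on the CPU port */

/* Mirrors the CPU-port branch of gswip_port_change_mtu(). */
static int gswip_flen_for_cpu_mtu(int new_mtu)
{
        new_mtu += SPECIAL_TAG;
        return VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN;
}

int main(void)
{
        /* Default MTU of 1500 gives a 1530 byte frame limit. */
        printf("MTU 1500 -> FLEN %d\n", gswip_flen_for_cpu_mtu(1500));
        /* Largest MTU reported by gswip_port_max_mtu(): 2400 - 18 - 4. */
        printf("MTU 2378 -> FLEN %d\n", gswip_flen_for_cpu_mtu(2378));
        return 0;
}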
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index c9e2a8989556..394ca8678d2b 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -1,49 +1,40 @@
# SPDX-License-Identifier: GPL-2.0-only
-config NET_DSA_MICROCHIP_KSZ_COMMON
- select NET_DSA_TAG_KSZ
- tristate
-
-menuconfig NET_DSA_MICROCHIP_KSZ9477
- tristate "Microchip KSZ9477 series switch support"
+menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
+ tristate "Microchip KSZ8795/KSZ9477/LAN937x series switch support"
depends on NET_DSA
- select NET_DSA_MICROCHIP_KSZ_COMMON
+ select NET_DSA_TAG_KSZ
+ select NET_DSA_TAG_NONE
help
- This driver adds support for Microchip KSZ9477 switch chips.
+ This driver adds support for Microchip KSZ9477 series switch and
+ KSZ8795/KSZ88x3 switch chips.
config NET_DSA_MICROCHIP_KSZ9477_I2C
- tristate "KSZ9477 series I2C connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ9477 && I2C
+ tristate "KSZ series I2C connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && I2C
select REGMAP_I2C
help
Select to enable support for registering switches configured through I2C.
-config NET_DSA_MICROCHIP_KSZ9477_SPI
- tristate "KSZ9477 series SPI connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ9477 && SPI
+config NET_DSA_MICROCHIP_KSZ_SPI
+ tristate "KSZ series SPI connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && SPI
select REGMAP_SPI
help
Select to enable support for registering switches configured through SPI.
-menuconfig NET_DSA_MICROCHIP_KSZ8795
- tristate "Microchip KSZ8795 series switch support"
- depends on NET_DSA
- select NET_DSA_MICROCHIP_KSZ_COMMON
- help
- This driver adds support for Microchip KSZ8795/KSZ88X3 switch chips.
-
-config NET_DSA_MICROCHIP_KSZ8795_SPI
- tristate "KSZ8795 series SPI connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ8795 && SPI
- select REGMAP_SPI
+config NET_DSA_MICROCHIP_KSZ_PTP
+ bool "Support for the PTP clock on the KSZ9563/LAN937x Ethernet Switch"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && PTP_1588_CLOCK
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON=m || PTP_1588_CLOCK=y
help
- This driver accesses KSZ8795 chip through SPI.
-
- It is required to use the KSZ8795 switch driver as the only access
- is through SPI.
+ Select to enable support for timestamping & PTP clock manipulation in
+ the KSZ8563/KSZ9563/LAN937x series of switches. The KSZ8563/KSZ9563
+ support only one-step timestamping, while the LAN937x switches support
+ both one-step and two-step timestamping.
config NET_DSA_MICROCHIP_KSZ8863_SMI
tristate "KSZ series SMI connected switch driver"
- depends on NET_DSA_MICROCHIP_KSZ8795
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON
select MDIO_BITBANG
help
Select to enable support for registering switches configured through
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 2a03b21a3386..48360cc9fc68 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -1,8 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_common.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477) += ksz9477.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o
+ksz_switch-objs := ksz_common.o
+ksz_switch-objs += ksz9477.o
+ksz_switch-objs += ksz8795.o
+ksz_switch-objs += lan937x_main.o
+
+ifdef CONFIG_NET_DSA_MICROCHIP_KSZ_PTP
+ksz_switch-objs += ksz_ptp.o
+endif
+
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C) += ksz9477_i2c.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI) += ksz9477_spi.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795) += ksz8795.o
-obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8795_SPI) += ksz8795_spi.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_SPI) += ksz_spi.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8863_SMI) += ksz8863_smi.o
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index 9d611895d3cf..e68465fdf6b9 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -7,63 +7,56 @@
#ifndef __KSZ8XXX_H
#define __KSZ8XXX_H
-#include <linux/kernel.h>
-enum ksz_regs {
- REG_IND_CTRL_0,
- REG_IND_DATA_8,
- REG_IND_DATA_CHECK,
- REG_IND_DATA_HI,
- REG_IND_DATA_LO,
- REG_IND_MIB_CHECK,
- P_FORCE_CTRL,
- P_LINK_STATUS,
- P_LOCAL_CTRL,
- P_NEG_RESTART_CTRL,
- P_REMOTE_STATUS,
- P_SPEED_STATUS,
- S_TAIL_TAG_CTRL,
-};
+#include <linux/types.h>
+#include <net/dsa.h>
+#include "ksz_common.h"
-enum ksz_masks {
- PORT_802_1P_REMAPPING,
- SW_TAIL_TAG_ENABLE,
- MIB_COUNTER_OVERFLOW,
- MIB_COUNTER_VALID,
- VLAN_TABLE_FID,
- VLAN_TABLE_MEMBERSHIP,
- VLAN_TABLE_VALID,
- STATIC_MAC_TABLE_VALID,
- STATIC_MAC_TABLE_USE_FID,
- STATIC_MAC_TABLE_FID,
- STATIC_MAC_TABLE_OVERRIDE,
- STATIC_MAC_TABLE_FWD_PORTS,
- DYNAMIC_MAC_TABLE_ENTRIES_H,
- DYNAMIC_MAC_TABLE_MAC_EMPTY,
- DYNAMIC_MAC_TABLE_NOT_READY,
- DYNAMIC_MAC_TABLE_ENTRIES,
- DYNAMIC_MAC_TABLE_FID,
- DYNAMIC_MAC_TABLE_SRC_PORT,
- DYNAMIC_MAC_TABLE_TIMESTAMP,
-};
-
-enum ksz_shifts {
- VLAN_TABLE_MEMBERSHIP_S,
- VLAN_TABLE,
- STATIC_MAC_FWD_PORTS,
- STATIC_MAC_FID,
- DYNAMIC_MAC_ENTRIES_H,
- DYNAMIC_MAC_ENTRIES,
- DYNAMIC_MAC_FID,
- DYNAMIC_MAC_TIMESTAMP,
- DYNAMIC_MAC_SRC_PORT,
-};
-
-struct ksz8 {
- const u8 *regs;
- const u32 *masks;
- const u8 *shifts;
- void *priv;
-};
+int ksz8_setup(struct dsa_switch *ds);
+u32 ksz8_get_port_addr(int port, int offset);
+void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member);
+void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port);
+void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries);
+void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
+void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze);
+void ksz8_port_init_cnt(struct ksz_device *dev, int port);
+int ksz8_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int ksz8_fdb_add(struct ksz_device *dev, int port, const unsigned char *addr,
+ u16 vid, struct dsa_db db);
+int ksz8_fdb_del(struct ksz_device *dev, int port, const unsigned char *addr,
+ u16 vid, struct dsa_db db);
+int ksz8_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz8_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
+ struct netlink_ext_ack *extack);
+int ksz8_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int ksz8_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int ksz8_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void ksz8_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+int ksz8_get_stp_reg(void);
+void ksz8_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+void ksz8_config_cpu_port(struct dsa_switch *ds);
+int ksz8_enable_stp_addr(struct ksz_device *dev);
+int ksz8_reset_switch(struct ksz_device *dev);
+int ksz8_switch_detect(struct ksz_device *dev);
+int ksz8_switch_init(struct ksz_device *dev);
+void ksz8_switch_exit(struct ksz_device *dev);
+int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu);
#endif
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 013e9c02be71..f56fca1b1a22 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
+#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/microchip-ksz.h>
@@ -25,190 +26,6 @@
#include "ksz8795_reg.h"
#include "ksz8.h"
-static const u8 ksz8795_regs[] = {
- [REG_IND_CTRL_0] = 0x6E,
- [REG_IND_DATA_8] = 0x70,
- [REG_IND_DATA_CHECK] = 0x72,
- [REG_IND_DATA_HI] = 0x71,
- [REG_IND_DATA_LO] = 0x75,
- [REG_IND_MIB_CHECK] = 0x74,
- [P_FORCE_CTRL] = 0x0C,
- [P_LINK_STATUS] = 0x0E,
- [P_LOCAL_CTRL] = 0x07,
- [P_NEG_RESTART_CTRL] = 0x0D,
- [P_REMOTE_STATUS] = 0x08,
- [P_SPEED_STATUS] = 0x09,
- [S_TAIL_TAG_CTRL] = 0x0C,
-};
-
-static const u32 ksz8795_masks[] = {
- [PORT_802_1P_REMAPPING] = BIT(7),
- [SW_TAIL_TAG_ENABLE] = BIT(1),
- [MIB_COUNTER_OVERFLOW] = BIT(6),
- [MIB_COUNTER_VALID] = BIT(5),
- [VLAN_TABLE_FID] = GENMASK(6, 0),
- [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
- [VLAN_TABLE_VALID] = BIT(12),
- [STATIC_MAC_TABLE_VALID] = BIT(21),
- [STATIC_MAC_TABLE_USE_FID] = BIT(23),
- [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
- [STATIC_MAC_TABLE_OVERRIDE] = BIT(26),
- [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(24, 20),
- [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
- [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(8),
- [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
- [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
- [DYNAMIC_MAC_TABLE_FID] = GENMASK(26, 20),
- [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
- [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
-};
-
-static const u8 ksz8795_shifts[] = {
- [VLAN_TABLE_MEMBERSHIP_S] = 7,
- [VLAN_TABLE] = 16,
- [STATIC_MAC_FWD_PORTS] = 16,
- [STATIC_MAC_FID] = 24,
- [DYNAMIC_MAC_ENTRIES_H] = 3,
- [DYNAMIC_MAC_ENTRIES] = 29,
- [DYNAMIC_MAC_FID] = 16,
- [DYNAMIC_MAC_TIMESTAMP] = 27,
- [DYNAMIC_MAC_SRC_PORT] = 24,
-};
-
-static const u8 ksz8863_regs[] = {
- [REG_IND_CTRL_0] = 0x79,
- [REG_IND_DATA_8] = 0x7B,
- [REG_IND_DATA_CHECK] = 0x7B,
- [REG_IND_DATA_HI] = 0x7C,
- [REG_IND_DATA_LO] = 0x80,
- [REG_IND_MIB_CHECK] = 0x80,
- [P_FORCE_CTRL] = 0x0C,
- [P_LINK_STATUS] = 0x0E,
- [P_LOCAL_CTRL] = 0x0C,
- [P_NEG_RESTART_CTRL] = 0x0D,
- [P_REMOTE_STATUS] = 0x0E,
- [P_SPEED_STATUS] = 0x0F,
- [S_TAIL_TAG_CTRL] = 0x03,
-};
-
-static const u32 ksz8863_masks[] = {
- [PORT_802_1P_REMAPPING] = BIT(3),
- [SW_TAIL_TAG_ENABLE] = BIT(6),
- [MIB_COUNTER_OVERFLOW] = BIT(7),
- [MIB_COUNTER_VALID] = BIT(6),
- [VLAN_TABLE_FID] = GENMASK(15, 12),
- [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
- [VLAN_TABLE_VALID] = BIT(19),
- [STATIC_MAC_TABLE_VALID] = BIT(19),
- [STATIC_MAC_TABLE_USE_FID] = BIT(21),
- [STATIC_MAC_TABLE_FID] = GENMASK(29, 26),
- [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
- [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
- [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(5, 0),
- [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(7),
- [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
- [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 28),
- [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
- [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
- [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
-};
-
-static u8 ksz8863_shifts[] = {
- [VLAN_TABLE_MEMBERSHIP_S] = 16,
- [STATIC_MAC_FWD_PORTS] = 16,
- [STATIC_MAC_FID] = 22,
- [DYNAMIC_MAC_ENTRIES_H] = 3,
- [DYNAMIC_MAC_ENTRIES] = 24,
- [DYNAMIC_MAC_FID] = 16,
- [DYNAMIC_MAC_TIMESTAMP] = 24,
- [DYNAMIC_MAC_SRC_PORT] = 20,
-};
-
-struct mib_names {
- char string[ETH_GSTRING_LEN];
-};
-
-static const struct mib_names ksz87xx_mib_names[] = {
- { "rx_hi" },
- { "rx_undersize" },
- { "rx_fragments" },
- { "rx_oversize" },
- { "rx_jabbers" },
- { "rx_symbol_err" },
- { "rx_crc_err" },
- { "rx_align_err" },
- { "rx_mac_ctrl" },
- { "rx_pause" },
- { "rx_bcast" },
- { "rx_mcast" },
- { "rx_ucast" },
- { "rx_64_or_less" },
- { "rx_65_127" },
- { "rx_128_255" },
- { "rx_256_511" },
- { "rx_512_1023" },
- { "rx_1024_1522" },
- { "rx_1523_2000" },
- { "rx_2001" },
- { "tx_hi" },
- { "tx_late_col" },
- { "tx_pause" },
- { "tx_bcast" },
- { "tx_mcast" },
- { "tx_ucast" },
- { "tx_deferred" },
- { "tx_total_col" },
- { "tx_exc_col" },
- { "tx_single_col" },
- { "tx_mult_col" },
- { "rx_total" },
- { "tx_total" },
- { "rx_discards" },
- { "tx_discards" },
-};
-
-static const struct mib_names ksz88xx_mib_names[] = {
- { "rx" },
- { "rx_hi" },
- { "rx_undersize" },
- { "rx_fragments" },
- { "rx_oversize" },
- { "rx_jabbers" },
- { "rx_symbol_err" },
- { "rx_crc_err" },
- { "rx_align_err" },
- { "rx_mac_ctrl" },
- { "rx_pause" },
- { "rx_bcast" },
- { "rx_mcast" },
- { "rx_ucast" },
- { "rx_64_or_less" },
- { "rx_65_127" },
- { "rx_128_255" },
- { "rx_256_511" },
- { "rx_512_1023" },
- { "rx_1024_1522" },
- { "tx" },
- { "tx_hi" },
- { "tx_late_col" },
- { "tx_pause" },
- { "tx_bcast" },
- { "tx_mcast" },
- { "tx_ucast" },
- { "tx_deferred" },
- { "tx_total_col" },
- { "tx_exc_col" },
- { "tx_single_col" },
- { "tx_mult_col" },
- { "rx_discards" },
- { "tx_discards" },
-};
-
-static bool ksz_is_ksz88x3(struct ksz_device *dev)
-{
- return dev->chip_id == 0x8830;
-}
-
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
@@ -221,7 +38,27 @@ static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
bits, set ? bits : 0);
}
-static int ksz8_reset_switch(struct ksz_device *dev)
+static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
+{
+ const u16 *regs;
+ u16 ctrl_addr;
+ int ret = 0;
+
+ regs = dev->info->regs;
+
+ mutex_lock(&dev->alu_mutex);
+
+ ctrl_addr = IND_ACC_TABLE(table) | addr;
+ ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
+ if (!ret)
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+int ksz8_reset_switch(struct ksz_device *dev)
{
if (ksz_is_ksz88x3(dev)) {
/* reset switch */
@@ -239,6 +76,57 @@ static int ksz8_reset_switch(struct ksz_device *dev)
return 0;
}
+static int ksz8863_change_mtu(struct ksz_device *dev, int frame_size)
+{
+ u8 ctrl2 = 0;
+
+ if (frame_size <= KSZ8_LEGAL_PACKET_SIZE)
+ ctrl2 |= KSZ8863_LEGAL_PACKET_ENABLE;
+ else if (frame_size > KSZ8863_NORMAL_PACKET_SIZE)
+ ctrl2 |= KSZ8863_HUGE_PACKET_ENABLE;
+
+ return ksz_rmw8(dev, REG_SW_CTRL_2, KSZ8863_LEGAL_PACKET_ENABLE |
+ KSZ8863_HUGE_PACKET_ENABLE, ctrl2);
+}
+
+static int ksz8795_change_mtu(struct ksz_device *dev, int frame_size)
+{
+ u8 ctrl1 = 0, ctrl2 = 0;
+ int ret;
+
+ if (frame_size > KSZ8_LEGAL_PACKET_SIZE)
+ ctrl2 |= SW_LEGAL_PACKET_DISABLE;
+ if (frame_size > KSZ8863_NORMAL_PACKET_SIZE)
+ ctrl1 |= SW_HUGE_PACKET;
+
+ ret = ksz_rmw8(dev, REG_SW_CTRL_1, SW_HUGE_PACKET, ctrl1);
+ if (ret)
+ return ret;
+
+ return ksz_rmw8(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, ctrl2);
+}
+
+int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu)
+{
+ u16 frame_size;
+
+ if (!dsa_is_cpu_port(dev->ds, port))
+ return 0;
+
+ frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ switch (dev->chip_id) {
+ case KSZ8795_CHIP_ID:
+ case KSZ8794_CHIP_ID:
+ case KSZ8765_CHIP_ID:
+ return ksz8795_change_mtu(dev, frame_size);
+ case KSZ8830_CHIP_ID:
+ return ksz8863_change_mtu(dev, frame_size);
+ }
+
+ return -EOPNOTSUPP;
+}
+
static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
{
u8 hi, lo;
@@ -272,20 +160,19 @@ static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
true);
}
-static void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
+void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
- struct ksz8 *ksz8 = dev->priv;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
u16 ctrl_addr;
u32 data;
u8 check;
int loop;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
- ctrl_addr = addr + dev->reg_mib_cnt * port;
+ ctrl_addr = addr + dev->info->reg_mib_cnt * port;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
mutex_lock(&dev->alu_mutex);
@@ -311,18 +198,17 @@ static void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
- struct ksz8 *ksz8 = dev->priv;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
u16 ctrl_addr;
u32 data;
u8 check;
int loop;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
- addr -= dev->reg_mib_cnt;
+ addr -= dev->info->reg_mib_cnt;
ctrl_addr = (KSZ8795_MIB_TOTAL_RX_1 - KSZ8795_MIB_TOTAL_RX_0) * port;
ctrl_addr += addr + KSZ8795_MIB_TOTAL_RX_0;
ctrl_addr |= IND_ACC_TABLE(TABLE_MIB | TABLE_READ);
@@ -364,14 +250,15 @@ static void ksz8795_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt)
{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
u32 *last = (u32 *)dropped;
+ const u16 *regs;
u16 ctrl_addr;
u32 data;
u32 cur;
- addr -= dev->reg_mib_cnt;
+ regs = dev->info->regs;
+
+ addr -= dev->info->reg_mib_cnt;
ctrl_addr = addr ? KSZ8863_MIB_PACKET_DROPPED_TX_0 :
KSZ8863_MIB_PACKET_DROPPED_RX_0;
ctrl_addr += port;
@@ -393,8 +280,8 @@ static void ksz8863_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
}
}
-static void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
- u64 *dropped, u64 *cnt)
+void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
{
if (ksz_is_ksz88x3(dev))
ksz8863_r_mib_pkt(dev, port, addr, dropped, cnt);
@@ -402,7 +289,7 @@ static void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
ksz8795_r_mib_pkt(dev, port, addr, dropped, cnt);
}
-static void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
if (ksz_is_ksz88x3(dev))
return;
@@ -417,7 +304,7 @@ static void ksz8_freeze_mib(struct ksz_device *dev, int port, bool freeze)
ksz_cfg(dev, REG_SW_CTRL_6, BIT(port), false);
}
-static void ksz8_port_init_cnt(struct ksz_device *dev, int port)
+void ksz8_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
u64 *dropped;
@@ -432,62 +319,75 @@ static void ksz8_port_init_cnt(struct ksz_device *dev, int port)
mib->cnt_ptr = 0;
/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->reg_mib_cnt) {
+ while (mib->cnt_ptr < dev->info->reg_mib_cnt) {
dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
&mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
}
/* last one in storage */
- dropped = &mib->counters[dev->mib_cnt];
+ dropped = &mib->counters[dev->info->mib_cnt];
/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->mib_cnt) {
+ while (mib->cnt_ptr < dev->info->mib_cnt) {
dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
dropped, &mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
}
- mib->cnt_ptr = 0;
- memset(mib->counters, 0, dev->mib_cnt * sizeof(u64));
}
-static void ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
+static int ksz8_r_table(struct ksz_device *dev, int table, u16 addr, u64 *data)
{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
+ const u16 *regs;
u16 ctrl_addr;
+ int ret;
+
+ regs = dev->info->regs;
ctrl_addr = IND_ACC_TABLE(table | TABLE_READ) | addr;
mutex_lock(&dev->alu_mutex);
- ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
- ksz_read64(dev, regs[REG_IND_DATA_HI], data);
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ if (ret)
+ goto unlock_alu;
+
+ ret = ksz_read64(dev, regs[REG_IND_DATA_HI], data);
+unlock_alu:
mutex_unlock(&dev->alu_mutex);
+
+ return ret;
}
-static void ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
+static int ksz8_w_table(struct ksz_device *dev, int table, u16 addr, u64 data)
{
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
+ const u16 *regs;
u16 ctrl_addr;
+ int ret;
+
+ regs = dev->info->regs;
ctrl_addr = IND_ACC_TABLE(table) | addr;
mutex_lock(&dev->alu_mutex);
- ksz_write64(dev, regs[REG_IND_DATA_HI], data);
- ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ ret = ksz_write64(dev, regs[REG_IND_DATA_HI], data);
+ if (ret)
+ goto unlock_alu;
+
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+unlock_alu:
mutex_unlock(&dev->alu_mutex);
+
+ return ret;
}
static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
{
- struct ksz8 *ksz8 = dev->priv;
int timeout = 100;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
do {
ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
@@ -508,22 +408,20 @@ static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
return 0;
}
-static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr,
- u8 *mac_addr, u8 *fid, u8 *src_port,
- u8 *timestamp, u16 *entries)
+int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries)
{
- struct ksz8 *ksz8 = dev->priv;
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
- const u8 *regs;
+ const u16 *regs;
u16 ctrl_addr;
u8 data;
int rc;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
- regs = ksz8->regs;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr;
@@ -574,55 +472,61 @@ static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr,
}
static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu)
+ struct alu_struct *alu, bool *valid)
{
- struct ksz8 *ksz8 = dev->priv;
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
u64 data;
+ int ret;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
+ ret = ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data);
+ if (ret)
+ return ret;
- ksz8_r_table(dev, TABLE_STATIC_MAC, addr, &data);
data_hi = data >> 32;
data_lo = (u32)data;
- if (data_hi & (masks[STATIC_MAC_TABLE_VALID] |
- masks[STATIC_MAC_TABLE_OVERRIDE])) {
- alu->mac[5] = (u8)data_lo;
- alu->mac[4] = (u8)(data_lo >> 8);
- alu->mac[3] = (u8)(data_lo >> 16);
- alu->mac[2] = (u8)(data_lo >> 24);
- alu->mac[1] = (u8)data_hi;
- alu->mac[0] = (u8)(data_hi >> 8);
- alu->port_forward =
- (data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >>
- shifts[STATIC_MAC_FWD_PORTS];
- alu->is_override =
- (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
- data_hi >>= 1;
- alu->is_static = true;
- alu->is_use_fid =
- (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
- alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >>
- shifts[STATIC_MAC_FID];
+
+ if (!(data_hi & (masks[STATIC_MAC_TABLE_VALID] |
+ masks[STATIC_MAC_TABLE_OVERRIDE]))) {
+ *valid = false;
return 0;
}
- return -ENXIO;
+
+ alu->mac[5] = (u8)data_lo;
+ alu->mac[4] = (u8)(data_lo >> 8);
+ alu->mac[3] = (u8)(data_lo >> 16);
+ alu->mac[2] = (u8)(data_lo >> 24);
+ alu->mac[1] = (u8)data_hi;
+ alu->mac[0] = (u8)(data_hi >> 8);
+ alu->port_forward =
+ (data_hi & masks[STATIC_MAC_TABLE_FWD_PORTS]) >>
+ shifts[STATIC_MAC_FWD_PORTS];
+ alu->is_override = (data_hi & masks[STATIC_MAC_TABLE_OVERRIDE]) ? 1 : 0;
+ data_hi >>= 1;
+ alu->is_static = true;
+ alu->is_use_fid = (data_hi & masks[STATIC_MAC_TABLE_USE_FID]) ? 1 : 0;
+ alu->fid = (data_hi & masks[STATIC_MAC_TABLE_FID]) >>
+ shifts[STATIC_MAC_FID];
+
+ *valid = true;
+
+ return 0;
}
-static void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu)
+static int ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu)
{
- struct ksz8 *ksz8 = dev->priv;
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
u64 data;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
data_lo = ((u32)alu->mac[2] << 24) |
((u32)alu->mac[3] << 16) |
@@ -642,18 +546,18 @@ static void ksz8_w_sta_mac_table(struct ksz_device *dev, u16 addr,
data_hi &= ~masks[STATIC_MAC_TABLE_OVERRIDE];
data = (u64)data_hi << 32 | data_lo;
- ksz8_w_table(dev, TABLE_STATIC_MAC, addr, data);
+
+ return ksz8_w_table(dev, TABLE_STATIC_MAC, addr, data);
}
static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid,
u8 *member, u8 *valid)
{
- struct ksz8 *ksz8 = dev->priv;
const u8 *shifts;
const u32 *masks;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
*fid = vlan & masks[VLAN_TABLE_FID];
*member = (vlan & masks[VLAN_TABLE_MEMBERSHIP]) >>
@@ -664,12 +568,11 @@ static void ksz8_from_vlan(struct ksz_device *dev, u32 vlan, u8 *fid,
static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid,
u16 *vlan)
{
- struct ksz8 *ksz8 = dev->priv;
const u8 *shifts;
const u32 *masks;
- shifts = ksz8->shifts;
- masks = ksz8->masks;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
*vlan = fid;
*vlan |= (u16)member << shifts[VLAN_TABLE_MEMBERSHIP_S];
@@ -679,12 +582,11 @@ static void ksz8_to_vlan(struct ksz_device *dev, u8 fid, u8 member, u8 valid,
static void ksz8_r_vlan_entries(struct ksz_device *dev, u16 addr)
{
- struct ksz8 *ksz8 = dev->priv;
const u8 *shifts;
u64 data;
int i;
- shifts = ksz8->shifts;
+ shifts = dev->info->shifts;
ksz8_r_table(dev, TABLE_VLAN, addr, &data);
addr *= 4;
@@ -724,21 +626,32 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
ksz8_w_table(dev, TABLE_VLAN, addr, buf);
}
-static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
{
- struct ksz8 *ksz8 = dev->priv;
u8 restart, speed, ctrl, link;
- const u8 *regs = ksz8->regs;
int processed = true;
+ const u16 *regs;
u8 val1, val2;
u16 data = 0;
u8 p = phy;
+ int ret;
+
+ regs = dev->info->regs;
switch (reg) {
case MII_BMCR:
- ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
- ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
- ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
if (restart & PORT_PHY_LOOPBACK)
data |= BMCR_LOOPBACK;
if (ctrl & PORT_FORCE_100_MBIT)
@@ -768,7 +681,10 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= KSZ886X_BMCR_DISABLE_LED;
break;
case MII_BMSR:
- ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ if (ret)
+ return ret;
+
data = BMSR_100FULL |
BMSR_100HALF |
BMSR_10FULL |
@@ -789,7 +705,10 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data = KSZ8795_ID_LO;
break;
case MII_ADVERTISE:
- ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
data = ADVERTISE_CSMA;
if (ctrl & PORT_AUTO_NEG_SYM_PAUSE)
data |= ADVERTISE_PAUSE_CAP;
@@ -803,7 +722,10 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= ADVERTISE_10HALF;
break;
case MII_LPA:
- ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
+ ret = ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
+ if (ret)
+ return ret;
+
data = LPA_SLCT;
if (link & PORT_REMOTE_SYM_PAUSE)
data |= LPA_PAUSE_CAP;
@@ -819,8 +741,14 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= LPA_LPACK;
break;
case PHY_REG_LINK_MD:
- ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
- ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
+ ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
+ if (ret)
+ return ret;
+
if (val1 & PORT_START_CABLE_DIAG)
data |= PHY_START_CABLE_DIAG;
@@ -835,7 +763,10 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, val2));
break;
case PHY_REG_PHY_CTRL:
- ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ if (ret)
+ return ret;
+
if (link & PORT_MDIX_STATUS)
data |= KSZ886X_CTRL_MDIX_STAT;
break;
@@ -845,14 +776,18 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
}
if (processed)
*val = data;
+
+ return 0;
}
-static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
{
- struct ksz8 *ksz8 = dev->priv;
u8 restart, speed, ctrl, data;
- const u8 *regs = ksz8->regs;
+ const u16 *regs;
u8 p = phy;
+ int ret;
+
+ regs = dev->info->regs;
switch (reg) {
case MII_BMCR:
@@ -860,15 +795,26 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
/* Do not support PHY reset function. */
if (val & BMCR_RESET)
break;
- ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
data = speed;
if (val & KSZ886X_BMCR_HP_MDIX)
data |= PORT_HP_MDIX;
else
data &= ~PORT_HP_MDIX;
- if (data != speed)
- ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
- ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+
+ if (data != speed) {
+ ret = ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
+ if (ret)
+ return ret;
+ }
+
+ ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
data = ctrl;
if (ksz_is_ksz88x3(dev)) {
if ((val & BMCR_ANENABLE))
@@ -894,9 +840,17 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_FORCE_FULL_DUPLEX;
else
data &= ~PORT_FORCE_FULL_DUPLEX;
- if (data != ctrl)
- ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
- ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+
+ if (data != ctrl) {
+ ret = ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
+ if (ret)
+ return ret;
+ }
+
+ ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
data = restart;
if (val & KSZ886X_BMCR_DISABLE_LED)
data |= PORT_LED_OFF;
@@ -926,11 +880,19 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_PHY_LOOPBACK;
else
data &= ~PORT_PHY_LOOPBACK;
- if (data != restart)
- ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL], data);
+
+ if (data != restart) {
+ ret = ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL],
+ data);
+ if (ret)
+ return ret;
+ }
break;
case MII_ADVERTISE:
- ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
data = ctrl;
data &= ~(PORT_AUTO_NEG_SYM_PAUSE |
PORT_AUTO_NEG_100BTX_FD |
@@ -947,8 +909,12 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_AUTO_NEG_10BT_FD;
if (val & ADVERTISE_10HALF)
data |= PORT_AUTO_NEG_10BT;
- if (data != ctrl)
- ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
+
+ if (data != ctrl) {
+ ret = ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
+ if (ret)
+ return ret;
+ }
break;
case PHY_REG_LINK_MD:
if (val & PHY_START_CABLE_DIAG)
@@ -957,44 +923,11 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
default:
break;
}
-}
-
-static enum dsa_tag_protocol ksz8_get_tag_protocol(struct dsa_switch *ds,
- int port,
- enum dsa_tag_protocol mp)
-{
- struct ksz_device *dev = ds->priv;
-
- /* ksz88x3 uses the same tag schema as KSZ9893 */
- return ksz_is_ksz88x3(dev) ?
- DSA_TAG_PROTO_KSZ9893 : DSA_TAG_PROTO_KSZ8795;
-}
-
-static u32 ksz8_sw_get_phy_flags(struct dsa_switch *ds, int port)
-{
- /* Silicon Errata Sheet (DS80000830A):
- * Port 1 does not work with LinkMD Cable-Testing.
- * Port 1 does not respond to received PAUSE control frames.
- */
- if (!port)
- return MICREL_KSZ8_P1_ERRATA;
return 0;
}
-static void ksz8_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *buf)
-{
- struct ksz_device *dev = ds->priv;
- int i;
-
- for (i = 0; i < dev->mib_cnt; i++) {
- memcpy(buf + i * ETH_GSTRING_LEN,
- dev->mib_names[i].string, ETH_GSTRING_LEN);
- }
-}
-
-static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
+void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
u8 data;
@@ -1004,65 +937,30 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
}
-static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
- struct ksz_device *dev = ds->priv;
- struct ksz_port *p;
- u8 data;
-
- ksz_pread8(dev, port, P_STP_CTRL, &data);
- data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-
- switch (state) {
- case BR_STATE_DISABLED:
- data |= PORT_LEARN_DISABLE;
- break;
- case BR_STATE_LISTENING:
- data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- break;
- case BR_STATE_LEARNING:
- data |= PORT_RX_ENABLE;
- break;
- case BR_STATE_FORWARDING:
- data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
- break;
- case BR_STATE_BLOCKING:
- data |= PORT_LEARN_DISABLE;
- break;
- default:
- dev_err(ds->dev, "invalid STP state: %d\n", state);
- return;
- }
-
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
-
- p = &dev->ports[port];
- p->stp_state = state;
-
- ksz_update_port_member(dev, port);
-}
-
-static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
+void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
u8 learn[DSA_MAX_PORTS];
int first, index, cnt;
struct ksz_port *p;
+ const u16 *regs;
+
+ regs = dev->info->regs;
- if ((uint)port < dev->port_cnt) {
+ if ((uint)port < dev->info->port_cnt) {
first = port;
cnt = port + 1;
} else {
/* Flush all ports. */
first = 0;
- cnt = dev->port_cnt;
+ cnt = dev->info->port_cnt;
}
for (index = first; index < cnt; index++) {
p = &dev->ports[index];
if (!p->on)
continue;
- ksz_pread8(dev, index, P_STP_CTRL, &learn[index]);
+ ksz_pread8(dev, index, regs[P_STP_CTRL], &learn[index]);
if (!(learn[index] & PORT_LEARN_DISABLE))
- ksz_pwrite8(dev, index, P_STP_CTRL,
+ ksz_pwrite8(dev, index, regs[P_STP_CTRL],
learn[index] | PORT_LEARN_DISABLE);
}
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
@@ -1071,15 +969,142 @@ static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
if (!p->on)
continue;
if (!(learn[index] & PORT_LEARN_DISABLE))
- ksz_pwrite8(dev, index, P_STP_CTRL, learn[index]);
+ ksz_pwrite8(dev, index, regs[P_STP_CTRL], learn[index]);
}
}
-static int ksz8_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag,
- struct netlink_ext_ack *extack)
+int ksz8_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
{
- struct ksz_device *dev = ds->priv;
+ int ret = 0;
+ u16 i = 0;
+ u16 entries = 0;
+ u8 timestamp = 0;
+ u8 fid;
+ u8 src_port;
+ u8 mac[ETH_ALEN];
+
+ do {
+ ret = ksz8_r_dyn_mac_table(dev, i, mac, &fid, &src_port,
+ &timestamp, &entries);
+ if (!ret && port == src_port) {
+ ret = cb(mac, fid, false, data);
+ if (ret)
+ break;
+ }
+ i++;
+ } while (i < entries);
+ if (i >= entries)
+ ret = 0;
+ return ret;
+}
+
+static int ksz8_add_sta_mac(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct alu_struct alu;
+ int index, ret;
+ int empty = 0;
+
+ alu.port_forward = 0;
+ for (index = 0; index < dev->info->num_statics; index++) {
+ bool valid;
+
+ ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid);
+ if (ret)
+ return ret;
+ if (!valid) {
+ /* Remember the first empty entry. */
+ if (!empty)
+ empty = index + 1;
+ continue;
+ }
+
+ if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid)
+ break;
+ }
+
+ /* no available entry */
+ if (index == dev->info->num_statics && !empty)
+ return -ENOSPC;
+
+ /* add entry */
+ if (index == dev->info->num_statics) {
+ index = empty - 1;
+ memset(&alu, 0, sizeof(alu));
+ memcpy(alu.mac, addr, ETH_ALEN);
+ alu.is_static = true;
+ }
+ alu.port_forward |= BIT(port);
+ if (vid) {
+ alu.is_use_fid = true;
+
+ /* Need a way to map VID to FID. */
+ alu.fid = vid;
+ }
+
+ return ksz8_w_sta_mac_table(dev, index, &alu);
+}
+
+static int ksz8_del_sta_mac(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct alu_struct alu;
+ int index, ret;
+
+ for (index = 0; index < dev->info->num_statics; index++) {
+ bool valid;
+
+ ret = ksz8_r_sta_mac_table(dev, index, &alu, &valid);
+ if (ret)
+ return ret;
+ if (!valid)
+ continue;
+
+ if (!memcmp(alu.mac, addr, ETH_ALEN) && alu.fid == vid)
+ break;
+ }
+
+ /* no available entry */
+ if (index == dev->info->num_statics)
+ return 0;
+
+ /* clear port */
+ alu.port_forward &= ~BIT(port);
+ if (!alu.port_forward)
+ alu.is_static = false;
+
+ return ksz8_w_sta_mac_table(dev, index, &alu);
+}
+
+int ksz8_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ return ksz8_add_sta_mac(dev, port, mdb->addr, mdb->vid);
+}
+
+int ksz8_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
+{
+ return ksz8_del_sta_mac(dev, port, mdb->addr, mdb->vid);
+}
+
+int ksz8_fdb_add(struct ksz_device *dev, int port, const unsigned char *addr,
+ u16 vid, struct dsa_db db)
+{
+ return ksz8_add_sta_mac(dev, port, addr, vid);
+}
+
+int ksz8_fdb_del(struct ksz_device *dev, int port, const unsigned char *addr,
+ u16 vid, struct dsa_db db)
+{
+ return ksz8_del_sta_mac(dev, port, addr, vid);
+}
+
+int ksz8_port_vlan_filtering(struct ksz_device *dev, int port, bool flag,
+ struct netlink_ext_ack *extack)
+{
if (ksz_is_ksz88x3(dev))
return -ENOTSUPP;
@@ -1104,12 +1129,11 @@ static void ksz8_port_enable_pvid(struct ksz_device *dev, int port, bool state)
}
}
-static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
+int ksz8_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
u16 data, new_pvid = 0;
u8 fid, member, valid;
@@ -1130,7 +1154,7 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
* Remove Tag flag to be changed, unless there are no
* other VLANs currently configured.
*/
- for (vid = 1; vid < dev->num_vlans; ++vid) {
+ for (vid = 1; vid < dev->info->num_vlans; ++vid) {
/* Skip the VID we are going to add or reconfigure */
if (vid == vlan->vid)
continue;
@@ -1177,10 +1201,9 @@ static int ksz8_port_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int ksz8_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
- struct ksz_device *dev = ds->priv;
u16 data, pvid;
u8 fid, member, valid;
@@ -1210,12 +1233,10 @@ static int ksz8_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz8_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+int ksz8_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
-
if (ingress) {
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
dev->mirror_rx |= BIT(port);
@@ -1234,10 +1255,9 @@ static int ksz8_port_mirror_add(struct dsa_switch *ds, int port,
return 0;
}
-static void ksz8_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
+void ksz8_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
{
- struct ksz_device *dev = ds->priv;
u8 data;
if (mirror->ingress) {
@@ -1258,7 +1278,6 @@ static void ksz8_port_mirror_del(struct dsa_switch *ds, int port,
static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
- u8 data8;
if (!p->interface && dev->compat_interface) {
dev_warn(dev->dev,
@@ -1267,50 +1286,15 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
port);
p->interface = dev->compat_interface;
}
-
- /* Configure MII interface for proper network communication. */
- ksz_read8(dev, REG_PORT_5_CTRL_6, &data8);
- data8 &= ~PORT_INTERFACE_TYPE;
- data8 &= ~PORT_GMII_1GPS_MODE;
- switch (p->interface) {
- case PHY_INTERFACE_MODE_MII:
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_RMII:
- data8 |= PORT_INTERFACE_RMII;
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_GMII:
- data8 |= PORT_GMII_1GPS_MODE;
- data8 |= PORT_INTERFACE_GMII;
- p->phydev.speed = SPEED_1000;
- break;
- default:
- data8 &= ~PORT_RGMII_ID_IN_ENABLE;
- data8 &= ~PORT_RGMII_ID_OUT_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- data8 |= PORT_RGMII_ID_IN_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- data8 |= PORT_RGMII_ID_OUT_ENABLE;
- data8 |= PORT_GMII_1GPS_MODE;
- data8 |= PORT_INTERFACE_RGMII;
- p->phydev.speed = SPEED_1000;
- break;
- }
- ksz_write8(dev, REG_PORT_5_CTRL_6, data8);
- p->phydev.duplex = 1;
}
-static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct dsa_switch *ds = dev->ds;
- struct ksz8 *ksz8 = dev->priv;
const u32 *masks;
u8 member;
- masks = ksz8->masks;
+ masks = dev->info->masks;
/* enable broadcast storm limit */
ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
@@ -1340,20 +1324,18 @@ static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz8_cfg_port_member(dev, port, member);
}
-static void ksz8_config_cpu_port(struct dsa_switch *ds)
+void ksz8_config_cpu_port(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
- struct ksz8 *ksz8 = dev->priv;
- const u8 *regs = ksz8->regs;
struct ksz_port *p;
const u32 *masks;
+ const u16 *regs;
u8 remote;
int i;
- masks = ksz8->masks;
+ masks = dev->info->masks;
+ regs = dev->info->regs;
- /* Switch marks the maximum frame with extra byte as oversize. */
- ksz_cfg(dev, REG_SW_CTRL_2, SW_LEGAL_PACKET_DISABLE, true);
ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true);
p = &dev->ports[dev->cpu_port];
@@ -1364,13 +1346,12 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
- ksz8_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+ ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
/* Last port may be disabled. */
if (i == dev->phy_port_cnt)
break;
p->on = 1;
- p->phy = 1;
}
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
@@ -1378,34 +1359,65 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
continue;
if (!ksz_is_ksz88x3(dev)) {
ksz_pread8(dev, i, regs[P_REMOTE_STATUS], &remote);
- if (remote & PORT_FIBER_MODE)
+ if (remote & KSZ8_PORT_FIBER_MODE)
p->fiber = 1;
}
if (p->fiber)
- ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL,
- true);
+ ksz_port_cfg(dev, i, regs[P_STP_CTRL],
+ PORT_FORCE_FLOW_CTRL, true);
else
- ksz_port_cfg(dev, i, P_STP_CTRL, PORT_FORCE_FLOW_CTRL,
- false);
+ ksz_port_cfg(dev, i, regs[P_STP_CTRL],
+ PORT_FORCE_FLOW_CTRL, false);
}
}
-static int ksz8_setup(struct dsa_switch *ds)
+static int ksz8_handle_global_errata(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ int ret = 0;
+
+ /* KSZ87xx Errata DS80000687C.
+ * Module 2: Link drops with some EEE link partners.
+ * An issue with the EEE next page exchange between the
+ * KSZ879x/KSZ877x/KSZ876x and some EEE link partners may result in
+ * the link dropping.
+ */
+ if (dev->info->ksz87xx_eee_link_erratum)
+ ret = ksz8_ind_write8(dev, TABLE_EEE, REG_IND_EEE_GLOB2_HI, 0);
+
+ return ret;
+}
+
+int ksz8_enable_stp_addr(struct ksz_device *dev)
+{
struct alu_struct alu;
- int i, ret = 0;
- dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
- dev->num_vlans, GFP_KERNEL);
- if (!dev->vlan_cache)
- return -ENOMEM;
+ /* Setup STP address for STP operation. */
+ memset(&alu, 0, sizeof(alu));
+ ether_addr_copy(alu.mac, eth_stp_addr);
+ alu.is_static = true;
+ alu.is_override = true;
+ alu.port_forward = dev->info->cpu_ports;
- ret = ksz8_reset_switch(dev);
- if (ret) {
- dev_err(ds->dev, "failed to reset switch\n");
- return ret;
- }
+ return ksz8_w_sta_mac_table(dev, 0, &alu);
+}
+
+int ksz8_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int i;
+
+ ds->mtu_enforcement_ingress = true;
+
+ /* We rely on software untagging on the CPU port, so that we
+ * can support both tagged and untagged VLANs
+ */
+ ds->untag_bridge_pvid = true;
+
+ /* VLAN filtering is partly controlled by the global VLAN
+ * Enable flag
+ */
+ ds->vlan_filtering_is_global = true;
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_FLOW_CTRL, true);
@@ -1424,10 +1436,6 @@ static int ksz8_setup(struct dsa_switch *ds)
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP,
UNICAST_VLAN_BOUNDARY | NO_EXC_COLLISION_DROP);
- ksz8_config_cpu_port(ds);
-
- ksz_cfg(dev, REG_SW_CTRL_2, MULTICAST_STORM_DISABLE, true);
-
ksz_cfg(dev, S_REPLACE_VID_CTRL, SW_REPLACE_VID, false);
ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
@@ -1435,52 +1443,16 @@ static int ksz8_setup(struct dsa_switch *ds)
if (!ksz_is_ksz88x3(dev))
ksz_cfg(dev, REG_SW_CTRL_19, SW_INS_TAG_ENABLE, true);
- /* set broadcast storm protection 10% rate */
- regmap_update_bits(dev->regmap[1], S_REPLACE_VID_CTRL,
- BROADCAST_STORM_RATE,
- (BROADCAST_STORM_VALUE *
- BROADCAST_STORM_PROT_RATE) / 100);
-
- for (i = 0; i < (dev->num_vlans / 4); i++)
+ for (i = 0; i < (dev->info->num_vlans / 4); i++)
ksz8_r_vlan_entries(dev, i);
- /* Setup STP address for STP operation. */
- memset(&alu, 0, sizeof(alu));
- ether_addr_copy(alu.mac, eth_stp_addr);
- alu.is_static = true;
- alu.is_override = true;
- alu.port_forward = dev->host_mask;
-
- ksz8_w_sta_mac_table(dev, 0, &alu);
-
- ksz_init_mib_timer(dev);
-
- ds->configure_vlan_while_not_filtering = false;
-
- return 0;
+ return ksz8_handle_global_errata(ds);
}
-static void ksz8_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+void ksz8_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- struct ksz_device *dev = ds->priv;
-
- if (port == dev->cpu_port) {
- if (state->interface != PHY_INTERFACE_MODE_RMII &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_NA)
- goto unsupported;
- } else {
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_NA)
- goto unsupported;
- }
-
- /* Allow all the expected bits */
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
+ config->mac_capabilities = MAC_10 | MAC_100;
/* Silicon Errata Sheet (DS80000830A):
* "Port 1 does not respond to received flow control PAUSE frames"
@@ -1488,277 +1460,32 @@ static void ksz8_validate(struct dsa_switch *ds, int port,
* switches.
*/
if (!ksz_is_ksz88x3(dev) || port)
- phylink_set(mask, Pause);
+ config->mac_capabilities |= MAC_SYM_PAUSE;
/* Asym pause is not supported on KSZ8863 and KSZ8873 */
if (!ksz_is_ksz88x3(dev))
- phylink_set(mask, Asym_Pause);
-
- /* 10M and 100M are only supported */
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface: %s, port: %d\n",
- phy_modes(state->interface), port);
+ config->mac_capabilities |= MAC_ASYM_PAUSE;
}
-static const struct dsa_switch_ops ksz8_switch_ops = {
- .get_tag_protocol = ksz8_get_tag_protocol,
- .get_phy_flags = ksz8_sw_get_phy_flags,
- .setup = ksz8_setup,
- .phy_read = ksz_phy_read16,
- .phy_write = ksz_phy_write16,
- .phylink_validate = ksz8_validate,
- .phylink_mac_link_down = ksz_mac_link_down,
- .port_enable = ksz_enable_port,
- .get_strings = ksz8_get_strings,
- .get_ethtool_stats = ksz_get_ethtool_stats,
- .get_sset_count = ksz_sset_count,
- .port_bridge_join = ksz_port_bridge_join,
- .port_bridge_leave = ksz_port_bridge_leave,
- .port_stp_state_set = ksz8_port_stp_state_set,
- .port_fast_age = ksz_port_fast_age,
- .port_vlan_filtering = ksz8_port_vlan_filtering,
- .port_vlan_add = ksz8_port_vlan_add,
- .port_vlan_del = ksz8_port_vlan_del,
- .port_fdb_dump = ksz_port_fdb_dump,
- .port_mdb_add = ksz_port_mdb_add,
- .port_mdb_del = ksz_port_mdb_del,
- .port_mirror_add = ksz8_port_mirror_add,
- .port_mirror_del = ksz8_port_mirror_del,
-};
-
-static u32 ksz8_get_port_addr(int port, int offset)
+u32 ksz8_get_port_addr(int port, int offset)
{
return PORT_CTRL_ADDR(port, offset);
}
-static int ksz8_switch_detect(struct ksz_device *dev)
-{
- u8 id1, id2;
- u16 id16;
- int ret;
-
- /* read chip id */
- ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
- if (ret)
- return ret;
-
- id1 = id16 >> 8;
- id2 = id16 & SW_CHIP_ID_M;
-
- switch (id1) {
- case KSZ87_FAMILY_ID:
- if ((id2 != CHIP_ID_94 && id2 != CHIP_ID_95))
- return -ENODEV;
-
- if (id2 == CHIP_ID_95) {
- u8 val;
-
- id2 = 0x95;
- ksz_read8(dev, REG_PORT_STATUS_0, &val);
- if (val & PORT_FIBER_MODE)
- id2 = 0x65;
- } else if (id2 == CHIP_ID_94) {
- id2 = 0x94;
- }
- break;
- case KSZ88_FAMILY_ID:
- if (id2 != CHIP_ID_63)
- return -ENODEV;
- break;
- default:
- dev_err(dev->dev, "invalid family id: %d\n", id1);
- return -ENODEV;
- }
- id16 &= ~0xff;
- id16 |= id2;
- dev->chip_id = id16;
-
- return 0;
-}
-
-struct ksz_chip_data {
- u16 chip_id;
- const char *dev_name;
- int num_vlans;
- int num_alus;
- int num_statics;
- int cpu_ports;
- int port_cnt;
-};
-
-static const struct ksz_chip_data ksz8_switch_chips[] = {
- {
- .chip_id = 0x8795,
- .dev_name = "KSZ8795",
- .num_vlans = 4096,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x10, /* can be configured as cpu port */
- .port_cnt = 5, /* total cpu and user ports */
- },
- {
- /*
- * WARNING
- * =======
- * KSZ8794 is similar to KSZ8795, except the port map
- * contains a gap between external and CPU ports, the
- * port map is NOT continuous. The per-port register
- * map is shifted accordingly too, i.e. registers at
- * offset 0x40 are NOT used on KSZ8794 and they ARE
- * used on KSZ8795 for external port 3.
- * external cpu
- * KSZ8794 0,1,2 4
- * KSZ8795 0,1,2,3 4
- * KSZ8765 0,1,2,3 4
- */
- .chip_id = 0x8794,
- .dev_name = "KSZ8794",
- .num_vlans = 4096,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x10, /* can be configured as cpu port */
- .port_cnt = 4, /* total cpu and user ports */
- },
- {
- .chip_id = 0x8765,
- .dev_name = "KSZ8765",
- .num_vlans = 4096,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x10, /* can be configured as cpu port */
- .port_cnt = 5, /* total cpu and user ports */
- },
- {
- .chip_id = 0x8830,
- .dev_name = "KSZ8863/KSZ8873",
- .num_vlans = 16,
- .num_alus = 0,
- .num_statics = 8,
- .cpu_ports = 0x4, /* can be configured as cpu port */
- .port_cnt = 3,
- },
-};
-
-static int ksz8_switch_init(struct ksz_device *dev)
+int ksz8_switch_init(struct ksz_device *dev)
{
- struct ksz8 *ksz8 = dev->priv;
- int i;
-
- dev->ds->ops = &ksz8_switch_ops;
-
- for (i = 0; i < ARRAY_SIZE(ksz8_switch_chips); i++) {
- const struct ksz_chip_data *chip = &ksz8_switch_chips[i];
-
- if (dev->chip_id == chip->chip_id) {
- dev->name = chip->dev_name;
- dev->num_vlans = chip->num_vlans;
- dev->num_alus = chip->num_alus;
- dev->num_statics = chip->num_statics;
- dev->port_cnt = fls(chip->cpu_ports);
- dev->cpu_port = fls(chip->cpu_ports) - 1;
- dev->phy_port_cnt = dev->port_cnt - 1;
- dev->cpu_ports = chip->cpu_ports;
- dev->host_mask = chip->cpu_ports;
- dev->port_mask = (BIT(dev->phy_port_cnt) - 1) |
- chip->cpu_ports;
- break;
- }
- }
-
- /* no switch found */
- if (!dev->cpu_ports)
- return -ENODEV;
-
- if (ksz_is_ksz88x3(dev)) {
- ksz8->regs = ksz8863_regs;
- ksz8->masks = ksz8863_masks;
- ksz8->shifts = ksz8863_shifts;
- dev->mib_cnt = ARRAY_SIZE(ksz88xx_mib_names);
- dev->mib_names = ksz88xx_mib_names;
- } else {
- ksz8->regs = ksz8795_regs;
- ksz8->masks = ksz8795_masks;
- ksz8->shifts = ksz8795_shifts;
- dev->mib_cnt = ARRAY_SIZE(ksz87xx_mib_names);
- dev->mib_names = ksz87xx_mib_names;
- }
-
- dev->reg_mib_cnt = MIB_COUNTER_NUM;
-
- dev->ports = devm_kzalloc(dev->dev,
- dev->port_cnt * sizeof(struct ksz_port),
- GFP_KERNEL);
- if (!dev->ports)
- return -ENOMEM;
- for (i = 0; i < dev->port_cnt; i++) {
- mutex_init(&dev->ports[i].mib.cnt_mutex);
- dev->ports[i].mib.counters =
- devm_kzalloc(dev->dev,
- sizeof(u64) *
- (dev->mib_cnt + 1),
- GFP_KERNEL);
- if (!dev->ports[i].mib.counters)
- return -ENOMEM;
- }
-
- /* set the real number of ports */
- dev->ds->num_ports = dev->port_cnt;
-
- /* We rely on software untagging on the CPU port, so that we
- * can support both tagged and untagged VLANs
- */
- dev->ds->untag_bridge_pvid = true;
-
- /* VLAN filtering is partly controlled by the global VLAN
- * Enable flag
- */
- dev->ds->vlan_filtering_is_global = true;
+ dev->cpu_port = fls(dev->info->cpu_ports) - 1;
+ dev->phy_port_cnt = dev->info->port_cnt - 1;
+ dev->port_mask = (BIT(dev->phy_port_cnt) - 1) | dev->info->cpu_ports;
return 0;
}
-static void ksz8_switch_exit(struct ksz_device *dev)
+void ksz8_switch_exit(struct ksz_device *dev)
{
ksz8_reset_switch(dev);
}
-static const struct ksz_dev_ops ksz8_dev_ops = {
- .get_port_addr = ksz8_get_port_addr,
- .cfg_port_member = ksz8_cfg_port_member,
- .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
- .port_setup = ksz8_port_setup,
- .r_phy = ksz8_r_phy,
- .w_phy = ksz8_w_phy,
- .r_dyn_mac_table = ksz8_r_dyn_mac_table,
- .r_sta_mac_table = ksz8_r_sta_mac_table,
- .w_sta_mac_table = ksz8_w_sta_mac_table,
- .r_mib_cnt = ksz8_r_mib_cnt,
- .r_mib_pkt = ksz8_r_mib_pkt,
- .freeze_mib = ksz8_freeze_mib,
- .port_init_cnt = ksz8_port_init_cnt,
- .shutdown = ksz8_reset_switch,
- .detect = ksz8_switch_detect,
- .init = ksz8_switch_init,
- .exit = ksz8_switch_exit,
-};
-
-int ksz8_switch_register(struct ksz_device *dev)
-{
- return ksz_switch_register(dev, &ksz8_dev_ops);
-}
-EXPORT_SYMBOL(ksz8_switch_register);
-
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch DSA Driver");
MODULE_LICENSE("GPL");
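
A recurring pattern in the ksz8795.c hunks above is the conversion of void register helpers (ksz8_r_table(), ksz8_w_table(), ksz8_r_phy(), ksz8_w_phy(), ...) into int-returning ones that check every ksz_read*/ksz_write* access, propagate the first error and release the ALU mutex on every exit path. The following is a minimal userspace sketch of that conversion pattern only; the bus_write16()/bus_read64() stubs and the register values are illustrative stand-ins, not the driver's actual accessors.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the regmap-backed accessors; the real driver
 * calls ksz_write16()/ksz_read64(), which already return an error code. */
static int bus_write16(uint16_t reg, uint16_t val)
{
	(void)reg; (void)val;
	return 0;
}

static int bus_read64(uint16_t reg, uint64_t *val)
{
	(void)reg;
	*val = 0x1122334455667788ULL;
	return 0;
}

static pthread_mutex_t alu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* The conversion pattern: check each access, return the first error seen,
 * and always drop the mutex on the way out via a single unlock label. */
static int read_table(uint16_t ctrl_reg, uint16_t data_reg,
		      uint16_t ctrl_val, uint64_t *data)
{
	int ret;

	pthread_mutex_lock(&alu_mutex);

	ret = bus_write16(ctrl_reg, ctrl_val);
	if (ret)
		goto unlock;

	ret = bus_read64(data_reg, data);
unlock:
	pthread_mutex_unlock(&alu_mutex);

	return ret;
}

int main(void)
{
	uint64_t data = 0;
	/* Register numbers here are arbitrary example values. */
	int ret = read_table(0x30, 0x26, 0x0410, &data);

	printf("read_table() -> %d, data = 0x%016llx\n",
	       ret, (unsigned long long)data);
	return ret ? 1 : 0;
}
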
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index 6b40bc25f7ff..7a57c6088f80 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -14,22 +14,8 @@
#define KS_PRIO_M 0x3
#define KS_PRIO_S 2
-#define REG_CHIP_ID0 0x00
-
-#define KSZ87_FAMILY_ID 0x87
-#define KSZ88_FAMILY_ID 0x88
-
-#define REG_CHIP_ID1 0x01
-
-#define SW_CHIP_ID_M 0xF0
-#define SW_CHIP_ID_S 4
#define SW_REVISION_M 0x0E
#define SW_REVISION_S 1
-#define SW_START 0x01
-
-#define CHIP_ID_94 0x60
-#define CHIP_ID_95 0x90
-#define CHIP_ID_63 0x30
#define KSZ8863_REG_SW_RESET 0x43
@@ -57,12 +43,14 @@
#define REG_SW_CTRL_2 0x04
#define UNICAST_VLAN_BOUNDARY BIT(7)
-#define MULTICAST_STORM_DISABLE BIT(6)
#define SW_BACK_PRESSURE BIT(5)
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
#define SW_LEGAL_PACKET_DISABLE BIT(1)
+#define KSZ8863_HUGE_PACKET_ENABLE BIT(2)
+#define KSZ8863_LEGAL_PACKET_ENABLE BIT(1)
+
#define REG_SW_CTRL_3 0x05
#define WEIGHTED_FAIR_QUEUE_ENABLE BIT(3)
@@ -77,13 +65,9 @@
#define SW_FLOW_CTRL BIT(5)
#define SW_10_MBIT BIT(4)
#define SW_REPLACE_VID BIT(3)
-#define BROADCAST_STORM_RATE_HI 0x07
#define REG_SW_CTRL_5 0x07
-#define BROADCAST_STORM_RATE_LO 0xFF
-#define BROADCAST_STORM_RATE 0x07FF
-
#define REG_SW_CTRL_6 0x08
#define SW_MIB_COUNTER_FLUSH BIT(7)
@@ -160,9 +144,6 @@
#define PORT_DISCARD_NON_VID BIT(5)
#define PORT_FORCE_FLOW_CTRL BIT(4)
#define PORT_BACK_PRESSURE BIT(3)
-#define PORT_TX_ENABLE BIT(2)
-#define PORT_RX_ENABLE BIT(1)
-#define PORT_LEARN_DISABLE BIT(0)
#define REG_PORT_1_CTRL_3 0x13
#define REG_PORT_2_CTRL_3 0x23
@@ -192,15 +173,7 @@
#define REG_PORT_5_CTRL_6 0x56
#define PORT_MII_INTERNAL_CLOCK BIT(7)
-#define PORT_GMII_1GPS_MODE BIT(6)
-#define PORT_RGMII_ID_IN_ENABLE BIT(4)
-#define PORT_RGMII_ID_OUT_ENABLE BIT(3)
#define PORT_GMII_MAC_MODE BIT(2)
-#define PORT_INTERFACE_TYPE 0x3
-#define PORT_INTERFACE_MII 0
-#define PORT_INTERFACE_RMII 1
-#define PORT_INTERFACE_GMII 2
-#define PORT_INTERFACE_RGMII 3
#define REG_PORT_1_CTRL_7 0x17
#define REG_PORT_2_CTRL_7 0x27
@@ -220,8 +193,6 @@
#define REG_PORT_4_STATUS_0 0x48
/* For KSZ8765. */
-#define PORT_FIBER_MODE BIT(7)
-
#define PORT_REMOTE_ASYM_PAUSE BIT(5)
#define PORT_REMOTE_SYM_PAUSE BIT(4)
#define PORT_REMOTE_100BTX_FD BIT(3)
@@ -325,7 +296,6 @@
#define REG_PORT_CTRL_5 0x05
-#define REG_PORT_STATUS_0 0x08
#define REG_PORT_STATUS_1 0x09
#define REG_PORT_LINK_MD_CTRL 0x0A
#define REG_PORT_LINK_MD_RESULT 0x0B
@@ -791,7 +761,6 @@
#define P_TAG_CTRL REG_PORT_CTRL_0
#define P_MIRROR_CTRL REG_PORT_CTRL_1
#define P_802_1P_CTRL REG_PORT_CTRL_2
-#define P_STP_CTRL REG_PORT_CTRL_2
#define P_PASS_ALL_CTRL REG_PORT_CTRL_12
#define P_INS_SRC_PVID_CTRL REG_PORT_CTRL_12
#define P_DROP_TAG_CTRL REG_PORT_CTRL_13
@@ -812,11 +781,9 @@
#define IND_ACC_TABLE(table) ((table) << 8)
-/* Driver set switch broadcast storm protection at 10% rate. */
-#define BROADCAST_STORM_PROT_RATE 10
-
-/* 148,800 frames * 67 ms / 100 */
-#define BROADCAST_STORM_VALUE 9969
+/* Indirect EEE register offsets used for the KSZ87xx EEE erratum workaround. */
+#define REG_IND_EEE_GLOB2_LO 0x34
+#define REG_IND_EEE_GLOB2_HI 0x35
/**
* MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
deleted file mode 100644
index 866767b70d65..000000000000
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ /dev/null
@@ -1,142 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Microchip KSZ8795 series register access through SPI
- *
- * Copyright (C) 2017 Microchip Technology Inc.
- * Tristram Ha <Tristram.Ha@microchip.com>
- */
-
-#include <asm/unaligned.h>
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/spi/spi.h>
-
-#include "ksz8.h"
-#include "ksz_common.h"
-
-#define KSZ8795_SPI_ADDR_SHIFT 12
-#define KSZ8795_SPI_ADDR_ALIGN 3
-#define KSZ8795_SPI_TURNAROUND_SHIFT 1
-
-#define KSZ8863_SPI_ADDR_SHIFT 8
-#define KSZ8863_SPI_ADDR_ALIGN 8
-#define KSZ8863_SPI_TURNAROUND_SHIFT 0
-
-KSZ_REGMAP_TABLE(ksz8795, 16, KSZ8795_SPI_ADDR_SHIFT,
- KSZ8795_SPI_TURNAROUND_SHIFT, KSZ8795_SPI_ADDR_ALIGN);
-
-KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT,
- KSZ8863_SPI_TURNAROUND_SHIFT, KSZ8863_SPI_ADDR_ALIGN);
-
-static int ksz8795_spi_probe(struct spi_device *spi)
-{
- const struct regmap_config *regmap_config;
- struct device *ddev = &spi->dev;
- struct regmap_config rc;
- struct ksz_device *dev;
- struct ksz8 *ksz8;
- int i, ret = 0;
-
- ksz8 = devm_kzalloc(&spi->dev, sizeof(struct ksz8), GFP_KERNEL);
- if (!ksz8)
- return -ENOMEM;
-
- ksz8->priv = spi;
-
- dev = ksz_switch_alloc(&spi->dev, ksz8);
- if (!dev)
- return -ENOMEM;
-
- regmap_config = device_get_match_data(ddev);
- if (!regmap_config)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
- rc = regmap_config[i];
- rc.lock_arg = &dev->regmap_mutex;
- dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
- if (IS_ERR(dev->regmap[i])) {
- ret = PTR_ERR(dev->regmap[i]);
- dev_err(&spi->dev,
- "Failed to initialize regmap%i: %d\n",
- regmap_config[i].val_bits, ret);
- return ret;
- }
- }
-
- if (spi->dev.platform_data)
- dev->pdata = spi->dev.platform_data;
-
- /* setup spi */
- spi->mode = SPI_MODE_3;
- ret = spi_setup(spi);
- if (ret)
- return ret;
-
- ret = ksz8_switch_register(dev);
-
- /* Main DSA driver may not be started yet. */
- if (ret)
- return ret;
-
- spi_set_drvdata(spi, dev);
-
- return 0;
-}
-
-static int ksz8795_spi_remove(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (dev)
- ksz_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
-
- return 0;
-}
-
-static void ksz8795_spi_shutdown(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (!dev)
- return;
-
- if (dev->dev_ops->shutdown)
- dev->dev_ops->shutdown(dev);
-
- dsa_switch_shutdown(dev->ds);
-
- spi_set_drvdata(spi, NULL);
-}
-
-static const struct of_device_id ksz8795_dt_ids[] = {
- { .compatible = "microchip,ksz8765", .data = &ksz8795_regmap_config },
- { .compatible = "microchip,ksz8794", .data = &ksz8795_regmap_config },
- { .compatible = "microchip,ksz8795", .data = &ksz8795_regmap_config },
- { .compatible = "microchip,ksz8863", .data = &ksz8863_regmap_config },
- { .compatible = "microchip,ksz8873", .data = &ksz8863_regmap_config },
- {},
-};
-MODULE_DEVICE_TABLE(of, ksz8795_dt_ids);
-
-static struct spi_driver ksz8795_spi_driver = {
- .driver = {
- .name = "ksz8795-switch",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(ksz8795_dt_ids),
- },
- .probe = ksz8795_spi_probe,
- .remove = ksz8795_spi_remove,
- .shutdown = ksz8795_spi_shutdown,
-};
-
-module_spi_driver(ksz8795_spi_driver);
-
-MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
-MODULE_DESCRIPTION("Microchip KSZ8795 Series Switch SPI Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index 5883fa7edda2..3698112138b7 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -26,11 +26,9 @@ static int ksz8863_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
struct mdio_device *mdev;
u8 reg = *(u8 *)reg_buf;
u8 *val = val_buf;
- struct ksz8 *ksz8;
int i, ret = 0;
- ksz8 = dev->priv;
- mdev = ksz8->priv;
+ mdev = dev->priv;
mutex_lock_nested(&mdev->bus->mdio_lock, MDIO_MUTEX_NESTED);
for (i = 0; i < val_len; i++) {
@@ -55,13 +53,11 @@ static int ksz8863_mdio_write(void *ctx, const void *data, size_t count)
{
struct ksz_device *dev = ctx;
struct mdio_device *mdev;
- struct ksz8 *ksz8;
int i, ret = 0;
u32 reg;
u8 *val;
- ksz8 = dev->priv;
- mdev = ksz8->priv;
+ mdev = dev->priv;
val = (u8 *)(data + 4);
reg = *(u32 *)data;
@@ -86,22 +82,16 @@ static const struct regmap_bus regmap_smi[] = {
{
.read = ksz8863_mdio_read,
.write = ksz8863_mdio_write,
- .max_raw_read = 1,
- .max_raw_write = 1,
},
{
.read = ksz8863_mdio_read,
.write = ksz8863_mdio_write,
.val_format_endian_default = REGMAP_ENDIAN_BIG,
- .max_raw_read = 2,
- .max_raw_write = 2,
},
{
.read = ksz8863_mdio_read,
.write = ksz8863_mdio_write,
.val_format_endian_default = REGMAP_ENDIAN_BIG,
- .max_raw_read = 4,
- .max_raw_write = 4,
}
};
@@ -112,7 +102,6 @@ static const struct regmap_config ksz8863_regmap_config[] = {
.pad_bits = 24,
.val_bits = 8,
.cache_type = REGCACHE_NONE,
- .use_single_read = 1,
.lock = ksz_regmap_lock,
.unlock = ksz_regmap_unlock,
},
@@ -122,7 +111,6 @@ static const struct regmap_config ksz8863_regmap_config[] = {
.pad_bits = 24,
.val_bits = 16,
.cache_type = REGCACHE_NONE,
- .use_single_read = 1,
.lock = ksz_regmap_lock,
.unlock = ksz_regmap_unlock,
},
@@ -132,7 +120,6 @@ static const struct regmap_config ksz8863_regmap_config[] = {
.pad_bits = 24,
.val_bits = 32,
.cache_type = REGCACHE_NONE,
- .use_single_read = 1,
.lock = ksz_regmap_lock,
.unlock = ksz_regmap_unlock,
}
@@ -142,17 +129,10 @@ static int ksz8863_smi_probe(struct mdio_device *mdiodev)
{
struct regmap_config rc;
struct ksz_device *dev;
- struct ksz8 *ksz8;
int ret;
int i;
- ksz8 = devm_kzalloc(&mdiodev->dev, sizeof(struct ksz8), GFP_KERNEL);
- if (!ksz8)
- return -ENOMEM;
-
- ksz8->priv = mdiodev;
-
- dev = ksz_switch_alloc(&mdiodev->dev, ksz8);
+ dev = ksz_switch_alloc(&mdiodev->dev, mdiodev);
if (!dev)
return -ENOMEM;
@@ -163,18 +143,17 @@ static int ksz8863_smi_probe(struct mdio_device *mdiodev)
&regmap_smi[i], dev,
&rc);
if (IS_ERR(dev->regmap[i])) {
- ret = PTR_ERR(dev->regmap[i]);
- dev_err(&mdiodev->dev,
- "Failed to initialize regmap%i: %d\n",
- ksz8863_regmap_config[i].val_bits, ret);
- return ret;
+ return dev_err_probe(&mdiodev->dev,
+ PTR_ERR(dev->regmap[i]),
+ "Failed to initialize regmap%i\n",
+ ksz8863_regmap_config[i].val_bits);
}
}
if (mdiodev->dev.platform_data)
dev->pdata = mdiodev->dev.platform_data;
- ret = ksz8_switch_register(dev);
+ ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
@@ -191,8 +170,6 @@ static void ksz8863_smi_remove(struct mdio_device *mdiodev)
if (dev)
ksz_switch_remove(dev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
@@ -206,8 +183,14 @@ static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
}
static const struct of_device_id ksz8863_dt_ids[] = {
- { .compatible = "microchip,ksz8863" },
- { .compatible = "microchip,ksz8873" },
+ {
+ .compatible = "microchip,ksz8863",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
+ {
+ .compatible = "microchip,ksz8873",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
{ },
};
MODULE_DEVICE_TABLE(of, ksz8863_dt_ids);
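
The ksz8863_smi.c hunk above changes the OF match table so that each compatible entry carries a pointer into the shared ksz_switch_chips[] array instead of relying on a driver-private ksz8 structure, letting common probe code pick up the per-chip data from the match entry. Below is a small, self-contained sketch of that compatible-string to chip-descriptor lookup; the struct names and the get_match_data() helper are illustrative stand-ins for the kernel's of_device_id/device_get_match_data() machinery, with the port and static-entry counts taken from the KSZ8830 entry removed from ksz8795.c above.

#include <stdio.h>
#include <string.h>

/* Hypothetical per-chip descriptor; the real ksz_chip_data carries many more
 * fields (masks, regs, shifts, mib_names, cpu_ports, ...). */
struct chip_info {
	const char *dev_name;
	int port_cnt;
	int num_statics;
};

struct of_match {
	const char *compatible;
	const struct chip_info *data;
};

static const struct chip_info ksz8830_info = {
	.dev_name = "KSZ8863/KSZ8873",
	.port_cnt = 3,
	.num_statics = 8,
};

/* Both compatibles resolve to the same shared descriptor, mirroring how
 * "microchip,ksz8863" and "microchip,ksz8873" now both point at
 * ksz_switch_chips[KSZ8830]. */
static const struct of_match matches[] = {
	{ .compatible = "microchip,ksz8863", .data = &ksz8830_info },
	{ .compatible = "microchip,ksz8873", .data = &ksz8830_info },
	{ }
};

static const struct chip_info *get_match_data(const char *compatible)
{
	const struct of_match *m;

	for (m = matches; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m->data;
	return NULL;
}

int main(void)
{
	const struct chip_info *info = get_match_data("microchip,ksz8873");

	if (info)
		printf("%s: %d ports, %d static entries\n",
		       info->dev_name, info->port_cnt, info->num_statics);
	return info ? 0 : 1;
}
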
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 353b5f981740..bf13d47c26cf 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -11,58 +11,13 @@
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "ksz9477_reg.h"
#include "ksz_common.h"
-
-/* Used with variable features to indicate capabilities. */
-#define GBIT_SUPPORT BIT(0)
-#define NEW_XMII BIT(1)
-#define IS_9893 BIT(2)
-
-static const struct {
- int index;
- char string[ETH_GSTRING_LEN];
-} ksz9477_mib_names[TOTAL_SWITCH_COUNTER_NUM] = {
- { 0x00, "rx_hi" },
- { 0x01, "rx_undersize" },
- { 0x02, "rx_fragments" },
- { 0x03, "rx_oversize" },
- { 0x04, "rx_jabbers" },
- { 0x05, "rx_symbol_err" },
- { 0x06, "rx_crc_err" },
- { 0x07, "rx_align_err" },
- { 0x08, "rx_mac_ctrl" },
- { 0x09, "rx_pause" },
- { 0x0A, "rx_bcast" },
- { 0x0B, "rx_mcast" },
- { 0x0C, "rx_ucast" },
- { 0x0D, "rx_64_or_less" },
- { 0x0E, "rx_65_127" },
- { 0x0F, "rx_128_255" },
- { 0x10, "rx_256_511" },
- { 0x11, "rx_512_1023" },
- { 0x12, "rx_1024_1522" },
- { 0x13, "rx_1523_2000" },
- { 0x14, "rx_2001" },
- { 0x15, "tx_hi" },
- { 0x16, "tx_late_col" },
- { 0x17, "tx_pause" },
- { 0x18, "tx_bcast" },
- { 0x19, "tx_mcast" },
- { 0x1A, "tx_ucast" },
- { 0x1B, "tx_deferred" },
- { 0x1C, "tx_total_col" },
- { 0x1D, "tx_exc_col" },
- { 0x1E, "tx_single_col" },
- { 0x1F, "tx_mult_col" },
- { 0x80, "rx_total" },
- { 0x81, "tx_total" },
- { 0x82, "rx_discards" },
- { 0x83, "tx_discards" },
-};
+#include "ksz9477.h"
static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
{
@@ -88,6 +43,19 @@ static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
bits, set ? bits : 0);
}
+int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
+{
+ u16 frame_size;
+
+ if (!dsa_is_cpu_port(dev->ds, port))
+ return 0;
+
+ frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ return regmap_update_bits(dev->regmap[1], REG_SW_MTU__2,
+ REG_SW_MTU_MASK, frame_size);
+}
+
static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
{
unsigned int val;
@@ -193,7 +161,7 @@ static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
10, 1000);
}
-static int ksz9477_reset_switch(struct ksz_device *dev)
+int ksz9477_reset_switch(struct ksz_device *dev)
{
u8 data8;
u32 data32;
@@ -216,21 +184,23 @@ static int ksz9477_reset_switch(struct ksz_device *dev)
ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
- /* set broadcast storm protection 10% rate */
- regmap_update_bits(dev->regmap[1], REG_SW_MAC_CTRL_2,
- BROADCAST_STORM_RATE,
- (BROADCAST_STORM_VALUE *
- BROADCAST_STORM_PROT_RATE) / 100);
+ /* KSZ9893 compatible chips do not support refclk configuration */
+ if (dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID ||
+ dev->chip_id == KSZ9563_CHIP_ID)
+ return 0;
- if (dev->synclko_125)
- ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
- SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ);
+ data8 = SW_ENABLE_REFCLKO;
+ if (dev->synclko_disable)
+ data8 = 0;
+ else if (dev->synclko_125)
+ data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
+ ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);
return 0;
}
-static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
- u64 *cnt)
+void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
{
struct ksz_port *p = &dev->ports[port];
unsigned int val;
@@ -257,14 +227,14 @@ static void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr,
*cnt += data;
}
-static void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
- u64 *dropped, u64 *cnt)
+void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt)
{
- addr = ksz9477_mib_names[addr].index;
+ addr = dev->info->mib_names[addr].index;
ksz9477_r_mib_cnt(dev, port, addr, cnt);
}
-static void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
+void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
{
u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
struct ksz_port *p = &dev->ports[port];
@@ -278,7 +248,7 @@ static void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
mutex_unlock(&p->mib.cnt_mutex);
}
-static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
+void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
@@ -289,27 +259,22 @@ static void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
mutex_unlock(&mib->cnt_mutex);
-
- mib->cnt_ptr = 0;
- memset(mib->counters, 0, dev->mib_cnt * sizeof(u64));
}
-static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds,
- int port,
- enum dsa_tag_protocol mp)
+static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
+ u16 *data)
{
- enum dsa_tag_protocol proto = DSA_TAG_PROTO_KSZ9477;
- struct ksz_device *dev = ds->priv;
-
- if (dev->features & IS_9893)
- proto = DSA_TAG_PROTO_KSZ9893;
- return proto;
+ /* The KSZ8563R does not have extended registers, but the BMSR_ESTATEN
+ * and BMSR_ERCAP bits are set.
+ */
+ if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
+ *data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
}
-static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
+int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
- struct ksz_device *dev = ds->priv;
u16 val = 0xffff;
+ int ret;
/* No real PHY after this. Simulate the PHY.
* A fixed PHY can be setup in the device tree, but this function is
@@ -317,7 +282,7 @@ static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
* For RGMII PHY there is no way to access it so the fixed PHY should
* be used. For SGMII PHY the supporting code will be added later.
*/
- if (addr >= dev->phy_port_cnt) {
+ if (!dev->info->internal_phy[addr]) {
struct ksz_port *p = &dev->ports[addr];
switch (reg) {
@@ -350,114 +315,58 @@ static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
break;
}
} else {
- ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
- }
-
- return val;
-}
-
-static int ksz9477_phy_write16(struct dsa_switch *ds, int addr, int reg,
- u16 val)
-{
- struct ksz_device *dev = ds->priv;
+ ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+ if (ret)
+ return ret;
- /* No real PHY after this. */
- if (addr >= dev->phy_port_cnt)
- return 0;
+ ksz9477_r_phy_quirks(dev, addr, reg, &val);
+ }
- /* No gigabit support. Do not write to this register. */
- if (!(dev->features & GBIT_SUPPORT) && reg == MII_CTRL1000)
- return 0;
- ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
+ *data = val;
return 0;
}
-static void ksz9477_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *buf)
+int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
- int i;
-
- if (stringset != ETH_SS_STATS)
- return;
+ /* No real PHY after this. */
+ if (!dev->info->internal_phy[addr])
+ return 0;
- for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
- memcpy(buf + i * ETH_GSTRING_LEN, ksz9477_mib_names[i].string,
- ETH_GSTRING_LEN);
- }
+ return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
}
-static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
- u8 member)
+void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
{
ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
}
-static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
- u8 state)
-{
- struct ksz_device *dev = ds->priv;
- struct ksz_port *p = &dev->ports[port];
- u8 data;
-
- ksz_pread8(dev, port, P_STP_CTRL, &data);
- data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-
- switch (state) {
- case BR_STATE_DISABLED:
- data |= PORT_LEARN_DISABLE;
- break;
- case BR_STATE_LISTENING:
- data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- break;
- case BR_STATE_LEARNING:
- data |= PORT_RX_ENABLE;
- break;
- case BR_STATE_FORWARDING:
- data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
- break;
- case BR_STATE_BLOCKING:
- data |= PORT_LEARN_DISABLE;
- break;
- default:
- dev_err(ds->dev, "invalid STP state: %d\n", state);
- return;
- }
-
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
- p->stp_state = state;
-
- ksz_update_port_member(dev, port);
-}
-
-static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
+void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
{
+ const u16 *regs = dev->info->regs;
u8 data;
regmap_update_bits(dev->regmap[0], REG_SW_LUE_CTRL_2,
SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);
- if (port < dev->port_cnt) {
+ if (port < dev->info->port_cnt) {
/* flush individual port */
- ksz_pread8(dev, port, P_STP_CTRL, &data);
+ ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
if (!(data & PORT_LEARN_DISABLE))
- ksz_pwrite8(dev, port, P_STP_CTRL,
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL],
data | PORT_LEARN_DISABLE);
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
} else {
/* flush all */
ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
}
}
-static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool flag,
- struct netlink_ext_ack *extack)
+int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
-
if (flag) {
ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
PORT_VLAN_LOOKUP_VID_0, true);
@@ -471,11 +380,10 @@ static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
+int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
u32 vlan_table[3];
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
int err;
@@ -508,10 +416,9 @@ static int ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
- struct ksz_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
u32 vlan_table[3];
u16 pvid;
@@ -542,10 +449,9 @@ static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int ksz9477_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+int ksz9477_fdb_add(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 alu_table[4];
u32 data;
int ret = 0;
@@ -599,10 +505,9 @@ exit:
return ret;
}
-static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+int ksz9477_fdb_del(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 alu_table[4];
u32 data;
int ret = 0;
@@ -635,10 +540,10 @@ static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
/* clear forwarding port */
- alu_table[2] &= ~BIT(port);
+ alu_table[1] &= ~BIT(port);
/* if there is no port to forward, clear table */
- if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
+ if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
alu_table[0] = 0;
alu_table[1] = 0;
alu_table[2] = 0;
@@ -689,10 +594,9 @@ static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
alu->mac[5] = alu_table[3] & 0xFF;
}
-static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
+int ksz9477_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
{
- struct ksz_device *dev = ds->priv;
int ret = 0;
u32 ksz_data;
u32 alu_table[4];
@@ -719,6 +623,9 @@ static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port,
goto exit;
}
+ if (!(ksz_data & ALU_VALID))
+ continue;
+
/* read ALU table */
ksz9477_read_table(dev, alu_table);
@@ -741,26 +648,30 @@ exit:
return ret;
}
-static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int ksz9477_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 static_table[4];
+ const u8 *shifts;
+ const u32 *masks;
u32 data;
int index;
u32 mac_hi, mac_lo;
int err = 0;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
mutex_lock(&dev->alu_mutex);
- for (index = 0; index < dev->num_statics; index++) {
+ for (index = 0; index < dev->info->num_statics; index++) {
/* find empty slot first */
- data = (index << ALU_STAT_INDEX_S) |
- ALU_STAT_READ | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) |
+ masks[ALU_STAT_READ] | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -788,7 +699,7 @@ static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
}
/* no available entry */
- if (index == dev->num_statics) {
+ if (index == dev->info->num_statics) {
err = -ENOSPC;
goto exit;
}
@@ -804,7 +715,7 @@ static int ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
ksz9477_write_table(dev, static_table);
- data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -816,26 +727,30 @@ exit:
return err;
}
-static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int ksz9477_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
{
- struct ksz_device *dev = ds->priv;
u32 static_table[4];
+ const u8 *shifts;
+ const u32 *masks;
u32 data;
int index;
int ret = 0;
u32 mac_hi, mac_lo;
+ shifts = dev->info->shifts;
+ masks = dev->info->masks;
+
mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
mutex_lock(&dev->alu_mutex);
- for (index = 0; index < dev->num_statics; index++) {
+ for (index = 0; index < dev->info->num_statics; index++) {
/* find empty slot first */
- data = (index << ALU_STAT_INDEX_S) |
- ALU_STAT_READ | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) |
+ masks[ALU_STAT_READ] | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -861,7 +776,7 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
}
/* no available entry */
- if (index == dev->num_statics)
+ if (index == dev->info->num_statics)
goto exit;
/* clear port */
@@ -877,7 +792,7 @@ static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
ksz9477_write_table(dev, static_table);
- data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
/* wait to be finished */
@@ -891,19 +806,36 @@ exit:
return ret;
}
-static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
{
- struct ksz_device *dev = ds->priv;
+ u8 data;
+ int p;
+
+ /* Limit to one sniffer port
+ * Check if any port is already set for sniffing
+ * If yes, instruct the user to remove the previous entry & exit
+ */
+ for (p = 0; p < dev->info->port_cnt; p++) {
+ /* Skip the current sniffing port */
+ if (p == mirror->to_local_port)
+ continue;
+
+ ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
+
+ if (data & PORT_MIRROR_SNIFFER) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Sniffer port is already configured, delete existing rules & retry");
+ return -EBUSY;
+ }
+ }
if (ingress)
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
else
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
-
/* configure mirror port */
ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
PORT_MIRROR_SNIFFER, true);
@@ -913,160 +845,47 @@ static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
return 0;
}
-static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
+void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
{
- struct ksz_device *dev = ds->priv;
+ bool in_use = false;
u8 data;
+ int p;
if (mirror->ingress)
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
else
ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
- ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
-
- if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX)))
- ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
- PORT_MIRROR_SNIFFER, false);
-}
-
-static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data)
-{
- bool gbit;
-
- if (dev->features & NEW_XMII)
- gbit = !(data & PORT_MII_NOT_1GBIT);
- else
- gbit = !!(data & PORT_MII_1000MBIT_S1);
- return gbit;
-}
-
-static void ksz9477_set_gbit(struct ksz_device *dev, bool gbit, u8 *data)
-{
- if (dev->features & NEW_XMII) {
- if (gbit)
- *data &= ~PORT_MII_NOT_1GBIT;
- else
- *data |= PORT_MII_NOT_1GBIT;
- } else {
- if (gbit)
- *data |= PORT_MII_1000MBIT_S1;
- else
- *data &= ~PORT_MII_1000MBIT_S1;
- }
-}
-static int ksz9477_get_xmii(struct ksz_device *dev, u8 data)
-{
- int mode;
+ /* Check if any port is still referring to the sniffer port */
+ for (p = 0; p < dev->info->port_cnt; p++) {
+ ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
- if (dev->features & NEW_XMII) {
- switch (data & PORT_MII_SEL_M) {
- case PORT_MII_SEL:
- mode = 0;
+ if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
+ in_use = true;
break;
- case PORT_RMII_SEL:
- mode = 1;
- break;
- case PORT_GMII_SEL:
- mode = 2;
- break;
- default:
- mode = 3;
- }
- } else {
- switch (data & PORT_MII_SEL_M) {
- case PORT_MII_SEL_S1:
- mode = 0;
- break;
- case PORT_RMII_SEL_S1:
- mode = 1;
- break;
- case PORT_GMII_SEL_S1:
- mode = 2;
- break;
- default:
- mode = 3;
}
}
- return mode;
-}
-
-static void ksz9477_set_xmii(struct ksz_device *dev, int mode, u8 *data)
-{
- u8 xmii;
- if (dev->features & NEW_XMII) {
- switch (mode) {
- case 0:
- xmii = PORT_MII_SEL;
- break;
- case 1:
- xmii = PORT_RMII_SEL;
- break;
- case 2:
- xmii = PORT_GMII_SEL;
- break;
- default:
- xmii = PORT_RGMII_SEL;
- break;
- }
- } else {
- switch (mode) {
- case 0:
- xmii = PORT_MII_SEL_S1;
- break;
- case 1:
- xmii = PORT_RMII_SEL_S1;
- break;
- case 2:
- xmii = PORT_GMII_SEL_S1;
- break;
- default:
- xmii = PORT_RGMII_SEL_S1;
- break;
- }
- }
- *data &= ~PORT_MII_SEL_M;
- *data |= xmii;
+ /* delete sniffing if there are no other mirroring rules */
+ if (!in_use)
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, false);
}
static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
{
phy_interface_t interface;
bool gbit;
- int mode;
- u8 data8;
- if (port < dev->phy_port_cnt)
+ if (dev->info->internal_phy[port])
return PHY_INTERFACE_MODE_NA;
- ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
- gbit = ksz9477_get_gbit(dev, data8);
- mode = ksz9477_get_xmii(dev, data8);
- switch (mode) {
- case 2:
- interface = PHY_INTERFACE_MODE_GMII;
- if (gbit)
- break;
- fallthrough;
- case 0:
- interface = PHY_INTERFACE_MODE_MII;
- break;
- case 1:
- interface = PHY_INTERFACE_MODE_RMII;
- break;
- default:
- interface = PHY_INTERFACE_MODE_RGMII;
- if (data8 & PORT_RGMII_ID_EG_ENABLE)
- interface = PHY_INTERFACE_MODE_RGMII_TXID;
- if (data8 & PORT_RGMII_ID_IG_ENABLE) {
- interface = PHY_INTERFACE_MODE_RGMII_RXID;
- if (data8 & PORT_RGMII_ID_EG_ENABLE)
- interface = PHY_INTERFACE_MODE_RGMII_ID;
- }
- break;
- }
+
+ gbit = ksz_get_gbit(dev, port);
+
+ interface = ksz_get_xmii(dev, port, gbit);
+
return interface;
}
@@ -1105,7 +924,7 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
/* Energy Efficient Ethernet (EEE) feature select must
* be manually disabled (except on KSZ8565 which is 100Mbit)
*/
- if (dev->features & GBIT_SUPPORT)
+ if (dev->info->gbit_capable[port])
ksz9477_port_mmd_write(dev, port, 0x07, 0x3c, 0x0000);
/* Register settings are required to meet data sheet
@@ -1126,18 +945,70 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
ksz9477_port_mmd_write(dev, port, 0x1c, 0x20, 0xeeee);
}
-static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+void ksz9477_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
+{
+ config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE;
+
+ if (dev->info->gbit_capable[port])
+ config->mac_capabilities |= MAC_1000FD;
+}
+
+int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+{
+ u32 secs = msecs / 1000;
+ u8 value;
+ u8 data;
+ int ret;
+
+ value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
+ if (ret < 0)
+ return ret;
+
+ data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
+
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
+ if (ret < 0)
+ return ret;
+
+ value &= ~SW_AGE_CNT_M;
+ value |= FIELD_PREP(SW_AGE_CNT_M, data);
+
+ return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+}
+
+void ksz9477_port_queue_split(struct ksz_device *dev, int port)
+{
+ u8 data;
+
+ if (dev->info->num_tx_queues == 8)
+ data = PORT_EIGHT_QUEUE;
+ else if (dev->info->num_tx_queues == 4)
+ data = PORT_FOUR_QUEUE;
+ else if (dev->info->num_tx_queues == 2)
+ data = PORT_TWO_QUEUE;
+ else
+ data = PORT_SINGLE_QUEUE;
+
+ ksz_prmw8(dev, port, REG_PORT_CTRL_0, PORT_QUEUE_SPLIT_MASK, data);
+}
+
+void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
- struct ksz_port *p = &dev->ports[port];
struct dsa_switch *ds = dev->ds;
- u8 data8, member;
u16 data16;
+ u8 member;
/* enable tag tail for host port */
if (cpu_port)
ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
true);
+ ksz9477_port_queue_split(dev, port);
+
ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
/* set back pressure */
@@ -1158,57 +1029,19 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* enable 802.1p priority */
ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
- if (port < dev->phy_port_cnt) {
+ if (dev->info->internal_phy[port]) {
/* do not force flow control */
ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
false);
- if (dev->phy_errata_9477)
+ if (dev->info->phy_errata_9477)
ksz9477_phy_errata_setup(dev, port);
} else {
/* force flow control */
ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
true);
-
- /* configure MAC to 1G & RGMII mode */
- ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
- switch (p->interface) {
- case PHY_INTERFACE_MODE_MII:
- ksz9477_set_xmii(dev, 0, &data8);
- ksz9477_set_gbit(dev, false, &data8);
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_RMII:
- ksz9477_set_xmii(dev, 1, &data8);
- ksz9477_set_gbit(dev, false, &data8);
- p->phydev.speed = SPEED_100;
- break;
- case PHY_INTERFACE_MODE_GMII:
- ksz9477_set_xmii(dev, 2, &data8);
- ksz9477_set_gbit(dev, true, &data8);
- p->phydev.speed = SPEED_1000;
- break;
- default:
- ksz9477_set_xmii(dev, 3, &data8);
- ksz9477_set_gbit(dev, true, &data8);
- data8 &= ~PORT_RGMII_ID_IG_ENABLE;
- data8 &= ~PORT_RGMII_ID_EG_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- data8 |= PORT_RGMII_ID_IG_ENABLE;
- if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
- p->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- data8 |= PORT_RGMII_ID_EG_ENABLE;
- /* On KSZ9893, disable RGMII in-band status support */
- if (dev->features & IS_9893)
- data8 &= ~PORT_MII_MAC_MODE;
- p->phydev.speed = SPEED_1000;
- break;
- }
- ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
- p->phydev.duplex = 1;
}
if (cpu_port)
@@ -1219,18 +1052,19 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz9477_cfg_port_member(dev, port, member);
/* clear pending interrupts */
- if (port < dev->phy_port_cnt)
+ if (dev->info->internal_phy[port])
ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
}
-static void ksz9477_config_cpu_port(struct dsa_switch *ds)
+void ksz9477_config_cpu_port(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p;
int i;
- for (i = 0; i < dev->port_cnt; i++) {
- if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ if (dsa_is_cpu_port(ds, i) &&
+ (dev->info->cpu_ports & (1 << i))) {
phy_interface_t interface;
const char *prev_msg;
const char *prev_mode;
@@ -1270,43 +1104,55 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
/* enable cpu port */
ksz9477_port_setup(dev, i, true);
- p->on = 1;
}
}
- for (i = 0; i < dev->port_cnt; i++) {
+ for (i = 0; i < dev->info->port_cnt; i++) {
if (i == dev->cpu_port)
continue;
- p = &dev->ports[i];
+ ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+ }
+}
- ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED);
- p->on = 1;
- if (i < dev->phy_port_cnt)
- p->phy = 1;
- if (dev->chip_id == 0x00947700 && i == 6) {
- p->sgmii = 1;
+int ksz9477_enable_stp_addr(struct ksz_device *dev)
+{
+ const u32 *masks;
+ u32 data;
+ int ret;
- /* SGMII PHY detection code is not implemented yet. */
- p->phy = 0;
- }
+ masks = dev->info->masks;
+
+ /* Enable Reserved multicast table */
+ ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);
+
+ /* Set the Override bit for forwarding BPDU packet to CPU */
+ ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
+ ALU_V_OVERRIDE | BIT(dev->cpu_port));
+ if (ret < 0)
+ return ret;
+
+ data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];
+
+ ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+ if (ret < 0)
+ return ret;
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
+ return ret;
}
+
+ return 0;
}
-static int ksz9477_setup(struct dsa_switch *ds)
+int ksz9477_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
int ret = 0;
- dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
- dev->num_vlans, GFP_KERNEL);
- if (!dev->vlan_cache)
- return -ENOMEM;
-
- ret = ksz9477_reset_switch(dev);
- if (ret) {
- dev_err(ds->dev, "failed to reset switch\n");
- return ret;
- }
+ ds->mtu_enforcement_ingress = true;
/* Required for port partitioning. */
ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
@@ -1315,12 +1161,14 @@ static int ksz9477_setup(struct dsa_switch *ds)
/* Do not work correctly with tail tagging. */
ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);
- /* accept packet up to 2000bytes */
- ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true);
-
- ksz9477_config_cpu_port(ds);
+ /* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
- ksz_cfg(dev, REG_SW_MAC_CTRL_1, MULTICAST_STORM_DISABLE, true);
+ /* Now we can configure default MTU value */
+ ret = regmap_update_bits(dev->regmap[1], REG_SW_MTU__2, REG_SW_MTU_MASK,
+ VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
+ if (ret)
+ return ret;
/* queue based egress rate limit */
ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
@@ -1328,55 +1176,28 @@ static int ksz9477_setup(struct dsa_switch *ds)
/* enable global MIB counter freeze function */
ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
- /* start switch */
- ksz_cfg(dev, REG_SW_OPERATION, SW_START, true);
-
- ksz_init_mib_timer(dev);
-
- ds->configure_vlan_while_not_filtering = false;
-
return 0;
}
-static const struct dsa_switch_ops ksz9477_switch_ops = {
- .get_tag_protocol = ksz9477_get_tag_protocol,
- .setup = ksz9477_setup,
- .phy_read = ksz9477_phy_read16,
- .phy_write = ksz9477_phy_write16,
- .phylink_mac_link_down = ksz_mac_link_down,
- .port_enable = ksz_enable_port,
- .get_strings = ksz9477_get_strings,
- .get_ethtool_stats = ksz_get_ethtool_stats,
- .get_sset_count = ksz_sset_count,
- .port_bridge_join = ksz_port_bridge_join,
- .port_bridge_leave = ksz_port_bridge_leave,
- .port_stp_state_set = ksz9477_port_stp_state_set,
- .port_fast_age = ksz_port_fast_age,
- .port_vlan_filtering = ksz9477_port_vlan_filtering,
- .port_vlan_add = ksz9477_port_vlan_add,
- .port_vlan_del = ksz9477_port_vlan_del,
- .port_fdb_dump = ksz9477_port_fdb_dump,
- .port_fdb_add = ksz9477_port_fdb_add,
- .port_fdb_del = ksz9477_port_fdb_del,
- .port_mdb_add = ksz9477_port_mdb_add,
- .port_mdb_del = ksz9477_port_mdb_del,
- .port_mirror_add = ksz9477_port_mirror_add,
- .port_mirror_del = ksz9477_port_mirror_del,
-};
-
-static u32 ksz9477_get_port_addr(int port, int offset)
+u32 ksz9477_get_port_addr(int port, int offset)
{
return PORT_CTRL_ADDR(port, offset);
}
-static int ksz9477_switch_detect(struct ksz_device *dev)
+int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
+{
+ val = val >> 8;
+
+ return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
+}
+
+int ksz9477_switch_init(struct ksz_device *dev)
{
u8 data8;
- u8 id_hi;
- u8 id_lo;
- u32 id32;
int ret;
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
+
/* turn off SPI DO Edge select */
ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
if (ret)
@@ -1387,204 +1208,14 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
if (ret)
return ret;
- /* read chip id */
- ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
- if (ret)
- return ret;
- ret = ksz_read8(dev, REG_GLOBAL_OPTIONS, &data8);
- if (ret)
- return ret;
-
- /* Number of ports can be reduced depending on chip. */
- dev->phy_port_cnt = 5;
-
- /* Default capability is gigabit capable. */
- dev->features = GBIT_SUPPORT;
-
- dev_dbg(dev->dev, "Switch detect: ID=%08x%02x\n", id32, data8);
- id_hi = (u8)(id32 >> 16);
- id_lo = (u8)(id32 >> 8);
- if ((id_lo & 0xf) == 3) {
- /* Chip is from KSZ9893 design. */
- dev_info(dev->dev, "Found KSZ9893\n");
- dev->features |= IS_9893;
-
- /* Chip does not support gigabit. */
- if (data8 & SW_QW_ABLE)
- dev->features &= ~GBIT_SUPPORT;
- dev->phy_port_cnt = 2;
- } else {
- dev_info(dev->dev, "Found KSZ9477 or compatible\n");
- /* Chip uses new XMII register definitions. */
- dev->features |= NEW_XMII;
-
- /* Chip does not support gigabit. */
- if (!(data8 & SW_GIGABIT_ABLE))
- dev->features &= ~GBIT_SUPPORT;
- }
-
- /* Change chip id to known ones so it can be matched against them. */
- id32 = (id_hi << 16) | (id_lo << 8);
-
- dev->chip_id = id32;
-
return 0;
}
-struct ksz_chip_data {
- u32 chip_id;
- const char *dev_name;
- int num_vlans;
- int num_alus;
- int num_statics;
- int cpu_ports;
- int port_cnt;
- bool phy_errata_9477;
-};
-
-static const struct ksz_chip_data ksz9477_switch_chips[] = {
- {
- .chip_id = 0x00947700,
- .dev_name = "KSZ9477",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- .phy_errata_9477 = true,
- },
- {
- .chip_id = 0x00989700,
- .dev_name = "KSZ9897",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- .phy_errata_9477 = true,
- },
- {
- .chip_id = 0x00989300,
- .dev_name = "KSZ9893",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x07, /* can be configured as cpu port */
- .port_cnt = 3, /* total port count */
- },
- {
- .chip_id = 0x00956700,
- .dev_name = "KSZ9567",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- .phy_errata_9477 = true,
- },
-};
-
-static int ksz9477_switch_init(struct ksz_device *dev)
-{
- int i;
-
- dev->ds->ops = &ksz9477_switch_ops;
-
- for (i = 0; i < ARRAY_SIZE(ksz9477_switch_chips); i++) {
- const struct ksz_chip_data *chip = &ksz9477_switch_chips[i];
-
- if (dev->chip_id == chip->chip_id) {
- dev->name = chip->dev_name;
- dev->num_vlans = chip->num_vlans;
- dev->num_alus = chip->num_alus;
- dev->num_statics = chip->num_statics;
- dev->port_cnt = chip->port_cnt;
- dev->cpu_ports = chip->cpu_ports;
- dev->phy_errata_9477 = chip->phy_errata_9477;
-
- break;
- }
- }
-
- /* no switch found */
- if (!dev->port_cnt)
- return -ENODEV;
-
- dev->port_mask = (1 << dev->port_cnt) - 1;
-
- dev->reg_mib_cnt = SWITCH_COUNTER_NUM;
- dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM;
-
- dev->ports = devm_kzalloc(dev->dev,
- dev->port_cnt * sizeof(struct ksz_port),
- GFP_KERNEL);
- if (!dev->ports)
- return -ENOMEM;
- for (i = 0; i < dev->port_cnt; i++) {
- mutex_init(&dev->ports[i].mib.cnt_mutex);
- dev->ports[i].mib.counters =
- devm_kzalloc(dev->dev,
- sizeof(u64) *
- (TOTAL_SWITCH_COUNTER_NUM + 1),
- GFP_KERNEL);
- if (!dev->ports[i].mib.counters)
- return -ENOMEM;
- }
-
- /* set the real number of ports */
- dev->ds->num_ports = dev->port_cnt;
-
- return 0;
-}
-
-static void ksz9477_switch_exit(struct ksz_device *dev)
+void ksz9477_switch_exit(struct ksz_device *dev)
{
ksz9477_reset_switch(dev);
}
-static const struct ksz_dev_ops ksz9477_dev_ops = {
- .get_port_addr = ksz9477_get_port_addr,
- .cfg_port_member = ksz9477_cfg_port_member,
- .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
- .port_setup = ksz9477_port_setup,
- .r_mib_cnt = ksz9477_r_mib_cnt,
- .r_mib_pkt = ksz9477_r_mib_pkt,
- .freeze_mib = ksz9477_freeze_mib,
- .port_init_cnt = ksz9477_port_init_cnt,
- .shutdown = ksz9477_reset_switch,
- .detect = ksz9477_switch_detect,
- .init = ksz9477_switch_init,
- .exit = ksz9477_switch_exit,
-};
-
-int ksz9477_switch_register(struct ksz_device *dev)
-{
- int ret, i;
- struct phy_device *phydev;
-
- ret = ksz_switch_register(dev, &ksz9477_dev_ops);
- if (ret)
- return ret;
-
- for (i = 0; i < dev->phy_port_cnt; ++i) {
- if (!dsa_is_user_port(dev->ds, i))
- continue;
-
- phydev = dsa_to_port(dev->ds, i)->slave->phydev;
-
- /* The MAC actually cannot run in 1000 half-duplex mode. */
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-
- /* PHY does not support gigabit. */
- if (!(dev->features & GBIT_SUPPORT))
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
- }
- return ret;
-}
-EXPORT_SYMBOL(ksz9477_switch_register);
-
MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
new file mode 100644
index 000000000000..b6f7e3c46e3f
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Microchip KSZ9477 series Header file
+ *
+ * Copyright (C) 2017-2022 Microchip Technology Inc.
+ */
+
+#ifndef __KSZ9477_H
+#define __KSZ9477_H
+
+#include <net/dsa.h>
+#include "ksz_common.h"
+
+int ksz9477_setup(struct dsa_switch *ds);
+u32 ksz9477_get_port_addr(int port, int offset);
+void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member);
+void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port);
+void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
+int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
+void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze);
+void ksz9477_port_init_cnt(struct ksz_device *dev, int port);
+int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack);
+int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+int ksz9477_get_stp_reg(void);
+void ksz9477_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+int ksz9477_fdb_dump(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int ksz9477_fdb_add(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+int ksz9477_fdb_del(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+int ksz9477_mdb_add(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz9477_mdb_del(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
+int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu);
+void ksz9477_config_cpu_port(struct dsa_switch *ds);
+int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val);
+int ksz9477_enable_stp_addr(struct ksz_device *dev);
+int ksz9477_reset_switch(struct ksz_device *dev);
+int ksz9477_dsa_init(struct ksz_device *dev);
+int ksz9477_switch_init(struct ksz_device *dev);
+void ksz9477_switch_exit(struct ksz_device *dev);
+void ksz9477_port_queue_split(struct ksz_device *dev, int port);
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index f3afb8b8c4cc..97a317263a2f 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -14,8 +14,7 @@
KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0);
-static int ksz9477_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *i2c_id)
+static int ksz9477_i2c_probe(struct i2c_client *i2c)
{
struct regmap_config rc;
struct ksz_device *dev;
@@ -30,18 +29,18 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
rc.lock_arg = &dev->regmap_mutex;
dev->regmap[i] = devm_regmap_init_i2c(i2c, &rc);
if (IS_ERR(dev->regmap[i])) {
- ret = PTR_ERR(dev->regmap[i]);
- dev_err(&i2c->dev,
- "Failed to initialize regmap%i: %d\n",
- ksz9477_regmap_config[i].val_bits, ret);
- return ret;
+ return dev_err_probe(&i2c->dev, PTR_ERR(dev->regmap[i]),
+ "Failed to initialize regmap%i\n",
+ ksz9477_regmap_config[i].val_bits);
}
}
if (i2c->dev.platform_data)
dev->pdata = i2c->dev.platform_data;
- ret = ksz9477_switch_register(dev);
+ dev->irq = i2c->irq;
+
+ ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
if (ret)
@@ -52,16 +51,12 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int ksz9477_i2c_remove(struct i2c_client *i2c)
+static void ksz9477_i2c_remove(struct i2c_client *i2c)
{
struct ksz_device *dev = i2c_get_clientdata(i2c);
if (dev)
ksz_switch_remove(dev);
-
- i2c_set_clientdata(i2c, NULL);
-
- return 0;
}
static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
@@ -71,8 +66,8 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
if (!dev)
return;
- if (dev->dev_ops->shutdown)
- dev->dev_ops->shutdown(dev);
+ if (dev->dev_ops->reset)
+ dev->dev_ops->reset(dev);
dsa_switch_shutdown(dev->ds);
@@ -87,11 +82,34 @@ static const struct i2c_device_id ksz9477_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id);
static const struct of_device_id ksz9477_dt_ids[] = {
- { .compatible = "microchip,ksz9477" },
- { .compatible = "microchip,ksz9897" },
- { .compatible = "microchip,ksz9893" },
- { .compatible = "microchip,ksz9563" },
- { .compatible = "microchip,ksz9567" },
+ {
+ .compatible = "microchip,ksz9477",
+ .data = &ksz_switch_chips[KSZ9477]
+ },
+ {
+ .compatible = "microchip,ksz9896",
+ .data = &ksz_switch_chips[KSZ9896]
+ },
+ {
+ .compatible = "microchip,ksz9897",
+ .data = &ksz_switch_chips[KSZ9897]
+ },
+ {
+ .compatible = "microchip,ksz9893",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9563",
+ .data = &ksz_switch_chips[KSZ9563]
+ },
+ {
+ .compatible = "microchip,ksz8563",
+ .data = &ksz_switch_chips[KSZ8563]
+ },
+ {
+ .compatible = "microchip,ksz9567",
+ .data = &ksz_switch_chips[KSZ9567]
+ },
{},
};
MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
@@ -99,9 +117,9 @@ MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
static struct i2c_driver ksz9477_i2c_driver = {
.driver = {
.name = "ksz9477-switch",
- .of_match_table = of_match_ptr(ksz9477_dt_ids),
+ .of_match_table = ksz9477_dt_ids,
},
- .probe = ksz9477_i2c_probe,
+ .probe_new = ksz9477_i2c_probe,
.remove = ksz9477_i2c_remove,
.shutdown = ksz9477_i2c_shutdown,
.id_table = ksz9477_i2c_id,
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 16939f29faa5..cba3dba58bc3 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -25,7 +25,6 @@
#define REG_CHIP_ID2__1 0x0002
-#define CHIP_ID_63 0x63
#define CHIP_ID_66 0x66
#define CHIP_ID_67 0x67
#define CHIP_ID_77 0x77
@@ -166,7 +165,6 @@
#define SW_DOUBLE_TAG BIT(7)
#define SW_RESET BIT(1)
-#define SW_START BIT(0)
#define REG_SW_MAC_ADDR_0 0x0302
#define REG_SW_MAC_ADDR_1 0x0303
@@ -176,6 +174,7 @@
#define REG_SW_MAC_ADDR_5 0x0307
#define REG_SW_MTU__2 0x0308
+#define REG_SW_MTU_MASK GENMASK(13, 0)
#define REG_SW_ISP_TPID__2 0x030A
@@ -190,8 +189,9 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
-#define SW_AGE_CNT_M 0x7
+#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_AGE_CNT_S 3
+#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define SW_HASH_OPTION_M 0x03
#define SW_HASH_OPTION_CRC 1
@@ -226,6 +226,7 @@
#define SW_PRIO_LOWEST_DA_SA 3
#define REG_SW_LUE_CTRL_3 0x0313
+#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
#define REG_SW_LUE_INT_STATUS 0x0314
#define REG_SW_LUE_INT_ENABLE 0x0315
@@ -265,7 +266,6 @@
#define REG_SW_MAC_CTRL_1 0x0331
-#define MULTICAST_STORM_DISABLE BIT(6)
#define SW_BACK_PRESSURE BIT(5)
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
@@ -276,13 +276,9 @@
#define REG_SW_MAC_CTRL_2 0x0332
#define SW_REPLACE_VID BIT(3)
-#define BROADCAST_STORM_RATE_HI 0x07
#define REG_SW_MAC_CTRL_3 0x0333
-#define BROADCAST_STORM_RATE_LO 0xFF
-#define BROADCAST_STORM_RATE 0x07FF
-
#define REG_SW_MAC_CTRL_4 0x0334
#define SW_PASS_PAUSE BIT(3)
@@ -425,12 +421,9 @@
#define REG_SW_ALU_STAT_CTRL__4 0x041C
-#define ALU_STAT_INDEX_M (BIT(4) - 1)
-#define ALU_STAT_INDEX_S 16
#define ALU_RESV_MCAST_INDEX_M (BIT(6) - 1)
#define ALU_STAT_START BIT(7)
#define ALU_RESV_MCAST_ADDR BIT(1)
-#define ALU_STAT_READ BIT(0)
#define REG_SW_ALU_VAL_A 0x0420
@@ -857,7 +850,11 @@
#define PORT_FORCE_TX_FLOW_CTRL BIT(4)
#define PORT_FORCE_RX_FLOW_CTRL BIT(3)
#define PORT_TAIL_TAG_ENABLE BIT(2)
-#define PORT_QUEUE_SPLIT_ENABLE 0x3
+#define PORT_QUEUE_SPLIT_MASK GENMASK(1, 0)
+#define PORT_EIGHT_QUEUE 0x3
+#define PORT_FOUR_QUEUE 0x2
+#define PORT_TWO_QUEUE 0x1
+#define PORT_SINGLE_QUEUE 0x0
#define REG_PORT_CTRL_1 0x0021
@@ -1184,35 +1181,11 @@
#define PORT_LINK_STATUS_FAIL BIT(0)
/* 3 - xMII */
-#define REG_PORT_XMII_CTRL_0 0x0300
-
#define PORT_SGMII_SEL BIT(7)
-#define PORT_MII_FULL_DUPLEX BIT(6)
-#define PORT_MII_100MBIT BIT(4)
#define PORT_GRXC_ENABLE BIT(0)
-#define REG_PORT_XMII_CTRL_1 0x0301
-
#define PORT_RMII_CLK_SEL BIT(7)
-/* S1 */
-#define PORT_MII_1000MBIT_S1 BIT(6)
-/* S2 */
-#define PORT_MII_NOT_1GBIT BIT(6)
#define PORT_MII_SEL_EDGE BIT(5)
-#define PORT_RGMII_ID_IG_ENABLE BIT(4)
-#define PORT_RGMII_ID_EG_ENABLE BIT(3)
-#define PORT_MII_MAC_MODE BIT(2)
-#define PORT_MII_SEL_M 0x3
-/* S1 */
-#define PORT_MII_SEL_S1 0x0
-#define PORT_RMII_SEL_S1 0x1
-#define PORT_GMII_SEL_S1 0x2
-#define PORT_RGMII_SEL_S1 0x3
-/* S2 */
-#define PORT_RGMII_SEL 0x0
-#define PORT_RMII_SEL 0x1
-#define PORT_GMII_SEL 0x2
-#define PORT_MII_SEL 0x3
/* 4 - MAC */
#define REG_PORT_MAC_CTRL_0 0x0400
@@ -1268,8 +1241,6 @@
/* 5 - MIB Counters */
#define REG_PORT_MIB_CTRL_STAT__4 0x0500
-#define MIB_COUNTER_OVERFLOW BIT(31)
-#define MIB_COUNTER_VALID BIT(30)
#define MIB_COUNTER_READ BIT(25)
#define MIB_COUNTER_FLUSH_FREEZE BIT(24)
#define MIB_COUNTER_INDEX_M (BIT(8) - 1)
@@ -1513,33 +1484,10 @@
/* 9 - Shaping */
-#define REG_PORT_MTI_QUEUE_INDEX__4 0x0900
-
-#define REG_PORT_MTI_QUEUE_CTRL_0__4 0x0904
-
-#define MTI_PVID_REPLACE BIT(0)
-
-#define REG_PORT_MTI_QUEUE_CTRL_0 0x0914
+#define REG_PORT_MTI_QUEUE_CTRL_0__4 0x0904
-#define MTI_SCHEDULE_MODE_M 0x3
-#define MTI_SCHEDULE_MODE_S 6
-#define MTI_SCHEDULE_STRICT_PRIO 0
-#define MTI_SCHEDULE_WRR 2
-#define MTI_SHAPING_M 0x3
-#define MTI_SHAPING_S 4
-#define MTI_SHAPING_OFF 0
-#define MTI_SHAPING_SRP 1
-#define MTI_SHAPING_TIME_AWARE 2
+#define MTI_PVID_REPLACE BIT(0)
-#define REG_PORT_MTI_QUEUE_CTRL_1 0x0915
-
-#define MTI_TX_RATIO_M (BIT(7) - 1)
-
-#define REG_PORT_MTI_QUEUE_CTRL_2__2 0x0916
-#define REG_PORT_MTI_HI_WATER_MARK 0x0916
-#define REG_PORT_MTI_QUEUE_CTRL_3__2 0x0918
-#define REG_PORT_MTI_LO_WATER_MARK 0x0918
-#define REG_PORT_MTI_QUEUE_CTRL_4__2 0x091A
#define REG_PORT_MTI_CREDIT_INCREMENT 0x091A
/* A - QM */
@@ -1585,10 +1533,6 @@
#define REG_PORT_LUE_MSTP_STATE 0x0B04
-#define PORT_TX_ENABLE BIT(2)
-#define PORT_RX_ENABLE BIT(1)
-#define PORT_LEARN_DISABLE BIT(0)
-
/* C - PTP */
#define REG_PTP_PORT_RX_DELAY__2 0x0C00
@@ -1632,11 +1576,7 @@
#define P_BCAST_STORM_CTRL REG_PORT_MAC_CTRL_0
#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL
#define P_MIRROR_CTRL REG_PORT_MRI_MIRROR_CTRL
-#define P_STP_CTRL REG_PORT_LUE_MSTP_STATE
#define P_PHY_CTRL REG_PORT_PHY_CTRL
-#define P_NEG_RESTART_CTRL REG_PORT_PHY_CTRL
-#define P_LINK_STATUS REG_PORT_PHY_STATUS
-#define P_SPEED_STATUS REG_PORT_PHY_PHY_CTRL
#define P_RATE_LIMIT_CTRL REG_PORT_MAC_IN_RATE_LIMIT
#define S_LINK_AGING_CTRL REG_SW_LUE_CTRL_1
@@ -1656,10 +1596,4 @@
#define PTP_TRIG_UNIT_M (BIT(MAX_TRIG_UNIT) - 1)
#define PTP_TS_UNIT_M (BIT(MAX_TIMESTAMP_UNIT) - 1)
-/* Driver set switch broadcast storm protection at 10% rate. */
-#define BROADCAST_STORM_PROT_RATE 10
-
-/* 148,800 frames * 67 ms / 100 */
-#define BROADCAST_STORM_VALUE 9969
-
#endif /* KSZ9477_REGS_H */
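The PORT_QUEUE_SPLIT_MASK and PORT_*_QUEUE values added above are what ksz9477_port_queue_split() programs into the low two bits of REG_PORT_CTRL_0 via ksz_prmw8(), based on the per-chip num_tx_queues. A minimal sketch of that read-modify-write in plain C (queue_split_field() and apply_queue_split() are hypothetical helpers used only for illustration):

#include <stdint.h>
#include <stdio.h>

#define PORT_QUEUE_SPLIT_MASK  0x3u   /* GENMASK(1, 0) */
#define PORT_EIGHT_QUEUE       0x3u
#define PORT_FOUR_QUEUE        0x2u
#define PORT_TWO_QUEUE         0x1u
#define PORT_SINGLE_QUEUE      0x0u

/* Map the per-chip num_tx_queues value onto the 2-bit field value. */
static uint8_t queue_split_field(int num_tx_queues)
{
        switch (num_tx_queues) {
        case 8:
                return PORT_EIGHT_QUEUE;
        case 4:
                return PORT_FOUR_QUEUE;
        case 2:
                return PORT_TWO_QUEUE;
        default:
                return PORT_SINGLE_QUEUE;
        }
}

/* Read-modify-write of REG_PORT_CTRL_0, preserving the other bits. */
static uint8_t apply_queue_split(uint8_t port_ctrl_0, int num_tx_queues)
{
        port_ctrl_0 &= ~PORT_QUEUE_SPLIT_MASK;
        port_ctrl_0 |= queue_split_field(num_tx_queues);
        return port_ctrl_0;
}

int main(void)
{
        printf("0x%02x\n", apply_queue_split(0xa8, 4));   /* -> 0xaa */
        return 0;
}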
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
deleted file mode 100644
index e3cb0e6c9f6f..000000000000
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Microchip KSZ9477 series register access through SPI
- *
- * Copyright (C) 2017-2019 Microchip Technology Inc.
- */
-
-#include <asm/unaligned.h>
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/spi/spi.h>
-
-#include "ksz_common.h"
-
-#define SPI_ADDR_SHIFT 24
-#define SPI_ADDR_ALIGN 3
-#define SPI_TURNAROUND_SHIFT 5
-
-KSZ_REGMAP_TABLE(ksz9477, 32, SPI_ADDR_SHIFT,
- SPI_TURNAROUND_SHIFT, SPI_ADDR_ALIGN);
-
-static int ksz9477_spi_probe(struct spi_device *spi)
-{
- struct regmap_config rc;
- struct ksz_device *dev;
- int i, ret;
-
- dev = ksz_switch_alloc(&spi->dev, spi);
- if (!dev)
- return -ENOMEM;
-
- for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
- rc = ksz9477_regmap_config[i];
- rc.lock_arg = &dev->regmap_mutex;
- dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
- if (IS_ERR(dev->regmap[i])) {
- ret = PTR_ERR(dev->regmap[i]);
- dev_err(&spi->dev,
- "Failed to initialize regmap%i: %d\n",
- ksz9477_regmap_config[i].val_bits, ret);
- return ret;
- }
- }
-
- if (spi->dev.platform_data)
- dev->pdata = spi->dev.platform_data;
-
- /* setup spi */
- spi->mode = SPI_MODE_3;
- ret = spi_setup(spi);
- if (ret)
- return ret;
-
- ret = ksz9477_switch_register(dev);
-
- /* Main DSA driver may not be started yet. */
- if (ret)
- return ret;
-
- spi_set_drvdata(spi, dev);
-
- return 0;
-}
-
-static int ksz9477_spi_remove(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (dev)
- ksz_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
-
- return 0;
-}
-
-static void ksz9477_spi_shutdown(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (dev)
- dsa_switch_shutdown(dev->ds);
-
- spi_set_drvdata(spi, NULL);
-}
-
-static const struct of_device_id ksz9477_dt_ids[] = {
- { .compatible = "microchip,ksz9477" },
- { .compatible = "microchip,ksz9897" },
- { .compatible = "microchip,ksz9893" },
- { .compatible = "microchip,ksz9563" },
- { .compatible = "microchip,ksz8563" },
- { .compatible = "microchip,ksz9567" },
- {},
-};
-MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
-
-static struct spi_driver ksz9477_spi_driver = {
- .driver = {
- .name = "ksz9477-switch",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(ksz9477_dt_ids),
- },
- .probe = ksz9477_spi_probe,
- .remove = ksz9477_spi_remove,
- .shutdown = ksz9477_spi_shutdown,
-};
-
-module_spi_driver(ksz9477_spi_driver);
-
-MODULE_ALIAS("spi:ksz9477");
-MODULE_ALIAS("spi:ksz9897");
-MODULE_ALIAS("spi:ksz9893");
-MODULE_ALIAS("spi:ksz9563");
-MODULE_ALIAS("spi:ksz8563");
-MODULE_ALIAS("spi:ksz9567");
-MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
-MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 8a04302018dc..a4428be5f483 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -6,6 +6,7 @@
*/
#include <linux/delay.h>
+#include <linux/dsa/ksz_common.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
@@ -14,19 +15,1750 @@
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_mdio.h>
+#include <linux/of_device.h>
#include <linux/of_net.h>
+#include <linux/micrel_phy.h>
#include <net/dsa.h>
+#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include "ksz_common.h"
+#include "ksz_ptp.h"
+#include "ksz8.h"
+#include "ksz9477.h"
+#include "lan937x.h"
+
+#define MIB_COUNTER_NUM 0x20
+
+struct ksz_stats_raw {
+ u64 rx_hi;
+ u64 rx_undersize;
+ u64 rx_fragments;
+ u64 rx_oversize;
+ u64 rx_jabbers;
+ u64 rx_symbol_err;
+ u64 rx_crc_err;
+ u64 rx_align_err;
+ u64 rx_mac_ctrl;
+ u64 rx_pause;
+ u64 rx_bcast;
+ u64 rx_mcast;
+ u64 rx_ucast;
+ u64 rx_64_or_less;
+ u64 rx_65_127;
+ u64 rx_128_255;
+ u64 rx_256_511;
+ u64 rx_512_1023;
+ u64 rx_1024_1522;
+ u64 rx_1523_2000;
+ u64 rx_2001;
+ u64 tx_hi;
+ u64 tx_late_col;
+ u64 tx_pause;
+ u64 tx_bcast;
+ u64 tx_mcast;
+ u64 tx_ucast;
+ u64 tx_deferred;
+ u64 tx_total_col;
+ u64 tx_exc_col;
+ u64 tx_single_col;
+ u64 tx_mult_col;
+ u64 rx_total;
+ u64 tx_total;
+ u64 rx_discards;
+ u64 tx_discards;
+};
+
+struct ksz88xx_stats_raw {
+ u64 rx;
+ u64 rx_hi;
+ u64 rx_undersize;
+ u64 rx_fragments;
+ u64 rx_oversize;
+ u64 rx_jabbers;
+ u64 rx_symbol_err;
+ u64 rx_crc_err;
+ u64 rx_align_err;
+ u64 rx_mac_ctrl;
+ u64 rx_pause;
+ u64 rx_bcast;
+ u64 rx_mcast;
+ u64 rx_ucast;
+ u64 rx_64_or_less;
+ u64 rx_65_127;
+ u64 rx_128_255;
+ u64 rx_256_511;
+ u64 rx_512_1023;
+ u64 rx_1024_1522;
+ u64 tx;
+ u64 tx_hi;
+ u64 tx_late_col;
+ u64 tx_pause;
+ u64 tx_bcast;
+ u64 tx_mcast;
+ u64 tx_ucast;
+ u64 tx_deferred;
+ u64 tx_total_col;
+ u64 tx_exc_col;
+ u64 tx_single_col;
+ u64 tx_mult_col;
+ u64 rx_discards;
+ u64 tx_discards;
+};
+
+static const struct ksz_mib_names ksz88xx_mib_names[] = {
+ { 0x00, "rx" },
+ { 0x01, "rx_hi" },
+ { 0x02, "rx_undersize" },
+ { 0x03, "rx_fragments" },
+ { 0x04, "rx_oversize" },
+ { 0x05, "rx_jabbers" },
+ { 0x06, "rx_symbol_err" },
+ { 0x07, "rx_crc_err" },
+ { 0x08, "rx_align_err" },
+ { 0x09, "rx_mac_ctrl" },
+ { 0x0a, "rx_pause" },
+ { 0x0b, "rx_bcast" },
+ { 0x0c, "rx_mcast" },
+ { 0x0d, "rx_ucast" },
+ { 0x0e, "rx_64_or_less" },
+ { 0x0f, "rx_65_127" },
+ { 0x10, "rx_128_255" },
+ { 0x11, "rx_256_511" },
+ { 0x12, "rx_512_1023" },
+ { 0x13, "rx_1024_1522" },
+ { 0x14, "tx" },
+ { 0x15, "tx_hi" },
+ { 0x16, "tx_late_col" },
+ { 0x17, "tx_pause" },
+ { 0x18, "tx_bcast" },
+ { 0x19, "tx_mcast" },
+ { 0x1a, "tx_ucast" },
+ { 0x1b, "tx_deferred" },
+ { 0x1c, "tx_total_col" },
+ { 0x1d, "tx_exc_col" },
+ { 0x1e, "tx_single_col" },
+ { 0x1f, "tx_mult_col" },
+ { 0x100, "rx_discards" },
+ { 0x101, "tx_discards" },
+};
+
+static const struct ksz_mib_names ksz9477_mib_names[] = {
+ { 0x00, "rx_hi" },
+ { 0x01, "rx_undersize" },
+ { 0x02, "rx_fragments" },
+ { 0x03, "rx_oversize" },
+ { 0x04, "rx_jabbers" },
+ { 0x05, "rx_symbol_err" },
+ { 0x06, "rx_crc_err" },
+ { 0x07, "rx_align_err" },
+ { 0x08, "rx_mac_ctrl" },
+ { 0x09, "rx_pause" },
+ { 0x0A, "rx_bcast" },
+ { 0x0B, "rx_mcast" },
+ { 0x0C, "rx_ucast" },
+ { 0x0D, "rx_64_or_less" },
+ { 0x0E, "rx_65_127" },
+ { 0x0F, "rx_128_255" },
+ { 0x10, "rx_256_511" },
+ { 0x11, "rx_512_1023" },
+ { 0x12, "rx_1024_1522" },
+ { 0x13, "rx_1523_2000" },
+ { 0x14, "rx_2001" },
+ { 0x15, "tx_hi" },
+ { 0x16, "tx_late_col" },
+ { 0x17, "tx_pause" },
+ { 0x18, "tx_bcast" },
+ { 0x19, "tx_mcast" },
+ { 0x1A, "tx_ucast" },
+ { 0x1B, "tx_deferred" },
+ { 0x1C, "tx_total_col" },
+ { 0x1D, "tx_exc_col" },
+ { 0x1E, "tx_single_col" },
+ { 0x1F, "tx_mult_col" },
+ { 0x80, "rx_total" },
+ { 0x81, "tx_total" },
+ { 0x82, "rx_discards" },
+ { 0x83, "tx_discards" },
+};
+
+static const struct ksz_dev_ops ksz8_dev_ops = {
+ .setup = ksz8_setup,
+ .get_port_addr = ksz8_get_port_addr,
+ .cfg_port_member = ksz8_cfg_port_member,
+ .flush_dyn_mac_table = ksz8_flush_dyn_mac_table,
+ .port_setup = ksz8_port_setup,
+ .r_phy = ksz8_r_phy,
+ .w_phy = ksz8_w_phy,
+ .r_mib_cnt = ksz8_r_mib_cnt,
+ .r_mib_pkt = ksz8_r_mib_pkt,
+ .r_mib_stat64 = ksz88xx_r_mib_stats64,
+ .freeze_mib = ksz8_freeze_mib,
+ .port_init_cnt = ksz8_port_init_cnt,
+ .fdb_dump = ksz8_fdb_dump,
+ .fdb_add = ksz8_fdb_add,
+ .fdb_del = ksz8_fdb_del,
+ .mdb_add = ksz8_mdb_add,
+ .mdb_del = ksz8_mdb_del,
+ .vlan_filtering = ksz8_port_vlan_filtering,
+ .vlan_add = ksz8_port_vlan_add,
+ .vlan_del = ksz8_port_vlan_del,
+ .mirror_add = ksz8_port_mirror_add,
+ .mirror_del = ksz8_port_mirror_del,
+ .get_caps = ksz8_get_caps,
+ .config_cpu_port = ksz8_config_cpu_port,
+ .enable_stp_addr = ksz8_enable_stp_addr,
+ .reset = ksz8_reset_switch,
+ .init = ksz8_switch_init,
+ .exit = ksz8_switch_exit,
+ .change_mtu = ksz8_change_mtu,
+};
+
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause);
+
+static const struct ksz_dev_ops ksz9477_dev_ops = {
+ .setup = ksz9477_setup,
+ .get_port_addr = ksz9477_get_port_addr,
+ .cfg_port_member = ksz9477_cfg_port_member,
+ .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+ .port_setup = ksz9477_port_setup,
+ .set_ageing_time = ksz9477_set_ageing_time,
+ .r_phy = ksz9477_r_phy,
+ .w_phy = ksz9477_w_phy,
+ .r_mib_cnt = ksz9477_r_mib_cnt,
+ .r_mib_pkt = ksz9477_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz9477_freeze_mib,
+ .port_init_cnt = ksz9477_port_init_cnt,
+ .vlan_filtering = ksz9477_port_vlan_filtering,
+ .vlan_add = ksz9477_port_vlan_add,
+ .vlan_del = ksz9477_port_vlan_del,
+ .mirror_add = ksz9477_port_mirror_add,
+ .mirror_del = ksz9477_port_mirror_del,
+ .get_caps = ksz9477_get_caps,
+ .fdb_dump = ksz9477_fdb_dump,
+ .fdb_add = ksz9477_fdb_add,
+ .fdb_del = ksz9477_fdb_del,
+ .mdb_add = ksz9477_mdb_add,
+ .mdb_del = ksz9477_mdb_del,
+ .change_mtu = ksz9477_change_mtu,
+ .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
+ .config_cpu_port = ksz9477_config_cpu_port,
+ .tc_cbs_set_cinc = ksz9477_tc_cbs_set_cinc,
+ .enable_stp_addr = ksz9477_enable_stp_addr,
+ .reset = ksz9477_reset_switch,
+ .init = ksz9477_switch_init,
+ .exit = ksz9477_switch_exit,
+};
+
+static const struct ksz_dev_ops lan937x_dev_ops = {
+ .setup = lan937x_setup,
+ .teardown = lan937x_teardown,
+ .get_port_addr = ksz9477_get_port_addr,
+ .cfg_port_member = ksz9477_cfg_port_member,
+ .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+ .port_setup = lan937x_port_setup,
+ .set_ageing_time = lan937x_set_ageing_time,
+ .r_phy = lan937x_r_phy,
+ .w_phy = lan937x_w_phy,
+ .r_mib_cnt = ksz9477_r_mib_cnt,
+ .r_mib_pkt = ksz9477_r_mib_pkt,
+ .r_mib_stat64 = ksz_r_mib_stats64,
+ .freeze_mib = ksz9477_freeze_mib,
+ .port_init_cnt = ksz9477_port_init_cnt,
+ .vlan_filtering = ksz9477_port_vlan_filtering,
+ .vlan_add = ksz9477_port_vlan_add,
+ .vlan_del = ksz9477_port_vlan_del,
+ .mirror_add = ksz9477_port_mirror_add,
+ .mirror_del = ksz9477_port_mirror_del,
+ .get_caps = lan937x_phylink_get_caps,
+ .setup_rgmii_delay = lan937x_setup_rgmii_delay,
+ .fdb_dump = ksz9477_fdb_dump,
+ .fdb_add = ksz9477_fdb_add,
+ .fdb_del = ksz9477_fdb_del,
+ .mdb_add = ksz9477_mdb_add,
+ .mdb_del = ksz9477_mdb_del,
+ .change_mtu = lan937x_change_mtu,
+ .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
+ .config_cpu_port = lan937x_config_cpu_port,
+ .tc_cbs_set_cinc = lan937x_tc_cbs_set_cinc,
+ .enable_stp_addr = ksz9477_enable_stp_addr,
+ .reset = lan937x_reset_switch,
+ .init = lan937x_switch_init,
+ .exit = lan937x_switch_exit,
+};
+
+static const u16 ksz8795_regs[] = {
+ [REG_IND_CTRL_0] = 0x6E,
+ [REG_IND_DATA_8] = 0x70,
+ [REG_IND_DATA_CHECK] = 0x72,
+ [REG_IND_DATA_HI] = 0x71,
+ [REG_IND_DATA_LO] = 0x75,
+ [REG_IND_MIB_CHECK] = 0x74,
+ [REG_IND_BYTE] = 0xA0,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x07,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x08,
+ [P_SPEED_STATUS] = 0x09,
+ [S_TAIL_TAG_CTRL] = 0x0C,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+ [P_XMII_CTRL_0] = 0x06,
+ [P_XMII_CTRL_1] = 0x06,
+};
+
+static const u32 ksz8795_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(7),
+ [SW_TAIL_TAG_ENABLE] = BIT(1),
+ [MIB_COUNTER_OVERFLOW] = BIT(6),
+ [MIB_COUNTER_VALID] = BIT(5),
+ [VLAN_TABLE_FID] = GENMASK(6, 0),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(11, 7),
+ [VLAN_TABLE_VALID] = BIT(12),
+ [STATIC_MAC_TABLE_VALID] = BIT(21),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(23),
+ [STATIC_MAC_TABLE_FID] = GENMASK(30, 24),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(26),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(24, 20),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(6, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(8),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 29),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(26, 20),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(26, 24),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(28, 27),
+ [P_MII_TX_FLOW_CTRL] = BIT(5),
+ [P_MII_RX_FLOW_CTRL] = BIT(5),
+};
+
+static const u8 ksz8795_xmii_ctrl0[] = {
+ [P_MII_100MBIT] = 0,
+ [P_MII_10MBIT] = 1,
+ [P_MII_FULL_DUPLEX] = 0,
+ [P_MII_HALF_DUPLEX] = 1,
+};
+
+static const u8 ksz8795_xmii_ctrl1[] = {
+ [P_RGMII_SEL] = 3,
+ [P_GMII_SEL] = 2,
+ [P_RMII_SEL] = 1,
+ [P_MII_SEL] = 0,
+ [P_GMII_1GBIT] = 1,
+ [P_GMII_NOT_1GBIT] = 0,
+};
+
+static const u8 ksz8795_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 7,
+ [VLAN_TABLE] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 24,
+ [DYNAMIC_MAC_ENTRIES_H] = 3,
+ [DYNAMIC_MAC_ENTRIES] = 29,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 27,
+ [DYNAMIC_MAC_SRC_PORT] = 24,
+};
+
+static const u16 ksz8863_regs[] = {
+ [REG_IND_CTRL_0] = 0x79,
+ [REG_IND_DATA_8] = 0x7B,
+ [REG_IND_DATA_CHECK] = 0x7B,
+ [REG_IND_DATA_HI] = 0x7C,
+ [REG_IND_DATA_LO] = 0x80,
+ [REG_IND_MIB_CHECK] = 0x80,
+ [P_FORCE_CTRL] = 0x0C,
+ [P_LINK_STATUS] = 0x0E,
+ [P_LOCAL_CTRL] = 0x0C,
+ [P_NEG_RESTART_CTRL] = 0x0D,
+ [P_REMOTE_STATUS] = 0x0E,
+ [P_SPEED_STATUS] = 0x0F,
+ [S_TAIL_TAG_CTRL] = 0x03,
+ [P_STP_CTRL] = 0x02,
+ [S_START_CTRL] = 0x01,
+ [S_BROADCAST_CTRL] = 0x06,
+ [S_MULTICAST_CTRL] = 0x04,
+};
+
+static const u32 ksz8863_masks[] = {
+ [PORT_802_1P_REMAPPING] = BIT(3),
+ [SW_TAIL_TAG_ENABLE] = BIT(6),
+ [MIB_COUNTER_OVERFLOW] = BIT(7),
+ [MIB_COUNTER_VALID] = BIT(6),
+ [VLAN_TABLE_FID] = GENMASK(15, 12),
+ [VLAN_TABLE_MEMBERSHIP] = GENMASK(18, 16),
+ [VLAN_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_VALID] = BIT(19),
+ [STATIC_MAC_TABLE_USE_FID] = BIT(21),
+ [STATIC_MAC_TABLE_FID] = GENMASK(25, 22),
+ [STATIC_MAC_TABLE_OVERRIDE] = BIT(20),
+ [STATIC_MAC_TABLE_FWD_PORTS] = GENMASK(18, 16),
+ [DYNAMIC_MAC_TABLE_ENTRIES_H] = GENMASK(1, 0),
+ [DYNAMIC_MAC_TABLE_MAC_EMPTY] = BIT(2),
+ [DYNAMIC_MAC_TABLE_NOT_READY] = BIT(7),
+ [DYNAMIC_MAC_TABLE_ENTRIES] = GENMASK(31, 24),
+ [DYNAMIC_MAC_TABLE_FID] = GENMASK(19, 16),
+ [DYNAMIC_MAC_TABLE_SRC_PORT] = GENMASK(21, 20),
+ [DYNAMIC_MAC_TABLE_TIMESTAMP] = GENMASK(23, 22),
+};
+
+static u8 ksz8863_shifts[] = {
+ [VLAN_TABLE_MEMBERSHIP_S] = 16,
+ [STATIC_MAC_FWD_PORTS] = 16,
+ [STATIC_MAC_FID] = 22,
+ [DYNAMIC_MAC_ENTRIES_H] = 8,
+ [DYNAMIC_MAC_ENTRIES] = 24,
+ [DYNAMIC_MAC_FID] = 16,
+ [DYNAMIC_MAC_TIMESTAMP] = 22,
+ [DYNAMIC_MAC_SRC_PORT] = 20,
+};
+
+static const u16 ksz9477_regs[] = {
+ [P_STP_CTRL] = 0x0B04,
+ [S_START_CTRL] = 0x0300,
+ [S_BROADCAST_CTRL] = 0x0332,
+ [S_MULTICAST_CTRL] = 0x0331,
+ [P_XMII_CTRL_0] = 0x0300,
+ [P_XMII_CTRL_1] = 0x0301,
+};
+
+static const u32 ksz9477_masks[] = {
+ [ALU_STAT_WRITE] = 0,
+ [ALU_STAT_READ] = 1,
+ [P_MII_TX_FLOW_CTRL] = BIT(5),
+ [P_MII_RX_FLOW_CTRL] = BIT(3),
+};
+
+static const u8 ksz9477_shifts[] = {
+ [ALU_STAT_INDEX] = 16,
+};
+
+static const u8 ksz9477_xmii_ctrl0[] = {
+ [P_MII_100MBIT] = 1,
+ [P_MII_10MBIT] = 0,
+ [P_MII_FULL_DUPLEX] = 1,
+ [P_MII_HALF_DUPLEX] = 0,
+};
+
+static const u8 ksz9477_xmii_ctrl1[] = {
+ [P_RGMII_SEL] = 0,
+ [P_RMII_SEL] = 1,
+ [P_GMII_SEL] = 2,
+ [P_MII_SEL] = 3,
+ [P_GMII_1GBIT] = 0,
+ [P_GMII_NOT_1GBIT] = 1,
+};
+
+static const u32 lan937x_masks[] = {
+ [ALU_STAT_WRITE] = 1,
+ [ALU_STAT_READ] = 2,
+ [P_MII_TX_FLOW_CTRL] = BIT(5),
+ [P_MII_RX_FLOW_CTRL] = BIT(3),
+};
+
+static const u8 lan937x_shifts[] = {
+ [ALU_STAT_INDEX] = 8,
+};
+
+static const struct regmap_range ksz8563_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x000f, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0104, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x012b),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+ regmap_reg_range(0x0500, 0x0519),
+ regmap_reg_range(0x0520, 0x054b),
+ regmap_reg_range(0x0550, 0x05b3),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1004, 0x100b),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1021),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1111),
+ regmap_reg_range(0x111a, 0x111d),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1612),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x191b),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a08),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+ regmap_reg_range(0x1c00, 0x1c05),
+ regmap_reg_range(0x1c08, 0x1c1b),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2004, 0x200b),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2021),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2111),
+ regmap_reg_range(0x211a, 0x211d),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2612),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x291b),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a08),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+ regmap_reg_range(0x2c00, 0x2c05),
+ regmap_reg_range(0x2c08, 0x2c1b),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3004, 0x300b),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3021),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3300, 0x3301),
+ regmap_reg_range(0x3303, 0x3303),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3612),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x391b),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a08),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+ regmap_reg_range(0x3c00, 0x3c05),
+ regmap_reg_range(0x3c08, 0x3c1b),
+};
+
+static const struct regmap_access_table ksz8563_register_set = {
+ .yes_ranges = ksz8563_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz8563_valid_regs),
+};
+
+static const struct regmap_range ksz9477_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x0010, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0103, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x012b),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033b),
+ regmap_reg_range(0x033e, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+ regmap_reg_range(0x0444, 0x044b),
+ regmap_reg_range(0x0450, 0x046f),
+ regmap_reg_range(0x0500, 0x0519),
+ regmap_reg_range(0x0520, 0x054b),
+ regmap_reg_range(0x0550, 0x05b3),
+ regmap_reg_range(0x0604, 0x060b),
+ regmap_reg_range(0x0610, 0x0612),
+ regmap_reg_range(0x0614, 0x062c),
+ regmap_reg_range(0x0640, 0x0645),
+ regmap_reg_range(0x0648, 0x064d),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1020),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1115),
+ regmap_reg_range(0x111a, 0x111f),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1613),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1820, 0x1827),
+ regmap_reg_range(0x1830, 0x1837),
+ regmap_reg_range(0x1840, 0x184b),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x191b),
+ regmap_reg_range(0x1920, 0x1920),
+ regmap_reg_range(0x1923, 0x1927),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a07),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+ regmap_reg_range(0x1c00, 0x1c05),
+ regmap_reg_range(0x1c08, 0x1c1b),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2020),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2115),
+ regmap_reg_range(0x211a, 0x211f),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2613),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2820, 0x2827),
+ regmap_reg_range(0x2830, 0x2837),
+ regmap_reg_range(0x2840, 0x284b),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x291b),
+ regmap_reg_range(0x2920, 0x2920),
+ regmap_reg_range(0x2923, 0x2927),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a07),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+ regmap_reg_range(0x2c00, 0x2c05),
+ regmap_reg_range(0x2c08, 0x2c1b),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3020),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3100, 0x3115),
+ regmap_reg_range(0x311a, 0x311f),
+ regmap_reg_range(0x3122, 0x3127),
+ regmap_reg_range(0x312a, 0x312b),
+ regmap_reg_range(0x3136, 0x3139),
+ regmap_reg_range(0x313e, 0x313f),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3613),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3820, 0x3827),
+ regmap_reg_range(0x3830, 0x3837),
+ regmap_reg_range(0x3840, 0x384b),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x391b),
+ regmap_reg_range(0x3920, 0x3920),
+ regmap_reg_range(0x3923, 0x3927),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a07),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+ regmap_reg_range(0x3c00, 0x3c05),
+ regmap_reg_range(0x3c08, 0x3c1b),
+
+ /* port 4 */
+ regmap_reg_range(0x4000, 0x4001),
+ regmap_reg_range(0x4013, 0x4013),
+ regmap_reg_range(0x4017, 0x4017),
+ regmap_reg_range(0x401b, 0x401b),
+ regmap_reg_range(0x401f, 0x4020),
+ regmap_reg_range(0x4030, 0x4030),
+ regmap_reg_range(0x4100, 0x4115),
+ regmap_reg_range(0x411a, 0x411f),
+ regmap_reg_range(0x4122, 0x4127),
+ regmap_reg_range(0x412a, 0x412b),
+ regmap_reg_range(0x4136, 0x4139),
+ regmap_reg_range(0x413e, 0x413f),
+ regmap_reg_range(0x4400, 0x4401),
+ regmap_reg_range(0x4403, 0x4403),
+ regmap_reg_range(0x4410, 0x4417),
+ regmap_reg_range(0x4420, 0x4423),
+ regmap_reg_range(0x4500, 0x4507),
+ regmap_reg_range(0x4600, 0x4613),
+ regmap_reg_range(0x4800, 0x480f),
+ regmap_reg_range(0x4820, 0x4827),
+ regmap_reg_range(0x4830, 0x4837),
+ regmap_reg_range(0x4840, 0x484b),
+ regmap_reg_range(0x4900, 0x4907),
+ regmap_reg_range(0x4914, 0x491b),
+ regmap_reg_range(0x4920, 0x4920),
+ regmap_reg_range(0x4923, 0x4927),
+ regmap_reg_range(0x4a00, 0x4a03),
+ regmap_reg_range(0x4a04, 0x4a07),
+ regmap_reg_range(0x4b00, 0x4b01),
+ regmap_reg_range(0x4b04, 0x4b04),
+ regmap_reg_range(0x4c00, 0x4c05),
+ regmap_reg_range(0x4c08, 0x4c1b),
+
+ /* port 5 */
+ regmap_reg_range(0x5000, 0x5001),
+ regmap_reg_range(0x5013, 0x5013),
+ regmap_reg_range(0x5017, 0x5017),
+ regmap_reg_range(0x501b, 0x501b),
+ regmap_reg_range(0x501f, 0x5020),
+ regmap_reg_range(0x5030, 0x5030),
+ regmap_reg_range(0x5100, 0x5115),
+ regmap_reg_range(0x511a, 0x511f),
+ regmap_reg_range(0x5122, 0x5127),
+ regmap_reg_range(0x512a, 0x512b),
+ regmap_reg_range(0x5136, 0x5139),
+ regmap_reg_range(0x513e, 0x513f),
+ regmap_reg_range(0x5400, 0x5401),
+ regmap_reg_range(0x5403, 0x5403),
+ regmap_reg_range(0x5410, 0x5417),
+ regmap_reg_range(0x5420, 0x5423),
+ regmap_reg_range(0x5500, 0x5507),
+ regmap_reg_range(0x5600, 0x5613),
+ regmap_reg_range(0x5800, 0x580f),
+ regmap_reg_range(0x5820, 0x5827),
+ regmap_reg_range(0x5830, 0x5837),
+ regmap_reg_range(0x5840, 0x584b),
+ regmap_reg_range(0x5900, 0x5907),
+ regmap_reg_range(0x5914, 0x591b),
+ regmap_reg_range(0x5920, 0x5920),
+ regmap_reg_range(0x5923, 0x5927),
+ regmap_reg_range(0x5a00, 0x5a03),
+ regmap_reg_range(0x5a04, 0x5a07),
+ regmap_reg_range(0x5b00, 0x5b01),
+ regmap_reg_range(0x5b04, 0x5b04),
+ regmap_reg_range(0x5c00, 0x5c05),
+ regmap_reg_range(0x5c08, 0x5c1b),
+
+ /* port 6 */
+ regmap_reg_range(0x6000, 0x6001),
+ regmap_reg_range(0x6013, 0x6013),
+ regmap_reg_range(0x6017, 0x6017),
+ regmap_reg_range(0x601b, 0x601b),
+ regmap_reg_range(0x601f, 0x6020),
+ regmap_reg_range(0x6030, 0x6030),
+ regmap_reg_range(0x6300, 0x6301),
+ regmap_reg_range(0x6400, 0x6401),
+ regmap_reg_range(0x6403, 0x6403),
+ regmap_reg_range(0x6410, 0x6417),
+ regmap_reg_range(0x6420, 0x6423),
+ regmap_reg_range(0x6500, 0x6507),
+ regmap_reg_range(0x6600, 0x6613),
+ regmap_reg_range(0x6800, 0x680f),
+ regmap_reg_range(0x6820, 0x6827),
+ regmap_reg_range(0x6830, 0x6837),
+ regmap_reg_range(0x6840, 0x684b),
+ regmap_reg_range(0x6900, 0x6907),
+ regmap_reg_range(0x6914, 0x691b),
+ regmap_reg_range(0x6920, 0x6920),
+ regmap_reg_range(0x6923, 0x6927),
+ regmap_reg_range(0x6a00, 0x6a03),
+ regmap_reg_range(0x6a04, 0x6a07),
+ regmap_reg_range(0x6b00, 0x6b01),
+ regmap_reg_range(0x6b04, 0x6b04),
+ regmap_reg_range(0x6c00, 0x6c05),
+ regmap_reg_range(0x6c08, 0x6c1b),
+
+ /* port 7 */
+ regmap_reg_range(0x7000, 0x7001),
+ regmap_reg_range(0x7013, 0x7013),
+ regmap_reg_range(0x7017, 0x7017),
+ regmap_reg_range(0x701b, 0x701b),
+ regmap_reg_range(0x701f, 0x7020),
+ regmap_reg_range(0x7030, 0x7030),
+ regmap_reg_range(0x7200, 0x7203),
+ regmap_reg_range(0x7206, 0x7207),
+ regmap_reg_range(0x7300, 0x7301),
+ regmap_reg_range(0x7400, 0x7401),
+ regmap_reg_range(0x7403, 0x7403),
+ regmap_reg_range(0x7410, 0x7417),
+ regmap_reg_range(0x7420, 0x7423),
+ regmap_reg_range(0x7500, 0x7507),
+ regmap_reg_range(0x7600, 0x7613),
+ regmap_reg_range(0x7800, 0x780f),
+ regmap_reg_range(0x7820, 0x7827),
+ regmap_reg_range(0x7830, 0x7837),
+ regmap_reg_range(0x7840, 0x784b),
+ regmap_reg_range(0x7900, 0x7907),
+ regmap_reg_range(0x7914, 0x791b),
+ regmap_reg_range(0x7920, 0x7920),
+ regmap_reg_range(0x7923, 0x7927),
+ regmap_reg_range(0x7a00, 0x7a03),
+ regmap_reg_range(0x7a04, 0x7a07),
+ regmap_reg_range(0x7b00, 0x7b01),
+ regmap_reg_range(0x7b04, 0x7b04),
+ regmap_reg_range(0x7c00, 0x7c05),
+ regmap_reg_range(0x7c08, 0x7c1b),
+};
+
+static const struct regmap_access_table ksz9477_register_set = {
+ .yes_ranges = ksz9477_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz9477_valid_regs),
+};
+
+static const struct regmap_range ksz9896_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x0010, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0103, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x0127),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x030b),
+ regmap_reg_range(0x0310, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033b),
+ regmap_reg_range(0x033e, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1020),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1115),
+ regmap_reg_range(0x111a, 0x111f),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1612),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1820, 0x1827),
+ regmap_reg_range(0x1830, 0x1837),
+ regmap_reg_range(0x1840, 0x184b),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x1915),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a07),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2020),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2115),
+ regmap_reg_range(0x211a, 0x211f),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2612),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2820, 0x2827),
+ regmap_reg_range(0x2830, 0x2837),
+ regmap_reg_range(0x2840, 0x284b),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x2915),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a07),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3020),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3100, 0x3115),
+ regmap_reg_range(0x311a, 0x311f),
+ regmap_reg_range(0x3122, 0x3127),
+ regmap_reg_range(0x312a, 0x312b),
+ regmap_reg_range(0x3136, 0x3139),
+ regmap_reg_range(0x313e, 0x313f),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3612),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3820, 0x3827),
+ regmap_reg_range(0x3830, 0x3837),
+ regmap_reg_range(0x3840, 0x384b),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x3915),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a07),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+
+ /* port 4 */
+ regmap_reg_range(0x4000, 0x4001),
+ regmap_reg_range(0x4013, 0x4013),
+ regmap_reg_range(0x4017, 0x4017),
+ regmap_reg_range(0x401b, 0x401b),
+ regmap_reg_range(0x401f, 0x4020),
+ regmap_reg_range(0x4030, 0x4030),
+ regmap_reg_range(0x4100, 0x4115),
+ regmap_reg_range(0x411a, 0x411f),
+ regmap_reg_range(0x4122, 0x4127),
+ regmap_reg_range(0x412a, 0x412b),
+ regmap_reg_range(0x4136, 0x4139),
+ regmap_reg_range(0x413e, 0x413f),
+ regmap_reg_range(0x4400, 0x4401),
+ regmap_reg_range(0x4403, 0x4403),
+ regmap_reg_range(0x4410, 0x4417),
+ regmap_reg_range(0x4420, 0x4423),
+ regmap_reg_range(0x4500, 0x4507),
+ regmap_reg_range(0x4600, 0x4612),
+ regmap_reg_range(0x4800, 0x480f),
+ regmap_reg_range(0x4820, 0x4827),
+ regmap_reg_range(0x4830, 0x4837),
+ regmap_reg_range(0x4840, 0x484b),
+ regmap_reg_range(0x4900, 0x4907),
+ regmap_reg_range(0x4914, 0x4915),
+ regmap_reg_range(0x4a00, 0x4a03),
+ regmap_reg_range(0x4a04, 0x4a07),
+ regmap_reg_range(0x4b00, 0x4b01),
+ regmap_reg_range(0x4b04, 0x4b04),
+
+ /* port 5 */
+ regmap_reg_range(0x5000, 0x5001),
+ regmap_reg_range(0x5013, 0x5013),
+ regmap_reg_range(0x5017, 0x5017),
+ regmap_reg_range(0x501b, 0x501b),
+ regmap_reg_range(0x501f, 0x5020),
+ regmap_reg_range(0x5030, 0x5030),
+ regmap_reg_range(0x5100, 0x5115),
+ regmap_reg_range(0x511a, 0x511f),
+ regmap_reg_range(0x5122, 0x5127),
+ regmap_reg_range(0x512a, 0x512b),
+ regmap_reg_range(0x5136, 0x5139),
+ regmap_reg_range(0x513e, 0x513f),
+ regmap_reg_range(0x5400, 0x5401),
+ regmap_reg_range(0x5403, 0x5403),
+ regmap_reg_range(0x5410, 0x5417),
+ regmap_reg_range(0x5420, 0x5423),
+ regmap_reg_range(0x5500, 0x5507),
+ regmap_reg_range(0x5600, 0x5612),
+ regmap_reg_range(0x5800, 0x580f),
+ regmap_reg_range(0x5820, 0x5827),
+ regmap_reg_range(0x5830, 0x5837),
+ regmap_reg_range(0x5840, 0x584b),
+ regmap_reg_range(0x5900, 0x5907),
+ regmap_reg_range(0x5914, 0x5915),
+ regmap_reg_range(0x5a00, 0x5a03),
+ regmap_reg_range(0x5a04, 0x5a07),
+ regmap_reg_range(0x5b00, 0x5b01),
+ regmap_reg_range(0x5b04, 0x5b04),
+
+ /* port 6 */
+ regmap_reg_range(0x6000, 0x6001),
+ regmap_reg_range(0x6013, 0x6013),
+ regmap_reg_range(0x6017, 0x6017),
+ regmap_reg_range(0x601b, 0x601b),
+ regmap_reg_range(0x601f, 0x6020),
+ regmap_reg_range(0x6030, 0x6030),
+ regmap_reg_range(0x6100, 0x6115),
+ regmap_reg_range(0x611a, 0x611f),
+ regmap_reg_range(0x6122, 0x6127),
+ regmap_reg_range(0x612a, 0x612b),
+ regmap_reg_range(0x6136, 0x6139),
+ regmap_reg_range(0x613e, 0x613f),
+ regmap_reg_range(0x6300, 0x6301),
+ regmap_reg_range(0x6400, 0x6401),
+ regmap_reg_range(0x6403, 0x6403),
+ regmap_reg_range(0x6410, 0x6417),
+ regmap_reg_range(0x6420, 0x6423),
+ regmap_reg_range(0x6500, 0x6507),
+ regmap_reg_range(0x6600, 0x6612),
+ regmap_reg_range(0x6800, 0x680f),
+ regmap_reg_range(0x6820, 0x6827),
+ regmap_reg_range(0x6830, 0x6837),
+ regmap_reg_range(0x6840, 0x684b),
+ regmap_reg_range(0x6900, 0x6907),
+ regmap_reg_range(0x6914, 0x6915),
+ regmap_reg_range(0x6a00, 0x6a03),
+ regmap_reg_range(0x6a04, 0x6a07),
+ regmap_reg_range(0x6b00, 0x6b01),
+ regmap_reg_range(0x6b04, 0x6b04),
+};
+
+static const struct regmap_access_table ksz9896_register_set = {
+ .yes_ranges = ksz9896_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz9896_valid_regs),
+};
+
+const struct ksz_chip_data ksz_switch_chips[] = {
+ [KSZ8563] = {
+ .chip_id = KSZ8563_CHIP_ID,
+ .dev_name = "KSZ8563",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ .port_nirqs = 3,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
+ .tc_ets_supported = true,
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .supports_rgmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ .gbit_capable = {false, false, true},
+ .wr_table = &ksz8563_register_set,
+ .rd_table = &ksz8563_register_set,
+ },
+
+ [KSZ8795] = {
+ .chip_id = KSZ8795_CHIP_ID,
+ .dev_name = "KSZ8795",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .ops = &ksz8_dev_ops,
+ .ksz87xx_eee_link_erratum = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
+ .xmii_ctrl0 = ksz8795_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
+ [KSZ8794] = {
+	/* WARNING
+	 * =======
+	 * KSZ8794 is similar to KSZ8795, except that the port map
+	 * contains a gap between the external and CPU ports, i.e.
+	 * the port map is NOT contiguous. The per-port register
+	 * map is shifted accordingly: registers at offset 0x40
+	 * are NOT used on KSZ8794, while they ARE used on KSZ8795
+	 * for external port 3.
+	 *           external  cpu
+	 * KSZ8794   0,1,2      4
+	 * KSZ8795   0,1,2,3    4
+	 * KSZ8765   0,1,2,3    4
+	 * port_cnt is configured as 5, even though only 4 ports exist
+	 */
+ .chip_id = KSZ8794_CHIP_ID,
+ .dev_name = "KSZ8794",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .ops = &ksz8_dev_ops,
+ .ksz87xx_eee_link_erratum = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
+ .xmii_ctrl0 = ksz8795_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, false, false},
+ },
+
+ [KSZ8765] = {
+ .chip_id = KSZ8765_CHIP_ID,
+ .dev_name = "KSZ8765",
+ .num_vlans = 4096,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
+ .ops = &ksz8_dev_ops,
+ .ksz87xx_eee_link_erratum = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8795_regs,
+ .masks = ksz8795_masks,
+ .shifts = ksz8795_shifts,
+ .xmii_ctrl0 = ksz8795_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
+ [KSZ8830] = {
+ .chip_id = KSZ8830_CHIP_ID,
+ .dev_name = "KSZ8863/KSZ8873",
+ .num_vlans = 16,
+ .num_alus = 0,
+ .num_statics = 8,
+ .cpu_ports = 0x4, /* can be configured as cpu port */
+ .port_cnt = 3,
+ .num_tx_queues = 4,
+ .ops = &ksz8_dev_ops,
+ .mib_names = ksz88xx_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz8863_regs,
+ .masks = ksz8863_masks,
+ .shifts = ksz8863_shifts,
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ },
+
+ [KSZ9477] = {
+ .chip_id = KSZ9477_CHIP_ID,
+ .dev_name = "KSZ9477",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .port_nirqs = 4,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
+ .tc_ets_supported = true,
+ .ops = &ksz9477_dev_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, false},
+ .supports_rmii = {false, false, false, false,
+ false, true, false},
+ .supports_rgmii = {false, false, false, false,
+ false, true, false},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ .wr_table = &ksz9477_register_set,
+ .rd_table = &ksz9477_register_set,
+ },
+
+ [KSZ9896] = {
+ .chip_id = KSZ9896_CHIP_ID,
+ .dev_name = "KSZ9896",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x3F, /* can be configured as cpu port */
+ .port_cnt = 6, /* total physical port count */
+ .port_nirqs = 2,
+ .num_tx_queues = 4,
+ .ops = &ksz9477_dev_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true},
+ .supports_rmii = {false, false, false, false,
+ false, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true},
+ .internal_phy = {true, true, true, true,
+ true, false},
+ .gbit_capable = {true, true, true, true, true, true},
+ .wr_table = &ksz9896_register_set,
+ .rd_table = &ksz9896_register_set,
+ },
+
+ [KSZ9897] = {
+ .chip_id = KSZ9897_CHIP_ID,
+ .dev_name = "KSZ9897",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .port_nirqs = 2,
+ .num_tx_queues = 4,
+ .ops = &ksz9477_dev_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ },
+
+ [KSZ9893] = {
+ .chip_id = KSZ9893_CHIP_ID,
+ .dev_name = "KSZ9893",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ .port_nirqs = 2,
+ .num_tx_queues = 4,
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .supports_rgmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ .gbit_capable = {true, true, true},
+ },
+
+ [KSZ9563] = {
+ .chip_id = KSZ9563_CHIP_ID,
+ .dev_name = "KSZ9563",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ .port_nirqs = 3,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
+ .tc_ets_supported = true,
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .supports_rgmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ .gbit_capable = {true, true, true},
+ },
+
+ [KSZ9567] = {
+ .chip_id = KSZ9567_CHIP_ID,
+ .dev_name = "KSZ9567",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ .port_nirqs = 3,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
+ .tc_ets_supported = true,
+ .ops = &ksz9477_dev_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true, true},
+ .supports_rmii = {false, false, false, false,
+ false, true, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true, true},
+ .internal_phy = {true, true, true, true,
+ true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ },
+
+ [LAN9370] = {
+ .chip_id = LAN9370_CHIP_ID,
+ .dev_name = "LAN9370",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x10, /* can be configured as cpu port */
+ .port_cnt = 5, /* total physical port count */
+ .port_nirqs = 6,
+ .num_tx_queues = 8,
+ .tc_cbs_supported = true,
+ .tc_ets_supported = true,
+ .ops = &lan937x_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true},
+ .supports_rmii = {false, false, false, false, true},
+ .supports_rgmii = {false, false, false, false, true},
+ .internal_phy = {true, true, true, true, false},
+ },
+
+ [LAN9371] = {
+ .chip_id = LAN9371_CHIP_ID,
+ .dev_name = "LAN9371",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x30, /* can be configured as cpu port */
+ .port_cnt = 6, /* total physical port count */
+ .port_nirqs = 6,
+ .num_tx_queues = 8,
+ .tc_cbs_supported = true,
+ .tc_ets_supported = true,
+ .ops = &lan937x_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false, true, true},
+ .supports_rmii = {false, false, false, false, true, true},
+ .supports_rgmii = {false, false, false, false, true, true},
+ .internal_phy = {true, true, true, true, false, false},
+ },
+
+ [LAN9372] = {
+ .chip_id = LAN9372_CHIP_ID,
+ .dev_name = "LAN9372",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x30, /* can be configured as cpu port */
+ .port_cnt = 8, /* total physical port count */
+ .port_nirqs = 6,
+ .num_tx_queues = 8,
+ .tc_cbs_supported = true,
+ .tc_ets_supported = true,
+ .ops = &lan937x_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rmii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rgmii = {false, false, false, false,
+ true, true, false, false},
+ .internal_phy = {true, true, true, true,
+ false, false, true, true},
+ },
+
+ [LAN9373] = {
+ .chip_id = LAN9373_CHIP_ID,
+ .dev_name = "LAN9373",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x38, /* can be configured as cpu port */
+ .port_cnt = 5, /* total physical port count */
+ .port_nirqs = 6,
+ .num_tx_queues = 8,
+ .tc_cbs_supported = true,
+ .tc_ets_supported = true,
+ .ops = &lan937x_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rmii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rgmii = {false, false, false, false,
+ true, true, false, false},
+ .internal_phy = {true, true, true, false,
+ false, false, true, true},
+ },
+
+ [LAN9374] = {
+ .chip_id = LAN9374_CHIP_ID,
+ .dev_name = "LAN9374",
+ .num_vlans = 4096,
+ .num_alus = 1024,
+ .num_statics = 256,
+ .cpu_ports = 0x30, /* can be configured as cpu port */
+ .port_cnt = 8, /* total physical port count */
+ .port_nirqs = 6,
+ .num_tx_queues = 8,
+ .tc_cbs_supported = true,
+ .tc_ets_supported = true,
+ .ops = &lan937x_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = lan937x_masks,
+ .shifts = lan937x_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rmii = {false, false, false, false,
+ true, true, false, false},
+ .supports_rgmii = {false, false, false, false,
+ true, true, false, false},
+ .internal_phy = {true, true, true, true,
+ false, false, true, true},
+ },
+};
+EXPORT_SYMBOL_GPL(ksz_switch_chips);
+
+static const struct ksz_chip_data *ksz_lookup_info(unsigned int prod_num)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
+ const struct ksz_chip_data *chip = &ksz_switch_chips[i];
+
+ if (chip->chip_id == prod_num)
+ return chip;
+ }
+
+ return NULL;
+}
+
+static int ksz_check_device_id(struct ksz_device *dev)
+{
+ const struct ksz_chip_data *dt_chip_data;
+
+ dt_chip_data = of_device_get_match_data(dev->dev);
+
+	/* Check that the Device Tree match data and the detected chip ID agree */
+ if (dt_chip_data->chip_id != dev->chip_id) {
+ dev_err(dev->dev,
+ "Device tree specifies chip %s but found %s, please fix it!\n",
+ dt_chip_data->dev_name, dev->info->dev_name);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void ksz_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct ksz_device *dev = ds->priv;
+
+ config->legacy_pre_march2020 = false;
+
+ if (dev->info->supports_mii[port])
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+
+ if (dev->info->supports_rmii[port])
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+
+ if (dev->info->supports_rgmii[port])
+ phy_interface_set_rgmii(config->supported_interfaces);
+
+ if (dev->info->internal_phy[port]) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ /* Compatibility for phylib's default interface type when the
+ * phy-mode property is absent
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
+
+ if (dev->dev_ops->get_caps)
+ dev->dev_ops->get_caps(dev, port, config);
+}
+
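
The per-port boolean arrays in the chip data table (supports_mii, supports_rmii, supports_rgmii, internal_phy) are consumed by ksz_phylink_get_caps() above to build the set of supported interface modes. A minimal stand-alone sketch of that translation follows; the plain integer bitmask and the IF_* values are illustrative assumptions, the kernel instead sets PHY_INTERFACE_MODE_* bits in a bitmap.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative interface-mode bits (not the kernel's PHY_INTERFACE_MODE_*). */
enum { IF_MII = 1 << 0, IF_RMII = 1 << 1, IF_RGMII = 1 << 2, IF_INTERNAL = 1 << 3 };

struct port_caps {
	bool supports_mii, supports_rmii, supports_rgmii, internal_phy;
};

/* Model of ksz_phylink_get_caps(): translate the per-port booleans from
 * the chip data table into a set of supported interface modes.
 */
static unsigned int supported_interfaces(const struct port_caps *c)
{
	unsigned int m = 0;

	if (c->supports_mii)
		m |= IF_MII;
	if (c->supports_rmii)
		m |= IF_RMII;
	if (c->supports_rgmii)
		m |= IF_RGMII;
	if (c->internal_phy)
		m |= IF_INTERNAL;
	return m;
}

int main(void)
{
	/* e.g. a CPU-facing port: external MII/RMII/RGMII, no internal PHY */
	struct port_caps cpu_port = { true, true, true, false };

	printf("supported mask: 0x%x\n", supported_interfaces(&cpu_port));
	return 0;
}
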
+void ksz_r_mib_stats64(struct ksz_device *dev, int port)
+{
+ struct ethtool_pause_stats *pstats;
+ struct rtnl_link_stats64 *stats;
+ struct ksz_stats_raw *raw;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+ stats = &mib->stats64;
+ pstats = &mib->pause_stats;
+ raw = (struct ksz_stats_raw *)mib->counters;
+
+ spin_lock(&mib->stats64_lock);
+
+ stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast +
+ raw->rx_pause;
+ stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast +
+ raw->tx_pause;
+
+	/* HW counters count bytes plus FCS, which is not acceptable
+	 * for the rtnl_link_stats64 interface
+	 */
+ stats->rx_bytes = raw->rx_total - stats->rx_packets * ETH_FCS_LEN;
+ stats->tx_bytes = raw->tx_total - stats->tx_packets * ETH_FCS_LEN;
+
+ stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
+ raw->rx_oversize;
+
+ stats->rx_crc_errors = raw->rx_crc_err;
+ stats->rx_frame_errors = raw->rx_align_err;
+ stats->rx_dropped = raw->rx_discards;
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_dropped;
+
+ stats->tx_window_errors = raw->tx_late_col;
+ stats->tx_fifo_errors = raw->tx_discards;
+ stats->tx_aborted_errors = raw->tx_exc_col;
+ stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
+ stats->tx_aborted_errors;
+
+ stats->multicast = raw->rx_mcast;
+ stats->collisions = raw->tx_total_col;
+
+ pstats->tx_pause_frames = raw->tx_pause;
+ pstats->rx_pause_frames = raw->rx_pause;
+
+ spin_unlock(&mib->stats64_lock);
+}
+
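
A quick worked example of the FCS adjustment above, with made-up counter values:

#include <stdint.h>
#include <stdio.h>

#define ETH_FCS_LEN 4	/* bytes of frame check sequence per frame */

int main(void)
{
	/* Example counter snapshot (illustrative numbers only). */
	uint64_t rx_total = 1000000;	/* bytes as counted by HW, incl. FCS */
	uint64_t rx_packets = 1500;	/* bcast + mcast + ucast + pause frames */

	/* Same adjustment ksz_r_mib_stats64() applies: the hardware byte
	 * counters include the 4-byte FCS, rtnl_link_stats64 must not.
	 */
	uint64_t rx_bytes = rx_total - rx_packets * ETH_FCS_LEN;

	printf("rx_bytes reported to the stack: %llu\n",
	       (unsigned long long)rx_bytes);	/* 994000 */
	return 0;
}
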
+void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port)
+{
+ struct ethtool_pause_stats *pstats;
+ struct rtnl_link_stats64 *stats;
+ struct ksz88xx_stats_raw *raw;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+ stats = &mib->stats64;
+ pstats = &mib->pause_stats;
+ raw = (struct ksz88xx_stats_raw *)mib->counters;
+
+ spin_lock(&mib->stats64_lock);
+
+ stats->rx_packets = raw->rx_bcast + raw->rx_mcast + raw->rx_ucast +
+ raw->rx_pause;
+ stats->tx_packets = raw->tx_bcast + raw->tx_mcast + raw->tx_ucast +
+ raw->tx_pause;
+
+	/* HW counters count bytes plus FCS, which is not acceptable
+	 * for the rtnl_link_stats64 interface
+	 */
+ stats->rx_bytes = raw->rx + raw->rx_hi - stats->rx_packets * ETH_FCS_LEN;
+ stats->tx_bytes = raw->tx + raw->tx_hi - stats->tx_packets * ETH_FCS_LEN;
+
+ stats->rx_length_errors = raw->rx_undersize + raw->rx_fragments +
+ raw->rx_oversize;
+
+ stats->rx_crc_errors = raw->rx_crc_err;
+ stats->rx_frame_errors = raw->rx_align_err;
+ stats->rx_dropped = raw->rx_discards;
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors + stats->rx_dropped;
+
+ stats->tx_window_errors = raw->tx_late_col;
+ stats->tx_fifo_errors = raw->tx_discards;
+ stats->tx_aborted_errors = raw->tx_exc_col;
+ stats->tx_errors = stats->tx_window_errors + stats->tx_fifo_errors +
+ stats->tx_aborted_errors;
+
+ stats->multicast = raw->rx_mcast;
+ stats->collisions = raw->tx_total_col;
+
+ pstats->tx_pause_frames = raw->tx_pause;
+ pstats->rx_pause_frames = raw->rx_pause;
+
+ spin_unlock(&mib->stats64_lock);
+}
+
+static void ksz_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+
+ spin_lock(&mib->stats64_lock);
+ memcpy(s, &mib->stats64, sizeof(*s));
+ spin_unlock(&mib->stats64_lock);
+}
+
+static void ksz_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port_mib *mib;
+
+ mib = &dev->ports[port].mib;
+
+ spin_lock(&mib->stats64_lock);
+ memcpy(pause_stats, &mib->pause_stats, sizeof(*pause_stats));
+ spin_unlock(&mib->stats64_lock);
+}
+
+static void ksz_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf)
+{
+ struct ksz_device *dev = ds->priv;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < dev->info->mib_cnt; i++) {
+ memcpy(buf + i * ETH_GSTRING_LEN,
+ dev->info->mib_names[i].string, ETH_GSTRING_LEN);
+ }
+}
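
The copy loop above follows the usual ethtool convention: each counter name occupies one fixed-width ETH_GSTRING_LEN slot in a flat string buffer. A minimal stand-alone sketch of that packing (the counter names and GSTRING_LEN value below are chosen for illustration):

#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32	/* plays the role of ETH_GSTRING_LEN here */

int main(void)
{
	const char *names[] = { "rx_hi", "rx_undersize", "rx_fragments" };
	char buf[sizeof(names) / sizeof(names[0])][GSTRING_LEN];

	/* Each statistic name occupies one fixed-width slot in the flat
	 * string buffer handed to ethtool, as in ksz_get_strings().
	 */
	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		memset(buf[i], 0, GSTRING_LEN);
		strncpy(buf[i], names[i], GSTRING_LEN - 1);
	}

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		printf("slot %zu: %s\n", i, buf[i]);
	return 0;
}
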
-void ksz_update_port_member(struct ksz_device *dev, int port)
+static void ksz_update_port_member(struct ksz_device *dev, int port)
{
struct ksz_port *p = &dev->ports[port];
struct dsa_switch *ds = dev->ds;
u8 port_member = 0, cpu_port;
const struct dsa_port *dp;
- int i;
+ int i, j;
if (!dsa_is_user_port(ds, port))
return;
@@ -43,21 +1775,429 @@ void ksz_update_port_member(struct ksz_device *dev, int port)
continue;
if (port == i)
continue;
- if (!dp->bridge_dev || dp->bridge_dev != other_dp->bridge_dev)
+ if (!dsa_port_bridge_same(dp, other_dp))
+ continue;
+ if (other_p->stp_state != BR_STATE_FORWARDING)
continue;
- if (other_p->stp_state == BR_STATE_FORWARDING &&
- p->stp_state == BR_STATE_FORWARDING) {
+ if (p->stp_state == BR_STATE_FORWARDING) {
val |= BIT(port);
port_member |= BIT(i);
}
+		/* Retain port [i]'s relationship to ports other than [port] */
+ for (j = 0; j < ds->num_ports; j++) {
+ const struct dsa_port *third_dp;
+ struct ksz_port *third_p;
+
+ if (j == i)
+ continue;
+ if (j == port)
+ continue;
+ if (!dsa_is_user_port(ds, j))
+ continue;
+ third_p = &dev->ports[j];
+ if (third_p->stp_state != BR_STATE_FORWARDING)
+ continue;
+ third_dp = dsa_to_port(ds, j);
+ if (dsa_port_bridge_same(other_dp, third_dp))
+ val |= BIT(j);
+ }
+
dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
}
dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
}
-EXPORT_SYMBOL_GPL(ksz_update_port_member);
+
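
The membership rule applied in ksz_update_port_member() above can be summarised as: two user ports may forward to each other only when they share a bridge and both are in STP forwarding state, and every port always reaches the CPU port. A small stand-alone model of that rule follows; the struct, the bridge_id field and the CPU-port encoding are illustrative assumptions, not the driver's data structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_PORTS 3
#define CPU_PORT_BIT (1u << NUM_PORTS)	/* illustrative: CPU port above user ports */

/* Illustrative per-port state, not the kernel's struct ksz_port. */
struct port_state {
	int bridge_id;		/* -1 when standalone */
	bool forwarding;	/* STP forwarding state */
};

/* Compute which ports @port may forward to, following the rule above. */
static uint32_t port_member_mask(const struct port_state *p, int port)
{
	uint32_t mask = CPU_PORT_BIT;

	if (!p[port].forwarding)
		return mask;

	for (int i = 0; i < NUM_PORTS; i++) {
		if (i == port)
			continue;
		if (p[i].bridge_id >= 0 &&
		    p[i].bridge_id == p[port].bridge_id && p[i].forwarding)
			mask |= 1u << i;
	}
	return mask;
}

int main(void)
{
	struct port_state ports[NUM_PORTS] = {
		{ .bridge_id = 0, .forwarding = true },
		{ .bridge_id = 0, .forwarding = true },
		{ .bridge_id = -1, .forwarding = false },
	};

	for (int i = 0; i < NUM_PORTS; i++)
		printf("port %d member mask: 0x%x\n", i,
		       (unsigned)port_member_mask(ports, i));
	return 0;
}
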
+static int ksz_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ksz_device *dev = bus->priv;
+ u16 val;
+ int ret;
+
+ ret = dev->dev_ops->r_phy(dev, addr, regnum, &val);
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
+static int ksz_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+
+ return dev->dev_ops->w_phy(dev, addr, regnum, val);
+}
+
+static int ksz_irq_phy_setup(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ int phy;
+ int irq;
+ int ret;
+
+ for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++) {
+ if (BIT(phy) & ds->phys_mii_mask) {
+ irq = irq_find_mapping(dev->ports[phy].pirq.domain,
+ PORT_SRC_PHY_INT);
+ if (irq < 0) {
+ ret = irq;
+ goto out;
+ }
+ ds->slave_mii_bus->irq[phy] = irq;
+ }
+ }
+ return 0;
+out:
+ while (phy--)
+ if (BIT(phy) & ds->phys_mii_mask)
+ irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
+
+ return ret;
+}
+
+static void ksz_irq_phy_free(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ int phy;
+
+ for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++)
+ if (BIT(phy) & ds->phys_mii_mask)
+ irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
+}
+
+static int ksz_mdio_register(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ struct device_node *mdio_np;
+ struct mii_bus *bus;
+ int ret;
+
+ mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio");
+ if (!mdio_np)
+ return 0;
+
+ bus = devm_mdiobus_alloc(ds->dev);
+ if (!bus) {
+ of_node_put(mdio_np);
+ return -ENOMEM;
+ }
+
+ bus->priv = dev;
+ bus->read = ksz_sw_mdio_read;
+ bus->write = ksz_sw_mdio_write;
+ bus->name = "ksz slave smi";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ bus->parent = ds->dev;
+ bus->phy_mask = ~ds->phys_mii_mask;
+
+ ds->slave_mii_bus = bus;
+
+ if (dev->irq > 0) {
+ ret = ksz_irq_phy_setup(dev);
+ if (ret) {
+ of_node_put(mdio_np);
+ return ret;
+ }
+ }
+
+ ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np);
+ if (ret) {
+ dev_err(ds->dev, "unable to register MDIO bus %s\n",
+ bus->id);
+ if (dev->irq > 0)
+ ksz_irq_phy_free(dev);
+ }
+
+ of_node_put(mdio_np);
+
+ return ret;
+}
+
+static void ksz_irq_mask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked |= BIT(d->hwirq);
+}
+
+static void ksz_irq_unmask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked &= ~BIT(d->hwirq);
+}
+
+static void ksz_irq_bus_lock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&kirq->dev->lock_irq);
+}
+
+static void ksz_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+ struct ksz_device *dev = kirq->dev;
+ int ret;
+
+ ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
+ if (ret)
+ dev_err(dev->dev, "failed to change IRQ mask\n");
+
+ mutex_unlock(&dev->lock_irq);
+}
+
+static const struct irq_chip ksz_irq_chip = {
+ .name = "ksz-irq",
+ .irq_mask = ksz_irq_mask,
+ .irq_unmask = ksz_irq_unmask,
+ .irq_bus_lock = ksz_irq_bus_lock,
+ .irq_bus_sync_unlock = ksz_irq_bus_sync_unlock,
+};
+
+static int ksz_irq_domain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &ksz_irq_chip, handle_level_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ksz_irq_domain_ops = {
+ .map = ksz_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void ksz_irq_free(struct ksz_irq *kirq)
+{
+ int irq, virq;
+
+ free_irq(kirq->irq_num, kirq);
+
+ for (irq = 0; irq < kirq->nirqs; irq++) {
+ virq = irq_find_mapping(kirq->domain, irq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(kirq->domain);
+}
+
+static irqreturn_t ksz_irq_thread_fn(int irq, void *dev_id)
+{
+ struct ksz_irq *kirq = dev_id;
+ unsigned int nhandled = 0;
+ struct ksz_device *dev;
+ unsigned int sub_irq;
+ u8 data;
+ int ret;
+ u8 n;
+
+ dev = kirq->dev;
+
+ /* Read interrupt status register */
+ ret = ksz_read8(dev, kirq->reg_status, &data);
+ if (ret)
+ goto out;
+
+ for (n = 0; n < kirq->nirqs; ++n) {
+ if (data & BIT(n)) {
+ sub_irq = irq_find_mapping(kirq->domain, n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
+ }
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
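
The threaded handler above demultiplexes one status byte into nested interrupts: each set bit dispatches one sub-interrupt, and IRQ_NONE is returned when nothing was pending. That pattern, sketched in isolation (the handler type below merely stands in for handle_nested_irq() and is purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* Illustrative handler type standing in for handle_nested_irq(). */
typedef void (*sub_handler_t)(unsigned int hwirq);

static void demo_handler(unsigned int hwirq)
{
	printf("dispatching sub-interrupt %u\n", hwirq);
}

/* Model of the demux loop in ksz_irq_thread_fn(): each set bit in the
 * status byte corresponds to one nested interrupt line.  Returns how
 * many sub-interrupts were dispatched (0 maps to IRQ_NONE).
 */
static unsigned int demux_status(uint8_t status, unsigned int nirqs,
				 sub_handler_t handler)
{
	unsigned int handled = 0;

	for (unsigned int n = 0; n < nirqs; n++) {
		if (status & (1u << n)) {
			handler(n);
			handled++;
		}
	}
	return handled;
}

int main(void)
{
	unsigned int n = demux_status(0x05, 8, demo_handler);	/* bits 0 and 2 */

	printf("%u sub-interrupts handled\n", n);
	return 0;
}
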
+static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
+{
+ int ret, n;
+
+ kirq->dev = dev;
+ kirq->masked = ~0;
+
+ kirq->domain = irq_domain_add_simple(dev->dev->of_node, kirq->nirqs, 0,
+ &ksz_irq_domain_ops, kirq);
+ if (!kirq->domain)
+ return -ENOMEM;
+
+ for (n = 0; n < kirq->nirqs; n++)
+ irq_create_mapping(kirq->domain, n);
+
+ ret = request_threaded_irq(kirq->irq_num, NULL, ksz_irq_thread_fn,
+ IRQF_ONESHOT, kirq->name, kirq);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ ksz_irq_free(kirq);
+
+ return ret;
+}
+
+static int ksz_girq_setup(struct ksz_device *dev)
+{
+ struct ksz_irq *girq = &dev->girq;
+
+ girq->nirqs = dev->info->port_cnt;
+ girq->reg_mask = REG_SW_PORT_INT_MASK__1;
+ girq->reg_status = REG_SW_PORT_INT_STATUS__1;
+ snprintf(girq->name, sizeof(girq->name), "global_port_irq");
+
+ girq->irq_num = dev->irq;
+
+ return ksz_irq_common_setup(dev, girq);
+}
+
+static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
+{
+ struct ksz_irq *pirq = &dev->ports[p].pirq;
+
+ pirq->nirqs = dev->info->port_nirqs;
+ pirq->reg_mask = dev->dev_ops->get_port_addr(p, REG_PORT_INT_MASK);
+ pirq->reg_status = dev->dev_ops->get_port_addr(p, REG_PORT_INT_STATUS);
+ snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p);
+
+ pirq->irq_num = irq_find_mapping(dev->girq.domain, p);
+ if (pirq->irq_num < 0)
+ return pirq->irq_num;
+
+ return ksz_irq_common_setup(dev, pirq);
+}
+
+static int ksz_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+ struct ksz_port *p;
+ const u16 *regs;
+ int ret;
+
+ regs = dev->info->regs;
+
+ dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
+ dev->info->num_vlans, GFP_KERNEL);
+ if (!dev->vlan_cache)
+ return -ENOMEM;
+
+ ret = dev->dev_ops->reset(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+ return ret;
+ }
+
+	/* set broadcast storm protection rate to 10% */
+ regmap_update_bits(dev->regmap[1], regs[S_BROADCAST_CTRL],
+ BROADCAST_STORM_RATE,
+ (BROADCAST_STORM_VALUE *
+ BROADCAST_STORM_PROT_RATE) / 100);
+
+ dev->dev_ops->config_cpu_port(ds);
+
+ dev->dev_ops->enable_stp_addr(dev);
+
+ ds->num_tx_queues = dev->info->num_tx_queues;
+
+ regmap_update_bits(dev->regmap[0], regs[S_MULTICAST_CTRL],
+ MULTICAST_STORM_DISABLE, MULTICAST_STORM_DISABLE);
+
+ ksz_init_mib_timer(dev);
+
+ ds->configure_vlan_while_not_filtering = false;
+
+ if (dev->dev_ops->setup) {
+ ret = dev->dev_ops->setup(ds);
+ if (ret)
+ return ret;
+ }
+
+	/* Start with learning disabled on standalone user ports, and enabled
+	 * on the CPU port. In the absence of finer-grained mechanisms, learning
+	 * on the CPU port will avoid flooding bridge-local addresses onto the
+	 * network in some cases.
+	 */
+ p = &dev->ports[dev->cpu_port];
+ p->learning = true;
+
+ if (dev->irq > 0) {
+ ret = ksz_girq_setup(dev);
+ if (ret)
+ return ret;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ ret = ksz_pirq_setup(dev, dp->index);
+ if (ret)
+ goto out_girq;
+
+ ret = ksz_ptp_irq_setup(ds, dp->index);
+ if (ret)
+ goto out_pirq;
+ }
+ }
+
+ ret = ksz_ptp_clock_register(ds);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register PTP clock: %d\n", ret);
+ goto out_ptpirq;
+ }
+
+ ret = ksz_mdio_register(dev);
+ if (ret < 0) {
+		dev_err(dev->dev, "failed to register the MDIO bus\n");
+ goto out_ptp_clock_unregister;
+ }
+
+ /* start switch */
+ regmap_update_bits(dev->regmap[0], regs[S_START_CTRL],
+ SW_START, SW_START);
+
+ return 0;
+
+out_ptp_clock_unregister:
+ ksz_ptp_clock_unregister(ds);
+out_ptpirq:
+ if (dev->irq > 0)
+ dsa_switch_for_each_user_port(dp, dev->ds)
+ ksz_ptp_irq_free(ds, dp->index);
+out_pirq:
+ if (dev->irq > 0)
+ dsa_switch_for_each_user_port(dp, dev->ds)
+ ksz_irq_free(&dev->ports[dp->index].pirq);
+out_girq:
+ if (dev->irq > 0)
+ ksz_irq_free(&dev->girq);
+
+ return ret;
+}
+
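
ksz_setup() uses the common goto-based unwind pattern: on a failure late in the sequence, only the resources that were already set up are released, in reverse order of acquisition. A toy, self-contained sketch of that shape (the resource names are placeholders mirroring girq/pirq/PTP, not real APIs):

#include <stdbool.h>
#include <stdio.h>

/* Toy resources standing in for the girq/pirq/PTP setup steps. */
static bool acquire(const char *name, bool fail)
{
	if (fail) {
		printf("%s: failed\n", name);
		return false;
	}
	printf("%s: acquired\n", name);
	return true;
}

static void release(const char *name)
{
	printf("%s: released\n", name);
}

/* Same unwind shape as ksz_setup(): tear down only what was already
 * set up, in reverse order of acquisition.
 */
static int setup(void)
{
	if (!acquire("girq", false))
		return -1;
	if (!acquire("pirq", false))
		goto out_girq;
	if (!acquire("ptp", true))	/* simulate a late failure */
		goto out_pirq;
	return 0;

out_pirq:
	release("pirq");
out_girq:
	release("girq");
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}
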
+static void ksz_teardown(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ ksz_ptp_clock_unregister(ds);
+
+ if (dev->irq > 0) {
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ ksz_ptp_irq_free(ds, dp->index);
+
+ ksz_irq_free(&dev->ports[dp->index].pirq);
+ }
+
+ ksz_irq_free(&dev->girq);
+ }
+
+ if (dev->dev_ops->teardown)
+ dev->dev_ops->teardown(ds);
+}
static void port_r_cnt(struct ksz_device *dev, int port)
{
@@ -65,17 +2205,17 @@ static void port_r_cnt(struct ksz_device *dev, int port)
u64 *dropped;
/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->reg_mib_cnt) {
+ while (mib->cnt_ptr < dev->info->reg_mib_cnt) {
dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
&mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
}
/* last one in storage */
- dropped = &mib->counters[dev->mib_cnt];
+ dropped = &mib->counters[dev->info->mib_cnt];
/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
- while (mib->cnt_ptr < dev->mib_cnt) {
+ while (mib->cnt_ptr < dev->info->mib_cnt) {
dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
dropped, &mib->counters[mib->cnt_ptr]);
++mib->cnt_ptr;
@@ -91,7 +2231,7 @@ static void ksz_mib_read_work(struct work_struct *work)
struct ksz_port *p;
int i;
- for (i = 0; i < dev->port_cnt; i++) {
+ for (i = 0; i < dev->info->port_cnt; i++) {
if (dsa_is_unused_port(dev->ds, i))
continue;
@@ -106,10 +2246,14 @@ static void ksz_mib_read_work(struct work_struct *work)
const struct dsa_port *dp = dsa_to_port(dev->ds, i);
if (!netif_carrier_ok(dp->slave))
- mib->cnt_ptr = dev->reg_mib_cnt;
+ mib->cnt_ptr = dev->info->reg_mib_cnt;
}
port_r_cnt(dev, i);
p->read = false;
+
+ if (dev->dev_ops->r_mib_stat64)
+ dev->dev_ops->r_mib_stat64(dev, i);
+
mutex_unlock(&mib->cnt_mutex);
}
@@ -122,34 +2266,59 @@ void ksz_init_mib_timer(struct ksz_device *dev)
INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
- for (i = 0; i < dev->port_cnt; i++)
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ struct ksz_port_mib *mib = &dev->ports[i].mib;
+
dev->dev_ops->port_init_cnt(dev, i);
+
+ mib->cnt_ptr = 0;
+ memset(mib->counters, 0, dev->info->mib_cnt * sizeof(u64));
+ }
}
-EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
-int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
+static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
struct ksz_device *dev = ds->priv;
u16 val = 0xffff;
+ int ret;
- dev->dev_ops->r_phy(dev, addr, reg, &val);
+ ret = dev->dev_ops->r_phy(dev, addr, reg, &val);
+ if (ret)
+ return ret;
return val;
}
-EXPORT_SYMBOL_GPL(ksz_phy_read16);
-int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
+static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
struct ksz_device *dev = ds->priv;
+ int ret;
- dev->dev_ops->w_phy(dev, addr, reg, val);
+ ret = dev->dev_ops->w_phy(dev, addr, reg, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->chip_id == KSZ8830_CHIP_ID) {
+ /* Silicon Errata Sheet (DS80000830A):
+ * Port 1 does not work with LinkMD Cable-Testing.
+ * Port 1 does not respond to received PAUSE control frames.
+ */
+ if (!port)
+ return MICREL_KSZ8_P1_ERRATA;
+ }
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_phy_write16);
-void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface)
+static void ksz_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface)
{
struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
@@ -160,20 +2329,19 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
if (dev->mib_read_interval)
schedule_delayed_work(&dev->mib_read, 0);
}
-EXPORT_SYMBOL_GPL(ksz_mac_link_down);
-int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
+static int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct ksz_device *dev = ds->priv;
if (sset != ETH_SS_STATS)
return 0;
- return dev->mib_cnt;
+ return dev->info->mib_cnt;
}
-EXPORT_SYMBOL_GPL(ksz_sset_count);
-void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
+static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *buf)
{
const struct dsa_port *dp = dsa_to_port(ds, port);
struct ksz_device *dev = ds->priv;
@@ -184,15 +2352,16 @@ void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
/* Only read dropped counters if no link. */
if (!netif_carrier_ok(dp->slave))
- mib->cnt_ptr = dev->reg_mib_cnt;
+ mib->cnt_ptr = dev->info->reg_mib_cnt;
port_r_cnt(dev, port);
- memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64));
+ memcpy(buf, mib->counters, dev->info->mib_cnt * sizeof(u64));
mutex_unlock(&mib->cnt_mutex);
}
-EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
-int ksz_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+static int ksz_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
/* port_stp_state_set() will be called after to put the port in
* appropriate state so there is no need to do anything.
@@ -200,134 +2369,93 @@ int ksz_port_bridge_join(struct dsa_switch *ds, int port,
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
-void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+static void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
{
/* port_stp_state_set() will be called after to put the port in
* forwarding state so there is no need to do anything.
*/
}
-EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);
-void ksz_port_fast_age(struct dsa_switch *ds, int port)
+static void ksz_port_fast_age(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
dev->dev_ops->flush_dyn_mac_table(dev, port);
}
-EXPORT_SYMBOL_GPL(ksz_port_fast_age);
-int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
- void *data)
+static int ksz_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
struct ksz_device *dev = ds->priv;
- int ret = 0;
- u16 i = 0;
- u16 entries = 0;
- u8 timestamp = 0;
- u8 fid;
- u8 member;
- struct alu_struct alu;
- do {
- alu.is_static = false;
- ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
- &member, &timestamp,
- &entries);
- if (!ret && (member & BIT(port))) {
- ret = cb(alu.mac, alu.fid, alu.is_static, data);
- if (ret)
- break;
- }
- i++;
- } while (i < entries);
- if (i >= entries)
- ret = 0;
+ if (!dev->dev_ops->set_ageing_time)
+ return -EOPNOTSUPP;
- return ret;
+ return dev->dev_ops->set_ageing_time(dev, msecs);
}
-EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
-int ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int ksz_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct ksz_device *dev = ds->priv;
- struct alu_struct alu;
- int index;
- int empty = 0;
- alu.port_forward = 0;
- for (index = 0; index < dev->num_statics; index++) {
- if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
- /* Found one already in static MAC table. */
- if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
- alu.fid == mdb->vid)
- break;
- /* Remember the first empty entry. */
- } else if (!empty) {
- empty = index + 1;
- }
- }
+ if (!dev->dev_ops->fdb_add)
+ return -EOPNOTSUPP;
- /* no available entry */
- if (index == dev->num_statics && !empty)
- return -ENOSPC;
+ return dev->dev_ops->fdb_add(dev, port, addr, vid, db);
+}
- /* add entry */
- if (index == dev->num_statics) {
- index = empty - 1;
- memset(&alu, 0, sizeof(alu));
- memcpy(alu.mac, mdb->addr, ETH_ALEN);
- alu.is_static = true;
- }
- alu.port_forward |= BIT(port);
- if (mdb->vid) {
- alu.is_use_fid = true;
+static int ksz_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr,
+ u16 vid, struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
- /* Need a way to map VID to FID. */
- alu.fid = mdb->vid;
- }
- dev->dev_ops->w_sta_mac_table(dev, index, &alu);
+ if (!dev->dev_ops->fdb_del)
+ return -EOPNOTSUPP;
- return 0;
+ return dev->dev_ops->fdb_del(dev, port, addr, vid, db);
}
-EXPORT_SYMBOL_GPL(ksz_port_mdb_add);
-int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+static int ksz_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
{
struct ksz_device *dev = ds->priv;
- struct alu_struct alu;
- int index;
- int ret = 0;
- for (index = 0; index < dev->num_statics; index++) {
- if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
- /* Found one already in static MAC table. */
- if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
- alu.fid == mdb->vid)
- break;
- }
- }
+ if (!dev->dev_ops->fdb_dump)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->fdb_dump(dev, port, cb, data);
+}
- /* no available entry */
- if (index == dev->num_statics)
- goto exit;
+static int ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
- /* clear port */
- alu.port_forward &= ~BIT(port);
- if (!alu.port_forward)
- alu.is_static = false;
- dev->dev_ops->w_sta_mac_table(dev, index, &alu);
+ if (!dev->dev_ops->mdb_add)
+ return -EOPNOTSUPP;
-exit:
- return ret;
+ return dev->dev_ops->mdb_add(dev, port, mdb, db);
+}
+
+static int ksz_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->mdb_del)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->mdb_del(dev, port, mdb, db);
}
-EXPORT_SYMBOL_GPL(ksz_port_mdb_del);
-int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+static int ksz_enable_port(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
{
struct ksz_device *dev = ds->priv;
@@ -343,7 +2471,956 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
return 0;
}
-EXPORT_SYMBOL_GPL(ksz_enable_port);
+
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
+ const u16 *regs;
+ u8 data;
+
+ regs = dev->info->regs;
+
+ ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+
+ p = &dev->ports[port];
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ break;
+ case BR_STATE_LEARNING:
+ data |= PORT_RX_ENABLE;
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ if (!p->learning)
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case BR_STATE_BLOCKING:
+ data |= PORT_LEARN_DISABLE;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
+
+ p->stp_state = state;
+
+ ksz_update_port_member(dev, port);
+}
+
+static int ksz_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~BR_LEARNING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ksz_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p = &dev->ports[port];
+
+ if (flags.mask & BR_LEARNING) {
+ p->learning = !!(flags.val & BR_LEARNING);
+
+ /* Make the change take effect immediately */
+ ksz_port_stp_state_set(ds, port, p->stp_state);
+ }
+
+ return 0;
+}
+
+static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ struct ksz_device *dev = ds->priv;
+ enum dsa_tag_protocol proto = DSA_TAG_PROTO_NONE;
+
+ if (dev->chip_id == KSZ8795_CHIP_ID ||
+ dev->chip_id == KSZ8794_CHIP_ID ||
+ dev->chip_id == KSZ8765_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ8795;
+
+ if (dev->chip_id == KSZ8830_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID ||
+ dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ9563_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ9893;
+
+ if (dev->chip_id == KSZ9477_CHIP_ID ||
+ dev->chip_id == KSZ9896_CHIP_ID ||
+ dev->chip_id == KSZ9897_CHIP_ID ||
+ dev->chip_id == KSZ9567_CHIP_ID)
+ proto = DSA_TAG_PROTO_KSZ9477;
+
+ if (is_lan937x(dev))
+ proto = DSA_TAG_PROTO_LAN937X_VALUE;
+
+ return proto;
+}
+
+static int ksz_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct ksz_tagger_data *tagger_data;
+
+ tagger_data = ksz_tagger_data(ds);
+ tagger_data->xmit_work_fn = ksz_port_deferred_xmit;
+
+ return 0;
+}
+
+static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool flag, struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_filtering)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_filtering(dev, port, flag, extack);
+}
+
+static int ksz_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_add(dev, port, vlan, extack);
+}
+
+static int ksz_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->vlan_del)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->vlan_del(dev, port, vlan);
+}
+
+static int ksz_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->mirror_add)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->mirror_add(dev, port, mirror, ingress, extack);
+}
+
+static void ksz_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->dev_ops->mirror_del)
+ dev->dev_ops->mirror_del(dev, port, mirror);
+}
+
+static int ksz_change_mtu(struct dsa_switch *ds, int port, int mtu)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->change_mtu)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->change_mtu(dev, port, mtu);
+}
+
+static int ksz_max_mtu(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ switch (dev->chip_id) {
+ case KSZ8795_CHIP_ID:
+ case KSZ8794_CHIP_ID:
+ case KSZ8765_CHIP_ID:
+ return KSZ8795_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ case KSZ8830_CHIP_ID:
+ return KSZ8863_HUGE_PACKET_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ case KSZ8563_CHIP_ID:
+ case KSZ9477_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ case LAN9370_CHIP_ID:
+ case LAN9371_CHIP_ID:
+ case LAN9372_CHIP_ID:
+ case LAN9373_CHIP_ID:
+ case LAN9374_CHIP_ID:
+ return KSZ9477_MAX_FRAME_SIZE - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ }
+
+ return -EOPNOTSUPP;
+}
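
For reference, a minimal userspace sketch of the MTU arithmetic above, assuming the standard Linux header lengths (VLAN_ETH_HLEN = 18, ETH_FCS_LEN = 4) and the frame-size constants defined later in this patch; it is not driver code.

#include <stdio.h>

#define VLAN_ETH_HLEN 18 /* 14-byte Ethernet header + 4-byte 802.1Q tag */
#define ETH_FCS_LEN    4 /* frame check sequence */

int main(void)
{
	/* KSZ8795, KSZ8863 and KSZ9477 hardware frame limits from this patch */
	int frame_sizes[] = { 2000, 1916, 9000 };

	for (int i = 0; i < 3; i++)
		printf("frame %d -> max MTU %d\n", frame_sizes[i],
		       frame_sizes[i] - VLAN_ETH_HLEN - ETH_FCS_LEN);
	return 0; /* prints 1978, 1894 and 8978 */
}
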
+
+static int ksz_validate_eee(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->info->internal_phy[port])
+ return -EOPNOTSUPP;
+
+ switch (dev->chip_id) {
+ case KSZ8563_CHIP_ID:
+ case KSZ9477_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ int ret;
+
+ ret = ksz_validate_eee(ds, port);
+ if (ret)
+ return ret;
+
+ /* There is no documented control of Tx LPI configuration. */
+ e->tx_lpi_enabled = true;
+
+ /* There is no documented control of the Tx LPI timer. According to
+ * tests, it seems to default to its minimal value.
+ */
+ e->tx_lpi_timer = 0;
+
+ return 0;
+}
+
+static int ksz_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+
+ ret = ksz_validate_eee(ds, port);
+ if (ret)
+ return ret;
+
+ if (!e->tx_lpi_enabled) {
+ dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n");
+ return -EINVAL;
+ }
+
+ if (e->tx_lpi_timer) {
+ dev_err(dev->dev, "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void ksz_set_xmii(struct ksz_device *dev, int port,
+ phy_interface_t interface)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ struct ksz_port *p = &dev->ports[port];
+ const u16 *regs = dev->info->regs;
+ u8 data8;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ data8 &= ~(P_MII_SEL_M | P_RGMII_ID_IG_ENABLE |
+ P_RGMII_ID_EG_ENABLE);
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ data8 |= bitval[P_MII_SEL];
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ data8 |= bitval[P_RMII_SEL];
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ data8 |= bitval[P_GMII_SEL];
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ data8 |= bitval[P_RGMII_SEL];
+ /* On KSZ9893-compatible chips, disable RGMII in-band status support */
+ if (dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID ||
+ dev->chip_id == KSZ9563_CHIP_ID)
+ data8 &= ~P_MII_MAC_MODE;
+ break;
+ default:
+ dev_err(dev->dev, "Unsupported interface '%s' for port %d\n",
+ phy_modes(interface), port);
+ return;
+ }
+
+ if (p->rgmii_tx_val)
+ data8 |= P_RGMII_ID_EG_ENABLE;
+
+ if (p->rgmii_rx_val)
+ data8 |= P_RGMII_ID_IG_ENABLE;
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, regs[P_XMII_CTRL_1], data8);
+}
+
+phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ const u16 *regs = dev->info->regs;
+ phy_interface_t interface;
+ u8 data8;
+ u8 val;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ val = FIELD_GET(P_MII_SEL_M, data8);
+
+ if (val == bitval[P_MII_SEL]) {
+ if (gbit)
+ interface = PHY_INTERFACE_MODE_GMII;
+ else
+ interface = PHY_INTERFACE_MODE_MII;
+ } else if (val == bitval[P_RMII_SEL]) {
+ interface = PHY_INTERFACE_MODE_RMII;
+ } else {
+ interface = PHY_INTERFACE_MODE_RGMII;
+ if (data8 & P_RGMII_ID_EG_ENABLE)
+ interface = PHY_INTERFACE_MODE_RGMII_TXID;
+ if (data8 & P_RGMII_ID_IG_ENABLE) {
+ interface = PHY_INTERFACE_MODE_RGMII_RXID;
+ if (data8 & P_RGMII_ID_EG_ENABLE)
+ interface = PHY_INTERFACE_MODE_RGMII_ID;
+ }
+ }
+
+ return interface;
+}
+
+static void ksz_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (ksz_is_ksz88x3(dev))
+ return;
+
+ /* Internal PHYs */
+ if (dev->info->internal_phy[port])
+ return;
+
+ if (phylink_autoneg_inband(mode)) {
+ dev_err(dev->dev, "In-band AN not supported!\n");
+ return;
+ }
+
+ ksz_set_xmii(dev, port, state->interface);
+
+ if (dev->dev_ops->phylink_mac_config)
+ dev->dev_ops->phylink_mac_config(dev, port, mode, state);
+
+ if (dev->dev_ops->setup_rgmii_delay)
+ dev->dev_ops->setup_rgmii_delay(dev, port);
+}
+
+bool ksz_get_gbit(struct ksz_device *dev, int port)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ const u16 *regs = dev->info->regs;
+ bool gbit = false;
+ u8 data8;
+ bool val;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ val = FIELD_GET(P_GMII_1GBIT_M, data8);
+
+ if (val == bitval[P_GMII_1GBIT])
+ gbit = true;
+
+ return gbit;
+}
+
+static void ksz_set_gbit(struct ksz_device *dev, int port, bool gbit)
+{
+ const u8 *bitval = dev->info->xmii_ctrl1;
+ const u16 *regs = dev->info->regs;
+ u8 data8;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_1], &data8);
+
+ data8 &= ~P_GMII_1GBIT_M;
+
+ if (gbit)
+ data8 |= FIELD_PREP(P_GMII_1GBIT_M, bitval[P_GMII_1GBIT]);
+ else
+ data8 |= FIELD_PREP(P_GMII_1GBIT_M, bitval[P_GMII_NOT_1GBIT]);
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, regs[P_XMII_CTRL_1], data8);
+}
+
+static void ksz_set_100_10mbit(struct ksz_device *dev, int port, int speed)
+{
+ const u8 *bitval = dev->info->xmii_ctrl0;
+ const u16 *regs = dev->info->regs;
+ u8 data8;
+
+ ksz_pread8(dev, port, regs[P_XMII_CTRL_0], &data8);
+
+ data8 &= ~P_MII_100MBIT_M;
+
+ if (speed == SPEED_100)
+ data8 |= FIELD_PREP(P_MII_100MBIT_M, bitval[P_MII_100MBIT]);
+ else
+ data8 |= FIELD_PREP(P_MII_100MBIT_M, bitval[P_MII_10MBIT]);
+
+ /* Write the updated value */
+ ksz_pwrite8(dev, port, regs[P_XMII_CTRL_0], data8);
+}
+
+static void ksz_port_set_xmii_speed(struct ksz_device *dev, int port, int speed)
+{
+ if (speed == SPEED_1000)
+ ksz_set_gbit(dev, port, true);
+ else
+ ksz_set_gbit(dev, port, false);
+
+ if (speed == SPEED_100 || speed == SPEED_10)
+ ksz_set_100_10mbit(dev, port, speed);
+}
+
+static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ const u8 *bitval = dev->info->xmii_ctrl0;
+ const u32 *masks = dev->info->masks;
+ const u16 *regs = dev->info->regs;
+ u8 mask;
+ u8 val;
+
+ mask = P_MII_DUPLEX_M | masks[P_MII_TX_FLOW_CTRL] |
+ masks[P_MII_RX_FLOW_CTRL];
+
+ if (duplex == DUPLEX_FULL)
+ val = FIELD_PREP(P_MII_DUPLEX_M, bitval[P_MII_FULL_DUPLEX]);
+ else
+ val = FIELD_PREP(P_MII_DUPLEX_M, bitval[P_MII_HALF_DUPLEX]);
+
+ if (tx_pause)
+ val |= masks[P_MII_TX_FLOW_CTRL];
+
+ if (rx_pause)
+ val |= masks[P_MII_RX_FLOW_CTRL];
+
+ ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val);
+}
+
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ struct ksz_port *p;
+
+ p = &dev->ports[port];
+
+ /* Internal PHYs */
+ if (dev->info->internal_phy[port])
+ return;
+
+ p->phydev.speed = speed;
+
+ ksz_port_set_xmii_speed(dev, port, speed);
+
+ ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause);
+}
+
+static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (dev->dev_ops->phylink_mac_link_up)
+ dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface,
+ phydev, speed, duplex,
+ tx_pause, rx_pause);
+}
+
+static int ksz_switch_detect(struct ksz_device *dev)
+{
+ u8 id1, id2, id4;
+ u16 id16;
+ u32 id32;
+ int ret;
+
+ /* read chip id */
+ ret = ksz_read16(dev, REG_CHIP_ID0, &id16);
+ if (ret)
+ return ret;
+
+ id1 = FIELD_GET(SW_FAMILY_ID_M, id16);
+ id2 = FIELD_GET(SW_CHIP_ID_M, id16);
+
+ switch (id1) {
+ case KSZ87_FAMILY_ID:
+ if (id2 == KSZ87_CHIP_ID_95) {
+ u8 val;
+
+ dev->chip_id = KSZ8795_CHIP_ID;
+
+ ksz_read8(dev, KSZ8_PORT_STATUS_0, &val);
+ if (val & KSZ8_PORT_FIBER_MODE)
+ dev->chip_id = KSZ8765_CHIP_ID;
+ } else if (id2 == KSZ87_CHIP_ID_94) {
+ dev->chip_id = KSZ8794_CHIP_ID;
+ } else {
+ return -ENODEV;
+ }
+ break;
+ case KSZ88_FAMILY_ID:
+ if (id2 == KSZ88_CHIP_ID_63)
+ dev->chip_id = KSZ8830_CHIP_ID;
+ else
+ return -ENODEV;
+ break;
+ default:
+ ret = ksz_read32(dev, REG_CHIP_ID0, &id32);
+ if (ret)
+ return ret;
+
+ dev->chip_rev = FIELD_GET(SW_REV_ID_M, id32);
+ id32 &= ~0xFF;
+
+ switch (id32) {
+ case KSZ9477_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case LAN9370_CHIP_ID:
+ case LAN9371_CHIP_ID:
+ case LAN9372_CHIP_ID:
+ case LAN9373_CHIP_ID:
+ case LAN9374_CHIP_ID:
+ dev->chip_id = id32;
+ break;
+ case KSZ9893_CHIP_ID:
+ ret = ksz_read8(dev, REG_CHIP_ID4,
+ &id4);
+ if (ret)
+ return ret;
+
+ if (id4 == SKU_ID_KSZ8563)
+ dev->chip_id = KSZ8563_CHIP_ID;
+ else if (id4 == SKU_ID_KSZ9563)
+ dev->chip_id = KSZ9563_CHIP_ID;
+ else
+ dev->chip_id = KSZ9893_CHIP_ID;
+
+ break;
+ default:
+ dev_err(dev->dev,
+ "unsupported switch detected %x)\n", id32);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
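
For reference, a small sketch of how the 16-bit ID register decodes into the family and chip fields checked above; the masks mirror SW_FAMILY_ID_M (bits 15:8) and SW_CHIP_ID_M (bits 7:4), and the readout value 0x8790 is purely illustrative.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t id16 = 0x8790;               /* hypothetical REG_CHIP_ID0 readout */
	unsigned int family = (id16 >> 8) & 0xff; /* SW_FAMILY_ID_M */
	unsigned int chip = (id16 >> 4) & 0xf;    /* SW_CHIP_ID_M */

	printf("family 0x%02x chip 0x%x\n", family, chip);
	/* family 0x87 with chip 0x9 selects KSZ8795 (or KSZ8765 in fiber mode) */
	return 0;
}
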
+
+/* Bandwidth is calculated as idle slope / transmission speed. The bandwidth is
+ * then converted to a hexadecimal fraction using the successive multiplication
+ * method: on every step the integer part is kept and the fractional part is
+ * carried forward.
+ */
+static int cinc_cal(s32 idle_slope, s32 send_slope, u32 *bw)
+{
+ u32 cinc = 0;
+ u32 txrate;
+ u32 rate;
+ u8 temp;
+ u8 i;
+
+ txrate = idle_slope - send_slope;
+
+ if (!txrate)
+ return -EINVAL;
+
+ rate = idle_slope;
+
+ /* 24 bit register */
+ for (i = 0; i < 6; i++) {
+ rate = rate * 16;
+
+ temp = rate / txrate;
+
+ rate %= txrate;
+
+ cinc = ((cinc << 4) | temp);
+ }
+
+ *bw = cinc;
+
+ return 0;
+}
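
For reference, a standalone sketch of the successive-multiplication conversion performed by cinc_cal(), run with assumed sample CBS slopes (idle slope 20 Mbit/s, send slope -80 Mbit/s on a 100 Mbit/s link, i.e. a bandwidth fraction of 0.2); not driver code.

#include <stdio.h>

int main(void)
{
	int idle_slope = 20000, send_slope = -80000;
	unsigned int txrate = idle_slope - send_slope;  /* 100000 */
	unsigned int rate = idle_slope, cinc = 0;

	for (int i = 0; i < 6; i++) {        /* six hex digits: 24-bit register */
		rate *= 16;
		cinc = (cinc << 4) | (rate / txrate); /* keep the integer digit */
		rate %= txrate;                       /* carry the fraction forward */
	}
	printf("cinc = 0x%06x\n", cinc);     /* fraction 0.2 -> 0x333333 */
	return 0;
}
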
+
+static int ksz_setup_tc_mode(struct ksz_device *dev, int port, u8 scheduler,
+ u8 shaper)
+{
+ return ksz_pwrite8(dev, port, REG_PORT_MTI_QUEUE_CTRL_0,
+ FIELD_PREP(MTI_SCHEDULE_MODE_M, scheduler) |
+ FIELD_PREP(MTI_SHAPING_M, shaper));
+}
+
+static int ksz_setup_tc_cbs(struct dsa_switch *ds, int port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+ u32 bw;
+
+ if (!dev->info->tc_cbs_supported)
+ return -EOPNOTSUPP;
+
+ if (qopt->queue >= dev->info->num_tx_queues)
+ return -EINVAL;
+
+ /* Queue Selection */
+ ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, qopt->queue);
+ if (ret)
+ return ret;
+
+ if (!qopt->enable)
+ return ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_WRR,
+ MTI_SHAPING_OFF);
+
+ /* High Credit */
+ ret = ksz_pwrite16(dev, port, REG_PORT_MTI_HI_WATER_MARK,
+ qopt->hicredit);
+ if (ret)
+ return ret;
+
+ /* Low Credit */
+ ret = ksz_pwrite16(dev, port, REG_PORT_MTI_LO_WATER_MARK,
+ qopt->locredit);
+ if (ret)
+ return ret;
+
+ /* Credit Increment Register */
+ ret = cinc_cal(qopt->idleslope, qopt->sendslope, &bw);
+ if (ret)
+ return ret;
+
+ if (dev->dev_ops->tc_cbs_set_cinc) {
+ ret = dev->dev_ops->tc_cbs_set_cinc(dev, port, bw);
+ if (ret)
+ return ret;
+ }
+
+ return ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_STRICT_PRIO,
+ MTI_SHAPING_SRP);
+}
+
+static int ksz_disable_egress_rate_limit(struct ksz_device *dev, int port)
+{
+ int queue, ret;
+
+ /* Configuration will not take effect until the last Port Queue X
+ * Egress Limit Control Register is written.
+ */
+ for (queue = 0; queue < dev->info->num_tx_queues; queue++) {
+ ret = ksz_pwrite8(dev, port, KSZ9477_REG_PORT_OUT_RATE_0 + queue,
+ KSZ9477_OUT_RATE_NO_LIMIT);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ksz_ets_band_to_queue(struct tc_ets_qopt_offload_replace_params *p,
+ int band)
+{
+ /* Compared to queues, bands prioritize packets differently. In strict
+ * priority mode, the lowest priority is assigned to Queue 0 while the
+ * highest priority is given to Band 0.
+ */
+ return p->bands - 1 - band;
+}
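
For reference, a small sketch of the band-to-queue mirroring described above, assuming four bands: the highest-priority band 0 lands on the highest-priority queue 3.

#include <stdio.h>

static int band_to_queue(int bands, int band)
{
	return bands - 1 - band;
}

int main(void)
{
	for (int band = 0; band < 4; band++)
		printf("band %d -> queue %d\n", band, band_to_queue(4, band));
	return 0; /* band 0 -> queue 3 ... band 3 -> queue 0 */
}
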
+
+static int ksz_queue_set_strict(struct ksz_device *dev, int port, int queue)
+{
+ int ret;
+
+ ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, queue);
+ if (ret)
+ return ret;
+
+ return ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_STRICT_PRIO,
+ MTI_SHAPING_OFF);
+}
+
+static int ksz_queue_set_wrr(struct ksz_device *dev, int port, int queue,
+ int weight)
+{
+ int ret;
+
+ ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, queue);
+ if (ret)
+ return ret;
+
+ ret = ksz_setup_tc_mode(dev, port, MTI_SCHEDULE_WRR,
+ MTI_SHAPING_OFF);
+ if (ret)
+ return ret;
+
+ return ksz_pwrite8(dev, port, KSZ9477_PORT_MTI_QUEUE_CTRL_1, weight);
+}
+
+static int ksz_tc_ets_add(struct ksz_device *dev, int port,
+ struct tc_ets_qopt_offload_replace_params *p)
+{
+ int ret, band, tc_prio;
+ u32 queue_map = 0;
+
+ /* In order to ensure proper prioritization, it is necessary to set the
+ * rate limit for the related queue to zero. Otherwise strict priority
+ * or WRR mode will not work. This is a hardware limitation.
+ */
+ ret = ksz_disable_egress_rate_limit(dev, port);
+ if (ret)
+ return ret;
+
+ /* Configure queue scheduling mode for all bands. Currently only strict
+ * prio mode is supported.
+ */
+ for (band = 0; band < p->bands; band++) {
+ int queue = ksz_ets_band_to_queue(p, band);
+
+ ret = ksz_queue_set_strict(dev, port, queue);
+ if (ret)
+ return ret;
+ }
+
+ /* Configure the mapping between traffic classes and queues. Note:
+ * priomap variable supports 16 traffic classes, but the chip can handle
+ * only 8 classes.
+ */
+ for (tc_prio = 0; tc_prio < ARRAY_SIZE(p->priomap); tc_prio++) {
+ int queue;
+
+ if (tc_prio > KSZ9477_MAX_TC_PRIO)
+ break;
+
+ queue = ksz_ets_band_to_queue(p, p->priomap[tc_prio]);
+ queue_map |= queue << (tc_prio * KSZ9477_PORT_TC_MAP_S);
+ }
+
+ return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map);
+}
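
For reference, a sketch of how the per-priority queue assignments pack into the 4-bit fields of the mapping register, assuming a hypothetical 4-band priomap {0, 0, 1, 1, 2, 2, 3, 3}; the field width matches KSZ9477_PORT_TC_MAP_S.

#include <stdio.h>

int main(void)
{
	int bands = 4;
	int priomap[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };
	unsigned int queue_map = 0;

	for (int tc = 0; tc < 8; tc++) {
		int queue = bands - 1 - priomap[tc]; /* band -> queue */

		queue_map |= (unsigned int)queue << (tc * 4);
	}
	printf("queue_map = 0x%08x\n", queue_map); /* 0x00112233 */
	return 0;
}
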
+
+static int ksz_tc_ets_del(struct ksz_device *dev, int port)
+{
+ int ret, queue, tc_prio, s;
+ u32 queue_map = 0;
+
+ /* To restore the default chip configuration, set all queues to use the
+ * WRR scheduler with a weight of 1.
+ */
+ for (queue = 0; queue < dev->info->num_tx_queues; queue++) {
+ ret = ksz_queue_set_wrr(dev, port, queue,
+ KSZ9477_DEFAULT_WRR_WEIGHT);
+ if (ret)
+ return ret;
+ }
+
+ switch (dev->info->num_tx_queues) {
+ case 2:
+ s = 2;
+ break;
+ case 4:
+ s = 1;
+ break;
+ case 8:
+ s = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Revert the queue mapping for TC-priority to its default setting on
+ * the chip.
+ */
+ for (tc_prio = 0; tc_prio <= KSZ9477_MAX_TC_PRIO; tc_prio++) {
+ int queue;
+
+ queue = tc_prio >> s;
+ queue_map |= queue << (tc_prio * KSZ9477_PORT_TC_MAP_S);
+ }
+
+ return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map);
+}
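
For reference, a sketch of the default mapping this function restores, assuming a 4-queue port (shift s = 1), so two consecutive priorities share each queue.

#include <stdio.h>

int main(void)
{
	int s = 1;                 /* 4 egress queues */
	unsigned int queue_map = 0;

	for (int tc = 0; tc <= 7; tc++)
		queue_map |= (unsigned int)(tc >> s) << (tc * 4);
	printf("default queue_map = 0x%08x\n", queue_map); /* 0x33221100 */
	return 0;
}
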
+
+static int ksz_tc_ets_validate(struct ksz_device *dev, int port,
+ struct tc_ets_qopt_offload_replace_params *p)
+{
+ int band;
+
+ /* Since it is not feasible to share one port among multiple qdiscs,
+ * the user must configure all available queues appropriately.
+ */
+ if (p->bands != dev->info->num_tx_queues) {
+ dev_err(dev->dev, "Not supported amount of bands. It should be %d\n",
+ dev->info->num_tx_queues);
+ return -EOPNOTSUPP;
+ }
+
+ for (band = 0; band < p->bands; ++band) {
+ /* The KSZ switches utilize a weighted round robin configuration
+ * where a certain number of packets can be transmitted from a
+ * queue before the next queue is serviced. For more information
+ * on this, refer to section 5.2.8.4 of the KSZ8565R
+ * documentation on the Port Transmit Queue Control 1 Register.
+ * However, the current ETS Qdisc implementation (as of February
+ * 2023) assigns a weight to each queue based on the number of
+ * bytes or extrapolated bandwidth in percentages. Since this
+ * differs from the KSZ switches' method and we don't want to
+ * fake support by converting bytes to packets, it is better to
+ * return an error instead.
+ */
+ if (p->quanta[band]) {
+ dev_err(dev->dev, "Quanta/weights configuration is not supported.\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
+static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port,
+ struct tc_ets_qopt_offload *qopt)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+
+ if (!dev->info->tc_ets_supported)
+ return -EOPNOTSUPP;
+
+ if (qopt->parent != TC_H_ROOT) {
+ dev_err(dev->dev, "Parent should be \"root\"\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (qopt->command) {
+ case TC_ETS_REPLACE:
+ ret = ksz_tc_ets_validate(dev, port, &qopt->replace_params);
+ if (ret)
+ return ret;
+
+ return ksz_tc_ets_add(dev, port, &qopt->replace_params);
+ case TC_ETS_DESTROY:
+ return ksz_tc_ets_del(dev, port);
+ case TC_ETS_STATS:
+ case TC_ETS_GRAFT:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int ksz_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return ksz_setup_tc_cbs(ds, port, type_data);
+ case TC_SETUP_QDISC_ETS:
+ return ksz_tc_setup_qdisc_ets(ds, port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct dsa_switch_ops ksz_switch_ops = {
+ .get_tag_protocol = ksz_get_tag_protocol,
+ .connect_tag_protocol = ksz_connect_tag_protocol,
+ .get_phy_flags = ksz_get_phy_flags,
+ .setup = ksz_setup,
+ .teardown = ksz_teardown,
+ .phy_read = ksz_phy_read16,
+ .phy_write = ksz_phy_write16,
+ .phylink_get_caps = ksz_phylink_get_caps,
+ .phylink_mac_config = ksz_phylink_mac_config,
+ .phylink_mac_link_up = ksz_phylink_mac_link_up,
+ .phylink_mac_link_down = ksz_mac_link_down,
+ .port_enable = ksz_enable_port,
+ .set_ageing_time = ksz_set_ageing_time,
+ .get_strings = ksz_get_strings,
+ .get_ethtool_stats = ksz_get_ethtool_stats,
+ .get_sset_count = ksz_sset_count,
+ .port_bridge_join = ksz_port_bridge_join,
+ .port_bridge_leave = ksz_port_bridge_leave,
+ .port_stp_state_set = ksz_port_stp_state_set,
+ .port_pre_bridge_flags = ksz_port_pre_bridge_flags,
+ .port_bridge_flags = ksz_port_bridge_flags,
+ .port_fast_age = ksz_port_fast_age,
+ .port_vlan_filtering = ksz_port_vlan_filtering,
+ .port_vlan_add = ksz_port_vlan_add,
+ .port_vlan_del = ksz_port_vlan_del,
+ .port_fdb_dump = ksz_port_fdb_dump,
+ .port_fdb_add = ksz_port_fdb_add,
+ .port_fdb_del = ksz_port_fdb_del,
+ .port_mdb_add = ksz_port_mdb_add,
+ .port_mdb_del = ksz_port_mdb_del,
+ .port_mirror_add = ksz_port_mirror_add,
+ .port_mirror_del = ksz_port_mirror_del,
+ .get_stats64 = ksz_get_stats64,
+ .get_pause_stats = ksz_get_pause_stats,
+ .port_change_mtu = ksz_change_mtu,
+ .port_max_mtu = ksz_max_mtu,
+ .get_ts_info = ksz_get_ts_info,
+ .port_hwtstamp_get = ksz_hwtstamp_get,
+ .port_hwtstamp_set = ksz_hwtstamp_set,
+ .port_txtstamp = ksz_port_txtstamp,
+ .port_rxtstamp = ksz_port_rxtstamp,
+ .port_setup_tc = ksz_setup_tc,
+ .get_mac_eee = ksz_get_mac_eee,
+ .set_mac_eee = ksz_set_mac_eee,
+};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
@@ -356,6 +3433,7 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
ds->dev = base;
ds->num_ports = DSA_MAX_PORTS;
+ ds->ops = &ksz_switch_ops;
swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
if (!swdev)
@@ -371,13 +3449,51 @@ struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
}
EXPORT_SYMBOL(ksz_switch_alloc);
-int ksz_switch_register(struct ksz_device *dev,
- const struct ksz_dev_ops *ops)
+static void ksz_parse_rgmii_delay(struct ksz_device *dev, int port_num,
+ struct device_node *port_dn)
+{
+ phy_interface_t phy_mode = dev->ports[port_num].interface;
+ int rx_delay = -1, tx_delay = -1;
+
+ if (!phy_interface_mode_is_rgmii(phy_mode))
+ return;
+
+ of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
+ of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
+
+ if (rx_delay == -1 && tx_delay == -1) {
+ dev_warn(dev->dev,
+ "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, "
+ "please update device tree to specify \"rx-internal-delay-ps\" and "
+ "\"tx-internal-delay-ps\"",
+ port_num);
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ rx_delay = 2000;
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ tx_delay = 2000;
+ }
+
+ if (rx_delay < 0)
+ rx_delay = 0;
+ if (tx_delay < 0)
+ tx_delay = 0;
+
+ dev->ports[port_num].rgmii_rx_val = rx_delay;
+ dev->ports[port_num].rgmii_tx_val = tx_delay;
+}
+
+int ksz_switch_register(struct ksz_device *dev)
{
+ const struct ksz_chip_data *info;
struct device_node *port, *ports;
phy_interface_t interface;
unsigned int port_num;
int ret;
+ int i;
if (dev->pdata)
dev->chip_id = dev->pdata->chip_id;
@@ -399,19 +3515,57 @@ int ksz_switch_register(struct ksz_device *dev,
mutex_init(&dev->alu_mutex);
mutex_init(&dev->vlan_mutex);
- dev->dev_ops = ops;
+ ret = ksz_switch_detect(dev);
+ if (ret)
+ return ret;
- if (dev->dev_ops->detect(dev))
- return -EINVAL;
+ info = ksz_lookup_info(dev->chip_id);
+ if (!info)
+ return -ENODEV;
+
+ /* Update the compatible info with the probed one */
+ dev->info = info;
+
+ dev_info(dev->dev, "found switch: %s, rev %i\n",
+ dev->info->dev_name, dev->chip_rev);
+
+ ret = ksz_check_device_id(dev);
+ if (ret)
+ return ret;
+
+ dev->dev_ops = dev->info->ops;
ret = dev->dev_ops->init(dev);
if (ret)
return ret;
+ dev->ports = devm_kzalloc(dev->dev,
+ dev->info->port_cnt * sizeof(struct ksz_port),
+ GFP_KERNEL);
+ if (!dev->ports)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->info->port_cnt; i++) {
+ spin_lock_init(&dev->ports[i].mib.stats64_lock);
+ mutex_init(&dev->ports[i].mib.cnt_mutex);
+ dev->ports[i].mib.counters =
+ devm_kzalloc(dev->dev,
+ sizeof(u64) * (dev->info->mib_cnt + 1),
+ GFP_KERNEL);
+ if (!dev->ports[i].mib.counters)
+ return -ENOMEM;
+
+ dev->ports[i].ksz_dev = dev;
+ dev->ports[i].num = i;
+ }
+
+ /* set the real number of ports */
+ dev->ds->num_ports = dev->info->port_cnt;
+
/* Host port interface will be self detected, or specifically set in
* device tree.
*/
- for (port_num = 0; port_num < dev->port_cnt; ++port_num)
+ for (port_num = 0; port_num < dev->info->port_cnt; ++port_num)
dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
if (dev->dev->of_node) {
ret = of_get_phy_mode(dev->dev->of_node, &interface);
@@ -420,20 +3574,31 @@ int ksz_switch_register(struct ksz_device *dev,
ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports");
if (!ports)
ports = of_get_child_by_name(dev->dev->of_node, "ports");
- if (ports)
+ if (ports) {
for_each_available_child_of_node(ports, port) {
if (of_property_read_u32(port, "reg",
&port_num))
continue;
if (!(dev->port_mask & BIT(port_num))) {
of_node_put(port);
+ of_node_put(ports);
return -EINVAL;
}
of_get_phy_mode(port,
&dev->ports[port_num].interface);
+
+ ksz_parse_rgmii_delay(dev, port_num, port);
}
+ of_node_put(ports);
+ }
dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
"microchip,synclko-125");
+ dev->synclko_disable = of_property_read_bool(dev->dev->of_node,
+ "microchip,synclko-disable");
+ if (dev->synclko_125 && dev->synclko_disable) {
+ dev_err(dev->dev, "inconsistent synclko settings\n");
+ return -EINVAL;
+ }
}
ret = dsa_register_switch(dev->ds);
@@ -443,12 +3608,12 @@ int ksz_switch_register(struct ksz_device *dev,
}
/* Read MIB counters every 30 seconds to avoid overflow. */
- dev->mib_read_interval = msecs_to_jiffies(30000);
+ dev->mib_read_interval = msecs_to_jiffies(5000);
/* Start the MIB timer. */
schedule_delayed_work(&dev->mib_read, 0);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(ksz_switch_register);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 54b456bc8972..8abecaf6089e 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -13,6 +13,14 @@
#include <linux/phy.h>
#include <linux/regmap.h>
#include <net/dsa.h>
+#include <linux/irq.h>
+
+#include "ksz_ptp.h"
+
+#define KSZ_MAX_NUM_PORTS 8
+
+struct ksz_device;
+struct ksz_port;
struct vlan_table {
u32 table[3];
@@ -22,29 +30,105 @@ struct ksz_port_mib {
struct mutex cnt_mutex; /* structure access */
u8 cnt_ptr;
u64 *counters;
+ struct rtnl_link_stats64 stats64;
+ struct ethtool_pause_stats pause_stats;
+ struct spinlock stats64_lock;
+};
+
+struct ksz_mib_names {
+ int index;
+ char string[ETH_GSTRING_LEN];
+};
+
+struct ksz_chip_data {
+ u32 chip_id;
+ const char *dev_name;
+ int num_vlans;
+ int num_alus;
+ int num_statics;
+ int cpu_ports;
+ int port_cnt;
+ u8 port_nirqs;
+ u8 num_tx_queues;
+ bool tc_cbs_supported;
+ bool tc_ets_supported;
+ const struct ksz_dev_ops *ops;
+ bool phy_errata_9477;
+ bool ksz87xx_eee_link_erratum;
+ const struct ksz_mib_names *mib_names;
+ int mib_cnt;
+ u8 reg_mib_cnt;
+ const u16 *regs;
+ const u32 *masks;
+ const u8 *shifts;
+ const u8 *xmii_ctrl0;
+ const u8 *xmii_ctrl1;
+ int stp_ctrl_reg;
+ int broadcast_ctrl_reg;
+ int multicast_ctrl_reg;
+ int start_ctrl_reg;
+ bool supports_mii[KSZ_MAX_NUM_PORTS];
+ bool supports_rmii[KSZ_MAX_NUM_PORTS];
+ bool supports_rgmii[KSZ_MAX_NUM_PORTS];
+ bool internal_phy[KSZ_MAX_NUM_PORTS];
+ bool gbit_capable[KSZ_MAX_NUM_PORTS];
+ const struct regmap_access_table *wr_table;
+ const struct regmap_access_table *rd_table;
+};
+
+struct ksz_irq {
+ u16 masked;
+ u16 reg_mask;
+ u16 reg_status;
+ struct irq_domain *domain;
+ int nirqs;
+ int irq_num;
+ char name[16];
+ struct ksz_device *dev;
+};
+
+struct ksz_ptp_irq {
+ struct ksz_port *port;
+ u16 ts_reg;
+ bool ts_en;
+ char name[16];
+ int num;
};
struct ksz_port {
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
+ bool learning;
int stp_state;
struct phy_device phydev;
u32 on:1; /* port is not disabled by hardware */
- u32 phy:1; /* port has a PHY */
u32 fiber:1; /* port is fiber */
- u32 sgmii:1; /* port is SGMII */
u32 force:1;
u32 read:1; /* read MIB counters in background */
u32 freeze:1; /* MIB counter freeze is enabled */
struct ksz_port_mib mib;
phy_interface_t interface;
+ u32 rgmii_tx_val;
+ u32 rgmii_rx_val;
+ struct ksz_device *ksz_dev;
+ struct ksz_irq pirq;
+ u8 num;
+#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP)
+ struct hwtstamp_config tstamp_config;
+ bool hwts_tx_en;
+ bool hwts_rx_en;
+ struct ksz_irq ptpirq;
+ struct ksz_ptp_irq ptpmsg_irq[3];
+ ktime_t tstamp_msg;
+ struct completion tstamp_msg_comp;
+#endif
};
struct ksz_device {
struct dsa_switch *ds;
struct ksz_platform_data *pdata;
- const char *name;
+ const struct ksz_chip_data *info;
struct mutex dev_mutex; /* device access */
struct mutex regmap_mutex; /* regmap access */
@@ -56,25 +140,18 @@ struct ksz_device {
struct regmap *regmap[3];
void *priv;
+ int irq;
struct gpio_desc *reset_gpio; /* Optional reset GPIO */
/* chip specific data */
u32 chip_id;
- int num_vlans;
- int num_alus;
- int num_statics;
+ u8 chip_rev;
int cpu_port; /* port connected to CPU */
- int cpu_ports; /* port bitmap can be cpu port */
int phy_port_cnt;
- int port_cnt;
- u8 reg_mib_cnt;
- int mib_cnt;
- const struct mib_names *mib_names;
phy_interface_t compat_interface;
- u32 regs_size;
- bool phy_errata_9477;
bool synclko_125;
+ bool synclko_disable;
struct vlan_table *vlan_cache;
@@ -83,10 +160,127 @@ struct ksz_device {
unsigned long mib_read_interval;
u16 mirror_rx;
u16 mirror_tx;
- u32 features; /* chip specific features */
- u32 overrides; /* chip functions set by user */
- u16 host_mask;
u16 port_mask;
+ struct mutex lock_irq; /* IRQ Access */
+ struct ksz_irq girq;
+ struct ksz_ptp_data ptp_data;
+};
+
+/* List of supported models */
+enum ksz_model {
+ KSZ8563,
+ KSZ8795,
+ KSZ8794,
+ KSZ8765,
+ KSZ8830,
+ KSZ9477,
+ KSZ9896,
+ KSZ9897,
+ KSZ9893,
+ KSZ9563,
+ KSZ9567,
+ LAN9370,
+ LAN9371,
+ LAN9372,
+ LAN9373,
+ LAN9374,
+};
+
+enum ksz_chip_id {
+ KSZ8563_CHIP_ID = 0x8563,
+ KSZ8795_CHIP_ID = 0x8795,
+ KSZ8794_CHIP_ID = 0x8794,
+ KSZ8765_CHIP_ID = 0x8765,
+ KSZ8830_CHIP_ID = 0x8830,
+ KSZ9477_CHIP_ID = 0x00947700,
+ KSZ9896_CHIP_ID = 0x00989600,
+ KSZ9897_CHIP_ID = 0x00989700,
+ KSZ9893_CHIP_ID = 0x00989300,
+ KSZ9563_CHIP_ID = 0x00956300,
+ KSZ9567_CHIP_ID = 0x00956700,
+ LAN9370_CHIP_ID = 0x00937000,
+ LAN9371_CHIP_ID = 0x00937100,
+ LAN9372_CHIP_ID = 0x00937200,
+ LAN9373_CHIP_ID = 0x00937300,
+ LAN9374_CHIP_ID = 0x00937400,
+};
+
+enum ksz_regs {
+ REG_IND_CTRL_0,
+ REG_IND_DATA_8,
+ REG_IND_DATA_CHECK,
+ REG_IND_DATA_HI,
+ REG_IND_DATA_LO,
+ REG_IND_MIB_CHECK,
+ REG_IND_BYTE,
+ P_FORCE_CTRL,
+ P_LINK_STATUS,
+ P_LOCAL_CTRL,
+ P_NEG_RESTART_CTRL,
+ P_REMOTE_STATUS,
+ P_SPEED_STATUS,
+ S_TAIL_TAG_CTRL,
+ P_STP_CTRL,
+ S_START_CTRL,
+ S_BROADCAST_CTRL,
+ S_MULTICAST_CTRL,
+ P_XMII_CTRL_0,
+ P_XMII_CTRL_1,
+};
+
+enum ksz_masks {
+ PORT_802_1P_REMAPPING,
+ SW_TAIL_TAG_ENABLE,
+ MIB_COUNTER_OVERFLOW,
+ MIB_COUNTER_VALID,
+ VLAN_TABLE_FID,
+ VLAN_TABLE_MEMBERSHIP,
+ VLAN_TABLE_VALID,
+ STATIC_MAC_TABLE_VALID,
+ STATIC_MAC_TABLE_USE_FID,
+ STATIC_MAC_TABLE_FID,
+ STATIC_MAC_TABLE_OVERRIDE,
+ STATIC_MAC_TABLE_FWD_PORTS,
+ DYNAMIC_MAC_TABLE_ENTRIES_H,
+ DYNAMIC_MAC_TABLE_MAC_EMPTY,
+ DYNAMIC_MAC_TABLE_NOT_READY,
+ DYNAMIC_MAC_TABLE_ENTRIES,
+ DYNAMIC_MAC_TABLE_FID,
+ DYNAMIC_MAC_TABLE_SRC_PORT,
+ DYNAMIC_MAC_TABLE_TIMESTAMP,
+ ALU_STAT_WRITE,
+ ALU_STAT_READ,
+ P_MII_TX_FLOW_CTRL,
+ P_MII_RX_FLOW_CTRL,
+};
+
+enum ksz_shifts {
+ VLAN_TABLE_MEMBERSHIP_S,
+ VLAN_TABLE,
+ STATIC_MAC_FWD_PORTS,
+ STATIC_MAC_FID,
+ DYNAMIC_MAC_ENTRIES_H,
+ DYNAMIC_MAC_ENTRIES,
+ DYNAMIC_MAC_FID,
+ DYNAMIC_MAC_TIMESTAMP,
+ DYNAMIC_MAC_SRC_PORT,
+ ALU_STAT_INDEX,
+};
+
+enum ksz_xmii_ctrl0 {
+ P_MII_100MBIT,
+ P_MII_10MBIT,
+ P_MII_FULL_DUPLEX,
+ P_MII_HALF_DUPLEX,
+};
+
+enum ksz_xmii_ctrl1 {
+ P_RGMII_SEL,
+ P_RMII_SEL,
+ P_GMII_SEL,
+ P_MII_SEL,
+ P_GMII_1GBIT,
+ P_GMII_NOT_1GBIT,
};
struct alu_struct {
@@ -109,63 +303,78 @@ struct alu_struct {
};
struct ksz_dev_ops {
+ int (*setup)(struct dsa_switch *ds);
+ void (*teardown)(struct dsa_switch *ds);
u32 (*get_port_addr)(int port, int offset);
void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
void (*port_cleanup)(struct ksz_device *dev, int port);
void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
- void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
- void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
- int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr,
- u8 *fid, u8 *src_port, u8 *timestamp,
- u16 *entries);
- int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
- void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr,
- struct alu_struct *alu);
+ int (*set_ageing_time)(struct ksz_device *dev, unsigned int msecs);
+ int (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+ int (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
u64 *cnt);
void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt);
+ void (*r_mib_stat64)(struct ksz_device *dev, int port);
+ int (*vlan_filtering)(struct ksz_device *dev, int port,
+ bool flag, struct netlink_ext_ack *extack);
+ int (*vlan_add)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+ int (*vlan_del)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+ int (*mirror_add)(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+ void (*mirror_del)(struct ksz_device *dev, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+ int (*fdb_add)(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+ int (*fdb_del)(struct ksz_device *dev, int port,
+ const unsigned char *addr, u16 vid, struct dsa_db db);
+ int (*fdb_dump)(struct ksz_device *dev, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+ int (*mdb_add)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+ int (*mdb_del)(struct ksz_device *dev, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+ void (*get_caps)(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+ int (*change_mtu)(struct ksz_device *dev, int port, int mtu);
void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
void (*port_init_cnt)(struct ksz_device *dev, int port);
- int (*shutdown)(struct ksz_device *dev);
- int (*detect)(struct ksz_device *dev);
+ void (*phylink_mac_config)(struct ksz_device *dev, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+ void (*phylink_mac_link_up)(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause);
+ void (*setup_rgmii_delay)(struct ksz_device *dev, int port);
+ int (*tc_cbs_set_cinc)(struct ksz_device *dev, int port, u32 val);
+ void (*config_cpu_port)(struct dsa_switch *ds);
+ int (*enable_stp_addr)(struct ksz_device *dev);
+ int (*reset)(struct ksz_device *dev);
int (*init)(struct ksz_device *dev);
void (*exit)(struct ksz_device *dev);
};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv);
-int ksz_switch_register(struct ksz_device *dev,
- const struct ksz_dev_ops *ops);
+int ksz_switch_register(struct ksz_device *dev);
void ksz_switch_remove(struct ksz_device *dev);
-int ksz8_switch_register(struct ksz_device *dev);
-int ksz9477_switch_register(struct ksz_device *dev);
-
-void ksz_update_port_member(struct ksz_device *dev, int port);
void ksz_init_mib_timer(struct ksz_device *dev);
-
-/* Common DSA access functions */
-
-int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg);
-int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val);
-void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface);
-int ksz_sset_count(struct dsa_switch *ds, int port, int sset);
-void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf);
-int ksz_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br);
-void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br);
-void ksz_port_fast_age(struct dsa_switch *ds, int port);
-int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
- void *data);
-int ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
-int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb);
-int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+void ksz_r_mib_stats64(struct ksz_device *dev, int port);
+void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port);
+void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
+bool ksz_get_gbit(struct ksz_device *dev, int port);
+phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit);
+extern const struct ksz_chip_data ksz_switch_chips[];
/* Common register access functions */
@@ -174,6 +383,10 @@ static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
unsigned int value;
int ret = regmap_read(dev->regmap[0], reg, &value);
+ if (ret)
+ dev_err(dev->dev, "can't read 8bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
*val = value;
return ret;
}
@@ -183,6 +396,10 @@ static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
unsigned int value;
int ret = regmap_read(dev->regmap[1], reg, &value);
+ if (ret)
+ dev_err(dev->dev, "can't read 16bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
*val = value;
return ret;
}
@@ -192,6 +409,10 @@ static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
unsigned int value;
int ret = regmap_read(dev->regmap[2], reg, &value);
+ if (ret)
+ dev_err(dev->dev, "can't read 32bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
*val = value;
return ret;
}
@@ -202,7 +423,10 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
int ret;
ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
- if (!ret)
+ if (ret)
+ dev_err(dev->dev, "can't read 64bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+ else
*val = (u64)value[0] << 32 | value[1];
return ret;
@@ -210,17 +434,64 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
{
- return regmap_write(dev->regmap[0], reg, value);
+ int ret;
+
+ ret = regmap_write(dev->regmap[0], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 8bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value)
{
- return regmap_write(dev->regmap[1], reg, value);
+ int ret;
+
+ ret = regmap_write(dev->regmap[1], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 16bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
{
- return regmap_write(dev->regmap[2], reg, value);
+ int ret;
+
+ ret = regmap_write(dev->regmap[2], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 32bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
+}
+
+static inline int ksz_rmw16(struct ksz_device *dev, u32 reg, u16 mask,
+ u16 value)
+{
+ int ret;
+
+ ret = regmap_update_bits(dev->regmap[1], reg, mask, value);
+ if (ret)
+ dev_err(dev->dev, "can't rmw 16bit reg 0x%x: %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
+}
+
+static inline int ksz_rmw32(struct ksz_device *dev, u32 reg, u32 mask,
+ u32 value)
+{
+ int ret;
+
+ ret = regmap_update_bits(dev->regmap[2], reg, mask, value);
+ if (ret)
+ dev_err(dev->dev, "can't rmw 32bit reg 0x%x: %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
@@ -235,40 +506,55 @@ static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
return regmap_bulk_write(dev->regmap[2], reg, val, 2);
}
-static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
- u8 *data)
+static inline int ksz_rmw8(struct ksz_device *dev, int offset, u8 mask, u8 val)
+{
+ return regmap_update_bits(dev->regmap[0], offset, mask, val);
+}
+
+static inline int ksz_pread8(struct ksz_device *dev, int port, int offset,
+ u8 *data)
+{
+ return ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline int ksz_pread16(struct ksz_device *dev, int port, int offset,
+ u16 *data)
{
- ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pread16(struct ksz_device *dev, int port, int offset,
- u16 *data)
+static inline int ksz_pread32(struct ksz_device *dev, int port, int offset,
+ u32 *data)
{
- ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pread32(struct ksz_device *dev, int port, int offset,
- u32 *data)
+static inline int ksz_pwrite8(struct ksz_device *dev, int port, int offset,
+ u8 data)
{
- ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset,
- u8 data)
+static inline int ksz_pwrite16(struct ksz_device *dev, int port, int offset,
+ u16 data)
{
- ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset),
+ data);
}
-static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset,
- u16 data)
+static inline int ksz_pwrite32(struct ksz_device *dev, int port, int offset,
+ u32 data)
{
- ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset),
+ data);
}
-static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
- u32 data)
+static inline void ksz_prmw8(struct ksz_device *dev, int port, int offset,
+ u8 mask, u8 val)
{
- ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ regmap_update_bits(dev->regmap[0],
+ dev->dev_ops->get_port_addr(port, offset),
+ mask, val);
}
static inline void ksz_regmap_lock(void *__mtx)
@@ -283,6 +569,114 @@ static inline void ksz_regmap_unlock(void *__mtx)
mutex_unlock(mtx);
}
+static inline bool ksz_is_ksz88x3(struct ksz_device *dev)
+{
+ return dev->chip_id == KSZ8830_CHIP_ID;
+}
+
+static inline int is_lan937x(struct ksz_device *dev)
+{
+ return dev->chip_id == LAN9370_CHIP_ID ||
+ dev->chip_id == LAN9371_CHIP_ID ||
+ dev->chip_id == LAN9372_CHIP_ID ||
+ dev->chip_id == LAN9373_CHIP_ID ||
+ dev->chip_id == LAN9374_CHIP_ID;
+}
+
+/* STP State Defines */
+#define PORT_TX_ENABLE BIT(2)
+#define PORT_RX_ENABLE BIT(1)
+#define PORT_LEARN_DISABLE BIT(0)
+
+/* Switch ID Defines */
+#define REG_CHIP_ID0 0x00
+
+#define SW_FAMILY_ID_M GENMASK(15, 8)
+#define KSZ87_FAMILY_ID 0x87
+#define KSZ88_FAMILY_ID 0x88
+
+#define KSZ8_PORT_STATUS_0 0x08
+#define KSZ8_PORT_FIBER_MODE BIT(7)
+
+#define SW_CHIP_ID_M GENMASK(7, 4)
+#define KSZ87_CHIP_ID_94 0x6
+#define KSZ87_CHIP_ID_95 0x9
+#define KSZ88_CHIP_ID_63 0x3
+
+#define SW_REV_ID_M GENMASK(7, 4)
+
+/* KSZ9893, KSZ9563, KSZ8563 specific register */
+#define REG_CHIP_ID4 0x0f
+#define SKU_ID_KSZ8563 0x3c
+#define SKU_ID_KSZ9563 0x1c
+
+/* Driver set switch broadcast storm protection at 10% rate. */
+#define BROADCAST_STORM_PROT_RATE 10
+
+/* 148,800 frames * 67 ms / 100 */
+#define BROADCAST_STORM_VALUE 9969
+
+#define BROADCAST_STORM_RATE_HI 0x07
+#define BROADCAST_STORM_RATE_LO 0xFF
+#define BROADCAST_STORM_RATE 0x07FF
+
+#define MULTICAST_STORM_DISABLE BIT(6)
+
+#define SW_START 0x01
+
+/* xMII configuration */
+#define P_MII_DUPLEX_M BIT(6)
+#define P_MII_100MBIT_M BIT(4)
+
+#define P_GMII_1GBIT_M BIT(6)
+#define P_RGMII_ID_IG_ENABLE BIT(4)
+#define P_RGMII_ID_EG_ENABLE BIT(3)
+#define P_MII_MAC_MODE BIT(2)
+#define P_MII_SEL_M 0x3
+
+/* Interrupt */
+#define REG_SW_PORT_INT_STATUS__1 0x001B
+#define REG_SW_PORT_INT_MASK__1 0x001F
+
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_SRC_PHY_INT 1
+#define PORT_SRC_PTP_INT 2
+
+#define KSZ8795_HUGE_PACKET_SIZE 2000
+#define KSZ8863_HUGE_PACKET_SIZE 1916
+#define KSZ8863_NORMAL_PACKET_SIZE 1536
+#define KSZ8_LEGAL_PACKET_SIZE 1518
+#define KSZ9477_MAX_FRAME_SIZE 9000
+
+#define KSZ9477_REG_PORT_OUT_RATE_0 0x0420
+#define KSZ9477_OUT_RATE_NO_LIMIT 0
+
+#define KSZ9477_PORT_MRI_TC_MAP__4 0x0808
+
+#define KSZ9477_PORT_TC_MAP_S 4
+#define KSZ9477_MAX_TC_PRIO 7
+
+/* CBS related registers */
+#define REG_PORT_MTI_QUEUE_INDEX__4 0x0900
+
+#define REG_PORT_MTI_QUEUE_CTRL_0 0x0914
+
+#define MTI_SCHEDULE_MODE_M GENMASK(7, 6)
+#define MTI_SCHEDULE_STRICT_PRIO 0
+#define MTI_SCHEDULE_WRR 2
+#define MTI_SHAPING_M GENMASK(5, 4)
+#define MTI_SHAPING_OFF 0
+#define MTI_SHAPING_SRP 1
+#define MTI_SHAPING_TIME_AWARE 2
+
+#define KSZ9477_PORT_MTI_QUEUE_CTRL_1 0x0915
+#define KSZ9477_DEFAULT_WRR_WEIGHT 1
+
+#define REG_PORT_MTI_HI_WATER_MARK 0x0916
+#define REG_PORT_MTI_LO_WATER_MARK 0x0918
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
new file mode 100644
index 000000000000..4e22a695a64c
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -0,0 +1,1201 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Microchip KSZ PTP Implementation
+ *
+ * Copyright (C) 2020 ARRI Lighting
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#include <linux/dsa/ksz_common.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "ksz_common.h"
+#include "ksz_ptp.h"
+#include "ksz_ptp_reg.h"
+
+#define ptp_caps_to_data(d) container_of((d), struct ksz_ptp_data, caps)
+#define ptp_data_to_ksz_dev(d) container_of((d), struct ksz_device, ptp_data)
+#define work_to_xmit_work(w) \
+ container_of((w), struct ksz_deferred_xmit_work, work)
+
+/* Maximum drift correction in ppb:
+ * sub-ns-adj,max / (2^32 sub-ns per ns) / (40 ns per tick) * 10^9
+ * = (2^30 - 1) / 2^32 / 40 * 10^9 ~= 6249999
+ */
+#define KSZ_MAX_DRIFT_CORR 6249999
+#define KSZ_MAX_PULSE_WIDTH 125000000LL
+
+#define KSZ_PTP_INC_NS 40ULL /* HW clock is incremented every 40 ns (by 40) */
+#define KSZ_PTP_SUBNS_BITS 32
+
+#define KSZ_PTP_INT_START 13
+
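
For reference, a standalone sketch deriving the KSZ_MAX_DRIFT_CORR value from the register width and tick period stated above (a 30-bit sub-nanosecond adjustment, 1 sub-ns = 2^-32 ns, applied every 40 ns); not driver code.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_adj_subns = (1ULL << 30) - 1; /* register maximum */
	uint64_t subns_per_ns = 1ULL << 32;
	uint64_t tick_ns = 40;

	/* nanoseconds gained per nanosecond, expressed in parts per billion */
	uint64_t ppb = max_adj_subns * 1000000000ULL /
		       (subns_per_ns * tick_ns);

	printf("max drift correction = %llu ppb\n",
	       (unsigned long long)ppb); /* 6249999 */
	return 0;
}
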
+static int ksz_ptp_tou_gpio(struct ksz_device *dev)
+{
+ int ret;
+
+ if (!is_lan937x(dev))
+ return 0;
+
+ ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, GPIO_OUT,
+ GPIO_OUT);
+ if (ret)
+ return ret;
+
+ ret = ksz_rmw32(dev, REG_SW_GLOBAL_LED_OVR__4, LED_OVR_1 | LED_OVR_2,
+ LED_OVR_1 | LED_OVR_2);
+ if (ret)
+ return ret;
+
+ return ksz_rmw32(dev, REG_SW_GLOBAL_LED_SRC__4,
+ LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2,
+ LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2);
+}
+
+static int ksz_ptp_tou_reset(struct ksz_device *dev, u8 unit)
+{
+ u32 data;
+ int ret;
+
+ /* Reset trigger unit (clears TRIGGER_EN, but not GPIOSTATx) */
+ ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_RESET, TRIG_RESET);
+ if (ret)
+ return ret;
+
+ data = FIELD_PREP(TRIG_DONE_M, BIT(unit));
+ ret = ksz_write32(dev, REG_PTP_TRIG_STATUS__4, data);
+ if (ret)
+ return ret;
+
+ data = FIELD_PREP(TRIG_INT_M, BIT(unit));
+ ret = ksz_write32(dev, REG_PTP_INT_STATUS__4, data);
+ if (ret)
+ return ret;
+
+ /* Clear reset and set GPIO direction */
+ return ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, (TRIG_RESET | TRIG_ENABLE),
+ 0);
+}
+
+static int ksz_ptp_tou_pulse_verify(u64 pulse_ns)
+{
+ u32 data;
+
+ if (pulse_ns & 0x3)
+ return -EINVAL;
+
+ data = (pulse_ns / 8);
+ if (!FIELD_FIT(TRIG_PULSE_WIDTH_M, data))
+ return -ERANGE;
+
+ return 0;
+}
+
+static int ksz_ptp_tou_target_time_set(struct ksz_device *dev,
+ struct timespec64 const *ts)
+{
+ int ret;
+
+ /* Hardware has only 32 bit */
+ if ((ts->tv_sec & 0xffffffff) != ts->tv_sec)
+ return -EINVAL;
+
+ ret = ksz_write32(dev, REG_TRIG_TARGET_NANOSEC, ts->tv_nsec);
+ if (ret)
+ return ret;
+
+ ret = ksz_write32(dev, REG_TRIG_TARGET_SEC, ts->tv_sec);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ksz_ptp_tou_start(struct ksz_device *dev, u8 unit)
+{
+ u32 data;
+ int ret;
+
+ ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_ENABLE, TRIG_ENABLE);
+ if (ret)
+ return ret;
+
+ /* Check the error flag:
+ * - note that the ACTIVE flag is NOT cleared on error!
+ */
+ ret = ksz_read32(dev, REG_PTP_TRIG_STATUS__4, &data);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(TRIG_ERROR_M, data) & (1 << unit)) {
+ dev_err(dev->dev, "%s: Trigger unit%d error!\n", __func__,
+ unit);
+ ret = -EIO;
+ /* Unit will be reset on next access */
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ksz_ptp_configure_perout(struct ksz_device *dev,
+ u32 cycle_width_ns, u32 pulse_width_ns,
+ struct timespec64 const *target_time,
+ u8 index)
+{
+ u32 data;
+ int ret;
+
+ data = FIELD_PREP(TRIG_NOTIFY, 1) |
+ FIELD_PREP(TRIG_GPO_M, index) |
+ FIELD_PREP(TRIG_PATTERN_M, TRIG_POS_PERIOD);
+ ret = ksz_write32(dev, REG_TRIG_CTRL__4, data);
+ if (ret)
+ return ret;
+
+ ret = ksz_write32(dev, REG_TRIG_CYCLE_WIDTH, cycle_width_ns);
+ if (ret)
+ return ret;
+
+ /* Set cycle count 0 - Infinite */
+ ret = ksz_rmw32(dev, REG_TRIG_CYCLE_CNT, TRIG_CYCLE_CNT_M, 0);
+ if (ret)
+ return ret;
+
+ data = (pulse_width_ns / 8);
+ ret = ksz_write32(dev, REG_TRIG_PULSE_WIDTH__4, data);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_target_time_set(dev, target_time);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ksz_ptp_enable_perout(struct ksz_device *dev,
+ struct ptp_perout_request const *request,
+ int on)
+{
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ u64 req_pulse_width_ns;
+ u64 cycle_width_ns;
+ u64 pulse_width_ns;
+ int pin = 0;
+ u32 data32;
+ int ret;
+
+ if (request->flags & ~PTP_PEROUT_DUTY_CYCLE)
+ return -EOPNOTSUPP;
+
+ if (ptp_data->tou_mode != KSZ_PTP_TOU_PEROUT &&
+ ptp_data->tou_mode != KSZ_PTP_TOU_IDLE)
+ return -EBUSY;
+
+ pin = ptp_find_pin(ptp_data->clock, PTP_PF_PEROUT, request->index);
+ if (pin < 0)
+ return -EINVAL;
+
+ data32 = FIELD_PREP(PTP_GPIO_INDEX, pin) |
+ FIELD_PREP(PTP_TOU_INDEX, request->index);
+ ret = ksz_rmw32(dev, REG_PTP_UNIT_INDEX__4,
+ PTP_GPIO_INDEX | PTP_TOU_INDEX, data32);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_reset(dev, request->index);
+ if (ret)
+ return ret;
+
+ if (!on) {
+ ptp_data->tou_mode = KSZ_PTP_TOU_IDLE;
+ return 0;
+ }
+
+ ptp_data->perout_target_time_first.tv_sec = request->start.sec;
+ ptp_data->perout_target_time_first.tv_nsec = request->start.nsec;
+
+ ptp_data->perout_period.tv_sec = request->period.sec;
+ ptp_data->perout_period.tv_nsec = request->period.nsec;
+
+ cycle_width_ns = timespec64_to_ns(&ptp_data->perout_period);
+ if ((cycle_width_ns & TRIG_CYCLE_WIDTH_M) != cycle_width_ns)
+ return -EINVAL;
+
+ if (request->flags & PTP_PEROUT_DUTY_CYCLE) {
+ pulse_width_ns = request->on.sec * NSEC_PER_SEC +
+ request->on.nsec;
+ } else {
+ /* Use a duty cycle of 50%. Maximum pulse width supported by the
+ * hardware is a little bit more than 125 ms.
+ */
+ req_pulse_width_ns = (request->period.sec * NSEC_PER_SEC +
+ request->period.nsec) / 2;
+ pulse_width_ns = min_t(u64, req_pulse_width_ns,
+ KSZ_MAX_PULSE_WIDTH);
+ }
+
+ ret = ksz_ptp_tou_pulse_verify(pulse_width_ns);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_configure_perout(dev, cycle_width_ns, pulse_width_ns,
+ &ptp_data->perout_target_time_first,
+ pin);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_gpio(dev);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_start(dev, request->index);
+ if (ret)
+ return ret;
+
+ ptp_data->tou_mode = KSZ_PTP_TOU_PEROUT;
+
+ return 0;
+}
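
For reference, a sketch of the default pulse-width choice described above (half the period, capped at the 125 ms hardware limit), using assumed sample periods of 1 ms and 1 s.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_pulse_ns = 125000000ULL;
	uint64_t periods_ns[] = { 1000000ULL, 1000000000ULL };

	for (int i = 0; i < 2; i++) {
		uint64_t pulse = periods_ns[i] / 2;     /* 50% duty cycle */

		if (pulse > max_pulse_ns)
			pulse = max_pulse_ns;           /* hardware cap */
		printf("period %llu ns -> pulse %llu ns\n",
		       (unsigned long long)periods_ns[i],
		       (unsigned long long)pulse);
	}
	return 0; /* 500000 ns and 125000000 ns */
}
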
+
+static int ksz_ptp_enable_mode(struct ksz_device *dev)
+{
+ struct ksz_tagger_data *tagger_data = ksz_tagger_data(dev->ds);
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ struct ksz_port *prt;
+ struct dsa_port *dp;
+ bool tag_en = false;
+ int ret;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ prt = &dev->ports[dp->index];
+ if (prt->hwts_tx_en || prt->hwts_rx_en) {
+ tag_en = true;
+ break;
+ }
+ }
+
+ if (tag_en) {
+ ret = ptp_schedule_worker(ptp_data->clock, 0);
+ if (ret)
+ return ret;
+ } else {
+ ptp_cancel_worker_sync(ptp_data->clock);
+ }
+
+ tagger_data->hwtstamp_set_state(dev->ds, tag_en);
+
+ return ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_ENABLE,
+ tag_en ? PTP_ENABLE : 0);
+}
+
+/* The function returns the timestamping capabilities when they are
+ * requested through the ethtool -T <interface> utility.
+ */
+int ksz_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *ts)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_ptp_data *ptp_data;
+
+ ptp_data = &dev->ptp_data;
+
+ if (!ptp_data->clock)
+ return -ENODEV;
+
+ ts->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ ts->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ONESTEP_P2P);
+
+ if (is_lan937x(dev))
+ ts->tx_types |= BIT(HWTSTAMP_TX_ON);
+
+ ts->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ ts->phc_index = ptp_clock_index(ptp_data->clock);
+
+ return 0;
+}
+
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct ksz_device *dev = ds->priv;
+ struct hwtstamp_config *config;
+ struct ksz_port *prt;
+
+ prt = &dev->ports[port];
+ config = &prt->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+static int ksz_set_hwtstamp_config(struct ksz_device *dev,
+ struct ksz_port *prt,
+ struct hwtstamp_config *config)
+{
+ int ret;
+
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
+ prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = false;
+ prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
+ prt->hwts_tx_en = false;
+ break;
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
+ prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
+ prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
+ prt->hwts_tx_en = true;
+
+ ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, PTP_1STEP);
+ if (ret)
+ return ret;
+
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!is_lan937x(dev))
+ return -ERANGE;
+
+ prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = true;
+ prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
+ prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = true;
+ prt->hwts_tx_en = true;
+
+ ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, 0);
+ if (ret)
+ return ret;
+
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ prt->hwts_rx_en = false;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ prt->hwts_rx_en = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ prt->hwts_rx_en = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ prt->hwts_rx_en = true;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ return ksz_ptp_enable_mode(dev);
+}
+
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct ksz_device *dev = ds->priv;
+ struct hwtstamp_config config;
+ struct ksz_port *prt;
+ int ret;
+
+ prt = &dev->ports[port];
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ ret = ksz_set_hwtstamp_config(dev, prt, &config);
+ if (ret)
+ return ret;
+
+ memcpy(&prt->tstamp_config, &config, sizeof(config));
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static ktime_t ksz_tstamp_reconstruct(struct ksz_device *dev, ktime_t tstamp)
+{
+ struct timespec64 ptp_clock_time;
+ struct ksz_ptp_data *ptp_data;
+ struct timespec64 diff;
+ struct timespec64 ts;
+
+ ptp_data = &dev->ptp_data;
+ ts = ktime_to_timespec64(tstamp);
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_clock_time = ptp_data->clock_time;
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+ /* calculate full time from partial time stamp */
+ ts.tv_sec = (ptp_clock_time.tv_sec & ~3) | ts.tv_sec;
+
+ /* find nearest possible point in time */
+ diff = timespec64_sub(ts, ptp_clock_time);
+ if (diff.tv_sec > 2)
+ ts.tv_sec -= 4;
+ else if (diff.tv_sec < -2)
+ ts.tv_sec += 4;
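+	/* Illustrative example: with a cached clock of 1003 s and a partial
+	 * seconds value of 0, the OR above yields 1000 s; the diff of -3 s is
+	 * below -2, so 4 s are added and the reconstructed time becomes 1004 s.
+	 */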
+
+ return timespec64_to_ktime(ts);
+}
+
+bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
+ unsigned int type)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ struct ksz_device *dev = ds->priv;
+ struct ptp_header *ptp_hdr;
+ struct ksz_port *prt;
+ u8 ptp_msg_type;
+ ktime_t tstamp;
+ s64 correction;
+
+ prt = &dev->ports[port];
+
+ tstamp = KSZ_SKB_CB(skb)->tstamp;
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ksz_tstamp_reconstruct(dev, tstamp);
+
+ if (prt->tstamp_config.tx_type != HWTSTAMP_TX_ONESTEP_P2P)
+ goto out;
+
+ ptp_hdr = ptp_parse_header(skb, type);
+ if (!ptp_hdr)
+ goto out;
+
+ ptp_msg_type = ptp_get_msgtype(ptp_hdr, type);
+ if (ptp_msg_type != PTP_MSGTYPE_PDELAY_REQ)
+ goto out;
+
+ /* Only subtract the partial time stamp from the correction field. When
+ * the hardware adds the egress time stamp to the correction field of
+ * the PDelay_Resp message on tx, also only the partial time stamp will
+ * be added.
+ */
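+	/* The PTP correction field is expressed in 2^-16 ns units (scaled
+	 * nanoseconds), hence the shift by 16 below.
+	 */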
+ correction = (s64)get_unaligned_be64(&ptp_hdr->correction);
+ correction -= ktime_to_ns(tstamp) << 16;
+
+ ptp_header_update_correction(skb, type, ptp_hdr, correction);
+
+out:
+ return false;
+}
+
+void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ptp_header *hdr;
+ struct sk_buff *clone;
+ struct ksz_port *prt;
+ unsigned int type;
+ u8 ptp_msg_type;
+
+ prt = &dev->ports[port];
+
+ if (!prt->hwts_tx_en)
+ return;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return;
+
+ ptp_msg_type = ptp_get_msgtype(hdr, type);
+
+ switch (ptp_msg_type) {
+ case PTP_MSGTYPE_SYNC:
+ if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P)
+ return;
+ break;
+ case PTP_MSGTYPE_PDELAY_REQ:
+ break;
+ case PTP_MSGTYPE_PDELAY_RESP:
+ if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P) {
+ KSZ_SKB_CB(skb)->ptp_type = type;
+ KSZ_SKB_CB(skb)->update_correction = true;
+ return;
+ }
+ break;
+
+ default:
+ return;
+ }
+
+ clone = skb_clone_sk(skb);
+ if (!clone)
+ return;
+
+	/* Cache the clone so it can be used later in tag_ksz.c */
+ KSZ_SKB_CB(skb)->clone = clone;
+}
+
+static void ksz_ptp_txtstamp_skb(struct ksz_device *dev,
+ struct ksz_port *prt, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps hwtstamps = {};
+ int ret;
+
+	/* The timeout must cover the time for the DSA master to transmit the
+	 * frame, the time stamp latency, the IRQ latency and the time needed
+	 * to read the time stamp.
+	 */
+ ret = wait_for_completion_timeout(&prt->tstamp_msg_comp,
+ msecs_to_jiffies(100));
+ if (!ret)
+ return;
+
+ hwtstamps.hwtstamp = prt->tstamp_msg;
+ skb_complete_tx_timestamp(skb, &hwtstamps);
+}
+
+void ksz_port_deferred_xmit(struct kthread_work *work)
+{
+ struct ksz_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+ struct sk_buff *clone, *skb = xmit_work->skb;
+ struct dsa_switch *ds = xmit_work->dp->ds;
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *prt;
+
+ prt = &dev->ports[xmit_work->dp->index];
+
+ clone = KSZ_SKB_CB(skb)->clone;
+
+ skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ reinit_completion(&prt->tstamp_msg_comp);
+
+ dsa_enqueue_skb(skb, skb->dev);
+
+ ksz_ptp_txtstamp_skb(dev, prt, clone);
+
+ kfree(xmit_work);
+}
+
+static int _ksz_ptp_gettime(struct ksz_device *dev, struct timespec64 *ts)
+{
+ u32 nanoseconds;
+ u32 seconds;
+ u8 phase;
+ int ret;
+
+ /* Copy current PTP clock into shadow registers and read */
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_READ_TIME, PTP_READ_TIME);
+ if (ret)
+ return ret;
+
+ ret = ksz_read8(dev, REG_PTP_RTC_SUB_NANOSEC__2, &phase);
+ if (ret)
+ return ret;
+
+ ret = ksz_read32(dev, REG_PTP_RTC_NANOSEC, &nanoseconds);
+ if (ret)
+ return ret;
+
+ ret = ksz_read32(dev, REG_PTP_RTC_SEC, &seconds);
+ if (ret)
+ return ret;
+
+ ts->tv_sec = seconds;
+ ts->tv_nsec = nanoseconds + phase * 8;
+
+ return 0;
+}
+
+static int ksz_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+ ret = _ksz_ptp_gettime(dev, ts);
+ mutex_unlock(&ptp_data->lock);
+
+ return ret;
+}
+
+static int ksz_ptp_restart_perout(struct ksz_device *dev)
+{
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ s64 now_ns, first_ns, period_ns, next_ns;
+ struct ptp_perout_request request;
+ struct timespec64 next;
+ struct timespec64 now;
+ unsigned int count;
+ int ret;
+
+ dev_info(dev->dev, "Restarting periodic output signal\n");
+
+ ret = _ksz_ptp_gettime(dev, &now);
+ if (ret)
+ return ret;
+
+ now_ns = timespec64_to_ns(&now);
+ first_ns = timespec64_to_ns(&ptp_data->perout_target_time_first);
+
+ /* Calculate next perout event based on start time and period */
+ period_ns = timespec64_to_ns(&ptp_data->perout_period);
+
+ if (first_ns < now_ns) {
+ count = div_u64(now_ns - first_ns, period_ns);
+ next_ns = first_ns + count * period_ns;
+ } else {
+ next_ns = first_ns;
+ }
+
+	/* Ensure a 100 ms guard time before the next event */
+ while (next_ns < now_ns + 100000000)
+ next_ns += period_ns;
+
+ /* Restart periodic output signal */
+ next = ns_to_timespec64(next_ns);
+ request.start.sec = next.tv_sec;
+ request.start.nsec = next.tv_nsec;
+ request.period.sec = ptp_data->perout_period.tv_sec;
+ request.period.nsec = ptp_data->perout_period.tv_nsec;
+ request.index = 0;
+ request.flags = 0;
+
+ return ksz_ptp_enable_perout(dev, &request, 1);
+}
+
+static int ksz_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+
+ /* Write to shadow registers and Load PTP clock */
+ ret = ksz_write16(dev, REG_PTP_RTC_SUB_NANOSEC__2, PTP_RTC_0NS);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, ts->tv_nsec);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_write32(dev, REG_PTP_RTC_SEC, ts->tv_sec);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_LOAD_TIME, PTP_LOAD_TIME);
+ if (ret)
+ goto unlock;
+
+ switch (ptp_data->tou_mode) {
+ case KSZ_PTP_TOU_IDLE:
+ break;
+
+ case KSZ_PTP_TOU_PEROUT:
+ ret = ksz_ptp_restart_perout(dev);
+ if (ret)
+ goto unlock;
+
+ break;
+ }
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_data->clock_time = *ts;
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+unlock:
+ mutex_unlock(&ptp_data->lock);
+
+ return ret;
+}
+
+static int ksz_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ u64 base, adj;
+ bool negative;
+ u32 data32;
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+
+ if (scaled_ppm) {
+ base = KSZ_PTP_INC_NS << KSZ_PTP_SUBNS_BITS;
+ negative = diff_by_scaled_ppm(base, scaled_ppm, &adj);
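+		/* adj now holds the absolute rate offset, roughly
+		 * base * |scaled_ppm| / (10^6 * 2^16), since scaled_ppm is ppm
+		 * with a 16-bit fractional part; negative means slowing down.
+		 */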
+
+ data32 = (u32)adj;
+ data32 &= PTP_SUBNANOSEC_M;
+ if (!negative)
+ data32 |= PTP_RATE_DIR;
+
+ ret = ksz_write32(dev, REG_PTP_SUBNANOSEC_RATE, data32);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE,
+ PTP_CLK_ADJ_ENABLE);
+ if (ret)
+ goto unlock;
+ } else {
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE, 0);
+ if (ret)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&ptp_data->lock);
+ return ret;
+}
+
+static int ksz_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ struct timespec64 delta64 = ns_to_timespec64(delta);
+ s32 sec, nsec;
+ u16 data16;
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+
+	/* Do not use ns_to_timespec64() here: the hardware applies the same
+	 * direction (PTP_STEP_DIR) to both sec and nsec, so split delta into
+	 * magnitudes with div_s64_rem() instead.
+	 */
+ sec = div_s64_rem(delta, NSEC_PER_SEC, &nsec);
+
+ ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, abs(nsec));
+ if (ret)
+ goto unlock;
+
+ ret = ksz_write32(dev, REG_PTP_RTC_SEC, abs(sec));
+ if (ret)
+ goto unlock;
+
+ ret = ksz_read16(dev, REG_PTP_CLK_CTRL, &data16);
+ if (ret)
+ goto unlock;
+
+ data16 |= PTP_STEP_ADJ;
+
+ /* PTP_STEP_DIR -- 0: subtract, 1: add */
+ if (delta < 0)
+ data16 &= ~PTP_STEP_DIR;
+ else
+ data16 |= PTP_STEP_DIR;
+
+ ret = ksz_write16(dev, REG_PTP_CLK_CTRL, data16);
+ if (ret)
+ goto unlock;
+
+ switch (ptp_data->tou_mode) {
+ case KSZ_PTP_TOU_IDLE:
+ break;
+
+ case KSZ_PTP_TOU_PEROUT:
+ ret = ksz_ptp_restart_perout(dev);
+ if (ret)
+ goto unlock;
+
+ break;
+ }
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_data->clock_time = timespec64_add(ptp_data->clock_time, delta64);
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+unlock:
+ mutex_unlock(&ptp_data->lock);
+ return ret;
+}
+
+static int ksz_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *req, int on)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ int ret;
+
+ switch (req->type) {
+ case PTP_CLK_REQ_PEROUT:
+ mutex_lock(&ptp_data->lock);
+ ret = ksz_ptp_enable_perout(dev, &req->perout, on);
+ mutex_unlock(&ptp_data->lock);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int ksz_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ int ret = 0;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/* This function is assigned to the do_aux_work hook of the ptp_clock
+ * capability. It periodically refreshes the cached clock time used for
+ * reconstructing partial time stamps.
+ */
+static long ksz_ptp_do_aux_work(struct ptp_clock_info *ptp)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ struct timespec64 ts;
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+ ret = _ksz_ptp_gettime(dev, &ts);
+ if (ret)
+ goto out;
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_data->clock_time = ts;
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+out:
+ mutex_unlock(&ptp_data->lock);
+
+ return HZ; /* reschedule in 1 second */
+}
+
+static int ksz_ptp_start_clock(struct ksz_device *dev)
+{
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ int ret;
+
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ENABLE, PTP_CLK_ENABLE);
+ if (ret)
+ return ret;
+
+ ptp_data->clock_time.tv_sec = 0;
+ ptp_data->clock_time.tv_nsec = 0;
+
+ return 0;
+}
+
+int ksz_ptp_clock_register(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_ptp_data *ptp_data;
+ int ret;
+ u8 i;
+
+ ptp_data = &dev->ptp_data;
+ mutex_init(&ptp_data->lock);
+ spin_lock_init(&ptp_data->clock_lock);
+
+ ptp_data->caps.owner = THIS_MODULE;
+ snprintf(ptp_data->caps.name, 16, "Microchip Clock");
+ ptp_data->caps.max_adj = KSZ_MAX_DRIFT_CORR;
+ ptp_data->caps.gettime64 = ksz_ptp_gettime;
+ ptp_data->caps.settime64 = ksz_ptp_settime;
+ ptp_data->caps.adjfine = ksz_ptp_adjfine;
+ ptp_data->caps.adjtime = ksz_ptp_adjtime;
+ ptp_data->caps.do_aux_work = ksz_ptp_do_aux_work;
+ ptp_data->caps.enable = ksz_ptp_enable;
+ ptp_data->caps.verify = ksz_ptp_verify_pin;
+ ptp_data->caps.n_pins = KSZ_PTP_N_GPIO;
+ ptp_data->caps.n_per_out = 3;
+
+ ret = ksz_ptp_start_clock(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < KSZ_PTP_N_GPIO; i++) {
+ struct ptp_pin_desc *ptp_pin = &ptp_data->pin_config[i];
+
+ snprintf(ptp_pin->name,
+ sizeof(ptp_pin->name), "ksz_ptp_pin_%02d", i);
+ ptp_pin->index = i;
+ ptp_pin->func = PTP_PF_NONE;
+ }
+
+ ptp_data->caps.pin_config = ptp_data->pin_config;
+
+	/* Currently only P2P mode is supported. When the 802_1AS bit is set,
+	 * all PTP packets are forwarded to the host port and to no other
+	 * ports.
+	 */
+ ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_TC_P2P | PTP_802_1AS,
+ PTP_TC_P2P | PTP_802_1AS);
+ if (ret)
+ return ret;
+
+ ptp_data->clock = ptp_clock_register(&ptp_data->caps, dev->dev);
+ if (IS_ERR_OR_NULL(ptp_data->clock))
+ return PTR_ERR(ptp_data->clock);
+
+ return 0;
+}
+
+void ksz_ptp_clock_unregister(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_ptp_data *ptp_data;
+
+ ptp_data = &dev->ptp_data;
+
+ if (ptp_data->clock)
+ ptp_clock_unregister(ptp_data->clock);
+}
+
+static irqreturn_t ksz_ptp_msg_thread_fn(int irq, void *dev_id)
+{
+ struct ksz_ptp_irq *ptpmsg_irq = dev_id;
+ struct ksz_device *dev;
+ struct ksz_port *port;
+ u32 tstamp_raw;
+ ktime_t tstamp;
+ int ret;
+
+ port = ptpmsg_irq->port;
+ dev = port->ksz_dev;
+
+ if (ptpmsg_irq->ts_en) {
+ ret = ksz_read32(dev, ptpmsg_irq->ts_reg, &tstamp_raw);
+ if (ret)
+ return IRQ_NONE;
+
+ tstamp = ksz_decode_tstamp(tstamp_raw);
+
+ port->tstamp_msg = ksz_tstamp_reconstruct(dev, tstamp);
+
+ complete(&port->tstamp_msg_comp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ksz_ptp_irq_thread_fn(int irq, void *dev_id)
+{
+ struct ksz_irq *ptpirq = dev_id;
+ unsigned int nhandled = 0;
+ struct ksz_device *dev;
+ unsigned int sub_irq;
+ u16 data;
+ int ret;
+ u8 n;
+
+ dev = ptpirq->dev;
+
+ ret = ksz_read16(dev, ptpirq->reg_status, &data);
+ if (ret)
+ goto out;
+
+	/* Clear the interrupts (write-1-to-clear) */
+ ret = ksz_write16(dev, ptpirq->reg_status, data);
+ if (ret)
+ return IRQ_NONE;
+
+ for (n = 0; n < ptpirq->nirqs; ++n) {
+ if (data & BIT(n + KSZ_PTP_INT_START)) {
+ sub_irq = irq_find_mapping(ptpirq->domain, n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
+ }
+
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static void ksz_ptp_irq_mask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked &= ~BIT(d->hwirq + KSZ_PTP_INT_START);
+}
+
+static void ksz_ptp_irq_unmask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked |= BIT(d->hwirq + KSZ_PTP_INT_START);
+}
+
+static void ksz_ptp_irq_bus_lock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&kirq->dev->lock_irq);
+}
+
+static void ksz_ptp_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+ struct ksz_device *dev = kirq->dev;
+ int ret;
+
+ ret = ksz_write16(dev, kirq->reg_mask, kirq->masked);
+ if (ret)
+ dev_err(dev->dev, "failed to change IRQ mask\n");
+
+ mutex_unlock(&dev->lock_irq);
+}
+
+static const struct irq_chip ksz_ptp_irq_chip = {
+ .name = "ksz-irq",
+ .irq_mask = ksz_ptp_irq_mask,
+ .irq_unmask = ksz_ptp_irq_unmask,
+ .irq_bus_lock = ksz_ptp_irq_bus_lock,
+ .irq_bus_sync_unlock = ksz_ptp_irq_bus_sync_unlock,
+};
+
+static int ksz_ptp_irq_domain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &ksz_ptp_irq_chip, handle_level_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ksz_ptp_irq_domain_ops = {
+ .map = ksz_ptp_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void ksz_ptp_msg_irq_free(struct ksz_port *port, u8 n)
+{
+ struct ksz_ptp_irq *ptpmsg_irq;
+
+ ptpmsg_irq = &port->ptpmsg_irq[n];
+
+ free_irq(ptpmsg_irq->num, ptpmsg_irq);
+ irq_dispose_mapping(ptpmsg_irq->num);
+}
+
+static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
+{
+ u16 ts_reg[] = {REG_PTP_PORT_PDRESP_TS, REG_PTP_PORT_XDELAY_TS,
+ REG_PTP_PORT_SYNC_TS};
+ static const char * const name[] = {"pdresp-msg", "xdreq-msg",
+ "sync-msg"};
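+	/* Index n corresponds to KSZ_PDRES_MSG (0), KSZ_XDREQ_MSG (1) and
+	 * KSZ_SYNC_MSG (2), matching the order of both arrays above.
+	 */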
+ const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
+ struct ksz_ptp_irq *ptpmsg_irq;
+
+ ptpmsg_irq = &port->ptpmsg_irq[n];
+
+ ptpmsg_irq->port = port;
+ ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);
+
+ snprintf(ptpmsg_irq->name, sizeof(ptpmsg_irq->name), name[n]);
+
+ ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n);
+ if (ptpmsg_irq->num < 0)
+ return ptpmsg_irq->num;
+
+ return request_threaded_irq(ptpmsg_irq->num, NULL,
+ ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
+ ptpmsg_irq->name, ptpmsg_irq);
+}
+
+int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
+{
+ struct ksz_device *dev = ds->priv;
+ const struct ksz_dev_ops *ops = dev->dev_ops;
+ struct ksz_port *port = &dev->ports[p];
+ struct ksz_irq *ptpirq = &port->ptpirq;
+ int irq;
+ int ret;
+
+ ptpirq->dev = dev;
+ ptpirq->masked = 0;
+ ptpirq->nirqs = 3;
+ ptpirq->reg_mask = ops->get_port_addr(p, REG_PTP_PORT_TX_INT_ENABLE__2);
+ ptpirq->reg_status = ops->get_port_addr(p,
+ REG_PTP_PORT_TX_INT_STATUS__2);
+ snprintf(ptpirq->name, sizeof(ptpirq->name), "ptp-irq-%d", p);
+
+ init_completion(&port->tstamp_msg_comp);
+
+ ptpirq->domain = irq_domain_add_linear(dev->dev->of_node, ptpirq->nirqs,
+ &ksz_ptp_irq_domain_ops, ptpirq);
+ if (!ptpirq->domain)
+ return -ENOMEM;
+
+ for (irq = 0; irq < ptpirq->nirqs; irq++)
+ irq_create_mapping(ptpirq->domain, irq);
+
+ ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
+ if (ptpirq->irq_num < 0) {
+ ret = ptpirq->irq_num;
+ goto out;
+ }
+
+ ret = request_threaded_irq(ptpirq->irq_num, NULL, ksz_ptp_irq_thread_fn,
+ IRQF_ONESHOT, ptpirq->name, ptpirq);
+ if (ret)
+ goto out;
+
+ for (irq = 0; irq < ptpirq->nirqs; irq++) {
+ ret = ksz_ptp_msg_irq_setup(port, irq);
+ if (ret)
+ goto out_ptp_msg;
+ }
+
+ return 0;
+
+out_ptp_msg:
+ free_irq(ptpirq->irq_num, ptpirq);
+ while (irq--)
+ free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
+out:
+ for (irq = 0; irq < ptpirq->nirqs; irq++)
+ irq_dispose_mapping(port->ptpmsg_irq[irq].num);
+
+ irq_domain_remove(ptpirq->domain);
+
+ return ret;
+}
+
+void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *port = &dev->ports[p];
+ struct ksz_irq *ptpirq = &port->ptpirq;
+ u8 n;
+
+ for (n = 0; n < ptpirq->nirqs; n++)
+ ksz_ptp_msg_irq_free(port, n);
+
+ free_irq(ptpirq->irq_num, ptpirq);
+ irq_dispose_mapping(ptpirq->irq_num);
+
+ irq_domain_remove(ptpirq->domain);
+}
+
+MODULE_AUTHOR("Christian Eggers <ceggers@arri.de>");
+MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>");
+MODULE_DESCRIPTION("PTP support for KSZ switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_ptp.h b/drivers/net/dsa/microchip/ksz_ptp.h
new file mode 100644
index 000000000000..0ca8ca4f804e
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_ptp.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip KSZ PTP Implementation
+ *
+ * Copyright (C) 2020 ARRI Lighting
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#ifndef _NET_DSA_DRIVERS_KSZ_PTP_H
+#define _NET_DSA_DRIVERS_KSZ_PTP_H
+
+#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP)
+
+#include <linux/ptp_clock_kernel.h>
+
+#define KSZ_PTP_N_GPIO 2
+
+enum ksz_ptp_tou_mode {
+ KSZ_PTP_TOU_IDLE,
+ KSZ_PTP_TOU_PEROUT,
+};
+
+struct ksz_ptp_data {
+ struct ptp_clock_info caps;
+ struct ptp_clock *clock;
+ struct ptp_pin_desc pin_config[KSZ_PTP_N_GPIO];
+ /* Serializes all operations on the PTP hardware clock */
+ struct mutex lock;
+ /* lock for accessing the clock_time */
+ spinlock_t clock_lock;
+ struct timespec64 clock_time;
+ enum ksz_ptp_tou_mode tou_mode;
+ struct timespec64 perout_target_time_first; /* start of first pulse */
+ struct timespec64 perout_period;
+};
+
+int ksz_ptp_clock_register(struct dsa_switch *ds);
+
+void ksz_ptp_clock_unregister(struct dsa_switch *ds);
+
+int ksz_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *ts);
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+void ksz_port_deferred_xmit(struct kthread_work *work);
+bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
+ unsigned int type);
+int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p);
+void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p);
+
+#else
+
+struct ksz_ptp_data {
+ /* Serializes all operations on the PTP hardware clock */
+ struct mutex lock;
+};
+
+static inline int ksz_ptp_clock_register(struct dsa_switch *ds)
+{
+ return 0;
+}
+
+static inline void ksz_ptp_clock_unregister(struct dsa_switch *ds) { }
+
+static inline int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
+{
+ return 0;
+}
+
+static inline void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p) {}
+
+#define ksz_get_ts_info NULL
+
+#define ksz_hwtstamp_get NULL
+
+#define ksz_hwtstamp_set NULL
+
+#define ksz_port_rxtstamp NULL
+
+#define ksz_port_txtstamp NULL
+
+#define ksz_port_deferred_xmit NULL
+
+#endif /* End of CONFIG_NET_DSA_MICROCHIP_KSZ_PTP */
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz_ptp_reg.h b/drivers/net/dsa/microchip/ksz_ptp_reg.h
new file mode 100644
index 000000000000..d71e85510cda
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_ptp_reg.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip KSZ PTP register definitions
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#ifndef __KSZ_PTP_REGS_H
+#define __KSZ_PTP_REGS_H
+
+#define REG_SW_GLOBAL_LED_OVR__4 0x0120
+#define LED_OVR_2 BIT(1)
+#define LED_OVR_1 BIT(0)
+
+#define REG_SW_GLOBAL_LED_SRC__4 0x0128
+#define LED_SRC_PTP_GPIO_1 BIT(3)
+#define LED_SRC_PTP_GPIO_2 BIT(2)
+
+/* 5 - PTP Clock */
+#define REG_PTP_CLK_CTRL 0x0500
+
+#define PTP_STEP_ADJ BIT(6)
+#define PTP_STEP_DIR BIT(5)
+#define PTP_READ_TIME BIT(4)
+#define PTP_LOAD_TIME BIT(3)
+#define PTP_CLK_ADJ_ENABLE BIT(2)
+#define PTP_CLK_ENABLE BIT(1)
+#define PTP_CLK_RESET BIT(0)
+
+#define REG_PTP_RTC_SUB_NANOSEC__2 0x0502
+
+#define PTP_RTC_SUB_NANOSEC_M 0x0007
+#define PTP_RTC_0NS 0x00
+
+#define REG_PTP_RTC_NANOSEC 0x0504
+
+#define REG_PTP_RTC_SEC 0x0508
+
+#define REG_PTP_SUBNANOSEC_RATE 0x050C
+
+#define PTP_SUBNANOSEC_M 0x3FFFFFFF
+#define PTP_RATE_DIR BIT(31)
+#define PTP_TMP_RATE_ENABLE BIT(30)
+
+#define REG_PTP_SUBNANOSEC_RATE_L 0x050E
+
+#define REG_PTP_RATE_DURATION 0x0510
+#define REG_PTP_RATE_DURATION_H 0x0510
+#define REG_PTP_RATE_DURATION_L 0x0512
+
+#define REG_PTP_MSG_CONF1 0x0514
+
+#define PTP_802_1AS BIT(7)
+#define PTP_ENABLE BIT(6)
+#define PTP_ETH_ENABLE BIT(5)
+#define PTP_IPV4_UDP_ENABLE BIT(4)
+#define PTP_IPV6_UDP_ENABLE BIT(3)
+#define PTP_TC_P2P BIT(2)
+#define PTP_MASTER BIT(1)
+#define PTP_1STEP BIT(0)
+
+#define REG_PTP_UNIT_INDEX__4 0x0520
+
+#define PTP_GPIO_INDEX GENMASK(19, 16)
+#define PTP_TSI_INDEX BIT(8)
+#define PTP_TOU_INDEX GENMASK(1, 0)
+
+#define REG_PTP_TRIG_STATUS__4 0x0524
+
+#define TRIG_ERROR_M GENMASK(18, 16)
+#define TRIG_DONE_M GENMASK(2, 0)
+
+#define REG_PTP_INT_STATUS__4 0x0528
+
+#define TRIG_INT_M GENMASK(18, 16)
+#define TS_INT_M GENMASK(1, 0)
+
+#define REG_PTP_CTRL_STAT__4 0x052C
+
+#define GPIO_IN BIT(7)
+#define GPIO_OUT BIT(6)
+#define TS_INT_ENABLE BIT(5)
+#define TRIG_ACTIVE BIT(4)
+#define TRIG_ENABLE BIT(3)
+#define TRIG_RESET BIT(2)
+#define TS_ENABLE BIT(1)
+#define TS_RESET BIT(0)
+
+#define REG_TRIG_TARGET_NANOSEC 0x0530
+#define REG_TRIG_TARGET_SEC 0x0534
+
+#define REG_TRIG_CTRL__4 0x0538
+
+#define TRIG_CASCADE_ENABLE BIT(31)
+#define TRIG_CASCADE_TAIL BIT(30)
+#define TRIG_CASCADE_UPS_M GENMASK(29, 26)
+#define TRIG_NOW BIT(25)
+#define TRIG_NOTIFY BIT(24)
+#define TRIG_EDGE BIT(23)
+#define TRIG_PATTERN_M GENMASK(22, 20)
+#define TRIG_NEG_EDGE 0
+#define TRIG_POS_EDGE 1
+#define TRIG_NEG_PULSE 2
+#define TRIG_POS_PULSE 3
+#define TRIG_NEG_PERIOD 4
+#define TRIG_POS_PERIOD 5
+#define TRIG_REG_OUTPUT 6
+#define TRIG_GPO_M GENMASK(19, 16)
+#define TRIG_CASCADE_ITERATE_CNT_M GENMASK(15, 0)
+
+#define REG_TRIG_CYCLE_WIDTH 0x053C
+#define TRIG_CYCLE_WIDTH_M GENMASK(31, 0)
+
+#define REG_TRIG_CYCLE_CNT 0x0540
+
+#define TRIG_CYCLE_CNT_M GENMASK(31, 16)
+#define TRIG_BIT_PATTERN_M GENMASK(15, 0)
+
+#define REG_TRIG_ITERATE_TIME 0x0544
+
+#define REG_TRIG_PULSE_WIDTH__4 0x0548
+
+#define TRIG_PULSE_WIDTH_M GENMASK(23, 0)
+
+/* Port PTP Register */
+#define REG_PTP_PORT_RX_DELAY__2 0x0C00
+#define REG_PTP_PORT_TX_DELAY__2 0x0C02
+#define REG_PTP_PORT_ASYM_DELAY__2 0x0C04
+
+#define REG_PTP_PORT_XDELAY_TS 0x0C08
+#define REG_PTP_PORT_SYNC_TS 0x0C0C
+#define REG_PTP_PORT_PDRESP_TS 0x0C10
+
+#define REG_PTP_PORT_TX_INT_STATUS__2 0x0C14
+#define REG_PTP_PORT_TX_INT_ENABLE__2 0x0C16
+
+#define PTP_PORT_SYNC_INT BIT(15)
+#define PTP_PORT_XDELAY_REQ_INT BIT(14)
+#define PTP_PORT_PDELAY_RESP_INT BIT(13)
+#define KSZ_SYNC_MSG 2
+#define KSZ_XDREQ_MSG 1
+#define KSZ_PDRES_MSG 0
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
new file mode 100644
index 000000000000..96c52e8fb51b
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Microchip ksz series register access through SPI
+ *
+ * Copyright (C) 2017 Microchip Technology Inc.
+ * Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "ksz_common.h"
+
+#define KSZ8795_SPI_ADDR_SHIFT 12
+#define KSZ8795_SPI_ADDR_ALIGN 3
+#define KSZ8795_SPI_TURNAROUND_SHIFT 1
+
+#define KSZ8863_SPI_ADDR_SHIFT 8
+#define KSZ8863_SPI_ADDR_ALIGN 8
+#define KSZ8863_SPI_TURNAROUND_SHIFT 0
+
+#define KSZ9477_SPI_ADDR_SHIFT 24
+#define KSZ9477_SPI_ADDR_ALIGN 3
+#define KSZ9477_SPI_TURNAROUND_SHIFT 5
+
+KSZ_REGMAP_TABLE(ksz8795, 16, KSZ8795_SPI_ADDR_SHIFT,
+ KSZ8795_SPI_TURNAROUND_SHIFT, KSZ8795_SPI_ADDR_ALIGN);
+
+KSZ_REGMAP_TABLE(ksz8863, 16, KSZ8863_SPI_ADDR_SHIFT,
+ KSZ8863_SPI_TURNAROUND_SHIFT, KSZ8863_SPI_ADDR_ALIGN);
+
+KSZ_REGMAP_TABLE(ksz9477, 32, KSZ9477_SPI_ADDR_SHIFT,
+ KSZ9477_SPI_TURNAROUND_SHIFT, KSZ9477_SPI_ADDR_ALIGN);
+
+static int ksz_spi_probe(struct spi_device *spi)
+{
+ const struct regmap_config *regmap_config;
+ const struct ksz_chip_data *chip;
+ struct device *ddev = &spi->dev;
+ struct regmap_config rc;
+ struct ksz_device *dev;
+ int i, ret = 0;
+
+ dev = ksz_switch_alloc(&spi->dev, spi);
+ if (!dev)
+ return -ENOMEM;
+
+ chip = device_get_match_data(ddev);
+ if (!chip)
+ return -EINVAL;
+
+ if (chip->chip_id == KSZ8830_CHIP_ID)
+ regmap_config = ksz8863_regmap_config;
+ else if (chip->chip_id == KSZ8795_CHIP_ID ||
+ chip->chip_id == KSZ8794_CHIP_ID ||
+ chip->chip_id == KSZ8765_CHIP_ID)
+ regmap_config = ksz8795_regmap_config;
+ else
+ regmap_config = ksz9477_regmap_config;
+
+ for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
+ rc = regmap_config[i];
+ rc.lock_arg = &dev->regmap_mutex;
+ rc.wr_table = chip->wr_table;
+ rc.rd_table = chip->rd_table;
+ dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
+
+ if (IS_ERR(dev->regmap[i])) {
+ return dev_err_probe(&spi->dev, PTR_ERR(dev->regmap[i]),
+ "Failed to initialize regmap%i\n",
+ regmap_config[i].val_bits);
+ }
+ }
+
+ if (spi->dev.platform_data)
+ dev->pdata = spi->dev.platform_data;
+
+ /* setup spi */
+ spi->mode = SPI_MODE_3;
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+
+ dev->irq = spi->irq;
+
+ ret = ksz_switch_register(dev);
+
+ /* Main DSA driver may not be started yet. */
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, dev);
+
+ return 0;
+}
+
+static void ksz_spi_remove(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ ksz_switch_remove(dev);
+}
+
+static void ksz_spi_shutdown(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (!dev)
+ return;
+
+ if (dev->dev_ops->reset)
+ dev->dev_ops->reset(dev);
+
+ dsa_switch_shutdown(dev->ds);
+
+ spi_set_drvdata(spi, NULL);
+}
+
+static const struct of_device_id ksz_dt_ids[] = {
+ {
+ .compatible = "microchip,ksz8765",
+ .data = &ksz_switch_chips[KSZ8765]
+ },
+ {
+ .compatible = "microchip,ksz8794",
+ .data = &ksz_switch_chips[KSZ8794]
+ },
+ {
+ .compatible = "microchip,ksz8795",
+ .data = &ksz_switch_chips[KSZ8795]
+ },
+ {
+ .compatible = "microchip,ksz8863",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
+ {
+ .compatible = "microchip,ksz8873",
+ .data = &ksz_switch_chips[KSZ8830]
+ },
+ {
+ .compatible = "microchip,ksz9477",
+ .data = &ksz_switch_chips[KSZ9477]
+ },
+ {
+ .compatible = "microchip,ksz9896",
+ .data = &ksz_switch_chips[KSZ9896]
+ },
+ {
+ .compatible = "microchip,ksz9897",
+ .data = &ksz_switch_chips[KSZ9897]
+ },
+ {
+ .compatible = "microchip,ksz9893",
+ .data = &ksz_switch_chips[KSZ9893]
+ },
+ {
+ .compatible = "microchip,ksz9563",
+ .data = &ksz_switch_chips[KSZ9563]
+ },
+ {
+ .compatible = "microchip,ksz8563",
+ .data = &ksz_switch_chips[KSZ8563]
+ },
+ {
+ .compatible = "microchip,ksz9567",
+ .data = &ksz_switch_chips[KSZ9567]
+ },
+ {
+ .compatible = "microchip,lan9370",
+ .data = &ksz_switch_chips[LAN9370]
+ },
+ {
+ .compatible = "microchip,lan9371",
+ .data = &ksz_switch_chips[LAN9371]
+ },
+ {
+ .compatible = "microchip,lan9372",
+ .data = &ksz_switch_chips[LAN9372]
+ },
+ {
+ .compatible = "microchip,lan9373",
+ .data = &ksz_switch_chips[LAN9373]
+ },
+ {
+ .compatible = "microchip,lan9374",
+ .data = &ksz_switch_chips[LAN9374]
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ksz_dt_ids);
+
+static const struct spi_device_id ksz_spi_ids[] = {
+ { "ksz8765" },
+ { "ksz8794" },
+ { "ksz8795" },
+ { "ksz8863" },
+ { "ksz8873" },
+ { "ksz9477" },
+ { "ksz9896" },
+ { "ksz9897" },
+ { "ksz9893" },
+ { "ksz9563" },
+ { "ksz8563" },
+ { "ksz9567" },
+ { "lan9370" },
+ { "lan9371" },
+ { "lan9372" },
+ { "lan9373" },
+ { "lan9374" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, ksz_spi_ids);
+
+static struct spi_driver ksz_spi_driver = {
+ .driver = {
+ .name = "ksz-switch",
+ .owner = THIS_MODULE,
+ .of_match_table = ksz_dt_ids,
+ },
+ .id_table = ksz_spi_ids,
+ .probe = ksz_spi_probe,
+ .remove = ksz_spi_remove,
+ .shutdown = ksz_spi_shutdown,
+};
+
+module_spi_driver(ksz_spi_driver);
+
+MODULE_ALIAS("spi:ksz9477");
+MODULE_ALIAS("spi:ksz9896");
+MODULE_ALIAS("spi:ksz9897");
+MODULE_ALIAS("spi:ksz9893");
+MODULE_ALIAS("spi:ksz9563");
+MODULE_ALIAS("spi:ksz8563");
+MODULE_ALIAS("spi:ksz9567");
+MODULE_ALIAS("spi:lan937x");
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
+MODULE_DESCRIPTION("Microchip ksz Series Switch SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/lan937x.h b/drivers/net/dsa/microchip/lan937x.h
new file mode 100644
index 000000000000..3388d91dbc44
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip lan937x dev ops headers
+ * Copyright (C) 2019-2022 Microchip Technology Inc.
+ */
+
+#ifndef __LAN937X_CFG_H
+#define __LAN937X_CFG_H
+
+int lan937x_reset_switch(struct ksz_device *dev);
+int lan937x_setup(struct dsa_switch *ds);
+void lan937x_teardown(struct dsa_switch *ds);
+void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port);
+void lan937x_config_cpu_port(struct dsa_switch *ds);
+int lan937x_switch_init(struct ksz_device *dev);
+void lan937x_switch_exit(struct ksz_device *dev);
+int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu);
+void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config);
+void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port);
+int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
+int lan937x_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val);
+#endif
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
new file mode 100644
index 000000000000..399a3905e6ca
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Microchip LAN937X switch driver main logic
+ * Copyright (C) 2019-2022 Microchip Technology Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/math.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "lan937x_reg.h"
+#include "ksz_common.h"
+#include "ksz9477.h"
+#include "lan937x.h"
+
+static int lan937x_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+{
+ return regmap_update_bits(dev->regmap[0], addr, bits, set ? bits : 0);
+}
+
+static int lan937x_port_cfg(struct ksz_device *dev, int port, int offset,
+ u8 bits, bool set)
+{
+ return regmap_update_bits(dev->regmap[0], PORT_CTRL_ADDR(port, offset),
+ bits, set ? bits : 0);
+}
+
+static int lan937x_enable_spi_indirect_access(struct ksz_device *dev)
+{
+ u16 data16;
+ int ret;
+
+ /* Enable Phy access through SPI */
+ ret = lan937x_cfg(dev, REG_GLOBAL_CTRL_0, SW_PHY_REG_BLOCK, false);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_read16(dev, REG_VPHY_SPECIAL_CTRL__2, &data16);
+ if (ret < 0)
+ return ret;
+
+ /* Allow SPI access */
+ data16 |= VPHY_SPI_INDIRECT_ENABLE;
+
+ return ksz_write16(dev, REG_VPHY_SPECIAL_CTRL__2, data16);
+}
+
+static int lan937x_vphy_ind_addr_wr(struct ksz_device *dev, int addr, int reg)
+{
+ u16 addr_base = REG_PORT_T1_PHY_CTRL_BASE;
+ u16 temp;
+
+ /* get register address based on the logical port */
+ temp = PORT_CTRL_ADDR(addr, (addr_base + (reg << 2)));
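+	/* Illustrative example: port 0, reg 1 gives 0x0100 + 0x04, OR'ed with
+	 * (0 + 1) << 12, i.e. VPHY indirect address 0x1104.
+	 */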
+
+ return ksz_write16(dev, REG_VPHY_IND_ADDR__2, temp);
+}
+
+static int lan937x_internal_phy_write(struct ksz_device *dev, int addr, int reg,
+ u16 val)
+{
+ unsigned int value;
+ int ret;
+
+ /* Check for internal phy port */
+ if (!dev->info->internal_phy[addr])
+ return -EOPNOTSUPP;
+
+ ret = lan937x_vphy_ind_addr_wr(dev, addr, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Write the data to be written to the VPHY reg */
+ ret = ksz_write16(dev, REG_VPHY_IND_DATA__2, val);
+ if (ret < 0)
+ return ret;
+
+ /* Write the Write En and Busy bit */
+ ret = ksz_write16(dev, REG_VPHY_IND_CTRL__2,
+ (VPHY_IND_WRITE | VPHY_IND_BUSY));
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(dev->regmap[1], REG_VPHY_IND_CTRL__2,
+ value, !(value & VPHY_IND_BUSY), 10,
+ 1000);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to write phy register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int lan937x_internal_phy_read(struct ksz_device *dev, int addr, int reg,
+ u16 *val)
+{
+ unsigned int value;
+ int ret;
+
+ /* Check for internal phy port, return 0xffff for non-existent phy */
+ if (!dev->info->internal_phy[addr])
+ return 0xffff;
+
+ ret = lan937x_vphy_ind_addr_wr(dev, addr, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Write Read and Busy bit to start the transaction */
+ ret = ksz_write16(dev, REG_VPHY_IND_CTRL__2, VPHY_IND_BUSY);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(dev->regmap[1], REG_VPHY_IND_CTRL__2,
+ value, !(value & VPHY_IND_BUSY), 10,
+ 1000);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to read phy register\n");
+ return ret;
+ }
+
+ /* Read the VPHY register which has the PHY data */
+ return ksz_read16(dev, REG_VPHY_IND_DATA__2, val);
+}
+
+int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
+{
+ return lan937x_internal_phy_read(dev, addr, reg, data);
+}
+
+int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
+{
+ return lan937x_internal_phy_write(dev, addr, reg, val);
+}
+
+int lan937x_reset_switch(struct ksz_device *dev)
+{
+ u32 data32;
+ int ret;
+
+ /* reset switch */
+ ret = lan937x_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
+ if (ret < 0)
+ return ret;
+
+ /* Enable Auto Aging */
+ ret = lan937x_cfg(dev, REG_SW_LUE_CTRL_1, SW_LINK_AUTO_AGING, true);
+ if (ret < 0)
+ return ret;
+
+ /* disable interrupts */
+ ret = ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_write32(dev, REG_SW_INT_STATUS__4, POR_READY_INT);
+ if (ret < 0)
+ return ret;
+
+ ret = ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0xFF);
+ if (ret < 0)
+ return ret;
+
+ return ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
+}
+
+void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+{
+ const u32 *masks = dev->info->masks;
+ const u16 *regs = dev->info->regs;
+ struct dsa_switch *ds = dev->ds;
+ u8 member;
+
+	/* Enable tail tagging for the host port */
+ if (cpu_port)
+ lan937x_port_cfg(dev, port, REG_PORT_CTRL_0,
+ PORT_TAIL_TAG_ENABLE, true);
+
+ /* Enable the Port Queue split */
+ ksz9477_port_queue_split(dev, port);
+
+ /* set back pressure for half duplex */
+ lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE,
+ true);
+
+ /* enable 802.1p priority */
+ lan937x_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
+
+ if (!dev->info->internal_phy[port])
+ lan937x_port_cfg(dev, port, regs[P_XMII_CTRL_0],
+ masks[P_MII_TX_FLOW_CTRL] |
+ masks[P_MII_RX_FLOW_CTRL],
+ true);
+
+ if (cpu_port)
+ member = dsa_user_ports(ds);
+ else
+ member = BIT(dsa_upstream_port(ds, port));
+
+ dev->dev_ops->cfg_port_member(dev, port, member);
+}
+
+void lan937x_config_cpu_port(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ if (dev->info->cpu_ports & (1 << dp->index)) {
+ dev->cpu_port = dp->index;
+
+ /* enable cpu port */
+ lan937x_port_setup(dev, dp->index, true);
+ }
+ }
+
+ dsa_switch_for_each_user_port(dp, ds) {
+ ksz_port_stp_state_set(ds, dp->index, BR_STATE_DISABLED);
+ }
+}
+
+int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
+{
+ struct dsa_switch *ds = dev->ds;
+ int ret;
+
+ new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ if (dsa_is_cpu_port(ds, port))
+ new_mtu += LAN937X_TAG_LEN;
+
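+	/* new_mtu now holds the wire frame size, e.g. an MTU of 1500 becomes
+	 * 1500 + 18 (VLAN_ETH_HLEN) + 4 (ETH_FCS_LEN) = 1522 bytes, plus 2
+	 * tail-tag bytes on the CPU port.
+	 */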
+ if (new_mtu >= FR_MIN_SIZE)
+ ret = lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0,
+ PORT_JUMBO_PACKET, true);
+ else
+ ret = lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_0,
+ PORT_JUMBO_PACKET, false);
+ if (ret < 0) {
+ dev_err(ds->dev, "failed to enable jumbo\n");
+ return ret;
+ }
+
+ /* Write the frame size in PORT_MAX_FR_SIZE register */
+ ret = ksz_pwrite16(dev, port, PORT_MAX_FR_SIZE, new_mtu);
+ if (ret) {
+ dev_err(ds->dev, "failed to update mtu for port %d\n", port);
+ return ret;
+ }
+
+ return 0;
+}
+
+int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+{
+ u32 secs = msecs / 1000;
+ u32 value;
+ int ret;
+
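+	/* The ageing period is a 20-bit value in seconds: bits [7:0] go to
+	 * REG_SW_AGE_PERIOD__1 and bits [19:8] to REG_SW_AGE_PERIOD__2.
+	 */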
+ value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+
+ ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
+ if (ret < 0)
+ return ret;
+
+ value = FIELD_GET(SW_AGE_PERIOD_19_8_M, secs);
+
+ return ksz_write16(dev, REG_SW_AGE_PERIOD__2, value);
+}
+
+static void lan937x_set_tune_adj(struct ksz_device *dev, int port,
+ u16 reg, u8 val)
+{
+ u16 data16;
+
+ ksz_pread16(dev, port, reg, &data16);
+
+ /* Update tune Adjust */
+ data16 |= FIELD_PREP(PORT_TUNE_ADJ, val);
+ ksz_pwrite16(dev, port, reg, data16);
+
+ /* write DLL reset to take effect */
+ data16 |= PORT_DLL_RESET;
+ ksz_pwrite16(dev, port, reg, data16);
+}
+
+static void lan937x_set_rgmii_tx_delay(struct ksz_device *dev, int port)
+{
+ u8 val;
+
+	/* Apply per-port tuning codes as determined by characterization
+	 * results.
+	 */
+ val = (port == LAN937X_RGMII_1_PORT) ? RGMII_1_TX_DELAY_2NS :
+ RGMII_2_TX_DELAY_2NS;
+
+ lan937x_set_tune_adj(dev, port, REG_PORT_XMII_CTRL_5, val);
+}
+
+static void lan937x_set_rgmii_rx_delay(struct ksz_device *dev, int port)
+{
+ u8 val;
+
+ val = (port == LAN937X_RGMII_1_PORT) ? RGMII_1_RX_DELAY_2NS :
+ RGMII_2_RX_DELAY_2NS;
+
+ lan937x_set_tune_adj(dev, port, REG_PORT_XMII_CTRL_4, val);
+}
+
+void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
+ struct phylink_config *config)
+{
+ config->mac_capabilities = MAC_100FD;
+
+ if (dev->info->supports_rgmii[port]) {
+ /* MII/RMII/RGMII ports */
+ config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_100HD | MAC_10 | MAC_1000FD;
+ }
+}
+
+void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port)
+{
+ struct ksz_port *p = &dev->ports[port];
+
+ if (p->rgmii_tx_val) {
+ lan937x_set_rgmii_tx_delay(dev, port);
+ dev_info(dev->dev, "Applied rgmii tx delay for the port %d\n",
+ port);
+ }
+
+ if (p->rgmii_rx_val) {
+ lan937x_set_rgmii_rx_delay(dev, port);
+ dev_info(dev->dev, "Applied rgmii rx delay for the port %d\n",
+ port);
+ }
+}
+
+int lan937x_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
+{
+ return ksz_pwrite32(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
+}
+
+int lan937x_switch_init(struct ksz_device *dev)
+{
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
+
+ return 0;
+}
+
+int lan937x_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+
+ /* enable Indirect Access from SPI to the VPHY registers */
+ ret = lan937x_enable_spi_indirect_access(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to enable spi indirect access");
+ return ret;
+ }
+
+	/* VLAN awareness is a global setting; mixed VLAN filtering
+	 * configurations are not supported.
+	 */
+ ds->vlan_filtering_is_global = true;
+
+ /* Enable aggressive back off for half duplex & UNH mode */
+ lan937x_cfg(dev, REG_SW_MAC_CTRL_0,
+ (SW_PAUSE_UNH_MODE | SW_NEW_BACKOFF | SW_AGGR_BACKOFF),
+ true);
+
+ /* If NO_EXC_COLLISION_DROP bit is set, the switch will not drop
+ * packets when 16 or more collisions occur
+ */
+ lan937x_cfg(dev, REG_SW_MAC_CTRL_1, NO_EXC_COLLISION_DROP, true);
+
+ /* enable global MIB counter freeze function */
+ lan937x_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
+
+ /* disable CLK125 & CLK25, 1: disable, 0: enable */
+ lan937x_cfg(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
+ (SW_CLK125_ENB | SW_CLK25_ENB), true);
+
+ return 0;
+}
+
+void lan937x_teardown(struct dsa_switch *ds)
+{
+}
+
+void lan937x_switch_exit(struct ksz_device *dev)
+{
+ lan937x_reset_switch(dev);
+}
+
+MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>");
+MODULE_DESCRIPTION("Microchip LAN937x Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
new file mode 100644
index 000000000000..45b606b6429f
--- /dev/null
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip LAN937X switch register definitions
+ * Copyright (C) 2019-2021 Microchip Technology Inc.
+ */
+#ifndef __LAN937X_REG_H
+#define __LAN937X_REG_H
+
+#define PORT_CTRL_ADDR(port, addr) ((addr) | (((port) + 1) << 12))
+
+/* 0 - Operation */
+#define REG_GLOBAL_CTRL_0 0x0007
+
+#define SW_PHY_REG_BLOCK BIT(7)
+#define SW_FAST_MODE BIT(3)
+#define SW_FAST_MODE_OVERRIDE BIT(2)
+
+#define REG_SW_INT_STATUS__4 0x0010
+#define REG_SW_INT_MASK__4 0x0014
+
+#define LUE_INT BIT(31)
+#define TRIG_TS_INT BIT(30)
+#define APB_TIMEOUT_INT BIT(29)
+#define OVER_TEMP_INT BIT(28)
+#define HSR_INT BIT(27)
+#define PIO_INT BIT(26)
+#define POR_READY_INT BIT(25)
+
+#define SWITCH_INT_MASK \
+ (LUE_INT | TRIG_TS_INT | APB_TIMEOUT_INT | OVER_TEMP_INT | HSR_INT | \
+ PIO_INT | POR_READY_INT)
+
+#define REG_SW_PORT_INT_STATUS__4 0x0018
+#define REG_SW_PORT_INT_MASK__4 0x001C
+
+/* 1 - Global */
+#define REG_SW_GLOBAL_OUTPUT_CTRL__1 0x0103
+#define SW_CLK125_ENB BIT(1)
+#define SW_CLK25_ENB BIT(0)
+
+/* 3 - Operation Control */
+#define REG_SW_OPERATION 0x0300
+
+#define SW_DOUBLE_TAG BIT(7)
+#define SW_OVER_TEMP_ENABLE BIT(2)
+#define SW_RESET BIT(1)
+
+#define REG_SW_LUE_CTRL_0 0x0310
+
+#define SW_VLAN_ENABLE BIT(7)
+#define SW_DROP_INVALID_VID BIT(6)
+#define SW_AGE_CNT_M 0x7
+#define SW_AGE_CNT_S 3
+#define SW_RESV_MCAST_ENABLE BIT(2)
+
+#define REG_SW_LUE_CTRL_1 0x0311
+
+#define UNICAST_LEARN_DISABLE BIT(7)
+#define SW_FLUSH_STP_TABLE BIT(5)
+#define SW_FLUSH_MSTP_TABLE BIT(4)
+#define SW_SRC_ADDR_FILTER BIT(3)
+#define SW_AGING_ENABLE BIT(2)
+#define SW_FAST_AGING BIT(1)
+#define SW_LINK_AUTO_AGING BIT(0)
+
+#define REG_SW_AGE_PERIOD__1 0x0313
+#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
+
+#define REG_SW_AGE_PERIOD__2 0x0320
+#define SW_AGE_PERIOD_19_8_M GENMASK(19, 8)
+
+#define REG_SW_MAC_CTRL_0 0x0330
+#define SW_NEW_BACKOFF BIT(7)
+#define SW_PAUSE_UNH_MODE BIT(1)
+#define SW_AGGR_BACKOFF BIT(0)
+
+#define REG_SW_MAC_CTRL_1 0x0331
+#define SW_SHORT_IFG BIT(7)
+#define MULTICAST_STORM_DISABLE BIT(6)
+#define SW_BACK_PRESSURE BIT(5)
+#define FAIR_FLOW_CTRL BIT(4)
+#define NO_EXC_COLLISION_DROP BIT(3)
+#define SW_LEGAL_PACKET_DISABLE BIT(1)
+#define SW_PASS_SHORT_FRAME BIT(0)
+
+#define REG_SW_MAC_CTRL_6 0x0336
+#define SW_MIB_COUNTER_FLUSH BIT(7)
+#define SW_MIB_COUNTER_FREEZE BIT(6)
+
+/* 4 - LUE */
+#define REG_SW_ALU_STAT_CTRL__4 0x041C
+
+#define REG_SW_ALU_VAL_B 0x0424
+#define ALU_V_OVERRIDE BIT(31)
+#define ALU_V_USE_FID BIT(30)
+#define ALU_V_PORT_MAP 0xFF
+
+/* 7 - VPhy */
+#define REG_VPHY_IND_ADDR__2 0x075C
+#define REG_VPHY_IND_DATA__2 0x0760
+
+#define REG_VPHY_IND_CTRL__2 0x0768
+
+#define VPHY_IND_WRITE BIT(1)
+#define VPHY_IND_BUSY BIT(0)
+
+#define REG_VPHY_SPECIAL_CTRL__2 0x077C
+#define VPHY_SMI_INDIRECT_ENABLE BIT(15)
+#define VPHY_SW_LOOPBACK BIT(14)
+#define VPHY_MDIO_INTERNAL_ENABLE BIT(13)
+#define VPHY_SPI_INDIRECT_ENABLE BIT(12)
+#define VPHY_PORT_MODE_M 0x3
+#define VPHY_PORT_MODE_S 8
+#define VPHY_MODE_RGMII 0
+#define VPHY_MODE_MII_PHY 1
+#define VPHY_MODE_SGMII 2
+#define VPHY_MODE_RMII_PHY 3
+#define VPHY_SW_COLLISION_TEST BIT(7)
+#define VPHY_SPEED_DUPLEX_STAT_M 0x7
+#define VPHY_SPEED_DUPLEX_STAT_S 2
+#define VPHY_SPEED_1000 BIT(4)
+#define VPHY_SPEED_100 BIT(3)
+#define VPHY_FULL_DUPLEX BIT(2)
+
+/* Port Registers */
+
+/* 0 - Operation */
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_TAS_INT BIT(5)
+#define PORT_QCI_INT BIT(4)
+#define PORT_SGMII_INT BIT(3)
+#define PORT_PTP_INT BIT(2)
+#define PORT_PHY_INT BIT(1)
+#define PORT_ACL_INT BIT(0)
+
+#define PORT_SRC_PHY_INT 1
+
+#define REG_PORT_CTRL_0 0x0020
+
+#define PORT_MAC_LOOPBACK BIT(7)
+#define PORT_MAC_REMOTE_LOOPBACK BIT(6)
+#define PORT_K2L_INSERT_ENABLE BIT(5)
+#define PORT_K2L_DEBUG_ENABLE BIT(4)
+#define PORT_TAIL_TAG_ENABLE BIT(2)
+#define PORT_QUEUE_SPLIT_ENABLE 0x3
+
+/* 1 - Phy */
+#define REG_PORT_T1_PHY_CTRL_BASE 0x0100
+
+/* 3 - xMII */
+#define PORT_SGMII_SEL BIT(7)
+#define PORT_GRXC_ENABLE BIT(0)
+
+#define PORT_MII_SEL_EDGE BIT(5)
+
+#define REG_PORT_XMII_CTRL_4 0x0304
+#define REG_PORT_XMII_CTRL_5 0x0306
+
+#define PORT_DLL_RESET BIT(15)
+#define PORT_TUNE_ADJ GENMASK(13, 7)
+
+/* 4 - MAC */
+#define REG_PORT_MAC_CTRL_0 0x0400
+#define PORT_CHECK_LENGTH BIT(2)
+#define PORT_BROADCAST_STORM BIT(1)
+#define PORT_JUMBO_PACKET BIT(0)
+
+#define REG_PORT_MAC_CTRL_1 0x0401
+#define PORT_BACK_PRESSURE BIT(3)
+#define PORT_PASS_ALL BIT(0)
+
+#define PORT_MAX_FR_SIZE 0x404
+#define FR_MIN_SIZE 1522
+
+/* 8 - Classification and Policing */
+#define REG_PORT_MRI_PRIO_CTRL 0x0801
+#define PORT_HIGHEST_PRIO BIT(7)
+#define PORT_OR_PRIO BIT(6)
+#define PORT_MAC_PRIO_ENABLE BIT(4)
+#define PORT_VLAN_PRIO_ENABLE BIT(3)
+#define PORT_802_1P_PRIO_ENABLE BIT(2)
+#define PORT_DIFFSERV_PRIO_ENABLE BIT(1)
+#define PORT_ACL_PRIO_ENABLE BIT(0)
+
+#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL
+
+/* 9 - Shaping */
+#define REG_PORT_MTI_CREDIT_INCREMENT 0x091C
+
+/* The port number as per the datasheet */
+#define RGMII_2_PORT_NUM 5
+#define RGMII_1_PORT_NUM 6
+
+#define LAN937X_RGMII_2_PORT (RGMII_2_PORT_NUM - 1)
+#define LAN937X_RGMII_1_PORT (RGMII_1_PORT_NUM - 1)
+
+#define RGMII_1_TX_DELAY_2NS 2
+#define RGMII_2_TX_DELAY_2NS 0
+#define RGMII_1_RX_DELAY_2NS 0x1B
+#define RGMII_2_RX_DELAY_2NS 0x14
+
+#define LAN937X_TAG_LEN 2
+
+#endif
diff --git a/drivers/net/dsa/mt7530-mdio.c b/drivers/net/dsa/mt7530-mdio.c
new file mode 100644
index 000000000000..088533663b83
--- /dev/null
+++ b/drivers/net/dsa/mt7530-mdio.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/gpio/consumer.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/pcs/pcs-mtk-lynxi.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/regulator/consumer.h>
+#include <net/dsa.h>
+
+#include "mt7530.h"
+
+static int
+mt7530_regmap_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct mii_bus *bus = context;
+ u16 page, r, lo, hi;
+ int ret;
+
+ page = (reg >> 6) & 0x3ff;
+ r = (reg >> 2) & 0xf;
+ lo = val & 0xffff;
+ hi = val >> 16;
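+	/* A 32-bit switch register is accessed as three MDIO writes through
+	 * pseudo PHY 0x1f: select the page, write the low 16 bits at register
+	 * r, then the high 16 bits at register 0x10.
+	 */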
+
+ /* MT7530 uses 31 as the pseudo port */
+ ret = bus->write(bus, 0x1f, 0x1f, page);
+ if (ret < 0)
+ return ret;
+
+ ret = bus->write(bus, 0x1f, r, lo);
+ if (ret < 0)
+ return ret;
+
+ ret = bus->write(bus, 0x1f, 0x10, hi);
+ return ret;
+}
+
+static int
+mt7530_regmap_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct mii_bus *bus = context;
+ u16 page, r, lo, hi;
+ int ret;
+
+ page = (reg >> 6) & 0x3ff;
+ r = (reg >> 2) & 0xf;
+
+ /* MT7530 uses 31 as the pseudo port */
+ ret = bus->write(bus, 0x1f, 0x1f, page);
+ if (ret < 0)
+ return ret;
+
+ lo = bus->read(bus, 0x1f, r);
+ hi = bus->read(bus, 0x1f, 0x10);
+
+ *val = (hi << 16) | (lo & 0xffff);
+
+ return 0;
+}
+
+static void
+mt7530_mdio_regmap_lock(void *mdio_lock)
+{
+ mutex_lock_nested(mdio_lock, MDIO_MUTEX_NESTED);
+}
+
+static void
+mt7530_mdio_regmap_unlock(void *mdio_lock)
+{
+ mutex_unlock(mdio_lock);
+}
+
+static const struct regmap_bus mt7530_regmap_bus = {
+ .reg_write = mt7530_regmap_write,
+ .reg_read = mt7530_regmap_read,
+};
+
+static int
+mt7531_create_sgmii(struct mt7530_priv *priv, bool dual_sgmii)
+{
+ struct regmap_config *mt7531_pcs_config[2] = {};
+ struct phylink_pcs *pcs;
+ struct regmap *regmap;
+ int i, ret = 0;
+
+ /* MT7531AE has two SGMII units for port 5 and port 6
+ * MT7531BE has only one SGMII unit for port 6
+ */
+ for (i = dual_sgmii ? 0 : 1; i < 2; i++) {
+ mt7531_pcs_config[i] = devm_kzalloc(priv->dev,
+ sizeof(struct regmap_config),
+ GFP_KERNEL);
+ if (!mt7531_pcs_config[i]) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ mt7531_pcs_config[i]->name = i ? "port6" : "port5";
+ mt7531_pcs_config[i]->reg_bits = 16;
+ mt7531_pcs_config[i]->val_bits = 32;
+ mt7531_pcs_config[i]->reg_stride = 4;
+ mt7531_pcs_config[i]->reg_base = MT7531_SGMII_REG_BASE(5 + i);
+ mt7531_pcs_config[i]->max_register = 0x17c;
+ mt7531_pcs_config[i]->lock = mt7530_mdio_regmap_lock;
+ mt7531_pcs_config[i]->unlock = mt7530_mdio_regmap_unlock;
+ mt7531_pcs_config[i]->lock_arg = &priv->bus->mdio_lock;
+
+ regmap = devm_regmap_init(priv->dev,
+ &mt7530_regmap_bus, priv->bus,
+ mt7531_pcs_config[i]);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ break;
+ }
+ pcs = mtk_pcs_lynxi_create(priv->dev, regmap,
+ MT7531_PHYA_CTRL_SIGNAL3, 0);
+ if (!pcs) {
+ ret = -ENXIO;
+ break;
+ }
+ priv->ports[5 + i].sgmii_pcs = pcs;
+ }
+
+ if (ret && i)
+ mtk_pcs_lynxi_destroy(priv->ports[5].sgmii_pcs);
+
+ return ret;
+}
+
+static const struct of_device_id mt7530_of_match[] = {
+ { .compatible = "mediatek,mt7621", .data = &mt753x_table[ID_MT7621], },
+ { .compatible = "mediatek,mt7530", .data = &mt753x_table[ID_MT7530], },
+ { .compatible = "mediatek,mt7531", .data = &mt753x_table[ID_MT7531], },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mt7530_of_match);
+
+static int
+mt7530_probe(struct mdio_device *mdiodev)
+{
+ static struct regmap_config *regmap_config;
+ struct mt7530_priv *priv;
+ struct device_node *dn;
+ int ret;
+
+ dn = mdiodev->dev.of_node;
+
+ priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->bus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+
+ ret = mt7530_probe_common(priv);
+ if (ret)
+ return ret;
+
+	/* Use the mediatek,mcm property to distinguish the hardware type, as
+	 * it implies slight differences in the power-on sequence.
+	 * Without MCM the switch is a standalone external chip, so a GPIO pin
+	 * is used to complete the reset; with MCM the reset is performed
+	 * through memory-mapped registers provided via syscon.
+	 */
+ priv->mcm = of_property_read_bool(dn, "mediatek,mcm");
+ if (priv->mcm) {
+ dev_info(&mdiodev->dev, "MT7530 adapts as multi-chip module\n");
+
+ priv->rstc = devm_reset_control_get(&mdiodev->dev, "mcm");
+ if (IS_ERR(priv->rstc)) {
+ dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
+ return PTR_ERR(priv->rstc);
+ }
+ } else {
+ priv->reset = devm_gpiod_get_optional(&mdiodev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
+ return PTR_ERR(priv->reset);
+ }
+ }
+
+ if (priv->id == ID_MT7530) {
+ priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core");
+ if (IS_ERR(priv->core_pwr))
+ return PTR_ERR(priv->core_pwr);
+
+ priv->io_pwr = devm_regulator_get(&mdiodev->dev, "io");
+ if (IS_ERR(priv->io_pwr))
+ return PTR_ERR(priv->io_pwr);
+ }
+
+ regmap_config = devm_kzalloc(&mdiodev->dev, sizeof(*regmap_config),
+ GFP_KERNEL);
+ if (!regmap_config)
+ return -ENOMEM;
+
+ regmap_config->reg_bits = 16;
+ regmap_config->val_bits = 32;
+ regmap_config->reg_stride = 4;
+ regmap_config->max_register = MT7530_CREV;
+ regmap_config->disable_locking = true;
+ priv->regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus,
+ priv->bus, regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ if (priv->id == ID_MT7531)
+ priv->create_sgmii = mt7531_create_sgmii;
+
+ return dsa_register_switch(priv->ds);
+}
+
+static void
+mt7530_remove(struct mdio_device *mdiodev)
+{
+ struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
+ int ret = 0, i;
+
+ if (!priv)
+ return;
+
+ ret = regulator_disable(priv->core_pwr);
+ if (ret < 0)
+ dev_err(priv->dev,
+ "Failed to disable core power: %d\n", ret);
+
+ ret = regulator_disable(priv->io_pwr);
+ if (ret < 0)
+ dev_err(priv->dev, "Failed to disable io pwr: %d\n",
+ ret);
+
+ mt7530_remove_common(priv);
+
+ for (i = 0; i < 2; ++i)
+ mtk_pcs_lynxi_destroy(priv->ports[5 + i].sgmii_pcs);
+}
+
+static void mt7530_shutdown(struct mdio_device *mdiodev)
+{
+ struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static struct mdio_driver mt7530_mdio_driver = {
+ .probe = mt7530_probe,
+ .remove = mt7530_remove,
+ .shutdown = mt7530_shutdown,
+ .mdiodrv.driver = {
+ .name = "mt7530-mdio",
+ .of_match_table = mt7530_of_match,
+ },
+};
+
+mdio_module_driver(mt7530_mdio_driver);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch (MDIO)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/mt7530-mmio.c b/drivers/net/dsa/mt7530-mmio.c
new file mode 100644
index 000000000000..1a3d4b692f34
--- /dev/null
+++ b/drivers/net/dsa/mt7530-mmio.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <net/dsa.h>
+
+#include "mt7530.h"
+
+static const struct of_device_id mt7988_of_match[] = {
+ { .compatible = "mediatek,mt7988-switch", .data = &mt753x_table[ID_MT7988], },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mt7988_of_match);
+
+static int
+mt7988_probe(struct platform_device *pdev)
+{
+ static struct regmap_config *sw_regmap_config;
+ struct mt7530_priv *priv;
+ void __iomem *base_addr;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->bus = NULL;
+ priv->dev = &pdev->dev;
+
+ ret = mt7530_probe_common(priv);
+ if (ret)
+ return ret;
+
+ priv->rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->rstc)) {
+ dev_err(&pdev->dev, "Couldn't get our reset line\n");
+ return PTR_ERR(priv->rstc);
+ }
+
+ base_addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base_addr)) {
+ dev_err(&pdev->dev, "cannot request I/O memory space\n");
+ return -ENXIO;
+ }
+
+ sw_regmap_config = devm_kzalloc(&pdev->dev, sizeof(*sw_regmap_config), GFP_KERNEL);
+ if (!sw_regmap_config)
+ return -ENOMEM;
+
+ sw_regmap_config->name = "switch";
+ sw_regmap_config->reg_bits = 16;
+ sw_regmap_config->val_bits = 32;
+ sw_regmap_config->reg_stride = 4;
+ sw_regmap_config->max_register = MT7530_CREV;
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base_addr, sw_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ return dsa_register_switch(priv->ds);
+}
+
+static int
+mt7988_remove(struct platform_device *pdev)
+{
+ struct mt7530_priv *priv = platform_get_drvdata(pdev);
+
+ if (priv)
+ mt7530_remove_common(priv);
+
+ return 0;
+}
+
+static void mt7988_shutdown(struct platform_device *pdev)
+{
+ struct mt7530_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+}
+
+static struct platform_driver mt7988_platform_driver = {
+ .probe = mt7988_probe,
+ .remove = mt7988_remove,
+ .shutdown = mt7988_shutdown,
+ .driver = {
+ .name = "mt7530-mmio",
+ .of_match_table = mt7988_of_match,
+ },
+};
+module_platform_driver(mt7988_platform_driver);
+
+MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
+MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch (MMIO)");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 9890672a206d..9bc54e1348cb 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -24,6 +24,11 @@
#include "mt7530.h"
+static struct mt753x_pcs *pcs_to_mt753x_pcs(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mt753x_pcs, pcs);
+}
+
/* String, offset, and register size in bytes if different from 4 bytes */
static const struct mt7530_mib_desc mt7530_mib[] = {
MIB_DESC(1, 0x00, "TxDrop"),
@@ -137,31 +142,42 @@ err:
}
static void
-core_write(struct mt7530_priv *priv, u32 reg, u32 val)
+mt7530_mutex_lock(struct mt7530_priv *priv)
{
- struct mii_bus *bus = priv->bus;
+ if (priv->bus)
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+}
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+static void
+mt7530_mutex_unlock(struct mt7530_priv *priv)
+{
+ if (priv->bus)
+ mutex_unlock(&priv->bus->mdio_lock);
+}
+
+static void
+core_write(struct mt7530_priv *priv, u32 reg, u32 val)
+{
+ mt7530_mutex_lock(priv);
core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
}
static void
core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
{
- struct mii_bus *bus = priv->bus;
u32 val;
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(priv);
val = core_read_mmd_indirect(priv, reg, MDIO_MMD_VEND2);
val &= ~mask;
val |= set;
core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
}
static void
@@ -179,66 +195,42 @@ core_clear(struct mt7530_priv *priv, u32 reg, u32 val)
static int
mt7530_mii_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
- struct mii_bus *bus = priv->bus;
- u16 page, r, lo, hi;
int ret;
- page = (reg >> 6) & 0x3ff;
- r = (reg >> 2) & 0xf;
- lo = val & 0xffff;
- hi = val >> 16;
-
- /* MT7530 uses 31 as the pseudo port */
- ret = bus->write(bus, 0x1f, 0x1f, page);
- if (ret < 0)
- goto err;
-
- ret = bus->write(bus, 0x1f, r, lo);
- if (ret < 0)
- goto err;
+ ret = regmap_write(priv->regmap, reg, val);
- ret = bus->write(bus, 0x1f, 0x10, hi);
-err:
if (ret < 0)
- dev_err(&bus->dev,
+ dev_err(priv->dev,
"failed to write mt7530 register\n");
+
return ret;
}
static u32
mt7530_mii_read(struct mt7530_priv *priv, u32 reg)
{
- struct mii_bus *bus = priv->bus;
- u16 page, r, lo, hi;
int ret;
+ u32 val;
- page = (reg >> 6) & 0x3ff;
- r = (reg >> 2) & 0xf;
-
- /* MT7530 uses 31 as the pseudo port */
- ret = bus->write(bus, 0x1f, 0x1f, page);
- if (ret < 0) {
- dev_err(&bus->dev,
+ ret = regmap_read(priv->regmap, reg, &val);
+ if (ret) {
+ WARN_ON_ONCE(1);
+ dev_err(priv->dev,
"failed to read mt7530 register\n");
- return ret;
+ return 0;
}
- lo = bus->read(bus, 0x1f, r);
- hi = bus->read(bus, 0x1f, 0x10);
-
- return (hi << 16) | (lo & 0xffff);
+ return val;
}
static void
mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
- struct mii_bus *bus = priv->bus;
-
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(priv);
mt7530_mii_write(priv, reg, val);
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
}
static u32
@@ -250,14 +242,13 @@ _mt7530_unlocked_read(struct mt7530_dummy_poll *p)
static u32
_mt7530_read(struct mt7530_dummy_poll *p)
{
- struct mii_bus *bus = p->priv->bus;
u32 val;
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(p->priv);
val = mt7530_mii_read(p->priv, p->reg);
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(p->priv);
return val;
}
@@ -275,23 +266,17 @@ static void
mt7530_rmw(struct mt7530_priv *priv, u32 reg,
u32 mask, u32 set)
{
- struct mii_bus *bus = priv->bus;
- u32 val;
+ mt7530_mutex_lock(priv);
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ regmap_update_bits(priv->regmap, reg, mask, set);
- val = mt7530_mii_read(priv, reg);
- val &= ~mask;
- val |= set;
- mt7530_mii_write(priv, reg, val);
-
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
}
static void
mt7530_set(struct mt7530_priv *priv, u32 reg, u32 val)
{
- mt7530_rmw(priv, reg, 0, val);
+ mt7530_rmw(priv, reg, val, val);
}
static void
@@ -388,12 +373,38 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
}
-/* Setup TX circuit including relevant PAD and driving */
+/* Set up switch core clock for MT7530 */
+static void mt7530_pll_setup(struct mt7530_priv *priv)
+{
+ /* Disable core clock */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+
+ /* Disable PLL */
+ core_write(priv, CORE_GSWPLL_GRP1, 0);
+
+ /* Set core clock to 500 MHz */
+ core_write(priv, CORE_GSWPLL_GRP2,
+ RG_GSWPLL_POSDIV_500M(1) |
+ RG_GSWPLL_FBKDIV_500M(25));
+
+ /* Enable PLL */
+ core_write(priv, CORE_GSWPLL_GRP1,
+ RG_GSWPLL_EN_PRE |
+ RG_GSWPLL_POSDIV_200M(2) |
+ RG_GSWPLL_FBKDIV_200M(32));
+
+ udelay(20);
+
+ /* Enable core clock */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+}
+
+/* Setup port 6 interface mode and TRGMII TX circuit */
static int
mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
- u32 ncpo1, ssc_delta, trgint, i, xtal;
+ u32 ncpo1, ssc_delta, trgint, xtal;
xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
@@ -407,15 +418,17 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
switch (interface) {
case PHY_INTERFACE_MODE_RGMII:
trgint = 0;
- /* PLL frequency: 125MHz */
- ncpo1 = 0x0c80;
break;
case PHY_INTERFACE_MODE_TRGMII:
trgint = 1;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ssc_delta = 0x57;
+ else
+ ssc_delta = 0x87;
if (priv->id == ID_MT7621) {
- /* PLL frequency: 150MHz: 1.2GBit */
+ /* PLL frequency: 125MHz: 1.0Gbit */
if (xtal == HWTRAP_XTAL_40MHZ)
- ncpo1 = 0x0780;
+ ncpo1 = 0x0640;
if (xtal == HWTRAP_XTAL_25MHZ)
ncpo1 = 0x0a00;
} else { /* PLL frequency: 250MHz: 2.0Gbit */
@@ -431,61 +444,32 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
return -EINVAL;
}
- if (xtal == HWTRAP_XTAL_25MHZ)
- ssc_delta = 0x57;
- else
- ssc_delta = 0x87;
-
mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
P6_INTF_MODE(trgint));
- /* Lower Tx Driving for TRGMII path */
- for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
- mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
- TD_DM_DRVP(8) | TD_DM_DRVN(8));
-
- /* Disable MT7530 core and TRGMII Tx clocks */
- core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
- REG_GSWCK_EN | REG_TRGMIICK_EN);
-
- /* Setup core clock for MT7530 */
- /* Disable PLL */
- core_write(priv, CORE_GSWPLL_GRP1, 0);
-
- /* Set core clock into 500Mhz */
- core_write(priv, CORE_GSWPLL_GRP2,
- RG_GSWPLL_POSDIV_500M(1) |
- RG_GSWPLL_FBKDIV_500M(25));
-
- /* Enable PLL */
- core_write(priv, CORE_GSWPLL_GRP1,
- RG_GSWPLL_EN_PRE |
- RG_GSWPLL_POSDIV_200M(2) |
- RG_GSWPLL_FBKDIV_200M(32));
+ if (trgint) {
+ /* Disable the MT7530 TRGMII clocks */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+
+ /* Setup the MT7530 TRGMII Tx Clock */
+ core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
+ core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
+ core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
+ core_write(priv, CORE_PLL_GROUP4,
+ RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
+ RG_SYSPLL_BIAS_LPF_EN);
+ core_write(priv, CORE_PLL_GROUP2,
+ RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
+ RG_SYSPLL_POSDIV(1));
+ core_write(priv, CORE_PLL_GROUP7,
+ RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
+ RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
+
+ /* Enable the MT7530 TRGMII clocks */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
+ }
- /* Setup the MT7530 TRGMII Tx Clock */
- core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
- core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0));
- core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta));
- core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta));
- core_write(priv, CORE_PLL_GROUP4,
- RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN |
- RG_SYSPLL_BIAS_LPF_EN);
- core_write(priv, CORE_PLL_GROUP2,
- RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN |
- RG_SYSPLL_POSDIV(1));
- core_write(priv, CORE_PLL_GROUP7,
- RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
- RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);
-
- /* Enable MT7530 core and TRGMII Tx clocks */
- core_set(priv, CORE_TRGMII_GSW_CLK_CG,
- REG_GSWCK_EN | REG_TRGMIICK_EN);
-
- if (!trgint)
- for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
- mt7530_rmw(priv, MT7530_TRGMII_RD(i),
- RD_TAP_MASK, RD_TAP(16));
return 0;
}
@@ -501,14 +485,19 @@ static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
static int
mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
{
- struct mt7530_priv *priv = ds->priv;
+ return 0;
+}
+
+static void
+mt7531_pll_setup(struct mt7530_priv *priv)
+{
u32 top_sig;
u32 hwstrap;
u32 xtal;
u32 val;
if (mt7531_dual_sgmii_supported(priv))
- return 0;
+ return;
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
@@ -587,8 +576,6 @@ mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
val |= EN_COREPLL;
mt7530_write(priv, MT7531_PLLGP_EN, val);
usleep_range(25, 35);
-
- return 0;
}
static void
@@ -600,29 +587,40 @@ mt7530_mib_reset(struct dsa_switch *ds)
mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
}
-static int mt7530_phy_read(struct mt7530_priv *priv, int port, int regnum)
+static int mt7530_phy_read_c22(struct mt7530_priv *priv, int port, int regnum)
{
return mdiobus_read_nested(priv->bus, port, regnum);
}
-static int mt7530_phy_write(struct mt7530_priv *priv, int port, int regnum,
- u16 val)
+static int mt7530_phy_write_c22(struct mt7530_priv *priv, int port, int regnum,
+ u16 val)
{
return mdiobus_write_nested(priv->bus, port, regnum, val);
}
+static int mt7530_phy_read_c45(struct mt7530_priv *priv, int port,
+ int devad, int regnum)
+{
+ return mdiobus_c45_read_nested(priv->bus, port, devad, regnum);
+}
+
+static int mt7530_phy_write_c45(struct mt7530_priv *priv, int port, int devad,
+ int regnum, u16 val)
+{
+ return mdiobus_c45_write_nested(priv->bus, port, devad, regnum, val);
+}
+
static int
mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
int regnum)
{
- struct mii_bus *bus = priv->bus;
struct mt7530_dummy_poll p;
u32 reg, val;
int ret;
INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(priv);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
!(val & MT7531_PHY_ACS_ST), 20, 100000);
@@ -655,23 +653,22 @@ mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
ret = val & MT7531_MDIO_RW_DATA_MASK;
out:
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
return ret;
}
static int
mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
- int regnum, u32 data)
+ int regnum, u16 data)
{
- struct mii_bus *bus = priv->bus;
struct mt7530_dummy_poll p;
u32 val, reg;
int ret;
INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(priv);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
!(val & MT7531_PHY_ACS_ST), 20, 100000);
@@ -703,7 +700,7 @@ mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
}
out:
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
return ret;
}
@@ -711,14 +708,13 @@ out:
static int
mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum)
{
- struct mii_bus *bus = priv->bus;
struct mt7530_dummy_poll p;
int ret;
u32 val;
INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(priv);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
!(val & MT7531_PHY_ACS_ST), 20, 100000);
@@ -741,7 +737,7 @@ mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum)
ret = val & MT7531_MDIO_RW_DATA_MASK;
out:
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
return ret;
}
@@ -750,14 +746,13 @@ static int
mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum,
u16 data)
{
- struct mii_bus *bus = priv->bus;
struct mt7530_dummy_poll p;
int ret;
u32 reg;
INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(priv);
ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
!(reg & MT7531_PHY_ACS_ST), 20, 100000);
@@ -779,61 +774,42 @@ mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum,
}
out:
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
return ret;
}
static int
-mt7531_ind_phy_read(struct mt7530_priv *priv, int port, int regnum)
+mt753x_phy_read_c22(struct mii_bus *bus, int port, int regnum)
{
- int devad;
- int ret;
-
- if (regnum & MII_ADDR_C45) {
- devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- ret = mt7531_ind_c45_phy_read(priv, port, devad,
- regnum & MII_REGADDR_C45_MASK);
- } else {
- ret = mt7531_ind_c22_phy_read(priv, port, regnum);
- }
+ struct mt7530_priv *priv = bus->priv;
- return ret;
+ return priv->info->phy_read_c22(priv, port, regnum);
}
static int
-mt7531_ind_phy_write(struct mt7530_priv *priv, int port, int regnum,
- u16 data)
+mt753x_phy_read_c45(struct mii_bus *bus, int port, int devad, int regnum)
{
- int devad;
- int ret;
-
- if (regnum & MII_ADDR_C45) {
- devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- ret = mt7531_ind_c45_phy_write(priv, port, devad,
- regnum & MII_REGADDR_C45_MASK,
- data);
- } else {
- ret = mt7531_ind_c22_phy_write(priv, port, regnum, data);
- }
+ struct mt7530_priv *priv = bus->priv;
- return ret;
+ return priv->info->phy_read_c45(priv, port, devad, regnum);
}
static int
-mt753x_phy_read(struct mii_bus *bus, int port, int regnum)
+mt753x_phy_write_c22(struct mii_bus *bus, int port, int regnum, u16 val)
{
struct mt7530_priv *priv = bus->priv;
- return priv->info->phy_read(priv, port, regnum);
+ return priv->info->phy_write_c22(priv, port, regnum, val);
}
static int
-mt753x_phy_write(struct mii_bus *bus, int port, int regnum, u16 val)
+mt753x_phy_write_c45(struct mii_bus *bus, int port, int devad, int regnum,
+ u16 val)
{
struct mt7530_priv *priv = bus->priv;
- return priv->info->phy_write(priv, port, regnum, val);
+ return priv->info->phy_write_c45(priv, port, devad, regnum, val);
}
static void
@@ -920,6 +896,24 @@ mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
return 0;
}
+static const char *p5_intf_modes(unsigned int p5_interface)
+{
+ switch (p5_interface) {
+ case P5_DISABLED:
+ return "DISABLED";
+ case P5_INTF_SEL_PHY_P0:
+ return "PHY P0";
+ case P5_INTF_SEL_PHY_P4:
+ return "PHY P4";
+ case P5_INTF_SEL_GMAC5:
+ return "GMAC5";
+ case P5_INTF_SEL_GMAC5_SGMII:
+ return "GMAC5_SGMII";
+ default:
+ return "unknown";
+ }
+}
+
static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
@@ -1008,9 +1002,9 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
mt7530_write(priv, MT7530_PVC_P(port),
PORT_SPEC_TAG);
- /* Disable flooding by default */
- mt7530_rmw(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK | UNU_FFP_MASK,
- BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) | UNU_FFP(BIT(port)));
+ /* Enable flooding on the CPU port */
+ mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
+ UNU_FFP(BIT(port)));
/* Set CPU port number */
if (priv->id == ID_MT7621)
@@ -1033,6 +1027,7 @@ static int
mt7530_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
@@ -1041,7 +1036,11 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
* restore the port matrix if the port is the member of a certain
* bridge.
*/
- priv->ports[port].pm |= PCR_MATRIX(BIT(MT7530_CPU_PORT));
+ if (dsa_port_is_user(dp)) {
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+
+ priv->ports[port].pm |= PCR_MATRIX(BIT(cpu_dp->index));
+ }
priv->ports[port].enable = true;
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
priv->ports[port].pm);
@@ -1074,7 +1073,6 @@ static int
mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct mt7530_priv *priv = ds->priv;
- struct mii_bus *bus = priv->bus;
int length;
u32 val;
@@ -1085,7 +1083,7 @@ mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
if (!dsa_is_cpu_port(ds, port))
return 0;
- mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(priv);
val = mt7530_mii_read(priv, MT7530_GMACCR);
val &= ~MAX_RX_PKT_LEN_MASK;
@@ -1106,7 +1104,7 @@ mt7530_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
mt7530_mii_write(priv, MT7530_GMACCR, val);
- mutex_unlock(&bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
return 0;
}
@@ -1186,29 +1184,35 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
static int
mt7530_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ u32 port_bitmap = BIT(cpu_dp->index);
struct mt7530_priv *priv = ds->priv;
- u32 port_bitmap = BIT(MT7530_CPU_PORT);
- int i;
mutex_lock(&priv->reg_mutex);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ int other_port = other_dp->index;
+
+ if (dp == other_dp)
+ continue;
+
/* Add this port to the port matrix of the other ports in the
* same bridge. If the port is disabled, port matrix is kept
* and not being setup until the port becomes enabled.
*/
- if (dsa_is_user_port(ds, i) && i != port) {
- if (dsa_to_port(ds, i)->bridge_dev != bridge)
- continue;
- if (priv->ports[i].enable)
- mt7530_set(priv, MT7530_PCR_P(i),
- PCR_MATRIX(BIT(port)));
- priv->ports[i].pm |= PCR_MATRIX(BIT(port));
+ if (!dsa_port_offloads_bridge(other_dp, &bridge))
+ continue;
- port_bitmap |= BIT(i);
- }
+ if (priv->ports[other_port].enable)
+ mt7530_set(priv, MT7530_PCR_P(other_port),
+ PCR_MATRIX(BIT(port)));
+ priv->ports[other_port].pm |= PCR_MATRIX(BIT(port));
+
+ port_bitmap |= BIT(other_port);
}
/* Add the all other ports to this port matrix. */
@@ -1236,7 +1240,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
/* This is called after .port_bridge_leave when leaving a VLAN-aware
* bridge. Don't set standalone ports to fallback mode.
*/
- if (dsa_to_port(ds, port)->bridge_dev)
+ if (dsa_port_bridge_dev_get(dsa_to_port(ds, port)))
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
MT7530_PORT_FALLBACK_MODE);
@@ -1262,9 +1266,12 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
* the CPU port get out of VLAN filtering mode.
*/
if (all_user_ports_removed) {
- mt7530_write(priv, MT7530_PCR_P(MT7530_CPU_PORT),
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+
+ mt7530_write(priv, MT7530_PCR_P(cpu_dp->index),
PCR_MATRIX(dsa_user_ports(priv->ds)));
- mt7530_write(priv, MT7530_PVC_P(MT7530_CPU_PORT), PORT_SPEC_TAG
+ mt7530_write(priv, MT7530_PVC_P(cpu_dp->index), PORT_SPEC_TAG
| PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
}
@@ -1287,38 +1294,55 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
if (!priv->ports[port].pvid)
mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
MT7530_VLAN_ACC_TAGGED);
- }
- /* Set the port as a user port which is to be able to recognize VID
- * from incoming packets before fetching entry within the VLAN table.
- */
- mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
- VLAN_ATTR(MT7530_VLAN_USER) |
- PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+ /* Set the port as a user port which is to be able to recognize
+ * VID from incoming packets before fetching entry within the
+ * VLAN table.
+ */
+ mt7530_rmw(priv, MT7530_PVC_P(port),
+ VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER) |
+ PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+ } else {
+ /* Also set CPU ports to the "user" VLAN port attribute, to
+ * allow VLAN classification, but keep the EG_TAG attribute as
+ * "consistent" (in other words, don't change its value) for packets
+ * received by the switch from the CPU, so that tagged packets
+ * are forwarded to user ports as tagged, and untagged as
+ * untagged.
+ */
+ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER));
+ }
}
static void
mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge)
{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ struct dsa_port *cpu_dp = dp->cpu_dp;
struct mt7530_priv *priv = ds->priv;
- int i;
mutex_lock(&priv->reg_mutex);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ int other_port = other_dp->index;
+
+ if (dp == other_dp)
+ continue;
+
/* Remove this port from the port matrix of the other ports
* in the same bridge. If the port is disabled, port matrix
* is kept and not being setup until the port becomes enabled.
*/
- if (dsa_is_user_port(ds, i) && i != port) {
- if (dsa_to_port(ds, i)->bridge_dev != bridge)
- continue;
- if (priv->ports[i].enable)
- mt7530_clear(priv, MT7530_PCR_P(i),
- PCR_MATRIX(BIT(port)));
- priv->ports[i].pm &= ~PCR_MATRIX(BIT(port));
- }
+ if (!dsa_port_offloads_bridge(other_dp, &bridge))
+ continue;
+
+ if (priv->ports[other_port].enable)
+ mt7530_clear(priv, MT7530_PCR_P(other_port),
+ PCR_MATRIX(BIT(port)));
+ priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port));
}
/* Set the cpu port to be the only one in the port matrix of
@@ -1326,8 +1350,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
*/
if (priv->ports[port].enable)
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
- PCR_MATRIX(BIT(MT7530_CPU_PORT)));
- priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT));
+ PCR_MATRIX(BIT(cpu_dp->index)));
+ priv->ports[port].pm = PCR_MATRIX(BIT(cpu_dp->index));
/* When a port is removed from the bridge, the port would be set up
* back to the default as is at initial boot which is a VLAN-unaware
@@ -1341,7 +1365,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
static int
mt7530_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
int ret;
@@ -1357,7 +1382,8 @@ mt7530_port_fdb_add(struct dsa_switch *ds, int port,
static int
mt7530_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
int ret;
@@ -1408,7 +1434,8 @@ err:
static int
mt7530_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
@@ -1434,7 +1461,8 @@ mt7530_port_mdb_add(struct dsa_switch *ds, int port,
static int
mt7530_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mt7530_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
@@ -1490,6 +1518,9 @@ static int
mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+
if (vlan_filtering) {
/* The port is being kept as VLAN-unaware port when bridge is
* set up with vlan_filtering not being set, Otherwise, the
@@ -1497,7 +1528,7 @@ mt7530_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
* for becoming a VLAN-aware port.
*/
mt7530_port_set_vlan_aware(ds, port);
- mt7530_port_set_vlan_aware(ds, MT7530_CPU_PORT);
+ mt7530_port_set_vlan_aware(ds, cpu_dp->index);
} else {
mt7530_port_set_vlan_unaware(ds, port);
}
@@ -1509,11 +1540,11 @@ static void
mt7530_hw_vlan_add(struct mt7530_priv *priv,
struct mt7530_hw_vlan_entry *entry)
{
+ struct dsa_port *dp = dsa_to_port(priv->ds, entry->port);
u8 new_members;
u32 val;
- new_members = entry->old_members | BIT(entry->port) |
- BIT(MT7530_CPU_PORT);
+ new_members = entry->old_members | BIT(entry->port);
/* Validate the entry with independent learning, create egress tag per
* VLAN and joining the port as one of the port members.
@@ -1524,22 +1555,20 @@ mt7530_hw_vlan_add(struct mt7530_priv *priv,
/* Decide whether adding tag or not for those outgoing packets from the
* port inside the VLAN.
- */
- val = entry->untagged ? MT7530_VLAN_EGRESS_UNTAG :
- MT7530_VLAN_EGRESS_TAG;
- mt7530_rmw(priv, MT7530_VAWD2,
- ETAG_CTRL_P_MASK(entry->port),
- ETAG_CTRL_P(entry->port, val));
-
- /* CPU port is always taken as a tagged port for serving more than one
+ * CPU port is always taken as a tagged port for serving more than one
* VLANs across and also being applied with egress type stack mode for
* that VLAN tags would be appended after hardware special tag used as
* DSA tag.
*/
+ if (dsa_port_is_cpu(dp))
+ val = MT7530_VLAN_EGRESS_STACK;
+ else if (entry->untagged)
+ val = MT7530_VLAN_EGRESS_UNTAG;
+ else
+ val = MT7530_VLAN_EGRESS_TAG;
mt7530_rmw(priv, MT7530_VAWD2,
- ETAG_CTRL_P_MASK(MT7530_CPU_PORT),
- ETAG_CTRL_P(MT7530_CPU_PORT,
- MT7530_VLAN_EGRESS_STACK));
+ ETAG_CTRL_P_MASK(entry->port),
+ ETAG_CTRL_P(entry->port, val));
}
static void
@@ -1558,11 +1587,7 @@ mt7530_hw_vlan_del(struct mt7530_priv *priv,
return;
}
- /* If certain member apart from CPU port is still alive in the VLAN,
- * the entry would be kept valid. Otherwise, the entry is got to be
- * disabled.
- */
- if (new_members && new_members != BIT(MT7530_CPU_PORT)) {
+ if (new_members) {
val = IVL_MAC | VTAG_EN | PORT_MEM(new_members) |
VLAN_VALID;
mt7530_write(priv, MT7530_VAWD1, val);
@@ -1701,7 +1726,7 @@ static int mt753x_mirror_port_set(unsigned int id, u32 val)
static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
struct mt7530_priv *priv = ds->priv;
int monitor_port;
@@ -1880,10 +1905,10 @@ mt7530_irq_thread_fn(int irq, void *dev_id)
u32 val;
int p;
- mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(priv);
val = mt7530_mii_read(priv, MT7530_SYS_INT_STS);
mt7530_mii_write(priv, MT7530_SYS_INT_STS, val);
- mutex_unlock(&priv->bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
for (p = 0; p < MT7530_NUM_PHYS; p++) {
if (BIT(p) & val) {
@@ -1919,7 +1944,7 @@ mt7530_irq_bus_lock(struct irq_data *d)
{
struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
- mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+ mt7530_mutex_lock(priv);
}
static void
@@ -1928,7 +1953,7 @@ mt7530_irq_bus_sync_unlock(struct irq_data *d)
struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
- mutex_unlock(&priv->bus->mdio_lock);
+ mt7530_mutex_unlock(priv);
}
static struct irq_chip mt7530_irq_chip = {
@@ -1957,6 +1982,47 @@ static const struct irq_domain_ops mt7530_irq_domain_ops = {
};
static void
+mt7988_irq_mask(struct irq_data *d)
+{
+ struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
+
+ priv->irq_enable &= ~BIT(d->hwirq);
+ mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
+}
+
+static void
+mt7988_irq_unmask(struct irq_data *d)
+{
+ struct mt7530_priv *priv = irq_data_get_irq_chip_data(d);
+
+ priv->irq_enable |= BIT(d->hwirq);
+ mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable);
+}
+
+static struct irq_chip mt7988_irq_chip = {
+ .name = KBUILD_MODNAME,
+ .irq_mask = mt7988_irq_mask,
+ .irq_unmask = mt7988_irq_unmask,
+};
+
+static int
+mt7988_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler(irq, &mt7988_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, true);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mt7988_irq_domain_ops = {
+ .map = mt7988_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static void
mt7530_setup_mdio_irq(struct mt7530_priv *priv)
{
struct dsa_switch *ds = priv->ds;
@@ -1990,8 +2056,15 @@ mt7530_setup_irq(struct mt7530_priv *priv)
return priv->irq ? : -EINVAL;
}
- priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
- &mt7530_irq_domain_ops, priv);
+ if (priv->id == ID_MT7988)
+ priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
+ &mt7988_irq_domain_ops,
+ priv);
+ else
+ priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS,
+ &mt7530_irq_domain_ops,
+ priv);
+
if (!priv->irq_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
@@ -2058,15 +2131,17 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
bus->priv = priv;
bus->name = KBUILD_MODNAME "-mii";
snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++);
- bus->read = mt753x_phy_read;
- bus->write = mt753x_phy_write;
+ bus->read = mt753x_phy_read_c22;
+ bus->write = mt753x_phy_write_c22;
+ bus->read_c45 = mt753x_phy_read_c45;
+ bus->write_c45 = mt753x_phy_write_c45;
bus->parent = dev;
bus->phy_mask = ~ds->phys_mii_mask;
if (priv->irq)
mt7530_setup_mdio_irq(priv);
- ret = mdiobus_register(bus);
+ ret = devm_mdiobus_register(dev, bus);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
if (priv->irq)
@@ -2080,11 +2155,12 @@ static int
mt7530_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
+ struct device_node *dn = NULL;
struct device_node *phy_node;
struct device_node *mac_np;
struct mt7530_dummy_poll p;
phy_interface_t interface;
- struct device_node *dn;
+ struct dsa_port *cpu_dp;
u32 id, val;
int ret, i;
@@ -2092,7 +2168,19 @@ mt7530_setup(struct dsa_switch *ds)
* controller also is the container for two GMACs nodes representing
* as two netdev instances.
*/
- dn = dsa_to_port(ds, MT7530_CPU_PORT)->master->dev.of_node->parent;
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ dn = cpu_dp->master->dev.of_node->parent;
+ /* It doesn't matter which CPU port is found first, as the
+ * masters of all CPU ports should share the same parent OF node.
+ */
+ break;
+ }
+
+ if (!dn) {
+ dev_err(ds->dev, "parent OF node of DSA master not found");
+ return -EINVAL;
+ }
+
ds->assisted_learning_on_cpu_port = true;
ds->mtu_enforcement_ingress = true;
@@ -2148,7 +2236,18 @@ mt7530_setup(struct dsa_switch *ds)
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
- /* Enable Port 6 only; P5 as GMAC5 which currently is not supported */
+ mt7530_pll_setup(priv);
+
+ /* Lower Tx driving for TRGMII path */
+ for (i = 0; i < NUM_TRGMII_CTRL; i++)
+ mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+ TD_DM_DRVP(8) | TD_DM_DRVN(8));
+
+ for (i = 0; i < NUM_TRGMII_CTRL; i++)
+ mt7530_rmw(priv, MT7530_TRGMII_RD(i),
+ RD_TAP_MASK, RD_TAP(16));
+
+ /* Enable port 6 */
val = mt7530_read(priv, MT7530_MHWTRAP);
val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
val |= MHWTRAP_MANUAL;
@@ -2216,6 +2315,7 @@ mt7530_setup(struct dsa_switch *ds)
ret = of_get_phy_mode(mac_np, &interface);
if (ret && ret != -ENODEV) {
of_node_put(mac_np);
+ of_node_put(phy_node);
return ret;
}
id = of_mdio_parse_addr(ds->dev, phy_node);
@@ -2249,6 +2349,64 @@ mt7530_setup(struct dsa_switch *ds)
}
static int
+mt7531_setup_common(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *cpu_dp;
+ int ret, i;
+
+ /* BPDU to CPU port */
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
+ BIT(cpu_dp->index));
+ break;
+ }
+ mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
+ MT753X_BPDU_CPU_ONLY);
+
+ /* Enable and reset MIB counters */
+ mt7530_mib_reset(ds);
+
+ /* Disable flooding on all ports */
+ mt7530_clear(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK |
+ UNU_FFP_MASK);
+
+ for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
+ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
+ PCR_MATRIX_CLR);
+
+ /* Disable learning by default on all ports */
+ mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
+
+ mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
+
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = mt753x_cpu_port_enable(ds, i);
+ if (ret)
+ return ret;
+ } else {
+ mt7530_port_disable(ds, i);
+
+ /* Set default PVID to 0 on all user ports */
+ mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
+ G0_PORT_VID_DEF);
+ }
+
+ /* Enable consistent egress tag */
+ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ }
+
+ /* Flush the FDB table */
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int
mt7531_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
@@ -2286,11 +2444,17 @@ mt7531_setup(struct dsa_switch *ds)
return -ENODEV;
}
+ /* all MACs must be forced link-down before sw reset */
+ for (i = 0; i < MT7530_NUM_PORTS; i++)
+ mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
+
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL,
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
+ mt7531_pll_setup(priv);
+
if (mt7531_dual_sgmii_supported(priv)) {
priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
@@ -2324,41 +2488,7 @@ mt7531_setup(struct dsa_switch *ds)
mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
CORE_PLL_GROUP4, val);
- /* BPDU to CPU port */
- mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
- BIT(MT7530_CPU_PORT));
- mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
- MT753X_BPDU_CPU_ONLY);
-
- /* Enable and reset MIB counters */
- mt7530_mib_reset(ds);
-
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
- /* Disable forwarding by default on all ports */
- mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
- PCR_MATRIX_CLR);
-
- /* Disable learning by default on all ports */
- mt7530_set(priv, MT7530_PSC_P(i), SA_DIS);
-
- mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
-
- if (dsa_is_cpu_port(ds, i)) {
- ret = mt753x_cpu_port_enable(ds, i);
- if (ret)
- return ret;
- } else {
- mt7530_port_disable(ds, i);
-
- /* Set default PVID to 0 on all user ports */
- mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
- G0_PORT_VID_DEF);
- }
-
- /* Enable consistent egress tag */
- mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
- PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
- }
+ mt7531_setup_common(ds);
/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0(priv);
@@ -2368,43 +2498,33 @@ mt7531_setup(struct dsa_switch *ds)
ds->assisted_learning_on_cpu_port = true;
ds->mtu_enforcement_ingress = true;
- /* Flush the FDB table */
- ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
- if (ret < 0)
- return ret;
-
return 0;
}
-static bool
-mt7530_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
+static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- struct mt7530_priv *priv = ds->priv;
-
switch (port) {
case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
+
case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (!phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
+
case 6: /* 1st cpu port */
- if (state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_TRGMII)
- return false;
+ __set_bit(PHY_INTERFACE_MODE_RGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_TRGMII,
+ config->supported_interfaces);
break;
- default:
- dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
- port);
- return false;
}
-
- return true;
}
static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
@@ -2412,42 +2532,54 @@ static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
}
-static bool
-mt7531_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
+static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct mt7530_priv *priv = ds->priv;
switch (port) {
case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- return false;
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
break;
+
case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
- if (mt7531_is_rgmii_port(priv, port))
- return phy_interface_mode_is_rgmii(state->interface);
+ if (mt7531_is_rgmii_port(priv, port)) {
+ phy_interface_set_rgmii(config->supported_interfaces);
+ break;
+ }
fallthrough;
+
case 6: /* 1st cpu port supports sgmii/8023z only */
- if (state->interface != PHY_INTERFACE_MODE_SGMII &&
- !phy_interface_mode_is_8023z(state->interface))
- return false;
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_2500FD;
break;
- default:
- dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
- port);
- return false;
}
-
- return true;
}
-static bool
-mt753x_phy_mode_supported(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state)
+static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- struct mt7530_priv *priv = ds->priv;
+ phy_interface_zero(config->supported_interfaces);
+
+ switch (port) {
+ case 0 ... 4: /* Internal phy */
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ break;
- return priv->info->phy_mode_supported(ds, port, state);
+ case 6:
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10000FD;
+ }
}
static int
@@ -2520,141 +2652,20 @@ static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
return 0;
}
-static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port,
- unsigned long *supported)
-{
- /* Port5 supports ethier RGMII or SGMII.
- * Port6 supports SGMII only.
- */
- switch (port) {
- case 5:
- if (mt7531_is_rgmii_port(priv, port))
- break;
- fallthrough;
- case 6:
- phylink_set(supported, 1000baseX_Full);
- phylink_set(supported, 2500baseX_Full);
- phylink_set(supported, 2500baseT_Full);
- }
-}
-
-static void
-mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex)
-{
- struct mt7530_priv *priv = ds->priv;
- unsigned int val;
-
- /* For adjusting speed and duplex of SGMII force mode. */
- if (interface != PHY_INTERFACE_MODE_SGMII ||
- phylink_autoneg_inband(mode))
- return;
-
- /* SGMII force mode setting */
- val = mt7530_read(priv, MT7531_SGMII_MODE(port));
- val &= ~MT7531_SGMII_IF_MODE_MASK;
-
- switch (speed) {
- case SPEED_10:
- val |= MT7531_SGMII_FORCE_SPEED_10;
- break;
- case SPEED_100:
- val |= MT7531_SGMII_FORCE_SPEED_100;
- break;
- case SPEED_1000:
- val |= MT7531_SGMII_FORCE_SPEED_1000;
- break;
- }
-
- /* MT7531 SGMII 1G force mode can only work in full duplex mode,
- * no matter MT7531_SGMII_FORCE_HALF_DUPLEX is set or not.
- */
- if ((speed == SPEED_10 || speed == SPEED_100) &&
- duplex != DUPLEX_FULL)
- val |= MT7531_SGMII_FORCE_HALF_DUPLEX;
-
- mt7530_write(priv, MT7531_SGMII_MODE(port), val);
-}
-
static bool mt753x_is_mac_port(u32 port)
{
return (port == 5 || port == 6);
}
-static int mt7531_sgmii_setup_mode_force(struct mt7530_priv *priv, u32 port,
- phy_interface_t interface)
-{
- u32 val;
-
- if (!mt753x_is_mac_port(port))
- return -EINVAL;
-
- mt7530_set(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
- MT7531_SGMII_PHYA_PWD);
-
- val = mt7530_read(priv, MT7531_PHYA_CTRL_SIGNAL3(port));
- val &= ~MT7531_RG_TPHY_SPEED_MASK;
- /* Setup 2.5 times faster clock for 2.5Gbps data speeds with 10B/8B
- * encoding.
- */
- val |= (interface == PHY_INTERFACE_MODE_2500BASEX) ?
- MT7531_RG_TPHY_SPEED_3_125G : MT7531_RG_TPHY_SPEED_1_25G;
- mt7530_write(priv, MT7531_PHYA_CTRL_SIGNAL3(port), val);
-
- mt7530_clear(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_ENABLE);
-
- /* MT7531 SGMII 1G and 2.5G force mode can only work in full duplex
- * mode, no matter MT7531_SGMII_FORCE_HALF_DUPLEX is set or not.
- */
- mt7530_rmw(priv, MT7531_SGMII_MODE(port),
- MT7531_SGMII_IF_MODE_MASK | MT7531_SGMII_REMOTE_FAULT_DIS,
- MT7531_SGMII_FORCE_SPEED_1000);
-
- mt7530_write(priv, MT7531_QPHY_PWR_STATE_CTRL(port), 0);
-
- return 0;
-}
-
-static int mt7531_sgmii_setup_mode_an(struct mt7530_priv *priv, int port,
- phy_interface_t interface)
-{
- if (!mt753x_is_mac_port(port))
- return -EINVAL;
-
- mt7530_set(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
- MT7531_SGMII_PHYA_PWD);
-
- mt7530_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
- MT7531_RG_TPHY_SPEED_MASK, MT7531_RG_TPHY_SPEED_1_25G);
-
- mt7530_set(priv, MT7531_SGMII_MODE(port),
- MT7531_SGMII_REMOTE_FAULT_DIS |
- MT7531_SGMII_SPEED_DUPLEX_AN);
-
- mt7530_rmw(priv, MT7531_PCS_SPEED_ABILITY(port),
- MT7531_SGMII_TX_CONFIG_MASK, 1);
-
- mt7530_set(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_ENABLE);
-
- mt7530_set(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_RESTART);
-
- mt7530_write(priv, MT7531_QPHY_PWR_STATE_CTRL(port), 0);
-
- return 0;
-}
-
-static void mt7531_sgmii_restart_an(struct dsa_switch *ds, int port)
+static int
+mt7988_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface)
{
- struct mt7530_priv *priv = ds->priv;
- u32 val;
+ if (dsa_is_cpu_port(ds, port) &&
+ interface == PHY_INTERFACE_MODE_INTERNAL)
+ return 0;
- /* Only restart AN when AN is enabled */
- val = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
- if (val & MT7531_SGMII_AN_ENABLE) {
- val |= MT7531_SGMII_AN_RESTART;
- mt7530_write(priv, MT7531_PCS_CONTROL_1(port), val);
- }
+ return -EINVAL;
}
static int
@@ -2679,14 +2690,11 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
phydev = dp->slave->phydev;
return mt7531_rgmii_setup(priv, port, interface, phydev);
case PHY_INTERFACE_MODE_SGMII:
- return mt7531_sgmii_setup_mode_an(priv, port, interface);
case PHY_INTERFACE_MODE_NA:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
- if (phylink_autoneg_inband(mode))
- return -EINVAL;
-
- return mt7531_sgmii_setup_mode_force(priv, port, interface);
+ /* handled in SGMII PCS driver */
+ return 0;
default:
return -EINVAL;
}
@@ -2703,6 +2711,24 @@ mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
return priv->info->mac_port_config(ds, port, mode, state->interface);
}
+static struct phylink_pcs *
+mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ return &priv->pcs[port].pcs;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return priv->ports[port].sgmii_pcs;
+ default:
+ return NULL;
+ }
+}
+
static void
mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
@@ -2710,12 +2736,10 @@ mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
struct mt7530_priv *priv = ds->priv;
u32 mcr_cur, mcr_new;
- if (!mt753x_phy_mode_supported(ds, port, state))
- goto unsupported;
-
switch (port) {
case 0 ... 4: /* Internal phy */
- if (state->interface != PHY_INTERFACE_MODE_GMII)
+ if (state->interface != PHY_INTERFACE_MODE_GMII &&
+ state->interface != PHY_INTERFACE_MODE_INTERNAL)
goto unsupported;
break;
case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
@@ -2746,13 +2770,6 @@ unsupported:
return;
}
- if (phylink_autoneg_inband(mode) &&
- state->interface != PHY_INTERFACE_MODE_SGMII) {
- dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
- __func__);
- return;
- }
-
mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
mcr_new = mcr_cur;
mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
@@ -2767,17 +2784,6 @@ unsupported:
mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
}
-static void
-mt753x_phylink_mac_an_restart(struct dsa_switch *ds, int port)
-{
- struct mt7530_priv *priv = ds->priv;
-
- if (!priv->info->mac_pcs_an_restart)
- return;
-
- priv->info->mac_pcs_an_restart(ds, port);
-}
-
static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
@@ -2787,16 +2793,13 @@ static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
}
-static void mt753x_mac_pcs_link_up(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex)
+static void mt753x_phylink_pcs_link_up(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex)
{
- struct mt7530_priv *priv = ds->priv;
-
- if (!priv->info->mac_pcs_link_up)
- return;
-
- priv->info->mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
+ if (pcs->ops->pcs_link_up)
+ pcs->ops->pcs_link_up(pcs, mode, interface, speed, duplex);
}
static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -2809,14 +2812,13 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
struct mt7530_priv *priv = ds->priv;
u32 mcr;
- mt753x_mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
-
mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
/* MT753x MAC works in 1G full duplex mode for all up-clocked
* variants.
*/
- if (interface == PHY_INTERFACE_MODE_TRGMII ||
+ if (interface == PHY_INTERFACE_MODE_INTERNAL ||
+ interface == PHY_INTERFACE_MODE_TRGMII ||
(phy_interface_mode_is_8023z(interface))) {
speed = SPEED_1000;
duplex = DUPLEX_FULL;
@@ -2838,7 +2840,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
mcr |= PMCR_RX_FC_EN;
}
- if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, 0) >= 0) {
+ if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
switch (speed) {
case SPEED_1000:
mcr |= PMCR_FORCE_EEE1G;
@@ -2872,8 +2874,6 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port)
case 6:
interface = PHY_INTERFACE_MODE_2500BASEX;
- mt7531_pad_setup(ds, interface);
-
priv->p6_interface = interface;
break;
default:
@@ -2890,81 +2890,66 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port)
return ret;
mt7530_write(priv, MT7530_PMCR_P(port),
PMCR_CPU_PORT_SETTING(priv->id));
+ mt753x_phylink_pcs_link_up(&priv->pcs[port].pcs, MLO_AN_FIXED,
+ interface, speed, DUPLEX_FULL);
mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
speed, DUPLEX_FULL, true, true);
return 0;
}
-static void
-mt7530_mac_port_validate(struct dsa_switch *ds, int port,
- unsigned long *supported)
-{
- if (port == 5)
- phylink_set(supported, 1000baseX_Full);
-}
-
-static void mt7531_mac_port_validate(struct dsa_switch *ds, int port,
- unsigned long *supported)
+static int
+mt7988_cpu_port_config(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- mt7531_sgmii_validate(priv, port, supported);
+ mt7530_write(priv, MT7530_PMCR_P(port),
+ PMCR_CPU_PORT_SETTING(priv->id));
+
+ mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED,
+ PHY_INTERFACE_MODE_INTERNAL, NULL,
+ SPEED_10000, DUPLEX_FULL, true, true);
+
+ return 0;
}
-static void
-mt753x_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mt7530_priv *priv = ds->priv;
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- !mt753x_phy_mode_supported(ds, port, state)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
-
- if (state->interface != PHY_INTERFACE_MODE_TRGMII ||
- !phy_interface_mode_is_8023z(state->interface)) {
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, Autoneg);
- }
-
- /* This switch only supports 1G full-duplex. */
- if (state->interface != PHY_INTERFACE_MODE_MII)
- phylink_set(mask, 1000baseT_Full);
+ /* This switch only supports full-duplex at 1Gbps */
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
- priv->info->mac_port_validate(ds, port, mask);
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
+ priv->info->mac_port_get_caps(ds, port, config);
+}
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+static int mt753x_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ /* Autonegotiation is not supported in TRGMII or 802.3z modes */
+ if (state->interface == PHY_INTERFACE_MODE_TRGMII ||
+ phy_interface_mode_is_8023z(state->interface))
+ phylink_clear(supported, Autoneg);
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
+ return 0;
}
-static int
-mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static void mt7530_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- struct mt7530_priv *priv = ds->priv;
+ struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
+ int port = pcs_to_mt753x_pcs(pcs)->port;
u32 pmsr;
- if (port < 0 || port >= MT7530_NUM_PORTS)
- return -EINVAL;
-
pmsr = mt7530_read(priv, MT7530_PMSR_P(port));
state->link = (pmsr & PMSR_LINK);
@@ -2991,76 +2976,41 @@ mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
state->pause |= MLO_PAUSE_RX;
if (pmsr & PMSR_TX_FC)
state->pause |= MLO_PAUSE_TX;
-
- return 1;
}
-static int
-mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
- struct phylink_link_state *state)
-{
- u32 status, val;
- u16 config_reg;
-
- status = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
- state->link = !!(status & MT7531_SGMII_LINK_STATUS);
- if (state->interface == PHY_INTERFACE_MODE_SGMII &&
- (status & MT7531_SGMII_AN_ENABLE)) {
- val = mt7530_read(priv, MT7531_PCS_SPEED_ABILITY(port));
- config_reg = val >> 16;
-
- switch (config_reg & LPA_SGMII_SPD_MASK) {
- case LPA_SGMII_1000:
- state->speed = SPEED_1000;
- break;
- case LPA_SGMII_100:
- state->speed = SPEED_100;
- break;
- case LPA_SGMII_10:
- state->speed = SPEED_10;
- break;
- default:
- dev_err(priv->dev, "invalid sgmii PHY speed\n");
- state->link = false;
- return -EINVAL;
- }
-
- if (config_reg & LPA_SGMII_FULL_DUPLEX)
- state->duplex = DUPLEX_FULL;
- else
- state->duplex = DUPLEX_HALF;
- }
-
+static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
return 0;
}
-static int
-mt7531_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
+static void mt7530_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct mt7530_priv *priv = ds->priv;
-
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
- return mt7531_sgmii_pcs_get_state_an(priv, port, state);
-
- return -EOPNOTSUPP;
}
-static int
-mt753x_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct mt7530_priv *priv = ds->priv;
-
- return priv->info->mac_port_get_state(ds, port, state);
-}
+static const struct phylink_pcs_ops mt7530_pcs_ops = {
+ .pcs_validate = mt753x_pcs_validate,
+ .pcs_get_state = mt7530_pcs_get_state,
+ .pcs_config = mt753x_pcs_config,
+ .pcs_an_restart = mt7530_pcs_an_restart,
+};
static int
mt753x_setup(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
- int ret = priv->info->sw_setup(ds);
+ int i, ret;
+ /* Initialise the PCS devices */
+ for (i = 0; i < priv->ds->num_ports; i++) {
+ priv->pcs[i].pcs.ops = priv->info->pcs_ops;
+ priv->pcs[i].priv = priv;
+ priv->pcs[i].port = i;
+ }
+
+ ret = priv->info->sw_setup(ds);
if (ret)
return ret;
@@ -3072,6 +3022,12 @@ mt753x_setup(struct dsa_switch *ds)
if (ret && priv->irq)
mt7530_free_irq_common(priv);
+ if (priv->create_sgmii) {
+ ret = priv->create_sgmii(priv, mt7531_dual_sgmii_supported(priv));
+ if (ret && priv->irq)
+ mt7530_free_irq(priv);
+ }
+
return ret;
}
@@ -3105,7 +3061,28 @@ static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
return 0;
}
-static const struct dsa_switch_ops mt7530_switch_ops = {
+static int mt7988_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
+{
+ return 0;
+}
+
+static int mt7988_setup(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ /* Reset the switch */
+ reset_control_assert(priv->rstc);
+ usleep_range(20, 50);
+ reset_control_deassert(priv->rstc);
+ usleep_range(20, 50);
+
+ /* Reset the switch PHYs */
+ mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_PHY_RST);
+
+ return mt7531_setup_common(ds);
+}
+
+const struct dsa_switch_ops mt7530_switch_ops = {
.get_tag_protocol = mtk_get_tag_protocol,
.setup = mt753x_setup,
.get_strings = mt7530_get_strings,
@@ -3131,100 +3108,86 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_vlan_del = mt7530_port_vlan_del,
.port_mirror_add = mt753x_port_mirror_add,
.port_mirror_del = mt753x_port_mirror_del,
- .phylink_validate = mt753x_phylink_validate,
- .phylink_mac_link_state = mt753x_phylink_mac_link_state,
+ .phylink_get_caps = mt753x_phylink_get_caps,
+ .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs,
.phylink_mac_config = mt753x_phylink_mac_config,
- .phylink_mac_an_restart = mt753x_phylink_mac_an_restart,
.phylink_mac_link_down = mt753x_phylink_mac_link_down,
.phylink_mac_link_up = mt753x_phylink_mac_link_up,
.get_mac_eee = mt753x_get_mac_eee,
.set_mac_eee = mt753x_set_mac_eee,
};
+EXPORT_SYMBOL_GPL(mt7530_switch_ops);
-static const struct mt753x_info mt753x_table[] = {
+const struct mt753x_info mt753x_table[] = {
[ID_MT7621] = {
.id = ID_MT7621,
+ .pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
- .phy_read = mt7530_phy_read,
- .phy_write = mt7530_phy_write,
+ .phy_read_c22 = mt7530_phy_read_c22,
+ .phy_write_c22 = mt7530_phy_write_c22,
+ .phy_read_c45 = mt7530_phy_read_c45,
+ .phy_write_c45 = mt7530_phy_write_c45,
.pad_setup = mt7530_pad_clk_setup,
- .phy_mode_supported = mt7530_phy_mode_supported,
- .mac_port_validate = mt7530_mac_port_validate,
- .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
[ID_MT7530] = {
.id = ID_MT7530,
+ .pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
- .phy_read = mt7530_phy_read,
- .phy_write = mt7530_phy_write,
+ .phy_read_c22 = mt7530_phy_read_c22,
+ .phy_write_c22 = mt7530_phy_write_c22,
+ .phy_read_c45 = mt7530_phy_read_c45,
+ .phy_write_c45 = mt7530_phy_write_c45,
.pad_setup = mt7530_pad_clk_setup,
- .phy_mode_supported = mt7530_phy_mode_supported,
- .mac_port_validate = mt7530_mac_port_validate,
- .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
},
[ID_MT7531] = {
.id = ID_MT7531,
+ .pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7531_setup,
- .phy_read = mt7531_ind_phy_read,
- .phy_write = mt7531_ind_phy_write,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
.pad_setup = mt7531_pad_setup,
.cpu_port_config = mt7531_cpu_port_config,
- .phy_mode_supported = mt7531_phy_mode_supported,
- .mac_port_validate = mt7531_mac_port_validate,
- .mac_port_get_state = mt7531_phylink_mac_link_state,
+ .mac_port_get_caps = mt7531_mac_port_get_caps,
.mac_port_config = mt7531_mac_config,
- .mac_pcs_an_restart = mt7531_sgmii_restart_an,
- .mac_pcs_link_up = mt7531_sgmii_link_up_force,
+ },
+ [ID_MT7988] = {
+ .id = ID_MT7988,
+ .pcs_ops = &mt7530_pcs_ops,
+ .sw_setup = mt7988_setup,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
+ .pad_setup = mt7988_pad_setup,
+ .cpu_port_config = mt7988_cpu_port_config,
+ .mac_port_get_caps = mt7988_mac_port_get_caps,
+ .mac_port_config = mt7988_mac_config,
},
};
+EXPORT_SYMBOL_GPL(mt753x_table);
-static const struct of_device_id mt7530_of_match[] = {
- { .compatible = "mediatek,mt7621", .data = &mt753x_table[ID_MT7621], },
- { .compatible = "mediatek,mt7530", .data = &mt753x_table[ID_MT7530], },
- { .compatible = "mediatek,mt7531", .data = &mt753x_table[ID_MT7531], },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, mt7530_of_match);
-
-static int
-mt7530_probe(struct mdio_device *mdiodev)
+int
+mt7530_probe_common(struct mt7530_priv *priv)
{
- struct mt7530_priv *priv;
- struct device_node *dn;
-
- dn = mdiodev->dev.of_node;
-
- priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ struct device *dev = priv->dev;
- priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
if (!priv->ds)
return -ENOMEM;
- priv->ds->dev = &mdiodev->dev;
+ priv->ds->dev = dev;
priv->ds->num_ports = MT7530_NUM_PORTS;
- /* Use medatek,mcm property to distinguish hardware type that would
- * casues a little bit differences on power-on sequence.
- */
- priv->mcm = of_property_read_bool(dn, "mediatek,mcm");
- if (priv->mcm) {
- dev_info(&mdiodev->dev, "MT7530 adapts as multi-chip module\n");
-
- priv->rstc = devm_reset_control_get(&mdiodev->dev, "mcm");
- if (IS_ERR(priv->rstc)) {
- dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
- return PTR_ERR(priv->rstc);
- }
- }
-
/* Get the hardware identifier from the devicetree node.
* We will need it for some of the clock and regulator setup.
*/
- priv->info = of_device_get_match_data(&mdiodev->dev);
+ priv->info = of_device_get_match_data(dev);
if (!priv->info)
return -EINVAL;
@@ -3232,99 +3195,33 @@ mt7530_probe(struct mdio_device *mdiodev)
* properly.
*/
if (!priv->info->sw_setup || !priv->info->pad_setup ||
- !priv->info->phy_read || !priv->info->phy_write ||
- !priv->info->phy_mode_supported ||
- !priv->info->mac_port_validate ||
- !priv->info->mac_port_get_state || !priv->info->mac_port_config)
+ !priv->info->phy_read_c22 || !priv->info->phy_write_c22 ||
+ !priv->info->mac_port_get_caps ||
+ !priv->info->mac_port_config)
return -EINVAL;
priv->id = priv->info->id;
-
- if (priv->id == ID_MT7530) {
- priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core");
- if (IS_ERR(priv->core_pwr))
- return PTR_ERR(priv->core_pwr);
-
- priv->io_pwr = devm_regulator_get(&mdiodev->dev, "io");
- if (IS_ERR(priv->io_pwr))
- return PTR_ERR(priv->io_pwr);
- }
-
- /* Not MCM that indicates switch works as the remote standalone
- * integrated circuit so the GPIO pin would be used to complete
- * the reset, otherwise memory-mapped register accessing used
- * through syscon provides in the case of MCM.
- */
- if (!priv->mcm) {
- priv->reset = devm_gpiod_get_optional(&mdiodev->dev, "reset",
- GPIOD_OUT_LOW);
- if (IS_ERR(priv->reset)) {
- dev_err(&mdiodev->dev, "Couldn't get our reset line\n");
- return PTR_ERR(priv->reset);
- }
- }
-
- priv->bus = mdiodev->bus;
- priv->dev = &mdiodev->dev;
+ priv->dev = dev;
priv->ds->priv = priv;
priv->ds->ops = &mt7530_switch_ops;
mutex_init(&priv->reg_mutex);
- dev_set_drvdata(&mdiodev->dev, priv);
+ dev_set_drvdata(dev, priv);
- return dsa_register_switch(priv->ds);
+ return 0;
}
+EXPORT_SYMBOL_GPL(mt7530_probe_common);
-static void
-mt7530_remove(struct mdio_device *mdiodev)
+void
+mt7530_remove_common(struct mt7530_priv *priv)
{
- struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
- int ret = 0;
-
- if (!priv)
- return;
-
- ret = regulator_disable(priv->core_pwr);
- if (ret < 0)
- dev_err(priv->dev,
- "Failed to disable core power: %d\n", ret);
-
- ret = regulator_disable(priv->io_pwr);
- if (ret < 0)
- dev_err(priv->dev, "Failed to disable io pwr: %d\n",
- ret);
-
if (priv->irq)
mt7530_free_irq(priv);
dsa_unregister_switch(priv->ds);
- mutex_destroy(&priv->reg_mutex);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
-}
-
-static void mt7530_shutdown(struct mdio_device *mdiodev)
-{
- struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
- if (!priv)
- return;
-
- dsa_switch_shutdown(priv->ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
+ mutex_destroy(&priv->reg_mutex);
}
-
-static struct mdio_driver mt7530_mdio_driver = {
- .probe = mt7530_probe,
- .remove = mt7530_remove,
- .shutdown = mt7530_shutdown,
- .mdiodrv.driver = {
- .name = "mt7530",
- .of_match_table = mt7530_of_match,
- },
-};
-
-mdio_module_driver(mt7530_mdio_driver);
+EXPORT_SYMBOL_GPL(mt7530_remove_common);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_DESCRIPTION("Driver for Mediatek MT7530 Switch");
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 91508e2feef9..5084f48a8869 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -8,7 +8,6 @@
#define MT7530_NUM_PORTS 7
#define MT7530_NUM_PHYS 5
-#define MT7530_CPU_PORT 6
#define MT7530_NUM_FDB_RECORDS 2048
#define MT7530_ALL_MEMBERS 0xff
@@ -19,6 +18,7 @@ enum mt753x_id {
ID_MT7530 = 0,
ID_MT7621 = 1,
ID_MT7531 = 2,
+ ID_MT7988 = 3,
};
#define NUM_TRGMII_CTRL 5
@@ -55,11 +55,11 @@ enum mt753x_id {
#define MT7531_MIRROR_PORT_SET(x) (((x) & MIRROR_MASK) << 16)
#define MT7531_CPU_PMAP_MASK GENMASK(7, 0)
-#define MT753X_MIRROR_REG(id) (((id) == ID_MT7531) ? \
+#define MT753X_MIRROR_REG(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
MT7531_CFC : MT7530_MFC)
-#define MT753X_MIRROR_EN(id) (((id) == ID_MT7531) ? \
+#define MT753X_MIRROR_EN(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
MT7531_MIRROR_EN : MIRROR_EN)
-#define MT753X_MIRROR_MASK(id) (((id) == ID_MT7531) ? \
+#define MT753X_MIRROR_MASK(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
MT7531_MIRROR_MASK : MIRROR_MASK)
/* Registers for BPDU and PAE frame control*/
@@ -296,9 +296,8 @@ enum mt7530_vlan_port_acc_frm {
MT7531_FORCE_DPX | \
MT7531_FORCE_RX_FC | \
MT7531_FORCE_TX_FC)
-#define PMCR_FORCE_MODE_ID(id) (((id) == ID_MT7531) ? \
- MT7531_FORCE_MODE : \
- PMCR_FORCE_MODE)
+#define PMCR_FORCE_MODE_ID(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
+ MT7531_FORCE_MODE : PMCR_FORCE_MODE)
#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
@@ -365,46 +364,8 @@ enum mt7530_vlan_port_acc_frm {
CCR_TX_OCT_CNT_BAD)
/* MT7531 SGMII register group */
-#define MT7531_SGMII_REG_BASE 0x5000
-#define MT7531_SGMII_REG(p, r) (MT7531_SGMII_REG_BASE + \
- ((p) - 5) * 0x1000 + (r))
-
-/* Register forSGMII PCS_CONTROL_1 */
-#define MT7531_PCS_CONTROL_1(p) MT7531_SGMII_REG(p, 0x00)
-#define MT7531_SGMII_LINK_STATUS BIT(18)
-#define MT7531_SGMII_AN_ENABLE BIT(12)
-#define MT7531_SGMII_AN_RESTART BIT(9)
-
-/* Register for SGMII PCS_SPPED_ABILITY */
-#define MT7531_PCS_SPEED_ABILITY(p) MT7531_SGMII_REG(p, 0x08)
-#define MT7531_SGMII_TX_CONFIG_MASK GENMASK(15, 0)
-#define MT7531_SGMII_TX_CONFIG BIT(0)
-
-/* Register for SGMII_MODE */
-#define MT7531_SGMII_MODE(p) MT7531_SGMII_REG(p, 0x20)
-#define MT7531_SGMII_REMOTE_FAULT_DIS BIT(8)
-#define MT7531_SGMII_IF_MODE_MASK GENMASK(5, 1)
-#define MT7531_SGMII_FORCE_DUPLEX BIT(4)
-#define MT7531_SGMII_FORCE_SPEED_MASK GENMASK(3, 2)
-#define MT7531_SGMII_FORCE_SPEED_1000 BIT(3)
-#define MT7531_SGMII_FORCE_SPEED_100 BIT(2)
-#define MT7531_SGMII_FORCE_SPEED_10 0
-#define MT7531_SGMII_SPEED_DUPLEX_AN BIT(1)
-
-enum mt7531_sgmii_force_duplex {
- MT7531_SGMII_FORCE_FULL_DUPLEX = 0,
- MT7531_SGMII_FORCE_HALF_DUPLEX = 0x10,
-};
-
-/* Fields of QPHY_PWR_STATE_CTRL */
-#define MT7531_QPHY_PWR_STATE_CTRL(p) MT7531_SGMII_REG(p, 0xe8)
-#define MT7531_SGMII_PHYA_PWD BIT(4)
-
-/* Values of SGMII SPEED */
-#define MT7531_PHYA_CTRL_SIGNAL3(p) MT7531_SGMII_REG(p, 0x128)
-#define MT7531_RG_TPHY_SPEED_MASK (BIT(2) | BIT(3))
-#define MT7531_RG_TPHY_SPEED_1_25G 0x0
-#define MT7531_RG_TPHY_SPEED_3_125G BIT(2)
+#define MT7531_SGMII_REG_BASE(p) (0x5000 + ((p) - 5) * 0x1000)
+#define MT7531_PHYA_CTRL_SIGNAL3 0x128
/* Register for system reset */
#define MT7530_SYS_CTRL 0x7000
@@ -703,13 +664,13 @@ struct mt7530_fdb {
* @pm: The matrix used to show all connections with the port.
* @pvid: The VLAN specified is to be considered a PVID at ingress. Any
* untagged frames will be assigned to the related VLAN.
- * @vlan_filtering: The flags indicating whether the port that can recognize
- * VLAN-tagged frames.
+ * @sgmii_pcs: Pointer to PCS instance for SerDes ports
*/
struct mt7530_port {
bool enable;
u32 pm;
u16 pvid;
+ struct phylink_pcs *sgmii_pcs;
};
/* Port 5 interface select definitions */
@@ -721,67 +682,53 @@ enum p5_interface_select {
P5_INTF_SEL_GMAC5_SGMII,
};
-static const char *p5_intf_modes(unsigned int p5_interface)
-{
- switch (p5_interface) {
- case P5_DISABLED:
- return "DISABLED";
- case P5_INTF_SEL_PHY_P0:
- return "PHY P0";
- case P5_INTF_SEL_PHY_P4:
- return "PHY P4";
- case P5_INTF_SEL_GMAC5:
- return "GMAC5";
- case P5_INTF_SEL_GMAC5_SGMII:
- return "GMAC5_SGMII";
- default:
- return "unknown";
- }
-}
-
struct mt7530_priv;
+struct mt753x_pcs {
+ struct phylink_pcs pcs;
+ struct mt7530_priv *priv;
+ int port;
+};
+
/* struct mt753x_info - This is the main data structure for holding the specific
* part for each supported device
* @sw_setup: Holding the handler to a device initialization
- * @phy_read: Holding the way reading PHY port
- * @phy_write: Holding the way writing PHY port
+ * @phy_read_c22: Holding the way reading PHY port using C22
+ * @phy_write_c22: Holding the way writing PHY port using C22
+ * @phy_read_c45: Holding the way reading PHY port using C45
+ * @phy_write_c45: Holding the way writing PHY port using C45
* @pad_setup: Holding the way setting up the bus pad for a certain
* MAC port
* @phy_mode_supported: Check if the PHY type is being supported on a certain
* port
* @mac_port_validate: Holding the way to set addition validate type for a
 * certain MAC port
- * @mac_port_get_state: Holding the way getting the MAC/PCS state for a certain
- * MAC port
* @mac_port_config: Holding the way setting up the PHY attribute to a
* certain MAC port
- * @mac_pcs_an_restart Holding the way restarting PCS autonegotiation for a
- * certain MAC port
- * @mac_pcs_link_up: Holding the way setting up the PHY attribute to the pcs
- * of the certain MAC port
*/
struct mt753x_info {
enum mt753x_id id;
+ const struct phylink_pcs_ops *pcs_ops;
+
int (*sw_setup)(struct dsa_switch *ds);
- int (*phy_read)(struct mt7530_priv *priv, int port, int regnum);
- int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val);
+ int (*phy_read_c22)(struct mt7530_priv *priv, int port, int regnum);
+ int (*phy_write_c22)(struct mt7530_priv *priv, int port, int regnum,
+ u16 val);
+ int (*phy_read_c45)(struct mt7530_priv *priv, int port, int devad,
+ int regnum);
+ int (*phy_write_c45)(struct mt7530_priv *priv, int port, int devad,
+ int regnum, u16 val);
int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
int (*cpu_port_config)(struct dsa_switch *ds, int port);
- bool (*phy_mode_supported)(struct dsa_switch *ds, int port,
- const struct phylink_link_state *state);
+ void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
+ struct phylink_config *config);
void (*mac_port_validate)(struct dsa_switch *ds, int port,
+ phy_interface_t interface,
unsigned long *supported);
- int (*mac_port_get_state)(struct dsa_switch *ds, int port,
- struct phylink_link_state *state);
int (*mac_port_config)(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface);
- void (*mac_pcs_an_restart)(struct dsa_switch *ds, int port);
- void (*mac_pcs_link_up)(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex);
};
/* struct mt7530_priv - This is the main data structure for holding the state
@@ -789,6 +736,7 @@ struct mt753x_info {
* @dev: The device pointer
* @ds: The pointer to the dsa core structure
* @bus: The bus used for the device and built-in PHY
+ * @regmap: The regmap instance representing all switch registers
* @rstc: The pointer to reset control used by MCM
* @core_pwr: The power supplied into the core
* @io_pwr: The power supplied into the I/O
@@ -800,15 +748,16 @@ struct mt753x_info {
* registers
* @p6_interface Holding the current port 6 interface
* @p5_intf_sel: Holding the current port 5 interface select
- *
* @irq: IRQ number of the switch
* @irq_domain: IRQ domain of the switch irq_chip
* @irq_enable: IRQ enable bits, synced to SYS_INT_EN
+ * @create_sgmii: Pointer to function creating SGMII PCS instance(s)
*/
struct mt7530_priv {
struct device *dev;
struct dsa_switch *ds;
struct mii_bus *bus;
+ struct regmap *regmap;
struct reset_control *rstc;
struct regulator *core_pwr;
struct regulator *io_pwr;
@@ -821,13 +770,14 @@ struct mt7530_priv {
unsigned int p5_intf_sel;
u8 mirror_rx;
u8 mirror_tx;
-
struct mt7530_port ports[MT7530_NUM_PORTS];
+ struct mt753x_pcs pcs[MT7530_NUM_PORTS];
/* protect among processes for registers access*/
struct mutex reg_mutex;
int irq;
struct irq_domain *irq_domain;
u32 irq_enable;
+ int (*create_sgmii)(struct mt7530_priv *priv, bool dual_sgmii);
};
struct mt7530_hw_vlan_entry {
@@ -864,4 +814,10 @@ static inline void INIT_MT7530_DUMMY_POLL(struct mt7530_dummy_poll *p,
p->reg = reg;
}
+int mt7530_probe_common(struct mt7530_priv *priv);
+void mt7530_remove_common(struct mt7530_priv *priv);
+
+extern const struct dsa_switch_ops mt7530_switch_ops;
+extern const struct mt753x_info mt753x_table[];
+
#endif /* __MT7530_H */
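The hunks above split the MT7530 probe/remove paths into bus-independent helpers (mt7530_probe_common()/mt7530_remove_common()) and export mt7530_switch_ops and mt753x_table so that bus-specific front-ends can reuse them. A minimal sketch of how a hypothetical MDIO front-end could sit on top of these helpers follows; the function names are illustrative, and the real front-end additionally handles the reset line, regulators and IRQ wiring that the original mt7530_probe() contained.

#include <linux/device.h>
#include <linux/mdio.h>
#include <net/dsa.h>

#include "mt7530.h"

/* Illustrative sketch only: a bus front-end built on the common helpers
 * carved out in the diff above. Reset, regulator and IRQ handling of the
 * real driver is omitted.
 */
static int mt7530_mdio_probe(struct mdio_device *mdiodev)
{
	struct mt7530_priv *priv;
	int ret;

	priv = devm_kzalloc(&mdiodev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* mt7530_probe_common() expects dev (and, for MDIO, bus) to be set */
	priv->bus = mdiodev->bus;
	priv->dev = &mdiodev->dev;

	ret = mt7530_probe_common(priv);
	if (ret)
		return ret;

	return dsa_register_switch(priv->ds);
}

static void mt7530_mdio_remove(struct mdio_device *mdiodev)
{
	struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);

	if (priv)
		mt7530_remove_common(priv);
}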
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index a4c6eb9a52d0..fdda62d6eb16 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -118,6 +118,9 @@ static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
int addr = REG_PORT(p);
int ret;
+ if (dsa_is_unused_port(priv->ds, p))
+ return 0;
+
/* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
@@ -294,8 +297,6 @@ static void mv88e6060_remove(struct mdio_device *mdiodev)
return;
dsa_unregister_switch(ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void mv88e6060_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
index 7a2445a34eb7..e3181d5471df 100644
--- a/drivers/net/dsa/mv88e6xxx/Kconfig
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -2,7 +2,6 @@
config NET_DSA_MV88E6XXX
tristate "Marvell 88E6xxx Ethernet switch fabric support"
depends on NET_DSA
- depends on PTP_1588_CLOCK_OPTIONAL
select IRQ_DOMAIN
select NET_DSA_TAG_EDSA
select NET_DSA_TAG_DSA
@@ -13,7 +12,8 @@ config NET_DSA_MV88E6XXX
config NET_DSA_MV88E6XXX_PTP
bool "PTP support for Marvell 88E6xxx"
default n
- depends on NET_DSA_MV88E6XXX && PTP_1588_CLOCK
+ depends on (NET_DSA_MV88E6XXX = y && PTP_1588_CLOCK = y) || \
+ (NET_DSA_MV88E6XXX = m && PTP_1588_CLOCK)
help
Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
chips that support it.
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index c8eca2b6f959..1409e691ab77 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -15,3 +15,8 @@ mv88e6xxx-objs += port_hidden.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o
mv88e6xxx-objs += serdes.o
mv88e6xxx-objs += smi.o
+mv88e6xxx-objs += switchdev.o
+mv88e6xxx-objs += trace.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index cd8462d1e27c..64a2f2f83735 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -86,12 +86,16 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
u16 mask, u16 val)
{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(50);
u16 data;
int err;
int i;
- /* There's no bus specific operation to wait for a mask */
- for (i = 0; i < 16; i++) {
+ /* There's no bus specific operation to wait for a mask. Even
+ * if the initial poll takes longer than 50ms, always do at
+ * least one more attempt.
+ */
+ for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
err = mv88e6xxx_read(chip, addr, reg, &data);
if (err)
return err;
@@ -99,7 +103,10 @@ int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
if ((data & mask) == val)
return 0;
- usleep_range(1000, 2000);
+ if (i < 2)
+ cpu_relax();
+ else
+ usleep_range(1000, 2000);
}
dev_err(chip->dev, "Timeout while waiting for switch\n");
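The rewritten mv88e6xxx_wait_mask() above swaps a fixed 16-iteration loop for a 50 ms jiffies deadline, spinning for the first couple of reads before backing off to usleep_range(), and always performing at least one more read after the deadline so that a single slow MDIO transaction cannot be misreported as a timeout. A self-contained sketch of that polling idiom, with hypothetical device and read helpers standing in for the register accessors:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Illustrative sketch of the poll-until-deadline idiom used above.
 * example_dev and example_ready() are hypothetical stand-ins.
 */
static int example_poll_ready(struct example_dev *dev)
{
	const unsigned long timeout = jiffies + msecs_to_jiffies(50);
	int i;

	/* Guarantee at least one attempt after the deadline has passed */
	for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
		if (example_ready(dev))
			return 0;

		if (i < 2)
			cpu_relax();	/* first attempts: just spin */
		else
			usleep_range(1000, 2000);
	}

	return -ETIMEDOUT;
}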
@@ -442,9 +449,6 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
goto restore_link;
}
- if (speed == SPEED_MAX && chip->info->ops->port_max_speed_mode)
- mode = chip->info->ops->port_max_speed_mode(port);
-
if (chip->info->ops->port_set_pause) {
err = chip->info->ops->port_set_pause(chip, port, pause);
if (err)
@@ -563,133 +567,278 @@ static int mv88e6xxx_serdes_pcs_link_up(struct mv88e6xxx_chip *chip, int port,
return 0;
}
-static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static const u8 mv88e6185_phy_interface_modes[] = {
+ [MV88E6185_PORT_STS_CMODE_GMII_FD] = PHY_INTERFACE_MODE_GMII,
+ [MV88E6185_PORT_STS_CMODE_MII_100_FD_PS] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_MII_100] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_MII_10] = PHY_INTERFACE_MODE_MII,
+ [MV88E6185_PORT_STS_CMODE_SERDES] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6185_PORT_STS_CMODE_1000BASE_X] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6185_PORT_STS_CMODE_PHY] = PHY_INTERFACE_MODE_SGMII,
+};
+
+static void mv88e6095_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (!phy_interface_mode_is_8023z(state->interface)) {
- /* 10M and 100M are only supported in non-802.3z mode */
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
+ u8 cmode = chip->ports[port].cmode;
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
+
+ if (mv88e6xxx_phy_is_internal(chip->ds, port)) {
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ } else {
+ if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) &&
+ mv88e6185_phy_interface_modes[cmode])
+ __set_bit(mv88e6185_phy_interface_modes[cmode],
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_1000FD;
}
}
-static void mv88e6185_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6185_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- /* FIXME: if the port is in 1000Base-X mode, then it only supports
- * 1000M FD speeds. In this case, CMODE will indicate 5.
+ u8 cmode = chip->ports[port].cmode;
+
+ if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) &&
+ mv88e6185_phy_interface_modes[cmode])
+ __set_bit(mv88e6185_phy_interface_modes[cmode],
+ config->supported_interfaces);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+}
+
+static const u8 mv88e6xxx_phy_interface_modes[] = {
+ [MV88E6XXX_PORT_STS_CMODE_MII_PHY] = PHY_INTERFACE_MODE_REVMII,
+ [MV88E6XXX_PORT_STS_CMODE_MII] = PHY_INTERFACE_MODE_MII,
+ [MV88E6XXX_PORT_STS_CMODE_GMII] = PHY_INTERFACE_MODE_GMII,
+ [MV88E6XXX_PORT_STS_CMODE_RMII_PHY] = PHY_INTERFACE_MODE_REVRMII,
+ [MV88E6XXX_PORT_STS_CMODE_RMII] = PHY_INTERFACE_MODE_RMII,
+ [MV88E6XXX_PORT_STS_CMODE_100BASEX] = PHY_INTERFACE_MODE_100BASEX,
+ [MV88E6XXX_PORT_STS_CMODE_1000BASEX] = PHY_INTERFACE_MODE_1000BASEX,
+ [MV88E6XXX_PORT_STS_CMODE_SGMII] = PHY_INTERFACE_MODE_SGMII,
+ /* higher interface modes are not needed here, since ports supporting
+ * them are writable, and so the supported interfaces are filled in the
+ * corresponding .phylink_get_caps() implementation below
*/
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+};
- mv88e6065_phylink_validate(chip, port, mask, state);
+static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
+{
+ if (cmode < ARRAY_SIZE(mv88e6xxx_phy_interface_modes) &&
+ mv88e6xxx_phy_interface_modes[cmode])
+ __set_bit(mv88e6xxx_phy_interface_modes[cmode], supported);
+ else if (cmode == MV88E6XXX_PORT_STS_CMODE_RGMII)
+ phy_interface_set_rgmii(supported);
}
-static void mv88e6341_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (port >= 5)
- phylink_set(mask, 2500baseX_Full);
+ unsigned long *supported = config->supported_interfaces;
- /* No ethtool bits for 200Mbps */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
- mv88e6065_phylink_validate(chip, port, mask, state);
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
}
-static void mv88e6352_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
{
- /* No ethtool bits for 200Mbps */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ u16 reg, val;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &reg);
+ if (err)
+ return err;
+
+ /* If PHY_DETECT is zero, then we are not in auto-media mode */
+ if (!(reg & MV88E6XXX_PORT_STS_PHY_DETECT))
+ return 0xf;
+
+ val = reg & ~MV88E6XXX_PORT_STS_PHY_DETECT;
+ err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, val);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &val);
+ if (err)
+ return err;
+
+ /* Restore PHY_DETECT value */
+ err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, reg);
+ if (err)
+ return err;
- mv88e6065_phylink_validate(chip, port, mask, state);
+ return val & MV88E6XXX_PORT_STS_CMODE_MASK;
}
-static void mv88e6390_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (port >= 9) {
- phylink_set(mask, 2500baseX_Full);
- phylink_set(mask, 2500baseT_Full);
+ unsigned long *supported = config->supported_interfaces;
+ int err, cmode;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* Port 4 supports automedia if the serdes is associated with it. */
+ if (port == 4) {
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err < 0)
+ dev_err(chip->dev, "p%d: failed to read scratch\n",
+ port);
+ if (err <= 0)
+ return;
+
+ cmode = mv88e6352_get_port4_serdes_cmode(chip);
+ if (cmode < 0)
+ dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
+ port);
+ else
+ mv88e6xxx_translate_cmode(cmode, supported);
}
+}
+
+static void mv88e6341_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
/* No ethtool bits for 200Mbps */
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* The C_Mode field is programmable on port 5 */
+ if (port == 5) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
- mv88e6065_phylink_validate(chip, port, mask, state);
+ config->mac_capabilities |= MAC_2500FD;
+ }
}
-static void mv88e6390x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6390_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
- if (port >= 9) {
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseKR_Full);
+ unsigned long *supported = config->supported_interfaces;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ /* No ethtool bits for 200Mbps */
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* The C_Mode field is programmable on ports 9 and 10 */
+ if (port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+
+ config->mac_capabilities |= MAC_2500FD;
}
+}
+
+static void mv88e6390x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+
+ mv88e6390_phylink_get_caps(chip, port, config);
+
+ /* For the 6x90X, ports 2-7 can be in automedia mode.
+ * (Note that 6x90 doesn't support RXAUI nor XAUI).
+ *
+ * Port 2 can also support 1000BASE-X in automedia mode if port 9 is
+ * configured for 1000BASE-X, SGMII or 2500BASE-X.
+ * Port 3-4 can also support 1000BASE-X in automedia mode if port 9 is
+ * configured for RXAUI, 1000BASE-X, SGMII or 2500BASE-X.
+ *
+ * Port 5 can also support 1000BASE-X in automedia mode if port 10 is
+ * configured for 1000BASE-X, SGMII or 2500BASE-X.
+ * Port 6-7 can also support 1000BASE-X in automedia mode if port 10 is
+ * configured for RXAUI, 1000BASE-X, SGMII or 2500BASE-X.
+ *
+ * For now, be permissive (as the old code was) and allow 1000BASE-X
+ * on ports 2..7.
+ */
+ if (port >= 2 && port <= 7)
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
- mv88e6390_phylink_validate(chip, port, mask, state);
+ /* The C_Mode field can also be programmed for 10G speeds */
+ if (port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_XAUI, supported);
+ __set_bit(PHY_INTERFACE_MODE_RXAUI, supported);
+
+ config->mac_capabilities |= MAC_10000FD;
+ }
}
-static void mv88e6393x_phylink_validate(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state)
+static void mv88e6393x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
{
+ unsigned long *supported = config->supported_interfaces;
bool is_6191x =
chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6191X;
- if (((port == 0 || port == 9) && !is_6191x) || port == 10) {
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseKR_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- phylink_set(mask, 2500baseT_Full);
- }
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
- mv88e6065_phylink_validate(chip, port, mask, state);
+ /* The C_Mode field can be programmed for ports 0, 9 and 10 */
+ if (port == 0 || port == 9 || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+
+ /* 6191X supports >1G modes only on port 10 */
+ if (!is_6191x || port == 10) {
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+ __set_bit(PHY_INTERFACE_MODE_5GBASER, supported);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, supported);
+ /* FIXME: USXGMII is not supported yet */
+ /* __set_bit(PHY_INTERFACE_MODE_USXGMII, supported); */
+
+ config->mac_capabilities |= MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
+ }
+ }
+
+ if (port == 0) {
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_ID, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, supported);
+ }
}
-static void mv88e6xxx_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mv88e6xxx_chip *chip = ds->priv;
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set_port_modes(mask);
-
- if (chip->info->ops->phylink_validate)
- chip->info->ops->phylink_validate(chip, port, mask, state);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ mv88e6xxx_reg_lock(chip);
+ chip->info->ops->phylink_get_caps(chip, port, config);
+ mv88e6xxx_reg_unlock(chip);
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
+ if (mv88e6xxx_phy_is_internal(ds, port)) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ /* Internal ports with no phy-mode need GMII for PHYLIB */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
}
static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
@@ -989,7 +1138,7 @@ static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings); i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
mv88e6xxx_atu_vtu_stats_strings[i],
ETH_GSTRING_LEN);
}
@@ -1241,8 +1390,7 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
{
struct dsa_switch *ds = chip->ds;
struct dsa_switch_tree *dst = ds->dst;
- struct net_device *br;
- struct dsa_port *dp;
+ struct dsa_port *dp, *other_dp;
bool found = false;
u16 pvlan;
@@ -1251,11 +1399,9 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
list_for_each_entry(dp, &dst->ports, list) {
if (dp->ds->index == dev && dp->index == port) {
/* dp might be a DSA link or a user port, so it
- * might or might not have a bridge_dev
- * pointer. Use the "found" variable for both
- * cases.
+ * might or might not have a bridge.
+ * Use the "found" variable for both cases.
*/
- br = dp->bridge_dev;
found = true;
break;
}
@@ -1263,13 +1409,14 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
/* dev is a virtual bridge */
} else {
list_for_each_entry(dp, &dst->ports, list) {
- if (dp->bridge_num < 0)
+ unsigned int bridge_num = dsa_port_bridge_num_get(dp);
+
+ if (!bridge_num)
continue;
- if (dp->bridge_num + 1 + dst->last_switch != dev)
+ if (bridge_num + dst->last_switch != dev)
continue;
- br = dp->bridge_dev;
found = true;
break;
}
@@ -1285,15 +1432,21 @@ static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port)
pvlan = 0;
- /* Frames from user ports can egress any local DSA links and CPU ports,
- * as well as any local member of their bridge group.
+ /* Frames from standalone user ports can only egress on the
+ * upstream port.
+ */
+ if (!dsa_port_bridge_dev_get(dp))
+ return BIT(dsa_switch_upstream_port(ds));
+
+ /* Frames from bridged user ports can egress any local DSA
+ * links and CPU ports, as well as any local member of their
+ * bridge group.
*/
- list_for_each_entry(dp, &dst->ports, list)
- if (dp->ds == ds &&
- (dp->type == DSA_PORT_TYPE_CPU ||
- dp->type == DSA_PORT_TYPE_DSA ||
- (br && dp->bridge_dev == br)))
- pvlan |= BIT(dp->index);
+ dsa_switch_for_each_port(other_dp, ds)
+ if (other_dp->type == DSA_PORT_TYPE_CPU ||
+ other_dp->type == DSA_PORT_TYPE_DSA ||
+ dsa_port_bridge_same(dp, other_dp))
+ pvlan |= BIT(other_dp->index);
return pvlan;
}
@@ -1479,15 +1632,16 @@ static int mv88e6xxx_pvt_map(struct mv88e6xxx_chip *chip, int dev, int port)
ds = dsa_switch_find(dst->index, dev);
dp = ds ? dsa_to_port(ds, port) : NULL;
- if (dp && dp->lag_dev) {
+ if (dp && dp->lag) {
/* As the PVT is used to limit flooding of
* FORWARD frames, which use the LAG ID as the
* source port, we must translate dev/port to
* the special "LAG device" in the PVT, using
- * the LAG ID as the port number.
+ * the LAG ID (one-based) as the port number
+ * (zero-based).
*/
dev = MV88E6XXX_G2_PVT_ADDR_DEV_TRUNK;
- port = dsa_lag_id(dst, dp->lag_dev);
+ port = dsa_port_lag_id_get(dp) - 1;
}
}
@@ -1520,24 +1674,31 @@ static int mv88e6xxx_pvt_setup(struct mv88e6xxx_chip *chip)
return 0;
}
-static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+static int mv88e6xxx_port_fast_age_fid(struct mv88e6xxx_chip *chip, int port,
+ u16 fid)
{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- if (dsa_to_port(ds, port)->lag_dev)
+ if (dsa_to_port(chip->ds, port)->lag)
/* Hardware is incapable of fast-aging a LAG through a
* regular ATU move operation. Until we have something
* more fancy in place this is a no-op.
*/
- return;
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_g1_atu_remove(chip, fid, port, false);
+}
+
+static void mv88e6xxx_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_g1_atu_remove(chip, 0, port, false);
+ err = mv88e6xxx_port_fast_age_fid(chip, port, 0);
mv88e6xxx_reg_unlock(chip);
if (err)
- dev_err(ds->dev, "p%d: failed to flush ATU\n", port);
+ dev_err(chip->ds->dev, "p%d: failed to flush ATU: %d\n",
+ port, err);
}
static int mv88e6xxx_vtu_setup(struct mv88e6xxx_chip *chip)
@@ -1567,11 +1728,11 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
return err;
}
-static int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
- int (*cb)(struct mv88e6xxx_chip *chip,
- const struct mv88e6xxx_vtu_entry *entry,
- void *priv),
- void *priv)
+int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
+ int (*cb)(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *priv),
+ void *priv)
{
struct mv88e6xxx_vtu_entry entry = {
.vid = mv88e6xxx_max_vid(chip),
@@ -1619,21 +1780,11 @@ static int mv88e6xxx_fid_map_vlan(struct mv88e6xxx_chip *chip,
int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
{
- int i, err;
- u16 fid;
-
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
- /* Set every FID bit used by the (un)bridged ports */
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- err = mv88e6xxx_port_get_fid(chip, i, &fid);
- if (err)
- return err;
-
- set_bit(fid, fid_bitmap);
- }
-
- /* Set every FID bit used by the VLAN entries */
+ /* Every FID has an associated VID, so walking the VTU
+ * will discover the full set of FIDs in use.
+ */
return mv88e6xxx_vtu_walk(chip, mv88e6xxx_fid_map_vlan, fid_bitmap);
}
@@ -1646,10 +1797,7 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
if (err)
return err;
- /* The reset value 0x000 is used to indicate that multiple address
- * databases are not needed. Return the next positive available.
- */
- *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
+ *fid = find_first_zero_bit(fid_bitmap, MV88E6XXX_N_FID);
if (unlikely(*fid >= mv88e6xxx_num_databases(chip)))
return -ENOSPC;
@@ -1657,15 +1805,197 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
+static int mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ if (!chip->info->ops->stu_loadpurge)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->stu_loadpurge(chip, entry);
+}
+
+static int mv88e6xxx_stu_setup(struct mv88e6xxx_chip *chip)
+{
+ struct mv88e6xxx_stu_entry stu = {
+ .valid = true,
+ .sid = 0
+ };
+
+ if (!mv88e6xxx_has_stu(chip))
+ return 0;
+
+ /* Make sure that SID 0 is always valid. This is used by VTU
+ * entries that do not make use of the STU, e.g. when creating
+ * a VLAN upper on a port that is also part of a VLAN
+ * filtering bridge.
+ */
+ return mv88e6xxx_stu_loadpurge(chip, &stu);
+}
+
+static int mv88e6xxx_sid_get(struct mv88e6xxx_chip *chip, u8 *sid)
+{
+ DECLARE_BITMAP(busy, MV88E6XXX_N_SID) = { 0 };
+ struct mv88e6xxx_mst *mst;
+
+ __set_bit(0, busy);
+
+ list_for_each_entry(mst, &chip->msts, node)
+ __set_bit(mst->stu.sid, busy);
+
+ *sid = find_first_zero_bit(busy, MV88E6XXX_N_SID);
+
+ return (*sid >= mv88e6xxx_max_sid(chip)) ? -ENOSPC : 0;
+}
+
+static int mv88e6xxx_mst_put(struct mv88e6xxx_chip *chip, u8 sid)
+{
+ struct mv88e6xxx_mst *mst, *tmp;
+ int err;
+
+ if (!sid)
+ return 0;
+
+ list_for_each_entry_safe(mst, tmp, &chip->msts, node) {
+ if (mst->stu.sid != sid)
+ continue;
+
+ if (!refcount_dec_and_test(&mst->refcnt))
+ return 0;
+
+ mst->stu.valid = false;
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ if (err) {
+ refcount_set(&mst->refcnt, 1);
+ return err;
+ }
+
+ list_del(&mst->node);
+ kfree(mst);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int mv88e6xxx_mst_get(struct mv88e6xxx_chip *chip, struct net_device *br,
+ u16 msti, u8 *sid)
+{
+ struct mv88e6xxx_mst *mst;
+ int err, i;
+
+ if (!mv88e6xxx_has_stu(chip)) {
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (!msti) {
+ *sid = 0;
+ return 0;
+ }
+
+ list_for_each_entry(mst, &chip->msts, node) {
+ if (mst->br == br && mst->msti == msti) {
+ refcount_inc(&mst->refcnt);
+ *sid = mst->stu.sid;
+ return 0;
+ }
+ }
+
+ err = mv88e6xxx_sid_get(chip, sid);
+ if (err)
+ goto err;
+
+ mst = kzalloc(sizeof(*mst), GFP_KERNEL);
+ if (!mst) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ INIT_LIST_HEAD(&mst->node);
+ refcount_set(&mst->refcnt, 1);
+ mst->br = br;
+ mst->msti = msti;
+ mst->stu.valid = true;
+ mst->stu.sid = *sid;
+
+ /* The bridge starts out all ports in the disabled state. But
+ * a STU state of disabled means to go by the port-global
+ * state. So we set all user port's initial state to blocking,
+ * to match the bridge's behavior.
+ */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ mst->stu.state[i] = dsa_is_user_port(chip->ds, i) ?
+ MV88E6XXX_PORT_CTL0_STATE_BLOCKING :
+ MV88E6XXX_PORT_CTL0_STATE_DISABLED;
+
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ if (err)
+ goto err_free;
+
+ list_add_tail(&mst->node, &chip->msts);
+ return 0;
+
+err_free:
+ kfree(mst);
+err:
+ return err;
+}
+
+static int mv88e6xxx_port_mst_state_set(struct dsa_switch *ds, int port,
+ const struct switchdev_mst_state *st)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_mst *mst;
+ u8 state;
+ int err;
+
+ if (!mv88e6xxx_has_stu(chip))
+ return -EOPNOTSUPP;
+
+ switch (st->state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ state = MV88E6XXX_PORT_CTL0_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ state = MV88E6XXX_PORT_CTL0_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ state = MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ list_for_each_entry(mst, &chip->msts, node) {
+ if (mst->br == dsa_port_bridge_dev_get(dp) &&
+ mst->msti == st->msti) {
+ if (mst->stu.state[port] == state)
+ return 0;
+
+ mst->stu.state[port] = state;
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_stu_loadpurge(chip, &mst->stu);
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+ }
+ }
+
+ return -ENOENT;
+}
+
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid)
{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_vtu_entry vlan;
- int i, err;
+ int err;
/* DSA and CPU ports have to be members of multiple vlans */
- if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
return 0;
err = mv88e6xxx_vtu_get(chip, vid, &vlan);
@@ -1675,27 +2005,22 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (!vlan.valid)
return 0;
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
- continue;
-
- if (!dsa_to_port(ds, i)->slave)
- continue;
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *other_br;
- if (vlan.member[i] ==
+ if (vlan.member[other_dp->index] ==
MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER)
continue;
- if (dsa_to_port(ds, i)->bridge_dev ==
- dsa_to_port(ds, port)->bridge_dev)
+ if (dsa_port_bridge_same(dp, other_dp))
break; /* same bridge, check next VLAN */
- if (!dsa_to_port(ds, i)->bridge_dev)
+ other_br = dsa_port_bridge_dev_get(other_dp);
+ if (!other_br)
continue;
dev_err(ds->dev, "p%d: hw VLAN %d already used by port %d in %s\n",
- port, vlan.vid, i,
- netdev_name(dsa_to_port(ds, i)->bridge_dev));
+ port, vlan.vid, other_dp->index, netdev_name(other_br));
return -EOPNOTSUPP;
}
@@ -1705,13 +2030,14 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
static int mv88e6xxx_port_commit_pvid(struct mv88e6xxx_chip *chip, int port)
{
struct dsa_port *dp = dsa_to_port(chip->ds, port);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct mv88e6xxx_port *p = &chip->ports[port];
u16 pvid = MV88E6XXX_VID_STANDALONE;
bool drop_untagged = false;
int err;
- if (dp->bridge_dev) {
- if (br_vlan_enabled(dp->bridge_dev)) {
+ if (br) {
+ if (br_vlan_enabled(br)) {
pvid = p->bridge_pvid.vid;
drop_untagged = !p->bridge_pvid.valid;
} else {
@@ -2144,6 +2470,9 @@ static int mv88e6xxx_port_vlan_join(struct mv88e6xxx_chip *chip, int port,
if (!vlan.valid) {
memset(&vlan, 0, sizeof(vlan));
+ if (vid == MV88E6XXX_VID_STANDALONE)
+ vlan.policy = true;
+
err = mv88e6xxx_atu_new(chip, &vlan.fid);
if (err)
return err;
@@ -2276,6 +2605,12 @@ static int mv88e6xxx_port_vlan_leave(struct mv88e6xxx_chip *chip,
if (err)
return err;
+ if (!vlan.valid) {
+ err = mv88e6xxx_mst_put(chip, vlan.sid);
+ if (err)
+ return err;
+ }
+
return mv88e6xxx_g1_atu_remove(chip, vlan.fid, port, false);
}
@@ -2290,6 +2625,13 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
+ /* The ATU removal procedure needs the FID to be mapped in the VTU,
+ * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA
+ * switchdev workqueue to ensure that all FDB entries are deleted
+ * before we remove the VLAN.
+ */
+ dsa_flush_workqueue();
+
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
@@ -2314,8 +2656,75 @@ unlock:
return err;
}
+static int mv88e6xxx_port_vlan_fast_age(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_vtu_get(chip, vid, &vlan);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_fast_age_fid(chip, port, vlan.fid);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_vlan_msti_set(struct dsa_switch *ds,
+ struct dsa_bridge bridge,
+ const struct switchdev_vlan_msti *msti)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_vtu_entry vlan;
+ u8 old_sid, new_sid;
+ int err;
+
+ if (!mv88e6xxx_has_stu(chip))
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_vtu_get(chip, msti->vid, &vlan);
+ if (err)
+ goto unlock;
+
+ if (!vlan.valid) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ old_sid = vlan.sid;
+
+ err = mv88e6xxx_mst_get(chip, bridge.dev, msti->msti, &new_sid);
+ if (err)
+ goto unlock;
+
+ if (new_sid != old_sid) {
+ vlan.sid = new_sid;
+
+ err = mv88e6xxx_vtu_loadpurge(chip, &vlan);
+ if (err) {
+ mv88e6xxx_mst_put(chip, new_sid);
+ goto unlock;
+ }
+ }
+
+ err = mv88e6xxx_mst_put(chip, old_sid);
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+ return err;
+}
+
static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2329,7 +2738,8 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2429,7 +2839,7 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct dsa_switch *ds = chip->ds;
struct dsa_switch_tree *dst = ds->dst;
@@ -2437,7 +2847,7 @@ static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
int err;
list_for_each_entry(dp, &dst->ports, list) {
- if (dp->bridge_dev == br) {
+ if (dsa_port_offloads_bridge(dp, &bridge)) {
if (dp->ds == ds) {
/* This is a local bridge group member,
* remap its Port VLAN Map.
@@ -2460,15 +2870,34 @@ static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
return 0;
}
+/* Treat the software bridge as a virtual single-port switch behind the
+ * CPU and map in the PVT. First dst->last_switch elements are taken by
+ * physical switches, so start from beyond that range.
+ */
+static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds,
+ unsigned int bridge_num)
+{
+ u8 dev = bridge_num + ds->dst->last_switch;
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ return mv88e6xxx_pvt_map(chip, dev, 0);
+}
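As a worked example of the numbering used by mv88e6xxx_map_virtual_bridge_to_pvt() above (assuming a tree of two chips with switch indices 0 and 1, so dst->last_switch == 1): physical devices occupy PVT device numbers 0 and 1, the first offloaded bridge gets bridge.num == 1 from DSA, and is therefore programmed as virtual device 1 + 1 == 2, one past the last physical switch, always using port 0.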
+
static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_bridge_map(chip, br);
+ err = mv88e6xxx_bridge_map(chip, bridge);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_port_set_map_da(chip, port, true);
if (err)
goto unlock;
@@ -2476,6 +2905,14 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
if (err)
goto unlock;
+ if (mv88e6xxx_has_pvt(chip)) {
+ err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num);
+ if (err)
+ goto unlock;
+
+ *tx_fwd_offload = true;
+ }
+
unlock:
mv88e6xxx_reg_unlock(chip);
@@ -2483,17 +2920,27 @@ unlock:
}
static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
mv88e6xxx_reg_lock(chip);
- if (mv88e6xxx_bridge_map(chip, br) ||
+ if (bridge.tx_fwd_offload &&
+ mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num))
+ dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
+
+ if (mv88e6xxx_bridge_map(chip, bridge) ||
mv88e6xxx_port_vlan_map(chip, port))
dev_err(ds->dev, "failed to remap in-chip Port VLAN\n");
+ err = mv88e6xxx_port_set_map_da(chip, port, false);
+ if (err)
+ dev_err(ds->dev,
+ "port %d failed to restore map-DA: %pe\n",
+ port, ERR_PTR(err));
+
err = mv88e6xxx_port_commit_pvid(chip, port);
if (err)
dev_err(ds->dev,
@@ -2505,7 +2952,8 @@ static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
int tree_index, int sw_index,
- int port, struct net_device *br)
+ int port, struct dsa_bridge bridge,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -2515,6 +2963,7 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_pvt_map(chip, sw_index, port);
+ err = err ? : mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num);
mv88e6xxx_reg_unlock(chip);
return err;
@@ -2522,7 +2971,7 @@ static int mv88e6xxx_crosschip_bridge_join(struct dsa_switch *ds,
static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds,
int tree_index, int sw_index,
- int port, struct net_device *br)
+ int port, struct dsa_bridge bridge)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -2530,49 +2979,12 @@ static void mv88e6xxx_crosschip_bridge_leave(struct dsa_switch *ds,
return;
mv88e6xxx_reg_lock(chip);
- if (mv88e6xxx_pvt_map(chip, sw_index, port))
+ if (mv88e6xxx_pvt_map(chip, sw_index, port) ||
+ mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge.num))
dev_err(ds->dev, "failed to remap cross-chip Port VLAN\n");
mv88e6xxx_reg_unlock(chip);
}
-/* Treat the software bridge as a virtual single-port switch behind the
- * CPU and map in the PVT. First dst->last_switch elements are taken by
- * physical switches, so start from beyond that range.
- */
-static int mv88e6xxx_map_virtual_bridge_to_pvt(struct dsa_switch *ds,
- int bridge_num)
-{
- u8 dev = bridge_num + ds->dst->last_switch + 1;
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_pvt_map(chip, dev, 0);
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
-static int mv88e6xxx_bridge_tx_fwd_offload(struct dsa_switch *ds, int port,
- struct net_device *br,
- int bridge_num)
-{
- return mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num);
-}
-
-static void mv88e6xxx_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port,
- struct net_device *br,
- int bridge_num)
-{
- int err;
-
- err = mv88e6xxx_map_virtual_bridge_to_pvt(ds, bridge_num);
- if (err) {
- dev_err(ds->dev, "failed to remap cross-chip Port VLAN: %pe\n",
- ERR_PTR(err));
- }
-}
-
static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
{
if (chip->info->ops->reset)
@@ -2873,27 +3285,58 @@ static int mv88e6xxx_setup_upstream_port(struct mv88e6xxx_chip *chip, int port)
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
+ struct device_node *phy_handle = NULL;
struct dsa_switch *ds = chip->ds;
+ phy_interface_t mode;
+ struct dsa_port *dp;
+ int tx_amp, speed;
int err;
u16 reg;
chip->ports[port].chip = chip;
chip->ports[port].port = port;
+ dp = dsa_to_port(ds, port);
+
/* MAC Forcing register: don't force link, speed, duplex or flow control
* state to any particular values on physical ports, but force the CPU
* port and all DSA ports to their maximum bandwidth and full duplex.
*/
- if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
+ if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+ struct phylink_config pl_config = {};
+ unsigned long caps;
+
+ chip->info->ops->phylink_get_caps(chip, port, &pl_config);
+
+ caps = pl_config.mac_capabilities;
+
+ if (chip->info->ops->port_max_speed_mode)
+ mode = chip->info->ops->port_max_speed_mode(port);
+ else
+ mode = PHY_INTERFACE_MODE_NA;
+
+ if (caps & MAC_10000FD)
+ speed = SPEED_10000;
+ else if (caps & MAC_5000FD)
+ speed = SPEED_5000;
+ else if (caps & MAC_2500FD)
+ speed = SPEED_2500;
+ else if (caps & MAC_1000)
+ speed = SPEED_1000;
+ else if (caps & MAC_100)
+ speed = SPEED_100;
+ else
+ speed = SPEED_10;
+
err = mv88e6xxx_port_setup_mac(chip, port, LINK_FORCED_UP,
- SPEED_MAX, DUPLEX_FULL,
- PAUSE_OFF,
- PHY_INTERFACE_MODE_NA);
- else
+ speed, DUPLEX_FULL,
+ PAUSE_OFF, mode);
+ } else {
err = mv88e6xxx_port_setup_mac(chip, port, LINK_UNFORCED,
SPEED_UNFORCED, DUPLEX_UNFORCED,
PAUSE_ON,
PHY_INTERFACE_MODE_NA);
+ }
if (err)
return err;
@@ -2911,9 +3354,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* If this is the upstream port for this switch, enable
* forwarding of unknown unicasts and multicasts.
*/
- reg = MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP |
- MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
+ reg = MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+ /* Forward any IPv4 IGMP or IPv6 MLD frames received
+ * by a USER port to the CPU port to allow snooping.
+ */
+ if (dsa_is_user_port(ds, port))
+ reg |= MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP;
+
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
if (err)
return err;
@@ -2927,12 +3375,13 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
/* Port Control 2: don't force a good FCS, set the MTU size to
- * 10222 bytes, disable 802.1q tags checking, don't discard tagged or
- * untagged frames on this port, do a destination address lookup on all
- * received packets as usual, disable ARP mirroring and don't send a
- * copy of all transmitted/received frames on this port to the CPU.
+ * 10222 bytes, disable 802.1q tags checking, don't discard
+ * tagged or untagged frames on this port, skip destination
+ * address lookup on user ports, disable ARP mirroring and don't
+ * send a copy of all transmitted/received frames on this port
+ * to the CPU.
*/
- err = mv88e6xxx_port_set_map_da(chip, port);
+ err = mv88e6xxx_port_set_map_da(chip, port, !dsa_is_user_port(ds, port));
if (err)
return err;
@@ -2940,8 +3389,44 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
+ /* On chips that support it, set all downstream DSA ports'
+ * VLAN policy to TRAP. In combination with loading
+ * MV88E6XXX_VID_STANDALONE as a policy entry in the VTU, this
+ * provides a better isolation barrier between standalone
+ * ports, as the ATU is bypassed on any intermediate switches
+ * between the incoming port and the CPU.
+ */
+ if (dsa_is_downstream_port(ds, port) &&
+ chip->info->ops->port_set_policy) {
+ err = chip->info->ops->port_set_policy(chip, port,
+ MV88E6XXX_POLICY_MAPPING_VTU,
+ MV88E6XXX_POLICY_ACTION_TRAP);
+ if (err)
+ return err;
+ }
+
+ /* User ports start out in standalone mode and 802.1Q is
+ * therefore disabled. On DSA ports, all valid VIDs are always
+ * loaded in the VTU - therefore, enable 802.1Q in order to take
+ * advantage of VLAN policy on chips that supports it.
+ */
err = mv88e6xxx_port_set_8021q_mode(chip, port,
- MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED);
+ dsa_is_user_port(ds, port) ?
+ MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED :
+ MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE);
+ if (err)
+ return err;
+
+ /* Bind MV88E6XXX_VID_STANDALONE to MV88E6XXX_FID_STANDALONE by
+ * virtue of the fact that mv88e6xxx_atu_new() will pick it as
+ * the first free FID. This will be used as the private PVID for
+ * unbridged ports. Shared (DSA and CPU) ports must also be
+ * members of this VID, in order to trap all frames assigned to
+ * it to the CPU.
+ */
+ err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_STANDALONE,
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED,
+ false);
if (err)
return err;
@@ -2954,7 +3439,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
* relying on their port default FID.
*/
err = mv88e6xxx_port_vlan_join(chip, port, MV88E6XXX_VID_BRIDGED,
- MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED,
+ MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNMODIFIED,
false);
if (err)
return err;
@@ -3027,6 +3512,22 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
}
+ if (chip->info->ops->serdes_set_tx_amplitude) {
+ if (dp)
+ phy_handle = of_parse_phandle(dp->dn, "phy-handle", 0);
+
+ if (phy_handle && !of_property_read_u32(phy_handle,
+ "tx-p2p-microvolt",
+ &tx_amp))
+ err = chip->info->ops->serdes_set_tx_amplitude(chip,
+ port, tx_amp);
+ if (phy_handle) {
+ of_node_put(phy_handle);
+ if (err)
+ return err;
+ }
+ }
+
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
@@ -3053,7 +3554,7 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
else if (chip->info->ops->set_max_frame_size)
return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
- return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+ return ETH_DATA_LEN;
}
static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
@@ -3061,6 +3562,17 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
struct mv88e6xxx_chip *chip = ds->priv;
int ret = 0;
+ /* For families where we don't know how to alter the MTU,
+ * just accept any value up to ETH_DATA_LEN
+ */
+ if (!chip->info->ops->port_set_jumbo_size &&
+ !chip->info->ops->set_max_frame_size) {
+ if (new_mtu > ETH_DATA_LEN)
+ return -EINVAL;
+
+ return 0;
+ }
+
if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
new_mtu += EDSA_HLEN;
@@ -3069,9 +3581,6 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
else if (chip->info->ops->set_max_frame_size)
ret = chip->info->ops->set_max_frame_size(chip, new_mtu);
- else
- if (new_mtu > 1522)
- ret = -EINVAL;
mv88e6xxx_reg_unlock(chip);
return ret;
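
A worked example of the MTU limits used in the two hunks above, assuming the usual kernel constants (VLAN_ETH_HLEN = 18, EDSA_HLEN = 8, ETH_FCS_LEN = 4, ETH_DATA_LEN = 1500); this is an illustration of the arithmetic, not code from the driver:

	int max_jumbo = 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN; /* 10210 */
	int max_1632  =  1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN; /*  1602 */
	/* Families with neither op are now capped at ETH_DATA_LEN (1500), and
	 * change_mtu() simply rejects larger values instead of programming them.
	 * On DSA/CPU ports the requested MTU is bumped by EDSA_HLEN (8) first.
	 */
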
@@ -3176,11 +3685,220 @@ static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
return mv88e6xxx_software_reset(chip);
}
+/* prod_id for switch families which do not have a PHY model number */
+static const u16 family_prod_id_table[] = {
+ [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
+ [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
+ [MV88E6XXX_FAMILY_6393] = MV88E6XXX_PORT_SWITCH_ID_PROD_6393X,
+};
+
+static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ u16 prod_id;
+ u16 val;
+ int err;
+
+ if (!chip->info->ops->phy_read)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
+ mv88e6xxx_reg_unlock(chip);
+
+ /* Some internal PHYs don't have a model number. */
+ if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
+ chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
+ prod_id = family_prod_id_table[chip->info->family];
+ if (prod_id)
+ val |= prod_id >> 4;
+ }
+
+ return err ? err : val;
+}
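
A minimal sketch of the MII_PHYSID2 fixup performed above. PHYSID2 is laid out as OUI[15:10] | model[9:4] | revision[3:0], so a zero model field is exactly what (val & 0x3f0) == 0 detects. The values below are assumptions chosen for illustration (0x3900 is the 6390 product ID in this driver's headers):

	u16 val = 0x0c01;          /* OUI bits present, model == 0, revision 1 */
	u16 prod_id = 0x3900;      /* MV88E6XXX_PORT_SWITCH_ID_PROD_6390       */

	if (!(val & 0x3f0))
		val |= prod_id >> 4;   /* val == 0x0f91, model field == 0x39 */
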
+
+static int mv88e6xxx_mdio_read_c45(struct mii_bus *bus, int phy, int devad,
+ int reg)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ u16 val;
+ int err;
+
+ if (!chip->info->ops->phy_read_c45)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err ? err : val;
+}
+
+static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ int err;
+
+ if (!chip->info->ops->phy_write)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->phy_write(chip, bus, phy, reg, val);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_mdio_write_c45(struct mii_bus *bus, int phy, int devad,
+ int reg, u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ int err;
+
+ if (!chip->info->ops->phy_write_c45)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->phy_write_c45(chip, bus, phy, devad, reg, val);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
+ struct device_node *np,
+ bool external)
+{
+ static int index;
+ struct mv88e6xxx_mdio_bus *mdio_bus;
+ struct mii_bus *bus;
+ int err;
+
+ if (external) {
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_g2_scratch_gpio_set_smi(chip, true);
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err)
+ return err;
+ }
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_bus));
+ if (!bus)
+ return -ENOMEM;
+
+ mdio_bus = bus->priv;
+ mdio_bus->bus = bus;
+ mdio_bus->chip = chip;
+ INIT_LIST_HEAD(&mdio_bus->list);
+ mdio_bus->external = external;
+
+ if (np) {
+ bus->name = np->full_name;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%pOF", np);
+ } else {
+ bus->name = "mv88e6xxx SMI";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "mv88e6xxx-%d", index++);
+ }
+
+ bus->read = mv88e6xxx_mdio_read;
+ bus->write = mv88e6xxx_mdio_write;
+ bus->read_c45 = mv88e6xxx_mdio_read_c45;
+ bus->write_c45 = mv88e6xxx_mdio_write_c45;
+ bus->parent = chip->dev;
+ bus->phy_mask = ~GENMASK(chip->info->phy_base_addr +
+ mv88e6xxx_num_ports(chip) - 1,
+ chip->info->phy_base_addr);
+
+ if (!external) {
+ err = mv88e6xxx_g2_irq_mdio_setup(chip, bus);
+ if (err)
+ goto out;
+ }
+
+ err = of_mdiobus_register(bus, np);
+ if (err) {
+ dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
+ mv88e6xxx_g2_irq_mdio_free(chip, bus);
+ goto out;
+ }
+
+ if (external)
+ list_add_tail(&mdio_bus->list, &chip->mdios);
+ else
+ list_add(&mdio_bus->list, &chip->mdios);
+
+ return 0;
+
+out:
+ mdiobus_free(bus);
+ return err;
+}
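
A worked example of the phy_mask computation above, using the values the info table further down lists for the 6141/6341 family (phy_base_addr = 0x10, num_ports = 6) purely as an illustration. mii_bus::phy_mask marks MDIO addresses that bus scanning should skip, so inverting the GENMASK() leaves only the internal PHY range probeable:

	u32 phy_mask = ~GENMASK(0x10 + 6 - 1, 0x10);   /* ~GENMASK(21, 16) */
	/* phy_mask == 0xffc0ffff: only MDIO addresses 16..21 are scanned */
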
+
+static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus, *p;
+ struct mii_bus *bus;
+
+ list_for_each_entry_safe(mdio_bus, p, &chip->mdios, list) {
+ bus = mdio_bus->bus;
+
+ if (!mdio_bus->external)
+ mv88e6xxx_g2_irq_mdio_free(chip, bus);
+
+ mdiobus_unregister(bus);
+ mdiobus_free(bus);
+ }
+}
+
+static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip)
+{
+ struct device_node *np = chip->dev->of_node;
+ struct device_node *child;
+ int err;
+
+ /* Always register one mdio bus for the internal/default mdio
+	 * bus. This may be represented in the device tree, but is
+ * optional.
+ */
+ child = of_get_child_by_name(np, "mdio");
+ err = mv88e6xxx_mdio_register(chip, child, false);
+ of_node_put(child);
+ if (err)
+ return err;
+
+ /* Walk the device tree, and see if there are any other nodes
+ * which say they are compatible with the external mdio
+ * bus.
+ */
+ for_each_available_child_of_node(np, child) {
+ if (of_device_is_compatible(
+ child, "marvell,mv88e6xxx-mdio-external")) {
+ err = mv88e6xxx_mdio_register(chip, child, true);
+ if (err) {
+ mv88e6xxx_mdios_unregister(chip);
+ of_node_put(child);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
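
The external-bus walk above relies on the usual for_each_available_child_of_node() reference semantics: the iterator holds a reference on the current child and drops it when advancing, so an early return has to call of_node_put() itself, as the error path does. A generic sketch of that pattern, with a hypothetical compatible string and helper:

	struct device_node *child;
	int err;

	for_each_available_child_of_node(np, child) {
		if (!of_device_is_compatible(child, "vendor,example-bus"))
			continue;

		err = register_child_bus(child);   /* hypothetical helper */
		if (err) {
			of_node_put(child);   /* balance the iterator's reference */
			return err;
		}
	}
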
+
static void mv88e6xxx_teardown(struct dsa_switch *ds)
{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
mv88e6xxx_teardown_devlink_params(ds);
dsa_devlink_resources_unregister(ds);
mv88e6xxx_teardown_devlink_regions_global(ds);
+ mv88e6xxx_mdios_unregister(chip);
}
static int mv88e6xxx_setup(struct dsa_switch *ds)
@@ -3190,6 +3908,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
int err;
int i;
+ err = mv88e6xxx_mdios_register(chip);
+ if (err)
+ return err;
+
chip->ds = ds;
ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
@@ -3199,8 +3921,8 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
* time.
*/
if (mv88e6xxx_has_pvt(chip))
- ds->num_fwd_offloading_bridges = MV88E6XXX_MAX_PVT_SWITCHES -
- ds->dst->last_switch - 1;
+ ds->max_num_bridges = MV88E6XXX_MAX_PVT_SWITCHES -
+ ds->dst->last_switch - 1;
mv88e6xxx_reg_lock(chip);
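
A worked example for the max_num_bridges assignment above, assuming MV88E6XXX_MAX_PVT_SWITCHES is 32 (the number of device IDs the cross-chip PVT can address) and a tree of three switches, i.e. ds->dst->last_switch == 2:

	int max_num_bridges = 32 - 2 - 1;   /* 29 PVT device IDs left over */
	/* Three IDs are consumed by the physical switches themselves; the
	 * remainder can be handed out as virtual devices for TX forwarding
	 * offload, one per offloaded bridge.
	 */
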
@@ -3225,6 +3947,13 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
if (err)
goto unlock;
+ /* Must be called after mv88e6xxx_vtu_setup (which flushes the
+ * VTU, thereby also flushing the STU).
+ */
+ err = mv88e6xxx_stu_setup(chip);
+ if (err)
+ goto unlock;
+
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
if (dsa_is_unused_port(ds, i))
@@ -3309,7 +4038,7 @@ unlock:
mv88e6xxx_reg_unlock(chip);
if (err)
- return err;
+ goto out_mdios;
/* Have to be called without holding the register lock, since
* they take the devlink lock, and we later take the locks in
@@ -3318,7 +4047,7 @@ unlock:
*/
err = mv88e6xxx_setup_devlink_resources(ds);
if (err)
- return err;
+ goto out_mdios;
err = mv88e6xxx_setup_devlink_params(ds);
if (err)
@@ -3334,6 +4063,8 @@ out_params:
mv88e6xxx_teardown_devlink_params(ds);
out_resources:
dsa_devlink_resources_unregister(ds);
+out_mdios:
+ mv88e6xxx_mdios_unregister(chip);
return err;
}
@@ -3348,166 +4079,6 @@ static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
mv88e6xxx_teardown_devlink_regions_port(ds, port);
}
-/* prod_id for switch families which do not have a PHY model number */
-static const u16 family_prod_id_table[] = {
- [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
- [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
- [MV88E6XXX_FAMILY_6393] = MV88E6XXX_PORT_SWITCH_ID_PROD_6393X,
-};
-
-static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
-{
- struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
- struct mv88e6xxx_chip *chip = mdio_bus->chip;
- u16 prod_id;
- u16 val;
- int err;
-
- if (!chip->info->ops->phy_read)
- return -EOPNOTSUPP;
-
- mv88e6xxx_reg_lock(chip);
- err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
- mv88e6xxx_reg_unlock(chip);
-
- /* Some internal PHYs don't have a model number. */
- if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
- chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
- prod_id = family_prod_id_table[chip->info->family];
- if (prod_id)
- val |= prod_id >> 4;
- }
-
- return err ? err : val;
-}
-
-static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
-{
- struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
- struct mv88e6xxx_chip *chip = mdio_bus->chip;
- int err;
-
- if (!chip->info->ops->phy_write)
- return -EOPNOTSUPP;
-
- mv88e6xxx_reg_lock(chip);
- err = chip->info->ops->phy_write(chip, bus, phy, reg, val);
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
-static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
- struct device_node *np,
- bool external)
-{
- static int index;
- struct mv88e6xxx_mdio_bus *mdio_bus;
- struct mii_bus *bus;
- int err;
-
- if (external) {
- mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_g2_scratch_gpio_set_smi(chip, true);
- mv88e6xxx_reg_unlock(chip);
-
- if (err)
- return err;
- }
-
- bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus));
- if (!bus)
- return -ENOMEM;
-
- mdio_bus = bus->priv;
- mdio_bus->bus = bus;
- mdio_bus->chip = chip;
- INIT_LIST_HEAD(&mdio_bus->list);
- mdio_bus->external = external;
-
- if (np) {
- bus->name = np->full_name;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%pOF", np);
- } else {
- bus->name = "mv88e6xxx SMI";
- snprintf(bus->id, MII_BUS_ID_SIZE, "mv88e6xxx-%d", index++);
- }
-
- bus->read = mv88e6xxx_mdio_read;
- bus->write = mv88e6xxx_mdio_write;
- bus->parent = chip->dev;
-
- if (!external) {
- err = mv88e6xxx_g2_irq_mdio_setup(chip, bus);
- if (err)
- return err;
- }
-
- err = of_mdiobus_register(bus, np);
- if (err) {
- dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
- mv88e6xxx_g2_irq_mdio_free(chip, bus);
- return err;
- }
-
- if (external)
- list_add_tail(&mdio_bus->list, &chip->mdios);
- else
- list_add(&mdio_bus->list, &chip->mdios);
-
- return 0;
-}
-
-static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
-
-{
- struct mv88e6xxx_mdio_bus *mdio_bus;
- struct mii_bus *bus;
-
- list_for_each_entry(mdio_bus, &chip->mdios, list) {
- bus = mdio_bus->bus;
-
- if (!mdio_bus->external)
- mv88e6xxx_g2_irq_mdio_free(chip, bus);
-
- mdiobus_unregister(bus);
- }
-}
-
-static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
- struct device_node *np)
-{
- struct device_node *child;
- int err;
-
- /* Always register one mdio bus for the internal/default mdio
- * bus. This maybe represented in the device tree, but is
- * optional.
- */
- child = of_get_child_by_name(np, "mdio");
- err = mv88e6xxx_mdio_register(chip, child, false);
- if (err)
- return err;
-
- /* Walk the device tree, and see if there are any other nodes
- * which say they are compatible with the external mdio
- * bus.
- */
- for_each_available_child_of_node(np, child) {
- if (of_device_is_compatible(
- child, "marvell,mv88e6xxx-mdio-external")) {
- err = mv88e6xxx_mdio_register(chip, child, true);
- if (err) {
- mv88e6xxx_mdios_unregister(chip);
- of_node_put(child);
- return err;
- }
- }
- }
-
- return 0;
-}
-
static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
@@ -3567,6 +4138,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -3593,7 +4165,9 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3627,7 +4201,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6095_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3637,12 +4211,15 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6185_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -3673,7 +4250,9 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6095_phylink_get_caps,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3683,8 +4262,10 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
@@ -3710,7 +4291,9 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3751,7 +4334,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6141_ops = {
@@ -3762,8 +4345,10 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -3799,6 +4384,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6341_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -3815,7 +4402,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
- .phylink_validate = mv88e6341_phylink_validate,
+ .phylink_get_caps = mv88e6341_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6161_ops = {
@@ -3824,12 +4411,15 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -3855,9 +4445,11 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
.ptp_ops = &mv88e6165_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -3891,9 +4483,11 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6165_avb_ops,
.ptp_ops = &mv88e6165_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6171_ops = {
@@ -3902,8 +4496,10 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -3935,7 +4531,9 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6172_ops = {
@@ -3946,8 +4544,10 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -3981,6 +4581,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -3990,7 +4592,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6352_phylink_validate,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6175_ops = {
@@ -3999,8 +4601,10 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4032,7 +4636,9 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6176_ops = {
@@ -4043,8 +4649,10 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4078,6 +4686,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -4089,8 +4699,9 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.serdes_irq_status = mv88e6352_serdes_irq_status,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6352_phylink_validate,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6185_ops = {
@@ -4129,7 +4740,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
@@ -4140,8 +4751,10 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4176,6 +4789,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4191,7 +4806,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6190x_ops = {
@@ -4201,8 +4816,10 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4237,6 +4854,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4252,7 +4871,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
- .phylink_validate = mv88e6390x_phylink_validate,
+ .phylink_get_caps = mv88e6390x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6191_ops = {
@@ -4262,8 +4881,10 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4296,6 +4917,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4312,7 +4935,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.serdes_get_regs = mv88e6390_serdes_get_regs,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6240_ops = {
@@ -4323,8 +4946,10 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4358,6 +4983,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -4369,10 +4996,11 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.serdes_irq_status = mv88e6352_serdes_irq_status,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6352_phylink_validate,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6250_ops = {
@@ -4383,8 +5011,10 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4412,7 +5042,7 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6250_ptp_ops,
- .phylink_validate = mv88e6065_phylink_validate,
+ .phylink_get_caps = mv88e6250_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6290_ops = {
@@ -4422,8 +5052,10 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4457,6 +5089,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4473,8 +5107,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
- .ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390_phylink_validate,
+ .ptp_ops = &mv88e6390_ptp_ops,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6320_ops = {
@@ -4485,10 +5119,13 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -4518,7 +5155,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
@@ -4529,10 +5166,13 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
+ .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
.port_tag_remap = mv88e6095_port_tag_remap,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
@@ -4554,13 +5194,14 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
+ .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6341_ops = {
@@ -4571,8 +5212,10 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4608,6 +5251,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6341_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4626,7 +5271,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
- .phylink_validate = mv88e6341_phylink_validate,
+ .phylink_get_caps = mv88e6341_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6350_ops = {
@@ -4635,8 +5280,10 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4668,7 +5315,9 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_validate = mv88e6185_phylink_validate,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6351_ops = {
@@ -4677,8 +5326,10 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4710,9 +5361,11 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6185_phylink_validate,
+ .phylink_get_caps = mv88e6185_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6352_ops = {
@@ -4723,8 +5376,10 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4758,6 +5413,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6352_g1_stu_getnext,
+ .stu_loadpurge = mv88e6352_g1_stu_loadpurge,
.serdes_get_lane = mv88e6352_serdes_get_lane,
.serdes_pcs_get_state = mv88e6352_serdes_pcs_get_state,
.serdes_pcs_config = mv88e6352_serdes_pcs_config,
@@ -4775,7 +5432,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.serdes_get_stats = mv88e6352_serdes_get_stats,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
- .phylink_validate = mv88e6352_phylink_validate,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
+ .phylink_get_caps = mv88e6352_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6390_ops = {
@@ -4785,8 +5443,10 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4822,6 +5482,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390_serdes_get_lane,
/* Check status register pause & lpa register */
@@ -4834,13 +5496,13 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
- .ptp_ops = &mv88e6352_ptp_ops,
+ .ptp_ops = &mv88e6390_ptp_ops,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
.serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
.serdes_get_regs = mv88e6390_serdes_get_regs,
- .phylink_validate = mv88e6390_phylink_validate,
+ .phylink_get_caps = mv88e6390_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6390x_ops = {
@@ -4850,8 +5512,10 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4887,6 +5551,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
.serdes_get_lane = mv88e6390x_serdes_get_lane,
.serdes_pcs_get_state = mv88e6390_serdes_pcs_get_state,
@@ -4903,8 +5569,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
- .ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6390x_phylink_validate,
+ .ptp_ops = &mv88e6390_ptp_ops,
+ .phylink_get_caps = mv88e6390x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6393x_ops = {
@@ -4914,8 +5580,10 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4946,7 +5614,7 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
* .port_set_upstream_port method.
*/
.set_egress_port = mv88e6393x_set_egress_port,
- .watchdog_ops = &mv88e6390_watchdog_ops,
+ .watchdog_ops = &mv88e6393x_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
@@ -4955,6 +5623,8 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
+ .stu_getnext = mv88e6390_g1_stu_getnext,
+ .stu_loadpurge = mv88e6390_g1_stu_loadpurge,
.serdes_power = mv88e6393x_serdes_power,
.serdes_get_lane = mv88e6393x_serdes_get_lane,
.serdes_pcs_get_state = mv88e6393x_serdes_pcs_get_state,
@@ -4968,7 +5638,7 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_validate = mv88e6393x_phylink_validate,
+ .phylink_get_caps = mv88e6393x_phylink_get_caps,
};
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
@@ -4981,6 +5651,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 10,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5023,6 +5694,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11,
.num_internal_phys = 8,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5046,6 +5718,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 3,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5090,6 +5763,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 11,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -5113,6 +5787,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 6,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5137,6 +5812,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 6,
.num_internal_phys = 0,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5160,6 +5836,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5184,6 +5861,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5207,6 +5885,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5231,6 +5910,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5276,6 +5956,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5299,6 +5980,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5321,6 +6003,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5343,6 +6026,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5365,6 +6049,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5415,6 +6100,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5460,6 +6146,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5533,6 +6220,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 6,
.num_gpio = 11,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -5557,6 +6245,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5580,6 +6269,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 7,
.num_internal_phys = 5,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5604,6 +6294,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 5,
.num_gpio = 15,
.max_vid = 4095,
+ .max_sid = 63,
.port_base_addr = 0x10,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5628,6 +6319,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5652,6 +6344,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5675,6 +6368,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.num_ports = 11, /* 10 + Z80 */
.num_internal_phys = 9,
.max_vid = 8191,
+ .max_sid = 63,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -5730,6 +6424,32 @@ static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip)
return 0;
}
+static int mv88e6xxx_single_chip_detect(struct mv88e6xxx_chip *chip,
+ struct mdio_device *mdiodev)
+{
+ int err;
+
+ /* dual_chip takes precedence over single/multi-chip modes */
+ if (chip->info->dual_chip)
+ return -EINVAL;
+
+	/* If the MDIO address is 16, i.e. the first port address of a switch
+	 * (e.g. mv88e6*41) in single chip addressing mode, the device may be
+	 * configured in single chip addressing mode. Set up the SMI access
+	 * for single chip addressing mode and attempt to detect the switch
+	 * model; if this fails, the device is not configured in single chip
+	 * addressing mode.
+	 */
+ if (mdiodev->addr != 16)
+ return -EINVAL;
+
+ err = mv88e6xxx_smi_init(chip, mdiodev->bus, 0);
+ if (err)
+ return err;
+
+ return mv88e6xxx_detect(chip);
+}
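
A hypothetical caller sketch (not quoted from this patch) showing how the helper above would typically be used at probe time: try single chip addressing first and, if that fails, fall back to multi-chip addressing at the device's own MDIO address:

	err = mv88e6xxx_single_chip_detect(chip, mdiodev);
	if (err) {
		/* Not in single chip addressing mode; retry at mdiodev->addr */
		err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
		if (err)
			return err;

		err = mv88e6xxx_detect(chip);
		if (err)
			return err;
	}
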
+
static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
{
struct mv88e6xxx_chip *chip;
@@ -5743,6 +6463,7 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
mutex_init(&chip->reg_lock);
INIT_LIST_HEAD(&chip->mdios);
idr_init(&chip->policies);
+ INIT_LIST_HEAD(&chip->msts);
return chip;
}
@@ -5756,11 +6477,12 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
return chip->tag_protocol;
}
-static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port,
+static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
struct mv88e6xxx_chip *chip = ds->priv;
enum dsa_tag_protocol old_protocol;
+ struct dsa_port *cpu_dp;
int err;
switch (proto) {
@@ -5785,17 +6507,31 @@ static int mv88e6xxx_change_tag_protocol(struct dsa_switch *ds, int port,
chip->tag_protocol = proto;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_setup_port_mode(chip, port);
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = mv88e6xxx_setup_port_mode(chip, cpu_dp->index);
+ if (err) {
+ mv88e6xxx_reg_unlock(chip);
+ goto unwind;
+ }
+ }
mv88e6xxx_reg_unlock(chip);
- if (err)
- chip->tag_protocol = old_protocol;
+ return 0;
+
+unwind:
+ chip->tag_protocol = old_protocol;
+
+ mv88e6xxx_reg_lock(chip);
+ dsa_switch_for_each_cpu_port_continue_reverse(cpu_dp, ds)
+ mv88e6xxx_setup_port_mode(chip, cpu_dp->index);
+ mv88e6xxx_reg_unlock(chip);
return err;
}
static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -5809,7 +6545,8 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
@@ -5823,7 +6560,8 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port,
static int mv88e6xxx_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress,
+ struct netlink_ext_ack *extack)
{
enum mv88e6xxx_egress_direction direction = ingress ?
MV88E6XXX_EGRESS_DIR_INGRESS :
@@ -5897,7 +6635,7 @@ static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
const struct mv88e6xxx_ops *ops;
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
- BR_BCAST_FLOOD))
+ BR_BCAST_FLOOD | BR_PORT_LOCKED | BR_PORT_MAB))
return -EINVAL;
ops = chip->info->ops;
@@ -5916,7 +6654,7 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int err = -EOPNOTSUPP;
+ int err = 0;
mv88e6xxx_reg_lock(chip);
@@ -5955,6 +6693,19 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
goto out;
}
+ if (flags.mask & BR_PORT_MAB) {
+ bool mab = !!(flags.val & BR_PORT_MAB);
+
+ mv88e6xxx_port_set_mab(chip, port, mab);
+ }
+
+ if (flags.mask & BR_PORT_LOCKED) {
+ bool locked = !!(flags.val & BR_PORT_LOCKED);
+
+ err = mv88e6xxx_port_set_lock(chip, port, locked);
+ if (err)
+ goto out;
+ }
out:
mv88e6xxx_reg_unlock(chip);
@@ -5962,32 +6713,40 @@ out:
}
static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
- struct net_device *lag,
- struct netdev_lag_upper_info *info)
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
- int id, members = 0;
+ int members = 0;
- if (!mv88e6xxx_has_lag(chip))
+ if (!mv88e6xxx_has_lag(chip)) {
+ NL_SET_ERR_MSG_MOD(extack, "Chip does not support LAG offload");
return false;
+ }
- id = dsa_lag_id(ds->dst, lag);
- if (id < 0 || id >= ds->num_lag_ids)
+ if (!lag.id)
return false;
- dsa_lag_foreach_port(dp, ds->dst, lag)
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
- if (members > 8)
+ if (members > 8) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than 8 LAG ports");
return false;
+ }
/* We could potentially relax this to include active
* backup in the future.
*/
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
return false;
+ }
/* Ideally we would also validate that the hash type matches
* the hardware. Alas, this is always set to unknown on team
@@ -5996,20 +6755,21 @@ static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
return true;
}
-static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct net_device *lag)
+static int mv88e6xxx_lag_sync_map(struct dsa_switch *ds, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
u16 map = 0;
int id;
- id = dsa_lag_id(ds->dst, lag);
+ /* DSA LAG IDs are one-based, hardware is zero-based */
+ id = lag.id - 1;
/* Build the map of all ports to distribute flows destined for
* this LAG. This can be either a local user port, or a DSA
* port if the LAG port is on a remote chip.
*/
- dsa_lag_foreach_port(dp, ds->dst, lag)
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
map |= BIT(dsa_towards_port(ds, dp->ds->index, dp->index));
return mv88e6xxx_g2_trunk_mapping_write(chip, id, map);
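Two details in the hunk above are easy to trip over: DSA hands out one-based LAG IDs while the trunk registers are zero-based, and the mapping register takes a bitmask of the ports (local user ports, or the DSA port towards a remote chip) that serve the LAG. A small standalone illustration, with towards_port() standing in for dsa_towards_port():

#include <stdio.h>
#include <stdint.h>

/* Stand-in for dsa_towards_port(): assume all members are local ports. */
static int towards_port(int member_port)
{
	return member_port;
}

int main(void)
{
	int lag_id = 1;			/* first LAG ID handed out by DSA (one-based) */
	int hw_id = lag_id - 1;		/* hardware trunk IDs start at 0 */
	int members[] = { 3, 4 };
	uint16_t map = 0;
	unsigned int i;

	for (i = 0; i < sizeof(members) / sizeof(members[0]); i++)
		map |= 1u << towards_port(members[i]);

	printf("trunk %d map 0x%04x\n", hw_id, map);	/* trunk 0 map 0x0018 */
	return 0;
}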
@@ -6054,8 +6814,8 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
{
struct mv88e6xxx_chip *chip = ds->priv;
unsigned int id, num_tx;
- struct net_device *lag;
struct dsa_port *dp;
+ struct dsa_lag *lag;
int i, err, nth;
u16 mask[8];
u16 ivec;
@@ -6064,8 +6824,8 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
ivec = BIT(mv88e6xxx_num_ports(chip)) - 1;
/* Disable all masks for ports that _are_ members of a LAG. */
- list_for_each_entry(dp, &ds->dst->ports, list) {
- if (!dp->lag_dev || dp->ds != ds)
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dp->lag)
continue;
ivec &= ~BIT(dp->index);
@@ -6078,7 +6838,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
* are in the Tx set.
*/
dsa_lags_foreach_id(id, ds->dst) {
- lag = dsa_lag_dev(ds->dst, id);
+ lag = dsa_lag_by_id(ds->dst, id);
if (!lag)
continue;
@@ -6114,7 +6874,7 @@ static int mv88e6xxx_lag_sync_masks(struct dsa_switch *ds)
}
static int mv88e6xxx_lag_sync_masks_map(struct dsa_switch *ds,
- struct net_device *lag)
+ struct dsa_lag lag)
{
int err;
@@ -6138,16 +6898,18 @@ static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
}
static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
- struct net_device *lag,
- struct netdev_lag_upper_info *info)
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err, id;
- if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
- id = dsa_lag_id(ds->dst, lag);
+ /* DSA LAG IDs are one-based */
+ id = lag.id - 1;
mv88e6xxx_reg_lock(chip);
@@ -6170,7 +6932,7 @@ err_unlock:
}
static int mv88e6xxx_port_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *lag)
+ struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_trunk;
@@ -6195,13 +6957,14 @@ static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
}
static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *lag,
- struct netdev_lag_upper_info *info)
+ int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
@@ -6218,7 +6981,7 @@ unlock:
}
static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
- int port, struct net_device *lag)
+ int port, struct dsa_lag lag)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err_sync, err_pvt;
@@ -6237,7 +7000,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.teardown = mv88e6xxx_teardown,
.port_setup = mv88e6xxx_port_setup,
.port_teardown = mv88e6xxx_port_teardown,
- .phylink_validate = mv88e6xxx_validate,
+ .phylink_get_caps = mv88e6xxx_get_caps,
.phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
.phylink_mac_config = mv88e6xxx_mac_config,
.phylink_mac_an_restart = mv88e6xxx_serdes_pcs_an_restart,
@@ -6265,15 +7028,18 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_pre_bridge_flags = mv88e6xxx_port_pre_bridge_flags,
.port_bridge_flags = mv88e6xxx_port_bridge_flags,
.port_stp_state_set = mv88e6xxx_port_stp_state_set,
+ .port_mst_state_set = mv88e6xxx_port_mst_state_set,
.port_fast_age = mv88e6xxx_port_fast_age,
+ .port_vlan_fast_age = mv88e6xxx_port_vlan_fast_age,
.port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
.port_vlan_add = mv88e6xxx_port_vlan_add,
.port_vlan_del = mv88e6xxx_port_vlan_del,
- .port_fdb_add = mv88e6xxx_port_fdb_add,
- .port_fdb_del = mv88e6xxx_port_fdb_del,
- .port_fdb_dump = mv88e6xxx_port_fdb_dump,
- .port_mdb_add = mv88e6xxx_port_mdb_add,
- .port_mdb_del = mv88e6xxx_port_mdb_del,
+ .vlan_msti_set = mv88e6xxx_vlan_msti_set,
+ .port_fdb_add = mv88e6xxx_port_fdb_add,
+ .port_fdb_del = mv88e6xxx_port_fdb_del,
+ .port_fdb_dump = mv88e6xxx_port_fdb_dump,
+ .port_mdb_add = mv88e6xxx_port_mdb_add,
+ .port_mdb_del = mv88e6xxx_port_mdb_del,
.port_mirror_add = mv88e6xxx_port_mirror_add,
.port_mirror_del = mv88e6xxx_port_mirror_del,
.crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join,
@@ -6292,8 +7058,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.crosschip_lag_change = mv88e6xxx_crosschip_lag_change,
.crosschip_lag_join = mv88e6xxx_crosschip_lag_join,
.crosschip_lag_leave = mv88e6xxx_crosschip_lag_leave,
- .port_bridge_tx_fwd_offload = mv88e6xxx_bridge_tx_fwd_offload,
- .port_bridge_tx_fwd_unoffload = mv88e6xxx_bridge_tx_fwd_unoffload,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
@@ -6400,10 +7164,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
chip->info = compat_info;
- err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
- if (err)
- goto out;
-
chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(chip->reset)) {
err = PTR_ERR(chip->reset);
@@ -6412,9 +7172,19 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (chip->reset)
usleep_range(1000, 2000);
- err = mv88e6xxx_detect(chip);
- if (err)
- goto out;
+ /* Detect if the device is configured in single chip addressing mode,
+ * otherwise continue with address specific smi init/detection.
+ */
+ err = mv88e6xxx_single_chip_detect(chip, mdiodev);
+ if (err) {
+ err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr);
+ if (err)
+ goto out;
+
+ err = mv88e6xxx_detect(chip);
+ if (err)
+ goto out;
+ }
if (chip->info->edsa_support == MV88E6XXX_EDSA_SUPPORTED)
chip->tag_protocol = DSA_TAG_PROTO_EDSA;
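The probe change above tries single-chip (direct) addressing first and only falls back to the address-specific SMI init plus model detection when that fails. A sketch of just the control flow, with all three helpers stubbed out:

#include <stdio.h>

static int single_chip_detect(void) { return -1; }	/* pretend it fails */
static int smi_init(int addr)       { (void)addr; return 0; }
static int detect(void)             { return 0; }

int main(void)
{
	int err = single_chip_detect();

	if (err) {
		err = smi_init(4);	/* MDIO address from firmware; 4 is made up */
		if (!err)
			err = detect();
	}
	printf("probe %s\n", err ? "failed" : "succeeded");
	return err ? 1 : 0;
}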
@@ -6476,18 +7246,12 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (err)
goto out_g1_atu_prob_irq;
- err = mv88e6xxx_mdios_register(chip, np);
- if (err)
- goto out_g1_vtu_prob_irq;
-
err = mv88e6xxx_register_switch(chip);
if (err)
- goto out_mdio;
+ goto out_g1_vtu_prob_irq;
return 0;
-out_mdio:
- mv88e6xxx_mdios_unregister(chip);
out_g1_vtu_prob_irq:
mv88e6xxx_g1_vtu_prob_irq_free(chip);
out_g1_atu_prob_irq:
@@ -6524,7 +7288,6 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_phy_destroy(chip);
mv88e6xxx_unregister_switch(chip);
- mv88e6xxx_mdios_unregister(chip);
mv88e6xxx_g1_vtu_prob_irq_free(chip);
mv88e6xxx_g1_atu_prob_irq_free(chip);
@@ -6536,8 +7299,6 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 8271b8aa7b71..da6e1339f809 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -20,6 +20,7 @@
#define EDSA_HLEN 8
#define MV88E6XXX_N_FID 4096
+#define MV88E6XXX_N_SID 64
#define MV88E6XXX_FID_STANDALONE 0
#define MV88E6XXX_FID_BRIDGED 1
@@ -130,6 +131,7 @@ struct mv88e6xxx_info {
unsigned int num_internal_phys;
unsigned int num_gpio;
unsigned int max_vid;
+ unsigned int max_sid;
unsigned int port_base_addr;
unsigned int phy_base_addr;
unsigned int global1_addr;
@@ -179,7 +181,14 @@ struct mv88e6xxx_vtu_entry {
u16 fid;
u8 sid;
bool valid;
+ bool policy;
u8 member[DSA_MAX_PORTS];
+ u8 state[DSA_MAX_PORTS]; /* Older silicon has no STU */
+};
+
+struct mv88e6xxx_stu_entry {
+ u8 sid;
+ bool valid;
u8 state[DSA_MAX_PORTS];
};
@@ -271,6 +280,9 @@ struct mv88e6xxx_port {
unsigned int serdes_irq;
char serdes_irq_name[64];
struct devlink_region *region;
+
+ /* MacAuth Bypass control flag */
+ bool mab;
};
enum mv88e6xxx_region_id {
@@ -278,6 +290,7 @@ enum mv88e6xxx_region_id {
MV88E6XXX_REGION_GLOBAL2,
MV88E6XXX_REGION_ATU,
MV88E6XXX_REGION_VTU,
+ MV88E6XXX_REGION_STU,
MV88E6XXX_REGION_PVT,
_MV88E6XXX_REGION_MAX,
@@ -287,6 +300,16 @@ struct mv88e6xxx_region_priv {
enum mv88e6xxx_region_id id;
};
+struct mv88e6xxx_mst {
+ struct list_head node;
+
+ refcount_t refcnt;
+ struct net_device *br;
+ u16 msti;
+
+ struct mv88e6xxx_stu_entry stu;
+};
+
struct mv88e6xxx_chip {
const struct mv88e6xxx_info *info;
@@ -387,11 +410,15 @@ struct mv88e6xxx_chip {
/* devlink regions */
struct devlink_region *regions[_MV88E6XXX_REGION_MAX];
+
+ /* Bridge MST to SID mappings */
+ struct list_head msts;
};
struct mv88e6xxx_bus_ops {
int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
+ int (*init)(struct mv88e6xxx_chip *chip);
};
struct mv88e6xxx_mdio_bus {
@@ -427,6 +454,13 @@ struct mv88e6xxx_ops {
struct mii_bus *bus,
int addr, int reg, u16 val);
+ int (*phy_read_c45)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 *val);
+ int (*phy_write_c45)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 val);
+
/* Priority Override Table operations */
int (*pot_clear)(struct mv88e6xxx_chip *chip);
@@ -464,14 +498,13 @@ struct mv88e6xxx_ops {
int (*port_set_pause)(struct mv88e6xxx_chip *chip, int port,
int pause);
-#define SPEED_MAX INT_MAX
#define SPEED_UNFORCED -2
#define DUPLEX_UNFORCED -2
/* Port's MAC speed (in Mbps) and MAC duplex mode
*
* Depending on the chip, 10, 100, 200, 1000, 2500, 10000 are valid.
- * Use SPEED_UNFORCED for normal detection, SPEED_MAX for max value.
+ * Use SPEED_UNFORCED for normal detection.
*
* Use DUPLEX_HALF or DUPLEX_FULL to force half or full duplex,
* or DUPLEX_UNFORCED for normal duplex detection.
@@ -586,6 +619,10 @@ struct mv88e6xxx_ops {
void (*serdes_get_regs)(struct mv88e6xxx_chip *chip, int port,
void *_p);
+ /* SERDES SGMII/Fiber Output Amplitude */
+ int (*serdes_set_tx_amplitude)(struct mv88e6xxx_chip *chip, int port,
+ int val);
+
/* Address Translation Unit operations */
int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash);
int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash);
@@ -596,6 +633,12 @@ struct mv88e6xxx_ops {
int (*vtu_loadpurge)(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
+ /* Spanning Tree Unit operations */
+ int (*stu_getnext)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+ int (*stu_loadpurge)(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+
/* GPIO operations */
const struct mv88e6xxx_gpio_ops *gpio_ops;
@@ -609,9 +652,8 @@ struct mv88e6xxx_ops {
const struct mv88e6xxx_ptp_ops *ptp_ops;
/* Phylink */
- void (*phylink_validate)(struct mv88e6xxx_chip *chip, int port,
- unsigned long *mask,
- struct phylink_link_state *state);
+ void (*phylink_get_caps)(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config);
/* Max Frame Size */
int (*set_max_frame_size)(struct mv88e6xxx_chip *chip, int mtu);
@@ -673,6 +715,7 @@ struct mv88e6xxx_ptp_ops {
int (*port_disable)(struct mv88e6xxx_chip *chip, int port);
int (*global_enable)(struct mv88e6xxx_chip *chip);
int (*global_disable)(struct mv88e6xxx_chip *chip);
+ int (*set_ptp_cpu_port)(struct mv88e6xxx_chip *chip, int port);
int n_ext_ts;
int arr0_sts_reg;
int arr1_sts_reg;
@@ -695,6 +738,13 @@ struct mv88e6xxx_hw_stat {
int type;
};
+static inline bool mv88e6xxx_has_stu(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->max_sid > 0 &&
+ chip->info->ops->stu_loadpurge &&
+ chip->info->ops->stu_getnext;
+}
+
static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip)
{
return chip->info->pvt;
@@ -725,6 +775,11 @@ static inline unsigned int mv88e6xxx_max_vid(struct mv88e6xxx_chip *chip)
return chip->info->max_vid;
}
+static inline unsigned int mv88e6xxx_max_sid(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->max_sid;
+}
+
static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
{
return GENMASK((s32)mv88e6xxx_num_ports(chip) - 1, 0);
@@ -740,6 +795,12 @@ static inline bool mv88e6xxx_is_invalid_port(struct mv88e6xxx_chip *chip, int po
return (chip->info->invalid_port_mask & BIT(port)) != 0;
}
+static inline void mv88e6xxx_port_set_mab(struct mv88e6xxx_chip *chip,
+ int port, bool mab)
+{
+ chip->ports[port].mab = mab;
+}
+
int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
@@ -758,6 +819,12 @@ static inline void mv88e6xxx_reg_unlock(struct mv88e6xxx_chip *chip)
mutex_unlock(&chip->reg_lock);
}
+int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
+ int (*cb)(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *priv),
+ void *priv);
+
int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *bitmap);
#endif /* _MV88E6XXX_CHIP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index 381068395c63..a08dab75e0c0 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -503,6 +503,85 @@ static int mv88e6xxx_region_vtu_snapshot(struct devlink *dl,
return 0;
}
+/**
+ * struct mv88e6xxx_devlink_stu_entry - Devlink STU entry
+ * @sid: Global1/3: SID.
+ * @vid: Global1/6: Valid bit.
+ * @data: Global1/7-9: Membership data and priority override.
+ * @resvd: Reserved. In case we forgot something.
+ *
+ * The STU entry format varies between chipset generations. Peridot
+ * and Amethyst packs the STU data into Global1/7-8. Older silicon
+ * spreads the information across all three VTU data registers -
+ * inheriting the layout of even older hardware that had no STU at
+ * all. Since this is a low-level debug interface, copy all data
+ * verbatim and defer parsing to the consumer.
+ */
+struct mv88e6xxx_devlink_stu_entry {
+ u16 sid;
+ u16 vid;
+ u16 data[3];
+ u16 resvd;
+};
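Because the snapshot below copies the registers verbatim, a consumer of the "stu" region only needs the record layout above. A minimal userspace sketch that walks such a snapshot, assuming host byte order and leaving the chip-specific decoding of data[] to the reader:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct stu_entry {
	uint16_t sid;
	uint16_t vid;		/* only the VALID flag is meaningful for the STU */
	uint16_t data[3];
	uint16_t resvd;
};

int main(void)
{
	/* Stand-in for a region snapshot fetched over devlink. */
	uint8_t blob[2 * sizeof(struct stu_entry)] = { 0 };
	struct stu_entry e;
	size_t off;

	for (off = 0; off + sizeof(e) <= sizeof(blob); off += sizeof(e)) {
		memcpy(&e, blob + off, sizeof(e));
		printf("sid %u vid-reg 0x%04x data %04x %04x %04x\n",
		       e.sid, e.vid, e.data[0], e.data[1], e.data[2]);
	}
	return 0;
}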
+
+static int mv88e6xxx_region_stu_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct mv88e6xxx_devlink_stu_entry *table, *entry;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_stu_entry stu;
+ int err;
+
+ table = kcalloc(mv88e6xxx_max_sid(chip) + 1,
+ sizeof(struct mv88e6xxx_devlink_stu_entry),
+ GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ entry = table;
+ stu.sid = mv88e6xxx_max_sid(chip);
+ stu.valid = false;
+
+ mv88e6xxx_reg_lock(chip);
+
+ do {
+ err = mv88e6xxx_g1_stu_getnext(chip, &stu);
+ if (err)
+ break;
+
+ if (!stu.valid)
+ break;
+
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6352_G1_VTU_SID,
+ &entry->sid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_VID,
+ &entry->vid);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA1,
+ &entry->data[0]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA2,
+ &entry->data[1]);
+ err = err ? : mv88e6xxx_g1_read(chip, MV88E6XXX_G1_VTU_DATA3,
+ &entry->data[2]);
+ if (err)
+ break;
+
+ entry++;
+ } while (stu.sid < mv88e6xxx_max_sid(chip));
+
+ mv88e6xxx_reg_unlock(chip);
+
+ if (err) {
+ kfree(table);
+ return err;
+ }
+
+ *data = (u8 *)table;
+ return 0;
+}
+
static int mv88e6xxx_region_pvt_snapshot(struct devlink *dl,
const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack,
@@ -605,6 +684,12 @@ static struct devlink_region_ops mv88e6xxx_region_vtu_ops = {
.destructor = kfree,
};
+static struct devlink_region_ops mv88e6xxx_region_stu_ops = {
+ .name = "stu",
+ .snapshot = mv88e6xxx_region_stu_snapshot,
+ .destructor = kfree,
+};
+
static struct devlink_region_ops mv88e6xxx_region_pvt_ops = {
.name = "pvt",
.snapshot = mv88e6xxx_region_pvt_snapshot,
@@ -640,6 +725,11 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
.ops = &mv88e6xxx_region_vtu_ops
/* calculated at runtime */
},
+ [MV88E6XXX_REGION_STU] = {
+ .ops = &mv88e6xxx_region_stu_ops,
+ .cond = mv88e6xxx_has_stu,
+ /* calculated at runtime */
+ },
[MV88E6XXX_REGION_PVT] = {
.ops = &mv88e6xxx_region_pvt_ops,
.size = MV88E6XXX_MAX_PVT_ENTRIES * sizeof(u16),
@@ -706,6 +796,10 @@ int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
size = (mv88e6xxx_max_vid(chip) + 1) *
sizeof(struct mv88e6xxx_devlink_vtu_entry);
break;
+ case MV88E6XXX_REGION_STU:
+ size = (mv88e6xxx_max_sid(chip) + 1) *
+ sizeof(struct mv88e6xxx_devlink_stu_entry);
+ break;
}
region = dsa_devlink_region_create(ds, ops, 1, size);
@@ -727,11 +821,6 @@ int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- err = devlink_info_driver_name_put(req, "mv88e6xxx");
- if (err)
- return err;
return devlink_info_version_fixed_put(req,
DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 5848112036b0..2fa55a643591 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -403,6 +403,18 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
return mv88e6390_g1_monitor_write(chip, ptr, port);
}
+int mv88e6390_g1_set_ptp_cpu_port(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_PTP_CPU_DEST;
+
+ /* Use the default high priority for PTP frames sent to
+ * the CPU.
+ */
+ port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
+
+ return mv88e6390_g1_monitor_write(chip, ptr, port);
+}
+
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
{
u16 ptr;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 4f3dbb015f77..c99ddd117fe6 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -46,6 +46,7 @@
/* Offset 0x02: VTU FID Register */
#define MV88E6352_G1_VTU_FID 0x02
+#define MV88E6352_G1_VTU_FID_VID_POLICY 0x1000
#define MV88E6352_G1_VTU_FID_MASK 0x0fff
/* Offset 0x03: VTU SID Register */
@@ -213,6 +214,7 @@
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_PTP_CPU_DEST 0x3200
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0
#define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff
@@ -302,6 +304,7 @@ int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip,
int port);
int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_g1_set_ptp_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip);
@@ -347,6 +350,16 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6352_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6352_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6390_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
+int mv88e6390_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry);
int mv88e6xxx_g1_vtu_prob_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g1_vtu_prob_irq_free(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 40bd67a5c8e9..ce3b3690c3c0 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -12,6 +12,8 @@
#include "chip.h"
#include "global1.h"
+#include "switchdev.h"
+#include "trace.h"
/* Offset 0x01: ATU FID Register */
@@ -114,6 +116,19 @@ static int mv88e6xxx_g1_atu_op_wait(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_ATU_OP, bit, 0);
}
+static int mv88e6xxx_g1_read_atu_violation(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_ATU_OP,
+ MV88E6XXX_G1_ATU_OP_BUSY |
+ MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_atu_op_wait(chip);
+}
+
static int mv88e6xxx_g1_atu_op(struct mv88e6xxx_chip *chip, u16 fid, u16 op)
{
u16 val;
@@ -159,6 +174,41 @@ int mv88e6xxx_g1_atu_get_next(struct mv88e6xxx_chip *chip, u16 fid)
return mv88e6xxx_g1_atu_op(chip, fid, MV88E6XXX_G1_ATU_OP_GET_NEXT_DB);
}
+static int mv88e6xxx_g1_atu_fid_read(struct mv88e6xxx_chip *chip, u16 *fid)
+{
+ u16 val = 0, upper = 0, op = 0;
+ int err = -EOPNOTSUPP;
+
+ if (mv88e6xxx_num_databases(chip) > 256) {
+ err = mv88e6xxx_g1_read(chip, MV88E6352_G1_ATU_FID, &val);
+ if (err)
+ return err;
+ val &= 0xfff;
+ } else {
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &op);
+ if (err)
+ return err;
+ if (mv88e6xxx_num_databases(chip) > 64) {
+ /* ATU DBNum[7:4] are located in ATU Control 15:12 */
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_CTL,
+ &upper);
+ if (err)
+ return err;
+
+ upper = (upper >> 8) & 0x00f0;
+ } else if (mv88e6xxx_num_databases(chip) > 16) {
+ /* ATU DBNum[5:4] are located in ATU Operation 9:8 */
+ upper = (op >> 4) & 0x30;
+ }
+
+ /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
+ val = (op & 0xf) | upper;
+ }
+ *fid = val;
+
+ return err;
+}
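For chips with 256 or fewer databases, the FID is scattered across the ATU Operation and ATU Control registers as described in the comments above. A small worked example with made-up register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t atu_op  = 0x0007;	/* DBNum[3:0] live in Operation bits 3:0 */
	uint16_t atu_ctl = 0xa000;	/* DBNum[7:4] live in Control bits 15:12 */
	uint16_t fid;

	fid = (atu_op & 0xf) | ((atu_ctl >> 8) & 0x00f0);
	printf("fid = 0x%02x\n", fid);	/* fid = 0xa7 */
	return 0;
}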
+
/* Offset 0x0C: ATU Data Register */
static int mv88e6xxx_g1_atu_data_read(struct mv88e6xxx_chip *chip,
@@ -353,64 +403,69 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
struct mv88e6xxx_atu_entry entry;
- int spid;
- int err;
- u16 val;
+ int err, spid;
+ u16 val, fid;
mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_g1_atu_op(chip, 0,
- MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION);
+ err = mv88e6xxx_g1_read_atu_violation(chip);
if (err)
- goto out;
+ goto out_unlock;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &val);
if (err)
- goto out;
+ goto out_unlock;
+
+ err = mv88e6xxx_g1_atu_fid_read(chip, &fid);
+ if (err)
+ goto out_unlock;
err = mv88e6xxx_g1_atu_data_read(chip, &entry);
if (err)
- goto out;
+ goto out_unlock;
err = mv88e6xxx_g1_atu_mac_read(chip, &entry);
if (err)
- goto out;
+ goto out_unlock;
- spid = entry.state;
+ mv88e6xxx_reg_unlock(chip);
- if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
- dev_err_ratelimited(chip->dev,
- "ATU age out violation for %pM\n",
- entry.mac);
- }
+ spid = entry.state;
if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
- dev_err_ratelimited(chip->dev,
- "ATU member violation for %pM portvec %x spid %d\n",
- entry.mac, entry.portvec, spid);
+ trace_mv88e6xxx_atu_member_violation(chip->dev, spid,
+ entry.portvec, entry.mac,
+ fid);
chip->ports[spid].atu_member_violation++;
}
if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
- dev_err_ratelimited(chip->dev,
- "ATU miss violation for %pM portvec %x spid %d\n",
- entry.mac, entry.portvec, spid);
+ trace_mv88e6xxx_atu_miss_violation(chip->dev, spid,
+ entry.portvec, entry.mac,
+ fid);
chip->ports[spid].atu_miss_violation++;
+
+ if (fid != MV88E6XXX_FID_STANDALONE && chip->ports[spid].mab) {
+ err = mv88e6xxx_handle_miss_violation(chip, spid,
+ &entry, fid);
+ if (err)
+ goto out;
+ }
}
if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
- dev_err_ratelimited(chip->dev,
- "ATU full violation for %pM portvec %x spid %d\n",
- entry.mac, entry.portvec, spid);
+ trace_mv88e6xxx_atu_full_violation(chip->dev, spid,
+ entry.portvec, entry.mac,
+ fid);
chip->ports[spid].atu_full_violation++;
}
- mv88e6xxx_reg_unlock(chip);
return IRQ_HANDLED;
-out:
+out_unlock:
mv88e6xxx_reg_unlock(chip);
+out:
dev_err(chip->dev, "ATU problem: error %d while handling interrupt\n",
err);
return IRQ_HANDLED;
diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
index ae12c981923e..bcfb4a812055 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c
@@ -13,6 +13,7 @@
#include "chip.h"
#include "global1.h"
+#include "trace.h"
/* Offset 0x02: VTU FID Register */
@@ -27,7 +28,7 @@ static int mv88e6xxx_g1_vtu_fid_read(struct mv88e6xxx_chip *chip,
return err;
entry->fid = val & MV88E6352_G1_VTU_FID_MASK;
-
+ entry->policy = !!(val & MV88E6352_G1_VTU_FID_VID_POLICY);
return 0;
}
@@ -36,13 +37,15 @@ static int mv88e6xxx_g1_vtu_fid_write(struct mv88e6xxx_chip *chip,
{
u16 val = entry->fid & MV88E6352_G1_VTU_FID_MASK;
+ if (entry->policy)
+ val |= MV88E6352_G1_VTU_FID_VID_POLICY;
+
return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_FID, val);
}
/* Offset 0x03: VTU SID Register */
-static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip, u8 *sid)
{
u16 val;
int err;
@@ -51,15 +54,14 @@ static int mv88e6xxx_g1_vtu_sid_read(struct mv88e6xxx_chip *chip,
if (err)
return err;
- entry->sid = val & MV88E6352_G1_VTU_SID_MASK;
+ *sid = val & MV88E6352_G1_VTU_SID_MASK;
return 0;
}
-static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+static int mv88e6xxx_g1_vtu_sid_write(struct mv88e6xxx_chip *chip, u8 sid)
{
- u16 val = entry->sid & MV88E6352_G1_VTU_SID_MASK;
+ u16 val = sid & MV88E6352_G1_VTU_SID_MASK;
return mv88e6xxx_g1_write(chip, MV88E6352_G1_VTU_SID, val);
}
@@ -88,7 +90,7 @@ static int mv88e6xxx_g1_vtu_op(struct mv88e6xxx_chip *chip, u16 op)
/* Offset 0x06: VTU VID Register */
static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ bool *valid, u16 *vid)
{
u16 val;
int err;
@@ -97,25 +99,28 @@ static int mv88e6xxx_g1_vtu_vid_read(struct mv88e6xxx_chip *chip,
if (err)
return err;
- entry->vid = val & 0xfff;
+ if (vid) {
+ *vid = val & 0xfff;
- if (val & MV88E6390_G1_VTU_VID_PAGE)
- entry->vid |= 0x1000;
+ if (val & MV88E6390_G1_VTU_VID_PAGE)
+ *vid |= 0x1000;
+ }
- entry->valid = !!(val & MV88E6XXX_G1_VTU_VID_VALID);
+ if (valid)
+ *valid = !!(val & MV88E6XXX_G1_VTU_VID_VALID);
return 0;
}
static int mv88e6xxx_g1_vtu_vid_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ bool valid, u16 vid)
{
- u16 val = entry->vid & 0xfff;
+ u16 val = vid & 0xfff;
- if (entry->vid & 0x1000)
+ if (vid & 0x1000)
val |= MV88E6390_G1_VTU_VID_PAGE;
- if (entry->valid)
+ if (valid)
val |= MV88E6XXX_G1_VTU_VID_VALID;
return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_VTU_VID, val);
@@ -144,7 +149,7 @@ static int mv88e6185_g1_vtu_stu_data_read(struct mv88e6xxx_chip *chip,
}
static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ u8 *member, u8 *state)
{
u16 regs[3];
int err;
@@ -157,36 +162,20 @@ static int mv88e6185_g1_vtu_data_read(struct mv88e6xxx_chip *chip,
/* Extract MemberTag data */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
unsigned int member_offset = (i % 4) * 4;
+ unsigned int state_offset = member_offset + 2;
- entry->member[i] = (regs[i / 4] >> member_offset) & 0x3;
- }
-
- return 0;
-}
-
-static int mv88e6185_g1_stu_data_read(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- u16 regs[3];
- int err;
- int i;
-
- err = mv88e6185_g1_vtu_stu_data_read(chip, regs);
- if (err)
- return err;
-
- /* Extract PortState data */
- for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- unsigned int state_offset = (i % 4) * 4 + 2;
+ if (member)
+ member[i] = (regs[i / 4] >> member_offset) & 0x3;
- entry->state[i] = (regs[i / 4] >> state_offset) & 0x3;
+ if (state)
+ state[i] = (regs[i / 4] >> state_offset) & 0x3;
}
return 0;
}
static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
+ u8 *member, u8 *state)
{
u16 regs[3] = { 0 };
int i;
@@ -196,8 +185,11 @@ static int mv88e6185_g1_vtu_data_write(struct mv88e6xxx_chip *chip,
unsigned int member_offset = (i % 4) * 4;
unsigned int state_offset = member_offset + 2;
- regs[i / 4] |= (entry->member[i] & 0x3) << member_offset;
- regs[i / 4] |= (entry->state[i] & 0x3) << state_offset;
+ if (member)
+ regs[i / 4] |= (member[i] & 0x3) << member_offset;
+
+ if (state)
+ regs[i / 4] |= (state[i] & 0x3) << state_offset;
}
/* Write all 3 VTU/STU Data registers */
@@ -265,48 +257,6 @@ static int mv88e6390_g1_vtu_data_write(struct mv88e6xxx_chip *chip, u8 *data)
/* VLAN Translation Unit Operations */
-static int mv88e6xxx_g1_vtu_stu_getnext(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *entry)
-{
- int err;
-
- err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_GET_NEXT);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_sid_read(chip, entry);
- if (err)
- return err;
-
- return mv88e6xxx_g1_vtu_vid_read(chip, entry);
-}
-
-static int mv88e6xxx_g1_vtu_stu_get(struct mv88e6xxx_chip *chip,
- struct mv88e6xxx_vtu_entry *vtu)
-{
- struct mv88e6xxx_vtu_entry stu;
- int err;
-
- err = mv88e6xxx_g1_vtu_sid_read(chip, vtu);
- if (err)
- return err;
-
- stu.sid = vtu->sid - 1;
-
- err = mv88e6xxx_g1_vtu_stu_getnext(chip, &stu);
- if (err)
- return err;
-
- if (stu.sid != vtu->sid || !stu.valid)
- return -EINVAL;
-
- return 0;
-}
-
int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry)
{
@@ -324,7 +274,7 @@ int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
* write the VID only once, when the entry is given as invalid.
*/
if (!entry->valid) {
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, false, entry->vid);
if (err)
return err;
}
@@ -333,7 +283,7 @@ int mv88e6xxx_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
- return mv88e6xxx_g1_vtu_vid_read(chip, entry);
+ return mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, &entry->vid);
}
int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
@@ -347,11 +297,7 @@ int mv88e6185_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;
if (entry->valid) {
- err = mv88e6185_g1_vtu_data_read(chip, entry);
- if (err)
- return err;
-
- err = mv88e6185_g1_stu_data_read(chip, entry);
+ err = mv88e6185_g1_vtu_data_read(chip, entry->member, entry->state);
if (err)
return err;
@@ -381,7 +327,7 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
return err;
if (entry->valid) {
- err = mv88e6185_g1_vtu_data_read(chip, entry);
+ err = mv88e6185_g1_vtu_data_read(chip, entry->member, NULL);
if (err)
return err;
@@ -389,12 +335,7 @@ int mv88e6352_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
- /* Fetch VLAN PortState data from the STU */
- err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
- if (err)
- return err;
-
- err = mv88e6185_g1_stu_data_read(chip, entry);
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
@@ -417,16 +358,11 @@ int mv88e6390_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
if (err)
return err;
- /* Fetch VLAN PortState data from the STU */
- err = mv88e6xxx_g1_vtu_stu_get(chip, entry);
- if (err)
- return err;
-
- err = mv88e6390_g1_vtu_data_read(chip, entry->state);
+ err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_fid_read(chip, entry);
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
@@ -444,12 +380,12 @@ int mv88e6185_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
- err = mv88e6185_g1_vtu_data_write(chip, entry);
+ err = mv88e6185_g1_vtu_data_write(chip, entry->member, entry->state);
if (err)
return err;
@@ -476,27 +412,21 @@ int mv88e6352_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
- /* Write MemberTag and PortState data */
- err = mv88e6185_g1_vtu_data_write(chip, entry);
- if (err)
- return err;
-
- err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
+ /* Write MemberTag data */
+ err = mv88e6185_g1_vtu_data_write(chip, entry->member, NULL);
if (err)
return err;
- /* Load STU entry */
- err = mv88e6xxx_g1_vtu_op(chip,
- MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+ err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
}
@@ -514,41 +444,113 @@ int mv88e6390_g1_vtu_loadpurge(struct mv88e6xxx_chip *chip,
if (err)
return err;
- err = mv88e6xxx_g1_vtu_vid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, entry->vid);
if (err)
return err;
if (entry->valid) {
- /* Write PortState data */
- err = mv88e6390_g1_vtu_data_write(chip, entry->state);
+ /* Write MemberTag data */
+ err = mv88e6390_g1_vtu_data_write(chip, entry->member);
if (err)
return err;
- err = mv88e6xxx_g1_vtu_sid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
if (err)
return err;
- /* Load STU entry */
- err = mv88e6xxx_g1_vtu_op(chip,
- MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
+ }
- /* Write MemberTag data */
- err = mv88e6390_g1_vtu_data_write(chip, entry->member);
+ /* Load/Purge VTU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
+}
+
+int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
+}
+
+/* Spanning Tree Unit Operations */
+
+int mv88e6xxx_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ /* To get the next higher active SID, the STU GetNext operation can be
+ * started again without setting the SID registers since it already
+ * contains the last SID.
+ *
+ * To save a few hardware accesses and abstract this to the caller,
+ * write the SID only once, when the entry is given as invalid.
+ */
+ if (!entry->valid) {
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
if (err)
return err;
+ }
- err = mv88e6xxx_g1_vtu_fid_write(chip, entry);
+ err = mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_GET_NEXT);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_read(chip, &entry->valid, NULL);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6xxx_g1_vtu_sid_read(chip, &entry->sid);
if (err)
return err;
}
- /* Load/Purge VTU entry */
- return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_VTU_LOAD_PURGE);
+ return 0;
}
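Callers of this getnext interface follow the same convention the devlink snapshot code above uses: start from the highest SID with valid set to false (so the first call writes the SID register), then keep calling getnext until an invalid entry comes back or the returned SID wraps past the starting point. A standalone sketch of that walk, with getnext() standing in for mv88e6xxx_g1_stu_getnext() and a fake table in which only SIDs 0 and 5 exist:

#include <stdbool.h>
#include <stdio.h>

#define MAX_SID 63

struct stu_entry { unsigned int sid; bool valid; };

/* Stand-in for the hardware GetNext operation. */
static int getnext(struct stu_entry *e)
{
	unsigned int next = (e->sid + 1) % (MAX_SID + 1);

	while (next != 0 && next != 5 && next < MAX_SID)
		next++;
	e->sid = next;
	e->valid = (next == 0 || next == 5);
	return 0;
}

int main(void)
{
	struct stu_entry e = { .sid = MAX_SID, .valid = false };

	do {
		if (getnext(&e) || !e.valid)
			break;
		printf("sid %u\n", e.sid);	/* prints 0, then 5 */
	} while (e.sid < MAX_SID);
	return 0;
}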
-int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
+int mv88e6352_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_stu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (!entry->valid)
+ return 0;
+
+ return mv88e6185_g1_vtu_data_read(chip, NULL, entry->state);
+}
+
+int mv88e6390_g1_stu_getnext(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_stu_getnext(chip, entry);
+ if (err)
+ return err;
+
+ if (!entry->valid)
+ return 0;
+
+ return mv88e6390_g1_vtu_data_read(chip, entry->state);
+}
+
+int mv88e6352_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
{
int err;
@@ -556,16 +558,59 @@ int mv88e6xxx_g1_vtu_flush(struct mv88e6xxx_chip *chip)
if (err)
return err;
- return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_FLUSH_ALL);
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6185_g1_vtu_data_write(chip, NULL, entry->state);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge STU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
}
+int mv88e6390_g1_stu_loadpurge(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_stu_entry *entry)
+{
+ int err;
+
+ err = mv88e6xxx_g1_vtu_op_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_vid_write(chip, entry->valid, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_vtu_sid_write(chip, entry->sid);
+ if (err)
+ return err;
+
+ if (entry->valid) {
+ err = mv88e6390_g1_vtu_data_write(chip, entry->state);
+ if (err)
+ return err;
+ }
+
+ /* Load/Purge STU entry */
+ return mv88e6xxx_g1_vtu_op(chip, MV88E6XXX_G1_VTU_OP_STU_LOAD_PURGE);
+}
+
+/* VTU Violation Management */
+
static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
- struct mv88e6xxx_vtu_entry entry;
+ u16 val, vid;
int spid;
int err;
- u16 val;
mv88e6xxx_reg_lock(chip);
@@ -577,21 +622,19 @@ static irqreturn_t mv88e6xxx_g1_vtu_prob_irq_thread_fn(int irq, void *dev_id)
if (err)
goto out;
- err = mv88e6xxx_g1_vtu_vid_read(chip, &entry);
+ err = mv88e6xxx_g1_vtu_vid_read(chip, NULL, &vid);
if (err)
goto out;
spid = val & MV88E6XXX_G1_VTU_OP_SPID_MASK;
if (val & MV88E6XXX_G1_VTU_OP_MEMBER_VIOLATION) {
- dev_err_ratelimited(chip->dev, "VTU member violation for vid %d, source port %d\n",
- entry.vid, spid);
+ trace_mv88e6xxx_vtu_member_violation(chip->dev, spid, vid);
chip->ports[spid].vtu_member_violation++;
}
if (val & MV88E6XXX_G1_VTU_OP_MISS_VIOLATION) {
- dev_dbg_ratelimited(chip->dev, "VTU miss violation for vid %d, source port %d\n",
- entry.vid, spid);
+ trace_mv88e6xxx_vtu_miss_violation(chip->dev, spid, vid);
chip->ports[spid].vtu_miss_violation++;
}
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index fa65ecd9cb85..615896893076 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -739,20 +739,18 @@ static int mv88e6xxx_g2_smi_phy_read_data_c45(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
}
-static int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
- bool external, int port, int reg,
- u16 *data)
+static int _mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int devad,
+ int reg, u16 *data)
{
- int dev = (reg >> 16) & 0x1f;
- int addr = reg & 0xffff;
int err;
- err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev,
- addr);
+ err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, devad,
+ reg);
if (err)
return err;
- return mv88e6xxx_g2_smi_phy_read_data_c45(chip, external, port, dev,
+ return mv88e6xxx_g2_smi_phy_read_data_c45(chip, external, port, devad,
data);
}
@@ -771,51 +769,65 @@ static int mv88e6xxx_g2_smi_phy_write_data_c45(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
}
-static int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
- bool external, int port, int reg,
- u16 data)
+static int _mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int devad,
+ int reg, u16 data)
{
- int dev = (reg >> 16) & 0x1f;
- int addr = reg & 0xffff;
int err;
- err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev,
- addr);
+ err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, devad,
+ reg);
if (err)
return err;
- return mv88e6xxx_g2_smi_phy_write_data_c45(chip, external, port, dev,
+ return mv88e6xxx_g2_smi_phy_write_data_c45(chip, external, port, devad,
data);
}
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
- int addr, int reg, u16 *val)
+int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
bool external = mdio_bus->external;
- if (reg & MII_ADDR_C45)
- return mv88e6xxx_g2_smi_phy_read_c45(chip, external, addr, reg,
- val);
-
return mv88e6xxx_g2_smi_phy_read_data_c22(chip, external, addr, reg,
val);
}
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
- int addr, int reg, u16 val)
+int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int addr, int devad,
+ int reg, u16 *val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
bool external = mdio_bus->external;
- if (reg & MII_ADDR_C45)
- return mv88e6xxx_g2_smi_phy_write_c45(chip, external, addr, reg,
- val);
+ return _mv88e6xxx_g2_smi_phy_read_c45(chip, external, addr, devad, reg,
+ val);
+}
+
+int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int addr, int reg,
+ u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ bool external = mdio_bus->external;
return mv88e6xxx_g2_smi_phy_write_data_c22(chip, external, addr, reg,
val);
}
+int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int addr, int devad,
+ int reg, u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ bool external = mdio_bus->external;
+
+ return _mv88e6xxx_g2_smi_phy_write_c45(chip, external, addr, devad, reg,
+ val);
+}
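The removed helpers show the encoding that the new entry points replace: a Clause 45 access used to arrive as a single "reg" value with the MMD (device address) packed into bits 20:16 and the register number in bits 15:0, whereas the new functions take devad and reg separately. A tiny standalone example of unpacking the old format:

#include <stdio.h>

int main(void)
{
	unsigned int packed = (0x03 << 16) | 0x8000;	/* MMD 3, register 0x8000 */
	unsigned int devad  = (packed >> 16) & 0x1f;
	unsigned int reg    = packed & 0xffff;

	printf("devad %u reg 0x%04x\n", devad, reg);	/* devad 3 reg 0x8000 */
	return 0;
}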
+
/* Offset 0x1B: Watchdog Control */
static int mv88e6097_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
{
@@ -931,6 +943,26 @@ const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {
.irq_free = mv88e6390_watchdog_free,
};
+static int mv88e6393x_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
+{
+ mv88e6390_watchdog_action(chip, irq);
+
+ /* Fix for clearing the force WD event bit.
+ * Unreleased erratum on mv88e6393x.
+ */
+ mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
+ MV88E6390_G2_WDOG_CTL_UPDATE |
+ MV88E6390_G2_WDOG_CTL_PTR_EVENT);
+
+ return IRQ_HANDLED;
+}
+
+const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops = {
+ .irq_action = mv88e6393x_watchdog_action,
+ .irq_setup = mv88e6390_watchdog_setup,
+ .irq_free = mv88e6390_watchdog_free,
+};
+
static irqreturn_t mv88e6xxx_g2_watchdog_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
@@ -1164,31 +1196,19 @@ out:
int mv88e6xxx_g2_irq_mdio_setup(struct mv88e6xxx_chip *chip,
struct mii_bus *bus)
{
- int phy, irq, err, err_phy;
+ int phy, irq;
for (phy = 0; phy < chip->info->num_internal_phys; phy++) {
irq = irq_find_mapping(chip->g2_irq.domain, phy);
- if (irq < 0) {
- err = irq;
- goto out;
- }
+ if (irq < 0)
+ return irq;
+
bus->irq[chip->info->phy_base_addr + phy] = irq;
}
return 0;
-out:
- err_phy = phy;
-
- for (phy = 0; phy < err_phy; phy++)
- irq_dispose_mapping(bus->irq[phy]);
-
- return err;
}
void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip,
struct mii_bus *bus)
{
- int phy;
-
- for (phy = 0; phy < chip->info->num_internal_phys; phy++)
- irq_dispose_mapping(bus->irq[phy]);
}
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index f3e27573a386..7e091965582b 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -298,7 +298,9 @@
#define MV88E6352_G2_SCRATCH_CONFIG_DATA1 0x71
#define MV88E6352_G2_SCRATCH_CONFIG_DATA1_NO_CPU BIT(2)
#define MV88E6352_G2_SCRATCH_CONFIG_DATA2 0x72
-#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0x3
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0xf
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA3 0x73
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL BIT(1)
#define MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO 0
#define MV88E6352_G2_SCRATCH_GPIO_PCTL_TRIG 1
@@ -312,12 +314,18 @@ int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, int reg,
int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 *val);
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 val);
+int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val);
+int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 *val);
+int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 val);
int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
@@ -361,6 +369,7 @@ int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
+extern const struct mv88e6xxx_irq_ops mv88e6393x_watchdog_ops;
extern const struct mv88e6xxx_avb_ops mv88e6165_avb_ops;
extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops;
@@ -370,6 +379,7 @@ extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
bool external);
+int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port);
int mv88e6xxx_g2_atu_stats_set(struct mv88e6xxx_chip *chip, u16 kind, u16 bin);
int mv88e6xxx_g2_atu_stats_get(struct mv88e6xxx_chip *chip, u16 *stats);
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
index eda710062933..a9d6e40321a2 100644
--- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -289,3 +289,31 @@ int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_scratch_write(chip, misc_cfg, val);
}
+
+/**
+ * mv88e6352_g2_scratch_port_has_serdes - indicate if a port can have a serdes
+ * @chip: chip private data
+ * @port: port number to check for serdes
+ *
+ * Indicates whether the port may have a serdes attached according to the
+ * pin strapping. Returns negative error number, 0 if the port is not
+ * configured to have a serdes, and 1 if the port is configured to have a
+ * serdes attached.
+ */
+int mv88e6352_g2_scratch_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
+{
+ u8 config3, p;
+ int err;
+
+ err = mv88e6xxx_g2_scratch_read(chip, MV88E6352_G2_SCRATCH_CONFIG_DATA3,
+ &config3);
+ if (err)
+ return err;
+
+ if (config3 & MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL)
+ p = 5;
+ else
+ p = 4;
+
+ return port == p;
+}
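A caller of a helper with this "negative error, 0 or 1" contract has to keep the error case distinct from the boolean result. A trivial standalone sketch, with port_has_serdes() as a stand-in for the strap-based check above:

#include <stdio.h>

static int port_has_serdes(int port)
{
	return port == 4;	/* pretend the straps select port 4 */
}

int main(void)
{
	int ret = port_has_serdes(4);

	if (ret < 0)
		printf("read failed: %d\n", ret);
	else
		printf("port 4 %s a serdes\n", ret ? "has" : "does not have");
	return 0;
}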
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index 8f74ffc7a279..331b4ca089ff 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -100,10 +100,6 @@ static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
*/
clear_bit_unlock(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state);
- /* reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tstamp_enable = false;
@@ -305,7 +301,7 @@ static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
shwt->hwtstamp = ns_to_ktime(ns);
status &= ~MV88E6XXX_PTP_TS_VALID;
}
- netif_rx_ni(skb);
+ netif_rx(skb);
}
}
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
index 252b5b3a3efe..8bb88b3d900d 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.c
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -55,6 +55,38 @@ int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, int reg, u16 val)
return chip->info->ops->phy_write(chip, bus, addr, reg, val);
}
+int mv88e6xxx_phy_read_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 *val)
+{
+ int addr = phy; /* PHY devices addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
+
+ if (!chip->info->ops->phy_read_c45)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_read_c45(chip, bus, addr, devad, reg, val);
+}
+
+int mv88e6xxx_phy_write_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 val)
+{
+ int addr = phy; /* PHY devices addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
+
+ if (!chip->info->ops->phy_write_c45)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_write_c45(chip, bus, addr, devad, reg, val);
+}
+
static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
{
return mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
diff --git a/drivers/net/dsa/mv88e6xxx/phy.h b/drivers/net/dsa/mv88e6xxx/phy.h
index 05ea0d546969..5f47722364cc 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.h
+++ b/drivers/net/dsa/mv88e6xxx/phy.h
@@ -28,6 +28,10 @@ int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
int reg, u16 *val);
int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
int reg, u16 val);
+int mv88e6xxx_phy_read_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 *val);
+int mv88e6xxx_phy_write_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 val);
int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy,
u8 page, int reg, u16 *val);
int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index ab41619a809b..f79cf716c541 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -133,6 +133,15 @@ int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
}
+int mv88e6320_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ if (port != 2 && port != 5 && port != 6)
+ return -EOPNOTSUPP;
+
+ return mv88e6xxx_port_set_rgmii_delay(chip, port, mode);
+}
+
int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link)
{
u16 reg;
@@ -294,28 +303,10 @@ static int mv88e6xxx_port_set_speed_duplex(struct mv88e6xxx_chip *chip,
return 0;
}
-/* Support 10, 100, 200 Mbps (e.g. 88E6065 family) */
-int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
- int speed, int duplex)
-{
- if (speed == SPEED_MAX)
- speed = 200;
-
- if (speed > 200)
- return -EOPNOTSUPP;
-
- /* Setting 200 Mbps on port 0 to 3 selects 100 Mbps */
- return mv88e6xxx_port_set_speed_duplex(chip, port, speed, false, false,
- duplex);
-}
-
/* Support 10, 100, 1000 Mbps (e.g. 88E6185 family) */
int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = 1000;
-
if (speed == 200 || speed > 1000)
return -EOPNOTSUPP;
@@ -327,9 +318,6 @@ int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = 100;
-
if (speed > 100)
return -EOPNOTSUPP;
@@ -341,9 +329,6 @@ int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6341_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = port < 5 ? 1000 : 2500;
-
if (speed > 2500)
return -EOPNOTSUPP;
@@ -369,9 +354,6 @@ phy_interface_t mv88e6341_port_max_speed_mode(int port)
int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = 1000;
-
if (speed > 1000)
return -EOPNOTSUPP;
@@ -386,9 +368,6 @@ int mv88e6352_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int mv88e6390_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = port < 9 ? 1000 : 2500;
-
if (speed > 2500)
return -EOPNOTSUPP;
@@ -414,9 +393,6 @@ phy_interface_t mv88e6390_port_max_speed_mode(int port)
int mv88e6390x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex)
{
- if (speed == SPEED_MAX)
- speed = port < 9 ? 1000 : 10000;
-
if (speed == 200 && port != 0)
return -EOPNOTSUPP;
@@ -445,9 +421,6 @@ int mv88e6393x_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
u16 reg, ctrl;
int err;
- if (speed == SPEED_MAX)
- speed = (port > 0 && port < 9) ? 1000 : 10000;
-
if (speed == 200 && port != 0)
return -EOPNOTSUPP;
@@ -550,6 +523,15 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
mode = PHY_INTERFACE_MODE_1000BASEX;
switch (mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ cmode = MV88E6XXX_PORT_STS_CMODE_RMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ cmode = MV88E6XXX_PORT_STS_CMODE_RGMII;
+ break;
case PHY_INTERFACE_MODE_1000BASEX:
cmode = MV88E6XXX_PORT_STS_CMODE_1000BASEX;
break;
@@ -610,6 +592,8 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
chip->ports[port].cmode = cmode;
lane = mv88e6xxx_serdes_get_lane(chip, port);
+ if (lane == -ENODEV)
+ return 0;
if (lane < 0)
return lane;
@@ -665,6 +649,19 @@ int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (port != 0 && port != 9 && port != 10)
return -EOPNOTSUPP;
+ if (port == 9 || port == 10) {
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
/* mv88e6393x errata 4.5: EEE should be disabled on SERDES ports */
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
@@ -1234,6 +1231,35 @@ int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
return err;
}
+int mv88e6xxx_port_set_lock(struct mv88e6xxx_chip *chip, int port,
+ bool locked)
+{
+ u16 reg;
+ int err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL0, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_CTL0_SA_FILT_MASK;
+ if (locked)
+ reg |= MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_LOCK;
+
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, &reg);
+ if (err)
+ return err;
+
+ reg &= ~MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT;
+ if (locked)
+ reg |= MV88E6XXX_PORT_ASSOC_VECTOR_LOCKED_PORT;
+
+ return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_ASSOC_VECTOR, reg);
+}
+
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
u16 mode)
{
@@ -1278,7 +1304,7 @@ int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, new);
}
-int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map)
{
u16 reg;
int err;
@@ -1287,7 +1313,10 @@ int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- reg |= MV88E6XXX_PORT_CTL2_MAP_DA;
+ if (map)
+ reg |= MV88E6XXX_PORT_CTL2_MAP_DA;
+ else
+ reg &= ~MV88E6XXX_PORT_CTL2_MAP_DA;
return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL2, reg);
}
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 03382b66f800..aec9d4fd20e3 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -42,6 +42,11 @@
#define MV88E6XXX_PORT_STS_TX_PAUSED 0x0020
#define MV88E6XXX_PORT_STS_FLOW_CTL 0x0010
#define MV88E6XXX_PORT_STS_CMODE_MASK 0x000f
+#define MV88E6XXX_PORT_STS_CMODE_MII_PHY 0x0001
+#define MV88E6XXX_PORT_STS_CMODE_MII 0x0002
+#define MV88E6XXX_PORT_STS_CMODE_GMII 0x0003
+#define MV88E6XXX_PORT_STS_CMODE_RMII_PHY 0x0004
+#define MV88E6XXX_PORT_STS_CMODE_RMII 0x0005
#define MV88E6XXX_PORT_STS_CMODE_RGMII 0x0007
#define MV88E6XXX_PORT_STS_CMODE_100BASEX 0x0008
#define MV88E6XXX_PORT_STS_CMODE_1000BASEX 0x0009
@@ -142,7 +147,11 @@
/* Offset 0x04: Port Control Register */
#define MV88E6XXX_PORT_CTL0 0x04
#define MV88E6XXX_PORT_CTL0_USE_CORE_TAG 0x8000
-#define MV88E6XXX_PORT_CTL0_DROP_ON_LOCK 0x4000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_MASK 0xc000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DISABLED 0x0000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_LOCK 0x4000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_UNLOCK 0x8000
+#define MV88E6XXX_PORT_CTL0_SA_FILT_DROP_ON_CPU 0xc000
#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_MASK 0x3000
#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNMODIFIED 0x0000
#define MV88E6XXX_PORT_CTL0_EGRESS_MODE_UNTAGGED 0x1000
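Note on the new definitions above: the SA_FILT macros describe a two-bit source-address filtering field in bits 15:14 of Port Control 0 (mask 0xc000), replacing the old single DROP_ON_LOCK bit — 0x0000 disabled, 0x4000 drop on lock, 0x8000 drop on unlock, 0xc000 drop to CPU — which is why mv88e6xxx_port_set_lock() clears the whole mask before programming the drop-on-lock encoding.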
@@ -323,6 +332,8 @@ int mv88e6xxx_port_wait_bit(struct mv88e6xxx_chip *chip, int port, int reg,
int mv88e6185_port_set_pause(struct mv88e6xxx_chip *chip, int port,
int pause);
+int mv88e6320_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port,
@@ -333,8 +344,6 @@ int mv88e6xxx_port_set_link(struct mv88e6xxx_chip *chip, int port, int link);
int mv88e6xxx_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
int mv88e6185_port_sync_link(struct mv88e6xxx_chip *chip, int port, unsigned int mode, bool isup);
-int mv88e6065_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
- int speed, int duplex);
int mv88e6185_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
int speed, int duplex);
int mv88e6250_port_set_speed_duplex(struct mv88e6xxx_chip *chip, int port,
@@ -365,6 +374,9 @@ int mv88e6xxx_port_set_fid(struct mv88e6xxx_chip *chip, int port, u16 fid);
int mv88e6xxx_port_get_pvid(struct mv88e6xxx_chip *chip, int port, u16 *pvid);
int mv88e6xxx_port_set_pvid(struct mv88e6xxx_chip *chip, int port, u16 pvid);
+int mv88e6xxx_port_set_lock(struct mv88e6xxx_chip *chip, int port,
+ bool locked);
+
int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
u16 mode);
int mv88e6095_port_tag_remap(struct mv88e6xxx_chip *chip, int port);
@@ -425,7 +437,7 @@ int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6352_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
int mv88e6xxx_port_drop_untagged(struct mv88e6xxx_chip *chip, int port,
bool drop_untagged);
-int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port, bool map);
int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
int upstream_port);
int mv88e6xxx_port_set_mirror(struct mv88e6xxx_chip *chip, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/port_hidden.c b/drivers/net/dsa/mv88e6xxx/port_hidden.c
index b49d05f0e117..7a9f9ff6dedf 100644
--- a/drivers/net/dsa/mv88e6xxx/port_hidden.c
+++ b/drivers/net/dsa/mv88e6xxx/port_hidden.c
@@ -40,8 +40,9 @@ int mv88e6xxx_port_hidden_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_PORT_RESERVED_1A_BUSY);
- return mv88e6xxx_wait_bit(chip, MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
- MV88E6XXX_PORT_RESERVED_1A, bit, 0);
+ return mv88e6xxx_port_wait_bit(chip,
+ MV88E6XXX_PORT_RESERVED_1A_CTRL_PORT,
+ MV88E6XXX_PORT_RESERVED_1A, bit, 0);
}
int mv88e6xxx_port_hidden_read(struct mv88e6xxx_chip *chip, int block, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index d838c174dc0d..ea17231dc34e 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -11,6 +11,7 @@
*/
#include "chip.h"
+#include "global1.h"
#include "global2.h"
#include "hwtstamp.h"
#include "ptp.h"
@@ -419,6 +420,34 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
.cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
+const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {
+ .clock_read = mv88e6352_ptp_clock_read,
+ .ptp_enable = mv88e6352_ptp_enable,
+ .ptp_verify = mv88e6352_ptp_verify,
+ .event_work = mv88e6352_tai_event_work,
+ .port_enable = mv88e6352_hwtstamp_port_enable,
+ .port_disable = mv88e6352_hwtstamp_port_disable,
+ .set_ptp_cpu_port = mv88e6390_g1_set_ptp_cpu_port,
+ .n_ext_ts = 1,
+ .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
+ .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
+ .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
+ .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+ .cc_shift = MV88E6XXX_CC_SHIFT,
+ .cc_mult = MV88E6XXX_CC_MULT,
+ .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
+ .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
+};
+
static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
@@ -491,6 +520,23 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
chip->ptp_clock_info.verify = ptp_ops->ptp_verify;
chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work;
+ if (ptp_ops->set_ptp_cpu_port) {
+ struct dsa_port *dp;
+ int upstream = 0;
+ int err;
+
+ dsa_switch_for_each_user_port(dp, chip->ds) {
+ upstream = dsa_upstream_port(chip->ds, dp->index);
+ break;
+ }
+
+ err = ptp_ops->set_ptp_cpu_port(chip, upstream);
+ if (err) {
+ dev_err(chip->dev, "Failed to set PTP CPU destination port!\n");
+ return err;
+ }
+ }
+
chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev);
if (IS_ERR(chip->ptp_clock))
return PTR_ERR(chip->ptp_clock);
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h
index 269d5d16a466..6c4d09adc93c 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.h
+++ b/drivers/net/dsa/mv88e6xxx/ptp.h
@@ -151,6 +151,7 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
+extern const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops;
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
@@ -171,6 +172,7 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
+static const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {};
#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 2b05ead515cd..72faec8f44dc 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -36,36 +36,35 @@ static int mv88e6352_serdes_write(struct mv88e6xxx_chip *chip, int reg,
static int mv88e6390_serdes_read(struct mv88e6xxx_chip *chip,
int lane, int device, int reg, u16 *val)
{
- int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
-
- return mv88e6xxx_phy_read(chip, lane, reg_c45, val);
+ return mv88e6xxx_phy_read_c45(chip, lane, device, reg, val);
}
static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
int lane, int device, int reg, u16 val)
{
- int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
-
- return mv88e6xxx_phy_write(chip, lane, reg_c45, val);
+ return mv88e6xxx_phy_write_c45(chip, lane, device, reg, val);
}
static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
- u16 ctrl, u16 status, u16 lpa,
+ u16 bmsr, u16 lpa, u16 status,
struct phylink_link_state *state)
{
+ state->link = false;
+
+ /* If the BMSR reports that the link had failed, report this to
+ * phylink.
+ */
+ if (!(bmsr & BMSR_LSTATUS))
+ return 0;
+
state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+ state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
/* The Speed and Duplex Resolved register is 1 if AN is enabled
* and complete, or if AN is disabled. So with disabled AN we
- * still get here on link up. But we want to set an_complete
- * only if AN was enabled, thus we look at BMCR_ANENABLE.
- * (According to 802.3-2008 section 22.2.4.2.10, we should be
- * able to get this same value from BMSR_ANEGCAPABLE, but tests
- * show that these Marvell PHYs don't conform to this part of
- * the specificaion - BMSR_ANEGCAPABLE is simply always 1.)
+ * still get here on link up.
*/
- state->an_complete = !!(ctrl & BMCR_ANENABLE);
state->duplex = status &
MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
DUPLEX_FULL : DUPLEX_HALF;
@@ -191,12 +190,12 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
int lane, struct phylink_link_state *state)
{
- u16 lpa, status, ctrl;
+ u16 bmsr, lpa, status;
int err;
- err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
+ err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr);
if (err) {
- dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+ dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
return err;
}
@@ -212,7 +211,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
return err;
}
- return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
+ return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
}
int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
@@ -272,14 +271,6 @@ int mv88e6352_serdes_get_lane(struct mv88e6xxx_chip *chip, int port)
return lane;
}
-static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
-{
- if (mv88e6xxx_serdes_get_lane(chip, port) >= 0)
- return true;
-
- return false;
-}
-
struct mv88e6352_serdes_hw_stat {
char string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -293,20 +284,24 @@ static struct mv88e6352_serdes_hw_stat mv88e6352_serdes_hw_stats[] = {
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
{
- if (mv88e6352_port_has_serdes(chip, port))
- return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
+ int err;
- return 0;
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
+
+ return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
}
int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data)
{
struct mv88e6352_serdes_hw_stat *stat;
- int i;
+ int err, i;
- if (!mv88e6352_port_has_serdes(chip, port))
- return 0;
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_hw_stats); i++) {
stat = &mv88e6352_serdes_hw_stats[i];
@@ -348,11 +343,12 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
{
struct mv88e6xxx_port *mv88e6xxx_port = &chip->ports[port];
struct mv88e6352_serdes_hw_stat *stat;
+ int i, err;
u64 value;
- int i;
- if (!mv88e6352_port_has_serdes(chip, port))
- return 0;
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
BUILD_BUG_ON(ARRAY_SIZE(mv88e6352_serdes_hw_stats) >
ARRAY_SIZE(mv88e6xxx_port->serdes_stats));
@@ -419,8 +415,13 @@ unsigned int mv88e6352_serdes_irq_mapping(struct mv88e6xxx_chip *chip, int port)
int mv88e6352_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port)
{
- if (!mv88e6352_port_has_serdes(chip, port))
- return 0;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ mv88e6xxx_reg_unlock(chip);
+ if (err <= 0)
+ return err;
return 32 * sizeof(u16);
}
@@ -432,7 +433,8 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
int err;
int i;
- if (!mv88e6352_port_has_serdes(chip, port))
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
return;
for (i = 0 ; i < 32; i++) {
@@ -915,13 +917,13 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
int port, int lane, struct phylink_link_state *state)
{
- u16 lpa, status, ctrl;
+ u16 bmsr, lpa, status;
int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
- MV88E6390_SGMII_BMCR, &ctrl);
+ MV88E6390_SGMII_BMSR, &bmsr);
if (err) {
- dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+ dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err);
return err;
}
@@ -939,7 +941,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
return err;
}
- return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
+ return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state);
}
static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
@@ -1310,6 +1312,44 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
}
}
+static const int mv88e6352_serdes_p2p_to_reg[] = {
+ /* Index of value in microvolts corresponds to the register value */
+ 14000, 112000, 210000, 308000, 406000, 504000, 602000, 700000,
+};
+
+int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
+ int val)
+{
+ bool found = false;
+ u16 ctrl, reg;
+ int err;
+ int i;
+
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_p2p_to_reg); ++i) {
+ if (mv88e6352_serdes_p2p_to_reg[i] == val) {
+ reg = i;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_SPEC_CTRL2, &ctrl);
+ if (err)
+ return err;
+
+ ctrl &= ~MV88E6352_SERDES_OUT_AMP_MASK;
+ ctrl |= reg;
+
+ return mv88e6352_serdes_write(chip, MV88E6352_SERDES_SPEC_CTRL2, ctrl);
+}
+
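A usage sketch under stated assumptions: only the exact microvolt values listed in mv88e6352_serdes_p2p_to_reg are accepted, so a hypothetical caller that obtained a peak-to-peak amplitude from a firmware description would pass it through unchanged (210000 uV, for instance, resolves to register value 2).

/* Hypothetical caller sketch; the calling context and error handling are
 * assumptions, only mv88e6352_serdes_set_tx_amplitude() comes from the hunk
 * above.
 */
static int example_apply_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
				      int microvolts)
{
	int err;

	err = mv88e6352_serdes_set_tx_amplitude(chip, port, microvolts);
	if (err == -EINVAL)
		dev_warn(chip->dev, "unsupported SERDES amplitude %d uV\n",
			 microvolts);

	return err;
}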
static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane,
bool on)
{
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 8dd8ed225b45..29bb4e91e2f6 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -27,6 +27,8 @@
#define MV88E6352_SERDES_INT_FIBRE_ENERGY BIT(4)
#define MV88E6352_SERDES_INT_STATUS 0x13
+#define MV88E6352_SERDES_SPEC_CTRL2 0x1a
+#define MV88E6352_SERDES_OUT_AMP_MASK 0x0007
#define MV88E6341_PORT5_LANE 0x15
@@ -176,6 +178,9 @@ void mv88e6352_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
+int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
+ int val);
+
/* Return the (first) SERDES lane address a port is using, -errno otherwise. */
static inline int mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
int port)
diff --git a/drivers/net/dsa/mv88e6xxx/smi.c b/drivers/net/dsa/mv88e6xxx/smi.c
index 282fe08db050..a990271b7482 100644
--- a/drivers/net/dsa/mv88e6xxx/smi.c
+++ b/drivers/net/dsa/mv88e6xxx/smi.c
@@ -55,11 +55,15 @@ static int mv88e6xxx_smi_direct_write(struct mv88e6xxx_chip *chip,
static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
int dev, int reg, int bit, int val)
{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(50);
u16 data;
int err;
int i;
- for (i = 0; i < 16; i++) {
+ /* Even if the initial poll takes longer than 50ms, always do
+ * at least one more attempt.
+ */
+ for (i = 0; time_before(jiffies, timeout) || (i < 2); i++) {
err = mv88e6xxx_smi_direct_read(chip, dev, reg, &data);
if (err)
return err;
@@ -67,7 +71,10 @@ static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
if (!!(data & BIT(bit)) == !!val)
return 0;
- usleep_range(1000, 2000);
+ if (i < 2)
+ cpu_relax();
+ else
+ usleep_range(1000, 2000);
}
return -ETIMEDOUT;
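Rough worked timing, for orientation: the old loop made a fixed 16 attempts with a 1-2 ms sleep each, so it gave up after roughly 16-32 ms of sleeping regardless of how long each register read took; the new loop makes at least two back-to-back attempts with cpu_relax() and then sleeps 1-2 ms per iteration until the 50 ms deadline, so slow MDIO transactions no longer shrink the effective timeout.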
@@ -104,11 +111,6 @@ static int mv88e6xxx_smi_indirect_read(struct mv88e6xxx_chip *chip,
{
int err;
- err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
- MV88E6XXX_SMI_CMD, 15, 0);
- if (err)
- return err;
-
err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
MV88E6XXX_SMI_CMD,
MV88E6XXX_SMI_CMD_BUSY |
@@ -132,11 +134,6 @@ static int mv88e6xxx_smi_indirect_write(struct mv88e6xxx_chip *chip,
{
int err;
- err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
- MV88E6XXX_SMI_CMD, 15, 0);
- if (err)
- return err;
-
err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
MV88E6XXX_SMI_DATA, data);
if (err)
@@ -155,9 +152,20 @@ static int mv88e6xxx_smi_indirect_write(struct mv88e6xxx_chip *chip,
MV88E6XXX_SMI_CMD, 15, 0);
}
+static int mv88e6xxx_smi_indirect_init(struct mv88e6xxx_chip *chip)
+{
+ /* Ensure that the chip starts out in the ready state. As both
+ * reads and writes always ensure this on return, they can
+ * safely depend on the chip not being busy on entry.
+ */
+ return mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+}
+
static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_indirect_ops = {
.read = mv88e6xxx_smi_indirect_read,
.write = mv88e6xxx_smi_indirect_write,
+ .init = mv88e6xxx_smi_indirect_init,
};
int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
@@ -175,5 +183,8 @@ int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
chip->bus = bus;
chip->sw_addr = sw_addr;
+ if (chip->smi_ops->init)
+ return chip->smi_ops->init(chip);
+
return 0;
}
diff --git a/drivers/net/dsa/mv88e6xxx/switchdev.c b/drivers/net/dsa/mv88e6xxx/switchdev.c
new file mode 100644
index 000000000000..4c346a884fb2
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/switchdev.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * switchdev.c
+ *
+ * Authors:
+ * Hans J. Schultz <netdev@kapio-technology.com>
+ *
+ */
+
+#include <net/switchdev.h>
+#include "chip.h"
+#include "global1.h"
+#include "switchdev.h"
+
+struct mv88e6xxx_fid_search_ctx {
+ u16 fid_search;
+ u16 vid_found;
+};
+
+static int __mv88e6xxx_find_vid(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *priv)
+{
+ struct mv88e6xxx_fid_search_ctx *ctx = priv;
+
+ if (ctx->fid_search == entry->fid) {
+ ctx->vid_found = entry->vid;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_find_vid(struct mv88e6xxx_chip *chip, u16 fid, u16 *vid)
+{
+ struct mv88e6xxx_fid_search_ctx ctx;
+ int err;
+
+ ctx.fid_search = fid;
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_vtu_walk(chip, __mv88e6xxx_find_vid, &ctx);
+ mv88e6xxx_reg_unlock(chip);
+ if (err < 0)
+ return err;
+ if (err == 1)
+ *vid = ctx.vid_found;
+ else
+ return -ENOENT;
+
+ return 0;
+}
+
+int mv88e6xxx_handle_miss_violation(struct mv88e6xxx_chip *chip, int port,
+ struct mv88e6xxx_atu_entry *entry, u16 fid)
+{
+ struct switchdev_notifier_fdb_info info = {
+ .addr = entry->mac,
+ .locked = true,
+ };
+ struct net_device *brport;
+ struct dsa_port *dp;
+ u16 vid;
+ int err;
+
+ err = mv88e6xxx_find_vid(chip, fid, &vid);
+ if (err)
+ return err;
+
+ info.vid = vid;
+ dp = dsa_to_port(chip->ds, port);
+
+ rtnl_lock();
+ brport = dsa_port_to_bridge_port(dp);
+ if (!brport) {
+ rtnl_unlock();
+ return -ENODEV;
+ }
+ err = call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ brport, &info.info, NULL);
+ rtnl_unlock();
+
+ return err;
+}
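A sketch of the assumed call site, which this hunk does not show: the ATU interrupt handler would invoke the helper when a miss violation is raised on a locked port, so the bridge learns the {MAC, VID} pair as a locked FDB entry.

/* Hypothetical excerpt of the ATU violation interrupt handler; the
 * surrounding variables (val, spid, fid, entry) and the violation-bit macro
 * are assumptions, only mv88e6xxx_handle_miss_violation() comes from this
 * file.
 */
if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
	err = mv88e6xxx_handle_miss_violation(chip, spid, &entry, fid);
	if (err)
		dev_dbg(chip->dev,
			"ATU miss violation on port %d not notified: %d\n",
			spid, err);
}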
diff --git a/drivers/net/dsa/mv88e6xxx/switchdev.h b/drivers/net/dsa/mv88e6xxx/switchdev.h
new file mode 100644
index 000000000000..62214f9d62b0
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/switchdev.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * switchdev.h
+ *
+ * Authors:
+ * Hans J. Schultz <netdev@kapio-technology.com>
+ *
+ */
+
+#ifndef _MV88E6XXX_SWITCHDEV_H_
+#define _MV88E6XXX_SWITCHDEV_H_
+
+#include "chip.h"
+
+int mv88e6xxx_handle_miss_violation(struct mv88e6xxx_chip *chip, int port,
+ struct mv88e6xxx_atu_entry *entry,
+ u16 fid);
+
+#endif /* _MV88E6XXX_SWITCHDEV_H_ */
diff --git a/drivers/net/dsa/mv88e6xxx/trace.c b/drivers/net/dsa/mv88e6xxx/trace.c
new file mode 100644
index 000000000000..7833cb50ca5d
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/trace.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright 2022 NXP
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/dsa/mv88e6xxx/trace.h b/drivers/net/dsa/mv88e6xxx/trace.h
new file mode 100644
index 000000000000..f59ca04768e7
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/trace.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright 2022 NXP
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mv88e6xxx
+
+#if !defined(_MV88E6XXX_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _MV88E6XXX_TRACE_H
+
+#include <linux/device.h>
+#include <linux/if_ether.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(mv88e6xxx_atu_violation,
+
+ TP_PROTO(const struct device *dev, int spid, u16 portvec,
+ const unsigned char *addr, u16 fid),
+
+ TP_ARGS(dev, spid, portvec, addr, fid),
+
+ TP_STRUCT__entry(
+ __string(name, dev_name(dev))
+ __field(int, spid)
+ __field(u16, portvec)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, fid)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->spid = spid;
+ __entry->portvec = portvec;
+ memcpy(__entry->addr, addr, ETH_ALEN);
+ __entry->fid = fid;
+ ),
+
+ TP_printk("dev %s spid %d portvec 0x%x addr %pM fid %u",
+ __get_str(name), __entry->spid, __entry->portvec,
+ __entry->addr, __entry->fid)
+);
+
+DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_member_violation,
+ TP_PROTO(const struct device *dev, int spid, u16 portvec,
+ const unsigned char *addr, u16 fid),
+ TP_ARGS(dev, spid, portvec, addr, fid));
+
+DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_miss_violation,
+ TP_PROTO(const struct device *dev, int spid, u16 portvec,
+ const unsigned char *addr, u16 fid),
+ TP_ARGS(dev, spid, portvec, addr, fid));
+
+DEFINE_EVENT(mv88e6xxx_atu_violation, mv88e6xxx_atu_full_violation,
+ TP_PROTO(const struct device *dev, int spid, u16 portvec,
+ const unsigned char *addr, u16 fid),
+ TP_ARGS(dev, spid, portvec, addr, fid));
+
+DECLARE_EVENT_CLASS(mv88e6xxx_vtu_violation,
+
+ TP_PROTO(const struct device *dev, int spid, u16 vid),
+
+ TP_ARGS(dev, spid, vid),
+
+ TP_STRUCT__entry(
+ __string(name, dev_name(dev))
+ __field(int, spid)
+ __field(u16, vid)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev_name(dev));
+ __entry->spid = spid;
+ __entry->vid = vid;
+ ),
+
+ TP_printk("dev %s spid %d vid %u",
+ __get_str(name), __entry->spid, __entry->vid)
+);
+
+DEFINE_EVENT(mv88e6xxx_vtu_violation, mv88e6xxx_vtu_member_violation,
+ TP_PROTO(const struct device *dev, int spid, u16 vid),
+ TP_ARGS(dev, spid, vid));
+
+DEFINE_EVENT(mv88e6xxx_vtu_violation, mv88e6xxx_vtu_miss_violation,
+ TP_PROTO(const struct device *dev, int spid, u16 vid),
+ TP_ARGS(dev, spid, vid));
+
+#endif /* _MV88E6XXX_TRACE_H */
+
+/* We don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+/* This part must be outside protection */
+#include <trace/define_trace.h>
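For context: the new trace.c defines CREATE_TRACE_POINTS before including this header, which is what actually materialises the tracepoints; every other user simply includes trace.h and calls the generated helpers (for example trace_mv88e6xxx_vtu_miss_violation(chip->dev, spid, vid)), and the events then appear under the mv88e6xxx trace system.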
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 9948544ba1c4..081e7a88ea02 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -1,4 +1,34 @@
# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_MSCC_FELIX_DSA_LIB
+ tristate
+ help
+ This is an umbrella module for all network switches that are
+ register-compatible with Ocelot and that perform I/O to their host
+ CPU through an NPI (Node Processor Interface) Ethernet port.
+ Its name comes from the first hardware chip to make use of it
+ (VSC9959), code named Felix.
+
+config NET_DSA_MSCC_OCELOT_EXT
+ tristate "Ocelot External Ethernet switch support"
+ depends on NET_DSA && SPI
+ depends on NET_VENDOR_MICROSEMI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select MDIO_MSCC_MIIM
+ select MFD_OCELOT
+ select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_MSCC_FELIX_DSA_LIB
+ select NET_DSA_TAG_OCELOT_8021Q
+ select NET_DSA_TAG_OCELOT
+ help
+ This driver supports the VSC7511, VSC7512, VSC7513 and VSC7514 chips
+ when controlled through SPI.
+
+ The Ocelot switch family is a set of multi-port networking chips. All
+ of these chips have the ability to be controlled externally through
+ SPI or PCIe interfaces.
+
+ Say "Y" here to enable external control to these chips.
+
config NET_DSA_MSCC_FELIX
tristate "Ocelot / Felix Ethernet switch support"
depends on NET_DSA && PCI
@@ -6,7 +36,9 @@ config NET_DSA_MSCC_FELIX
depends on NET_VENDOR_FREESCALE
depends on HAS_IOMEM
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on NET_SCH_TAPRIO || NET_SCH_TAPRIO=n
select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_MSCC_FELIX_DSA_LIB
select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
select FSL_ENETC_MDIO
@@ -21,7 +53,9 @@ config NET_DSA_MSCC_SEVILLE
depends on NET_VENDOR_MICROSEMI
depends on HAS_IOMEM
depends on PTP_1588_CLOCK_OPTIONAL
+ select MDIO_MSCC_MIIM
select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_MSCC_FELIX_DSA_LIB
select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
select PCS_LYNX
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile
index f6dd131e7491..ead868a293e3 100644
--- a/drivers/net/dsa/ocelot/Makefile
+++ b/drivers/net/dsa/ocelot/Makefile
@@ -1,11 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_MSCC_FELIX_DSA_LIB) += mscc_felix_dsa_lib.o
obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o
+obj-$(CONFIG_NET_DSA_MSCC_OCELOT_EXT) += mscc_ocelot_ext.o
obj-$(CONFIG_NET_DSA_MSCC_SEVILLE) += mscc_seville.o
-mscc_felix-objs := \
- felix.o \
- felix_vsc9959.o
-
-mscc_seville-objs := \
- felix.o \
- seville_vsc9953.o
+mscc_felix_dsa_lib-objs := felix.o
+mscc_felix-objs := felix_vsc9959.o
+mscc_ocelot_ext-objs := ocelot_ext.o
+mscc_seville-objs := seville_vsc9953.o
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index f1a05e7dc818..80861ac090ae 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -21,37 +21,69 @@
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
-#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"
-static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
- bool pvid, bool untagged)
+/* Translate the DSA database API into the ocelot switch library API,
+ * which uses VID 0 for all ports that aren't part of a bridge,
+ * and expects the bridge_dev to be NULL in that case.
+ */
+static struct net_device *felix_classify_db(struct dsa_db db)
{
- struct ocelot_vcap_filter *outer_tagging_rule;
- struct ocelot *ocelot = &felix->ocelot;
- struct dsa_switch *ds = felix->ds;
- int key_length, upstream, err;
+ switch (db.type) {
+ case DSA_DB_PORT:
+ case DSA_DB_LAG:
+ return NULL;
+ case DSA_DB_BRIDGE:
+ return db.bridge.dev;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+}
- /* We don't need to install the rxvlan into the other ports' filtering
- * tables, because we're just pushing the rxvlan when sending towards
- * the CPU
- */
- if (!pvid)
- return 0;
+static int felix_cpu_port_for_master(struct dsa_switch *ds,
+ struct net_device *master)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
+ int lag;
+
+ if (netif_is_lag_master(master)) {
+ mutex_lock(&ocelot->fwd_domain_lock);
+ lag = ocelot_bond_get_id(ocelot, master);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return lag;
+ }
+
+ cpu_dp = master->dsa_ptr;
+ return cpu_dp->index;
+}
+
+/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
+ * the tagger can perform RX source port identification.
+ */
+static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port,
+ int upstream, u16 vid)
+{
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
+ int key_length, err;
key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
- upstream = dsa_upstream_port(ds, port);
outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
GFP_KERNEL);
if (!outer_tagging_rule)
return -ENOMEM;
+ cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);
+
outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
outer_tagging_rule->prio = 1;
- outer_tagging_rule->id.cookie = port;
+ outer_tagging_rule->id.cookie = cookie;
outer_tagging_rule->id.tc_offload = false;
outer_tagging_rule->block_id = VCAP_ES0;
outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -72,20 +104,36 @@ static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
return err;
}
-static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
- bool pvid, bool untagged)
+static int felix_tag_8021q_vlan_del_rx(struct dsa_switch *ds, int port,
+ int upstream, u16 vid)
{
- struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
- struct ocelot *ocelot = &felix->ocelot;
- struct dsa_switch *ds = felix->ds;
- int upstream, err;
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot_vcap_block *block_vcap_es0;
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
- /* tag_8021q.c assumes we are implementing this via port VLAN
- * membership, which we aren't. So we don't need to add any VCAP filter
- * for the CPU port.
- */
- if (ocelot->ports[port]->is_dsa_8021q_cpu)
- return 0;
+ block_vcap_es0 = &ocelot->block[VCAP_ES0];
+ cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream);
+
+ outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
+ cookie, false);
+ if (!outer_tagging_rule)
+ return -ENOENT;
+
+ return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
+}
+
+/* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2
+ * rules for steering those tagged packets towards the correct destination port
+ */
+static int felix_tag_8021q_vlan_add_tx(struct dsa_switch *ds, int port,
+ u16 vid)
+{
+ struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
+ unsigned long cpu_ports = dsa_cpu_ports(ds);
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
+ int err;
untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
if (!untagging_rule)
@@ -97,14 +145,14 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
return -ENOMEM;
}
- upstream = dsa_upstream_port(ds, port);
+ cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
- untagging_rule->ingress_port_mask = BIT(upstream);
+ untagging_rule->ingress_port_mask = cpu_ports;
untagging_rule->vlan.vid.value = vid;
untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
untagging_rule->prio = 1;
- untagging_rule->id.cookie = port;
+ untagging_rule->id.cookie = cookie;
untagging_rule->id.tc_offload = false;
untagging_rule->block_id = VCAP_IS1;
untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -121,11 +169,13 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
return err;
}
+ cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
+
redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
- redirect_rule->ingress_port_mask = BIT(upstream);
+ redirect_rule->ingress_port_mask = cpu_ports;
redirect_rule->pag = port;
redirect_rule->prio = 1;
- redirect_rule->id.cookie = port;
+ redirect_rule->id.cookie = cookie;
redirect_rule->id.tc_offload = false;
redirect_rule->block_id = VCAP_IS2;
redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -143,265 +193,320 @@ static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
return 0;
}
-static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
- u16 flags)
-{
- bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
- struct ocelot *ocelot = ds->priv;
-
- if (vid_is_dsa_8021q_rxvlan(vid))
- return felix_tag_8021q_rxvlan_add(ocelot_to_felix(ocelot),
- port, vid, pvid, untagged);
-
- if (vid_is_dsa_8021q_txvlan(vid))
- return felix_tag_8021q_txvlan_add(ocelot_to_felix(ocelot),
- port, vid, pvid, untagged);
-
- return 0;
-}
-
-static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
-{
- struct ocelot_vcap_filter *outer_tagging_rule;
- struct ocelot_vcap_block *block_vcap_es0;
- struct ocelot *ocelot = &felix->ocelot;
-
- block_vcap_es0 = &ocelot->block[VCAP_ES0];
-
- outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
- port, false);
- /* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
- * installing outer tagging ES0 rules where they weren't needed.
- * But in rxvlan_del, the API doesn't give us the "flags" anymore,
- * so that forces us to be slightly sloppy here, and just assume that
- * if we didn't find an outer_tagging_rule it means that there was
- * none in the first place, i.e. rxvlan_del is called on a non-pvid
- * port. This is most probably true though.
- */
- if (!outer_tagging_rule)
- return 0;
-
- return ocelot_vcap_filter_del(ocelot, outer_tagging_rule);
-}
-
-static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
+static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid)
{
struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
struct ocelot_vcap_block *block_vcap_is1;
struct ocelot_vcap_block *block_vcap_is2;
- struct ocelot *ocelot = &felix->ocelot;
+ struct ocelot *ocelot = ds->priv;
+ unsigned long cookie;
int err;
- if (ocelot->ports[port]->is_dsa_8021q_cpu)
- return 0;
-
block_vcap_is1 = &ocelot->block[VCAP_IS1];
block_vcap_is2 = &ocelot->block[VCAP_IS2];
+ cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
- port, false);
+ cookie, false);
if (!untagging_rule)
- return 0;
+ return -ENOENT;
err = ocelot_vcap_filter_del(ocelot, untagging_rule);
if (err)
return err;
+ cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
- port, false);
+ cookie, false);
if (!redirect_rule)
- return 0;
+ return -ENOENT;
return ocelot_vcap_filter_del(ocelot, redirect_rule);
}
-static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
{
- struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
+ int err;
- if (vid_is_dsa_8021q_rxvlan(vid))
- return felix_tag_8021q_rxvlan_del(ocelot_to_felix(ocelot),
- port, vid);
+ /* tag_8021q.c assumes we are implementing this via port VLAN
+ * membership, which we aren't. So we don't need to add any VCAP filter
+ * for the CPU port.
+ */
+ if (!dsa_is_user_port(ds, port))
+ return 0;
- if (vid_is_dsa_8021q_txvlan(vid))
- return felix_tag_8021q_txvlan_del(ocelot_to_felix(ocelot),
- port, vid);
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
+ if (err)
+ return err;
+ }
+
+ err = felix_tag_8021q_vlan_add_tx(ds, port, vid);
+ if (err)
+ goto add_tx_failed;
return 0;
+
+add_tx_failed:
+ dsa_switch_for_each_cpu_port(cpu_dp, ds)
+ felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);
+
+ return err;
}
-/* Alternatively to using the NPI functionality, that same hardware MAC
- * connected internally to the enetc or fman DSA master can be configured to
- * use the software-defined tag_8021q frame format. As far as the hardware is
- * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
- * module are now disconnected from it, but can still be accessed through
- * register-based MMIO.
- */
-static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
+static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
- ocelot->ports[port]->is_dsa_8021q_cpu = true;
- ocelot->npi = -1;
+ struct dsa_port *cpu_dp;
+ int err;
- /* Overwrite PGID_CPU with the non-tagging port */
- ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);
+ if (!dsa_is_user_port(ds, port))
+ return 0;
- ocelot_apply_bridge_fwd_mask(ocelot);
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ err = felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid);
+ if (err)
+ return err;
+ }
+
+ err = felix_tag_8021q_vlan_del_tx(ds, port, vid);
+ if (err)
+ goto del_tx_failed;
+
+ return 0;
+
+del_tx_failed:
+ dsa_switch_for_each_cpu_port(cpu_dp, ds)
+ felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
+
+ return err;
}
-static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
+static int felix_trap_get_cpu_port(struct dsa_switch *ds,
+ const struct ocelot_vcap_filter *trap)
{
- ocelot->ports[port]->is_dsa_8021q_cpu = false;
+ struct dsa_port *dp;
+ int first_port;
+
+ if (WARN_ON(!trap->ingress_port_mask))
+ return -1;
- /* Restore PGID_CPU */
- ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
- PGID_CPU);
+ first_port = __ffs(trap->ingress_port_mask);
+ dp = dsa_to_port(ds, first_port);
- ocelot_apply_bridge_fwd_mask(ocelot);
+ return dp->cpu_dp->index;
}
-/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
- * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
- * tag_8021q CPU port.
+/* On switches with no extraction IRQ wired, trapped packets need to be
+ * replicated over Ethernet as well, otherwise we'd get no notification of
+ * their arrival when using the ocelot-8021q tagging protocol.
*/
-static int felix_setup_mmio_filtering(struct felix *felix)
+static int felix_update_trapping_destinations(struct dsa_switch *ds,
+ bool using_tag_8021q)
{
- unsigned long user_ports = dsa_user_ports(felix->ds);
- struct ocelot_vcap_filter *redirect_rule;
- struct ocelot_vcap_filter *tagging_rule;
- struct ocelot *ocelot = &felix->ocelot;
- struct dsa_switch *ds = felix->ds;
- int cpu = -1, port, ret;
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct ocelot_vcap_block *block_vcap_is2;
+ struct ocelot_vcap_filter *trap;
+ enum ocelot_mask_mode mask_mode;
+ unsigned long port_mask;
+ bool cpu_copy_ena;
+ int err;
- tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
- if (!tagging_rule)
- return -ENOMEM;
+ if (!felix->info->quirk_no_xtr_irq)
+ return 0;
- redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
- if (!redirect_rule) {
- kfree(tagging_rule);
- return -ENOMEM;
- }
+ /* We are sure that "cpu" was found, otherwise
+ * dsa_tree_setup_default_cpu() would have failed earlier.
+ */
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_cpu_port(ds, port)) {
- cpu = port;
- break;
+ /* Make sure all traps are set up for that destination */
+ list_for_each_entry(trap, &block_vcap_is2->rules, list) {
+ if (!trap->is_trap)
+ continue;
+
+ /* Figure out the current trapping destination */
+ if (using_tag_8021q) {
+ /* Redirect to the tag_8021q CPU port. If timestamps
+ * are necessary, also copy trapped packets to the CPU
+ * port module.
+ */
+ mask_mode = OCELOT_MASK_MODE_REDIRECT;
+ port_mask = BIT(felix_trap_get_cpu_port(ds, trap));
+ cpu_copy_ena = !!trap->take_ts;
+ } else {
+ /* Trap packets only to the CPU port module, which is
+ * redirected to the NPI port (the DSA CPU port)
+ */
+ mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ port_mask = 0;
+ cpu_copy_ena = true;
}
- }
- if (cpu < 0) {
- kfree(tagging_rule);
- kfree(redirect_rule);
- return -EINVAL;
- }
+ if (trap->action.mask_mode == mask_mode &&
+ trap->action.port_mask == port_mask &&
+ trap->action.cpu_copy_ena == cpu_copy_ena)
+ continue;
- tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
- *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
- *(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
- tagging_rule->ingress_port_mask = user_ports;
- tagging_rule->prio = 1;
- tagging_rule->id.cookie = ocelot->num_phys_ports;
- tagging_rule->id.tc_offload = false;
- tagging_rule->block_id = VCAP_IS1;
- tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
- tagging_rule->lookup = 0;
- tagging_rule->action.pag_override_mask = 0xff;
- tagging_rule->action.pag_val = ocelot->num_phys_ports;
-
- ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
- if (ret) {
- kfree(tagging_rule);
- kfree(redirect_rule);
- return ret;
- }
+ trap->action.mask_mode = mask_mode;
+ trap->action.port_mask = port_mask;
+ trap->action.cpu_copy_ena = cpu_copy_ena;
- redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
- redirect_rule->ingress_port_mask = user_ports;
- redirect_rule->pag = ocelot->num_phys_ports;
- redirect_rule->prio = 1;
- redirect_rule->id.cookie = ocelot->num_phys_ports;
- redirect_rule->id.tc_offload = false;
- redirect_rule->block_id = VCAP_IS2;
- redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
- redirect_rule->lookup = 0;
- redirect_rule->action.cpu_copy_ena = true;
- if (felix->info->quirk_no_xtr_irq) {
- /* Redirect to the tag_8021q CPU but also copy PTP packets to
- * the CPU port module
- */
- redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
- redirect_rule->action.port_mask = BIT(cpu);
- } else {
- /* Trap PTP packets only to the CPU port module (which is
- * redirected to the NPI port)
- */
- redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
- redirect_rule->action.port_mask = 0;
+ err = ocelot_vcap_filter_replace(ocelot, trap);
+ if (err)
+ return err;
}
- ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
- if (ret) {
- ocelot_vcap_filter_del(ocelot, tagging_rule);
- kfree(redirect_rule);
- return ret;
+ return 0;
+}
+
+/* The CPU port module is connected to the Node Processor Interface (NPI). This
+ * is the mode through which frames can be injected from and extracted to an
+ * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
+ * running Linux, and this forms a DSA setup together with the enetc or fman
+ * DSA master.
+ */
+static void felix_npi_port_init(struct ocelot *ocelot, int port)
+{
+ ocelot->npi = port;
+
+ ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
+ QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
+ QSYS_EXT_CPU_CFG);
+
+ /* NPI port Injection/Extraction configuration */
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
+ ocelot->npi_xtr_prefix);
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
+ ocelot->npi_inj_prefix);
+
+ /* Disable transmission of pause frames */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+}
+
+static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
+{
+ /* Restore hardware defaults */
+ int unused_port = ocelot->num_phys_ports + 2;
+
+ ocelot->npi = -1;
+
+ ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
+ QSYS_EXT_CPU_CFG);
+
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
+ OCELOT_TAG_PREFIX_DISABLED);
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
+ OCELOT_TAG_PREFIX_DISABLED);
+
+ /* Enable transmission of pause frames */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
+}
+
+static int felix_tag_npi_setup(struct dsa_switch *ds)
+{
+ struct dsa_port *dp, *first_cpu_dp = NULL;
+ struct ocelot *ocelot = ds->priv;
+
+ dsa_switch_for_each_user_port(dp, ds) {
+ if (first_cpu_dp && dp->cpu_dp != first_cpu_dp) {
+ dev_err(ds->dev, "Multiple NPI ports not supported\n");
+ return -EINVAL;
+ }
+
+ first_cpu_dp = dp->cpu_dp;
}
- /* The ownership of the CPU port module's queues might have just been
- * transferred to the tag_8021q tagger from the NPI-based tagger.
- * So there might still be all sorts of crap in the queues. On the
- * other hand, the MMIO-based matching of PTP frames is very brittle,
- * so we need to be careful that there are no extra frames to be
- * dequeued over MMIO, since we would never know to discard them.
- */
- ocelot_drain_cpu_queue(ocelot, 0);
+ if (!first_cpu_dp)
+ return -EINVAL;
+
+ felix_npi_port_init(ocelot, first_cpu_dp->index);
return 0;
}
-static int felix_teardown_mmio_filtering(struct felix *felix)
+static void felix_tag_npi_teardown(struct dsa_switch *ds)
{
- struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
- struct ocelot_vcap_block *block_vcap_is1;
- struct ocelot_vcap_block *block_vcap_is2;
- struct ocelot *ocelot = &felix->ocelot;
- int err;
+ struct ocelot *ocelot = ds->priv;
- block_vcap_is1 = &ocelot->block[VCAP_IS1];
- block_vcap_is2 = &ocelot->block[VCAP_IS2];
+ felix_npi_port_deinit(ocelot, ocelot->npi);
+}
- tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
- ocelot->num_phys_ports,
- false);
- if (!tagging_rule)
- return -ENOENT;
+static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds)
+{
+ struct ocelot *ocelot = ds->priv;
- err = ocelot_vcap_filter_del(ocelot, tagging_rule);
- if (err)
- return err;
+ return BIT(ocelot->num_phys_ports);
+}
- redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
- ocelot->num_phys_ports,
- false);
- if (!redirect_rule)
- return -ENOENT;
+static int felix_tag_npi_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ struct ocelot *ocelot = ds->priv;
- return ocelot_vcap_filter_del(ocelot, redirect_rule);
+ if (netif_is_lag_master(master)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG DSA master only supported using ocelot-8021q");
+ return -EOPNOTSUPP;
+ }
+
+ /* Changing the NPI port breaks user ports still assigned to the old
+ * one, so only allow it while they're down, and don't allow them to
+ * come back up until they're all changed to the new one.
+ */
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *slave = other_dp->slave;
+
+ if (other_dp != dp && (slave->flags & IFF_UP) &&
+ dsa_port_to_master(other_dp) != master) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change while old master still has users");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ felix_npi_port_deinit(ocelot, ocelot->npi);
+ felix_npi_port_init(ocelot, felix_cpu_port_for_master(ds, master));
+
+ return 0;
}
-static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
+/* Alternatively to using the NPI functionality, that same hardware MAC
+ * connected internally to the enetc or fman DSA master can be configured to
+ * use the software-defined tag_8021q frame format. As far as the hardware is
+ * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
+ * module are now disconnected from it, but can still be accessed through
+ * register-based MMIO.
+ */
+static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = {
+ .setup = felix_tag_npi_setup,
+ .teardown = felix_tag_npi_teardown,
+ .get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask,
+ .change_master = felix_tag_npi_change_master,
+};
+
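The two method tables in this file reference a struct felix_tag_proto_ops that the diff does not show; from the call sites it presumably carries the four hooks below. This is a reconstruction of the assumed declaration in felix.h, not the verbatim header.

/* Sketch of the ops structure assumed by the hunks above; field order and
 * its exact placement in felix.h are assumptions.
 */
struct felix_tag_proto_ops {
	int (*setup)(struct dsa_switch *ds);
	void (*teardown)(struct dsa_switch *ds);
	unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds);
	int (*change_master)(struct dsa_switch *ds, int port,
			     struct net_device *master,
			     struct netlink_ext_ack *extack);
};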
+static int felix_tag_8021q_setup(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- unsigned long cpu_flood;
- int port, err;
+ struct dsa_port *dp;
+ int err;
- felix_8021q_cpu_port_init(ocelot, cpu);
+ err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
+ if (err)
+ return err;
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
+ dsa_switch_for_each_cpu_port(dp, ds)
+ ocelot_port_setup_dsa_8021q_cpu(ocelot, dp->index);
+
+ dsa_switch_for_each_user_port(dp, ds)
+ ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index,
+ dp->cpu_dp->index);
+ dsa_switch_for_each_available_port(dp, ds)
/* This overwrites ocelot_init():
* Do not forward BPDU frames to the CPU port module,
* for 2 reasons:
@@ -414,202 +519,194 @@ static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
- ANA_PORT_CPU_FWD_BPDU_CFG, port);
- }
+ ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
- /* In tag_8021q mode, the CPU port module is unused, except for PTP
- * frames. So we want to disable flooding of any kind to the CPU port
- * module, since packets going there will end in a black hole.
+ /* The ownership of the CPU port module's queues might have just been
+ * transferred to the tag_8021q tagger from the NPI-based tagger.
+ * So there might still be all sorts of crap in the queues. On the
+ * other hand, the MMIO-based matching of PTP frames is very brittle,
+ * so we need to be careful that there are no extra frames to be
+ * dequeued over MMIO, since we would never know to discard them.
*/
- cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
- ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);
-
- err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD));
- if (err)
- return err;
-
- err = felix_setup_mmio_filtering(felix);
- if (err)
- goto out_tag_8021q_unregister;
+ ocelot_drain_cpu_queue(ocelot, 0);
return 0;
-
-out_tag_8021q_unregister:
- dsa_tag_8021q_unregister(ds);
- return err;
}
-static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
+static void felix_tag_8021q_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- int err, port;
-
- err = felix_teardown_mmio_filtering(felix);
- if (err)
- dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
- err);
-
- dsa_tag_8021q_unregister(ds);
-
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
+ struct dsa_port *dp;
+ dsa_switch_for_each_available_port(dp, ds)
/* Restore the logic from ocelot_init:
* do not forward BPDU frames to the front ports.
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
ANA_PORT_CPU_FWD_BPDU_CFG,
- port);
- }
+ dp->index);
+
+ dsa_switch_for_each_user_port(dp, ds)
+ ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index);
- felix_8021q_cpu_port_deinit(ocelot, cpu);
+ dsa_switch_for_each_cpu_port(dp, ds)
+ ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index);
+
+ dsa_tag_8021q_unregister(ds);
}
-/* The CPU port module is connected to the Node Processor Interface (NPI). This
- * is the mode through which frames can be injected from and extracted to an
- * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
- * running Linux, and this forms a DSA setup together with the enetc or fman
- * DSA master.
- */
-static void felix_npi_port_init(struct ocelot *ocelot, int port)
+static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
{
- ocelot->npi = port;
+ return dsa_cpu_ports(ds);
+}
- ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
- QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
- QSYS_EXT_CPU_CFG);
+static int felix_tag_8021q_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ int cpu = felix_cpu_port_for_master(ds, master);
+ struct ocelot *ocelot = ds->priv;
- /* NPI port Injection/Extraction configuration */
- ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
- ocelot->npi_xtr_prefix);
- ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
- ocelot->npi_inj_prefix);
+ ocelot_port_unassign_dsa_8021q_cpu(ocelot, port);
+ ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu);
- /* Disable transmission of pause frames */
- ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+ return felix_update_trapping_destinations(ds, true);
}
-static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
-{
- /* Restore hardware defaults */
- int unused_port = ocelot->num_phys_ports + 2;
+static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = {
+ .setup = felix_tag_8021q_setup,
+ .teardown = felix_tag_8021q_teardown,
+ .get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask,
+ .change_master = felix_tag_8021q_change_master,
+};
- ocelot->npi = -1;
+static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask,
+ bool uc, bool mc, bool bc)
+{
+ struct ocelot *ocelot = ds->priv;
+ unsigned long val;
- ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
- QSYS_EXT_CPU_CFG);
+ val = uc ? mask : 0;
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC);
- ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
- OCELOT_TAG_PREFIX_DISABLED);
- ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
- OCELOT_TAG_PREFIX_DISABLED);
+ val = mc ? mask : 0;
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC);
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4);
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6);
- /* Enable transmission of pause frames */
- ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
+ val = bc ? mask : 0;
+ ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_BC);
}
-static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
+static void
+felix_migrate_host_flood(struct dsa_switch *ds,
+ const struct felix_tag_proto_ops *proto_ops,
+ const struct felix_tag_proto_ops *old_proto_ops)
{
struct ocelot *ocelot = ds->priv;
- unsigned long cpu_flood;
-
- felix_npi_port_init(ocelot, cpu);
+ struct felix *felix = ocelot_to_felix(ocelot);
+ unsigned long mask;
- /* Include the CPU port module (and indirectly, the NPI port)
- * in the forwarding mask for unknown unicast - the hardware
- * default value for ANA_FLOODING_FLD_UNICAST excludes
- * BIT(ocelot->num_phys_ports), and so does ocelot_init,
- * since Ocelot relies on whitelisting MAC addresses towards
- * PGID_CPU.
- * We do this because DSA does not yet perform RX filtering,
- * and the NPI port does not perform source address learning,
- * so traffic sent to Linux is effectively unknown from the
- * switch's perspective.
- */
- cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
- ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);
+ if (old_proto_ops) {
+ mask = old_proto_ops->get_host_fwd_mask(ds);
+ felix_set_host_flood(ds, mask, false, false, false);
+ }
- return 0;
+ mask = proto_ops->get_host_fwd_mask(ds);
+ felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
+ !!felix->host_flood_mc_mask, true);
}
-static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
+static int felix_migrate_mdbs(struct dsa_switch *ds,
+ const struct felix_tag_proto_ops *proto_ops,
+ const struct felix_tag_proto_ops *old_proto_ops)
{
struct ocelot *ocelot = ds->priv;
+ unsigned long from, to;
+
+ if (!old_proto_ops)
+ return 0;
+
+ from = old_proto_ops->get_host_fwd_mask(ds);
+ to = proto_ops->get_host_fwd_mask(ds);
- felix_npi_port_deinit(ocelot, cpu);
+ return ocelot_migrate_mdbs(ocelot, from, to);
}
-static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
- enum dsa_tag_protocol proto)
+/* Configure the shared hardware resources for a transition between
+ * @old_proto_ops and @proto_ops.
+ * Manual migration is needed because as far as DSA is concerned, no change of
+ * the CPU port is taking place here, just of the tagging protocol.
+ */
+static int
+felix_tag_proto_setup_shared(struct dsa_switch *ds,
+ const struct felix_tag_proto_ops *proto_ops,
+ const struct felix_tag_proto_ops *old_proto_ops)
{
+ bool using_tag_8021q = (proto_ops == &felix_tag_8021q_proto_ops);
int err;
- switch (proto) {
- case DSA_TAG_PROTO_SEVILLE:
- case DSA_TAG_PROTO_OCELOT:
- err = felix_setup_tag_npi(ds, cpu);
- break;
- case DSA_TAG_PROTO_OCELOT_8021Q:
- err = felix_setup_tag_8021q(ds, cpu);
- break;
- default:
- err = -EPROTONOSUPPORT;
- }
+ err = felix_migrate_mdbs(ds, proto_ops, old_proto_ops);
+ if (err)
+ return err;
- return err;
-}
+ felix_update_trapping_destinations(ds, using_tag_8021q);
-static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu,
- enum dsa_tag_protocol proto)
-{
- switch (proto) {
- case DSA_TAG_PROTO_SEVILLE:
- case DSA_TAG_PROTO_OCELOT:
- felix_teardown_tag_npi(ds, cpu);
- break;
- case DSA_TAG_PROTO_OCELOT_8021Q:
- felix_teardown_tag_8021q(ds, cpu);
- break;
- default:
- break;
- }
+ felix_migrate_host_flood(ds, proto_ops, old_proto_ops);
+
+ return 0;
}
/* This always leaves the switch in a consistent state, because although the
* tag_8021q setup can fail, the NPI setup can't. So either the change is made,
* or the restoration is guaranteed to work.
*/
-static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
+static int felix_change_tag_protocol(struct dsa_switch *ds,
enum dsa_tag_protocol proto)
{
+ const struct felix_tag_proto_ops *old_proto_ops, *proto_ops;
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- enum dsa_tag_protocol old_proto = felix->tag_proto;
int err;
- if (proto != DSA_TAG_PROTO_SEVILLE &&
- proto != DSA_TAG_PROTO_OCELOT &&
- proto != DSA_TAG_PROTO_OCELOT_8021Q)
+ switch (proto) {
+ case DSA_TAG_PROTO_SEVILLE:
+ case DSA_TAG_PROTO_OCELOT:
+ proto_ops = &felix_tag_npi_proto_ops;
+ break;
+ case DSA_TAG_PROTO_OCELOT_8021Q:
+ proto_ops = &felix_tag_8021q_proto_ops;
+ break;
+ default:
return -EPROTONOSUPPORT;
+ }
- felix_del_tag_protocol(ds, cpu, old_proto);
+ old_proto_ops = felix->tag_proto_ops;
- err = felix_set_tag_protocol(ds, cpu, proto);
- if (err) {
- felix_set_tag_protocol(ds, cpu, old_proto);
- return err;
- }
+ if (proto_ops == old_proto_ops)
+ return 0;
+
+ err = proto_ops->setup(ds);
+ if (err)
+ goto setup_failed;
+
+ err = felix_tag_proto_setup_shared(ds, proto_ops, old_proto_ops);
+ if (err)
+ goto setup_shared_failed;
+
+ if (old_proto_ops)
+ old_proto_ops->teardown(ds);
+ felix->tag_proto_ops = proto_ops;
felix->tag_proto = proto;
return 0;
+
+setup_shared_failed:
+ proto_ops->teardown(ds);
+setup_failed:
+ return err;
}
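/* Illustrative note: the tagging protocol is switched at runtime from user
 * space, e.g. by writing to the DSA master's sysfs attribute ("eth0" being a
 * hypothetical master name):
 *
 *   echo ocelot-8021q > /sys/class/net/eth0/dsa/tagging
 *
 * The ordering above sets up the new protocol and the shared resources before
 * tearing down the old one, so a failure at any step leaves the previously
 * active protocol functional.
 */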
static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
@@ -622,6 +719,38 @@ static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
return felix->tag_proto;
}
+static void felix_port_set_host_flood(struct dsa_switch *ds, int port,
+ bool uc, bool mc)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ unsigned long mask;
+
+ if (uc)
+ felix->host_flood_uc_mask |= BIT(port);
+ else
+ felix->host_flood_uc_mask &= ~BIT(port);
+
+ if (mc)
+ felix->host_flood_mc_mask |= BIT(port);
+ else
+ felix->host_flood_mc_mask &= ~BIT(port);
+
+ mask = felix->tag_proto_ops->get_host_fwd_mask(ds);
+ felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask,
+ !!felix->host_flood_mc_mask, true);
+}
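/* Worked example (for illustration): host flooding is enabled or disabled for
 * the host forwarding mask as a whole, so the per-port requests above are
 * effectively OR-ed together. If only port 0 has requested unicast host
 * flooding, host_flood_uc_mask == BIT(0), which is nonzero, so unknown
 * unicast is flooded towards the host; it is turned off again only once every
 * port has cleared its bit.
 */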
+
+static int felix_port_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ return felix->tag_proto_ops->change_master(ds, port, master, extack);
+}
+
static int felix_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
@@ -632,6 +761,17 @@ static int felix_set_ageing_time(struct dsa_switch *ds,
return 0;
}
+static void felix_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+ int err;
+
+ err = ocelot_mact_flush(ocelot, port);
+ if (err)
+ dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n",
+ port, ERR_PTR(err));
+}
+
static int felix_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
@@ -641,35 +781,111 @@ static int felix_fdb_dump(struct dsa_switch *ds, int port,
}
static int felix_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct ocelot *ocelot = ds->priv;
- return ocelot_fdb_add(ocelot, port, addr, vid);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_port_is_cpu(dp) && !bridge_dev &&
+ dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+ return 0;
+
+ if (dsa_port_is_cpu(dp))
+ port = PGID_CPU;
+
+ return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev);
}
static int felix_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_port_is_cpu(dp) && !bridge_dev &&
+ dsa_fdb_present_in_other_db(ds, port, addr, vid, db))
+ return 0;
+
+ if (dsa_port_is_cpu(dp))
+ port = PGID_CPU;
+
+ return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev);
+}
+
+static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct net_device *bridge_dev = felix_classify_db(db);
+ struct ocelot *ocelot = ds->priv;
+
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev);
+}
+
+static int felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_fdb_del(ocelot, port, addr, vid);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev);
}
static int felix_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_port_mdb_add(ocelot, port, mdb);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_mdb_present_in_other_db(ds, port, mdb, db))
+ return 0;
+
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
+ return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev);
}
static int felix_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
+ struct net_device *bridge_dev = felix_classify_db(db);
struct ocelot *ocelot = ds->priv;
- return ocelot_port_mdb_del(ocelot, port, mdb);
+ if (IS_ERR(bridge_dev))
+ return PTR_ERR(bridge_dev);
+
+ if (dsa_is_cpu_port(ds, port) && !bridge_dev &&
+ dsa_mdb_present_in_other_db(ds, port, mdb, db))
+ return 0;
+
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
+ return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev);
}
static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
@@ -695,46 +911,63 @@ static int felix_bridge_flags(struct dsa_switch *ds, int port,
{
struct ocelot *ocelot = ds->priv;
+ if (port == ocelot->npi)
+ port = ocelot->num_phys_ports;
+
ocelot_port_bridge_flags(ocelot, port, val);
return 0;
}
static int felix_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_bridge_join(ocelot, port, br);
-
- return 0;
+ return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num,
+ extack);
}
static void felix_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_bridge_leave(ocelot, port, br);
+ ocelot_port_bridge_leave(ocelot, port, bridge.dev);
}
static int felix_lag_join(struct dsa_switch *ds, int port,
- struct net_device *bond,
- struct netdev_lag_upper_info *info)
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
+ int err;
- return ocelot_port_lag_join(ocelot, port, bond, info);
+ err = ocelot_port_lag_join(ocelot, port, lag.dev, info, extack);
+ if (err)
+ return err;
+
+ /* Update the logical LAG port that serves as tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return felix_port_change_master(ds, port, lag.dev, extack);
}
static int felix_lag_leave(struct dsa_switch *ds, int port,
- struct net_device *bond)
+ struct dsa_lag lag)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_lag_leave(ocelot, port, bond);
+ ocelot_port_lag_leave(ocelot, port, lag.dev);
- return 0;
+ /* Update the logical LAG port that serves as tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return felix_port_change_master(ds, port, lag.dev, NULL);
}
static int felix_lag_change(struct dsa_switch *ds, int port)
@@ -804,27 +1037,48 @@ static int felix_vlan_del(struct dsa_switch *ds, int port,
return ocelot_vlan_del(ocelot, port, vlan->vid);
}
-static void felix_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- if (felix->info->phylink_validate)
- felix->info->phylink_validate(ocelot, port, supported, state);
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
+
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD |
+ MAC_2500FD;
+
+ __set_bit(ocelot->ports[port]->phy_mode,
+ config->supported_interfaces);
}
static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int link_an_mode,
+ unsigned int mode,
const struct phylink_link_state *state)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- struct dsa_port *dp = dsa_to_port(ds, port);
- if (felix->pcs[port])
- phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs);
+ if (felix->info->phylink_mac_config)
+ felix->info->phylink_mac_config(ocelot, port, mode, state);
+}
+
+static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds,
+ int port,
+ phy_interface_t iface)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct phylink_pcs *pcs = NULL;
+
+ if (felix->pcs && felix->pcs[port])
+ pcs = felix->pcs[port];
+
+ return pcs;
}
static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
@@ -832,9 +1086,12 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
- FELIX_MAC_QUIRKS);
+ felix->info->quirks);
}
static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -849,12 +1106,33 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
interface, speed, duplex, tx_pause, rx_pause,
- FELIX_MAC_QUIRKS);
+ felix->info->quirks);
if (felix->info->port_sched_speed_set)
felix->info->port_sched_speed_set(ocelot, port, speed);
}
+static int felix_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+
+ if (!dsa_port_is_user(dp))
+ return 0;
+
+ if (ocelot->npi >= 0) {
+ struct net_device *master = dsa_port_to_master(dp);
+
+ if (felix_cpu_port_for_master(ds, master) != ocelot->npi) {
+ dev_err(ds->dev, "Multiple masters are not allowed\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
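/* Illustrative note: when an NPI port is in use (ocelot->npi >= 0), the switch
 * has a single hardware connection to the host, so a user port whose DSA
 * master does not correspond to that NPI port cannot be brought up; the check
 * above rejects it with -EINVAL.
 */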
+
static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
int i;
@@ -876,6 +1154,55 @@ static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
}
}
+static void felix_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_stats64(ocelot, port, stats);
+}
+
+static void felix_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_pause_stats(ocelot, port, pause_stats);
+}
+
+static void felix_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_rmon_stats(ocelot, port, rmon_stats, ranges);
+}
+
+static void felix_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_ctrl_stats(ocelot, port, ctrl_stats);
+}
+
+static void felix_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_mac_stats(ocelot, port, mac_stats);
+}
+
+static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats);
+}
+
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -906,11 +1233,29 @@ static int felix_get_ts_info(struct dsa_switch *ds, int port,
return ocelot_get_ts_info(ocelot, port, info);
}
+static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = {
+ [PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL,
+ [PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII,
+ [PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII,
+ [PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII,
+ [PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX,
+ [PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX,
+};
+
+static int felix_validate_phy_mode(struct felix *felix, int port,
+ phy_interface_t phy_mode)
+{
+ u32 modes = felix->info->port_modes[port];
+
+ if (felix_phy_match_table[phy_mode] & modes)
+ return 0;
+ return -EOPNOTSUPP;
+}
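/* For illustration: with a hypothetical port_modes[] entry of
 * OCELOT_PORT_MODE_SGMII | OCELOT_PORT_MODE_QSGMII, a device tree phy-mode of
 * "sgmii" maps to OCELOT_PORT_MODE_SGMII and is accepted, while "usxgmii"
 * maps to OCELOT_PORT_MODE_USXGMII and is rejected with -EOPNOTSUPP.
 */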
+
static int felix_parse_ports_node(struct felix *felix,
struct device_node *ports_node,
phy_interface_t *port_phy_modes)
{
- struct ocelot *ocelot = &felix->ocelot;
struct device *dev = felix->ocelot.dev;
struct device_node *child;
@@ -937,12 +1282,17 @@ static int felix_parse_ports_node(struct felix *felix,
return -ENODEV;
}
- err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
+ err = felix_validate_phy_mode(felix, port, phy_mode);
if (err < 0) {
- dev_err(dev, "Unsupported PHY mode %s on port %d\n",
- phy_modes(phy_mode), port);
+ dev_info(dev, "Unsupported PHY mode %s on port %d\n",
+ phy_modes(phy_mode), port);
of_node_put(child);
- return err;
+
+ /* Leave port_phy_modes[port] = 0, which is also
+ * PHY_INTERFACE_MODE_NA. This makes a best-effort
+ * attempt to bring up as many ports as possible.
+ */
+ continue;
}
port_phy_modes[port] = phy_mode;
@@ -974,11 +1324,62 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
return err;
}
+static struct regmap *felix_request_regmap_by_name(struct felix *felix,
+ const char *resource_name)
+{
+ struct ocelot *ocelot = &felix->ocelot;
+ struct resource res;
+ int i;
+
+ /* In an MFD configuration, regmaps are registered directly to the
+ * parent device before the child devices are probed, so there is no
+ * need to initialize a new one.
+ */
+ if (!felix->info->resources)
+ return dev_get_regmap(ocelot->dev->parent, resource_name);
+
+ for (i = 0; i < felix->info->num_resources; i++) {
+ if (strcmp(resource_name, felix->info->resources[i].name))
+ continue;
+
+ memcpy(&res, &felix->info->resources[i], sizeof(res));
+ res.start += felix->switch_base;
+ res.end += felix->switch_base;
+
+ return ocelot_regmap_init(ocelot, &res);
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct regmap *felix_request_regmap(struct felix *felix,
+ enum ocelot_target target)
+{
+ const char *resource_name = felix->info->resource_names[target];
+
+ /* If the driver didn't provide a resource name for the target,
+ * the resource is optional.
+ */
+ if (!resource_name)
+ return NULL;
+
+ return felix_request_regmap_by_name(felix, resource_name);
+}
+
+static struct regmap *felix_request_port_regmap(struct felix *felix, int port)
+{
+ char resource_name[32];
+
+ sprintf(resource_name, "port%d", port);
+
+ return felix_request_regmap_by_name(felix, resource_name);
+}
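/* Illustrative note: on a PCI-based instantiation such as the VSC9959, the
 * offsets in info->resources are relative to the device and are rebased by
 * felix->switch_base before mapping, whereas an MFD-based instantiation
 * leaves info->resources NULL and simply looks up, by name (e.g. "ana",
 * "qsys" or "port0"), the regmaps already registered by the parent device.
 */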
+
static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
struct ocelot *ocelot = &felix->ocelot;
phy_interface_t *port_phy_modes;
- struct resource res;
+ struct regmap *target;
int port, i, err;
ocelot->num_phys_ports = num_phys_ports;
@@ -988,10 +1389,12 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return -ENOMEM;
ocelot->map = felix->info->map;
- ocelot->stats_layout = felix->info->stats_layout;
- ocelot->num_stats = felix->info->num_stats;
ocelot->num_mact_rows = felix->info->num_mact_rows;
ocelot->vcap = felix->info->vcap;
+ ocelot->vcap_pol.base = felix->info->vcap_pol_base;
+ ocelot->vcap_pol.max = felix->info->vcap_pol_max;
+ ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2;
+ ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2;
ocelot->ops = felix->info->ops;
ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
@@ -1009,20 +1412,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
}
for (i = 0; i < TARGET_MAX; i++) {
- struct regmap *target;
-
- if (!felix->info->target_io_res[i].name)
- continue;
-
- memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->switch_base;
- res.end += felix->switch_base;
-
- target = ocelot_regmap_init(ocelot, &res);
+ target = felix_request_regmap(felix, i);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
- "Failed to map device memory space\n");
+ "Failed to map device memory space: %pe\n",
+ target);
kfree(port_phy_modes);
return PTR_ERR(target);
}
@@ -1039,7 +1433,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
for (port = 0; port < num_phys_ports; port++) {
struct ocelot_port *ocelot_port;
- struct regmap *target;
ocelot_port = devm_kzalloc(ocelot->dev,
sizeof(struct ocelot_port),
@@ -1051,16 +1444,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return -ENOMEM;
}
- memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->switch_base;
- res.end += felix->switch_base;
-
- target = ocelot_regmap_init(ocelot, &res);
+ target = felix_request_port_regmap(felix, port);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
- "Failed to map memory space for port %d\n",
- port);
+ "Failed to map memory space for port %d: %pe\n",
+ port, target);
kfree(port_phy_modes);
return PTR_ERR(target);
}
@@ -1068,6 +1456,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot_port->phy_mode = port_phy_modes[port];
ocelot_port->ocelot = ocelot;
ocelot_port->target = target;
+ ocelot_port->index = port;
ocelot->ports[port] = ocelot_port;
}
@@ -1143,55 +1532,38 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
kfree(xmit_work);
}
-static int felix_port_setup_tagger_data(struct dsa_switch *ds, int port)
+static int felix_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- struct felix_port *felix_port;
+ struct ocelot_8021q_tagger_data *tagger_data;
- if (!dsa_port_is_user(dp))
+ switch (proto) {
+ case DSA_TAG_PROTO_OCELOT_8021Q:
+ tagger_data = ocelot_8021q_tagger_data(ds);
+ tagger_data->xmit_work_fn = felix_port_deferred_xmit;
return 0;
-
- felix_port = kzalloc(sizeof(*felix_port), GFP_KERNEL);
- if (!felix_port)
- return -ENOMEM;
-
- felix_port->xmit_worker = felix->xmit_worker;
- felix_port->xmit_work_fn = felix_port_deferred_xmit;
-
- dp->priv = felix_port;
-
- return 0;
-}
-
-static void felix_port_teardown_tagger_data(struct dsa_switch *ds, int port)
-{
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct felix_port *felix_port = dp->priv;
-
- if (!felix_port)
- return;
-
- dp->priv = NULL;
- kfree(felix_port);
+ case DSA_TAG_PROTO_OCELOT:
+ case DSA_TAG_PROTO_SEVILLE:
+ return 0;
+ default:
+ return -EPROTONOSUPPORT;
+ }
}
-/* Hardware initialization done here so that we can allocate structures with
- * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
- * us to allocate structures twice (leak memory) and map PCI memory twice
- * (which will not work).
- */
static int felix_setup(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- int port, err;
+ struct dsa_port *dp;
+ int err;
err = felix_init_structs(felix, ds->num_ports);
if (err)
return err;
+ if (ocelot->targets[HSIO])
+ ocelot_pll5_init(ocelot);
+
err = ocelot_init(ocelot);
if (err)
goto out_mdiobus_free;
@@ -1205,64 +1577,39 @@ static int felix_setup(struct dsa_switch *ds)
}
}
- felix->xmit_worker = kthread_create_worker(0, "felix_xmit");
- if (IS_ERR(felix->xmit_worker)) {
- err = PTR_ERR(felix->xmit_worker);
- goto out_deinit_timestamp;
- }
-
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
+ dsa_switch_for_each_available_port(dp, ds) {
+ ocelot_init_port(ocelot, dp->index);
- ocelot_init_port(ocelot, port);
+ if (felix->info->configure_serdes)
+ felix->info->configure_serdes(ocelot, dp->index,
+ dp->dn);
/* Set the default QoS Classification based on PCP and DEI
 * bits of the VLAN tag.
*/
- felix_port_qos_map_init(ocelot, port);
-
- err = felix_port_setup_tagger_data(ds, port);
- if (err) {
- dev_err(ds->dev,
- "port %d failed to set up tagger data: %pe\n",
- port, ERR_PTR(err));
- goto out_deinit_ports;
- }
+ felix_port_qos_map_init(ocelot, dp->index);
}
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_deinit_ports;
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
-
- /* The initial tag protocol is NPI which always returns 0, so
- * there's no real point in checking for errors.
- */
- felix_set_tag_protocol(ds, port, felix->tag_proto);
- break;
- }
+ /* The initial tag protocol is NPI, which won't fail during initial
+ * setup, so there's no real point in checking for errors.
+ */
+ felix_change_tag_protocol(ds, felix->tag_proto);
ds->mtu_enforcement_ingress = true;
ds->assisted_learning_on_cpu_port = true;
+ ds->fdb_isolation = true;
+ ds->max_num_bridges = ds->num_ports;
return 0;
out_deinit_ports:
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- felix_port_teardown_tagger_data(ds, port);
- ocelot_deinit_port(ocelot, port);
- }
-
- kthread_destroy_worker(felix->xmit_worker);
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
-out_deinit_timestamp:
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
@@ -1277,25 +1624,13 @@ static void felix_teardown(struct dsa_switch *ds)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- int port;
-
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
-
- felix_del_tag_protocol(ds, port, felix->tag_proto);
- break;
- }
-
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
+ struct dsa_port *dp;
- felix_port_teardown_tagger_data(ds, port);
- ocelot_deinit_port(ocelot, port);
- }
+ if (felix->tag_proto_ops)
+ felix->tag_proto_ops->teardown(ds);
- kthread_destroy_worker(felix->xmit_worker);
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
ocelot_devlink_sb_unregister(ocelot);
ocelot_deinit_timestamp(ocelot);
@@ -1317,14 +1652,23 @@ static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
struct ifreq *ifr)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
- return ocelot_hwstamp_set(ocelot, port, ifr);
+ err = ocelot_hwstamp_set(ocelot, port, ifr);
+ if (err)
+ return err;
+
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
+
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
}
-static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
+static bool felix_check_xtr_pkt(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
- int err, grp = 0;
+ int err = 0, grp = 0;
if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
return false;
@@ -1332,9 +1676,6 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
if (!felix->info->quirk_no_xtr_irq)
return false;
- if (ptp_type == PTP_CLASS_NONE)
- return false;
-
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
unsigned int type;
@@ -1364,8 +1705,12 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
}
out:
- if (err < 0)
+ if (err < 0) {
+ dev_err_ratelimited(ocelot->dev,
+ "Error during packet extraction: %pe\n",
+ ERR_PTR(err));
ocelot_drain_cpu_queue(ocelot, 0);
+ }
return true;
}
@@ -1385,7 +1730,7 @@ static bool felix_rxtstamp(struct dsa_switch *ds, int port,
* MMIO in the CPU port module, and inject that into the stack from
* ocelot_xtr_poll().
*/
- if (felix_check_xtr_pkt(ocelot, type)) {
+ if (felix_check_xtr_pkt(ocelot)) {
kfree_skb(skb);
return true;
}
@@ -1428,9 +1773,18 @@ static void felix_txtstamp(struct dsa_switch *ds, int port,
static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct ocelot *ocelot = ds->priv;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct felix *felix = ocelot_to_felix(ocelot);
ocelot_port_set_maxlen(ocelot, port, new_mtu);
+ mutex_lock(&ocelot->tas_lock);
+
+ if (ocelot_port->taprio && felix->info->tas_guard_bands_update)
+ felix->info->tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->tas_lock);
+
return 0;
}
@@ -1445,8 +1799,17 @@ static int felix_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
- return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+ err = ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+ if (err)
+ return err;
+
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
+
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
}
static int felix_cls_flower_del(struct dsa_switch *ds, int port,
@@ -1484,6 +1847,24 @@ static void felix_port_policer_del(struct dsa_switch *ds, int port)
ocelot_port_policer_del(ocelot, port);
}
+static int felix_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port,
+ ingress, extack);
+}
+
+static void felix_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_mirror_del(ocelot, port, mirror->ingress);
+}
+
static int felix_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
@@ -1633,23 +2014,101 @@ felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}
+static int felix_port_get_default_prio(struct dsa_switch *ds, int port)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_default_prio(ocelot, port);
+}
+
+static int felix_port_set_default_prio(struct dsa_switch *ds, int port,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_set_default_prio(ocelot, port, prio);
+}
+
+static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_dscp_prio(ocelot, port, dscp);
+}
+
+static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio);
+}
+
+static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
+ u8 prio)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio);
+}
+
+static int felix_get_mm(struct dsa_switch *ds, int port,
+ struct ethtool_mm_state *state)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_mm(ocelot, port, state);
+}
+
+static int felix_set_mm(struct dsa_switch *ds, int port,
+ struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_set_mm(ocelot, port, cfg, extack);
+}
+
+static void felix_get_mm_stats(struct dsa_switch *ds, int port,
+ struct ethtool_mm_stats *stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_mm_stats(ocelot, port, stats);
+}
+
const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.change_tag_protocol = felix_change_tag_protocol,
+ .connect_tag_protocol = felix_connect_tag_protocol,
.setup = felix_setup,
.teardown = felix_teardown,
.set_ageing_time = felix_set_ageing_time,
+ .get_mm = felix_get_mm,
+ .set_mm = felix_set_mm,
+ .get_mm_stats = felix_get_mm_stats,
+ .get_stats64 = felix_get_stats64,
+ .get_pause_stats = felix_get_pause_stats,
+ .get_rmon_stats = felix_get_rmon_stats,
+ .get_eth_ctrl_stats = felix_get_eth_ctrl_stats,
+ .get_eth_mac_stats = felix_get_eth_mac_stats,
+ .get_eth_phy_stats = felix_get_eth_phy_stats,
.get_strings = felix_get_strings,
.get_ethtool_stats = felix_get_ethtool_stats,
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
- .phylink_validate = felix_phylink_validate,
+ .phylink_get_caps = felix_phylink_get_caps,
.phylink_mac_config = felix_phylink_mac_config,
+ .phylink_mac_select_pcs = felix_phylink_mac_select_pcs,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
+ .port_enable = felix_port_enable,
+ .port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
.port_fdb_del = felix_fdb_del,
+ .lag_fdb_add = felix_lag_fdb_add,
+ .lag_fdb_del = felix_lag_fdb_del,
.port_mdb_add = felix_mdb_add,
.port_mdb_del = felix_mdb_del,
.port_pre_bridge_flags = felix_pre_bridge_flags,
@@ -1671,6 +2130,8 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_max_mtu = felix_get_max_mtu,
.port_policer_add = felix_port_policer_add,
.port_policer_del = felix_port_policer_del,
+ .port_mirror_add = felix_port_mirror_add,
+ .port_mirror_del = felix_port_mirror_del,
.cls_flower_add = felix_cls_flower_add,
.cls_flower_del = felix_cls_flower_del,
.cls_flower_stats = felix_cls_flower_stats,
@@ -1691,7 +2152,15 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_mrp_del_ring_role = felix_mrp_del_ring_role,
.tag_8021q_vlan_add = felix_tag_8021q_vlan_add,
.tag_8021q_vlan_del = felix_tag_8021q_vlan_del,
+ .port_get_default_prio = felix_port_get_default_prio,
+ .port_set_default_prio = felix_port_set_default_prio,
+ .port_get_dscp_prio = felix_port_get_dscp_prio,
+ .port_add_dscp_prio = felix_port_add_dscp_prio,
+ .port_del_dscp_prio = felix_port_del_dscp_prio,
+ .port_set_host_flood = felix_port_set_host_flood,
+ .port_change_master = felix_port_change_master,
};
+EXPORT_SYMBOL_GPL(felix_switch_ops);
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
@@ -1703,6 +2172,7 @@ struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
return dsa_to_port(ds, port)->slave;
}
+EXPORT_SYMBOL_GPL(felix_port_to_netdev);
int felix_netdev_to_port(struct net_device *dev)
{
@@ -1714,3 +2184,7 @@ int felix_netdev_to_port(struct net_device *dev)
return dp->index;
}
+EXPORT_SYMBOL_GPL(felix_netdev_to_port);
+
+MODULE_DESCRIPTION("Felix DSA library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index be3e42e135c0..96008c046da5 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -7,23 +7,39 @@
#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
#define FELIX_MAC_QUIRKS OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION
+#define OCELOT_PORT_MODE_NONE 0
+#define OCELOT_PORT_MODE_INTERNAL BIT(0)
+#define OCELOT_PORT_MODE_SGMII BIT(1)
+#define OCELOT_PORT_MODE_QSGMII BIT(2)
+#define OCELOT_PORT_MODE_2500BASEX BIT(3)
+#define OCELOT_PORT_MODE_USXGMII BIT(4)
+#define OCELOT_PORT_MODE_1000BASEX BIT(5)
+
+struct device_node;
+
/* Platform-specific information */
struct felix_info {
- const struct resource *target_io_res;
- const struct resource *port_io_res;
- const struct resource *imdio_res;
+ /* Hardcoded resources provided by the hardware instantiation. */
+ const struct resource *resources;
+ size_t num_resources;
+ /* Names of the mandatory resources that will be requested during
+ * probe. Must have TARGET_MAX elements, since it is indexed by target.
+ */
+ const char *const *resource_names;
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
+ const u32 *port_modes;
int num_mact_rows;
- const struct ocelot_stat_layout *stats_layout;
- unsigned int num_stats;
int num_ports;
int num_tx_queues;
struct vcap_props *vcap;
- int switch_pci_bar;
- int imdio_pci_bar;
+ u16 vcap_pol_base;
+ u16 vcap_pol_max;
+ u16 vcap_pol_base2;
+ u16 vcap_pol_max2;
const struct ptp_clock_info *ptp_caps;
+ unsigned long quirks;
/* Some Ocelot switches are integrated into the SoC without the
* extraction IRQ line connected to the ARM GIC. By enabling this
@@ -39,15 +55,32 @@ struct felix_info {
int (*mdio_bus_alloc)(struct ocelot *ocelot);
void (*mdio_bus_free)(struct ocelot *ocelot);
- void (*phylink_validate)(struct ocelot *ocelot, int port,
- unsigned long *supported,
- struct phylink_link_state *state);
- int (*prevalidate_phy_mode)(struct ocelot *ocelot, int port,
- phy_interface_t phy_mode);
int (*port_setup_tc)(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data);
+ void (*tas_guard_bands_update)(struct ocelot *ocelot, int port);
void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
u32 speed);
+ void (*phylink_mac_config)(struct ocelot *ocelot, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+ int (*configure_serdes)(struct ocelot *ocelot, int port,
+ struct device_node *portnp);
+};
+
+/* Methods for initializing the hardware resources specific to a tagging
+ * protocol (like the NPI port, for "ocelot" or "seville", or the VCAP TCAMs,
+ * for "ocelot-8021q").
+ * It is important that the resources configured here do not have side effects
+ * for the other tagging protocols. If they do, their configuration needs to
+ * go to felix_tag_proto_setup_shared().
+ */
+struct felix_tag_proto_ops {
+ int (*setup)(struct dsa_switch *ds);
+ void (*teardown)(struct dsa_switch *ds);
+ unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds);
+ int (*change_master)(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack);
};
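/* A minimal sketch of a provider (hypothetical names, for illustration only):
 *
 *	static const struct felix_tag_proto_ops example_proto_ops = {
 *		.setup			= example_setup,
 *		.teardown		= example_teardown,
 *		.get_host_fwd_mask	= example_get_host_fwd_mask,
 *		.change_master		= example_change_master,
 *	};
 *
 * felix.c selects between such instances in felix_change_tag_protocol() based
 * on the requested DSA tagging protocol.
 */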
extern const struct dsa_switch_ops felix_switch_ops;
@@ -58,11 +91,13 @@ struct felix {
const struct felix_info *info;
struct ocelot ocelot;
struct mii_bus *imdio;
- struct lynx_pcs **pcs;
+ struct phylink_pcs **pcs;
resource_size_t switch_base;
- resource_size_t imdio_base;
enum dsa_tag_protocol tag_proto;
+ const struct felix_tag_proto_ops *tag_proto_ops;
struct kthread_worker *xmit_worker;
+ unsigned long host_flood_uc_mask;
+ unsigned long host_flood_mc_mask;
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 45c5ec7a83ea..cfb3faeaa5bf 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -5,8 +5,11 @@
#include <linux/fsl/enetc_mdio.h>
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/ocelot_ana.h>
+#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
+#include <net/tc_act/tc_gate.h>
#include <soc/mscc/ocelot.h>
#include <linux/dsa/ocelot.h>
#include <linux/pcs-lynx.h>
@@ -14,9 +17,32 @@
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/pci.h>
+#include <linux/time.h>
#include "felix.h"
+#define VSC9959_NUM_PORTS 6
+
#define VSC9959_TAS_GCL_ENTRY_MAX 63
+#define VSC9959_TAS_MIN_GATE_LEN_NS 33
+#define VSC9959_VCAP_POLICER_BASE 63
+#define VSC9959_VCAP_POLICER_MAX 383
+#define VSC9959_SWITCH_PCI_BAR 4
+#define VSC9959_IMDIO_PCI_BAR 0
+
+#define VSC9959_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII | \
+ OCELOT_PORT_MODE_1000BASEX | \
+ OCELOT_PORT_MODE_2500BASEX | \
+ OCELOT_PORT_MODE_USXGMII)
+
+static const u32 vsc9959_port_modes[VSC9959_NUM_PORTS] = {
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ VSC9959_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+};
static const u32 vsc9959_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x0089a0),
@@ -250,27 +276,139 @@ static const u32 vsc9959_rew_regmap[] = {
static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
- REG(SYS_COUNT_RX_LONGS, 0x000044),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
+ REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
+ REG(SYS_COUNT_RX_ASSEMBLY_ERRS, 0x0000b0),
+ REG(SYS_COUNT_RX_SMD_ERRS, 0x0000b4),
+ REG(SYS_COUNT_RX_ASSEMBLY_OK, 0x0000b8),
+ REG(SYS_COUNT_RX_MERGE_FRAGMENTS, 0x0000bc),
+ REG(SYS_COUNT_RX_PMAC_OCTETS, 0x0000c0),
+ REG(SYS_COUNT_RX_PMAC_UNICAST, 0x0000c4),
+ REG(SYS_COUNT_RX_PMAC_MULTICAST, 0x0000c8),
+ REG(SYS_COUNT_RX_PMAC_BROADCAST, 0x0000cc),
+ REG(SYS_COUNT_RX_PMAC_SHORTS, 0x0000d0),
+ REG(SYS_COUNT_RX_PMAC_FRAGMENTS, 0x0000d4),
+ REG(SYS_COUNT_RX_PMAC_JABBERS, 0x0000d8),
+ REG(SYS_COUNT_RX_PMAC_CRC_ALIGN_ERRS, 0x0000dc),
+ REG(SYS_COUNT_RX_PMAC_SYM_ERRS, 0x0000e0),
+ REG(SYS_COUNT_RX_PMAC_64, 0x0000e4),
+ REG(SYS_COUNT_RX_PMAC_65_127, 0x0000e8),
+ REG(SYS_COUNT_RX_PMAC_128_255, 0x0000ec),
+ REG(SYS_COUNT_RX_PMAC_256_511, 0x0000f0),
+ REG(SYS_COUNT_RX_PMAC_512_1023, 0x0000f4),
+ REG(SYS_COUNT_RX_PMAC_1024_1526, 0x0000f8),
+ REG(SYS_COUNT_RX_PMAC_1527_MAX, 0x0000fc),
+ REG(SYS_COUNT_RX_PMAC_PAUSE, 0x000100),
+ REG(SYS_COUNT_RX_PMAC_CONTROL, 0x000104),
+ REG(SYS_COUNT_RX_PMAC_LONGS, 0x000108),
REG(SYS_COUNT_TX_OCTETS, 0x000200),
+ REG(SYS_COUNT_TX_UNICAST, 0x000204),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000208),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00020c),
REG(SYS_COUNT_TX_COLLISION, 0x000210),
REG(SYS_COUNT_TX_DROPS, 0x000214),
+ REG(SYS_COUNT_TX_PAUSE, 0x000218),
REG(SYS_COUNT_TX_64, 0x00021c),
REG(SYS_COUNT_TX_65_127, 0x000220),
- REG(SYS_COUNT_TX_128_511, 0x000224),
- REG(SYS_COUNT_TX_512_1023, 0x000228),
- REG(SYS_COUNT_TX_1024_1526, 0x00022c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000230),
- REG(SYS_COUNT_TX_AGING, 0x000278),
+ REG(SYS_COUNT_TX_128_255, 0x000224),
+ REG(SYS_COUNT_TX_256_511, 0x000228),
+ REG(SYS_COUNT_TX_512_1023, 0x00022c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000230),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000234),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000238),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00023c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000240),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000244),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000248),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00024c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000250),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000254),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000258),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00025c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000260),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000264),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000268),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00026c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274),
+ REG(SYS_COUNT_TX_AGED, 0x000278),
+ REG(SYS_COUNT_TX_MM_HOLD, 0x00027c),
+ REG(SYS_COUNT_TX_MERGE_FRAGMENTS, 0x000280),
+ REG(SYS_COUNT_TX_PMAC_OCTETS, 0x000284),
+ REG(SYS_COUNT_TX_PMAC_UNICAST, 0x000288),
+ REG(SYS_COUNT_TX_PMAC_MULTICAST, 0x00028c),
+ REG(SYS_COUNT_TX_PMAC_BROADCAST, 0x000290),
+ REG(SYS_COUNT_TX_PMAC_PAUSE, 0x000294),
+ REG(SYS_COUNT_TX_PMAC_64, 0x000298),
+ REG(SYS_COUNT_TX_PMAC_65_127, 0x00029c),
+ REG(SYS_COUNT_TX_PMAC_128_255, 0x0002a0),
+ REG(SYS_COUNT_TX_PMAC_256_511, 0x0002a4),
+ REG(SYS_COUNT_TX_PMAC_512_1023, 0x0002a8),
+ REG(SYS_COUNT_TX_PMAC_1024_1526, 0x0002ac),
+ REG(SYS_COUNT_TX_PMAC_1527_MAX, 0x0002b0),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000400),
+ REG(SYS_COUNT_DROP_TAIL, 0x000404),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00040c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000410),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000414),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000418),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00041c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000420),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000424),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000428),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00042c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000430),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000434),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000438),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00043c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000440),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000444),
+ REG(SYS_COUNT_SF_MATCHING_FRAMES, 0x000800),
+ REG(SYS_COUNT_SF_NOT_PASSING_FRAMES, 0x000804),
+ REG(SYS_COUNT_SF_NOT_PASSING_SDU, 0x000808),
+ REG(SYS_COUNT_SF_RED_FRAMES, 0x00080c),
REG(SYS_RESET_CFG, 0x000e00),
REG(SYS_SR_ETYPE_CFG, 0x000e04),
REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
@@ -292,7 +430,6 @@ static const u32 vsc9959_sys_regmap[] = {
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
- REG_RESERVED(SYS_CNT),
REG(SYS_PTP_STATUS, 0x000f14),
REG(SYS_PTP_TXSTAMP, 0x000f18),
REG(SYS_PTP_NXT, 0x000f1c),
@@ -340,6 +477,9 @@ static const u32 vsc9959_dev_gmii_regmap[] = {
REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c),
REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40),
REG(DEV_MAC_STICKY, 0x44),
+ REG(DEV_MM_ENABLE_CONFIG, 0x48),
+ REG(DEV_MM_VERIF_CONFIG, 0x4C),
+ REG(DEV_MM_STATUS, 0x50),
REG_RESERVED(PCS1G_CFG),
REG_RESERVED(PCS1G_MODE_CFG),
REG_RESERVED(PCS1G_SD_CFG),
@@ -378,100 +518,43 @@ static const u32 *vsc9959_regmap[TARGET_MAX] = {
};
/* Addresses are relative to the PCI device's base address */
-static const struct resource vsc9959_target_io_res[TARGET_MAX] = {
- [ANA] = {
- .start = 0x0280000,
- .end = 0x028ffff,
- .name = "ana",
- },
- [QS] = {
- .start = 0x0080000,
- .end = 0x00800ff,
- .name = "qs",
- },
- [QSYS] = {
- .start = 0x0200000,
- .end = 0x021ffff,
- .name = "qsys",
- },
- [REW] = {
- .start = 0x0030000,
- .end = 0x003ffff,
- .name = "rew",
- },
- [SYS] = {
- .start = 0x0010000,
- .end = 0x001ffff,
- .name = "sys",
- },
- [S0] = {
- .start = 0x0040000,
- .end = 0x00403ff,
- .name = "s0",
- },
- [S1] = {
- .start = 0x0050000,
- .end = 0x00503ff,
- .name = "s1",
- },
- [S2] = {
- .start = 0x0060000,
- .end = 0x00603ff,
- .name = "s2",
- },
- [PTP] = {
- .start = 0x0090000,
- .end = 0x00900cb,
- .name = "ptp",
- },
- [GCB] = {
- .start = 0x0070000,
- .end = 0x00701ff,
- .name = "devcpu_gcb",
- },
+static const struct resource vsc9959_resources[] = {
+ DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
+ DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
+ DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
+ DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
+ DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
+ DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
+ DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
+ DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
+ DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
+ DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
+ DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
+ DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
+ DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
+ DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
+ DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
+ DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
};
-static const struct resource vsc9959_port_io_res[] = {
- {
- .start = 0x0100000,
- .end = 0x010ffff,
- .name = "port0",
- },
- {
- .start = 0x0110000,
- .end = 0x011ffff,
- .name = "port1",
- },
- {
- .start = 0x0120000,
- .end = 0x012ffff,
- .name = "port2",
- },
- {
- .start = 0x0130000,
- .end = 0x013ffff,
- .name = "port3",
- },
- {
- .start = 0x0140000,
- .end = 0x014ffff,
- .name = "port4",
- },
- {
- .start = 0x0150000,
- .end = 0x015ffff,
- .name = "port5",
- },
+static const char * const vsc9959_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [GCB] = "devcpu_gcb",
+ [QS] = "qs",
+ [PTP] = "ptp",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
};
/* Port MAC 0 Internal MDIO bus through which the SerDes acting as an
* SGMII/QSGMII MAC PCS can be found.
*/
-static const struct resource vsc9959_imdio_res = {
- .start = 0x8030,
- .end = 0x8040,
- .name = "imdio",
-};
+static const struct resource vsc9959_imdio_res =
+ DEFINE_RES_MEM_NAMED(0x8030, 0x10, "imdio");
static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
@@ -523,101 +606,6 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4),
};
-static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x80, .name = "tx_octets", },
- { .offset = 0x81, .name = "tx_unicast", },
- { .offset = 0x82, .name = "tx_multicast", },
- { .offset = 0x83, .name = "tx_broadcast", },
- { .offset = 0x84, .name = "tx_collision", },
- { .offset = 0x85, .name = "tx_drops", },
- { .offset = 0x86, .name = "tx_pause", },
- { .offset = 0x87, .name = "tx_frames_below_65_octets", },
- { .offset = 0x88, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x89, .name = "tx_frames_128_255_octets", },
- { .offset = 0x8B, .name = "tx_frames_256_511_octets", },
- { .offset = 0x8C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x8D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x8E, .name = "tx_yellow_prio_0", },
- { .offset = 0x8F, .name = "tx_yellow_prio_1", },
- { .offset = 0x90, .name = "tx_yellow_prio_2", },
- { .offset = 0x91, .name = "tx_yellow_prio_3", },
- { .offset = 0x92, .name = "tx_yellow_prio_4", },
- { .offset = 0x93, .name = "tx_yellow_prio_5", },
- { .offset = 0x94, .name = "tx_yellow_prio_6", },
- { .offset = 0x95, .name = "tx_yellow_prio_7", },
- { .offset = 0x96, .name = "tx_green_prio_0", },
- { .offset = 0x97, .name = "tx_green_prio_1", },
- { .offset = 0x98, .name = "tx_green_prio_2", },
- { .offset = 0x99, .name = "tx_green_prio_3", },
- { .offset = 0x9A, .name = "tx_green_prio_4", },
- { .offset = 0x9B, .name = "tx_green_prio_5", },
- { .offset = 0x9C, .name = "tx_green_prio_6", },
- { .offset = 0x9D, .name = "tx_green_prio_7", },
- { .offset = 0x9E, .name = "tx_aged", },
- { .offset = 0x100, .name = "drop_local", },
- { .offset = 0x101, .name = "drop_tail", },
- { .offset = 0x102, .name = "drop_yellow_prio_0", },
- { .offset = 0x103, .name = "drop_yellow_prio_1", },
- { .offset = 0x104, .name = "drop_yellow_prio_2", },
- { .offset = 0x105, .name = "drop_yellow_prio_3", },
- { .offset = 0x106, .name = "drop_yellow_prio_4", },
- { .offset = 0x107, .name = "drop_yellow_prio_5", },
- { .offset = 0x108, .name = "drop_yellow_prio_6", },
- { .offset = 0x109, .name = "drop_yellow_prio_7", },
- { .offset = 0x10A, .name = "drop_green_prio_0", },
- { .offset = 0x10B, .name = "drop_green_prio_1", },
- { .offset = 0x10C, .name = "drop_green_prio_2", },
- { .offset = 0x10D, .name = "drop_green_prio_3", },
- { .offset = 0x10E, .name = "drop_green_prio_4", },
- { .offset = 0x10F, .name = "drop_green_prio_5", },
- { .offset = 0x110, .name = "drop_green_prio_6", },
- { .offset = 0x111, .name = "drop_green_prio_7", },
-};
-
static const struct vcap_field vsc9959_vcap_es0_keys[] = {
[VCAP_ES0_EGR_PORT] = { 0, 3},
[VCAP_ES0_IGR_PORT] = { 3, 3},
@@ -934,62 +922,6 @@ static int vsc9959_reset(struct ocelot *ocelot)
return 0;
}
-static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != ocelot_port->phy_mode) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX ||
- state->interface == PHY_INTERFACE_MODE_USXGMII) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
-static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port,
- phy_interface_t phy_mode)
-{
- switch (phy_mode) {
- case PHY_INTERFACE_MODE_INTERNAL:
- if (port != 4 && port != 5)
- return -ENOTSUPP;
- return 0;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_2500BASEX:
- /* Not supported on internal to-CPU ports */
- if (port == 4 || port == 5)
- return -ENOTSUPP;
- return 0;
- default:
- return -ENOTSUPP;
- }
-}
-
/* Watermark encode
* Bit 8: Unit; 0:1, 1:16
* Bit 7-0: Value to be multiplied with unit
@@ -1020,20 +952,13 @@ static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
*maxuse = val & GENMASK(11, 0);
}
-static const struct ocelot_ops vsc9959_ops = {
- .reset = vsc9959_reset,
- .wm_enc = vsc9959_wm_enc,
- .wm_dec = vsc9959_wm_dec,
- .wm_stat = vsc9959_wm_stat,
- .port_to_netdev = felix_port_to_netdev,
- .netdev_to_port = felix_netdev_to_port,
-};
-
static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
{
+ struct pci_dev *pdev = to_pci_dev(ocelot->dev);
struct felix *felix = ocelot_to_felix(ocelot);
struct enetc_mdio_priv *mdio_priv;
struct device *dev = ocelot->dev;
+ resource_size_t imdio_base;
void __iomem *imdio_regs;
struct resource res;
struct enetc_hw *hw;
@@ -1042,17 +967,18 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
int rc;
felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
- sizeof(struct lynx_pcs *),
+ sizeof(struct phylink_pcs *),
GFP_KERNEL);
if (!felix->pcs) {
dev_err(dev, "failed to allocate array for PCS PHYs\n");
return -ENOMEM;
}
- memcpy(&res, felix->info->imdio_res, sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->imdio_base;
- res.end += felix->imdio_base;
+ imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR);
+
+ memcpy(&res, &vsc9959_imdio_res, sizeof(res));
+ res.start += imdio_base;
+ res.end += imdio_base;
imdio_regs = devm_ioremap_resource(dev, &res);
if (IS_ERR(imdio_regs))
@@ -1064,13 +990,15 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return PTR_ERR(hw);
}
- bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
if (!bus)
return -ENOMEM;
bus->name = "VSC9959 internal MDIO bus";
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = hw;
@@ -1084,6 +1012,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
rc = mdiobus_register(bus);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
+ mdiobus_free(bus);
return rc;
}
@@ -1091,8 +1020,8 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct mdio_device *pcs;
- struct lynx_pcs *lynx;
+ struct phylink_pcs *phylink_pcs;
+ struct mdio_device *mdio_device;
if (dsa_is_unused_port(felix->ds, port))
continue;
@@ -1100,17 +1029,17 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
continue;
- pcs = mdio_device_create(felix->imdio, port);
- if (IS_ERR(pcs))
+ mdio_device = mdio_device_create(felix->imdio, port);
+ if (IS_ERR(mdio_device))
continue;
- lynx = lynx_pcs_create(pcs);
- if (!lynx) {
- mdio_device_free(pcs);
+ phylink_pcs = lynx_pcs_create(mdio_device);
+ if (!phylink_pcs) {
+ mdio_device_free(mdio_device);
continue;
}
- felix->pcs[port] = lynx;
+ felix->pcs[port] = phylink_pcs;
dev_info(dev, "Found PCS at internal MDIO address %d\n", port);
}
@@ -1124,20 +1053,296 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct lynx_pcs *pcs = felix->pcs[port];
+ struct phylink_pcs *phylink_pcs = felix->pcs[port];
+ struct mdio_device *mdio_device;
- if (!pcs)
+ if (!phylink_pcs)
continue;
- mdio_device_free(pcs->mdio);
- lynx_pcs_destroy(pcs);
+ mdio_device = lynx_get_mdio_device(phylink_pcs);
+ mdio_device_free(mdio_device);
+ lynx_pcs_destroy(phylink_pcs);
}
mdiobus_unregister(felix->imdio);
+ mdiobus_free(felix->imdio);
+}
+
+/* The switch considers any frame (regardless of size) as eligible for
+ * transmission if the traffic class gate is open for at least 33 ns.
+ * Overruns are prevented by cropping an interval at the end of the gate time
+ * slot for which egress scheduling is blocked, but we still need to keep 33 ns
+ * available for one packet to be transmitted, otherwise the port tc will hang.
+ * This function returns the size of a gate interval that remains available for
+ * setting the guard band, after reserving the space for one egress frame.
+ */
+static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns)
+{
+ /* Gate always open */
+ if (gate_len_ns == U64_MAX)
+ return U64_MAX;
+
+ return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC;
+}
+
+/* Extract shortest continuous gate open intervals in ns for each traffic class
+ * of a cyclic tc-taprio schedule. If a gate is always open, the duration is
+ * considered U64_MAX. If the gate is always closed, it is considered 0.
+ */
+static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
+ u64 min_gate_len[OCELOT_NUM_TC])
+{
+ struct tc_taprio_sched_entry *entry;
+ u64 gate_len[OCELOT_NUM_TC];
+ u8 gates_ever_opened = 0;
+ int tc, i, n;
+
+ /* Initialize arrays */
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ min_gate_len[tc] = U64_MAX;
+ gate_len[tc] = 0;
+ }
+
+ /* If we don't have taprio, consider all gates as permanently open */
+ if (!taprio)
+ return;
+
+ n = taprio->num_entries;
+
+ /* Walk through the gate list twice to determine the length
+ * of consecutively open gates for a traffic class, including
+ * open gates that wrap around. We are just interested in the
+ * minimum window size, and this doesn't change what the
+ * minimum is (if the gate never closes, min_gate_len will
+ * remain U64_MAX).
+ */
+ for (i = 0; i < 2 * n; i++) {
+ entry = &taprio->entries[i % n];
+
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ if (entry->gate_mask & BIT(tc)) {
+ gate_len[tc] += entry->interval;
+ gates_ever_opened |= BIT(tc);
+ } else {
+ /* Gate closes now, record a potential new
+ * minimum and reinitialize length
+ */
+ if (min_gate_len[tc] > gate_len[tc] &&
+ gate_len[tc])
+ min_gate_len[tc] = gate_len[tc];
+ gate_len[tc] = 0;
+ }
+ }
+ }
+
+ /* min_gate_len[tc] actually tracks minimum *open* gate time, so for
+ * permanently closed gates, min_gate_len[tc] will still be U64_MAX.
+ * Therefore they are currently indistinguishable from permanently
+ * open gates. Overwrite the gate len with 0 when we know they're
+ * actually permanently closed, i.e. after the loop above.
+ */
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (!(gates_ever_opened & BIT(tc)))
+ min_gate_len[tc] = 0;
+}
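As a hedged worked example of the double walk above (the schedule below is hypothetical, not taken from the patch), consider a two-entry cycle:

	entry 0: interval 30 us, gate_mask 0b10  (only TC1 open)
	entry 1: interval 70 us, gate_mask 0b11  (TC0 and TC1 open)

TC0 is open only during entry 1 and closes at entry 0, so min_gate_len[0] ends up as 70000 ns. TC1 is open in every entry and never takes the "gate closes" branch, so it stays at U64_MAX (permanently open). TC2..TC7 never open, so the final loop overwrites their U64_MAX with 0.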
+
+/* ocelot_write_rix is a macro that concatenates QSYS_QMAXSDU_CFG_* with _RSZ,
+ * so we need to spell out the register access to each traffic class in helper
+ * functions, to simplify callers
+ */
+static void vsc9959_port_qmaxsdu_set(struct ocelot *ocelot, int port, int tc,
+ u32 max_sdu)
+{
+ switch (tc) {
+ case 0:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
+ port);
+ break;
+ case 1:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
+ port);
+ break;
+ case 2:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
+ port);
+ break;
+ case 3:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
+ port);
+ break;
+ case 4:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
+ port);
+ break;
+ case 5:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
+ port);
+ break;
+ case 6:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
+ port);
+ break;
+ case 7:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
+ port);
+ break;
+ }
+}
+
+static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc)
+{
+ switch (tc) {
+ case 0: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_0, port);
+ case 1: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_1, port);
+ case 2: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_2, port);
+ case 3: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_3, port);
+ case 4: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_4, port);
+ case 5: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_5, port);
+ case 6: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_6, port);
+ case 7: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_7, port);
+ default:
+ return 0;
+ }
+}
+
+static u32 vsc9959_tas_tc_max_sdu(struct tc_taprio_qopt_offload *taprio, int tc)
+{
+ if (!taprio || !taprio->max_sdu[tc])
+ return 0;
+
+ return taprio->max_sdu[tc] + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;
+}
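To make the unit conversion concrete: as the helper above implies, the per-TC max-sdu requested through tc-taprio excludes the Ethernet header, up to two VLAN tags and the FCS, while the QSYS_QMAXSDU_CFG_* registers count the whole frame, so ETH_HLEN (14) + 2 * VLAN_HLEN (2 * 4) + ETH_FCS_LEN (4) = 26 octets are added. A requested max-sdu of 1500 therefore programs 1526 (the numbers are a worked example only, not values from the patch).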
+
+/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
+ * switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU
+ * values (the default value is 1518). Also, for traffic class windows smaller
+ * than one MTU sized frame, update QSYS_QMAXSDU_CFG to enable oversized frame
+ * dropping, such that these won't hang the port, as they will never be sent.
+ */
+static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct tc_taprio_qopt_offload *taprio;
+ u64 min_gate_len[OCELOT_NUM_TC];
+ int speed, picos_per_byte;
+ u64 needed_bit_time_ps;
+ u32 val, maxlen;
+ u8 tas_speed;
+ int tc;
+
+ lockdep_assert_held(&ocelot->tas_lock);
+
+ taprio = ocelot_port->taprio;
+
+ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
+ tas_speed = QSYS_TAG_CONFIG_LINK_SPEED_X(val);
+
+ switch (tas_speed) {
+ case OCELOT_SPEED_10:
+ speed = SPEED_10;
+ break;
+ case OCELOT_SPEED_100:
+ speed = SPEED_100;
+ break;
+ case OCELOT_SPEED_1000:
+ speed = SPEED_1000;
+ break;
+ case OCELOT_SPEED_2500:
+ speed = SPEED_2500;
+ break;
+ default:
+ return;
+ }
+
+ picos_per_byte = (USEC_PER_SEC * 8) / speed;
+
+ val = ocelot_port_readl(ocelot_port, DEV_MAC_MAXLEN_CFG);
+ /* MAXLEN_CFG accounts automatically for VLAN. We need to include it
+ * manually in the bit time calculation, plus the preamble and SFD.
+ */
+ maxlen = val + 2 * VLAN_HLEN;
+ /* Consider the standard Ethernet overhead of 8 octets preamble+SFD,
+ * 4 octets FCS, 12 octets IFG.
+ */
+ needed_bit_time_ps = (maxlen + 24) * picos_per_byte;
+
+ dev_dbg(ocelot->dev,
+ "port %d: max frame size %d needs %llu ps at speed %d\n",
+ port, maxlen, needed_bit_time_ps, speed);
+
+ vsc9959_tas_min_gate_lengths(taprio, min_gate_len);
+
+ mutex_lock(&ocelot->fwd_domain_lock);
+
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ u32 requested_max_sdu = vsc9959_tas_tc_max_sdu(taprio, tc);
+ u64 remaining_gate_len_ps;
+ u32 max_sdu;
+
+ remaining_gate_len_ps =
+ vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]);
+
+ if (remaining_gate_len_ps > needed_bit_time_ps) {
+ /* Setting QMAXSDU_CFG to 0 disables oversized frame
+ * dropping.
+ */
+ max_sdu = requested_max_sdu;
+ dev_dbg(ocelot->dev,
+ "port %d tc %d min gate len %llu"
+ ", sending all frames\n",
+ port, tc, min_gate_len[tc]);
+ } else {
+ /* If traffic class doesn't support a full MTU sized
+ * frame, make sure to enable oversize frame dropping
+ * for frames larger than the smallest that would fit.
+ *
+ * However, the exact same register, QSYS_QMAXSDU_CFG_*,
+ * controls not only oversized frame dropping, but also
+ * per-tc static guard band lengths, so it reduces the
+ * useful gate interval length. Therefore, be careful
+ * to calculate a guard band (and therefore max_sdu)
+ * that still leaves 33 ns available in the time slot.
+ */
+ max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
+ /* A TC gate may be completely closed, which is a
+ * special case where all packets are oversized.
+ * Any limit smaller than 64 octets (the minimum Ethernet
+ * frame size) accomplishes this.
+ */
+ if (!max_sdu)
+ max_sdu = 1;
+ /* Take the L1 overhead into account, but don't let the
+ * subtraction bring max_sdu down to zero. We subtract 20
+ * rather than 24 octets because QSYS_QMAXSDU_CFG_* already
+ * counts the 4 FCS octets as part of the packet size.
+ */
+ if (max_sdu > 20)
+ max_sdu -= 20;
+
+ if (requested_max_sdu && requested_max_sdu < max_sdu)
+ max_sdu = requested_max_sdu;
+
+ dev_info(ocelot->dev,
+ "port %d tc %d min gate length %llu"
+ " ns not enough for max frame size %d at %d"
+ " Mbps, dropping frames over %d"
+ " octets including FCS\n",
+ port, tc, min_gate_len[tc], maxlen, speed,
+ max_sdu);
+ }
+
+ vsc9959_port_qmaxsdu_set(ocelot, port, tc, max_sdu);
+ }
+
+ ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port);
+
+ ocelot->ops->cut_through_fwd(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
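A hedged numeric walk-through of the function above, with made-up but representative figures: at SPEED_1000, picos_per_byte = (1,000,000 * 8) / 1000 = 8000 ps. With DEV_MAC_MAXLEN_CFG at its default of 1518, maxlen = 1518 + 2 * VLAN_HLEN = 1526 and needed_bit_time_ps = (1526 + 24) * 8000 = 12,400,000 ps, about 12.4 us per full-size frame. A traffic class whose shortest open gate is 10 us has remaining_gate_len_ps = (10000 - 33) * 1000 = 9,967,000 ps, below the needed bit time, so oversize dropping kicks in with max_sdu = 9,967,000 / 8000 = 1245, minus 20 octets of remaining L1 overhead = 1225 octets including FCS.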
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
u32 speed)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
u8 tas_speed;
switch (speed) {
@@ -1158,10 +1363,17 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
break;
}
+ mutex_lock(&ocelot->tas_lock);
+
ocelot_rmw_rix(ocelot,
QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
QSYS_TAG_CONFIG_LINK_SPEED_M,
QSYS_TAG_CONFIG, port);
+
+ if (ocelot_port->taprio)
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->tas_lock);
}
static void vsc9959_new_base_time(struct ocelot *ocelot, ktime_t base_time,
@@ -1204,26 +1416,41 @@ static void vsc9959_tas_gcl_set(struct ocelot *ocelot, const u32 gcl_ix,
static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
struct tc_taprio_qopt_offload *taprio)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
struct timespec64 base_ts;
int ret, i;
u32 val;
+ mutex_lock(&ocelot->tas_lock);
+
if (!taprio->enable) {
- ocelot_rmw_rix(ocelot,
- QSYS_TAG_CONFIG_INIT_GATE_STATE(0xFF),
- QSYS_TAG_CONFIG_ENABLE |
- QSYS_TAG_CONFIG_INIT_GATE_STATE_M,
+ ocelot_port_mqprio(ocelot, port, &taprio->mqprio);
+ ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
QSYS_TAG_CONFIG, port);
+ taprio_offload_free(ocelot_port->taprio);
+ ocelot_port->taprio = NULL;
+
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->tas_lock);
return 0;
}
+ ret = ocelot_port_mqprio(ocelot, port, &taprio->mqprio);
+ if (ret)
+ goto err_unlock;
+
if (taprio->cycle_time > NSEC_PER_SEC ||
- taprio->cycle_time_extension >= NSEC_PER_SEC)
- return -EINVAL;
+ taprio->cycle_time_extension >= NSEC_PER_SEC) {
+ ret = -EINVAL;
+ goto err_reset_tc;
+ }
- if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX)
- return -ERANGE;
+ if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX) {
+ ret = -ERANGE;
+ goto err_reset_tc;
+ }
/* Enable guard band. The switch will schedule frames without taking
* their length into account. Thus we'll always need to enable the
@@ -1244,8 +1471,10 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
* config is pending, need reset the TAS module
*/
val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
- if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING)
- return -EBUSY;
+ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
+ ret = -EBUSY;
+ goto err_reset_tc;
+ }
ocelot_rmw_rix(ocelot,
QSYS_TAG_CONFIG_ENABLE |
@@ -1278,10 +1507,74 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
ret = readx_poll_timeout(vsc9959_tas_read_cfg_status, ocelot, val,
!(val & QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE),
10, 100000);
+ if (ret)
+ goto err_reset_tc;
+
+ ocelot_port->taprio = taprio_offload_get(taprio);
+ vsc9959_tas_guard_bands_update(ocelot, port);
+
+ mutex_unlock(&ocelot->tas_lock);
+
+ return 0;
+
+err_reset_tc:
+ taprio->mqprio.qopt.num_tc = 0;
+ ocelot_port_mqprio(ocelot, port, &taprio->mqprio);
+err_unlock:
+ mutex_unlock(&ocelot->tas_lock);
return ret;
}
+static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
+{
+ struct tc_taprio_qopt_offload *taprio;
+ struct ocelot_port *ocelot_port;
+ struct timespec64 base_ts;
+ int port;
+ u32 val;
+
+ mutex_lock(&ocelot->tas_lock);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ ocelot_port = ocelot->ports[port];
+ taprio = ocelot_port->taprio;
+ if (!taprio)
+ continue;
+
+ ocelot_rmw(ocelot,
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
+ QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ /* Disable time-aware shaper */
+ ocelot_rmw_rix(ocelot, 0, QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG, port);
+
+ vsc9959_new_base_time(ocelot, taprio->base_time,
+ taprio->cycle_time, &base_ts);
+
+ ocelot_write(ocelot, base_ts.tv_nsec, QSYS_PARAM_CFG_REG_1);
+ ocelot_write(ocelot, lower_32_bits(base_ts.tv_sec),
+ QSYS_PARAM_CFG_REG_2);
+ val = upper_32_bits(base_ts.tv_sec);
+ ocelot_rmw(ocelot,
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(val),
+ QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
+ QSYS_PARAM_CFG_REG_3);
+
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
+ QSYS_TAS_PARAM_CFG_CTRL);
+
+ /* Re-enable time-aware shaper */
+ ocelot_rmw_rix(ocelot, QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG_ENABLE,
+ QSYS_TAG_CONFIG, port);
+ }
+ mutex_unlock(&ocelot->tas_lock);
+}
+
static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
struct tc_cbs_qopt_offload *cbs_qopt)
{
@@ -1328,6 +1621,28 @@ static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
return 0;
}
+static int vsc9959_qos_query_caps(struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
@@ -1335,8 +1650,12 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
struct ocelot *ocelot = ds->priv;
switch (type) {
+ case TC_QUERY_CAPS:
+ return vsc9959_qos_query_caps(type_data);
case TC_SETUP_QDISC_TAPRIO:
return vsc9959_qos_port_tas_set(ocelot, port, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return ocelot_port_mqprio(ocelot, port, type_data);
case TC_SETUP_QDISC_CBS:
return vsc9959_qos_port_cbs_set(ds, port, type_data);
default:
@@ -1344,44 +1663,980 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
}
}
+#define VSC9959_PSFP_SFID_MAX 175
+#define VSC9959_PSFP_GATE_ID_MAX 183
+#define VSC9959_PSFP_POLICER_BASE 63
+#define VSC9959_PSFP_POLICER_MAX 383
+#define VSC9959_PSFP_GATE_LIST_NUM 4
+#define VSC9959_PSFP_GATE_CYCLETIME_MIN 5000
+
+struct felix_stream {
+ struct list_head list;
+ unsigned long id;
+ bool dummy;
+ int ports;
+ int port;
+ u8 dmac[ETH_ALEN];
+ u16 vid;
+ s8 prio;
+ u8 sfid_valid;
+ u8 ssid_valid;
+ u32 sfid;
+ u32 ssid;
+};
+
+struct felix_stream_filter_counters {
+ u64 match;
+ u64 not_pass_gate;
+ u64 not_pass_sdu;
+ u64 red;
+};
+
+struct felix_stream_filter {
+ struct felix_stream_filter_counters stats;
+ struct list_head list;
+ refcount_t refcount;
+ u32 index;
+ u8 enable;
+ int portmask;
+ u8 sg_valid;
+ u32 sgid;
+ u8 fm_valid;
+ u32 fmid;
+ u8 prio_valid;
+ u8 prio;
+ u32 maxsdu;
+};
+
+struct felix_stream_gate {
+ u32 index;
+ u8 enable;
+ u8 ipv_valid;
+ u8 init_ipv;
+ u64 basetime;
+ u64 cycletime;
+ u64 cycletime_ext;
+ u32 num_entries;
+ struct action_gate_entry entries[];
+};
+
+struct felix_stream_gate_entry {
+ struct list_head list;
+ refcount_t refcount;
+ u32 index;
+};
+
+static int vsc9959_stream_identify(struct flow_cls_offload *f,
+ struct felix_stream *stream)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)))
+ return -EOPNOTSUPP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ ether_addr_copy(stream->dmac, match.key->dst);
+ if (!is_zero_ether_addr(match.mask->src))
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ if (match.mask->vlan_priority)
+ stream->prio = match.key->vlan_priority;
+ else
+ stream->prio = -1;
+
+ if (!match.mask->vlan_id)
+ return -EOPNOTSUPP;
+ stream->vid = match.key->vlan_id;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ stream->id = f->cookie;
+
+ return 0;
+}
+
+static int vsc9959_mact_stream_set(struct ocelot *ocelot,
+ struct felix_stream *stream,
+ struct netlink_ext_ack *extack)
+{
+ enum macaccess_entry_type type;
+ int ret, sfid, ssid;
+ u32 vid, dst_idx;
+ u8 mac[ETH_ALEN];
+
+ ether_addr_copy(mac, stream->dmac);
+ vid = stream->vid;
+
+ /* Stream identification doesn't support adding a stream with a
+ * non-existent MAC (i.e. one that has not been learned into the
+ * MAC table).
+ */
+ ret = ocelot_mact_lookup(ocelot, &dst_idx, mac, vid, &type);
+ if (ret) {
+ if (extack)
+ NL_SET_ERR_MSG_MOD(extack, "Stream is not learned in MAC table");
+ return -EOPNOTSUPP;
+ }
+
+ if ((stream->sfid_valid || stream->ssid_valid) &&
+ type == ENTRYTYPE_NORMAL)
+ type = ENTRYTYPE_LOCKED;
+
+ sfid = stream->sfid_valid ? stream->sfid : -1;
+ ssid = stream->ssid_valid ? stream->ssid : -1;
+
+ ret = ocelot_mact_learn_streamdata(ocelot, dst_idx, mac, vid, type,
+ sfid, ssid);
+
+ return ret;
+}
+
+static struct felix_stream *
+vsc9959_stream_table_lookup(struct list_head *stream_list,
+ struct felix_stream *stream)
+{
+ struct felix_stream *tmp;
+
+ list_for_each_entry(tmp, stream_list, list)
+ if (ether_addr_equal(tmp->dmac, stream->dmac) &&
+ tmp->vid == stream->vid)
+ return tmp;
+
+ return NULL;
+}
+
+static int vsc9959_stream_table_add(struct ocelot *ocelot,
+ struct list_head *stream_list,
+ struct felix_stream *stream,
+ struct netlink_ext_ack *extack)
+{
+ struct felix_stream *stream_entry;
+ int ret;
+
+ stream_entry = kmemdup(stream, sizeof(*stream_entry), GFP_KERNEL);
+ if (!stream_entry)
+ return -ENOMEM;
+
+ if (!stream->dummy) {
+ ret = vsc9959_mact_stream_set(ocelot, stream_entry, extack);
+ if (ret) {
+ kfree(stream_entry);
+ return ret;
+ }
+ }
+
+ list_add_tail(&stream_entry->list, stream_list);
+
+ return 0;
+}
+
+static struct felix_stream *
+vsc9959_stream_table_get(struct list_head *stream_list, unsigned long id)
+{
+ struct felix_stream *tmp;
+
+ list_for_each_entry(tmp, stream_list, list)
+ if (tmp->id == id)
+ return tmp;
+
+ return NULL;
+}
+
+static void vsc9959_stream_table_del(struct ocelot *ocelot,
+ struct felix_stream *stream)
+{
+ if (!stream->dummy)
+ vsc9959_mact_stream_set(ocelot, stream, NULL);
+
+ list_del(&stream->list);
+ kfree(stream);
+}
+
+static u32 vsc9959_sfi_access_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, ANA_TABLES_SFIDACCESS);
+}
+
+static int vsc9959_psfp_sfi_set(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ u32 val;
+
+ if (sfi->index > VSC9959_PSFP_SFID_MAX)
+ return -EINVAL;
+
+ if (!sfi->enable) {
+ ocelot_write(ocelot, ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index),
+ ANA_TABLES_SFIDTIDX);
+
+ val = ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE);
+ ocelot_write(ocelot, val, ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+ }
+
+ if (sfi->sgid > VSC9959_PSFP_GATE_ID_MAX ||
+ sfi->fmid > VSC9959_PSFP_POLICER_MAX)
+ return -EINVAL;
+
+ ocelot_write(ocelot,
+ (sfi->sg_valid ? ANA_TABLES_SFIDTIDX_SGID_VALID : 0) |
+ ANA_TABLES_SFIDTIDX_SGID(sfi->sgid) |
+ (sfi->fm_valid ? ANA_TABLES_SFIDTIDX_POL_ENA : 0) |
+ ANA_TABLES_SFIDTIDX_POL_IDX(sfi->fmid) |
+ ANA_TABLES_SFIDTIDX_SFID_INDEX(sfi->index),
+ ANA_TABLES_SFIDTIDX);
+
+ ocelot_write(ocelot,
+ (sfi->prio_valid ? ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA : 0) |
+ ANA_TABLES_SFIDACCESS_IGR_PRIO(sfi->prio) |
+ ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(sfi->maxsdu) |
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE),
+ ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+}
+
+static int vsc9959_psfp_sfidmask_set(struct ocelot *ocelot, u32 sfid, int ports)
+{
+ u32 val;
+
+ ocelot_rmw(ocelot,
+ ANA_TABLES_SFIDTIDX_SFID_INDEX(sfid),
+ ANA_TABLES_SFIDTIDX_SFID_INDEX_M,
+ ANA_TABLES_SFIDTIDX);
+
+ ocelot_write(ocelot,
+ ANA_TABLES_SFID_MASK_IGR_PORT_MASK(ports) |
+ ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA,
+ ANA_TABLES_SFID_MASK);
+
+ ocelot_rmw(ocelot,
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(SFIDACCESS_CMD_WRITE),
+ ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M,
+ ANA_TABLES_SFIDACCESS);
+
+ return readx_poll_timeout(vsc9959_sfi_access_status, ocelot, val,
+ (!ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(val)),
+ 10, 100000);
+}
+
+static int vsc9959_psfp_sfi_list_add(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi,
+ struct list_head *pos)
+{
+ struct felix_stream_filter *sfi_entry;
+ int ret;
+
+ sfi_entry = kmemdup(sfi, sizeof(*sfi_entry), GFP_KERNEL);
+ if (!sfi_entry)
+ return -ENOMEM;
+
+ refcount_set(&sfi_entry->refcount, 1);
+
+ ret = vsc9959_psfp_sfi_set(ocelot, sfi_entry);
+ if (ret) {
+ kfree(sfi_entry);
+ return ret;
+ }
+
+ vsc9959_psfp_sfidmask_set(ocelot, sfi->index, sfi->portmask);
+
+ list_add(&sfi_entry->list, pos);
+
+ return 0;
+}
+
+static int vsc9959_psfp_sfi_table_add(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ struct list_head *pos, *q, *last;
+ struct felix_stream_filter *tmp;
+ struct ocelot_psfp_list *psfp;
+ u32 insert = 0;
+
+ psfp = &ocelot->psfp;
+ last = &psfp->sfi_list;
+
+ list_for_each_safe(pos, q, &psfp->sfi_list) {
+ tmp = list_entry(pos, struct felix_stream_filter, list);
+ if (sfi->sg_valid == tmp->sg_valid &&
+ sfi->fm_valid == tmp->fm_valid &&
+ sfi->portmask == tmp->portmask &&
+ tmp->sgid == sfi->sgid &&
+ tmp->fmid == sfi->fmid) {
+ sfi->index = tmp->index;
+ refcount_inc(&tmp->refcount);
+ return 0;
+ }
+ /* Make sure that the index is increasing in order. */
+ if (tmp->index == insert) {
+ last = pos;
+ insert++;
+ }
+ }
+ sfi->index = insert;
+
+ return vsc9959_psfp_sfi_list_add(ocelot, sfi, last);
+}
+
+static int vsc9959_psfp_sfi_table_add2(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi,
+ struct felix_stream_filter *sfi2)
+{
+ struct felix_stream_filter *tmp;
+ struct list_head *pos, *q, *last;
+ struct ocelot_psfp_list *psfp;
+ u32 insert = 0;
+ int ret;
+
+ psfp = &ocelot->psfp;
+ last = &psfp->sfi_list;
+
+ list_for_each_safe(pos, q, &psfp->sfi_list) {
+ tmp = list_entry(pos, struct felix_stream_filter, list);
+ /* Make sure that the index is increasing in order. */
+ if (tmp->index >= insert + 2)
+ break;
+
+ insert = tmp->index + 1;
+ last = pos;
+ }
+ sfi->index = insert;
+
+ ret = vsc9959_psfp_sfi_list_add(ocelot, sfi, last);
+ if (ret)
+ return ret;
+
+ sfi2->index = insert + 1;
+
+ return vsc9959_psfp_sfi_list_add(ocelot, sfi2, last->next);
+}
+
+static struct felix_stream_filter *
+vsc9959_psfp_sfi_table_get(struct list_head *sfi_list, u32 index)
+{
+ struct felix_stream_filter *tmp;
+
+ list_for_each_entry(tmp, sfi_list, list)
+ if (tmp->index == index)
+ return tmp;
+
+ return NULL;
+}
+
+static void vsc9959_psfp_sfi_table_del(struct ocelot *ocelot, u32 index)
+{
+ struct felix_stream_filter *tmp, *n;
+ struct ocelot_psfp_list *psfp;
+ u8 z;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry_safe(tmp, n, &psfp->sfi_list, list)
+ if (tmp->index == index) {
+ z = refcount_dec_and_test(&tmp->refcount);
+ if (z) {
+ tmp->enable = 0;
+ vsc9959_psfp_sfi_set(ocelot, tmp);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ break;
+ }
+}
+
+static void vsc9959_psfp_parse_gate(const struct flow_action_entry *entry,
+ struct felix_stream_gate *sgi)
+{
+ sgi->index = entry->hw_index;
+ sgi->ipv_valid = (entry->gate.prio < 0) ? 0 : 1;
+ sgi->init_ipv = (sgi->ipv_valid) ? entry->gate.prio : 0;
+ sgi->basetime = entry->gate.basetime;
+ sgi->cycletime = entry->gate.cycletime;
+ sgi->num_entries = entry->gate.num_entries;
+ sgi->enable = 1;
+
+ memcpy(sgi->entries, entry->gate.entries,
+ entry->gate.num_entries * sizeof(struct action_gate_entry));
+}
+
+static u32 vsc9959_sgi_cfg_status(struct ocelot *ocelot)
+{
+ return ocelot_read(ocelot, ANA_SG_ACCESS_CTRL);
+}
+
+static int vsc9959_psfp_sgi_set(struct ocelot *ocelot,
+ struct felix_stream_gate *sgi)
+{
+ struct action_gate_entry *e;
+ struct timespec64 base_ts;
+ u32 interval_sum = 0;
+ u32 val;
+ int i;
+
+ if (sgi->index > VSC9959_PSFP_GATE_ID_MAX)
+ return -EINVAL;
+
+ ocelot_write(ocelot, ANA_SG_ACCESS_CTRL_SGID(sgi->index),
+ ANA_SG_ACCESS_CTRL);
+
+ if (!sgi->enable) {
+ ocelot_rmw(ocelot, ANA_SG_CONFIG_REG_3_INIT_GATE_STATE,
+ ANA_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_SG_CONFIG_REG_3_GATE_ENABLE,
+ ANA_SG_CONFIG_REG_3);
+
+ return 0;
+ }
+
+ if (sgi->cycletime < VSC9959_PSFP_GATE_CYCLETIME_MIN ||
+ sgi->cycletime > NSEC_PER_SEC)
+ return -EINVAL;
+
+ if (sgi->num_entries > VSC9959_PSFP_GATE_LIST_NUM)
+ return -EINVAL;
+
+ vsc9959_new_base_time(ocelot, sgi->basetime, sgi->cycletime, &base_ts);
+ ocelot_write(ocelot, base_ts.tv_nsec, ANA_SG_CONFIG_REG_1);
+ val = lower_32_bits(base_ts.tv_sec);
+ ocelot_write(ocelot, val, ANA_SG_CONFIG_REG_2);
+
+ val = upper_32_bits(base_ts.tv_sec);
+ ocelot_write(ocelot,
+ (sgi->ipv_valid ? ANA_SG_CONFIG_REG_3_IPV_VALID : 0) |
+ ANA_SG_CONFIG_REG_3_INIT_IPV(sgi->init_ipv) |
+ ANA_SG_CONFIG_REG_3_GATE_ENABLE |
+ ANA_SG_CONFIG_REG_3_LIST_LENGTH(sgi->num_entries) |
+ ANA_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(val),
+ ANA_SG_CONFIG_REG_3);
+
+ ocelot_write(ocelot, sgi->cycletime, ANA_SG_CONFIG_REG_4);
+
+ e = sgi->entries;
+ for (i = 0; i < sgi->num_entries; i++) {
+ u32 ips = (e[i].ipv < 0) ? 0 : (e[i].ipv + 8);
+
+ ocelot_write_rix(ocelot, ANA_SG_GCL_GS_CONFIG_IPS(ips) |
+ (e[i].gate_state ?
+ ANA_SG_GCL_GS_CONFIG_GATE_STATE : 0),
+ ANA_SG_GCL_GS_CONFIG, i);
+
+ interval_sum += e[i].interval;
+ ocelot_write_rix(ocelot, interval_sum, ANA_SG_GCL_TI_CONFIG, i);
+ }
+
+ ocelot_rmw(ocelot, ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
+ ANA_SG_ACCESS_CTRL_CONFIG_CHANGE,
+ ANA_SG_ACCESS_CTRL);
+
+ return readx_poll_timeout(vsc9959_sgi_cfg_status, ocelot, val,
+ (!(ANA_SG_ACCESS_CTRL_CONFIG_CHANGE & val)),
+ 10, 100000);
+}
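One detail worth spelling out from the loop above: ANA_SG_GCL_TI_CONFIG takes cumulative times rather than per-entry intervals, which is why interval_sum keeps accumulating. As a hypothetical example, a gate list of {open 200 us, close 100 us, open 200 us} is programmed as 200000, 300000 and 500000 ns into GCL entries 0, 1 and 2; the cycle time itself is written separately to ANA_SG_CONFIG_REG_4 from sgi->cycletime.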
+
+static int vsc9959_psfp_sgi_table_add(struct ocelot *ocelot,
+ struct felix_stream_gate *sgi)
+{
+ struct felix_stream_gate_entry *tmp;
+ struct ocelot_psfp_list *psfp;
+ int ret;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry(tmp, &psfp->sgi_list, list)
+ if (tmp->index == sgi->index) {
+ refcount_inc(&tmp->refcount);
+ return 0;
+ }
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = vsc9959_psfp_sgi_set(ocelot, sgi);
+ if (ret) {
+ kfree(tmp);
+ return ret;
+ }
+
+ tmp->index = sgi->index;
+ refcount_set(&tmp->refcount, 1);
+ list_add_tail(&tmp->list, &psfp->sgi_list);
+
+ return 0;
+}
+
+static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
+ u32 index)
+{
+ struct felix_stream_gate_entry *tmp, *n;
+ struct felix_stream_gate sgi = {0};
+ struct ocelot_psfp_list *psfp;
+ u8 z;
+
+ psfp = &ocelot->psfp;
+
+ list_for_each_entry_safe(tmp, n, &psfp->sgi_list, list)
+ if (tmp->index == index) {
+ z = refcount_dec_and_test(&tmp->refcount);
+ if (z) {
+ sgi.index = index;
+ sgi.enable = 0;
+ vsc9959_psfp_sgi_set(ocelot, &sgi);
+ list_del(&tmp->list);
+ kfree(tmp);
+ }
+ break;
+ }
+}
+
+static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct felix_stream_filter old_sfi, *sfi_entry;
+ struct felix_stream_filter sfi = {0};
+ const struct flow_action_entry *a;
+ struct felix_stream *stream_entry;
+ struct felix_stream stream = {0};
+ struct felix_stream_gate *sgi;
+ struct ocelot_psfp_list *psfp;
+ struct ocelot_policer pol;
+ int ret, i, size;
+ u64 rate, burst;
+ u32 index;
+
+ psfp = &ocelot->psfp;
+
+ ret = vsc9959_stream_identify(f, &stream);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(extack, "Only can match on VID, PCP, and dest MAC");
+ return ret;
+ }
+
+ mutex_lock(&psfp->lock);
+
+ flow_action_for_each(i, a, &f->rule->action) {
+ switch (a->id) {
+ case FLOW_ACTION_GATE:
+ size = struct_size(sgi, entries, a->gate.num_entries);
+ sgi = kzalloc(size, GFP_KERNEL);
+ if (!sgi) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ vsc9959_psfp_parse_gate(a, sgi);
+ ret = vsc9959_psfp_sgi_table_add(ocelot, sgi);
+ if (ret) {
+ kfree(sgi);
+ goto err;
+ }
+ sfi.sg_valid = 1;
+ sfi.sgid = sgi->index;
+ kfree(sgi);
+ break;
+ case FLOW_ACTION_POLICE:
+ index = a->hw_index + VSC9959_PSFP_POLICER_BASE;
+ if (index > VSC9959_PSFP_POLICER_MAX) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ rate = a->police.rate_bytes_ps;
+ burst = rate * PSCHED_NS2TICKS(a->police.burst);
+ pol = (struct ocelot_policer) {
+ .burst = div_u64(burst, PSCHED_TICKS_PER_SEC),
+ .rate = div_u64(rate, 1000) * 8,
+ };
+ ret = ocelot_vcap_policer_add(ocelot, index, &pol);
+ if (ret)
+ goto err;
+
+ sfi.fm_valid = 1;
+ sfi.fmid = index;
+ sfi.maxsdu = a->police.mtu;
+ break;
+ default:
+ mutex_unlock(&psfp->lock);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ stream.ports = BIT(port);
+ stream.port = port;
+
+ sfi.portmask = stream.ports;
+ sfi.prio_valid = (stream.prio < 0 ? 0 : 1);
+ sfi.prio = (sfi.prio_valid ? stream.prio : 0);
+ sfi.enable = 1;
+
+ /* Check if stream is set. */
+ stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &stream);
+ if (stream_entry) {
+ if (stream_entry->ports & BIT(port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The stream is added on this port");
+ ret = -EEXIST;
+ goto err;
+ }
+
+ if (stream_entry->ports != BIT(stream_entry->port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The stream is added on two ports");
+ ret = -EEXIST;
+ goto err;
+ }
+
+ stream_entry->ports |= BIT(port);
+ stream.ports = stream_entry->ports;
+
+ sfi_entry = vsc9959_psfp_sfi_table_get(&psfp->sfi_list,
+ stream_entry->sfid);
+ memcpy(&old_sfi, sfi_entry, sizeof(old_sfi));
+
+ vsc9959_psfp_sfi_table_del(ocelot, stream_entry->sfid);
+
+ old_sfi.portmask = stream_entry->ports;
+ sfi.portmask = stream.ports;
+
+ if (stream_entry->port > port) {
+ ret = vsc9959_psfp_sfi_table_add2(ocelot, &sfi,
+ &old_sfi);
+ stream_entry->dummy = true;
+ } else {
+ ret = vsc9959_psfp_sfi_table_add2(ocelot, &old_sfi,
+ &sfi);
+ stream.dummy = true;
+ }
+ if (ret)
+ goto err;
+
+ stream_entry->sfid = old_sfi.index;
+ } else {
+ ret = vsc9959_psfp_sfi_table_add(ocelot, &sfi);
+ if (ret)
+ goto err;
+ }
+
+ stream.sfid = sfi.index;
+ stream.sfid_valid = 1;
+ ret = vsc9959_stream_table_add(ocelot, &psfp->stream_list,
+ &stream, extack);
+ if (ret) {
+ vsc9959_psfp_sfi_table_del(ocelot, stream.sfid);
+ goto err;
+ }
+
+ mutex_unlock(&psfp->lock);
+
+ return 0;
+
+err:
+ if (sfi.sg_valid)
+ vsc9959_psfp_sgi_table_del(ocelot, sfi.sgid);
+
+ if (sfi.fm_valid)
+ ocelot_vcap_policer_del(ocelot, sfi.fmid);
+
+ mutex_unlock(&psfp->lock);
+
+ return ret;
+}
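On the FLOW_ACTION_POLICE conversion above: rate_bytes_ps arrives in bytes per second, and div_u64(rate, 1000) * 8 turns it into a kilobits-per-second figure for the ocelot policer. As a hypothetical example, a 1 Gbit/s policer arrives as 125,000,000 bytes/s and is programmed as 1,000,000 kbps; the burst term is likewise rescaled through the PSCHED tick macros before being handed to ocelot_vcap_policer_add().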
+
+static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
+ struct flow_cls_offload *f)
+{
+ struct felix_stream *stream, tmp, *stream_entry;
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ static struct felix_stream_filter *sfi;
+
+ mutex_lock(&psfp->lock);
+
+ stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
+ if (!stream) {
+ mutex_unlock(&psfp->lock);
+ return -ENOMEM;
+ }
+
+ sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
+ if (!sfi) {
+ mutex_unlock(&psfp->lock);
+ return -ENOMEM;
+ }
+
+ if (sfi->sg_valid)
+ vsc9959_psfp_sgi_table_del(ocelot, sfi->sgid);
+
+ if (sfi->fm_valid)
+ ocelot_vcap_policer_del(ocelot, sfi->fmid);
+
+ vsc9959_psfp_sfi_table_del(ocelot, stream->sfid);
+
+ memcpy(&tmp, stream, sizeof(tmp));
+
+ stream->sfid_valid = 0;
+ vsc9959_stream_table_del(ocelot, stream);
+
+ stream_entry = vsc9959_stream_table_lookup(&psfp->stream_list, &tmp);
+ if (stream_entry) {
+ stream_entry->ports = BIT(stream_entry->port);
+ if (stream_entry->dummy) {
+ stream_entry->dummy = false;
+ vsc9959_mact_stream_set(ocelot, stream_entry, NULL);
+ }
+ vsc9959_psfp_sfidmask_set(ocelot, stream_entry->sfid,
+ stream_entry->ports);
+ }
+
+ mutex_unlock(&psfp->lock);
+
+ return 0;
+}
+
+static void vsc9959_update_sfid_stats(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ struct felix_stream_filter_counters *s = &sfi->stats;
+ u32 match, not_pass_gate, not_pass_sdu, red;
+ u32 sfid = sfi->index;
+
+ lockdep_assert_held(&ocelot->stat_view_lock);
+
+ ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(sfid),
+ SYS_STAT_CFG_STAT_VIEW_M,
+ SYS_STAT_CFG);
+
+ match = ocelot_read(ocelot, SYS_COUNT_SF_MATCHING_FRAMES);
+ not_pass_gate = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_FRAMES);
+ not_pass_sdu = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_SDU);
+ red = ocelot_read(ocelot, SYS_COUNT_SF_RED_FRAMES);
+
+ /* Clear the PSFP counter. */
+ ocelot_write(ocelot,
+ SYS_STAT_CFG_STAT_VIEW(sfid) |
+ SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
+ SYS_STAT_CFG);
+
+ s->match += match;
+ s->not_pass_gate += not_pass_gate;
+ s->not_pass_sdu += not_pass_sdu;
+ s->red += red;
+}
+
+/* Caller must hold &ocelot->stat_view_lock */
+static void vsc9959_update_stats(struct ocelot *ocelot)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter *sfi;
+
+ mutex_lock(&psfp->lock);
+
+ list_for_each_entry(sfi, &psfp->sfi_list, list)
+ vsc9959_update_sfid_stats(ocelot, sfi);
+
+ mutex_unlock(&psfp->lock);
+}
+
+static int vsc9959_psfp_stats_get(struct ocelot *ocelot,
+ struct flow_cls_offload *f,
+ struct flow_stats *stats)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter_counters *s;
+ static struct felix_stream_filter *sfi;
+ struct felix_stream *stream;
+
+ stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
+ if (!stream)
+ return -ENOMEM;
+
+ sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
+ if (!sfi)
+ return -EINVAL;
+
+ mutex_lock(&ocelot->stat_view_lock);
+
+ vsc9959_update_sfid_stats(ocelot, sfi);
+
+ s = &sfi->stats;
+ stats->pkts = s->match;
+ stats->drops = s->not_pass_gate + s->not_pass_sdu + s->red;
+
+ memset(s, 0, sizeof(*s));
+
+ mutex_unlock(&ocelot->stat_view_lock);
+
+ return 0;
+}
+
+static void vsc9959_psfp_init(struct ocelot *ocelot)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+
+ INIT_LIST_HEAD(&psfp->stream_list);
+ INIT_LIST_HEAD(&psfp->sfi_list);
+ INIT_LIST_HEAD(&psfp->sgi_list);
+ mutex_init(&psfp->lock);
+}
+
+/* When using cut-through forwarding and the egress port runs at a higher data
+ * rate than the ingress port, the packet currently under transmission would
+ * suffer an underrun since it would be transmitted faster than it is received.
+ * The Felix switch implementation of cut-through forwarding does not check in
+ * hardware whether this condition is satisfied or not, so we must restrict the
+ * list of ports that have cut-through forwarding enabled on egress to only be
+ * the ports operating at the lowest link speed within their respective
+ * forwarding domain.
+ */
+static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_switch *ds = felix->ds;
+ int tc, port, other_port;
+
+ lockdep_assert_held(&ocelot->fwd_domain_lock);
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_mm_state *mm = &ocelot->mm[port];
+ int min_speed = ocelot_port->speed;
+ unsigned long mask = 0;
+ u32 tmp, val = 0;
+
+ /* Disable cut-through on ports that are down */
+ if (ocelot_port->speed <= 0)
+ goto set;
+
+ if (dsa_is_cpu_port(ds, port)) {
+ /* Ocelot switches forward from the NPI port towards
+ * any port, regardless of it being in the NPI port's
+ * forwarding domain or not.
+ */
+ mask = dsa_user_ports(ds);
+ } else {
+ mask = ocelot_get_bridge_fwd_mask(ocelot, port);
+ mask &= ~BIT(port);
+ if (ocelot->npi >= 0)
+ mask |= BIT(ocelot->npi);
+ else
+ mask |= ocelot_port_assigned_dsa_8021q_cpu_mask(ocelot,
+ port);
+ }
+
+ /* Calculate the minimum link speed, among the ports that are
+ * up, of this source port's forwarding domain.
+ */
+ for_each_set_bit(other_port, &mask, ocelot->num_phys_ports) {
+ struct ocelot_port *other_ocelot_port;
+
+ other_ocelot_port = ocelot->ports[other_port];
+ if (other_ocelot_port->speed <= 0)
+ continue;
+
+ if (min_speed > other_ocelot_port->speed)
+ min_speed = other_ocelot_port->speed;
+ }
+
+ /* Enable cut-through forwarding for all traffic classes that
+ * don't have oversized dropping enabled, since this check is
+ * bypassed in cut-through mode. Also exclude preemptible
+ * traffic classes, since these would hang the port for some
+ * reason, if sent as cut-through.
+ */
+ if (ocelot_port->speed == min_speed) {
+ val = GENMASK(7, 0) & ~mm->active_preemptible_tcs;
+
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (vsc9959_port_qmaxsdu_get(ocelot, port, tc))
+ val &= ~BIT(tc);
+ }
+
+set:
+ tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port);
+ if (tmp == val)
+ continue;
+
+ dev_dbg(ocelot->dev,
+ "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding on TC mask 0x%x\n",
+ port, mask, ocelot_port->speed, min_speed,
+ val ? "enabling" : "disabling", val);
+
+ ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port);
+ }
+}
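A hedged example of the policy implemented above (port numbers and speeds are hypothetical): if swp0 runs at 1 Gbps and shares a forwarding domain with swp1 and swp2 at 100 Mbps, the minimum speed in every port's domain is 100 Mbps, so the 100 Mbps ports keep val = GENMASK(7, 0) minus any preemptible or oversize-dropping TCs, while the 1 Gbps port gets val = 0 and falls back to store-and-forward.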
+
+static const struct ocelot_ops vsc9959_ops = {
+ .reset = vsc9959_reset,
+ .wm_enc = vsc9959_wm_enc,
+ .wm_dec = vsc9959_wm_dec,
+ .wm_stat = vsc9959_wm_stat,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
+ .psfp_init = vsc9959_psfp_init,
+ .psfp_filter_add = vsc9959_psfp_filter_add,
+ .psfp_filter_del = vsc9959_psfp_filter_del,
+ .psfp_stats_get = vsc9959_psfp_stats_get,
+ .cut_through_fwd = vsc9959_cut_through_fwd,
+ .tas_clock_adjust = vsc9959_tas_clock_adjust,
+ .update_stats = vsc9959_update_stats,
+};
+
static const struct felix_info felix_info_vsc9959 = {
- .target_io_res = vsc9959_target_io_res,
- .port_io_res = vsc9959_port_io_res,
- .imdio_res = &vsc9959_imdio_res,
+ .resources = vsc9959_resources,
+ .num_resources = ARRAY_SIZE(vsc9959_resources),
+ .resource_names = vsc9959_resource_names,
.regfields = vsc9959_regfields,
.map = vsc9959_regmap,
.ops = &vsc9959_ops,
- .stats_layout = vsc9959_stats_layout,
- .num_stats = ARRAY_SIZE(vsc9959_stats_layout),
.vcap = vsc9959_vcap_props,
+ .vcap_pol_base = VSC9959_VCAP_POLICER_BASE,
+ .vcap_pol_max = VSC9959_VCAP_POLICER_MAX,
+ .vcap_pol_base2 = 0,
+ .vcap_pol_max2 = 0,
.num_mact_rows = 2048,
- .num_ports = 6,
+ .num_ports = VSC9959_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
- .switch_pci_bar = 4,
- .imdio_pci_bar = 0,
+ .quirks = FELIX_MAC_QUIRKS,
.quirk_no_xtr_irq = true,
.ptp_caps = &vsc9959_ptp_caps,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
.mdio_bus_free = vsc9959_mdio_bus_free,
- .phylink_validate = vsc9959_phylink_validate,
- .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
+ .port_modes = vsc9959_port_modes,
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
+ .tas_guard_bands_update = vsc9959_tas_guard_bands_update,
};
+/* The INTB interrupt is shared between PTP TX timestamp availability
+ * notification and MAC Merge status change on each port.
+ */
static irqreturn_t felix_irq_handler(int irq, void *data)
{
struct ocelot *ocelot = (struct ocelot *)data;
- /* The INTB interrupt is used for both PTP TX timestamp interrupt
- * and preemption status change interrupt on each port.
- *
- * - Get txtstamp if have
- * - TODO: handle preemption. Without handling it, driver may get
- * interrupt storm.
- */
-
ocelot_get_txtstamp(ocelot);
+ ocelot_mm_irq(ocelot);
return IRQ_HANDLED;
}
@@ -1417,10 +2672,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
ocelot->dev = &pdev->dev;
ocelot->num_flooding_pgids = OCELOT_NUM_TC;
felix->info = &felix_info_vsc9959;
- felix->switch_base = pci_resource_start(pdev,
- felix->info->switch_pci_bar);
- felix->imdio_base = pci_resource_start(pdev,
- felix->info->imdio_pci_bar);
+ felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
pci_set_master(pdev);
@@ -1433,6 +2685,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
}
ocelot->ptp = 1;
+ ocelot->mm_supported = true;
ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
if (!ds) {
@@ -1451,7 +2704,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
err = dsa_register_switch(ds);
if (err) {
- dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
+ dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
goto err_register_ds;
}
@@ -1481,8 +2734,6 @@ static void felix_pci_remove(struct pci_dev *pdev)
kfree(felix);
pci_disable_device(pdev);
-
- pci_set_drvdata(pdev, NULL);
}
static void felix_pci_shutdown(struct pci_dev *pdev)
diff --git a/drivers/net/dsa/ocelot/ocelot_ext.c b/drivers/net/dsa/ocelot/ocelot_ext.c
new file mode 100644
index 000000000000..c29bee5a5c48
--- /dev/null
+++ b/drivers/net/dsa/ocelot/ocelot_ext.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021-2022 Innovative Advantage Inc.
+ */
+
+#include <linux/mfd/ocelot.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <soc/mscc/ocelot.h>
+#include <soc/mscc/vsc7514_regs.h>
+#include "felix.h"
+
+#define VSC7514_NUM_PORTS 11
+
+#define OCELOT_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII)
+
+static const u32 vsc7512_port_modes[VSC7514_NUM_PORTS] = {
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_SGMII,
+ OCELOT_PORT_MODE_SERDES,
+};
+
+static const struct ocelot_ops ocelot_ext_ops = {
+ .reset = ocelot_reset,
+ .wm_enc = ocelot_wm_enc,
+ .wm_dec = ocelot_wm_dec,
+ .wm_stat = ocelot_wm_stat,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
+};
+
+static const char * const vsc7512_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [QS] = "qs",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
+};
+
+static const struct felix_info vsc7512_info = {
+ .resource_names = vsc7512_resource_names,
+ .regfields = vsc7514_regfields,
+ .map = vsc7514_regmap,
+ .ops = &ocelot_ext_ops,
+ .vcap = vsc7514_vcap_props,
+ .num_mact_rows = 1024,
+ .num_ports = VSC7514_NUM_PORTS,
+ .num_tx_queues = OCELOT_NUM_TC,
+ .port_modes = vsc7512_port_modes,
+ .phylink_mac_config = ocelot_phylink_mac_config,
+ .configure_serdes = ocelot_port_configure_serdes,
+};
+
+static int ocelot_ext_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dsa_switch *ds;
+ struct ocelot *ocelot;
+ struct felix *felix;
+ int err;
+
+ felix = kzalloc(sizeof(*felix), GFP_KERNEL);
+ if (!felix)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, felix);
+
+ ocelot = &felix->ocelot;
+ ocelot->dev = dev;
+
+ ocelot->num_flooding_pgids = 1;
+
+ felix->info = &vsc7512_info;
+
+ ds = kzalloc(sizeof(*ds), GFP_KERNEL);
+ if (!ds) {
+ err = -ENOMEM;
+ dev_err_probe(dev, err, "Failed to allocate DSA switch\n");
+ goto err_free_felix;
+ }
+
+ ds->dev = dev;
+ ds->num_ports = felix->info->num_ports;
+ ds->num_tx_queues = felix->info->num_tx_queues;
+
+ ds->ops = &felix_switch_ops;
+ ds->priv = ocelot;
+ felix->ds = ds;
+ felix->tag_proto = DSA_TAG_PROTO_OCELOT;
+
+ err = dsa_register_switch(ds);
+ if (err) {
+ dev_err_probe(dev, err, "Failed to register DSA switch\n");
+ goto err_free_ds;
+ }
+
+ return 0;
+
+err_free_ds:
+ kfree(ds);
+err_free_felix:
+ kfree(felix);
+ return err;
+}
+
+static int ocelot_ext_remove(struct platform_device *pdev)
+{
+ struct felix *felix = dev_get_drvdata(&pdev->dev);
+
+ if (!felix)
+ return 0;
+
+ dsa_unregister_switch(felix->ds);
+
+ kfree(felix->ds);
+ kfree(felix);
+
+ return 0;
+}
+
+static void ocelot_ext_shutdown(struct platform_device *pdev)
+{
+ struct felix *felix = dev_get_drvdata(&pdev->dev);
+
+ if (!felix)
+ return;
+
+ dsa_switch_shutdown(felix->ds);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+}
+
+static const struct of_device_id ocelot_ext_switch_of_match[] = {
+ { .compatible = "mscc,vsc7512-switch" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ocelot_ext_switch_of_match);
+
+static struct platform_driver ocelot_ext_switch_driver = {
+ .driver = {
+ .name = "ocelot-ext-switch",
+ .of_match_table = ocelot_ext_switch_of_match,
+ },
+ .probe = ocelot_ext_probe,
+ .remove = ocelot_ext_remove,
+ .shutdown = ocelot_ext_shutdown,
+};
+module_platform_driver(ocelot_ext_switch_driver);
+
+MODULE_DESCRIPTION("External Ocelot Switch driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(MFD_OCELOT);
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 92eae63150ea..96d4972a62f0 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -6,18 +6,37 @@
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
+#include <linux/mdio/mdio-mscc-miim.h>
+#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/pcs-lynx.h>
#include <linux/dsa/ocelot.h>
#include <linux/iopoll.h>
#include "felix.h"
-#define MSCC_MIIM_CMD_OPR_WRITE BIT(1)
-#define MSCC_MIIM_CMD_OPR_READ BIT(2)
-#define MSCC_MIIM_CMD_WRDATA_SHIFT 4
-#define MSCC_MIIM_CMD_REGAD_SHIFT 20
-#define MSCC_MIIM_CMD_PHYAD_SHIFT 25
-#define MSCC_MIIM_CMD_VLD BIT(31)
+#define VSC9953_NUM_PORTS 10
+
+#define VSC9953_VCAP_POLICER_BASE 11
+#define VSC9953_VCAP_POLICER_MAX 31
+#define VSC9953_VCAP_POLICER_BASE2 120
+#define VSC9953_VCAP_POLICER_MAX2 161
+
+#define VSC9953_PORT_MODE_SERDES (OCELOT_PORT_MODE_1000BASEX | \
+ OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII)
+
+static const u32 vsc9953_port_modes[VSC9953_NUM_PORTS] = {
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ VSC9953_PORT_MODE_SERDES,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+};
static const u32 vsc9953_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x00b500),
@@ -251,27 +270,98 @@ static const u32 vsc9953_rew_regmap[] = {
static const u32 vsc9953_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
+ REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
+ REG(SYS_COUNT_RX_BROADCAST, 0x00000c),
REG(SYS_COUNT_RX_SHORTS, 0x000010),
REG(SYS_COUNT_RX_FRAGMENTS, 0x000014),
REG(SYS_COUNT_RX_JABBERS, 0x000018),
+ REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c),
+ REG(SYS_COUNT_RX_SYM_ERRS, 0x000020),
REG(SYS_COUNT_RX_64, 0x000024),
REG(SYS_COUNT_RX_65_127, 0x000028),
REG(SYS_COUNT_RX_128_255, 0x00002c),
- REG(SYS_COUNT_RX_256_1023, 0x000030),
- REG(SYS_COUNT_RX_1024_1526, 0x000034),
- REG(SYS_COUNT_RX_1527_MAX, 0x000038),
+ REG(SYS_COUNT_RX_256_511, 0x000030),
+ REG(SYS_COUNT_RX_512_1023, 0x000034),
+ REG(SYS_COUNT_RX_1024_1526, 0x000038),
+ REG(SYS_COUNT_RX_1527_MAX, 0x00003c),
+ REG(SYS_COUNT_RX_PAUSE, 0x000040),
+ REG(SYS_COUNT_RX_CONTROL, 0x000044),
REG(SYS_COUNT_RX_LONGS, 0x000048),
+ REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x00004c),
+ REG(SYS_COUNT_RX_RED_PRIO_0, 0x000050),
+ REG(SYS_COUNT_RX_RED_PRIO_1, 0x000054),
+ REG(SYS_COUNT_RX_RED_PRIO_2, 0x000058),
+ REG(SYS_COUNT_RX_RED_PRIO_3, 0x00005c),
+ REG(SYS_COUNT_RX_RED_PRIO_4, 0x000060),
+ REG(SYS_COUNT_RX_RED_PRIO_5, 0x000064),
+ REG(SYS_COUNT_RX_RED_PRIO_6, 0x000068),
+ REG(SYS_COUNT_RX_RED_PRIO_7, 0x00006c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_0, 0x000070),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_1, 0x000074),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_2, 0x000078),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_3, 0x00007c),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_4, 0x000080),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_5, 0x000084),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_6, 0x000088),
+ REG(SYS_COUNT_RX_YELLOW_PRIO_7, 0x00008c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_0, 0x000090),
+ REG(SYS_COUNT_RX_GREEN_PRIO_1, 0x000094),
+ REG(SYS_COUNT_RX_GREEN_PRIO_2, 0x000098),
+ REG(SYS_COUNT_RX_GREEN_PRIO_3, 0x00009c),
+ REG(SYS_COUNT_RX_GREEN_PRIO_4, 0x0000a0),
+ REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
+ REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
+ REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
REG(SYS_COUNT_TX_OCTETS, 0x000100),
+ REG(SYS_COUNT_TX_UNICAST, 0x000104),
+ REG(SYS_COUNT_TX_MULTICAST, 0x000108),
+ REG(SYS_COUNT_TX_BROADCAST, 0x00010c),
REG(SYS_COUNT_TX_COLLISION, 0x000110),
REG(SYS_COUNT_TX_DROPS, 0x000114),
+ REG(SYS_COUNT_TX_PAUSE, 0x000118),
REG(SYS_COUNT_TX_64, 0x00011c),
REG(SYS_COUNT_TX_65_127, 0x000120),
- REG(SYS_COUNT_TX_128_511, 0x000124),
- REG(SYS_COUNT_TX_512_1023, 0x000128),
- REG(SYS_COUNT_TX_1024_1526, 0x00012c),
- REG(SYS_COUNT_TX_1527_MAX, 0x000130),
- REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_TX_128_255, 0x000124),
+ REG(SYS_COUNT_TX_256_511, 0x000128),
+ REG(SYS_COUNT_TX_512_1023, 0x00012c),
+ REG(SYS_COUNT_TX_1024_1526, 0x000130),
+ REG(SYS_COUNT_TX_1527_MAX, 0x000134),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_0, 0x000138),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_1, 0x00013c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_2, 0x000140),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_3, 0x000144),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_4, 0x000148),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_5, 0x00014c),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_6, 0x000150),
+ REG(SYS_COUNT_TX_YELLOW_PRIO_7, 0x000154),
+ REG(SYS_COUNT_TX_GREEN_PRIO_0, 0x000158),
+ REG(SYS_COUNT_TX_GREEN_PRIO_1, 0x00015c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_2, 0x000160),
+ REG(SYS_COUNT_TX_GREEN_PRIO_3, 0x000164),
+ REG(SYS_COUNT_TX_GREEN_PRIO_4, 0x000168),
+ REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
+ REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
+ REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
+ REG(SYS_COUNT_TX_AGED, 0x000178),
+ REG(SYS_COUNT_DROP_LOCAL, 0x000200),
+ REG(SYS_COUNT_DROP_TAIL, 0x000204),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_1, 0x00020c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_2, 0x000210),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_3, 0x000214),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_4, 0x000218),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_5, 0x00021c),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_6, 0x000220),
+ REG(SYS_COUNT_DROP_YELLOW_PRIO_7, 0x000224),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_0, 0x000228),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_1, 0x00022c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_2, 0x000230),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_3, 0x000234),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_4, 0x000238),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00023c),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000240),
+ REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000244),
REG(SYS_RESET_CFG, 0x000318),
REG_RESERVED(SYS_SR_ETYPE_CFG),
REG(SYS_VLAN_ETYPE_CFG, 0x000320),
@@ -293,7 +383,6 @@ static const u32 vsc9953_sys_regmap[] = {
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
- REG_RESERVED(SYS_CNT),
REG_RESERVED(SYS_PTP_STATUS),
REG_RESERVED(SYS_PTP_TXSTAMP),
REG_RESERVED(SYS_PTP_NXT),
@@ -369,110 +458,40 @@ static const u32 *vsc9953_regmap[TARGET_MAX] = {
};
/* Addresses are relative to the device's base address */
-static const struct resource vsc9953_target_io_res[TARGET_MAX] = {
- [ANA] = {
- .start = 0x0280000,
- .end = 0x028ffff,
- .name = "ana",
- },
- [QS] = {
- .start = 0x0080000,
- .end = 0x00800ff,
- .name = "qs",
- },
- [QSYS] = {
- .start = 0x0200000,
- .end = 0x021ffff,
- .name = "qsys",
- },
- [REW] = {
- .start = 0x0030000,
- .end = 0x003ffff,
- .name = "rew",
- },
- [SYS] = {
- .start = 0x0010000,
- .end = 0x001ffff,
- .name = "sys",
- },
- [S0] = {
- .start = 0x0040000,
- .end = 0x00403ff,
- .name = "s0",
- },
- [S1] = {
- .start = 0x0050000,
- .end = 0x00503ff,
- .name = "s1",
- },
- [S2] = {
- .start = 0x0060000,
- .end = 0x00603ff,
- .name = "s2",
- },
- [PTP] = {
- .start = 0x0090000,
- .end = 0x00900cb,
- .name = "ptp",
- },
- [GCB] = {
- .start = 0x0070000,
- .end = 0x00701ff,
- .name = "devcpu_gcb",
- },
+static const struct resource vsc9953_resources[] = {
+ DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
+ DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
+ DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
+ DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
+ DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
+ DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
+ DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
+ DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
+ DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
+ DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
+ DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
+ DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
+ DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
+ DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
+ DEFINE_RES_MEM_NAMED(0x0160000, 0x0010000, "port6"),
+ DEFINE_RES_MEM_NAMED(0x0170000, 0x0010000, "port7"),
+ DEFINE_RES_MEM_NAMED(0x0180000, 0x0010000, "port8"),
+ DEFINE_RES_MEM_NAMED(0x0190000, 0x0010000, "port9"),
+ DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
+ DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
};
-static const struct resource vsc9953_port_io_res[] = {
- {
- .start = 0x0100000,
- .end = 0x010ffff,
- .name = "port0",
- },
- {
- .start = 0x0110000,
- .end = 0x011ffff,
- .name = "port1",
- },
- {
- .start = 0x0120000,
- .end = 0x012ffff,
- .name = "port2",
- },
- {
- .start = 0x0130000,
- .end = 0x013ffff,
- .name = "port3",
- },
- {
- .start = 0x0140000,
- .end = 0x014ffff,
- .name = "port4",
- },
- {
- .start = 0x0150000,
- .end = 0x015ffff,
- .name = "port5",
- },
- {
- .start = 0x0160000,
- .end = 0x016ffff,
- .name = "port6",
- },
- {
- .start = 0x0170000,
- .end = 0x017ffff,
- .name = "port7",
- },
- {
- .start = 0x0180000,
- .end = 0x018ffff,
- .name = "port8",
- },
- {
- .start = 0x0190000,
- .end = 0x019ffff,
- .name = "port9",
- },
+static const char * const vsc9953_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [GCB] = "devcpu_gcb",
+ [QS] = "qs",
+ [PTP] = "ptp",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
};
static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
@@ -524,102 +543,6 @@ static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
[SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4),
};
-static const struct ocelot_stat_layout vsc9953_stats_layout[] = {
- { .offset = 0x00, .name = "rx_octets", },
- { .offset = 0x01, .name = "rx_unicast", },
- { .offset = 0x02, .name = "rx_multicast", },
- { .offset = 0x03, .name = "rx_broadcast", },
- { .offset = 0x04, .name = "rx_shorts", },
- { .offset = 0x05, .name = "rx_fragments", },
- { .offset = 0x06, .name = "rx_jabbers", },
- { .offset = 0x07, .name = "rx_crc_align_errs", },
- { .offset = 0x08, .name = "rx_sym_errs", },
- { .offset = 0x09, .name = "rx_frames_below_65_octets", },
- { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", },
- { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", },
- { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", },
- { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", },
- { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", },
- { .offset = 0x0F, .name = "rx_frames_over_1526_octets", },
- { .offset = 0x10, .name = "rx_pause", },
- { .offset = 0x11, .name = "rx_control", },
- { .offset = 0x12, .name = "rx_longs", },
- { .offset = 0x13, .name = "rx_classified_drops", },
- { .offset = 0x14, .name = "rx_red_prio_0", },
- { .offset = 0x15, .name = "rx_red_prio_1", },
- { .offset = 0x16, .name = "rx_red_prio_2", },
- { .offset = 0x17, .name = "rx_red_prio_3", },
- { .offset = 0x18, .name = "rx_red_prio_4", },
- { .offset = 0x19, .name = "rx_red_prio_5", },
- { .offset = 0x1A, .name = "rx_red_prio_6", },
- { .offset = 0x1B, .name = "rx_red_prio_7", },
- { .offset = 0x1C, .name = "rx_yellow_prio_0", },
- { .offset = 0x1D, .name = "rx_yellow_prio_1", },
- { .offset = 0x1E, .name = "rx_yellow_prio_2", },
- { .offset = 0x1F, .name = "rx_yellow_prio_3", },
- { .offset = 0x20, .name = "rx_yellow_prio_4", },
- { .offset = 0x21, .name = "rx_yellow_prio_5", },
- { .offset = 0x22, .name = "rx_yellow_prio_6", },
- { .offset = 0x23, .name = "rx_yellow_prio_7", },
- { .offset = 0x24, .name = "rx_green_prio_0", },
- { .offset = 0x25, .name = "rx_green_prio_1", },
- { .offset = 0x26, .name = "rx_green_prio_2", },
- { .offset = 0x27, .name = "rx_green_prio_3", },
- { .offset = 0x28, .name = "rx_green_prio_4", },
- { .offset = 0x29, .name = "rx_green_prio_5", },
- { .offset = 0x2A, .name = "rx_green_prio_6", },
- { .offset = 0x2B, .name = "rx_green_prio_7", },
- { .offset = 0x40, .name = "tx_octets", },
- { .offset = 0x41, .name = "tx_unicast", },
- { .offset = 0x42, .name = "tx_multicast", },
- { .offset = 0x43, .name = "tx_broadcast", },
- { .offset = 0x44, .name = "tx_collision", },
- { .offset = 0x45, .name = "tx_drops", },
- { .offset = 0x46, .name = "tx_pause", },
- { .offset = 0x47, .name = "tx_frames_below_65_octets", },
- { .offset = 0x48, .name = "tx_frames_65_to_127_octets", },
- { .offset = 0x49, .name = "tx_frames_128_255_octets", },
- { .offset = 0x4A, .name = "tx_frames_256_511_octets", },
- { .offset = 0x4B, .name = "tx_frames_512_1023_octets", },
- { .offset = 0x4C, .name = "tx_frames_1024_1526_octets", },
- { .offset = 0x4D, .name = "tx_frames_over_1526_octets", },
- { .offset = 0x4E, .name = "tx_yellow_prio_0", },
- { .offset = 0x4F, .name = "tx_yellow_prio_1", },
- { .offset = 0x50, .name = "tx_yellow_prio_2", },
- { .offset = 0x51, .name = "tx_yellow_prio_3", },
- { .offset = 0x52, .name = "tx_yellow_prio_4", },
- { .offset = 0x53, .name = "tx_yellow_prio_5", },
- { .offset = 0x54, .name = "tx_yellow_prio_6", },
- { .offset = 0x55, .name = "tx_yellow_prio_7", },
- { .offset = 0x56, .name = "tx_green_prio_0", },
- { .offset = 0x57, .name = "tx_green_prio_1", },
- { .offset = 0x58, .name = "tx_green_prio_2", },
- { .offset = 0x59, .name = "tx_green_prio_3", },
- { .offset = 0x5A, .name = "tx_green_prio_4", },
- { .offset = 0x5B, .name = "tx_green_prio_5", },
- { .offset = 0x5C, .name = "tx_green_prio_6", },
- { .offset = 0x5D, .name = "tx_green_prio_7", },
- { .offset = 0x5E, .name = "tx_aged", },
- { .offset = 0x80, .name = "drop_local", },
- { .offset = 0x81, .name = "drop_tail", },
- { .offset = 0x82, .name = "drop_yellow_prio_0", },
- { .offset = 0x83, .name = "drop_yellow_prio_1", },
- { .offset = 0x84, .name = "drop_yellow_prio_2", },
- { .offset = 0x85, .name = "drop_yellow_prio_3", },
- { .offset = 0x86, .name = "drop_yellow_prio_4", },
- { .offset = 0x87, .name = "drop_yellow_prio_5", },
- { .offset = 0x88, .name = "drop_yellow_prio_6", },
- { .offset = 0x89, .name = "drop_yellow_prio_7", },
- { .offset = 0x8A, .name = "drop_green_prio_0", },
- { .offset = 0x8B, .name = "drop_green_prio_1", },
- { .offset = 0x8C, .name = "drop_green_prio_2", },
- { .offset = 0x8D, .name = "drop_green_prio_3", },
- { .offset = 0x8E, .name = "drop_green_prio_4", },
- { .offset = 0x8F, .name = "drop_green_prio_5", },
- { .offset = 0x90, .name = "drop_green_prio_6", },
- { .offset = 0x91, .name = "drop_green_prio_7", },
-};
-
static const struct vcap_field vsc9953_vcap_es0_keys[] = {
[VCAP_ES0_EGR_PORT] = { 0, 4},
[VCAP_ES0_IGR_PORT] = { 4, 4},
@@ -857,7 +780,6 @@ static struct vcap_props vsc9953_vcap_props[] = {
#define VSC9953_INIT_TIMEOUT 50000
#define VSC9953_GCB_RST_SLEEP 100
#define VSC9953_SYS_RAMINIT_SLEEP 80
-#define VCS9953_MII_TIMEOUT 10000
static int vsc9953_gcb_soft_rst_status(struct ocelot *ocelot)
{
@@ -877,82 +799,6 @@ static int vsc9953_sys_ram_init_status(struct ocelot *ocelot)
return val;
}
-static int vsc9953_gcb_miim_pending_status(struct ocelot *ocelot)
-{
- int val;
-
- ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_PENDING, &val);
-
- return val;
-}
-
-static int vsc9953_gcb_miim_busy_status(struct ocelot *ocelot)
-{
- int val;
-
- ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_BUSY, &val);
-
- return val;
-}
-
-static int vsc9953_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
- u16 value)
-{
- struct ocelot *ocelot = bus->priv;
- int err, cmd, val;
-
- /* Wait while MIIM controller becomes idle */
- err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot,
- val, !val, 10, VCS9953_MII_TIMEOUT);
- if (err) {
- dev_err(ocelot->dev, "MDIO write: pending timeout\n");
- goto out;
- }
-
- cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
- (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) |
- (value << MSCC_MIIM_CMD_WRDATA_SHIFT) |
- MSCC_MIIM_CMD_OPR_WRITE;
-
- ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD);
-
-out:
- return err;
-}
-
-static int vsc9953_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
-{
- struct ocelot *ocelot = bus->priv;
- int err, cmd, val;
-
- /* Wait until MIIM controller becomes idle */
- err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot,
- val, !val, 10, VCS9953_MII_TIMEOUT);
- if (err) {
- dev_err(ocelot->dev, "MDIO read: pending timeout\n");
- goto out;
- }
-
- /* Write the MIIM COMMAND register */
- cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
- (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | MSCC_MIIM_CMD_OPR_READ;
-
- ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD);
-
- /* Wait while read operation via the MIIM controller is in progress */
- err = readx_poll_timeout(vsc9953_gcb_miim_busy_status, ocelot,
- val, !val, 10, VCS9953_MII_TIMEOUT);
- if (err) {
- dev_err(ocelot->dev, "MDIO read: busy timeout\n");
- goto out;
- }
-
- val = ocelot_read(ocelot, GCB_MIIM_MII_DATA);
-
- err = val & 0xFFFF;
-out:
- return err;
-}
/* CORE_ENA is in SYS:SYSTEM:RESET_CFG
* MEM_INIT is in SYS:SYSTEM:RESET_CFG
@@ -990,57 +836,6 @@ static int vsc9953_reset(struct ocelot *ocelot)
return 0;
}
-static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != ocelot_port->phy_mode) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 1000baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_INTERNAL) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
-static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port,
- phy_interface_t phy_mode)
-{
- switch (phy_mode) {
- case PHY_INTERFACE_MODE_INTERNAL:
- if (port != 8 && port != 9)
- return -ENOTSUPP;
- return 0;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- /* Not supported on internal to-CPU ports */
- if (port == 8 || port == 9)
- return -ENOTSUPP;
- return 0;
- default:
- return -ENOTSUPP;
- }
-}
-
/* Watermark encode
* Bit 9: Unit; 0:1, 1:16
* Bit 8-0: Value to be multiplied with unit
@@ -1089,26 +884,24 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
int rc;
felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
- sizeof(struct phy_device *),
+ sizeof(struct phylink_pcs *),
GFP_KERNEL);
if (!felix->pcs) {
dev_err(dev, "failed to allocate array for PCS PHYs\n");
return -ENOMEM;
}
- bus = devm_mdiobus_alloc(dev);
- if (!bus)
- return -ENOMEM;
-
- bus->name = "VSC9953 internal MDIO bus";
- bus->read = vsc9953_mdio_read;
- bus->write = vsc9953_mdio_write;
- bus->parent = dev;
- bus->priv = ocelot;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
+ rc = mscc_miim_setup(dev, &bus, "VSC9953 internal MDIO bus",
+ ocelot->targets[GCB],
+ ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK],
+ true);
+ if (rc) {
+ dev_err(dev, "failed to setup MDIO bus\n");
+ return rc;
+ }
/* Needed in order to initialize the bus mutex lock */
- rc = mdiobus_register(bus);
+ rc = devm_of_mdiobus_register(dev, bus, NULL);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
return rc;
@@ -1118,9 +911,9 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct phylink_pcs *phylink_pcs;
+ struct mdio_device *mdio_device;
int addr = port + 4;
- struct mdio_device *pcs;
- struct lynx_pcs *lynx;
if (dsa_is_unused_port(felix->ds, port))
continue;
@@ -1128,17 +921,17 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
continue;
- pcs = mdio_device_create(felix->imdio, addr);
- if (IS_ERR(pcs))
+ mdio_device = mdio_device_create(felix->imdio, addr);
+ if (IS_ERR(mdio_device))
continue;
- lynx = lynx_pcs_create(pcs);
- if (!lynx) {
- mdio_device_free(pcs);
+ phylink_pcs = lynx_pcs_create(mdio_device);
+ if (!phylink_pcs) {
+ mdio_device_free(mdio_device);
continue;
}
- felix->pcs[port] = lynx;
+ felix->pcs[port] = phylink_pcs;
dev_info(dev, "Found PCS at internal MDIO address %d\n", addr);
}
@@ -1152,33 +945,39 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct lynx_pcs *pcs = felix->pcs[port];
+ struct phylink_pcs *phylink_pcs = felix->pcs[port];
+ struct mdio_device *mdio_device;
- if (!pcs)
+ if (!phylink_pcs)
continue;
- mdio_device_free(pcs->mdio);
- lynx_pcs_destroy(pcs);
+ mdio_device = lynx_get_mdio_device(phylink_pcs);
+ mdio_device_free(mdio_device);
+ lynx_pcs_destroy(phylink_pcs);
}
- mdiobus_unregister(felix->imdio);
+
+ /* mdiobus_unregister and mdiobus_free handled by devres */
}
static const struct felix_info seville_info_vsc9953 = {
- .target_io_res = vsc9953_target_io_res,
- .port_io_res = vsc9953_port_io_res,
+ .resources = vsc9953_resources,
+ .num_resources = ARRAY_SIZE(vsc9953_resources),
+ .resource_names = vsc9953_resource_names,
.regfields = vsc9953_regfields,
.map = vsc9953_regmap,
.ops = &vsc9953_ops,
- .stats_layout = vsc9953_stats_layout,
- .num_stats = ARRAY_SIZE(vsc9953_stats_layout),
.vcap = vsc9953_vcap_props,
+ .vcap_pol_base = VSC9953_VCAP_POLICER_BASE,
+ .vcap_pol_max = VSC9953_VCAP_POLICER_MAX,
+ .vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2,
+ .vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2,
+ .quirks = FELIX_MAC_QUIRKS,
.num_mact_rows = 2048,
- .num_ports = 10,
+ .num_ports = VSC9953_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
.mdio_bus_free = vsc9953_mdio_bus_free,
- .phylink_validate = vsc9953_phylink_validate,
- .prevalidate_phy_mode = vsc9953_prevalidate_phy_mode,
+ .port_modes = vsc9953_port_modes,
};
static int seville_probe(struct platform_device *pdev)
@@ -1253,8 +1052,6 @@ static int seville_remove(struct platform_device *pdev)
kfree(felix->ds);
kfree(felix);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
@@ -1282,7 +1079,7 @@ static struct platform_driver seville_vsc9953_driver = {
.shutdown = seville_shutdown,
.driver = {
.name = "mscc_seville",
- .of_match_table = of_match_ptr(seville_of_match),
+ .of_match_table = seville_of_match,
},
};
module_platform_driver(seville_vsc9953_driver);
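The seville changes above replace the driver's hand-rolled MIIM read/write helpers and the manual mdiobus_register()/mdiobus_unregister() pair with the shared mscc_miim_setup() helper plus devres-managed registration, which is why vsc9953_mdio_bus_free() now only tears down the PCS objects. A rough sketch of the devres-managed registration shape the driver moves to (names other than the devm_* helpers are illustrative, not taken from the patch):

	struct mii_bus *bus;
	int ret;

	bus = devm_mdiobus_alloc(dev);		/* freed automatically on driver detach */
	if (!bus)
		return -ENOMEM;

	/* fill in bus->name, ->read, ->write, ->priv, ->parent here */

	ret = devm_of_mdiobus_register(dev, bus, np);	/* unregistered automatically too */
	if (ret)
		return ret;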
diff --git a/drivers/net/dsa/qca/Kconfig b/drivers/net/dsa/qca/Kconfig
index 13b7e679b8b5..4347b42c50fd 100644
--- a/drivers/net/dsa/qca/Kconfig
+++ b/drivers/net/dsa/qca/Kconfig
@@ -7,3 +7,19 @@ config NET_DSA_AR9331
help
This enables support for the Qualcomm Atheros AR9331 built-in Ethernet
switch.
+
+config NET_DSA_QCA8K
+ tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
+ select NET_DSA_TAG_QCA
+ select REGMAP
+ help
+ This enables support for the Qualcomm Atheros QCA8K Ethernet
+ switch chips.
+
+config NET_DSA_QCA8K_LEDS_SUPPORT
+ bool "Qualcomm Atheros QCA8K Ethernet switch family LEDs support"
+ depends on NET_DSA_QCA8K
+ depends on LEDS_CLASS=y || LEDS_CLASS=NET_DSA_QCA8K
+ help
+ This enables support for LEDs present on the Qualcomm Atheros
+ QCA8K Ethernet switch chips.
diff --git a/drivers/net/dsa/qca/Makefile b/drivers/net/dsa/qca/Makefile
index 274022319066..ce66b1984e5f 100644
--- a/drivers/net/dsa/qca/Makefile
+++ b/drivers/net/dsa/qca/Makefile
@@ -1,2 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o
+obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
+qca8k-y += qca8k-common.o qca8k-8xxx.o
+ifdef CONFIG_NET_DSA_QCA8K_LEDS_SUPPORT
+qca8k-y += qca8k-leds.o
+endif
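The new Kconfig symbol uses the usual "LEDS_CLASS=y || LEDS_CLASS=NET_DSA_QCA8K" dependency so the LED core cannot end up as a module while qca8k itself is built in, and the Makefile only links qca8k-leds.o when the option is enabled. A plausible shape for the corresponding stubs in qca8k_leds.h, so the rest of the driver builds either way (the function name here is an assumption for illustration, not quoted from the patch):

	#ifdef CONFIG_NET_DSA_QCA8K_LEDS_SUPPORT
	int qca8k_setup_led_ctrl(struct qca8k_priv *priv);	/* hypothetical hook name */
	#else
	static inline int qca8k_setup_led_ctrl(struct qca8k_priv *priv)
	{
		return 0;
	}
	#endif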
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index da0d7e68643a..e7b98b864fa1 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -231,6 +231,7 @@ struct ar9331_sw_port {
int idx;
struct delayed_work mib_read;
struct rtnl_link_stats64 stats;
+ struct ethtool_pause_stats pause_stats;
struct spinlock stats_lock;
};
@@ -378,7 +379,7 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
if (!mnp)
return -ENODEV;
- ret = of_mdiobus_register(mbus, mnp);
+ ret = devm_of_mdiobus_register(dev, mbus, mnp);
of_node_put(mnp);
if (ret)
return ret;
@@ -499,52 +500,27 @@ static enum dsa_tag_protocol ar9331_sw_get_tag_protocol(struct dsa_switch *ds,
return DSA_TAG_PROTO_AR9331;
}
-static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void ar9331_sw_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
switch (port) {
case 0:
- if (state->interface != PHY_INTERFACE_MODE_GMII)
- goto unsupported;
-
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ config->mac_capabilities |= MAC_1000;
break;
case 1:
case 2:
case 3:
case 4:
case 5:
- if (state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
break;
- default:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
}
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-
-unsupported:
- linkmode_zero(supported);
- dev_err(ds->dev, "Unsupported interface: %d, port: %d\n",
- state->interface, port);
}
static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port,
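The hunk above converts ar9331 from the old phylink_validate callback, which had to filter an ethtool link-mode mask per port, to phylink_get_caps, where the driver only declares the supported PHY interface modes and MAC capabilities and phylink derives the link modes itself. A minimal sketch of the same callback style for a hypothetical switch (port layout, interface choice and speeds are illustrative only):

	static void example_phylink_get_caps(struct dsa_switch *ds, int port,
					     struct phylink_config *config)
	{
		config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
					   MAC_10 | MAC_100 | MAC_1000;

		if (port == 0)	/* hypothetical CPU-facing port */
			__set_bit(PHY_INTERFACE_MODE_RGMII,
				  config->supported_interfaces);
		else		/* hypothetical user ports with internal PHYs */
			__set_bit(PHY_INTERFACE_MODE_INTERNAL,
				  config->supported_interfaces);
	}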
@@ -629,6 +605,7 @@ static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
static void ar9331_read_stats(struct ar9331_sw_port *port)
{
struct ar9331_sw_priv *priv = ar9331_sw_port_to_priv(port);
+ struct ethtool_pause_stats *pstats = &port->pause_stats;
struct rtnl_link_stats64 *stats = &port->stats;
struct ar9331_sw_stats_raw raw;
int ret;
@@ -669,6 +646,9 @@ static void ar9331_read_stats(struct ar9331_sw_port *port)
stats->multicast += raw.rxmulti;
stats->collisions += raw.txcollision;
+ pstats->tx_pause_frames += raw.txpause;
+ pstats->rx_pause_frames += raw.rxpause;
+
spin_unlock(&port->stats_lock);
}
@@ -693,15 +673,27 @@ static void ar9331_get_stats64(struct dsa_switch *ds, int port,
spin_unlock(&p->stats_lock);
}
+static void ar9331_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
+ struct ar9331_sw_port *p = &priv->port[port];
+
+ spin_lock(&p->stats_lock);
+ memcpy(pause_stats, &p->pause_stats, sizeof(*pause_stats));
+ spin_unlock(&p->stats_lock);
+}
+
static const struct dsa_switch_ops ar9331_sw_ops = {
.get_tag_protocol = ar9331_sw_get_tag_protocol,
.setup = ar9331_sw_setup,
.port_disable = ar9331_sw_port_disable,
- .phylink_validate = ar9331_sw_phylink_validate,
+ .phylink_get_caps = ar9331_sw_phylink_get_caps,
.phylink_mac_config = ar9331_sw_phylink_mac_config,
.phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
.phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
.get_stats64 = ar9331_get_stats64,
+ .get_pause_stats = ar9331_get_pause_stats,
};
static irqreturn_t ar9331_sw_irq(int irq, void *data)
@@ -843,7 +835,7 @@ static int __ar9331_mdio_write(struct mii_bus *sbus, u8 mode, u16 reg, u16 val)
FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
- return mdiobus_write(sbus, p, r, val);
+ return __mdiobus_write(sbus, p, r, val);
}
static int __ar9331_mdio_read(struct mii_bus *sbus, u16 reg)
@@ -854,7 +846,7 @@ static int __ar9331_mdio_read(struct mii_bus *sbus, u16 reg)
FIELD_GET(AR9331_SW_LOW_ADDR_PHY, reg);
r = FIELD_GET(AR9331_SW_LOW_ADDR_REG, reg);
- return mdiobus_read(sbus, p, r);
+ return __mdiobus_read(sbus, p, r);
}
static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
@@ -874,6 +866,8 @@ static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
return 0;
}
+ mutex_lock_nested(&sbus->mdio_lock, MDIO_MUTEX_NESTED);
+
ret = __ar9331_mdio_read(sbus, reg);
if (ret < 0)
goto error;
@@ -885,9 +879,13 @@ static int ar9331_mdio_read(void *ctx, const void *reg_buf, size_t reg_len,
*(u32 *)val_buf |= ret << 16;
+ mutex_unlock(&sbus->mdio_lock);
+
return 0;
error:
+ mutex_unlock(&sbus->mdio_lock);
dev_err_ratelimited(&sbus->dev, "Bus error. Failed to read register.\n");
+
return ret;
}
@@ -897,12 +895,15 @@ static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
struct mii_bus *sbus = priv->sbus;
int ret;
+ mutex_lock_nested(&sbus->mdio_lock, MDIO_MUTEX_NESTED);
if (reg == AR9331_SW_REG_PAGE) {
ret = __ar9331_mdio_write(sbus, AR9331_SW_MDIO_PHY_MODE_PAGE,
0, val);
if (ret < 0)
goto error;
+ mutex_unlock(&sbus->mdio_lock);
+
return 0;
}
@@ -922,10 +923,14 @@ static int ar9331_mdio_write(void *ctx, u32 reg, u32 val)
if (ret < 0)
goto error;
+ mutex_unlock(&sbus->mdio_lock);
+
return 0;
error:
+ mutex_unlock(&sbus->mdio_lock);
dev_err_ratelimited(&sbus->dev, "Bus error. Failed to write register.\n");
+
return ret;
}
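The regmap accessor changes above take the host MDIO bus lock once per 32-bit transaction, using the MDIO_MUTEX_NESTED annotation so lockdep accepts nesting under another MDIO bus lock, and switch to the unlocked __mdiobus_read()/__mdiobus_write() helpers inside, which expect the caller to already hold mdio_lock. This keeps the paired 16-bit accesses that make up one switch register from being interleaved with other traffic on the bus. The general idiom, with placeholder names (phy_addr, reg_lo, reg_hi are not from the patch):

	int lo, hi;

	mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
	lo = __mdiobus_read(bus, phy_addr, reg_lo);	/* raw accessor: caller holds mdio_lock */
	hi = __mdiobus_read(bus, phy_addr, reg_hi);
	mutex_unlock(&bus->mdio_lock);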
@@ -1091,12 +1096,9 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
}
irq_domain_remove(priv->irqdomain);
- mdiobus_unregister(priv->mbus);
dsa_unregister_switch(&priv->ds);
reset_control_assert(priv->sw_reset);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void ar9331_sw_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 147ca39531a3..6d5ac7588a69 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -9,72 +9,20 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
+#include <linux/bitfield.h>
+#include <linux/regmap.h>
#include <net/dsa.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
-#include <linux/if_bridge.h>
#include <linux/mdio.h>
#include <linux/phylink.h>
#include <linux/gpio/consumer.h>
#include <linux/etherdevice.h>
+#include <linux/dsa/tag_qca.h>
#include "qca8k.h"
-
-#define MIB_DESC(_s, _o, _n) \
- { \
- .size = (_s), \
- .offset = (_o), \
- .name = (_n), \
- }
-
-static const struct qca8k_mib_desc ar8327_mib[] = {
- MIB_DESC(1, 0x00, "RxBroad"),
- MIB_DESC(1, 0x04, "RxPause"),
- MIB_DESC(1, 0x08, "RxMulti"),
- MIB_DESC(1, 0x0c, "RxFcsErr"),
- MIB_DESC(1, 0x10, "RxAlignErr"),
- MIB_DESC(1, 0x14, "RxRunt"),
- MIB_DESC(1, 0x18, "RxFragment"),
- MIB_DESC(1, 0x1c, "Rx64Byte"),
- MIB_DESC(1, 0x20, "Rx128Byte"),
- MIB_DESC(1, 0x24, "Rx256Byte"),
- MIB_DESC(1, 0x28, "Rx512Byte"),
- MIB_DESC(1, 0x2c, "Rx1024Byte"),
- MIB_DESC(1, 0x30, "Rx1518Byte"),
- MIB_DESC(1, 0x34, "RxMaxByte"),
- MIB_DESC(1, 0x38, "RxTooLong"),
- MIB_DESC(2, 0x3c, "RxGoodByte"),
- MIB_DESC(2, 0x44, "RxBadByte"),
- MIB_DESC(1, 0x4c, "RxOverFlow"),
- MIB_DESC(1, 0x50, "Filtered"),
- MIB_DESC(1, 0x54, "TxBroad"),
- MIB_DESC(1, 0x58, "TxPause"),
- MIB_DESC(1, 0x5c, "TxMulti"),
- MIB_DESC(1, 0x60, "TxUnderRun"),
- MIB_DESC(1, 0x64, "Tx64Byte"),
- MIB_DESC(1, 0x68, "Tx128Byte"),
- MIB_DESC(1, 0x6c, "Tx256Byte"),
- MIB_DESC(1, 0x70, "Tx512Byte"),
- MIB_DESC(1, 0x74, "Tx1024Byte"),
- MIB_DESC(1, 0x78, "Tx1518Byte"),
- MIB_DESC(1, 0x7c, "TxMaxByte"),
- MIB_DESC(1, 0x80, "TxOverSize"),
- MIB_DESC(2, 0x84, "TxByte"),
- MIB_DESC(1, 0x8c, "TxCollision"),
- MIB_DESC(1, 0x90, "TxAbortCol"),
- MIB_DESC(1, 0x94, "TxMultiCol"),
- MIB_DESC(1, 0x98, "TxSingleCol"),
- MIB_DESC(1, 0x9c, "TxExcDefer"),
- MIB_DESC(1, 0xa0, "TxDefer"),
- MIB_DESC(1, 0xa4, "TxLateCol"),
-};
-
-/* The 32bit switch registers are accessed indirectly. To achieve this we need
- * to set the page of the register. Track the last page that was set to reduce
- * mdio writes
- */
-static u16 qca8k_current_page = 0xffff;
+#include "qca8k_leds.h"
static void
qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
@@ -90,50 +38,114 @@ qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
}
static int
-qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
+qca8k_mii_write_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
{
int ret;
+ u16 lo;
- ret = bus->read(bus, phy_id, regnum);
- if (ret >= 0) {
- *val = ret;
- ret = bus->read(bus, phy_id, regnum + 1);
- *val |= ret << 16;
- }
+ lo = val & 0xffff;
+ ret = bus->write(bus, phy_id, regnum, lo);
+ if (ret < 0)
+ dev_err_ratelimited(&bus->dev,
+ "failed to write qca8k 32bit lo register\n");
- if (ret < 0) {
+ return ret;
+}
+
+static int
+qca8k_mii_write_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
+{
+ int ret;
+ u16 hi;
+
+ hi = (u16)(val >> 16);
+ ret = bus->write(bus, phy_id, regnum, hi);
+ if (ret < 0)
dev_err_ratelimited(&bus->dev,
- "failed to read qca8k 32bit register\n");
- *val = 0;
- return ret;
- }
+ "failed to write qca8k 32bit hi register\n");
+
+ return ret;
+}
+
+static int
+qca8k_mii_read_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
+{
+ int ret;
+
+ ret = bus->read(bus, phy_id, regnum);
+ if (ret < 0)
+ goto err;
+ *val = ret & 0xffff;
return 0;
+
+err:
+ dev_err_ratelimited(&bus->dev,
+ "failed to read qca8k 32bit lo register\n");
+ *val = 0;
+
+ return ret;
}
-static void
-qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
+static int
+qca8k_mii_read_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
{
- u16 lo, hi;
int ret;
- lo = val & 0xffff;
- hi = (u16)(val >> 16);
+ ret = bus->read(bus, phy_id, regnum);
+ if (ret < 0)
+ goto err;
- ret = bus->write(bus, phy_id, regnum, lo);
- if (ret >= 0)
- ret = bus->write(bus, phy_id, regnum + 1, hi);
+ *val = ret << 16;
+ return 0;
+
+err:
+ dev_err_ratelimited(&bus->dev,
+ "failed to read qca8k 32bit hi register\n");
+ *val = 0;
+
+ return ret;
+}
+
+static int
+qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
+{
+ u32 hi, lo;
+ int ret;
+
+ *val = 0;
+
+ ret = qca8k_mii_read_lo(bus, phy_id, regnum, &lo);
if (ret < 0)
- dev_err_ratelimited(&bus->dev,
- "failed to write qca8k 32bit register\n");
+ goto err;
+
+ ret = qca8k_mii_read_hi(bus, phy_id, regnum + 1, &hi);
+ if (ret < 0)
+ goto err;
+
+ *val = lo | hi;
+
+err:
+ return ret;
+}
+
+static void
+qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
+{
+ if (qca8k_mii_write_lo(bus, phy_id, regnum, val) < 0)
+ return;
+
+ qca8k_mii_write_hi(bus, phy_id, regnum + 1, val);
}
static int
-qca8k_set_page(struct mii_bus *bus, u16 page)
+qca8k_set_page(struct qca8k_priv *priv, u16 page)
{
+ u16 *cached_page = &priv->mdio_cache.page;
+ struct mii_bus *bus = priv->bus;
int ret;
- if (page == qca8k_current_page)
+ if (page == *cached_page)
return 0;
ret = bus->write(bus, 0x18, 0, page);
@@ -143,13 +155,278 @@ qca8k_set_page(struct mii_bus *bus, u16 page)
return ret;
}
- qca8k_current_page = page;
+ *cached_page = page;
usleep_range(1000, 2000);
return 0;
}
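With the change above, the cached page moves from the old file-scope static into per-switch state, so two qca8k instances on separate MDIO buses can no longer clobber each other's idea of the currently selected page. A rough illustration of the per-instance state, with the field name taken from the patch and the struct shape otherwise assumed:

	struct qca8k_mdio_cache {
		/* last page written to register 0x18; 0xffff means "no page
		 * selected yet", matching the old static's initial value
		 */
		u16 page;
	};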
+static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ u32 command;
+ u8 len, cmd;
+ int i;
+
+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb);
+ mgmt_eth_data = &priv->mgmt_eth_data;
+
+ command = get_unaligned_le32(&mgmt_ethhdr->command);
+ cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);
+
+ len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
+ /* Special case for len of 15 as this is the max value for len and needs to
+ * be increased before converting it from word to dword.
+ */
+ if (len == 15)
+ len++;
+
+ /* We can ignore odd values; we always round them up in the alloc function. */
+ len *= sizeof(u16);
+
+ /* Make sure the seq match the requested packet */
+ if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
+ mgmt_eth_data->ack = true;
+
+ if (cmd == MDIO_READ) {
+ u32 *val = mgmt_eth_data->data;
+
+ *val = get_unaligned_le32(&mgmt_ethhdr->mdio_data);
+
+ /* Get the rest of the 12 bytes of data.
+ * The read/write function will extract the requested data.
+ */
+ if (len > QCA_HDR_MGMT_DATA1_LEN) {
+ __le32 *data2 = (__le32 *)skb->data;
+ int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
+ len - QCA_HDR_MGMT_DATA1_LEN);
+
+ val++;
+
+ for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
+ *val = get_unaligned_le32(data2);
+ val++;
+ data2++;
+ }
+ }
+ }
+
+ complete(&mgmt_eth_data->rw_done);
+}
+
+static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *val,
+ int priority, unsigned int len)
+{
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ unsigned int real_len;
+ struct sk_buff *skb;
+ __le32 *data2;
+ u32 command;
+ u16 hdr;
+ int i;
+
+ skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN);
+ if (!skb)
+ return NULL;
+
+ /* The hdr mgmt length value is expressed in steps of word size.
+ * As an example, to process 4 bytes of data the correct length to set is 2.
+ * To process 8 bytes it is 4, for 12 bytes 6, for 16 bytes 8...
+ *
+ * Odd values will always return the next size on the ack packet.
+ * (length of 3 (6 byte) will always return 8 bytes of data)
+ *
+ * This means that a value of 15 (0xf) actually means reading/writing 32 bytes
+ * of data.
+ *
+ * To correctly calculate the length we divide the requested len by word size and
+ * round up.
+ * On the ack function we can skip the odd check as we already handle the
+ * case here.
+ */
+ real_len = DIV_ROUND_UP(len, sizeof(u16));
+
+ /* We check if the result len is odd and we round up another time to
+ * the next size. (length of 3 will be increased to 4 as switch will always
+ * return 8 bytes)
+ */
+ if (real_len % sizeof(u16) != 0)
+ real_len++;
+
+ /* Max length value is 0xf (15) but the switch will always return the next size (32 bytes) */
+ if (real_len == 16)
+ real_len--;
+
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, skb->len);
+
+ mgmt_ethhdr = skb_push(skb, QCA_HDR_MGMT_HEADER_LEN + QCA_HDR_LEN);
+
+ hdr = FIELD_PREP(QCA_HDR_XMIT_VERSION, QCA_HDR_VERSION);
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_PRIORITY, priority);
+ hdr |= QCA_HDR_XMIT_FROM_CPU;
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0));
+ hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG);
+
+ command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg);
+ command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len);
+ command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd);
+ command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE,
+ QCA_HDR_MGMT_CHECK_CODE_VAL);
+
+ put_unaligned_le32(command, &mgmt_ethhdr->command);
+
+ if (cmd == MDIO_WRITE)
+ put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data);
+
+ mgmt_ethhdr->hdr = htons(hdr);
+
+ data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN);
+ if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) {
+ int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN,
+ len - QCA_HDR_MGMT_DATA1_LEN);
+
+ val++;
+
+ for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) {
+ put_unaligned_le32(*val, data2);
+ data2++;
+ val++;
+ }
+ }
+
+ return skb;
+}
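To make the length encoding above concrete: 4 bytes of payload encode as 2, 6 bytes round up from 3 to 4 (the switch always answers in 8-byte steps), and a full 32-byte payload would encode as 16 but is capped at the 4-bit maximum of 15, which the ack handler expands back to 16 words. The same arithmetic as a standalone helper (the function name is mine, not the driver's):

	static unsigned int qca8k_mgmt_len_encode(unsigned int bytes)
	{
		unsigned int words = DIV_ROUND_UP(bytes, sizeof(u16));

		if (words % 2)		/* odd word counts are served as the next dword */
			words++;

		if (words == 16)	/* 0xf is the largest encodable value and already means 32 bytes */
			words--;

		return words;
	}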
+
+static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num)
+{
+ struct qca_mgmt_ethhdr *mgmt_ethhdr;
+ u32 seq;
+
+ seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num);
+ mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data;
+ put_unaligned_le32(seq, &mgmt_ethhdr->seq);
+}
+
+static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+ struct sk_buff *skb;
+ bool ack;
+ int ret;
+
+ skb = qca8k_alloc_mdio_header(MDIO_READ, reg, NULL,
+ QCA8K_ETHERNET_MDIO_PRIORITY, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&mgmt_eth_data->mutex);
+
+ /* Check if mgmt_master is operational */
+ if (!priv->mgmt_master) {
+ kfree_skb(skb);
+ mutex_unlock(&mgmt_eth_data->mutex);
+ return -EINVAL;
+ }
+
+ skb->dev = priv->mgmt_master;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the mdio pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+
+ *val = mgmt_eth_data->data[0];
+ if (len > QCA_HDR_MGMT_DATA1_LEN)
+ memcpy(val + 1, mgmt_eth_data->data + 1, len - QCA_HDR_MGMT_DATA1_LEN);
+
+ ack = mgmt_eth_data->ack;
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qca8k_write_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
+{
+ struct qca8k_mgmt_eth_data *mgmt_eth_data = &priv->mgmt_eth_data;
+ struct sk_buff *skb;
+ bool ack;
+ int ret;
+
+ skb = qca8k_alloc_mdio_header(MDIO_WRITE, reg, val,
+ QCA8K_ETHERNET_MDIO_PRIORITY, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&mgmt_eth_data->mutex);
+
+ /* Check if mgmt_master is operational */
+ if (!priv->mgmt_master) {
+ kfree_skb(skb);
+ mutex_unlock(&mgmt_eth_data->mutex);
+ return -EINVAL;
+ }
+
+ skb->dev = priv->mgmt_master;
+
+ reinit_completion(&mgmt_eth_data->rw_done);
+
+ /* Increment seq_num and set it in the mdio pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
+
+ dev_queue_xmit(skb);
+
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT));
+
+ ack = mgmt_eth_data->ack;
+
+ mutex_unlock(&mgmt_eth_data->mutex);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ if (!ack)
+ return -EINVAL;
+
+ return 0;
+}
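Both qca8k_read_eth() and qca8k_write_eth() follow the same request/response idiom: bump the sequence number, stamp it into the frame, clear the ack flag, transmit, and sleep on a completion. A timeout means no management frame came back at all; waking up with ack still false means a frame arrived but carried a different sequence number than this request. Stripped to its core (locking, skb setup and the data copy omitted):

	reinit_completion(&mgmt_eth_data->rw_done);

	mgmt_eth_data->seq++;
	qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
	mgmt_eth_data->ack = false;

	dev_queue_xmit(skb);

	if (!wait_for_completion_timeout(&mgmt_eth_data->rw_done,
					 msecs_to_jiffies(QCA8K_ETHERNET_TIMEOUT)))
		return -ETIMEDOUT;	/* no reply frame at all */

	if (!mgmt_eth_data->ack)
		return -EINVAL;		/* a reply arrived, but for a different sequence number */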
+
static int
-qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
+qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+ u32 val = 0;
+ int ret;
+
+ ret = qca8k_read_eth(priv, reg, &val, sizeof(val));
+ if (ret)
+ return ret;
+
+ val &= ~mask;
+ val |= write_val;
+
+ return qca8k_write_eth(priv, reg, &val, sizeof(val));
+}
+
+static int
+qca8k_read_mii(struct qca8k_priv *priv, uint32_t reg, uint32_t *val)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
@@ -159,7 +436,7 @@ qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- ret = qca8k_set_page(bus, page);
+ ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
@@ -171,7 +448,7 @@ exit:
}
static int
-qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+qca8k_write_mii(struct qca8k_priv *priv, uint32_t reg, uint32_t val)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
@@ -181,7 +458,7 @@ qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- ret = qca8k_set_page(bus, page);
+ ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
@@ -193,7 +470,8 @@ exit:
}
static int
-qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+qca8k_regmap_update_bits_mii(struct qca8k_priv *priv, uint32_t reg,
+ uint32_t mask, uint32_t write_val)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
@@ -204,7 +482,7 @@ qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- ret = qca8k_set_page(bus, page);
+ ret = qca8k_set_page(priv, page);
if (ret < 0)
goto exit;
@@ -223,401 +501,276 @@ exit:
}
static int
-qca8k_reg_set(struct qca8k_priv *priv, u32 reg, u32 val)
+qca8k_bulk_read(void *ctx, const void *reg_buf, size_t reg_len,
+ void *val_buf, size_t val_len)
{
- return qca8k_rmw(priv, reg, 0, val);
-}
+ int i, count = val_len / sizeof(u32), ret;
+ u32 reg = *(u32 *)reg_buf & U16_MAX;
+ struct qca8k_priv *priv = ctx;
-static int
-qca8k_reg_clear(struct qca8k_priv *priv, u32 reg, u32 val)
-{
- return qca8k_rmw(priv, reg, val, 0);
+ if (priv->mgmt_master &&
+ !qca8k_read_eth(priv, reg, val_buf, val_len))
+ return 0;
+
+ /* loop count times and increment reg by 4 */
+ for (i = 0; i < count; i++, reg += sizeof(u32)) {
+ ret = qca8k_read_mii(priv, reg, val_buf + i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
}
static int
-qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
+qca8k_bulk_gather_write(void *ctx, const void *reg_buf, size_t reg_len,
+ const void *val_buf, size_t val_len)
{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
+ int i, count = val_len / sizeof(u32), ret;
+ u32 reg = *(u32 *)reg_buf & U16_MAX;
+ struct qca8k_priv *priv = ctx;
+ u32 *val = (u32 *)val_buf;
+
+ if (priv->mgmt_master &&
+ !qca8k_write_eth(priv, reg, val, val_len))
+ return 0;
+
+ /* loop count times, increment reg by 4 and advance the val pointer to
+ * the next value
+ */
+ for (i = 0; i < count; i++, reg += sizeof(u32), val++) {
+ ret = qca8k_write_mii(priv, reg, *val);
+ if (ret < 0)
+ return ret;
+ }
- return qca8k_read(priv, reg, val);
+ return 0;
}
static int
-qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
+qca8k_bulk_write(void *ctx, const void *data, size_t bytes)
{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
-
- return qca8k_write(priv, reg, val);
+ return qca8k_bulk_gather_write(ctx, data, sizeof(u16), data + sizeof(u16),
+ bytes - sizeof(u16));
}
-static const struct regmap_range qca8k_readable_ranges[] = {
- regmap_reg_range(0x0000, 0x00e4), /* Global control */
- regmap_reg_range(0x0100, 0x0168), /* EEE control */
- regmap_reg_range(0x0200, 0x0270), /* Parser control */
- regmap_reg_range(0x0400, 0x0454), /* ACL */
- regmap_reg_range(0x0600, 0x0718), /* Lookup */
- regmap_reg_range(0x0800, 0x0b70), /* QM */
- regmap_reg_range(0x0c00, 0x0c80), /* PKT */
- regmap_reg_range(0x0e00, 0x0e98), /* L3 */
- regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
- regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
- regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
- regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
- regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
- regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
- regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
+static int
+qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
+{
+ struct qca8k_priv *priv = ctx;
-};
+ if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
+ return 0;
-static const struct regmap_access_table qca8k_readable_table = {
- .yes_ranges = qca8k_readable_ranges,
- .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
-};
+ return qca8k_regmap_update_bits_mii(priv, reg, mask, write_val);
+}
static struct regmap_config qca8k_regmap_config = {
.reg_bits = 16,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x16ac, /* end MIB - Port6 range */
- .reg_read = qca8k_regmap_read,
- .reg_write = qca8k_regmap_write,
+ .read = qca8k_bulk_read,
+ .write = qca8k_bulk_write,
+ .reg_update_bits = qca8k_regmap_update_bits,
.rd_table = &qca8k_readable_table,
+ .disable_locking = true, /* Locking is handled by qca8k read/write */
+ .cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
+ .max_raw_read = 32, /* mgmt eth can read/write up to 8 registers at a time */
+ .max_raw_write = 32,
};
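With bulk .read/.write callbacks and max_raw_read/max_raw_write of 32 bytes, regmap can now move up to eight consecutive 32-bit registers per management-Ethernet transaction, falling back to one-register-at-a-time MDIO when no management master is available. A hypothetical caller-side use of that window (the base address comes from the MIB Port0 range in the readable-ranges table, everything else is illustrative):

	u32 mib[8];
	int ret;

	/* one 32-byte burst covering eight consecutive counters */
	ret = regmap_bulk_read(priv->regmap, 0x1000, mib, ARRAY_SIZE(mib));
	if (ret)
		return ret;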
static int
-qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
+qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
+ struct sk_buff *read_skb, u32 *val)
{
- int ret, ret1;
- u32 val;
+ struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
+ bool ack;
+ int ret;
- ret = read_poll_timeout(qca8k_read, ret1, !(val & mask),
- 0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
- priv, reg, &val);
+ reinit_completion(&mgmt_eth_data->rw_done);
- /* Check if qca8k_read has failed for a different reason
- * before returning -ETIMEDOUT
- */
- if (ret < 0 && ret1 < 0)
- return ret1;
+ /* Increment seq_num and set it in the copy pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
- return ret;
-}
+ dev_queue_xmit(skb);
-static int
-qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
-{
- u32 reg[4], val;
- int i, ret;
-
- /* load the ARL table into an array */
- for (i = 0; i < 4; i++) {
- ret = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4), &val);
- if (ret < 0)
- return ret;
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
- reg[i] = val;
- }
+ ack = mgmt_eth_data->ack;
- /* vid - 83:72 */
- fdb->vid = (reg[2] >> QCA8K_ATU_VID_S) & QCA8K_ATU_VID_M;
- /* aging - 67:64 */
- fdb->aging = reg[2] & QCA8K_ATU_STATUS_M;
- /* portmask - 54:48 */
- fdb->port_mask = (reg[1] >> QCA8K_ATU_PORT_S) & QCA8K_ATU_PORT_M;
- /* mac - 47:0 */
- fdb->mac[0] = (reg[1] >> QCA8K_ATU_ADDR0_S) & 0xff;
- fdb->mac[1] = reg[1] & 0xff;
- fdb->mac[2] = (reg[0] >> QCA8K_ATU_ADDR2_S) & 0xff;
- fdb->mac[3] = (reg[0] >> QCA8K_ATU_ADDR3_S) & 0xff;
- fdb->mac[4] = (reg[0] >> QCA8K_ATU_ADDR4_S) & 0xff;
- fdb->mac[5] = reg[0] & 0xff;
+ if (ret <= 0)
+ return -ETIMEDOUT;
- return 0;
-}
+ if (!ack)
+ return -EINVAL;
-static void
-qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
- u8 aging)
-{
- u32 reg[3] = { 0 };
- int i;
+ *val = mgmt_eth_data->data[0];
- /* vid - 83:72 */
- reg[2] = (vid & QCA8K_ATU_VID_M) << QCA8K_ATU_VID_S;
- /* aging - 67:64 */
- reg[2] |= aging & QCA8K_ATU_STATUS_M;
- /* portmask - 54:48 */
- reg[1] = (port_mask & QCA8K_ATU_PORT_M) << QCA8K_ATU_PORT_S;
- /* mac - 47:0 */
- reg[1] |= mac[0] << QCA8K_ATU_ADDR0_S;
- reg[1] |= mac[1];
- reg[0] |= mac[2] << QCA8K_ATU_ADDR2_S;
- reg[0] |= mac[3] << QCA8K_ATU_ADDR3_S;
- reg[0] |= mac[4] << QCA8K_ATU_ADDR4_S;
- reg[0] |= mac[5];
-
- /* load the array into the ARL table */
- for (i = 0; i < 3; i++)
- qca8k_write(priv, QCA8K_REG_ATU_DATA0 + (i * 4), reg[i]);
+ return 0;
}
static int
-qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
+qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
+ int regnum, u16 data)
{
- u32 reg;
- int ret;
+ struct sk_buff *write_skb, *clear_skb, *read_skb;
+ struct qca8k_mgmt_eth_data *mgmt_eth_data;
+ u32 write_val, clear_val = 0, val;
+ struct net_device *mgmt_master;
+ int ret, ret1;
+ bool ack;
- /* Set the command and FDB index */
- reg = QCA8K_ATU_FUNC_BUSY;
- reg |= cmd;
- if (port >= 0) {
- reg |= QCA8K_ATU_FUNC_PORT_EN;
- reg |= (port & QCA8K_ATU_FUNC_PORT_M) << QCA8K_ATU_FUNC_PORT_S;
- }
+ if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+ return -EINVAL;
- /* Write the function register triggering the table access */
- ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
- if (ret)
- return ret;
+ mgmt_eth_data = &priv->mgmt_eth_data;
- /* wait for completion */
- ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
- if (ret)
- return ret;
+ write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+ QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+ QCA8K_MDIO_MASTER_REG_ADDR(regnum);
- /* Check for table full violation when adding an entry */
- if (cmd == QCA8K_FDB_LOAD) {
- ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
- if (ret < 0)
- return ret;
- if (reg & QCA8K_ATU_FUNC_FULL)
- return -1;
+ if (read) {
+ write_val |= QCA8K_MDIO_MASTER_READ;
+ } else {
+ write_val |= QCA8K_MDIO_MASTER_WRITE;
+ write_val |= QCA8K_MDIO_MASTER_DATA(data);
}
- return 0;
-}
-
-static int
-qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port)
-{
- int ret;
-
- qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
- if (ret < 0)
- return ret;
-
- return qca8k_fdb_read(priv, fdb);
-}
-
-static int
-qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask,
- u16 vid, u8 aging)
-{
- int ret;
-
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_write(priv, vid, port_mask, mac, aging);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
- mutex_unlock(&priv->reg_mutex);
+ /* Prealloc all the needed skb before the lock */
+ write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
+ if (!write_skb)
+ return -ENOMEM;
- return ret;
-}
+ clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
+ if (!clear_skb) {
+ ret = -ENOMEM;
+ goto err_clear_skb;
+ }
-static int
-qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid)
-{
- int ret;
+ read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
+ QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
+ if (!read_skb) {
+ ret = -ENOMEM;
+ goto err_read_skb;
+ }
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_write(priv, vid, port_mask, mac, 0);
- ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
- mutex_unlock(&priv->reg_mutex);
+ /* Actually start the request:
+ * 1. Send mdio master packet
+ * 2. Busy Wait for mdio master command
+ * 3. Get the data if we are reading
+ * 4. Reset the mdio master (even with error)
+ */
+ mutex_lock(&mgmt_eth_data->mutex);
+
+ /* Check if mgmt_master is operational */
+ mgmt_master = priv->mgmt_master;
+ if (!mgmt_master) {
+ mutex_unlock(&mgmt_eth_data->mutex);
+ ret = -EINVAL;
+ goto err_mgmt_master;
+ }
- return ret;
-}
+ read_skb->dev = mgmt_master;
+ clear_skb->dev = mgmt_master;
+ write_skb->dev = mgmt_master;
-static void
-qca8k_fdb_flush(struct qca8k_priv *priv)
-{
- mutex_lock(&priv->reg_mutex);
- qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
- mutex_unlock(&priv->reg_mutex);
-}
+ reinit_completion(&mgmt_eth_data->rw_done);
-static int
-qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
-{
- u32 reg;
- int ret;
+ /* Increment seq_num and set it in the write pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
- /* Set the command and VLAN index */
- reg = QCA8K_VTU_FUNC1_BUSY;
- reg |= cmd;
- reg |= vid << QCA8K_VTU_FUNC1_VID_S;
+ dev_queue_xmit(write_skb);
- /* Write the function register triggering the table access */
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
- if (ret)
- return ret;
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
- /* wait for completion */
- ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
- if (ret)
- return ret;
+ ack = mgmt_eth_data->ack;
- /* Check for table full violation when adding an entry */
- if (cmd == QCA8K_VLAN_LOAD) {
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
- if (ret < 0)
- return ret;
- if (reg & QCA8K_VTU_FUNC1_FULL)
- return -ENOMEM;
+ if (ret <= 0) {
+ ret = -ETIMEDOUT;
+ kfree_skb(read_skb);
+ goto exit;
}
- return 0;
-}
-
-static int
-qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
-{
- u32 reg;
- int ret;
-
- /*
- We do the right thing with VLAN 0 and treat it as untagged while
- preserving the tag on egress.
- */
- if (vid == 0)
- return 0;
-
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
- if (ret < 0)
- goto out;
+ if (!ack) {
+ ret = -EINVAL;
+ kfree_skb(read_skb);
+ goto exit;
+ }
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
- if (ret < 0)
- goto out;
- reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
- reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port));
- if (untagged)
- reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
- else
- reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
+ ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
+ !(val & QCA8K_MDIO_MASTER_BUSY), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
+ mgmt_eth_data, read_skb, &val);
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
- if (ret)
- goto out;
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+ if (ret < 0 && ret1 < 0) {
+ ret = ret1;
+ goto exit;
+ }
-out:
- mutex_unlock(&priv->reg_mutex);
+ if (read) {
+ reinit_completion(&mgmt_eth_data->rw_done);
- return ret;
-}
+ /* Increment seq_num and set it in the read pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
-static int
-qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
-{
- u32 reg, mask;
- int ret, i;
- bool del;
+ dev_queue_xmit(read_skb);
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
- if (ret < 0)
- goto out;
+ ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
- ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
- if (ret < 0)
- goto out;
- reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port));
- reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT <<
- QCA8K_VTU_FUNC0_EG_MODE_S(port);
+ ack = mgmt_eth_data->ack;
- /* Check if we're the last member to be removed */
- del = true;
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- mask = QCA8K_VTU_FUNC0_EG_MODE_NOT;
- mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i);
+ if (ret <= 0) {
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
- if ((reg & mask) != mask) {
- del = false;
- break;
+ if (!ack) {
+ ret = -EINVAL;
+ goto exit;
}
- }
- if (del) {
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
+ ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
} else {
- ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
- if (ret)
- goto out;
- ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+ kfree_skb(read_skb);
}
+exit:
+ reinit_completion(&mgmt_eth_data->rw_done);
-out:
- mutex_unlock(&priv->reg_mutex);
-
- return ret;
-}
-
-static int
-qca8k_mib_init(struct qca8k_priv *priv)
-{
- int ret;
+ /* Increment seq_num and set it in the clear pkt */
+ mgmt_eth_data->seq++;
+ qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
+ mgmt_eth_data->ack = false;
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_FLUSH | QCA8K_MIB_BUSY);
- if (ret)
- goto exit;
+ dev_queue_xmit(clear_skb);
- ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
- if (ret)
- goto exit;
-
- ret = qca8k_reg_set(priv, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
- if (ret)
- goto exit;
+ wait_for_completion_timeout(&mgmt_eth_data->rw_done,
+ QCA8K_ETHERNET_TIMEOUT);
- ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
+ mutex_unlock(&mgmt_eth_data->mutex);
-exit:
- mutex_unlock(&priv->reg_mutex);
return ret;
-}
-static void
-qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
-{
- u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
-
- /* Port 0 and 6 have no internal PHY */
- if (port > 0 && port < 6)
- mask |= QCA8K_PORT_STATUS_LINK_AUTO;
-
- if (enable)
- qca8k_reg_set(priv, QCA8K_REG_PORT_STATUS(port), mask);
- else
- qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
-}
-
-static u32
-qca8k_port_to_phy(int port)
-{
- /* From Andrew Lunn:
- * Port 0 has no internal phy.
- * Port 1 has an internal PHY at MDIO address 0.
- * Port 2 has an internal PHY at MDIO address 1.
- * ...
- * Port 5 has an internal PHY at MDIO address 4.
- * Port 6 has no internal PHY.
- */
+ /* Error handling before lock */
+err_mgmt_master:
+ kfree_skb(read_skb);
+err_read_skb:
+ kfree_skb(clear_skb);
+err_clear_skb:
+ kfree_skb(write_skb);
- return port - 1;
+ return ret;
}
static int
@@ -629,9 +782,9 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
qca8k_split_addr(reg, &r1, &r2, &page);
- ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
+ ret = read_poll_timeout(qca8k_mii_read_hi, ret1, !(val & mask), 0,
QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
- bus, 0x10 | r2, r1, &val);
+ bus, 0x10 | r2, r1 + 1, &val);
/* Check if qca8k_read has failed for a different reason
* before returning -ETIMEDOUT
@@ -643,8 +796,9 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
}
static int
-qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
+qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{
+ struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
@@ -661,7 +815,7 @@ qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- ret = qca8k_set_page(bus, page);
+ ret = qca8k_set_page(priv, page);
if (ret)
goto exit;
@@ -672,7 +826,7 @@ qca8k_mdio_write(struct mii_bus *bus, int phy, int regnum, u16 data)
exit:
/* even if the busy_wait times out, try to clear the MASTER_EN */
- qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
+ qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);
mutex_unlock(&bus->mdio_lock);
@@ -680,8 +834,9 @@ exit:
}
static int
-qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
+qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{
+ struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
@@ -697,22 +852,22 @@ qca8k_mdio_read(struct mii_bus *bus, int phy, int regnum)
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- ret = qca8k_set_page(bus, page);
+ ret = qca8k_set_page(priv, page);
if (ret)
goto exit;
- qca8k_mii_write32(bus, 0x10 | r2, r1, val);
+ qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
if (ret)
goto exit;
- ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
+ ret = qca8k_mii_read_lo(bus, 0x10 | r2, r1, &val);
exit:
/* even if the busy_wait timeouts try to clear the MASTER_EN */
- qca8k_mii_write32(bus, 0x10 | r2, r1, 0);
+ qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);
mutex_unlock(&bus->mdio_lock);
@@ -726,87 +881,92 @@ static int
qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
{
struct qca8k_priv *priv = slave_bus->priv;
- struct mii_bus *bus = priv->bus;
+ int ret;
- return qca8k_mdio_write(bus, phy, regnum, data);
+ /* Use mdio Ethernet when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
+ if (!ret)
+ return 0;
+
+ return qca8k_mdio_write(priv, phy, regnum, data);
}
static int
qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
{
struct qca8k_priv *priv = slave_bus->priv;
- struct mii_bus *bus = priv->bus;
+ int ret;
- return qca8k_mdio_read(bus, phy, regnum);
-}
+ /* Use mdio Ethernet when available, fall back to the legacy one on error */
+ ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
+ if (ret >= 0)
+ return ret;
-static int
-qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
-{
- struct qca8k_priv *priv = ds->priv;
+ ret = qca8k_mdio_read(priv, phy, regnum);
- /* Check if the legacy mapping should be used and the
- * port is not correctly mapped to the right PHY in the
- * devicetree
- */
- if (priv->legacy_phy_port_mapping)
- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+ if (ret < 0)
+ return 0xffff;
- return qca8k_mdio_write(priv->bus, port, regnum, data);
+ return ret;
}
static int
-qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
+qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
{
- struct qca8k_priv *priv = ds->priv;
- int ret;
-
- /* Check if the legacy mapping should be used and the
- * port is not correctly mapped to the right PHY in the
- * devicetree
- */
- if (priv->legacy_phy_port_mapping)
- port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
- ret = qca8k_mdio_read(priv->bus, port, regnum);
+ return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
+}
- if (ret < 0)
- return 0xffff;
+static int
+qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
+{
+ port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
- return ret;
+ return qca8k_internal_mdio_read(slave_bus, port, regnum);
}
static int
-qca8k_mdio_register(struct qca8k_priv *priv, struct device_node *mdio)
+qca8k_mdio_register(struct qca8k_priv *priv)
{
struct dsa_switch *ds = priv->ds;
+ struct device_node *mdio;
struct mii_bus *bus;
bus = devm_mdiobus_alloc(ds->dev);
-
if (!bus)
return -ENOMEM;
bus->priv = (void *)priv;
- bus->name = "qca8k slave mii";
- bus->read = qca8k_internal_mdio_read;
- bus->write = qca8k_internal_mdio_write;
- snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d",
- ds->index);
-
+ snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
+ ds->dst->index, ds->index);
bus->parent = ds->dev;
bus->phy_mask = ~ds->phys_mii_mask;
-
ds->slave_mii_bus = bus;
- return devm_of_mdiobus_register(priv->dev, bus, mdio);
+ /* Check if the devicetree declares the port:phy mapping */
+ mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
+ if (of_device_is_available(mdio)) {
+ bus->name = "qca8k slave mii";
+ bus->read = qca8k_internal_mdio_read;
+ bus->write = qca8k_internal_mdio_write;
+ return devm_of_mdiobus_register(priv->dev, bus, mdio);
+ }
+
+ /* If no mapping is declared, fall back to the legacy mapping
+ * provided by the qca8k_port_to_phy function
+ */
+ bus->name = "qca8k-legacy slave mii";
+ bus->read = qca8k_legacy_mdio_read;
+ bus->write = qca8k_legacy_mdio_write;
+ return devm_mdiobus_register(priv->dev, bus);
}
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
- struct device_node *ports, *port, *mdio;
+ struct device_node *ports, *port;
phy_interface_t mode;
int err;
@@ -864,28 +1024,11 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
* a dt-overlay and driver reload changed the configuration
*/
- return qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
- QCA8K_MDIO_MASTER_EN);
+ return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
+ QCA8K_MDIO_MASTER_EN);
}
- /* Check if the devicetree declare the port:phy mapping */
- mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
- if (of_device_is_available(mdio)) {
- err = qca8k_mdio_register(priv, mdio);
- if (err)
- of_node_put(mdio);
-
- return err;
- }
-
- /* If a mapping can't be found the legacy mapping is used,
- * using the qca8k_port_to_phy function
- */
- priv->legacy_phy_port_mapping = true;
- priv->ops.phy_read = qca8k_phy_read;
- priv->ops.phy_write = qca8k_phy_write;
-
- return 0;
+ return qca8k_mdio_register(priv);
}
static int
@@ -934,8 +1077,8 @@ static int qca8k_find_cpu_port(struct dsa_switch *ds)
static int
qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
{
+ const struct qca8k_match_data *data = priv->info;
struct device_node *node = priv->dev->of_node;
- const struct qca8k_match_data *data;
u32 val = 0;
int ret;
@@ -944,8 +1087,6 @@ qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
* Should be applied by default but we set this just to make sure.
*/
if (priv->switch_id == QCA8K_ID_QCA8327) {
- data = of_device_get_match_data(priv->dev);
-
/* Set the correct package of 148 pin for QCA8327 */
if (data->reduced_package)
val |= QCA8327_PWS_PACKAGE148_EN;
@@ -983,7 +1124,7 @@ qca8k_parse_port_config(struct qca8k_priv *priv)
u32 delay;
/* We have 2 CPU port. Check them */
- for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) {
+ for (port = 0; port < QCA8K_NUM_PORTS; port++) {
/* Skip every other port */
if (port != 0 && port != 6)
continue;
@@ -1014,7 +1155,7 @@ qca8k_parse_port_config(struct qca8k_priv *priv)
mode == PHY_INTERFACE_MODE_RGMII_TXID)
delay = 1;
- if (delay > QCA8K_MAX_DELAY) {
+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
delay = 3;
}
@@ -1030,7 +1171,7 @@ qca8k_parse_port_config(struct qca8k_priv *priv)
mode == PHY_INTERFACE_MODE_RGMII_RXID)
delay = 2;
- if (delay > QCA8K_MAX_DELAY) {
+ if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
delay = 3;
}
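The two hunks above replace the open-coded comparison against a maximum-delay constant with FIELD_FIT() against the actual RGMII delay field masks, so the upper bound follows the field definition instead of being duplicated in a separate define. FIELD_FIT() in its general form, with a placeholder mask rather than the driver's real one:

	#include <linux/bitfield.h>

	#define EXAMPLE_DELAY_MASK	GENMASK(23, 22)	/* a 2-bit field, so only values 0..3 fit */

	if (!FIELD_FIT(EXAMPLE_DELAY_MASK, delay))
		delay = 3;	/* clamp to the maximum, mirroring what the patch does */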
@@ -1071,223 +1212,6 @@ qca8k_parse_port_config(struct qca8k_priv *priv)
return 0;
}
-static int
-qca8k_setup(struct dsa_switch *ds)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int cpu_port, ret, i;
- u32 mask;
-
- cpu_port = qca8k_find_cpu_port(ds);
- if (cpu_port < 0) {
- dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
- return cpu_port;
- }
-
- /* Parse CPU port config to be later used in phy_link mac_config */
- ret = qca8k_parse_port_config(priv);
- if (ret)
- return ret;
-
- mutex_init(&priv->reg_mutex);
-
- /* Start by setting up the register mapping */
- priv->regmap = devm_regmap_init(ds->dev, NULL, priv,
- &qca8k_regmap_config);
- if (IS_ERR(priv->regmap))
- dev_warn(priv->dev, "regmap initialization failed");
-
- ret = qca8k_setup_mdio_bus(priv);
- if (ret)
- return ret;
-
- ret = qca8k_setup_of_pws_reg(priv);
- if (ret)
- return ret;
-
- ret = qca8k_setup_mac_pwr_sel(priv);
- if (ret)
- return ret;
-
- /* Make sure MAC06 is disabled */
- ret = qca8k_reg_clear(priv, QCA8K_REG_PORT0_PAD_CTRL,
- QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
- if (ret) {
- dev_err(priv->dev, "failed disabling MAC06 exchange");
- return ret;
- }
-
- /* Enable CPU Port */
- ret = qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
- QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
- if (ret) {
- dev_err(priv->dev, "failed enabling CPU port");
- return ret;
- }
-
- /* Enable MIB counters */
- ret = qca8k_mib_init(priv);
- if (ret)
- dev_warn(priv->dev, "mib init failed");
-
- /* Initial setup of all ports */
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- /* Disable forwarding by default on all ports */
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER, 0);
- if (ret)
- return ret;
-
- /* Enable QCA header mode on all cpu ports */
- if (dsa_is_cpu_port(ds, i)) {
- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
- if (ret) {
- dev_err(priv->dev, "failed enabling QCA header mode");
- return ret;
- }
- }
-
- /* Disable MAC by default on all user ports */
- if (dsa_is_user_port(ds, i))
- qca8k_port_set_status(priv, i, 0);
- }
-
- /* Forward all unknown frames to CPU port for Linux processing
- * Notice that in multi-cpu config only one port should be set
- * for igmp, unknown, multicast and broadcast packet
- */
- ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
- BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
- if (ret)
- return ret;
-
- /* Setup connection between CPU port & user ports
- * Configure specific switch configuration for ports
- */
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- /* CPU port gets connected to all user ports of the switch */
- if (dsa_is_cpu_port(ds, i)) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
- if (ret)
- return ret;
- }
-
- /* Individual user ports get connected to CPU port only */
- if (dsa_is_user_port(ds, i)) {
- int shift = 16 * (i % 2);
-
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_MEMBER,
- BIT(cpu_port));
- if (ret)
- return ret;
-
- /* Enable ARP Auto-learning by default */
- ret = qca8k_reg_set(priv, QCA8K_PORT_LOOKUP_CTRL(i),
- QCA8K_PORT_LOOKUP_LEARN);
- if (ret)
- return ret;
-
- /* For port based vlans to work we need to set the
- * default egress vid
- */
- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
- 0xfff << shift,
- QCA8K_PORT_VID_DEF << shift);
- if (ret)
- return ret;
-
- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
- QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
- QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
- if (ret)
- return ret;
- }
-
- /* The port 5 of the qca8337 have some problem in flood condition. The
- * original legacy driver had some specific buffer and priority settings
- * for the different port suggested by the QCA switch team. Add this
- * missing settings to improve switch stability under load condition.
- * This problem is limited to qca8337 and other qca8k switch are not affected.
- */
- if (priv->switch_id == QCA8K_ID_QCA8337) {
- switch (i) {
- /* The 2 CPU port and port 5 requires some different
- * priority than any other ports.
- */
- case 0:
- case 5:
- case 6:
- mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
- QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
- break;
- default:
- mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
- QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
- QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
- }
- qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
-
- mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
- QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_WRED_EN;
- qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
- QCA8K_PORT_HOL_CTRL1_ING_BUF |
- QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
- QCA8K_PORT_HOL_CTRL1_WRED_EN,
- mask);
- }
-
- /* Set initial MTU for every port.
- * We have only have a general MTU setting. So track
- * every port and set the max across all port.
- * Set per port MTU to 1500 as the MTU change function
- * will add the overhead and if its set to 1518 then it
- * will apply the overhead again and we will end up with
- * MTU of 1536 instead of 1518
- */
- priv->port_mtu[i] = ETH_DATA_LEN;
- }
-
- /* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
- if (priv->switch_id == QCA8K_ID_QCA8327) {
- mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
- QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
- qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
- QCA8K_GLOBAL_FC_GOL_XON_THRES_S |
- QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S,
- mask);
- }
-
- /* Setup our port MTUs to match power on defaults */
- ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
- if (ret)
- dev_warn(priv->dev, "failed setting MTU settings");
-
- /* Flush the FDB table */
- qca8k_fdb_flush(priv);
-
- /* We don't have interrupts for link changes, so we need to poll */
- ds->pcs_poll = true;
-
- return 0;
-}
-
static void
qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
u32 reg)
@@ -1329,13 +1253,41 @@ qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_inde
cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
}
+static struct phylink_pcs *
+qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct qca8k_priv *priv = ds->priv;
+ struct phylink_pcs *pcs = NULL;
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ switch (port) {
+ case 0:
+ pcs = &priv->pcs_port_0.pcs;
+ break;
+
+ case 6:
+ pcs = &priv->pcs_port_6.pcs;
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return pcs;
+}
+
static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{
struct qca8k_priv *priv = ds->priv;
- int cpu_port_index, ret;
- u32 reg, val;
+ int cpu_port_index;
+ u32 reg;
switch (port) {
case 0: /* 1st CPU port */
@@ -1401,70 +1353,6 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
case PHY_INTERFACE_MODE_1000BASEX:
/* Enable SGMII on the port */
qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
-
- /* Enable/disable SerDes auto-negotiation as necessary */
- ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
- if (ret)
- return;
- if (phylink_autoneg_inband(mode))
- val &= ~QCA8K_PWS_SERDES_AEN_DIS;
- else
- val |= QCA8K_PWS_SERDES_AEN_DIS;
- qca8k_write(priv, QCA8K_REG_PWS, val);
-
- /* Configure the SGMII parameters */
- ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
- if (ret)
- return;
-
- val |= QCA8K_SGMII_EN_SD;
-
- if (priv->ports_config.sgmii_enable_pll)
- val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
- QCA8K_SGMII_EN_TX;
-
- if (dsa_is_cpu_port(ds, port)) {
- /* CPU port, we're talking to the CPU MAC, be a PHY */
- val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
- val |= QCA8K_SGMII_MODE_CTRL_PHY;
- } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
- val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
- val |= QCA8K_SGMII_MODE_CTRL_MAC;
- } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
- val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
- val |= QCA8K_SGMII_MODE_CTRL_BASEX;
- }
-
- qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
-
- /* From original code is reported port instability as SGMII also
- * require delay set. Apply advised values here or take them from DT.
- */
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
-
- /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
- * falling edge is set writing in the PORT0 PAD reg
- */
- if (priv->switch_id == QCA8K_ID_QCA8327 ||
- priv->switch_id == QCA8K_ID_QCA8337)
- reg = QCA8K_REG_PORT0_PAD_CTRL;
-
- val = 0;
-
- /* SGMII Clock phase configuration */
- if (priv->ports_config.sgmii_rx_clk_falling_edge)
- val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
-
- if (priv->ports_config.sgmii_tx_clk_falling_edge)
- val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
-
- if (val)
- ret = qca8k_rmw(priv, reg,
- QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
- QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
- val);
-
break;
default:
dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
@@ -1473,109 +1361,41 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
}
}
-static void
-qca8k_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
switch (port) {
case 0: /* 1st CPU port */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- state->interface != PHY_INTERFACE_MODE_SGMII)
- goto unsupported;
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
break;
+
case 1:
case 2:
case 3:
case 4:
case 5:
/* Internal PHY */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL)
- goto unsupported;
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
break;
- case 6: /* 2nd CPU port / external PHY */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
- state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_1000BASEX)
- goto unsupported;
- break;
- default:
-unsupported:
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
-
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
- phylink_set(mask, 1000baseX_Full);
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
-static int
-qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct qca8k_priv *priv = ds->priv;
- u32 reg;
- int ret;
-
- ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
- if (ret < 0)
- return ret;
-
- state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
- state->an_complete = state->link;
- state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
- state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
- DUPLEX_HALF;
-
- switch (reg & QCA8K_PORT_STATUS_SPEED) {
- case QCA8K_PORT_STATUS_SPEED_10:
- state->speed = SPEED_10;
- break;
- case QCA8K_PORT_STATUS_SPEED_100:
- state->speed = SPEED_100;
- break;
- case QCA8K_PORT_STATUS_SPEED_1000:
- state->speed = SPEED_1000;
- break;
- default:
- state->speed = SPEED_UNKNOWN;
+ case 6: /* 2nd CPU port / external PHY */
+ phy_interface_set_rgmii(config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ config->supported_interfaces);
break;
}
- state->pause = MLO_PAUSE_NONE;
- if (reg & QCA8K_PORT_STATUS_RXFLOW)
- state->pause |= MLO_PAUSE_RX;
- if (reg & QCA8K_PORT_STATUS_TXFLOW)
- state->pause |= MLO_PAUSE_TX;
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
- return 1;
+ config->legacy_pre_march2020 = false;
}
static void
@@ -1628,380 +1448,526 @@ qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
}
-static void
-qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
+static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
{
- int i;
-
- if (stringset != ETH_SS_STATS)
- return;
-
- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++)
- strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
- ETH_GSTRING_LEN);
+ return container_of(pcs, struct qca8k_pcs, pcs);
}
-static void
-qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
- uint64_t *data)
+static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- const struct qca8k_mib_desc *mib;
- u32 reg, i, val;
- u32 hi = 0;
+ struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
+ int port = pcs_to_qca8k_pcs(pcs)->port;
+ u32 reg;
int ret;
- for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) {
- mib = &ar8327_mib[i];
- reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
-
- ret = qca8k_read(priv, reg, &val);
- if (ret < 0)
- continue;
+ ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
+ if (ret < 0) {
+ state->link = false;
+ return;
+ }
- if (mib->size == 2) {
- ret = qca8k_read(priv, reg + 4, &hi);
- if (ret < 0)
- continue;
- }
+ state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
+ state->an_complete = state->link;
+ state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
+ DUPLEX_HALF;
- data[i] = val;
- if (mib->size == 2)
- data[i] |= (u64)hi << 32;
+ switch (reg & QCA8K_PORT_STATUS_SPEED) {
+ case QCA8K_PORT_STATUS_SPEED_10:
+ state->speed = SPEED_10;
+ break;
+ case QCA8K_PORT_STATUS_SPEED_100:
+ state->speed = SPEED_100;
+ break;
+ case QCA8K_PORT_STATUS_SPEED_1000:
+ state->speed = SPEED_1000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
}
+
+ if (reg & QCA8K_PORT_STATUS_RXFLOW)
+ state->pause |= MLO_PAUSE_RX;
+ if (reg & QCA8K_PORT_STATUS_TXFLOW)
+ state->pause |= MLO_PAUSE_TX;
}
-static int
-qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
+static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- if (sset != ETH_SS_STATS)
- return 0;
+ struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
+ int cpu_port_index, ret, port;
+ u32 reg, val;
- return ARRAY_SIZE(ar8327_mib);
-}
+ port = pcs_to_qca8k_pcs(pcs)->port;
+ switch (port) {
+ case 0:
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT0;
+ break;
-static int
-qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
- u32 reg;
- int ret;
+ case 6:
+ reg = QCA8K_REG_PORT6_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT6;
+ break;
- mutex_lock(&priv->reg_mutex);
- ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
- if (ret < 0)
- goto exit;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
- if (eee->eee_enabled)
- reg |= lpi_en;
+ /* Enable/disable SerDes auto-negotiation as necessary */
+ ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
+ if (ret)
+ return ret;
+ if (phylink_autoneg_inband(mode))
+ val &= ~QCA8K_PWS_SERDES_AEN_DIS;
else
- reg &= ~lpi_en;
- ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
+ val |= QCA8K_PWS_SERDES_AEN_DIS;
+ qca8k_write(priv, QCA8K_REG_PWS, val);
-exit:
- mutex_unlock(&priv->reg_mutex);
- return ret;
-}
+ /* Configure the SGMII parameters */
+ ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
+ if (ret)
+ return ret;
+
+ val |= QCA8K_SGMII_EN_SD;
+
+ if (priv->ports_config.sgmii_enable_pll)
+ val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
+ QCA8K_SGMII_EN_TX;
+
+ if (dsa_is_cpu_port(priv->ds, port)) {
+ /* CPU port, we're talking to the CPU MAC, be a PHY */
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_PHY;
+ } else if (interface == PHY_INTERFACE_MODE_SGMII) {
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_MAC;
+ } else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
+ val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+ val |= QCA8K_SGMII_MODE_CTRL_BASEX;
+ }
+
+ qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+
+ /* The original code reports port instability when SGMII is used
+ * without the delay set. Apply the advised values here or take them
+ * from DT.
+ */
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+ /* For qca8327/qca8328/qca8334/qca8338 the SGMII port is unique and
+ * the falling edge is configured by writing to the PORT0 PAD reg
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8327 ||
+ priv->switch_id == QCA8K_ID_QCA8337)
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+
+ val = 0;
+
+ /* SGMII Clock phase configuration */
+ if (priv->ports_config.sgmii_rx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
+
+ if (priv->ports_config.sgmii_tx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
+
+ if (val)
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
+ QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
+ val);
-static int
-qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
-{
- /* Nothing to do on the port's MAC */
return 0;
}
-static void
-qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u32 stp_state;
+}
- switch (state) {
- case BR_STATE_DISABLED:
- stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
- break;
- case BR_STATE_BLOCKING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
- break;
- case BR_STATE_LISTENING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
- break;
- case BR_STATE_LEARNING:
- stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
- break;
- case BR_STATE_FORWARDING:
- default:
- stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
- break;
- }
+static const struct phylink_pcs_ops qca8k_pcs_ops = {
+ .pcs_get_state = qca8k_pcs_get_state,
+ .pcs_config = qca8k_pcs_config,
+ .pcs_an_restart = qca8k_pcs_an_restart,
+};
+
+static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
+ int port)
+{
+ qpcs->pcs.ops = &qca8k_pcs_ops;
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
+ /* We don't have interrupts for link changes, so we need to poll */
+ qpcs->pcs.poll = true;
+ qpcs->priv = priv;
+ qpcs->port = port;
}
-static int
-qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
+static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int port_mask, cpu_port;
- int i, ret;
+ struct qca8k_mib_eth_data *mib_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ const struct qca8k_mib_desc *mib;
+ struct mib_ethhdr *mib_ethhdr;
+ __le32 *data2;
+ u8 port;
+ int i;
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- port_mask = BIT(cpu_port);
+ mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
+ mib_eth_data = &priv->mib_eth_data;
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (dsa_to_port(ds, i)->bridge_dev != br)
- continue;
- /* Add this port to the portvlan mask of the other ports
- * in the bridge
- */
- ret = qca8k_reg_set(priv,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- if (ret)
- return ret;
- if (i != port)
- port_mask |= BIT(i);
- }
+ /* The switch autocasts the MIB counters for every port. Ignore
+ * packets for other ports and parse only the requested one.
+ */
+ port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
+ if (port != mib_eth_data->req_port)
+ goto exit;
- /* Add all other ports to this ports portvlan mask */
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+ data2 = (__le32 *)skb->data;
- return ret;
-}
+ for (i = 0; i < priv->info->mib_count; i++) {
+ mib = &ar8327_mib[i];
-static void
-qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int cpu_port, i;
+ /* The first 3 MIB counters are present in the skb head */
+ if (i < 3) {
+ mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i);
+ continue;
+ }
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ /* Some MIB counters are 64 bits wide */
+ if (mib->size == 2)
+ mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2);
+ else
+ mib_eth_data->data[i] = get_unaligned_le32(data2);
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (dsa_to_port(ds, i)->bridge_dev != br)
- continue;
- /* Remove this port to the portvlan mask of the other ports
- * in the bridge
- */
- qca8k_reg_clear(priv,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
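+ /* Advance past this counter; mib->size is its width in 32-bit words */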
+ data2 += mib->size;
}
- /* Set the cpu port to be the only one in the portvlan mask of
- * this port
- */
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
+exit:
+ /* Complete once the mib packets for all ports have been parsed */
+ if (refcount_dec_and_test(&mib_eth_data->port_parsed))
+ complete(&mib_eth_data->rw_done);
}
static int
-qca8k_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phy)
+qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct qca8k_mib_eth_data *mib_eth_data;
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
- qca8k_port_set_status(priv, port, 1);
- priv->port_sts[port].enabled = 1;
+ mib_eth_data = &priv->mib_eth_data;
- if (dsa_is_user_port(ds, port))
- phy_support_asym_pause(phy);
+ mutex_lock(&mib_eth_data->mutex);
- return 0;
-}
+ reinit_completion(&mib_eth_data->rw_done);
-static void
-qca8k_port_disable(struct dsa_switch *ds, int port)
-{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ mib_eth_data->req_port = dp->index;
+ mib_eth_data->data = data;
+ refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
- qca8k_port_set_status(priv, port, 0);
- priv->port_sts[port].enabled = 0;
-}
+ mutex_lock(&priv->reg_mutex);
-static int
-qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
-{
- struct qca8k_priv *priv = ds->priv;
- int i, mtu = 0;
+ /* Send mib autocast request */
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
+ QCA8K_MIB_BUSY);
- priv->port_mtu[port] = new_mtu;
+ mutex_unlock(&priv->reg_mutex);
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- if (priv->port_mtu[i] > mtu)
- mtu = priv->port_mtu[i];
+ if (ret)
+ goto exit;
- /* Include L2 header / FCS length */
- return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
-}
+ ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
-static int
-qca8k_port_max_mtu(struct dsa_switch *ds, int port)
-{
- return QCA8K_MAX_MTU;
+exit:
+ mutex_unlock(&mib_eth_data->mutex);
+
+ return ret;
}
-static int
-qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
- u16 port_mask, u16 vid)
+static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
{
- /* Set the vid to the port vlan id if no vid is set */
- if (!vid)
- vid = QCA8K_PORT_VID_DEF;
+ struct qca8k_priv *priv = ds->priv;
- return qca8k_fdb_add(priv, addr, port_mask, vid,
- QCA8K_ATU_STATUS_STATIC);
+ /* Communicate the switch revision to the internal PHY driver.
+ * Based on the switch revision, different values need to be
+ * set in the dbg and mmd regs of the PHY.
+ * The first 2 bits are used to communicate the switch revision
+ * to the PHY driver.
+ */
+ if (port > 0 && port < 6)
+ return priv->switch_revision;
+
+ return 0;
}
-static int
-qca8k_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+static enum dsa_tag_protocol
+qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u16 port_mask = BIT(port);
-
- return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
+ return DSA_TAG_PROTO_QCA;
}
-static int
-qca8k_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+static void
+qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
+ bool operational)
{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- u16 port_mask = BIT(port);
+ struct dsa_port *dp = master->dsa_ptr;
+ struct qca8k_priv *priv = ds->priv;
+
+ /* Ethernet MIB/MDIO is only supported for CPU port 0 */
+ if (dp->index != 0)
+ return;
+
+ mutex_lock(&priv->mgmt_eth_data.mutex);
+ mutex_lock(&priv->mib_eth_data.mutex);
- if (!vid)
- vid = QCA8K_PORT_VID_DEF;
+ priv->mgmt_master = operational ? (struct net_device *)master : NULL;
- return qca8k_fdb_del(priv, addr, port_mask, vid);
+ mutex_unlock(&priv->mib_eth_data.mutex);
+ mutex_unlock(&priv->mgmt_eth_data.mutex);
}
-static int
-qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
+static int qca8k_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
{
- struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- struct qca8k_fdb _fdb = { 0 };
- int cnt = QCA8K_NUM_FDB_RECORDS;
- bool is_static;
- int ret = 0;
+ struct qca_tagger_data *tagger_data;
- mutex_lock(&priv->reg_mutex);
- while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
- if (!_fdb.aging)
- break;
- is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
- ret = cb(_fdb.mac, _fdb.vid, is_static, data);
- if (ret)
- break;
+ switch (proto) {
+ case DSA_TAG_PROTO_QCA:
+ tagger_data = ds->tagger_data;
+
+ tagger_data->rw_reg_ack_handler = qca8k_rw_reg_ack_handler;
+ tagger_data->mib_autocast_handler = qca8k_mib_autocast_handler;
+
+ break;
+ default:
+ return -EOPNOTSUPP;
}
- mutex_unlock(&priv->reg_mutex);
return 0;
}
static int
-qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack)
+qca8k_setup(struct dsa_switch *ds)
{
- struct qca8k_priv *priv = ds->priv;
- int ret;
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ int cpu_port, ret, i;
+ u32 mask;
- if (vlan_filtering) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE,
- QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
- } else {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_VLAN_MODE,
- QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
+ cpu_port = qca8k_find_cpu_port(ds);
+ if (cpu_port < 0) {
+ dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
+ return cpu_port;
}
- return ret;
-}
+ /* Parse CPU port config to be later used in phy_link mac_config */
+ ret = qca8k_parse_port_config(priv);
+ if (ret)
+ return ret;
-static int
-qca8k_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct netlink_ext_ack *extack)
-{
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- struct qca8k_priv *priv = ds->priv;
- int ret;
+ ret = qca8k_setup_mdio_bus(priv);
+ if (ret)
+ return ret;
- ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
+ ret = qca8k_setup_of_pws_reg(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_mac_pwr_sel(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_led_ctrl(priv);
+ if (ret)
+ return ret;
+
+ qca8k_setup_pcs(priv, &priv->pcs_port_0, 0);
+ qca8k_setup_pcs(priv, &priv->pcs_port_6, 6);
+
+ /* Make sure MAC06 is disabled */
+ ret = regmap_clear_bits(priv->regmap, QCA8K_REG_PORT0_PAD_CTRL,
+ QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN);
if (ret) {
- dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
+ dev_err(priv->dev, "failed disabling MAC06 exchange");
return ret;
}
- if (pvid) {
- int shift = 16 * (port % 2);
+ /* Enable CPU Port */
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
+ if (ret) {
+ dev_err(priv->dev, "failed enabling CPU port");
+ return ret;
+ }
+
+ /* Enable MIB counters */
+ ret = qca8k_mib_init(priv);
+ if (ret)
+ dev_warn(priv->dev, "mib init failed");
- ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
- 0xfff << shift, vlan->vid << shift);
+ /* Initial setup of all ports */
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER, 0);
if (ret)
return ret;
- ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
- QCA8K_PORT_VLAN_CVID(vlan->vid) |
- QCA8K_PORT_VLAN_SVID(vlan->vid));
+ /* Enable QCA header mode on all cpu ports */
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_TX_MASK, QCA8K_PORT_HDR_CTRL_ALL) |
+ FIELD_PREP(QCA8K_PORT_HDR_CTRL_RX_MASK, QCA8K_PORT_HDR_CTRL_ALL));
+ if (ret) {
+ dev_err(priv->dev, "failed enabling QCA header mode");
+ return ret;
+ }
+ }
+
+ /* Disable MAC by default on all user ports */
+ if (dsa_is_user_port(ds, i))
+ qca8k_port_set_status(priv, i, 0);
}
- return ret;
-}
+ /* Forward all unknown frames to the CPU port for Linux processing.
+ * Notice that in a multi-cpu config only one port should be set
+ * for igmp, unknown, multicast and broadcast packets.
+ */
+ ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK, BIT(cpu_port)) |
+ FIELD_PREP(QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK, BIT(cpu_port)));
+ if (ret)
+ return ret;
-static int
-qca8k_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct qca8k_priv *priv = ds->priv;
- int ret;
+ /* Setup connection between CPU port & user ports
+ * Configure specific switch configuration for ports
+ */
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* CPU port gets connected to all user ports of the switch */
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
+ if (ret)
+ return ret;
+ }
+
+ /* Individual user ports get connected to CPU port only */
+ if (dsa_is_user_port(ds, i)) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_MEMBER,
+ BIT(cpu_port));
+ if (ret)
+ return ret;
- ret = qca8k_vlan_del(priv, port, vlan->vid);
+ /* Enable ARP Auto-learning by default */
+ ret = regmap_set_bits(priv->regmap, QCA8K_PORT_LOOKUP_CTRL(i),
+ QCA8K_PORT_LOOKUP_LEARN);
+ if (ret)
+ return ret;
+
+ /* For port based vlans to work we need to set the
+ * default egress vid
+ */
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i),
+ QCA8K_EGREES_VLAN_PORT_MASK(i),
+ QCA8K_EGREES_VLAN_PORT(i, QCA8K_PORT_VID_DEF));
+ if (ret)
+ return ret;
+
+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i),
+ QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) |
+ QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF));
+ if (ret)
+ return ret;
+ }
+
+ /* Port 5 of the qca8337 has some problems in flood conditions. The
+ * original legacy driver had some specific buffer and priority settings
+ * for the different ports suggested by the QCA switch team. Add these
+ * missing settings to improve switch stability under load.
+ * This problem is limited to the qca8337; other qca8k switches are not affected.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8337) {
+ switch (i) {
+ /* The 2 CPU ports and port 5 require a different
+ * priority than the other ports.
+ */
+ case 0:
+ case 5:
+ case 6:
+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI4(0x6) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI5(0x8) |
+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x1e);
+ break;
+ default:
+ mask = QCA8K_PORT_HOL_CTRL0_EG_PRI0(0x3) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI1(0x4) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI2(0x6) |
+ QCA8K_PORT_HOL_CTRL0_EG_PRI3(0x8) |
+ QCA8K_PORT_HOL_CTRL0_EG_PORT(0x19);
+ }
+ qca8k_write(priv, QCA8K_REG_PORT_HOL_CTRL0(i), mask);
+
+ mask = QCA8K_PORT_HOL_CTRL1_ING(0x6) |
+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN;
+ qca8k_rmw(priv, QCA8K_REG_PORT_HOL_CTRL1(i),
+ QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK |
+ QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN |
+ QCA8K_PORT_HOL_CTRL1_WRED_EN,
+ mask);
+ }
+ }
+
+ /* Special GLOBAL_FC_THRESH values are needed for the ar8327 switch */
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ mask = QCA8K_GLOBAL_FC_GOL_XON_THRES(288) |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES(496);
+ qca8k_rmw(priv, QCA8K_REG_GLOBAL_FC_THRESH,
+ QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK |
+ QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK,
+ mask);
+ }
+
+ /* Setup our port MTUs to match power on defaults */
+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
if (ret)
- dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
+ dev_warn(priv->dev, "failed setting MTU settings");
- return ret;
-}
+ /* Flush the FDB table */
+ qca8k_fdb_flush(priv);
-static u32 qca8k_get_phy_flags(struct dsa_switch *ds, int port)
-{
- struct qca8k_priv *priv = ds->priv;
+ /* Set the min and max ageing values supported */
+ ds->ageing_time_min = 7000;
+ ds->ageing_time_max = 458745000;
- /* Communicate to the phy internal driver the switch revision.
- * Based on the switch revision different values needs to be
- * set to the dbg and mmd reg on the phy.
- * The first 2 bit are used to communicate the switch revision
- * to the phy driver.
- */
- if (port > 0 && port < 6)
- return priv->switch_revision;
+ /* Set max number of LAGs supported */
+ ds->num_lag_ids = QCA8K_NUM_LAGS;
return 0;
}
-static enum dsa_tag_protocol
-qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
- enum dsa_tag_protocol mp)
-{
- return DSA_TAG_PROTO_QCA;
-}
-
static const struct dsa_switch_ops qca8k_switch_ops = {
.get_tag_protocol = qca8k_get_tag_protocol,
.setup = qca8k_setup,
.get_strings = qca8k_get_strings,
.get_ethtool_stats = qca8k_get_ethtool_stats,
.get_sset_count = qca8k_get_sset_count,
+ .set_ageing_time = qca8k_set_ageing_time,
.get_mac_eee = qca8k_get_mac_eee,
.set_mac_eee = qca8k_set_mac_eee,
.port_enable = qca8k_port_enable,
@@ -2011,50 +1977,29 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.port_stp_state_set = qca8k_port_stp_state_set,
.port_bridge_join = qca8k_port_bridge_join,
.port_bridge_leave = qca8k_port_bridge_leave,
+ .port_fast_age = qca8k_port_fast_age,
.port_fdb_add = qca8k_port_fdb_add,
.port_fdb_del = qca8k_port_fdb_del,
.port_fdb_dump = qca8k_port_fdb_dump,
+ .port_mdb_add = qca8k_port_mdb_add,
+ .port_mdb_del = qca8k_port_mdb_del,
+ .port_mirror_add = qca8k_port_mirror_add,
+ .port_mirror_del = qca8k_port_mirror_del,
.port_vlan_filtering = qca8k_port_vlan_filtering,
.port_vlan_add = qca8k_port_vlan_add,
.port_vlan_del = qca8k_port_vlan_del,
- .phylink_validate = qca8k_phylink_validate,
- .phylink_mac_link_state = qca8k_phylink_mac_link_state,
+ .phylink_get_caps = qca8k_phylink_get_caps,
+ .phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
.phylink_mac_config = qca8k_phylink_mac_config,
.phylink_mac_link_down = qca8k_phylink_mac_link_down,
.phylink_mac_link_up = qca8k_phylink_mac_link_up,
.get_phy_flags = qca8k_get_phy_flags,
+ .port_lag_join = qca8k_port_lag_join,
+ .port_lag_leave = qca8k_port_lag_leave,
+ .master_state_change = qca8k_master_change,
+ .connect_tag_protocol = qca8k_connect_tag_protocol,
};
-static int qca8k_read_switch_id(struct qca8k_priv *priv)
-{
- const struct qca8k_match_data *data;
- u32 val;
- u8 id;
- int ret;
-
- /* get the switches ID from the compatible */
- data = of_device_get_match_data(priv->dev);
- if (!data)
- return -ENODEV;
-
- ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
- if (ret < 0)
- return -ENODEV;
-
- id = QCA8K_MASK_CTRL_DEVICE_ID(val & QCA8K_MASK_CTRL_DEVICE_ID_MASK);
- if (id != data->id) {
- dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
- return -ENODEV;
- }
-
- priv->switch_id = id;
-
- /* Save revision to communicate to the internal PHY driver */
- priv->switch_revision = (val & QCA8K_MASK_CTRL_REV_ID_MASK);
-
- return 0;
-}
-
static int
qca8k_sw_probe(struct mdio_device *mdiodev)
{
@@ -2070,6 +2015,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
+ priv->info = of_device_get_match_data(priv->dev);
priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
GPIOD_ASIS);
@@ -2085,6 +2031,16 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
gpiod_set_value_cansleep(priv->reset_gpio, 0);
}
+ /* Start by setting up the register mapping */
+ priv->regmap = devm_regmap_init(&mdiodev->dev, NULL, priv,
+ &qca8k_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(priv->dev, "regmap initialization failed");
+ return PTR_ERR(priv->regmap);
+ }
+
+ priv->mdio_cache.page = 0xffff;
+
/* Check the detected switch id */
ret = qca8k_read_switch_id(priv);
if (ret)
@@ -2094,11 +2050,16 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (!priv->ds)
return -ENOMEM;
+ mutex_init(&priv->mgmt_eth_data.mutex);
+ init_completion(&priv->mgmt_eth_data.rw_done);
+
+ mutex_init(&priv->mib_eth_data.mutex);
+ init_completion(&priv->mib_eth_data.rw_done);
+
priv->ds->dev = &mdiodev->dev;
priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
- priv->ops = qca8k_switch_ops;
- priv->ds->ops = &priv->ops;
+ priv->ds->ops = &qca8k_switch_ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
@@ -2118,8 +2079,6 @@ qca8k_sw_remove(struct mdio_device *mdiodev)
qca8k_port_set_status(priv, i, 0);
dsa_unregister_switch(priv->ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
@@ -2138,13 +2097,16 @@ static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
static void
qca8k_set_pm(struct qca8k_priv *priv, int enable)
{
- int i;
+ int port;
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (!priv->port_sts[i].enabled)
+ for (port = 0; port < QCA8K_NUM_PORTS; port++) {
+ /* Do not enable on resume if the port was
+ * disabled before.
+ */
+ if (!(priv->port_enabled_map & BIT(port)))
continue;
- qca8k_port_set_status(priv, i, enable);
+ qca8k_port_set_status(priv, port, enable);
}
}
@@ -2170,17 +2132,27 @@ static int qca8k_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
qca8k_suspend, qca8k_resume);
+static const struct qca8k_info_ops qca8xxx_ops = {
+ .autocast_mib = qca8k_get_ethtool_stats_eth,
+};
+
static const struct qca8k_match_data qca8327 = {
.id = QCA8K_ID_QCA8327,
.reduced_package = true,
+ .mib_count = QCA8K_QCA832X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
};
static const struct qca8k_match_data qca8328 = {
.id = QCA8K_ID_QCA8327,
+ .mib_count = QCA8K_QCA832X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
};
static const struct qca8k_match_data qca833x = {
.id = QCA8K_ID_QCA8337,
+ .mib_count = QCA8K_QCA833X_MIB_COUNT,
+ .ops = &qca8xxx_ops,
};
static const struct of_device_id qca8k_of_match[] = {
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
new file mode 100644
index 000000000000..96773e432558
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -0,0 +1,1184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/netdevice.h>
+#include <net/dsa.h>
+#include <linux/if_bridge.h>
+
+#include "qca8k.h"
+
+#define MIB_DESC(_s, _o, _n) \
+ { \
+ .size = (_s), \
+ .offset = (_o), \
+ .name = (_n), \
+ }
+
+const struct qca8k_mib_desc ar8327_mib[] = {
+ MIB_DESC(1, 0x00, "RxBroad"),
+ MIB_DESC(1, 0x04, "RxPause"),
+ MIB_DESC(1, 0x08, "RxMulti"),
+ MIB_DESC(1, 0x0c, "RxFcsErr"),
+ MIB_DESC(1, 0x10, "RxAlignErr"),
+ MIB_DESC(1, 0x14, "RxRunt"),
+ MIB_DESC(1, 0x18, "RxFragment"),
+ MIB_DESC(1, 0x1c, "Rx64Byte"),
+ MIB_DESC(1, 0x20, "Rx128Byte"),
+ MIB_DESC(1, 0x24, "Rx256Byte"),
+ MIB_DESC(1, 0x28, "Rx512Byte"),
+ MIB_DESC(1, 0x2c, "Rx1024Byte"),
+ MIB_DESC(1, 0x30, "Rx1518Byte"),
+ MIB_DESC(1, 0x34, "RxMaxByte"),
+ MIB_DESC(1, 0x38, "RxTooLong"),
+ MIB_DESC(2, 0x3c, "RxGoodByte"),
+ MIB_DESC(2, 0x44, "RxBadByte"),
+ MIB_DESC(1, 0x4c, "RxOverFlow"),
+ MIB_DESC(1, 0x50, "Filtered"),
+ MIB_DESC(1, 0x54, "TxBroad"),
+ MIB_DESC(1, 0x58, "TxPause"),
+ MIB_DESC(1, 0x5c, "TxMulti"),
+ MIB_DESC(1, 0x60, "TxUnderRun"),
+ MIB_DESC(1, 0x64, "Tx64Byte"),
+ MIB_DESC(1, 0x68, "Tx128Byte"),
+ MIB_DESC(1, 0x6c, "Tx256Byte"),
+ MIB_DESC(1, 0x70, "Tx512Byte"),
+ MIB_DESC(1, 0x74, "Tx1024Byte"),
+ MIB_DESC(1, 0x78, "Tx1518Byte"),
+ MIB_DESC(1, 0x7c, "TxMaxByte"),
+ MIB_DESC(1, 0x80, "TxOverSize"),
+ MIB_DESC(2, 0x84, "TxByte"),
+ MIB_DESC(1, 0x8c, "TxCollision"),
+ MIB_DESC(1, 0x90, "TxAbortCol"),
+ MIB_DESC(1, 0x94, "TxMultiCol"),
+ MIB_DESC(1, 0x98, "TxSingleCol"),
+ MIB_DESC(1, 0x9c, "TxExcDefer"),
+ MIB_DESC(1, 0xa0, "TxDefer"),
+ MIB_DESC(1, 0xa4, "TxLateCol"),
+ MIB_DESC(1, 0xa8, "RXUnicast"),
+ MIB_DESC(1, 0xac, "TXUnicast"),
+};
+
+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
+{
+ return regmap_read(priv->regmap, reg, val);
+}
+
+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
+{
+ return regmap_write(priv->regmap, reg, val);
+}
+
+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
+{
+ return regmap_update_bits(priv->regmap, reg, mask, write_val);
+}
+
+static const struct regmap_range qca8k_readable_ranges[] = {
+ regmap_reg_range(0x0000, 0x00e4), /* Global control */
+ regmap_reg_range(0x0100, 0x0168), /* EEE control */
+ regmap_reg_range(0x0200, 0x0270), /* Parser control */
+ regmap_reg_range(0x0400, 0x0454), /* ACL */
+ regmap_reg_range(0x0600, 0x0718), /* Lookup */
+ regmap_reg_range(0x0800, 0x0b70), /* QM */
+ regmap_reg_range(0x0c00, 0x0c80), /* PKT */
+ regmap_reg_range(0x0e00, 0x0e98), /* L3 */
+ regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
+ regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
+ regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
+ regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
+ regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
+ regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
+ regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
+};
+
+const struct regmap_access_table qca8k_readable_table = {
+ .yes_ranges = qca8k_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
+};
+
+static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
+ QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
+}
+
+static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
+{
+ u32 reg[QCA8K_ATU_TABLE_SIZE];
+ int ret;
+
+ /* load the ARL table into an array */
+ ret = regmap_bulk_read(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
+ QCA8K_ATU_TABLE_SIZE);
+ if (ret)
+ return ret;
+
+ /* vid - 83:72 */
+ fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
+ /* aging - 67:64 */
+ fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
+ /* portmask - 54:48 */
+ fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
+ /* mac - 47:0 */
+ fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
+ fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
+ fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
+ fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
+ fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
+ fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
+
+ return 0;
+}
+
+static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
+ const u8 *mac, u8 aging)
+{
+ u32 reg[QCA8K_ATU_TABLE_SIZE] = { 0 };
+
+ /* vid - 83:72 */
+ reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
+ /* aging - 67:64 */
+ reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
+ /* portmask - 54:48 */
+ reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
+ /* mac - 47:0 */
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
+ reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
+ reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
+
+ /* load the array into the ARL table */
+ regmap_bulk_write(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
+ QCA8K_ATU_TABLE_SIZE);
+}
+
+static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
+ int port)
+{
+ u32 reg;
+ int ret;
+
+ /* Set the command and FDB index */
+ reg = QCA8K_ATU_FUNC_BUSY;
+ reg |= cmd;
+ if (port >= 0) {
+ reg |= QCA8K_ATU_FUNC_PORT_EN;
+ reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
+ }
+
+ /* Write the function register triggering the table access */
+ ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
+ if (ret)
+ return ret;
+
+ /* wait for completion */
+ ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
+ if (ret)
+ return ret;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_FDB_LOAD) {
+ ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
+ if (ret < 0)
+ return ret;
+ if (reg & QCA8K_ATU_FUNC_FULL)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb,
+ int port)
+{
+ int ret;
+
+ qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
+ if (ret < 0)
+ return ret;
+
+ return qca8k_fdb_read(priv, fdb);
+}
+
+static int qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac,
+ u16 port_mask, u16 vid, u8 aging)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac,
+ u16 port_mask, u16 vid)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_write(priv, vid, port_mask, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+void qca8k_fdb_flush(struct qca8k_priv *priv)
+{
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ ret = qca8k_fdb_read(priv, &fdb);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule exists. Delete it first */
+ if (!fdb.aging) {
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+ }
+
+ /* Add port to fdb portmask */
+ fdb.port_mask |= port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
+ const u8 *mac, u16 vid)
+{
+ struct qca8k_fdb fdb = { 0 };
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+
+ qca8k_fdb_write(priv, vid, 0, mac, 0);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
+ if (ret < 0)
+ goto exit;
+
+ /* Rule doesn't exist. Why delete? */
+ if (!fdb.aging) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
+ if (ret)
+ goto exit;
+
+ /* The only port in the rule is this port. Don't re-insert */
+ if (fdb.port_mask == port_mask)
+ goto exit;
+
+ /* Remove port from port mask */
+ fdb.port_mask &= ~port_mask;
+
+ qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
+ ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+static int qca8k_vlan_access(struct qca8k_priv *priv,
+ enum qca8k_vlan_cmd cmd, u16 vid)
+{
+ u32 reg;
+ int ret;
+
+ /* Set the command and VLAN index */
+ reg = QCA8K_VTU_FUNC1_BUSY;
+ reg |= cmd;
+ reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);
+
+ /* Write the function register triggering the table access */
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
+ if (ret)
+ return ret;
+
+ /* wait for completion */
+ ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
+ if (ret)
+ return ret;
+
+ /* Check for table full violation when adding an entry */
+ if (cmd == QCA8K_VLAN_LOAD) {
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
+ if (ret < 0)
+ return ret;
+ if (reg & QCA8K_VTU_FUNC1_FULL)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid,
+ bool untagged)
+{
+ u32 reg;
+ int ret;
+
+ /* We do the right thing with VLAN 0 and treat it as untagged while
+ * preserving the tag on egress.
+ */
+ if (vid == 0)
+ return 0;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
+ reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ if (untagged)
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
+ else
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
+
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+static int qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
+{
+ u32 reg, mask;
+ int ret, i;
+ bool del;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
+ if (ret < 0)
+ goto out;
+
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
+ goto out;
+ reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
+ reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
+
+ /* Check if we're the last member to be removed */
+ del = true;
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
+
+ if ((reg & mask) != mask) {
+ del = false;
+ break;
+ }
+ }
+
+ if (del) {
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
+ } else {
+ ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
+ if (ret)
+ goto out;
+ ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
+ }
+
+out:
+ mutex_unlock(&priv->reg_mutex);
+
+ return ret;
+}
+
+int qca8k_mib_init(struct qca8k_priv *priv)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
+ QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
+ FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
+ QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
+ if (ret)
+ goto exit;
+
+ ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
+ if (ret)
+ goto exit;
+
+ ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
+{
+ u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
+
+ /* Port 0 and 6 have no internal PHY */
+ if (port > 0 && port < 6)
+ mask |= QCA8K_PORT_STATUS_LINK_AUTO;
+
+ if (enable)
+ regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+ else
+ regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
+}
+
+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < priv->info->mib_count; i++)
+ strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
+ ETH_GSTRING_LEN);
+}
+
+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const struct qca8k_mib_desc *mib;
+ u32 reg, i, val;
+ u32 hi = 0;
+ int ret;
+
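+ /* Prefer the Ethernet MIB autocast path when a management master is
+ * available; fall back to reading the counters over the bus below.
+ */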
+ if (priv->mgmt_master && priv->info->ops->autocast_mib &&
+ priv->info->ops->autocast_mib(ds, port, data) > 0)
+ return;
+
+ for (i = 0; i < priv->info->mib_count; i++) {
+ mib = &ar8327_mib[i];
+ reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
+
+ ret = qca8k_read(priv, reg, &val);
+ if (ret < 0)
+ continue;
+
+ if (mib->size == 2) {
+ ret = qca8k_read(priv, reg + 4, &hi);
+ if (ret < 0)
+ continue;
+ }
+
+ data[i] = val;
+ if (mib->size == 2)
+ data[i] |= (u64)hi << 32;
+ }
+}
+
+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return priv->info->mib_count;
+}
+
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *eee)
+{
+ u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg;
+ int ret;
+
+ mutex_lock(&priv->reg_mutex);
+ ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
+ if (ret < 0)
+ goto exit;
+
+ if (eee->eee_enabled)
+ reg |= lpi_en;
+ else
+ reg &= ~lpi_en;
+ ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
+
+exit:
+ mutex_unlock(&priv->reg_mutex);
+ return ret;
+}
+
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ /* Nothing to do on the port's MAC */
+ return 0;
+}
+
+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LISTENING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
+ break;
+ }
+
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_STATE_MASK, stp_state);
+}
+
+int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int port_mask, cpu_port;
+ int i, ret;
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ port_mask = BIT(cpu_port);
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+ /* Add this port to the portvlan mask of the other ports
+ * in the bridge
+ */
+ ret = regmap_set_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ if (ret)
+ return ret;
+ if (i != port)
+ port_mask |= BIT(i);
+ }
+
+ /* Add all other ports to this ports portvlan mask */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+
+ return ret;
+}
+
+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int cpu_port, i;
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
+ continue;
+ /* Remove this port from the portvlan mask of the other ports
+ * in the bridge
+ */
+ regmap_clear_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ }
+
+ /* Set the cpu port to be the only one in the portvlan mask of
+ * this port
+ */
+ qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
+}
+
+void qca8k_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ mutex_lock(&priv->reg_mutex);
+ qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
+ mutex_unlock(&priv->reg_mutex);
+}
+
+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct qca8k_priv *priv = ds->priv;
+ unsigned int secs = msecs / 1000;
+ u32 val;
+
+ /* The AGE_TIME reg is set in 7 s steps */
+ val = secs / 7;
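+ /* e.g. 300000 msecs -> 300 s -> val 42, i.e. roughly 294 s effective */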
+
+ /* Handle case with 0 as val to NOT disable
+ * learning
+ */
+ if (!val)
+ val = 1;
+
+ return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL,
+ QCA8K_ATU_AGE_TIME_MASK,
+ QCA8K_ATU_AGE_TIME(val));
+}
+
+int qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ qca8k_port_set_status(priv, port, 1);
+ priv->port_enabled_map |= BIT(port);
+
+ if (dsa_is_user_port(ds, port))
+ phy_support_asym_pause(phy);
+
+ return 0;
+}
+
+void qca8k_port_disable(struct dsa_switch *ds, int port)
+{
+ struct qca8k_priv *priv = ds->priv;
+
+ qca8k_port_set_status(priv, port, 0);
+ priv->port_enabled_map &= ~BIT(port);
+}
+
+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ /* We only have a general MTU setting.
+ * DSA always sets the CPU port's MTU to the largest MTU of the slave
+ * ports.
+ * Setting the MTU just for the CPU port is sufficient to correctly set
+ * a value for every port.
+ */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ /* To change the MAX_FRAME_SIZE the cpu ports must be off or
+ * the switch panics.
+ * Turn off both cpu ports before applying the new value to prevent
+ * this.
+ */
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 0);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 0);
+
+ /* Include L2 header / FCS length */
+ ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu +
+ ETH_HLEN + ETH_FCS_LEN);
+
+ if (priv->port_enabled_map & BIT(0))
+ qca8k_port_set_status(priv, 0, 1);
+
+ if (priv->port_enabled_map & BIT(6))
+ qca8k_port_set_status(priv, 6, 1);
+
+ return ret;
+}
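
For reference, the MAX_FRAME_SIZE value written above is simply the requested MTU plus the L2 header and FCS lengths. A small sketch of the arithmetic (the EX_* constants mirror ETH_HLEN and ETH_FCS_LEN and are local to this example):

    #include <stdio.h>

    #define EX_ETH_HLEN     14  /* Ethernet header */
    #define EX_ETH_FCS_LEN  4   /* frame check sequence */

    int main(void)
    {
        int new_mtu = 1500;

        /* 1500 + 14 + 4 = 1518 is the value written to MAX_FRAME_SIZE */
        printf("%d\n", new_mtu + EX_ETH_HLEN + EX_ETH_FCS_LEN);
        return 0;
    }
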
+
+int qca8k_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return QCA8K_MAX_MTU;
+}
+
+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+ u16 port_mask, u16 vid)
+{
+ /* Set the vid to the port vlan id if no vid is set */
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_add(priv, addr, port_mask, vid,
+ QCA8K_ATU_STATUS_STATIC);
+}
+
+int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+
+ return qca8k_port_fdb_insert(priv, addr, port_mask, vid);
+}
+
+int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ u16 port_mask = BIT(port);
+
+ if (!vid)
+ vid = QCA8K_PORT_VID_DEF;
+
+ return qca8k_fdb_del(priv, addr, port_mask, vid);
+}
+
+int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
+ struct qca8k_fdb _fdb = { 0 };
+ int cnt = QCA8K_NUM_FDB_RECORDS;
+ bool is_static;
+ int ret = 0;
+
+ mutex_lock(&priv->reg_mutex);
+ while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) {
+ if (!_fdb.aging)
+ break;
+ is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC);
+ ret = cb(_fdb.mac, _fdb.vid, is_static, data);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&priv->reg_mutex);
+
+ return 0;
+}
+
+int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
+}
+
+int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
+{
+ struct qca8k_priv *priv = ds->priv;
+ const u8 *addr = mdb->addr;
+ u16 vid = mdb->vid;
+
+ return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid);
+}
+
+int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int monitor_port, ret;
+ u32 reg, val;
+
+ /* Check for an existing entry */
+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
+ return -EEXIST;
+
+ ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* QCA83xx can have only one port set to mirror mode.
+ * Check that the correct port is requested and return error otherwise.
+ * When no mirror port is set, the value is set to 0xF
+ */
+ monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
+ return -EEXIST;
+
+ /* Set the monitor port */
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
+ mirror->to_local_port);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ return ret;
+
+ if (ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_update_bits(priv->regmap, reg, val, val);
+ if (ret)
+ return ret;
+
+ /* Track mirror port for tx and rx to decide when the
+ * mirror port has to be disabled.
+ */
+ if (ingress)
+ priv->mirror_rx |= BIT(port);
+ else
+ priv->mirror_tx |= BIT(port);
+
+ return 0;
+}
+
+void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct qca8k_priv *priv = ds->priv;
+ u32 reg, val;
+ int ret;
+
+ if (mirror->ingress) {
+ reg = QCA8K_PORT_LOOKUP_CTRL(port);
+ val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
+ } else {
+ reg = QCA8K_REG_PORT_HOL_CTRL1(port);
+ val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
+ }
+
+ ret = regmap_clear_bits(priv->regmap, reg, val);
+ if (ret)
+ goto err;
+
+ if (mirror->ingress)
+ priv->mirror_rx &= ~BIT(port);
+ else
+ priv->mirror_tx &= ~BIT(port);
+
+ /* No port set to send packet to mirror port. Disable mirror port */
+ if (!priv->mirror_rx && !priv->mirror_tx) {
+ val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
+ QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
+ if (ret)
+ goto err;
+ }
+
+ return;
+err:
+ dev_err(priv->dev, "Failed to del mirror port from %d", port);
+}
+
+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ if (vlan_filtering) {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
+ } else {
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
+ QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
+ }
+
+ return ret;
+}
+
+int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
+ if (ret) {
+ dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
+ return ret;
+ }
+
+ if (pvid) {
+ ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
+ QCA8K_EGREES_VLAN_PORT_MASK(port),
+ QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
+ if (ret)
+ return ret;
+
+ ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
+ QCA8K_PORT_VLAN_CVID(vlan->vid) |
+ QCA8K_PORT_VLAN_SVID(vlan->vid));
+ }
+
+ return ret;
+}
+
+int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret;
+
+ ret = qca8k_vlan_del(priv, port, vlan->vid);
+ if (ret)
+ dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret);
+
+ return ret;
+}
+
+static bool qca8k_lag_can_offload(struct dsa_switch *ds,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp;
+ int members = 0;
+
+ if (!lag.id)
+ return false;
+
+ dsa_lag_foreach_port(dp, ds->dst, &lag)
+ /* Includes the port joining the LAG */
+ members++;
+
+ if (members > QCA8K_NUM_PORTS_FOR_LAG) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than 4 LAG ports");
+ return false;
+ }
+
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
+ return false;
+ }
+
+ if (info->hash_type != NETDEV_LAG_HASH_L2 &&
+ info->hash_type != NETDEV_LAG_HASH_L23) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload L2 or L2+L3 TX hash");
+ return false;
+ }
+
+ return true;
+}
+
+static int qca8k_lag_setup_hash(struct dsa_switch *ds,
+ struct dsa_lag lag,
+ struct netdev_lag_upper_info *info)
+{
+ struct net_device *lag_dev = lag.dev;
+ struct qca8k_priv *priv = ds->priv;
+ bool unique_lag = true;
+ unsigned int i;
+ u32 hash = 0;
+
+ switch (info->hash_type) {
+ case NETDEV_LAG_HASH_L23:
+ hash |= QCA8K_TRUNK_HASH_SIP_EN;
+ hash |= QCA8K_TRUNK_HASH_DIP_EN;
+ fallthrough;
+ case NETDEV_LAG_HASH_L2:
+ hash |= QCA8K_TRUNK_HASH_SA_EN;
+ hash |= QCA8K_TRUNK_HASH_DA_EN;
+ break;
+ default: /* We should NEVER reach this */
+ return -EOPNOTSUPP;
+ }
+
+ /* Check if we are the unique configured LAG */
+ dsa_lags_foreach_id(i, ds->dst)
+ if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
+ unique_lag = false;
+ break;
+ }
+
+ /* The hash mode is global. Make sure the same hash mode
+ * is set for all 4 possible LAGs.
+ * If we are the only configured LAG we can set whatever
+ * hash mode we want.
+ * To change the hash mode, all LAGs must be removed and
+ * the mode changed while adding back the last one.
+ */
+ if (unique_lag) {
+ priv->lag_hash_mode = hash;
+ } else if (priv->lag_hash_mode != hash) {
+ netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
+ QCA8K_TRUNK_HASH_MASK, hash);
+}
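
To make the hash selection above concrete: L2 hashing enables the source and destination MAC fields, while L2+L3 additionally enables the source and destination IP fields. A stand-alone sketch with bit values mirroring the QCA8K_TRUNK_HASH_* masks (the EX_* macro names are illustrative only):

    #include <stdio.h>

    #define EX_HASH_DA_EN   (1U << 0)   /* destination MAC */
    #define EX_HASH_SA_EN   (1U << 1)   /* source MAC */
    #define EX_HASH_DIP_EN  (1U << 2)   /* destination IP */
    #define EX_HASH_SIP_EN  (1U << 3)   /* source IP */

    int main(void)
    {
        unsigned int l2 = EX_HASH_SA_EN | EX_HASH_DA_EN;
        unsigned int l23 = l2 | EX_HASH_SIP_EN | EX_HASH_DIP_EN;

        /* 0x3 for NETDEV_LAG_HASH_L2, 0xf for NETDEV_LAG_HASH_L23 */
        printf("L2=0x%x L2+L3=0x%x\n", l2, l23);
        return 0;
    }
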
+
+static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
+ struct dsa_lag lag, bool delete)
+{
+ struct qca8k_priv *priv = ds->priv;
+ int ret, id, i;
+ u32 val;
+
+ /* DSA LAG IDs are one-based, hardware is zero-based */
+ id = lag.id - 1;
+
+ /* Read current port member */
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
+ if (ret)
+ return ret;
+
+ /* Shift val to the correct trunk */
+ val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
+ val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
+ if (delete)
+ val &= ~BIT(port);
+ else
+ val |= BIT(port);
+
+ /* Update the port members. With an empty portmap, disable the trunk */
+ ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
+ QCA8K_REG_GOL_TRUNK_MEMBER(id) |
+ QCA8K_REG_GOL_TRUNK_EN(id),
+ !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
+ val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
+
+ /* Search for an empty member slot when adding, or for the port when deleting */
+ for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
+ ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
+ if (ret)
+ return ret;
+
+ val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
+
+ if (delete) {
+ /* If the port is flagged to be disabled, assume this
+ * member is empty
+ */
+ if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+
+ val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
+ if (val != port)
+ continue;
+ } else {
+ /* If the port is flagged to be enabled, assume this
+ * member is already set
+ */
+ if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
+ continue;
+ }
+
+ /* We have found the member to add/remove */
+ break;
+ }
+
+ /* Set port in the correct port mask or disable port if in delete mode */
+ return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
+ QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
+ !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
+ port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
+}
+
+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
+{
+ int ret;
+
+ if (!qca8k_lag_can_offload(ds, lag, info, extack))
+ return -EOPNOTSUPP;
+
+ ret = qca8k_lag_setup_hash(ds, lag, info);
+ if (ret)
+ return ret;
+
+ return qca8k_lag_refresh_portmap(ds, port, lag, false);
+}
+
+int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+ struct dsa_lag lag)
+{
+ return qca8k_lag_refresh_portmap(ds, port, lag, true);
+}
+
+int qca8k_read_switch_id(struct qca8k_priv *priv)
+{
+ u32 val;
+ u8 id;
+ int ret;
+
+ if (!priv->info)
+ return -ENODEV;
+
+ ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
+ if (ret < 0)
+ return -ENODEV;
+
+ id = QCA8K_MASK_CTRL_DEVICE_ID(val);
+ if (id != priv->info->id) {
+ dev_err(priv->dev,
+ "Switch id detected %x but expected %x",
+ id, priv->info->id);
+ return -ENODEV;
+ }
+
+ priv->switch_id = id;
+
+ /* Save revision to communicate to the internal PHY driver */
+ priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
+
+ return 0;
+}
diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c
new file mode 100644
index 000000000000..b883692b7d86
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k-leds.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/regmap.h>
+#include <net/dsa.h>
+
+#include "qca8k.h"
+#include "qca8k_leds.h"
+
+static int
+qca8k_get_enable_led_reg(int port_num, int led_num, struct qca8k_led_pattern_en *reg_info)
+{
+ switch (port_num) {
+ case 0:
+ reg_info->reg = QCA8K_LED_CTRL_REG(led_num);
+ reg_info->shift = QCA8K_LED_PHY0123_CONTROL_RULE_SHIFT;
+ break;
+ case 1:
+ case 2:
+ case 3:
+ /* Ports 1, 2 and 3 are controlled on a different reg */
+ reg_info->reg = QCA8K_LED_CTRL3_REG;
+ reg_info->shift = QCA8K_LED_PHY123_PATTERN_EN_SHIFT(port_num, led_num);
+ break;
+ case 4:
+ reg_info->reg = QCA8K_LED_CTRL_REG(led_num);
+ reg_info->shift = QCA8K_LED_PHY4_CONTROL_RULE_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_led_brightness_set(struct qca8k_led *led,
+ enum led_brightness brightness)
+{
+ struct qca8k_led_pattern_en reg_info;
+ struct qca8k_priv *priv = led->priv;
+ u32 mask, val;
+
+ qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
+
+ val = QCA8K_LED_ALWAYS_OFF;
+ if (brightness)
+ val = QCA8K_LED_ALWAYS_ON;
+
+ /* The HW regs that control brightness are special and ports 1-2-3
+ * are placed in a different reg.
+ *
+ * To control port 0 brightness:
+ * - the 2 bits (15, 14) of:
+ * - QCA8K_LED_CTRL0_REG for led1
+ * - QCA8K_LED_CTRL1_REG for led2
+ * - QCA8K_LED_CTRL2_REG for led3
+ *
+ * To control port 4:
+ * - the 2 bits (31, 30) of:
+ * - QCA8K_LED_CTRL0_REG for led1
+ * - QCA8K_LED_CTRL1_REG for led2
+ * - QCA8K_LED_CTRL2_REG for led3
+ *
+ * To control port 1:
+ * - the 2 bits at (9, 8) of QCA8K_LED_CTRL3_REG are used for led1
+ * - the 2 bits at (11, 10) of QCA8K_LED_CTRL3_REG are used for led2
+ * - the 2 bits at (13, 12) of QCA8K_LED_CTRL3_REG are used for led3
+ *
+ * To control port 2:
+ * - the 2 bits at (15, 14) of QCA8K_LED_CTRL3_REG are used for led1
+ * - the 2 bits at (17, 16) of QCA8K_LED_CTRL3_REG are used for led2
+ * - the 2 bits at (19, 18) of QCA8K_LED_CTRL3_REG are used for led3
+ *
+ * To control port 3:
+ * - the 2 bits at (21, 20) of QCA8K_LED_CTRL3_REG are used for led1
+ * - the 2 bits at (23, 22) of QCA8K_LED_CTRL3_REG are used for led2
+ * - the 2 bits at (25, 24) of QCA8K_LED_CTRL3_REG are used for led3
+ *
+ * To abstract this and have less code, we use the port and led num
+ * to calculate the shift and the correct reg because there is no
+ * 1:1 map of LEDs to regs.
+ */
+ if (led->port_num == 0 || led->port_num == 4) {
+ mask = QCA8K_LED_PATTERN_EN_MASK;
+ val <<= QCA8K_LED_PATTERN_EN_SHIFT;
+ } else {
+ mask = QCA8K_LED_PHY123_PATTERN_EN_MASK;
+ }
+
+ return regmap_update_bits(priv->regmap, reg_info.reg,
+ mask << reg_info.shift,
+ val << reg_info.shift);
+}
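
The shift used for ports 1-3 follows the arithmetic of QCA8K_LED_PHY123_PATTERN_EN_SHIFT: ((phy - 1) * 6) + 8 + (2 * led). A quick sketch that reproduces the bit ranges listed in the comment above (led index 0..2 corresponds to led1..led3; the helper name is made up for illustration):

    #include <stdio.h>

    /* Same arithmetic as QCA8K_LED_PHY123_PATTERN_EN_SHIFT(_phy, _led) */
    static int phy123_shift(int phy, int led)
    {
        return ((phy - 1) * 6) + 8 + (2 * led);
    }

    int main(void)
    {
        for (int phy = 1; phy <= 3; phy++)
            for (int led = 0; led < 3; led++)
                printf("phy %d led %d -> bits %d:%d of LED_CTRL3\n",
                       phy, led, phy123_shift(phy, led) + 1,
                       phy123_shift(phy, led));
        return 0;
    }
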
+
+static int
+qca8k_cled_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
+
+ return qca8k_led_brightness_set(led, brightness);
+}
+
+static enum led_brightness
+qca8k_led_brightness_get(struct qca8k_led *led)
+{
+ struct qca8k_led_pattern_en reg_info;
+ struct qca8k_priv *priv = led->priv;
+ u32 val;
+ int ret;
+
+ qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
+
+ ret = regmap_read(priv->regmap, reg_info.reg, &val);
+ if (ret)
+ return 0;
+
+ val >>= reg_info.shift;
+
+ if (led->port_num == 0 || led->port_num == 4) {
+ val &= QCA8K_LED_PATTERN_EN_MASK;
+ val >>= QCA8K_LED_PATTERN_EN_SHIFT;
+ } else {
+ val &= QCA8K_LED_PHY123_PATTERN_EN_MASK;
+ }
+
+ /* Assume brightness ON only when the LED is set to always ON */
+ return val == QCA8K_LED_ALWAYS_ON;
+}
+
+static int
+qca8k_cled_blink_set(struct led_classdev *ldev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct qca8k_led *led = container_of(ldev, struct qca8k_led, cdev);
+ u32 mask, val = QCA8K_LED_ALWAYS_BLINK_4HZ;
+ struct qca8k_led_pattern_en reg_info;
+ struct qca8k_priv *priv = led->priv;
+
+ if (*delay_on == 0 && *delay_off == 0) {
+ *delay_on = 125;
+ *delay_off = 125;
+ }
+
+ if (*delay_on != 125 || *delay_off != 125) {
+ /* The hardware only supports blinking at 4Hz. Fall back
+ * to software implementation in other cases.
+ */
+ return -EINVAL;
+ }
+
+ qca8k_get_enable_led_reg(led->port_num, led->led_num, &reg_info);
+
+ if (led->port_num == 0 || led->port_num == 4) {
+ mask = QCA8K_LED_PATTERN_EN_MASK;
+ val <<= QCA8K_LED_PATTERN_EN_SHIFT;
+ } else {
+ mask = QCA8K_LED_PHY123_PATTERN_EN_MASK;
+ }
+
+ regmap_update_bits(priv->regmap, reg_info.reg, mask << reg_info.shift,
+ val << reg_info.shift);
+
+ return 0;
+}
+
+static int
+qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int port_num)
+{
+ struct fwnode_handle *led = NULL, *leds = NULL;
+ struct led_init_data init_data = { };
+ struct dsa_switch *ds = priv->ds;
+ enum led_default_state state;
+ struct qca8k_led *port_led;
+ int led_num, led_index;
+ int ret;
+
+ leds = fwnode_get_named_child_node(port, "leds");
+ if (!leds) {
+ dev_dbg(priv->dev, "No Leds node specified in device tree for port %d!\n",
+ port_num);
+ return 0;
+ }
+
+ fwnode_for_each_child_node(leds, led) {
+ /* Reg represents the led number of the port.
+ * Each port can have at most 3 leds attached.
+ * Commonly:
+ * 1. is the gigabit led
+ * 2. is the mbit led
+ * 3. is an additional status led
+ */
+ if (fwnode_property_read_u32(led, "reg", &led_num))
+ continue;
+
+ if (led_num >= QCA8K_LED_PORT_COUNT) {
+ dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
+ led_num, port_num);
+ continue;
+ }
+
+ led_index = QCA8K_LED_PORT_INDEX(port_num, led_num);
+
+ port_led = &priv->ports_led[led_index];
+ port_led->port_num = port_num;
+ port_led->led_num = led_num;
+ port_led->priv = priv;
+
+ state = led_init_default_state_get(led);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ port_led->cdev.brightness = 1;
+ qca8k_led_brightness_set(port_led, 1);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ port_led->cdev.brightness =
+ qca8k_led_brightness_get(port_led);
+ break;
+ default:
+ port_led->cdev.brightness = 0;
+ qca8k_led_brightness_set(port_led, 0);
+ }
+
+ port_led->cdev.max_brightness = 1;
+ port_led->cdev.brightness_set_blocking = qca8k_cled_brightness_set_blocking;
+ port_led->cdev.blink_set = qca8k_cled_blink_set;
+ init_data.default_label = ":port";
+ init_data.fwnode = led;
+ init_data.devname_mandatory = true;
+ init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d", ds->slave_mii_bus->id,
+ port_num);
+ if (!init_data.devicename)
+ return -ENOMEM;
+
+ ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data);
+ if (ret)
+ dev_warn(priv->dev, "Failed to init LED %d for port %d", led_num, port_num);
+
+ kfree(init_data.devicename);
+ }
+
+ return 0;
+}
+
+int
+qca8k_setup_led_ctrl(struct qca8k_priv *priv)
+{
+ struct fwnode_handle *ports, *port;
+ int port_num;
+ int ret;
+
+ ports = device_get_named_child_node(priv->dev, "ports");
+ if (!ports) {
+ dev_info(priv->dev, "No ports node specified in device tree!");
+ return 0;
+ }
+
+ fwnode_for_each_child_node(ports, port) {
+ if (fwnode_property_read_u32(port, "reg", &port_num))
+ continue;
+
+ /* Skip CPU port 0 and CPU port 6, as LEDs are not supported on them */
+ if (port_num == 0 || port_num == 6)
+ continue;
+
+ /* Each port can have at most 3 different leds attached.
+ * Switch ports go from 0 to 6, but ports 0 and 6 are CPU
+ * ports. The port index needs to be decreased by one to identify
+ * the correct port for LED setup.
+ */
+ ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
new file mode 100644
index 000000000000..c5cc8a172d65
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -0,0 +1,588 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QCA8K_H
+#define __QCA8K_H
+
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/gpio.h>
+#include <linux/leds.h>
+#include <linux/dsa/tag_qca.h>
+
+#define QCA8K_ETHERNET_MDIO_PRIORITY 7
+#define QCA8K_ETHERNET_PHY_PRIORITY 6
+#define QCA8K_ETHERNET_TIMEOUT 5
+
+#define QCA8K_NUM_PORTS 7
+#define QCA8K_NUM_CPU_PORTS 2
+#define QCA8K_MAX_MTU 9000
+#define QCA8K_NUM_LAGS 4
+#define QCA8K_NUM_PORTS_FOR_LAG 4
+
+#define PHY_ID_QCA8327 0x004dd034
+#define QCA8K_ID_QCA8327 0x12
+#define PHY_ID_QCA8337 0x004dd036
+#define QCA8K_ID_QCA8337 0x13
+
+#define QCA8K_QCA832X_MIB_COUNT 39
+#define QCA8K_QCA833X_MIB_COUNT 41
+
+#define QCA8K_BUSY_WAIT_TIMEOUT 2000
+
+#define QCA8K_NUM_FDB_RECORDS 2048
+
+#define QCA8K_PORT_VID_DEF 1
+
+/* Global control registers */
+#define QCA8K_REG_MASK_CTRL 0x000
+#define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0)
+#define QCA8K_MASK_CTRL_REV_ID(x) FIELD_GET(QCA8K_MASK_CTRL_REV_ID_MASK, x)
+#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
+#define QCA8K_MASK_CTRL_DEVICE_ID(x) FIELD_GET(QCA8K_MASK_CTRL_DEVICE_ID_MASK, x)
+#define QCA8K_REG_PORT0_PAD_CTRL 0x004
+#define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31)
+#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
+#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18)
+#define QCA8K_REG_PORT5_PAD_CTRL 0x008
+#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
+#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, x)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) FIELD_PREP(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, x)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
+#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
+#define QCA8K_REG_PWS 0x010
+#define QCA8K_PWS_POWER_ON_SEL BIT(31)
+/* This reg is only valid for QCA832x and toggles the package
+ * type from 176 pin (by default) to 148 pin used on QCA8327
+ */
+#define QCA8327_PWS_PACKAGE148_EN BIT(30)
+#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24)
+#define QCA8K_PWS_SERDES_AEN_DIS BIT(7)
+#define QCA8K_REG_MODULE_EN 0x030
+#define QCA8K_MODULE_EN_MIB BIT(0)
+#define QCA8K_REG_MIB 0x034
+#define QCA8K_MIB_FUNC GENMASK(26, 24)
+#define QCA8K_MIB_CPU_KEEP BIT(20)
+#define QCA8K_MIB_BUSY BIT(17)
+#define QCA8K_MDIO_MASTER_CTRL 0x3c
+#define QCA8K_MDIO_MASTER_BUSY BIT(31)
+#define QCA8K_MDIO_MASTER_EN BIT(30)
+#define QCA8K_MDIO_MASTER_READ BIT(27)
+#define QCA8K_MDIO_MASTER_WRITE 0
+#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
+#define QCA8K_MDIO_MASTER_PHY_ADDR_MASK GENMASK(25, 21)
+#define QCA8K_MDIO_MASTER_PHY_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_PHY_ADDR_MASK, x)
+#define QCA8K_MDIO_MASTER_REG_ADDR_MASK GENMASK(20, 16)
+#define QCA8K_MDIO_MASTER_REG_ADDR(x) FIELD_PREP(QCA8K_MDIO_MASTER_REG_ADDR_MASK, x)
+#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
+#define QCA8K_MDIO_MASTER_DATA(x) FIELD_PREP(QCA8K_MDIO_MASTER_DATA_MASK, x)
+#define QCA8K_MDIO_MASTER_MAX_PORTS 5
+#define QCA8K_MDIO_MASTER_MAX_REG 32
+
+/* LED control register */
+#define QCA8K_LED_PORT_COUNT 3
+#define QCA8K_LED_COUNT ((QCA8K_NUM_PORTS - QCA8K_NUM_CPU_PORTS) * QCA8K_LED_PORT_COUNT)
+#define QCA8K_LED_RULE_COUNT 6
+#define QCA8K_LED_RULE_MAX 11
+#define QCA8K_LED_PORT_INDEX(_phy, _led) (((_phy) * QCA8K_LED_PORT_COUNT) + (_led))
+
+#define QCA8K_LED_PHY123_PATTERN_EN_SHIFT(_phy, _led) ((((_phy) - 1) * 6) + 8 + (2 * (_led)))
+#define QCA8K_LED_PHY123_PATTERN_EN_MASK GENMASK(1, 0)
+
+#define QCA8K_LED_PHY0123_CONTROL_RULE_SHIFT 0
+#define QCA8K_LED_PHY4_CONTROL_RULE_SHIFT 16
+
+#define QCA8K_LED_CTRL_REG(_i) (0x050 + (_i) * 4)
+#define QCA8K_LED_CTRL0_REG 0x50
+#define QCA8K_LED_CTRL1_REG 0x54
+#define QCA8K_LED_CTRL2_REG 0x58
+#define QCA8K_LED_CTRL3_REG 0x5C
+#define QCA8K_LED_CTRL_SHIFT(_i) (((_i) % 2) * 16)
+#define QCA8K_LED_CTRL_MASK GENMASK(15, 0)
+#define QCA8K_LED_RULE_MASK GENMASK(13, 0)
+#define QCA8K_LED_BLINK_FREQ_MASK GENMASK(1, 0)
+#define QCA8K_LED_BLINK_FREQ_SHITF 0
+#define QCA8K_LED_BLINK_2HZ 0
+#define QCA8K_LED_BLINK_4HZ 1
+#define QCA8K_LED_BLINK_8HZ 2
+#define QCA8K_LED_BLINK_AUTO 3
+#define QCA8K_LED_LINKUP_OVER_MASK BIT(2)
+#define QCA8K_LED_TX_BLINK_MASK BIT(4)
+#define QCA8K_LED_RX_BLINK_MASK BIT(5)
+#define QCA8K_LED_COL_BLINK_MASK BIT(7)
+#define QCA8K_LED_LINK_10M_EN_MASK BIT(8)
+#define QCA8K_LED_LINK_100M_EN_MASK BIT(9)
+#define QCA8K_LED_LINK_1000M_EN_MASK BIT(10)
+#define QCA8K_LED_POWER_ON_LIGHT_MASK BIT(11)
+#define QCA8K_LED_HALF_DUPLEX_MASK BIT(12)
+#define QCA8K_LED_FULL_DUPLEX_MASK BIT(13)
+#define QCA8K_LED_PATTERN_EN_MASK GENMASK(15, 14)
+#define QCA8K_LED_PATTERN_EN_SHIFT 14
+#define QCA8K_LED_ALWAYS_OFF 0
+#define QCA8K_LED_ALWAYS_BLINK_4HZ 1
+#define QCA8K_LED_ALWAYS_ON 2
+#define QCA8K_LED_RULE_CONTROLLED 3
+
+#define QCA8K_GOL_MAC_ADDR0 0x60
+#define QCA8K_GOL_MAC_ADDR1 0x64
+#define QCA8K_MAX_FRAME_SIZE 0x78
+#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
+#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0)
+#define QCA8K_PORT_STATUS_SPEED_10 0
+#define QCA8K_PORT_STATUS_SPEED_100 0x1
+#define QCA8K_PORT_STATUS_SPEED_1000 0x2
+#define QCA8K_PORT_STATUS_TXMAC BIT(2)
+#define QCA8K_PORT_STATUS_RXMAC BIT(3)
+#define QCA8K_PORT_STATUS_TXFLOW BIT(4)
+#define QCA8K_PORT_STATUS_RXFLOW BIT(5)
+#define QCA8K_PORT_STATUS_DUPLEX BIT(6)
+#define QCA8K_PORT_STATUS_LINK_UP BIT(8)
+#define QCA8K_PORT_STATUS_LINK_AUTO BIT(9)
+#define QCA8K_PORT_STATUS_LINK_PAUSE BIT(10)
+#define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12)
+#define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4))
+#define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2)
+#define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0)
+#define QCA8K_PORT_HDR_CTRL_ALL 2
+#define QCA8K_PORT_HDR_CTRL_MGMT 1
+#define QCA8K_PORT_HDR_CTRL_NONE 0
+#define QCA8K_REG_SGMII_CTRL 0x0e0
+#define QCA8K_SGMII_EN_PLL BIT(1)
+#define QCA8K_SGMII_EN_RX BIT(2)
+#define QCA8K_SGMII_EN_TX BIT(3)
+#define QCA8K_SGMII_EN_SD BIT(4)
+#define QCA8K_SGMII_CLK125M_DELAY BIT(7)
+#define QCA8K_SGMII_MODE_CTRL_MASK GENMASK(23, 22)
+#define QCA8K_SGMII_MODE_CTRL(x) FIELD_PREP(QCA8K_SGMII_MODE_CTRL_MASK, x)
+#define QCA8K_SGMII_MODE_CTRL_BASEX QCA8K_SGMII_MODE_CTRL(0x0)
+#define QCA8K_SGMII_MODE_CTRL_PHY QCA8K_SGMII_MODE_CTRL(0x1)
+#define QCA8K_SGMII_MODE_CTRL_MAC QCA8K_SGMII_MODE_CTRL(0x2)
+
+/* MAC_PWR_SEL registers */
+#define QCA8K_REG_MAC_PWR_SEL 0x0e4
+#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18)
+#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19)
+
+/* EEE control registers */
+#define QCA8K_REG_EEE_CTRL 0x100
+#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
+
+/* TRUNK_HASH_EN registers */
+#define QCA8K_TRUNK_HASH_EN_CTRL 0x270
+#define QCA8K_TRUNK_HASH_SIP_EN BIT(3)
+#define QCA8K_TRUNK_HASH_DIP_EN BIT(2)
+#define QCA8K_TRUNK_HASH_SA_EN BIT(1)
+#define QCA8K_TRUNK_HASH_DA_EN BIT(0)
+#define QCA8K_TRUNK_HASH_MASK GENMASK(3, 0)
+
+/* ACL registers */
+#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
+#define QCA8K_PORT_VLAN_CVID_MASK GENMASK(27, 16)
+#define QCA8K_PORT_VLAN_CVID(x) FIELD_PREP(QCA8K_PORT_VLAN_CVID_MASK, x)
+#define QCA8K_PORT_VLAN_SVID_MASK GENMASK(11, 0)
+#define QCA8K_PORT_VLAN_SVID(x) FIELD_PREP(QCA8K_PORT_VLAN_SVID_MASK, x)
+#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
+#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
+#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
+
+/* Lookup registers */
+#define QCA8K_ATU_TABLE_SIZE 3 /* 12 bytes wide table / sizeof(u32) */
+
+#define QCA8K_REG_ATU_DATA0 0x600
+#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24)
+#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16)
+#define QCA8K_ATU_ADDR4_MASK GENMASK(15, 8)
+#define QCA8K_ATU_ADDR5_MASK GENMASK(7, 0)
+#define QCA8K_REG_ATU_DATA1 0x604
+#define QCA8K_ATU_PORT_MASK GENMASK(22, 16)
+#define QCA8K_ATU_ADDR0_MASK GENMASK(15, 8)
+#define QCA8K_ATU_ADDR1_MASK GENMASK(7, 0)
+#define QCA8K_REG_ATU_DATA2 0x608
+#define QCA8K_ATU_VID_MASK GENMASK(19, 8)
+#define QCA8K_ATU_STATUS_MASK GENMASK(3, 0)
+#define QCA8K_ATU_STATUS_STATIC 0xf
+#define QCA8K_REG_ATU_FUNC 0x60c
+#define QCA8K_ATU_FUNC_BUSY BIT(31)
+#define QCA8K_ATU_FUNC_PORT_EN BIT(14)
+#define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
+#define QCA8K_ATU_FUNC_FULL BIT(12)
+#define QCA8K_ATU_FUNC_PORT_MASK GENMASK(11, 8)
+#define QCA8K_REG_VTU_FUNC0 0x610
+#define QCA8K_VTU_FUNC0_VALID BIT(20)
+#define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
+/* QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(17, 4)
+ * It does contain VLAN_MODE for each port [5:4] for port0,
+ * [7:6] for port1 ... [17:16] for port6. Use virtual port
+ * define to handle this.
+ */
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i) (4 + (_i) * 2)
+#define QCA8K_VTU_FUNC0_EG_MODE_MASK GENMASK(1, 0)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(_i) (GENMASK(1, 0) << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x0)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNMOD(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNMOD << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x1)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_UNTAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_TAG FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x2)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(_i) (QCA8K_VTU_FUNC0_EG_MODE_TAG << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_VTU_FUNC0_EG_MODE_NOT FIELD_PREP(QCA8K_VTU_FUNC0_EG_MODE_MASK, 0x3)
+#define QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(_i) (QCA8K_VTU_FUNC0_EG_MODE_NOT << QCA8K_VTU_FUNC0_EG_MODE_PORT_SHIFT(_i))
+#define QCA8K_REG_VTU_FUNC1 0x614
+#define QCA8K_VTU_FUNC1_BUSY BIT(31)
+#define QCA8K_VTU_FUNC1_VID_MASK GENMASK(27, 16)
+#define QCA8K_VTU_FUNC1_FULL BIT(4)
+#define QCA8K_REG_ATU_CTRL 0x618
+#define QCA8K_ATU_AGE_TIME_MASK GENMASK(15, 0)
+#define QCA8K_ATU_AGE_TIME(x) FIELD_PREP(QCA8K_ATU_AGE_TIME_MASK, (x))
+#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
+#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
+#define QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM GENMASK(7, 4)
+#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
+#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_MASK GENMASK(30, 24)
+#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_MASK GENMASK(22, 16)
+#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_MASK GENMASK(14, 8)
+#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_MASK GENMASK(6, 0)
+#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
+#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_MASK GENMASK(9, 8)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, x)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE QCA8K_PORT_LOOKUP_VLAN_MODE(0x0)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK QCA8K_PORT_LOOKUP_VLAN_MODE(0x1)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK QCA8K_PORT_LOOKUP_VLAN_MODE(0x2)
+#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE QCA8K_PORT_LOOKUP_VLAN_MODE(0x3)
+#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
+#define QCA8K_PORT_LOOKUP_STATE(x) FIELD_PREP(QCA8K_PORT_LOOKUP_STATE_MASK, x)
+#define QCA8K_PORT_LOOKUP_STATE_DISABLED QCA8K_PORT_LOOKUP_STATE(0x0)
+#define QCA8K_PORT_LOOKUP_STATE_BLOCKING QCA8K_PORT_LOOKUP_STATE(0x1)
+#define QCA8K_PORT_LOOKUP_STATE_LISTENING QCA8K_PORT_LOOKUP_STATE(0x2)
+#define QCA8K_PORT_LOOKUP_STATE_LEARNING QCA8K_PORT_LOOKUP_STATE(0x3)
+#define QCA8K_PORT_LOOKUP_STATE_FORWARD QCA8K_PORT_LOOKUP_STATE(0x4)
+#define QCA8K_PORT_LOOKUP_LEARN BIT(20)
+#define QCA8K_PORT_LOOKUP_ING_MIRROR_EN BIT(25)
+
+#define QCA8K_REG_GOL_TRUNK_CTRL0 0x700
+/* 4 trunks max.
+ * The first 7 bits (6:0) are the member bitmap
+ * and bit 7 enables the trunk port.
+ */
+#define QCA8K_REG_GOL_TRUNK_SHIFT(_i) ((_i) * 8)
+#define QCA8K_REG_GOL_TRUNK_EN_MASK BIT(7)
+#define QCA8K_REG_GOL_TRUNK_EN(_i) (QCA8K_REG_GOL_TRUNK_EN_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
+#define QCA8K_REG_GOL_TRUNK_MEMBER_MASK GENMASK(6, 0)
+#define QCA8K_REG_GOL_TRUNK_MEMBER(_i) (QCA8K_REG_GOL_TRUNK_MEMBER_MASK << QCA8K_REG_GOL_TRUNK_SHIFT(_i))
+/* 0x704 for TRUNK 0-1 --- 0x708 for TRUNK 2-3 */
+#define QCA8K_REG_GOL_TRUNK_CTRL(_i) (0x704 + (((_i) / 2) * 4))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK GENMASK(3, 0)
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK BIT(3)
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK GENMASK(2, 0)
+#define QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i) (((_i) / 2) * 16)
+#define QCA8K_REG_GOL_MEM_ID_SHIFT(_i) ((_i) * 4)
+/* Complex shift: FIRST shift for port THEN shift for trunk */
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j) (QCA8K_REG_GOL_MEM_ID_SHIFT(_j) + QCA8K_REG_GOL_TRUNK_ID_SHIFT(_i))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
+#define QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(_i, _j) (QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(_i, _j))
+
+#define QCA8K_REG_GLOBAL_FC_THRESH 0x800
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK GENMASK(24, 16)
+#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XON_THRES_MASK, x)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK GENMASK(8, 0)
+#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) FIELD_PREP(QCA8K_GLOBAL_FC_GOL_XOFF_THRES_MASK, x)
+
+#define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK GENMASK(7, 4)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK GENMASK(11, 8)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK GENMASK(15, 12)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK GENMASK(19, 16)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK GENMASK(23, 20)
+#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK GENMASK(29, 24)
+#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF_MASK, x)
+
+#define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
+#define QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK GENMASK(3, 0)
+#define QCA8K_PORT_HOL_CTRL1_ING(x) FIELD_PREP(QCA8K_PORT_HOL_CTRL1_ING_BUF_MASK, x)
+#define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6)
+#define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7)
+#define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8)
+#define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
+
+/* Pkt edit registers */
+#define QCA8K_EGREES_VLAN_PORT_SHIFT(_i) (16 * ((_i) % 2))
+#define QCA8K_EGREES_VLAN_PORT_MASK(_i) (GENMASK(11, 0) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
+#define QCA8K_EGREES_VLAN_PORT(_i, x) ((x) << QCA8K_EGREES_VLAN_PORT_SHIFT(_i))
+#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2)))
+
+/* L3 registers */
+#define QCA8K_HROUTER_CONTROL 0xe00
+#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_M GENMASK(17, 16)
+#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_S 16
+#define QCA8K_HROUTER_CONTROL_ARP_AGE_MODE 1
+#define QCA8K_HROUTER_PBASED_CONTROL1 0xe08
+#define QCA8K_HROUTER_PBASED_CONTROL2 0xe0c
+#define QCA8K_HNAT_CONTROL 0xe38
+
+/* MIB registers */
+#define QCA8K_PORT_MIB_COUNTER(_i) (0x1000 + (_i) * 0x100)
+
+/* QCA specific MII registers */
+#define MII_ATH_MMD_ADDR 0x0d
+#define MII_ATH_MMD_DATA 0x0e
+
+enum {
+ QCA8K_PORT_SPEED_10M = 0,
+ QCA8K_PORT_SPEED_100M = 1,
+ QCA8K_PORT_SPEED_1000M = 2,
+ QCA8K_PORT_SPEED_ERR = 3,
+};
+
+enum qca8k_fdb_cmd {
+ QCA8K_FDB_FLUSH = 1,
+ QCA8K_FDB_LOAD = 2,
+ QCA8K_FDB_PURGE = 3,
+ QCA8K_FDB_FLUSH_PORT = 5,
+ QCA8K_FDB_NEXT = 6,
+ QCA8K_FDB_SEARCH = 7,
+};
+
+enum qca8k_vlan_cmd {
+ QCA8K_VLAN_FLUSH = 1,
+ QCA8K_VLAN_LOAD = 2,
+ QCA8K_VLAN_PURGE = 3,
+ QCA8K_VLAN_REMOVE_PORT = 4,
+ QCA8K_VLAN_NEXT = 5,
+ QCA8K_VLAN_READ = 6,
+};
+
+enum qca8k_mid_cmd {
+ QCA8K_MIB_FLUSH = 1,
+ QCA8K_MIB_FLUSH_PORT = 2,
+ QCA8K_MIB_CAST = 3,
+};
+
+struct qca8k_priv;
+
+struct qca8k_info_ops {
+ int (*autocast_mib)(struct dsa_switch *ds, int port, u64 *data);
+};
+
+struct qca8k_match_data {
+ u8 id;
+ bool reduced_package;
+ u8 mib_count;
+ const struct qca8k_info_ops *ops;
+};
+
+enum {
+ QCA8K_CPU_PORT0,
+ QCA8K_CPU_PORT6,
+};
+
+struct qca8k_mgmt_eth_data {
+ struct completion rw_done;
+ struct mutex mutex; /* Enforce one mdio read/write at a time */
+ bool ack;
+ u32 seq;
+ u32 data[4];
+};
+
+struct qca8k_mib_eth_data {
+ struct completion rw_done;
+ struct mutex mutex; /* Process one command at a time */
+ refcount_t port_parsed; /* Counter to track parsed port */
+ u8 req_port;
+ u64 *data; /* pointer to ethtool data */
+};
+
+struct qca8k_ports_config {
+ bool sgmii_rx_clk_falling_edge;
+ bool sgmii_tx_clk_falling_edge;
+ bool sgmii_enable_pll;
+ u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
+ u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
+};
+
+struct qca8k_mdio_cache {
+/* The 32bit switch registers are accessed indirectly. To achieve this we need
+ * to set the page of the register. Track the last page that was set to reduce
+ * mdio writes
+ */
+ u16 page;
+};
+
+struct qca8k_pcs {
+ struct phylink_pcs pcs;
+ struct qca8k_priv *priv;
+ int port;
+};
+
+struct qca8k_led_pattern_en {
+ u32 reg;
+ u8 shift;
+};
+
+struct qca8k_led {
+ u8 port_num;
+ u8 led_num;
+ u16 old_rule;
+ struct qca8k_priv *priv;
+ struct led_classdev cdev;
+};
+
+struct qca8k_priv {
+ u8 switch_id;
+ u8 switch_revision;
+ u8 mirror_rx;
+ u8 mirror_tx;
+ u8 lag_hash_mode;
+ /* Each bit corresponds to a port. This switch can support a max of 7 ports.
+ * A bit set to 1 means the port is enabled, 0 means it is disabled.
+ */
+ u8 port_enabled_map;
+ struct qca8k_ports_config ports_config;
+ struct regmap *regmap;
+ struct mii_bus *bus;
+ struct dsa_switch *ds;
+ struct mutex reg_mutex;
+ struct device *dev;
+ struct gpio_desc *reset_gpio;
+ struct net_device *mgmt_master; /* Track if mdio/mib Ethernet is available */
+ struct qca8k_mgmt_eth_data mgmt_eth_data;
+ struct qca8k_mib_eth_data mib_eth_data;
+ struct qca8k_mdio_cache mdio_cache;
+ struct qca8k_pcs pcs_port_0;
+ struct qca8k_pcs pcs_port_6;
+ const struct qca8k_match_data *info;
+ struct qca8k_led ports_led[QCA8K_LED_COUNT];
+};
+
+struct qca8k_mib_desc {
+ unsigned int size;
+ unsigned int offset;
+ const char *name;
+};
+
+struct qca8k_fdb {
+ u16 vid;
+ u8 port_mask;
+ u8 aging;
+ u8 mac[6];
+};
+
+static inline u32 qca8k_port_to_phy(int port)
+{
+ /* From Andrew Lunn:
+ * Port 0 has no internal phy.
+ * Port 1 has an internal PHY at MDIO address 0.
+ * Port 2 has an internal PHY at MDIO address 1.
+ * ...
+ * Port 5 has an internal PHY at MDIO address 4.
+ * Port 6 has no internal PHY.
+ */
+
+ return port - 1;
+}
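
In other words, user ports 1-5 map to internal PHY (MDIO) addresses 0-4, while ports 0 and 6 are CPU ports with no internal PHY. A trivial sketch of the mapping, purely for illustration:

    #include <stdio.h>

    int main(void)
    {
        /* ports 0 and 6 are CPU ports and are skipped by callers */
        for (int port = 1; port <= 5; port++)
            printf("port %d -> internal PHY %d\n", port, port - 1);
        return 0;
    }
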
+
+/* Common setup function */
+extern const struct qca8k_mib_desc ar8327_mib[];
+extern const struct regmap_access_table qca8k_readable_table;
+int qca8k_mib_init(struct qca8k_priv *priv);
+void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable);
+int qca8k_read_switch_id(struct qca8k_priv *priv);
+
+/* Common read/write/rmw function */
+int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val);
+int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val);
+int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val);
+
+/* Common ops function */
+void qca8k_fdb_flush(struct qca8k_priv *priv);
+
+/* Common ethtool stats function */
+void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data);
+void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data);
+int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
+
+/* Common eee function */
+int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee);
+int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
+
+/* Common bridge function */
+void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
+int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack);
+void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge);
+
+/* Common port enable/disable function */
+int qca8k_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy);
+void qca8k_port_disable(struct dsa_switch *ds, int port);
+
+/* Common MTU function */
+int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu);
+int qca8k_port_max_mtu(struct dsa_switch *ds, int port);
+
+/* Common fast age function */
+void qca8k_port_fast_age(struct dsa_switch *ds, int port);
+int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs);
+
+/* Common FDB function */
+int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
+ u16 port_mask, u16 vid);
+int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db);
+int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+
+/* Common MDB function */
+int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db);
+
+/* Common port mirror function */
+int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress, struct netlink_ext_ack *extack);
+void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
+
+/* Common port VLAN function */
+int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct netlink_ext_ack *extack);
+int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+
+/* Common port LAG function */
+int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
+int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
+ struct dsa_lag lag);
+
+#endif /* __QCA8K_H */
diff --git a/drivers/net/dsa/qca/qca8k_leds.h b/drivers/net/dsa/qca/qca8k_leds.h
new file mode 100644
index 000000000000..ab367f05b173
--- /dev/null
+++ b/drivers/net/dsa/qca/qca8k_leds.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __QCA8K_LEDS_H
+#define __QCA8K_LEDS_H
+
+/* Leds Support function */
+#ifdef CONFIG_NET_DSA_QCA8K_LEDS_SUPPORT
+int qca8k_setup_led_ctrl(struct qca8k_priv *priv);
+#else
+static inline int qca8k_setup_led_ctrl(struct qca8k_priv *priv)
+{
+ return 0;
+}
+#endif
+
+#endif /* __QCA8K_LEDS_H */
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
deleted file mode 100644
index 128b8cf85e08..000000000000
--- a/drivers/net/dsa/qca8k.h
+++ /dev/null
@@ -1,311 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
- * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __QCA8K_H
-#define __QCA8K_H
-
-#include <linux/delay.h>
-#include <linux/regmap.h>
-#include <linux/gpio.h>
-
-#define QCA8K_NUM_PORTS 7
-#define QCA8K_NUM_CPU_PORTS 2
-#define QCA8K_MAX_MTU 9000
-
-#define PHY_ID_QCA8327 0x004dd034
-#define QCA8K_ID_QCA8327 0x12
-#define PHY_ID_QCA8337 0x004dd036
-#define QCA8K_ID_QCA8337 0x13
-
-#define QCA8K_BUSY_WAIT_TIMEOUT 2000
-
-#define QCA8K_NUM_FDB_RECORDS 2048
-
-#define QCA8K_PORT_VID_DEF 1
-
-/* Global control registers */
-#define QCA8K_REG_MASK_CTRL 0x000
-#define QCA8K_MASK_CTRL_REV_ID_MASK GENMASK(7, 0)
-#define QCA8K_MASK_CTRL_REV_ID(x) ((x) >> 0)
-#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
-#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8)
-#define QCA8K_REG_PORT0_PAD_CTRL 0x004
-#define QCA8K_PORT0_PAD_MAC06_EXCHANGE_EN BIT(31)
-#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
-#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18)
-#define QCA8K_REG_PORT5_PAD_CTRL 0x008
-#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
-#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
-#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22)
-#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22)
-#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20)
-#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20)
-#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
-#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
-#define QCA8K_MAX_DELAY 3
-#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
-#define QCA8K_REG_PWS 0x010
-#define QCA8K_PWS_POWER_ON_SEL BIT(31)
-/* This reg is only valid for QCA832x and toggle the package
- * type from 176 pin (by default) to 148 pin used on QCA8327
- */
-#define QCA8327_PWS_PACKAGE148_EN BIT(30)
-#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24)
-#define QCA8K_PWS_SERDES_AEN_DIS BIT(7)
-#define QCA8K_REG_MODULE_EN 0x030
-#define QCA8K_MODULE_EN_MIB BIT(0)
-#define QCA8K_REG_MIB 0x034
-#define QCA8K_MIB_FLUSH BIT(24)
-#define QCA8K_MIB_CPU_KEEP BIT(20)
-#define QCA8K_MIB_BUSY BIT(17)
-#define QCA8K_MDIO_MASTER_CTRL 0x3c
-#define QCA8K_MDIO_MASTER_BUSY BIT(31)
-#define QCA8K_MDIO_MASTER_EN BIT(30)
-#define QCA8K_MDIO_MASTER_READ BIT(27)
-#define QCA8K_MDIO_MASTER_WRITE 0
-#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
-#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21)
-#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16)
-#define QCA8K_MDIO_MASTER_DATA(x) (x)
-#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
-#define QCA8K_MDIO_MASTER_MAX_PORTS 5
-#define QCA8K_MDIO_MASTER_MAX_REG 32
-#define QCA8K_GOL_MAC_ADDR0 0x60
-#define QCA8K_GOL_MAC_ADDR1 0x64
-#define QCA8K_MAX_FRAME_SIZE 0x78
-#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
-#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0)
-#define QCA8K_PORT_STATUS_SPEED_10 0
-#define QCA8K_PORT_STATUS_SPEED_100 0x1
-#define QCA8K_PORT_STATUS_SPEED_1000 0x2
-#define QCA8K_PORT_STATUS_TXMAC BIT(2)
-#define QCA8K_PORT_STATUS_RXMAC BIT(3)
-#define QCA8K_PORT_STATUS_TXFLOW BIT(4)
-#define QCA8K_PORT_STATUS_RXFLOW BIT(5)
-#define QCA8K_PORT_STATUS_DUPLEX BIT(6)
-#define QCA8K_PORT_STATUS_LINK_UP BIT(8)
-#define QCA8K_PORT_STATUS_LINK_AUTO BIT(9)
-#define QCA8K_PORT_STATUS_LINK_PAUSE BIT(10)
-#define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12)
-#define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4))
-#define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2)
-#define QCA8K_PORT_HDR_CTRL_RX_S 2
-#define QCA8K_PORT_HDR_CTRL_TX_MASK GENMASK(1, 0)
-#define QCA8K_PORT_HDR_CTRL_TX_S 0
-#define QCA8K_PORT_HDR_CTRL_ALL 2
-#define QCA8K_PORT_HDR_CTRL_MGMT 1
-#define QCA8K_PORT_HDR_CTRL_NONE 0
-#define QCA8K_REG_SGMII_CTRL 0x0e0
-#define QCA8K_SGMII_EN_PLL BIT(1)
-#define QCA8K_SGMII_EN_RX BIT(2)
-#define QCA8K_SGMII_EN_TX BIT(3)
-#define QCA8K_SGMII_EN_SD BIT(4)
-#define QCA8K_SGMII_CLK125M_DELAY BIT(7)
-#define QCA8K_SGMII_MODE_CTRL_MASK (BIT(22) | BIT(23))
-#define QCA8K_SGMII_MODE_CTRL_BASEX (0 << 22)
-#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22)
-#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22)
-
-/* MAC_PWR_SEL registers */
-#define QCA8K_REG_MAC_PWR_SEL 0x0e4
-#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18)
-#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19)
-
-/* EEE control registers */
-#define QCA8K_REG_EEE_CTRL 0x100
-#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
-
-/* ACL registers */
-#define QCA8K_REG_PORT_VLAN_CTRL0(_i) (0x420 + (_i * 8))
-#define QCA8K_PORT_VLAN_CVID(x) (x << 16)
-#define QCA8K_PORT_VLAN_SVID(x) x
-#define QCA8K_REG_PORT_VLAN_CTRL1(_i) (0x424 + (_i * 8))
-#define QCA8K_REG_IPV4_PRI_BASE_ADDR 0x470
-#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
-
-/* Lookup registers */
-#define QCA8K_REG_ATU_DATA0 0x600
-#define QCA8K_ATU_ADDR2_S 24
-#define QCA8K_ATU_ADDR3_S 16
-#define QCA8K_ATU_ADDR4_S 8
-#define QCA8K_REG_ATU_DATA1 0x604
-#define QCA8K_ATU_PORT_M 0x7f
-#define QCA8K_ATU_PORT_S 16
-#define QCA8K_ATU_ADDR0_S 8
-#define QCA8K_REG_ATU_DATA2 0x608
-#define QCA8K_ATU_VID_M 0xfff
-#define QCA8K_ATU_VID_S 8
-#define QCA8K_ATU_STATUS_M 0xf
-#define QCA8K_ATU_STATUS_STATIC 0xf
-#define QCA8K_REG_ATU_FUNC 0x60c
-#define QCA8K_ATU_FUNC_BUSY BIT(31)
-#define QCA8K_ATU_FUNC_PORT_EN BIT(14)
-#define QCA8K_ATU_FUNC_MULTI_EN BIT(13)
-#define QCA8K_ATU_FUNC_FULL BIT(12)
-#define QCA8K_ATU_FUNC_PORT_M 0xf
-#define QCA8K_ATU_FUNC_PORT_S 8
-#define QCA8K_REG_VTU_FUNC0 0x610
-#define QCA8K_VTU_FUNC0_VALID BIT(20)
-#define QCA8K_VTU_FUNC0_IVL_EN BIT(19)
-#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2)
-#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3
-#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0
-#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1
-#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2
-#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3
-#define QCA8K_REG_VTU_FUNC1 0x614
-#define QCA8K_VTU_FUNC1_BUSY BIT(31)
-#define QCA8K_VTU_FUNC1_VID_S 16
-#define QCA8K_VTU_FUNC1_FULL BIT(4)
-#define QCA8K_REG_GLOBAL_FW_CTRL0 0x620
-#define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
-#define QCA8K_REG_GLOBAL_FW_CTRL1 0x624
-#define QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S 24
-#define QCA8K_GLOBAL_FW_CTRL1_BC_DP_S 16
-#define QCA8K_GLOBAL_FW_CTRL1_MC_DP_S 8
-#define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0
-#define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc)
-#define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE GENMASK(9, 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE (0 << 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK (1 << 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK (2 << 8)
-#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE (3 << 8)
-#define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16)
-#define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_LISTENING (2 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_LEARNING (3 << 16)
-#define QCA8K_PORT_LOOKUP_STATE_FORWARD (4 << 16)
-#define QCA8K_PORT_LOOKUP_STATE GENMASK(18, 16)
-#define QCA8K_PORT_LOOKUP_LEARN BIT(20)
-
-#define QCA8K_REG_GLOBAL_FC_THRESH 0x800
-#define QCA8K_GLOBAL_FC_GOL_XON_THRES(x) ((x) << 16)
-#define QCA8K_GLOBAL_FC_GOL_XON_THRES_S GENMASK(24, 16)
-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES(x) ((x) << 0)
-#define QCA8K_GLOBAL_FC_GOL_XOFF_THRES_S GENMASK(8, 0)
-
-#define QCA8K_REG_PORT_HOL_CTRL0(_i) (0x970 + (_i) * 0x8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0_BUF GENMASK(3, 0)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI0(x) ((x) << 0)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1_BUF GENMASK(7, 4)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI1(x) ((x) << 4)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2_BUF GENMASK(11, 8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI2(x) ((x) << 8)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3_BUF GENMASK(15, 12)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI3(x) ((x) << 12)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4_BUF GENMASK(19, 16)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI4(x) ((x) << 16)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5_BUF GENMASK(23, 20)
-#define QCA8K_PORT_HOL_CTRL0_EG_PRI5(x) ((x) << 20)
-#define QCA8K_PORT_HOL_CTRL0_EG_PORT_BUF GENMASK(29, 24)
-#define QCA8K_PORT_HOL_CTRL0_EG_PORT(x) ((x) << 24)
-
-#define QCA8K_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8)
-#define QCA8K_PORT_HOL_CTRL1_ING_BUF GENMASK(3, 0)
-#define QCA8K_PORT_HOL_CTRL1_ING(x) ((x) << 0)
-#define QCA8K_PORT_HOL_CTRL1_EG_PRI_BUF_EN BIT(6)
-#define QCA8K_PORT_HOL_CTRL1_EG_PORT_BUF_EN BIT(7)
-#define QCA8K_PORT_HOL_CTRL1_WRED_EN BIT(8)
-#define QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16)
-
-/* Pkt edit registers */
-#define QCA8K_EGRESS_VLAN(x) (0x0c70 + (4 * (x / 2)))
-
-/* L3 registers */
-#define QCA8K_HROUTER_CONTROL 0xe00
-#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_M GENMASK(17, 16)
-#define QCA8K_HROUTER_CONTROL_GLB_LOCKTIME_S 16
-#define QCA8K_HROUTER_CONTROL_ARP_AGE_MODE 1
-#define QCA8K_HROUTER_PBASED_CONTROL1 0xe08
-#define QCA8K_HROUTER_PBASED_CONTROL2 0xe0c
-#define QCA8K_HNAT_CONTROL 0xe38
-
-/* MIB registers */
-#define QCA8K_PORT_MIB_COUNTER(_i) (0x1000 + (_i) * 0x100)
-
-/* QCA specific MII registers */
-#define MII_ATH_MMD_ADDR 0x0d
-#define MII_ATH_MMD_DATA 0x0e
-
-enum {
- QCA8K_PORT_SPEED_10M = 0,
- QCA8K_PORT_SPEED_100M = 1,
- QCA8K_PORT_SPEED_1000M = 2,
- QCA8K_PORT_SPEED_ERR = 3,
-};
-
-enum qca8k_fdb_cmd {
- QCA8K_FDB_FLUSH = 1,
- QCA8K_FDB_LOAD = 2,
- QCA8K_FDB_PURGE = 3,
- QCA8K_FDB_NEXT = 6,
- QCA8K_FDB_SEARCH = 7,
-};
-
-enum qca8k_vlan_cmd {
- QCA8K_VLAN_FLUSH = 1,
- QCA8K_VLAN_LOAD = 2,
- QCA8K_VLAN_PURGE = 3,
- QCA8K_VLAN_REMOVE_PORT = 4,
- QCA8K_VLAN_NEXT = 5,
- QCA8K_VLAN_READ = 6,
-};
-
-struct ar8xxx_port_status {
- int enabled;
-};
-
-struct qca8k_match_data {
- u8 id;
- bool reduced_package;
-};
-
-enum {
- QCA8K_CPU_PORT0,
- QCA8K_CPU_PORT6,
-};
-
-struct qca8k_ports_config {
- bool sgmii_rx_clk_falling_edge;
- bool sgmii_tx_clk_falling_edge;
- bool sgmii_enable_pll;
- u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
- u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
-};
-
-struct qca8k_priv {
- u8 switch_id;
- u8 switch_revision;
- bool legacy_phy_port_mapping;
- struct qca8k_ports_config ports_config;
- struct regmap *regmap;
- struct mii_bus *bus;
- struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
- struct dsa_switch *ds;
- struct mutex reg_mutex;
- struct device *dev;
- struct dsa_switch_ops ops;
- struct gpio_desc *reset_gpio;
- unsigned int port_mtu[QCA8K_NUM_PORTS];
-};
-
-struct qca8k_mib_desc {
- unsigned int size;
- unsigned int offset;
- const char *name;
-};
-
-struct qca8k_fdb {
- u16 vid;
- u8 port_mask;
- u8 aging;
- u8 mac[6];
-};
-
-#endif /* __QCA8K_H */
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
deleted file mode 100644
index c66ebd0ee217..000000000000
--- a/drivers/net/dsa/realtek-smi-core.c
+++ /dev/null
@@ -1,523 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* Realtek Simple Management Interface (SMI) driver
- * It can be discussed how "simple" this interface is.
- *
- * The SMI protocol piggy-backs the MDIO MDC and MDIO signals levels
- * but the protocol is not MDIO at all. Instead it is a Realtek
- * pecularity that need to bit-bang the lines in a special way to
- * communicate with the switch.
- *
- * ASICs we intend to support with this driver:
- *
- * RTL8366 - The original version, apparently
- * RTL8369 - Similar enough to have the same datsheet as RTL8366
- * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
- * different register layout from the other two
- * RTL8366S - Is this "RTL8366 super"?
- * RTL8367 - Has an OpenWRT driver as well
- * RTL8368S - Seems to be an alternative name for RTL8366RB
- * RTL8370 - Also uses SMI
- *
- * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
- * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
- * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
- * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
- * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/skbuff.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_mdio.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-#include <linux/bitops.h>
-#include <linux/if_bridge.h>
-
-#include "realtek-smi-core.h"
-
-#define REALTEK_SMI_ACK_RETRY_COUNT 5
-#define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */
-#define REALTEK_SMI_HW_START_DELAY 100 /* msecs */
-
-static inline void realtek_smi_clk_delay(struct realtek_smi *smi)
-{
- ndelay(smi->clk_delay);
-}
-
-static void realtek_smi_start(struct realtek_smi *smi)
-{
- /* Set GPIO pins to output mode, with initial state:
- * SCK = 0, SDA = 1
- */
- gpiod_direction_output(smi->mdc, 0);
- gpiod_direction_output(smi->mdio, 1);
- realtek_smi_clk_delay(smi);
-
- /* CLK 1: 0 -> 1, 1 -> 0 */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
-
- /* CLK 2: */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 1);
-}
-
-static void realtek_smi_stop(struct realtek_smi *smi)
-{
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 0);
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdio, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 1);
-
- /* Add a click */
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 1);
-
- /* Set GPIO pins to input mode */
- gpiod_direction_input(smi->mdio);
- gpiod_direction_input(smi->mdc);
-}
-
-static void realtek_smi_write_bits(struct realtek_smi *smi, u32 data, u32 len)
-{
- for (; len > 0; len--) {
- realtek_smi_clk_delay(smi);
-
- /* Prepare data */
- gpiod_set_value(smi->mdio, !!(data & (1 << (len - 1))));
- realtek_smi_clk_delay(smi);
-
- /* Clocking */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- gpiod_set_value(smi->mdc, 0);
- }
-}
-
-static void realtek_smi_read_bits(struct realtek_smi *smi, u32 len, u32 *data)
-{
- gpiod_direction_input(smi->mdio);
-
- for (*data = 0; len > 0; len--) {
- u32 u;
-
- realtek_smi_clk_delay(smi);
-
- /* Clocking */
- gpiod_set_value(smi->mdc, 1);
- realtek_smi_clk_delay(smi);
- u = !!gpiod_get_value(smi->mdio);
- gpiod_set_value(smi->mdc, 0);
-
- *data |= (u << (len - 1));
- }
-
- gpiod_direction_output(smi->mdio, 0);
-}
-
-static int realtek_smi_wait_for_ack(struct realtek_smi *smi)
-{
- int retry_cnt;
-
- retry_cnt = 0;
- do {
- u32 ack;
-
- realtek_smi_read_bits(smi, 1, &ack);
- if (ack == 0)
- break;
-
- if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
- dev_err(smi->dev, "ACK timeout\n");
- return -ETIMEDOUT;
- }
- } while (1);
-
- return 0;
-}
-
-static int realtek_smi_write_byte(struct realtek_smi *smi, u8 data)
-{
- realtek_smi_write_bits(smi, data, 8);
- return realtek_smi_wait_for_ack(smi);
-}
-
-static int realtek_smi_write_byte_noack(struct realtek_smi *smi, u8 data)
-{
- realtek_smi_write_bits(smi, data, 8);
- return 0;
-}
-
-static int realtek_smi_read_byte0(struct realtek_smi *smi, u8 *data)
-{
- u32 t;
-
- /* Read data */
- realtek_smi_read_bits(smi, 8, &t);
- *data = (t & 0xff);
-
- /* Send an ACK */
- realtek_smi_write_bits(smi, 0x00, 1);
-
- return 0;
-}
-
-static int realtek_smi_read_byte1(struct realtek_smi *smi, u8 *data)
-{
- u32 t;
-
- /* Read data */
- realtek_smi_read_bits(smi, 8, &t);
- *data = (t & 0xff);
-
- /* Send an ACK */
- realtek_smi_write_bits(smi, 0x01, 1);
-
- return 0;
-}
-
-static int realtek_smi_read_reg(struct realtek_smi *smi, u32 addr, u32 *data)
-{
- unsigned long flags;
- u8 lo = 0;
- u8 hi = 0;
- int ret;
-
- spin_lock_irqsave(&smi->lock, flags);
-
- realtek_smi_start(smi);
-
- /* Send READ command */
- ret = realtek_smi_write_byte(smi, smi->cmd_read);
- if (ret)
- goto out;
-
- /* Set ADDR[7:0] */
- ret = realtek_smi_write_byte(smi, addr & 0xff);
- if (ret)
- goto out;
-
- /* Set ADDR[15:8] */
- ret = realtek_smi_write_byte(smi, addr >> 8);
- if (ret)
- goto out;
-
- /* Read DATA[7:0] */
- realtek_smi_read_byte0(smi, &lo);
- /* Read DATA[15:8] */
- realtek_smi_read_byte1(smi, &hi);
-
- *data = ((u32)lo) | (((u32)hi) << 8);
-
- ret = 0;
-
- out:
- realtek_smi_stop(smi);
- spin_unlock_irqrestore(&smi->lock, flags);
-
- return ret;
-}
-
-static int realtek_smi_write_reg(struct realtek_smi *smi,
- u32 addr, u32 data, bool ack)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&smi->lock, flags);
-
- realtek_smi_start(smi);
-
- /* Send WRITE command */
- ret = realtek_smi_write_byte(smi, smi->cmd_write);
- if (ret)
- goto out;
-
- /* Set ADDR[7:0] */
- ret = realtek_smi_write_byte(smi, addr & 0xff);
- if (ret)
- goto out;
-
- /* Set ADDR[15:8] */
- ret = realtek_smi_write_byte(smi, addr >> 8);
- if (ret)
- goto out;
-
- /* Write DATA[7:0] */
- ret = realtek_smi_write_byte(smi, data & 0xff);
- if (ret)
- goto out;
-
- /* Write DATA[15:8] */
- if (ack)
- ret = realtek_smi_write_byte(smi, data >> 8);
- else
- ret = realtek_smi_write_byte_noack(smi, data >> 8);
- if (ret)
- goto out;
-
- ret = 0;
-
- out:
- realtek_smi_stop(smi);
- spin_unlock_irqrestore(&smi->lock, flags);
-
- return ret;
-}
-
-/* There is one single case when we need to use this accessor and that
- * is when issuing a soft reset. Since the device resets as soon as we write
- * that bit, no ACK will come back for natural reasons.
- */
-int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
- u32 data)
-{
- return realtek_smi_write_reg(smi, addr, data, false);
-}
-EXPORT_SYMBOL_GPL(realtek_smi_write_reg_noack);
-
-/* Regmap accessors */
-
-static int realtek_smi_write(void *ctx, u32 reg, u32 val)
-{
- struct realtek_smi *smi = ctx;
-
- return realtek_smi_write_reg(smi, reg, val, true);
-}
-
-static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
-{
- struct realtek_smi *smi = ctx;
-
- return realtek_smi_read_reg(smi, reg, val);
-}
-
-static const struct regmap_config realtek_smi_mdio_regmap_config = {
- .reg_bits = 10, /* A4..A0 R4..R0 */
- .val_bits = 16,
- .reg_stride = 1,
- /* PHY regs are at 0x8000 */
- .max_register = 0xffff,
- .reg_format_endian = REGMAP_ENDIAN_BIG,
- .reg_read = realtek_smi_read,
- .reg_write = realtek_smi_write,
- .cache_type = REGCACHE_NONE,
-};
-
-static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
-{
- struct realtek_smi *smi = bus->priv;
-
- return smi->ops->phy_read(smi, addr, regnum);
-}
-
-static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
- u16 val)
-{
- struct realtek_smi *smi = bus->priv;
-
- return smi->ops->phy_write(smi, addr, regnum, val);
-}
-
-int realtek_smi_setup_mdio(struct realtek_smi *smi)
-{
- struct device_node *mdio_np;
- int ret;
-
- mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
- if (!mdio_np) {
- dev_err(smi->dev, "no MDIO bus node\n");
- return -ENODEV;
- }
-
- smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
- if (!smi->slave_mii_bus) {
- ret = -ENOMEM;
- goto err_put_node;
- }
- smi->slave_mii_bus->priv = smi;
- smi->slave_mii_bus->name = "SMI slave MII";
- smi->slave_mii_bus->read = realtek_smi_mdio_read;
- smi->slave_mii_bus->write = realtek_smi_mdio_write;
- snprintf(smi->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
- smi->ds->index);
- smi->slave_mii_bus->dev.of_node = mdio_np;
- smi->slave_mii_bus->parent = smi->dev;
- smi->ds->slave_mii_bus = smi->slave_mii_bus;
-
- ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np);
- if (ret) {
- dev_err(smi->dev, "unable to register MDIO bus %s\n",
- smi->slave_mii_bus->id);
- goto err_put_node;
- }
-
- return 0;
-
-err_put_node:
- of_node_put(mdio_np);
-
- return ret;
-}
-
-static int realtek_smi_probe(struct platform_device *pdev)
-{
- const struct realtek_smi_variant *var;
- struct device *dev = &pdev->dev;
- struct realtek_smi *smi;
- struct device_node *np;
- int ret;
-
- var = of_device_get_match_data(dev);
- np = dev->of_node;
-
- smi = devm_kzalloc(dev, sizeof(*smi) + var->chip_data_sz, GFP_KERNEL);
- if (!smi)
- return -ENOMEM;
- smi->chip_data = (void *)smi + sizeof(*smi);
- smi->map = devm_regmap_init(dev, NULL, smi,
- &realtek_smi_mdio_regmap_config);
- if (IS_ERR(smi->map)) {
- ret = PTR_ERR(smi->map);
- dev_err(dev, "regmap init failed: %d\n", ret);
- return ret;
- }
-
- /* Link forward and backward */
- smi->dev = dev;
- smi->clk_delay = var->clk_delay;
- smi->cmd_read = var->cmd_read;
- smi->cmd_write = var->cmd_write;
- smi->ops = var->ops;
-
- dev_set_drvdata(dev, smi);
- spin_lock_init(&smi->lock);
-
- /* TODO: if power is software controlled, set up any regulators here */
-
- /* Assert then deassert RESET */
- smi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(smi->reset)) {
- dev_err(dev, "failed to get RESET GPIO\n");
- return PTR_ERR(smi->reset);
- }
- msleep(REALTEK_SMI_HW_STOP_DELAY);
- gpiod_set_value(smi->reset, 0);
- msleep(REALTEK_SMI_HW_START_DELAY);
- dev_info(dev, "deasserted RESET\n");
-
- /* Fetch MDIO pins */
- smi->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
- if (IS_ERR(smi->mdc))
- return PTR_ERR(smi->mdc);
- smi->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
- if (IS_ERR(smi->mdio))
- return PTR_ERR(smi->mdio);
-
- smi->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
-
- ret = smi->ops->detect(smi);
- if (ret) {
- dev_err(dev, "unable to detect switch\n");
- return ret;
- }
-
- smi->ds = devm_kzalloc(dev, sizeof(*smi->ds), GFP_KERNEL);
- if (!smi->ds)
- return -ENOMEM;
-
- smi->ds->dev = dev;
- smi->ds->num_ports = smi->num_ports;
- smi->ds->priv = smi;
-
- smi->ds->ops = var->ds_ops;
- ret = dsa_register_switch(smi->ds);
- if (ret) {
- dev_err(dev, "unable to register switch ret = %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-static int realtek_smi_remove(struct platform_device *pdev)
-{
- struct realtek_smi *smi = platform_get_drvdata(pdev);
-
- if (!smi)
- return 0;
-
- dsa_unregister_switch(smi->ds);
- if (smi->slave_mii_bus)
- of_node_put(smi->slave_mii_bus->dev.of_node);
- gpiod_set_value(smi->reset, 1);
-
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static void realtek_smi_shutdown(struct platform_device *pdev)
-{
- struct realtek_smi *smi = platform_get_drvdata(pdev);
-
- if (!smi)
- return;
-
- dsa_switch_shutdown(smi->ds);
-
- platform_set_drvdata(pdev, NULL);
-}
-
-static const struct of_device_id realtek_smi_of_match[] = {
- {
- .compatible = "realtek,rtl8366rb",
- .data = &rtl8366rb_variant,
- },
- {
- /* FIXME: add support for RTL8366S and more */
- .compatible = "realtek,rtl8366s",
- .data = NULL,
- },
- {
- .compatible = "realtek,rtl8365mb",
- .data = &rtl8365mb_variant,
- },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
-
-static struct platform_driver realtek_smi_driver = {
- .driver = {
- .name = "realtek-smi",
- .of_match_table = of_match_ptr(realtek_smi_of_match),
- },
- .probe = realtek_smi_probe,
- .remove = realtek_smi_remove,
- .shutdown = realtek_smi_shutdown,
-};
-module_platform_driver(realtek_smi_driver);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
new file mode 100644
index 000000000000..060165a85fb7
--- /dev/null
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menuconfig NET_DSA_REALTEK
+ tristate "Realtek Ethernet switch family support"
+ depends on NET_DSA
+ select FIXED_PHY
+ select IRQ_DOMAIN
+ select REALTEK_PHY
+ select REGMAP
+ help
+ Select to enable support for Realtek Ethernet switch chips.
+
+ Note that at least one interface driver must be enabled for the
+ subdrivers to be loaded. Moreover, an interface driver cannot achieve
+ anything without at least one subdriver enabled.
+
+if NET_DSA_REALTEK
+
+config NET_DSA_REALTEK_MDIO
+ tristate "Realtek MDIO interface driver"
+ depends on OF
+ depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
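+	# The "A || !A" dependencies below evaluate to m whenever subdriver A is
+	# a module, which forces this interface driver to be a module as well so
+	# that built-in code never calls into modular subdriver code.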
+ depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+ depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
+ help
+ Select to enable support for registering switches configured
+ through MDIO.
+
+config NET_DSA_REALTEK_SMI
+ tristate "Realtek SMI interface driver"
+ depends on OF
+ depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+ depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+ depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
+ help
+ Select to enable support for registering switches connected
+ through SMI.
+
+config NET_DSA_REALTEK_RTL8365MB
+ tristate "Realtek RTL8365MB switch subdriver"
+ imply NET_DSA_REALTEK_SMI
+ imply NET_DSA_REALTEK_MDIO
+ select NET_DSA_TAG_RTL8_4
+ help
+ Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
+
+config NET_DSA_REALTEK_RTL8366RB
+ tristate "Realtek RTL8366RB switch subdriver"
+ imply NET_DSA_REALTEK_SMI
+ imply NET_DSA_REALTEK_MDIO
+ select NET_DSA_TAG_RTL4_A
+ help
+ Select to enable support for Realtek RTL8366RB.
+
+endif
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
new file mode 100644
index 000000000000..0aab57252a7c
--- /dev/null
+++ b/drivers/net/dsa/realtek/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_NET_DSA_REALTEK_MDIO) += realtek-mdio.o
+obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
+obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
+rtl8366-objs := rtl8366-core.o rtl8366rb.o
+obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
new file mode 100644
index 000000000000..5a8fe707ca25
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Realtek MDIO interface driver
+ *
+ * ASICs we intend to support with this driver:
+ *
+ * RTL8366 - The original version, apparently
+ * RTL8369 - Similar enough to have the same datasheet as RTL8366
+ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
+ * different register layout from the other two
+ * RTL8366S - Is this "RTL8366 super"?
+ * RTL8367 - Has an OpenWRT driver as well
+ * RTL8368S - Seems to be an alternative name for RTL8366RB
+ * RTL8370 - Also uses SMI
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/overflow.h>
+#include <linux/regmap.h>
+
+#include "realtek.h"
+
+/* Read/write via mdiobus */
+#define REALTEK_MDIO_CTRL0_REG 31
+#define REALTEK_MDIO_START_REG 29
+#define REALTEK_MDIO_CTRL1_REG 21
+#define REALTEK_MDIO_ADDRESS_REG 23
+#define REALTEK_MDIO_DATA_WRITE_REG 24
+#define REALTEK_MDIO_DATA_READ_REG 25
+
+#define REALTEK_MDIO_START_OP 0xFFFF
+#define REALTEK_MDIO_ADDR_OP 0x000E
+#define REALTEK_MDIO_READ_OP 0x0001
+#define REALTEK_MDIO_WRITE_OP 0x0003
+
+static int realtek_mdio_write(void *ctx, u32 reg, u32 val)
+{
+ struct realtek_priv *priv = ctx;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ mutex_lock(&bus->mdio_lock);
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL0_REG, REALTEK_MDIO_ADDR_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_ADDRESS_REG, reg);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_DATA_WRITE_REG, val);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL1_REG, REALTEK_MDIO_WRITE_OP);
+
+out_unlock:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int realtek_mdio_read(void *ctx, u32 reg, u32 *val)
+{
+ struct realtek_priv *priv = ctx;
+ struct mii_bus *bus = priv->bus;
+ int ret;
+
+ mutex_lock(&bus->mdio_lock);
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL0_REG, REALTEK_MDIO_ADDR_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_ADDRESS_REG, reg);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->write(bus, priv->mdio_addr, REALTEK_MDIO_CTRL1_REG, REALTEK_MDIO_READ_OP);
+ if (ret)
+ goto out_unlock;
+
+ ret = bus->read(bus, priv->mdio_addr, REALTEK_MDIO_DATA_READ_REG);
+ if (ret >= 0) {
+ *val = ret;
+ ret = 0;
+ }
+
+out_unlock:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static void realtek_mdio_lock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_lock(&priv->map_lock);
+}
+
+static void realtek_mdio_unlock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_unlock(&priv->map_lock);
+}
+
+static const struct regmap_config realtek_mdio_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_mdio_read,
+ .reg_write = realtek_mdio_write,
+ .cache_type = REGCACHE_NONE,
+ .lock = realtek_mdio_lock,
+ .unlock = realtek_mdio_unlock,
+};
+
+static const struct regmap_config realtek_mdio_nolock_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_mdio_read,
+ .reg_write = realtek_mdio_write,
+ .cache_type = REGCACHE_NONE,
+ .disable_locking = true,
+};
+
+static int realtek_mdio_probe(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv;
+ struct device *dev = &mdiodev->dev;
+ const struct realtek_variant *var;
+ struct regmap_config rc;
+ struct device_node *np;
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ if (!var)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&mdiodev->dev,
+ size_add(sizeof(*priv), var->chip_data_sz),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ mutex_init(&priv->map_lock);
+
+ rc = realtek_mdio_regmap_config;
+ rc.lock_arg = priv;
+ priv->map = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map)) {
+ ret = PTR_ERR(priv->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ rc = realtek_mdio_nolock_regmap_config;
+ priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map_nolock)) {
+ ret = PTR_ERR(priv->map_nolock);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ priv->mdio_addr = mdiodev->addr;
+ priv->bus = mdiodev->bus;
+ priv->dev = &mdiodev->dev;
+ priv->chip_data = (void *)priv + sizeof(*priv);
+
+ priv->clk_delay = var->clk_delay;
+ priv->cmd_read = var->cmd_read;
+ priv->cmd_write = var->cmd_write;
+ priv->ops = var->ops;
+
+ priv->write_reg_noack = realtek_mdio_write;
+
+ np = dev->of_node;
+
+ dev_set_drvdata(dev, priv);
+
+ /* TODO: if power is software controlled, set up any regulators here */
+ priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(priv->reset);
+ }
+
+ if (priv->reset) {
+ gpiod_set_value(priv->reset, 1);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ gpiod_set_value(priv->reset, 0);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
+
+ ret = priv->ops->detect(priv);
+ if (ret) {
+ dev_err(dev, "unable to detect switch\n");
+ return ret;
+ }
+
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = dev;
+ priv->ds->num_ports = priv->num_ports;
+ priv->ds->priv = priv;
+ priv->ds->ops = var->ds_ops_mdio;
+
+ ret = dsa_register_switch(priv->ds);
+ if (ret) {
+ dev_err(priv->dev, "unable to register switch ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void realtek_mdio_remove(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_unregister_switch(priv->ds);
+
+ /* leave the device reset asserted */
+ if (priv->reset)
+ gpiod_set_value(priv->reset, 1);
+}
+
+static void realtek_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct realtek_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static const struct of_device_id realtek_mdio_of_match[] = {
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
+ { .compatible = "realtek,rtl8366rb", .data = &rtl8366rb_variant, },
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
+ { .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant, },
+#endif
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, realtek_mdio_of_match);
+
+static struct mdio_driver realtek_mdio_driver = {
+ .mdiodrv.driver = {
+ .name = "realtek-mdio",
+ .of_match_table = of_match_ptr(realtek_mdio_of_match),
+ },
+ .probe = realtek_mdio_probe,
+ .remove = realtek_mdio_remove,
+ .shutdown = realtek_mdio_shutdown,
+};
+
+mdio_module_driver(realtek_mdio_driver);
+
+MODULE_AUTHOR("Luiz Angelo Daros de Luca <luizluca@gmail.com>");
+MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via MDIO interface");
+MODULE_LICENSE("GPL");
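A minimal usage sketch (not part of the patch; it assumes the includes and definitions of realtek-mdio.c above): a subdriver simply calls regmap_read() on priv->map, which takes priv->map_lock through realtek_mdio_lock() and then performs the bus->write()/bus->read() sequence shown in realtek_mdio_read(). The address 0x1300 is RTL8365MB_CHIP_ID_REG further down in this patch.

static int example_read_chip_id(struct realtek_priv *priv, u32 *chip_id)
{
	/* Illustrative helper only, not part of the driver */
	return regmap_read(priv->map, 0x1300, chip_id);
}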
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
new file mode 100644
index 000000000000..1b447d96b9c4
--- /dev/null
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Realtek Simple Management Interface (SMI) driver
+ * It can be discussed how "simple" this interface is.
+ *
+ * The SMI protocol piggy-backs on the MDIO MDC and MDIO signal levels,
+ * but the protocol is not MDIO at all. Instead it is a Realtek
+ * peculiarity that needs to bit-bang the lines in a special way to
+ * communicate with the switch.
+ *
+ * ASICs we intend to support with this driver:
+ *
+ * RTL8366 - The original version, apparently
+ * RTL8369 - Similar enough to have the same datasheet as RTL8366
+ * RTL8366RB - Probably reads out "RTL8366 revision B", has a quite
+ * different register layout from the other two
+ * RTL8366S - Is this "RTL8366 super"?
+ * RTL8367 - Has an OpenWRT driver as well
+ * RTL8368S - Seems to be an alternative name for RTL8366RB
+ * RTL8370 - Also uses SMI
+ *
+ * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org>
+ * Copyright (C) 2010 Antti Seppälä <a.seppala@gmail.com>
+ * Copyright (C) 2010 Roman Yeryomin <roman@advem.lv>
+ * Copyright (C) 2011 Colin Leitner <colin.leitner@googlemail.com>
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/bitops.h>
+#include <linux/if_bridge.h>
+
+#include "realtek.h"
+
+#define REALTEK_SMI_ACK_RETRY_COUNT 5
+
+static inline void realtek_smi_clk_delay(struct realtek_priv *priv)
+{
+ ndelay(priv->clk_delay);
+}
+
+static void realtek_smi_start(struct realtek_priv *priv)
+{
+ /* Set GPIO pins to output mode, with initial state:
+ * SCK = 0, SDA = 1
+ */
+ gpiod_direction_output(priv->mdc, 0);
+ gpiod_direction_output(priv->mdio, 1);
+ realtek_smi_clk_delay(priv);
+
+ /* CLK 1: 0 -> 1, 1 -> 0 */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+
+ /* CLK 2: */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 1);
+}
+
+static void realtek_smi_stop(struct realtek_priv *priv)
+{
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 0);
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdio, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+
+ /* Add a click */
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 1);
+
+ /* Set GPIO pins to input mode */
+ gpiod_direction_input(priv->mdio);
+ gpiod_direction_input(priv->mdc);
+}
+
+static void realtek_smi_write_bits(struct realtek_priv *priv, u32 data, u32 len)
+{
+ for (; len > 0; len--) {
+ realtek_smi_clk_delay(priv);
+
+ /* Prepare data */
+ gpiod_set_value(priv->mdio, !!(data & (1 << (len - 1))));
+ realtek_smi_clk_delay(priv);
+
+ /* Clocking */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ gpiod_set_value(priv->mdc, 0);
+ }
+}
+
+static void realtek_smi_read_bits(struct realtek_priv *priv, u32 len, u32 *data)
+{
+ gpiod_direction_input(priv->mdio);
+
+ for (*data = 0; len > 0; len--) {
+ u32 u;
+
+ realtek_smi_clk_delay(priv);
+
+ /* Clocking */
+ gpiod_set_value(priv->mdc, 1);
+ realtek_smi_clk_delay(priv);
+ u = !!gpiod_get_value(priv->mdio);
+ gpiod_set_value(priv->mdc, 0);
+
+ *data |= (u << (len - 1));
+ }
+
+ gpiod_direction_output(priv->mdio, 0);
+}
+
+static int realtek_smi_wait_for_ack(struct realtek_priv *priv)
+{
+ int retry_cnt;
+
+ retry_cnt = 0;
+ do {
+ u32 ack;
+
+ realtek_smi_read_bits(priv, 1, &ack);
+ if (ack == 0)
+ break;
+
+ if (++retry_cnt > REALTEK_SMI_ACK_RETRY_COUNT) {
+ dev_err(priv->dev, "ACK timeout\n");
+ return -ETIMEDOUT;
+ }
+ } while (1);
+
+ return 0;
+}
+
+static int realtek_smi_write_byte(struct realtek_priv *priv, u8 data)
+{
+ realtek_smi_write_bits(priv, data, 8);
+ return realtek_smi_wait_for_ack(priv);
+}
+
+static int realtek_smi_write_byte_noack(struct realtek_priv *priv, u8 data)
+{
+ realtek_smi_write_bits(priv, data, 8);
+ return 0;
+}
+
+static int realtek_smi_read_byte0(struct realtek_priv *priv, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(priv, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(priv, 0x00, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_byte1(struct realtek_priv *priv, u8 *data)
+{
+ u32 t;
+
+ /* Read data */
+ realtek_smi_read_bits(priv, 8, &t);
+ *data = (t & 0xff);
+
+ /* Send an ACK */
+ realtek_smi_write_bits(priv, 0x01, 1);
+
+ return 0;
+}
+
+static int realtek_smi_read_reg(struct realtek_priv *priv, u32 addr, u32 *data)
+{
+ unsigned long flags;
+ u8 lo = 0;
+ u8 hi = 0;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ realtek_smi_start(priv);
+
+ /* Send READ command */
+ ret = realtek_smi_write_byte(priv, priv->cmd_read);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(priv, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(priv, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Read DATA[7:0] */
+ realtek_smi_read_byte0(priv, &lo);
+ /* Read DATA[15:8] */
+ realtek_smi_read_byte1(priv, &hi);
+
+ *data = ((u32)lo) | (((u32)hi) << 8);
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+static int realtek_smi_write_reg(struct realtek_priv *priv,
+ u32 addr, u32 data, bool ack)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ realtek_smi_start(priv);
+
+ /* Send WRITE command */
+ ret = realtek_smi_write_byte(priv, priv->cmd_write);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[7:0] */
+ ret = realtek_smi_write_byte(priv, addr & 0xff);
+ if (ret)
+ goto out;
+
+ /* Set ADDR[15:8] */
+ ret = realtek_smi_write_byte(priv, addr >> 8);
+ if (ret)
+ goto out;
+
+ /* Write DATA[7:0] */
+ ret = realtek_smi_write_byte(priv, data & 0xff);
+ if (ret)
+ goto out;
+
+ /* Write DATA[15:8] */
+ if (ack)
+ ret = realtek_smi_write_byte(priv, data >> 8);
+ else
+ ret = realtek_smi_write_byte_noack(priv, data >> 8);
+ if (ret)
+ goto out;
+
+ ret = 0;
+
+ out:
+ realtek_smi_stop(priv);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+/* There is one single case when we need to use this accessor and that
+ * is when issuing a soft reset. Since the device resets as soon as we write
+ * that bit, no ACK will come back for natural reasons.
+ */
+static int realtek_smi_write_reg_noack(void *ctx, u32 reg, u32 val)
+{
+ return realtek_smi_write_reg(ctx, reg, val, false);
+}
+
+/* Regmap accessors */
+
+static int realtek_smi_write(void *ctx, u32 reg, u32 val)
+{
+ struct realtek_priv *priv = ctx;
+
+ return realtek_smi_write_reg(priv, reg, val, true);
+}
+
+static int realtek_smi_read(void *ctx, u32 reg, u32 *val)
+{
+ struct realtek_priv *priv = ctx;
+
+ return realtek_smi_read_reg(priv, reg, val);
+}
+
+static void realtek_smi_lock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_lock(&priv->map_lock);
+}
+
+static void realtek_smi_unlock(void *ctx)
+{
+ struct realtek_priv *priv = ctx;
+
+ mutex_unlock(&priv->map_lock);
+}
+
+static const struct regmap_config realtek_smi_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_smi_read,
+ .reg_write = realtek_smi_write,
+ .cache_type = REGCACHE_NONE,
+ .lock = realtek_smi_lock,
+ .unlock = realtek_smi_unlock,
+};
+
+static const struct regmap_config realtek_smi_nolock_regmap_config = {
+ .reg_bits = 10, /* A4..A0 R4..R0 */
+ .val_bits = 16,
+ .reg_stride = 1,
+ /* PHY regs are at 0x8000 */
+ .max_register = 0xffff,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .reg_read = realtek_smi_read,
+ .reg_write = realtek_smi_write,
+ .cache_type = REGCACHE_NONE,
+ .disable_locking = true,
+};
+
+static int realtek_smi_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_read(priv, addr, regnum);
+}
+
+static int realtek_smi_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct realtek_priv *priv = bus->priv;
+
+ return priv->ops->phy_write(priv, addr, regnum, val);
+}
+
+static int realtek_smi_setup_mdio(struct dsa_switch *ds)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct device_node *mdio_np;
+ int ret;
+
+ mdio_np = of_get_compatible_child(priv->dev->of_node, "realtek,smi-mdio");
+ if (!mdio_np) {
+ dev_err(priv->dev, "no MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ priv->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
+ if (!priv->slave_mii_bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+ priv->slave_mii_bus->priv = priv;
+ priv->slave_mii_bus->name = "SMI slave MII";
+ priv->slave_mii_bus->read = realtek_smi_mdio_read;
+ priv->slave_mii_bus->write = realtek_smi_mdio_write;
+ snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "SMI-%d",
+ ds->index);
+ priv->slave_mii_bus->dev.of_node = mdio_np;
+ priv->slave_mii_bus->parent = priv->dev;
+ ds->slave_mii_bus = priv->slave_mii_bus;
+
+ ret = devm_of_mdiobus_register(priv->dev, priv->slave_mii_bus, mdio_np);
+ if (ret) {
+ dev_err(priv->dev, "unable to register MDIO bus %s\n",
+ priv->slave_mii_bus->id);
+ goto err_put_node;
+ }
+
+ return 0;
+
+err_put_node:
+ of_node_put(mdio_np);
+
+ return ret;
+}
+
+static int realtek_smi_probe(struct platform_device *pdev)
+{
+ const struct realtek_variant *var;
+ struct device *dev = &pdev->dev;
+ struct realtek_priv *priv;
+ struct regmap_config rc;
+ struct device_node *np;
+ int ret;
+
+ var = of_device_get_match_data(dev);
+ np = dev->of_node;
+
+ priv = devm_kzalloc(dev, sizeof(*priv) + var->chip_data_sz, GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->chip_data = (void *)priv + sizeof(*priv);
+
+ mutex_init(&priv->map_lock);
+
+ rc = realtek_smi_regmap_config;
+ rc.lock_arg = priv;
+ priv->map = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map)) {
+ ret = PTR_ERR(priv->map);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ rc = realtek_smi_nolock_regmap_config;
+ priv->map_nolock = devm_regmap_init(dev, NULL, priv, &rc);
+ if (IS_ERR(priv->map_nolock)) {
+ ret = PTR_ERR(priv->map_nolock);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Link forward and backward */
+ priv->dev = dev;
+ priv->clk_delay = var->clk_delay;
+ priv->cmd_read = var->cmd_read;
+ priv->cmd_write = var->cmd_write;
+ priv->ops = var->ops;
+
+ priv->setup_interface = realtek_smi_setup_mdio;
+ priv->write_reg_noack = realtek_smi_write_reg_noack;
+
+ dev_set_drvdata(dev, priv);
+ spin_lock_init(&priv->lock);
+
+ /* TODO: if power is software controlled, set up any regulators here */
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(priv->reset);
+ }
+ if (priv->reset) {
+ gpiod_set_value(priv->reset, 1);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ gpiod_set_value(priv->reset, 0);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
+
+ /* Fetch MDIO pins */
+ priv->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->mdc))
+ return PTR_ERR(priv->mdc);
+ priv->mdio = devm_gpiod_get_optional(dev, "mdio", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->mdio))
+ return PTR_ERR(priv->mdio);
+
+ priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
+
+ ret = priv->ops->detect(priv);
+ if (ret) {
+ dev_err(dev, "unable to detect switch\n");
+ return ret;
+ }
+
+ priv->ds = devm_kzalloc(dev, sizeof(*priv->ds), GFP_KERNEL);
+ if (!priv->ds)
+ return -ENOMEM;
+
+ priv->ds->dev = dev;
+ priv->ds->num_ports = priv->num_ports;
+ priv->ds->priv = priv;
+
+ priv->ds->ops = var->ds_ops_smi;
+ ret = dsa_register_switch(priv->ds);
+ if (ret) {
+ dev_err_probe(dev, ret, "unable to register switch\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int realtek_smi_remove(struct platform_device *pdev)
+{
+ struct realtek_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return 0;
+
+ dsa_unregister_switch(priv->ds);
+ if (priv->slave_mii_bus)
+ of_node_put(priv->slave_mii_bus->dev.of_node);
+
+ /* leave the device reset asserted */
+ if (priv->reset)
+ gpiod_set_value(priv->reset, 1);
+
+ return 0;
+}
+
+static void realtek_smi_shutdown(struct platform_device *pdev)
+{
+ struct realtek_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id realtek_smi_of_match[] = {
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB)
+ {
+ .compatible = "realtek,rtl8366rb",
+ .data = &rtl8366rb_variant,
+ },
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
+ {
+ .compatible = "realtek,rtl8365mb",
+ .data = &rtl8365mb_variant,
+ },
+#endif
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
+
+static struct platform_driver realtek_smi_driver = {
+ .driver = {
+ .name = "realtek-smi",
+ .of_match_table = of_match_ptr(realtek_smi_of_match),
+ },
+ .probe = realtek_smi_probe,
+ .remove = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
+};
+module_platform_driver(realtek_smi_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Driver for Realtek ethernet switch connected via SMI interface");
+MODULE_LICENSE("GPL");
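Both interface drivers above register two regmaps over the same backend: priv->map serializes each access by taking map_lock in its lock/unlock callbacks, while priv->map_nolock has regmap locking disabled so that a multi-register sequence can hold map_lock across the whole transaction. A hedged sketch of the intended pattern (register addresses and value are placeholders; the real user is rtl8365mb_phy_ocp_read() further down):

static int example_two_step_access(struct realtek_priv *priv, u32 *val)
{
	int ret;

	/* Hold the shared lock so no other register access can interleave */
	mutex_lock(&priv->map_lock);

	ret = regmap_write(priv->map_nolock, 0x1f00, 0x1);	/* placeholder */
	if (!ret)
		ret = regmap_read(priv->map_nolock, 0x1f01, val);	/* placeholder */

	mutex_unlock(&priv->map_lock);

	return ret;
}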
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek/realtek.h
index 5bfa53e2480a..4fa7c6ba874a 100644
--- a/drivers/net/dsa/realtek-smi-core.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -5,15 +5,18 @@
* Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
*/
-#ifndef _REALTEK_SMI_H
-#define _REALTEK_SMI_H
+#ifndef _REALTEK_H
+#define _REALTEK_H
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>
-struct realtek_smi_ops;
+#define REALTEK_HW_STOP_DELAY 25 /* msecs */
+#define REALTEK_HW_START_DELAY 100 /* msecs */
+
+struct realtek_ops;
struct dentry;
struct inode;
struct file;
@@ -25,7 +28,7 @@ struct rtl8366_mib_counter {
const char *name;
};
-/**
+/*
* struct rtl8366_vlan_mc - Virtual LAN member configuration
*/
struct rtl8366_vlan_mc {
@@ -43,13 +46,17 @@ struct rtl8366_vlan_4k {
u8 fid;
};
-struct realtek_smi {
+struct realtek_priv {
struct device *dev;
struct gpio_desc *reset;
struct gpio_desc *mdc;
struct gpio_desc *mdio;
struct regmap *map;
+ struct regmap *map_nolock;
+ struct mutex map_lock;
struct mii_bus *slave_mii_bus;
+ struct mii_bus *bus;
+ int mdio_addr;
unsigned int clk_delay;
u8 cmd_read;
@@ -65,7 +72,9 @@ struct realtek_smi {
unsigned int num_mib_counters;
struct rtl8366_mib_counter *mib_counters;
- const struct realtek_smi_ops *ops;
+ const struct realtek_ops *ops;
+ int (*setup_interface)(struct dsa_switch *ds);
+ int (*write_reg_noack)(void *ctx, u32 addr, u32 data);
int vlan_enabled;
int vlan4k_enabled;
@@ -74,61 +83,57 @@ struct realtek_smi {
void *chip_data; /* Per-chip extra variant data */
};
-/**
- * struct realtek_smi_ops - vtable for the per-SMI-chiptype operations
+/*
+ * struct realtek_ops - vtable for the per-SMI-chiptype operations
* @detect: detects the chiptype
*/
-struct realtek_smi_ops {
- int (*detect)(struct realtek_smi *smi);
- int (*reset_chip)(struct realtek_smi *smi);
- int (*setup)(struct realtek_smi *smi);
- void (*cleanup)(struct realtek_smi *smi);
- int (*get_mib_counter)(struct realtek_smi *smi,
+struct realtek_ops {
+ int (*detect)(struct realtek_priv *priv);
+ int (*reset_chip)(struct realtek_priv *priv);
+ int (*setup)(struct realtek_priv *priv);
+ void (*cleanup)(struct realtek_priv *priv);
+ int (*get_mib_counter)(struct realtek_priv *priv,
int port,
struct rtl8366_mib_counter *mib,
u64 *mibvalue);
- int (*get_vlan_mc)(struct realtek_smi *smi, u32 index,
+ int (*get_vlan_mc)(struct realtek_priv *priv, u32 index,
struct rtl8366_vlan_mc *vlanmc);
- int (*set_vlan_mc)(struct realtek_smi *smi, u32 index,
+ int (*set_vlan_mc)(struct realtek_priv *priv, u32 index,
const struct rtl8366_vlan_mc *vlanmc);
- int (*get_vlan_4k)(struct realtek_smi *smi, u32 vid,
+ int (*get_vlan_4k)(struct realtek_priv *priv, u32 vid,
struct rtl8366_vlan_4k *vlan4k);
- int (*set_vlan_4k)(struct realtek_smi *smi,
+ int (*set_vlan_4k)(struct realtek_priv *priv,
const struct rtl8366_vlan_4k *vlan4k);
- int (*get_mc_index)(struct realtek_smi *smi, int port, int *val);
- int (*set_mc_index)(struct realtek_smi *smi, int port, int index);
- bool (*is_vlan_valid)(struct realtek_smi *smi, unsigned int vlan);
- int (*enable_vlan)(struct realtek_smi *smi, bool enable);
- int (*enable_vlan4k)(struct realtek_smi *smi, bool enable);
- int (*enable_port)(struct realtek_smi *smi, int port, bool enable);
- int (*phy_read)(struct realtek_smi *smi, int phy, int regnum);
- int (*phy_write)(struct realtek_smi *smi, int phy, int regnum,
+ int (*get_mc_index)(struct realtek_priv *priv, int port, int *val);
+ int (*set_mc_index)(struct realtek_priv *priv, int port, int index);
+ bool (*is_vlan_valid)(struct realtek_priv *priv, unsigned int vlan);
+ int (*enable_vlan)(struct realtek_priv *priv, bool enable);
+ int (*enable_vlan4k)(struct realtek_priv *priv, bool enable);
+ int (*enable_port)(struct realtek_priv *priv, int port, bool enable);
+ int (*phy_read)(struct realtek_priv *priv, int phy, int regnum);
+ int (*phy_write)(struct realtek_priv *priv, int phy, int regnum,
u16 val);
};
-struct realtek_smi_variant {
- const struct dsa_switch_ops *ds_ops;
- const struct realtek_smi_ops *ops;
+struct realtek_variant {
+ const struct dsa_switch_ops *ds_ops_smi;
+ const struct dsa_switch_ops *ds_ops_mdio;
+ const struct realtek_ops *ops;
unsigned int clk_delay;
u8 cmd_read;
u8 cmd_write;
size_t chip_data_sz;
};
-/* SMI core calls */
-int realtek_smi_write_reg_noack(struct realtek_smi *smi, u32 addr,
- u32 data);
-int realtek_smi_setup_mdio(struct realtek_smi *smi);
-
/* RTL8366 library helpers */
-int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
-int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used);
+int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
u32 untag, u32 fid);
-int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid);
-int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
-int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
-int rtl8366_reset_vlan(struct realtek_smi *smi);
+int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable);
+int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable);
+int rtl8366_reset_vlan(struct realtek_priv *priv);
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack);
@@ -139,7 +144,7 @@ void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
-extern const struct realtek_smi_variant rtl8366rb_variant;
-extern const struct realtek_smi_variant rtl8365mb_variant;
+extern const struct realtek_variant rtl8366rb_variant;
+extern const struct realtek_variant rtl8365mb_variant;
-#endif /* _REALTEK_SMI_H */
+#endif /* _REALTEK_H */
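With the ops table and the setup_interface()/write_reg_noack() callbacks above, a subdriver no longer needs to know whether it sits behind SMI or MDIO; everything goes through struct realtek_priv. A minimal sketch of the idea (reset_reg and reset_val are placeholders; the real values live in the subdrivers):

static int example_soft_reset(struct realtek_priv *priv, u32 reset_reg, u32 reset_val)
{
	/* No ACK can be expected once the chip starts resetting, hence the
	 * no-ACK accessor provided by the interface driver.
	 */
	return priv->write_reg_noack(priv, reset_reg, reset_val);
}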
diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 078ca4cd7160..41ea3b5a42b1 100644
--- a/drivers/net/dsa/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -98,19 +98,17 @@
#include <linux/of_irq.h>
#include <linux/regmap.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
-#include "realtek-smi-core.h"
-
-/* Chip-specific data and limits */
-#define RTL8365MB_CHIP_ID_8365MB_VC 0x6367
-#define RTL8365MB_CPU_PORT_NUM_8365MB_VC 6
-#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112
+#include "realtek.h"
/* Family-specific data and limits */
-#define RTL8365MB_PHYADDRMAX 7
-#define RTL8365MB_NUM_PHYREGS 32
-#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
-#define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
+#define RTL8365MB_PHYADDRMAX 7
+#define RTL8365MB_NUM_PHYREGS 32
+#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
+#define RTL8365MB_MAX_NUM_PORTS 11
+#define RTL8365MB_MAX_NUM_EXTINTS 3
+#define RTL8365MB_LEARN_LIMIT_MAX 2112
/* Chip identification registers */
#define RTL8365MB_CHIP_ID_REG 0x1300
@@ -191,7 +189,7 @@
/* The PHY OCP addresses of PHY registers 0~31 start here */
#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE 0xA400
-/* EXT port interface mode values - used in DIGITAL_INTERFACE_SELECT */
+/* External interface port mode values - used in DIGITAL_INTERFACE_SELECT */
#define RTL8365MB_EXT_PORT_MODE_DISABLE 0
#define RTL8365MB_EXT_PORT_MODE_RGMII 1
#define RTL8365MB_EXT_PORT_MODE_MII_MAC 2
@@ -207,39 +205,44 @@
#define RTL8365MB_EXT_PORT_MODE_1000X 12
#define RTL8365MB_EXT_PORT_MODE_100FX 13
-/* EXT port interface mode configuration registers 0~1 */
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extport) \
- (RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 + \
- ((_extport) >> 1) * (0x13C3 - 0x1305))
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extport) \
- (0xF << (((_extport) % 2)))
-#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extport) \
- (((_extport) % 2) * 4)
-
-/* EXT port RGMII TX/RX delay configuration registers 1~2 */
-#define RTL8365MB_EXT_RGMXF_REG1 0x1307
-#define RTL8365MB_EXT_RGMXF_REG2 0x13C5
-#define RTL8365MB_EXT_RGMXF_REG(_extport) \
- (RTL8365MB_EXT_RGMXF_REG1 + \
- (((_extport) >> 1) * (0x13C5 - 0x1307)))
+/* External interface mode configuration registers 0~1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305 /* EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3 /* EXT2 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extint) \
+ ((_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 : \
+ (_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 : \
+ 0x0)
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extint) \
+ (0xF << (((_extint) % 2)))
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extint) \
+ (((_extint) % 2) * 4)
+
+/* External interface RGMII TX/RX delay configuration registers 0~2 */
+#define RTL8365MB_EXT_RGMXF_REG0 0x1306 /* EXT0 */
+#define RTL8365MB_EXT_RGMXF_REG1 0x1307 /* EXT1 */
+#define RTL8365MB_EXT_RGMXF_REG2 0x13C5 /* EXT2 */
+#define RTL8365MB_EXT_RGMXF_REG(_extint) \
+ ((_extint) == 0 ? RTL8365MB_EXT_RGMXF_REG0 : \
+ (_extint) == 1 ? RTL8365MB_EXT_RGMXF_REG1 : \
+ (_extint) == 2 ? RTL8365MB_EXT_RGMXF_REG2 : \
+ 0x0)
#define RTL8365MB_EXT_RGMXF_RXDELAY_MASK 0x0007
#define RTL8365MB_EXT_RGMXF_TXDELAY_MASK 0x0008
-/* External port speed values - used in DIGITAL_INTERFACE_FORCE */
+/* External interface port speed values - used in DIGITAL_INTERFACE_FORCE */
#define RTL8365MB_PORT_SPEED_10M 0
#define RTL8365MB_PORT_SPEED_100M 1
#define RTL8365MB_PORT_SPEED_1000M 2
-/* EXT port force configuration registers 0~2 */
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4
-#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extport) \
- (RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 + \
- ((_extport) & 0x1) + \
- ((((_extport) >> 1) & 0x1) * (0x13C4 - 0x1310)))
+/* External interface force configuration registers 0~2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310 /* EXT0 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311 /* EXT1 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4 /* EXT2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extint) \
+ ((_extint) == 0 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 : \
+ (_extint) == 1 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 : \
+ (_extint) == 2 ? RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 : \
+ 0x0)
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK 0x1000
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_NWAY_MASK 0x0080
#define RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK 0x0040
@@ -265,6 +268,7 @@
/* Maximum packet length register */
#define RTL8365MB_CFG0_MAX_LEN_REG 0x088C
#define RTL8365MB_CFG0_MAX_LEN_MASK 0x3FFF
+#define RTL8365MB_CFG0_MAX_LEN_MAX 0x3FFF
/* Port learning limit registers */
#define RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE 0x0A20
@@ -277,7 +281,7 @@
(RTL8365MB_PORT_ISOLATION_REG_BASE + (_physport))
#define RTL8365MB_PORT_ISOLATION_MASK 0x07FF
-/* MSTP port state registers - indexed by tree instance (MSTI) */
+/* MSTP port state registers - indexed by tree instance */
#define RTL8365MB_MSTI_CTRL_BASE 0x0A00
#define RTL8365MB_MSTI_CTRL_REG(_msti, _physport) \
(RTL8365MB_MSTI_CTRL_BASE + ((_msti) << 1) + ((_physport) >> 3))
@@ -463,6 +467,95 @@ static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_common[] = {
{ 0x1D32, 0x0002 },
};
+enum rtl8365mb_phy_interface_mode {
+ RTL8365MB_PHY_INTERFACE_MODE_INVAL = 0,
+ RTL8365MB_PHY_INTERFACE_MODE_INTERNAL = BIT(0),
+ RTL8365MB_PHY_INTERFACE_MODE_MII = BIT(1),
+ RTL8365MB_PHY_INTERFACE_MODE_TMII = BIT(2),
+ RTL8365MB_PHY_INTERFACE_MODE_RMII = BIT(3),
+ RTL8365MB_PHY_INTERFACE_MODE_RGMII = BIT(4),
+ RTL8365MB_PHY_INTERFACE_MODE_SGMII = BIT(5),
+ RTL8365MB_PHY_INTERFACE_MODE_HSGMII = BIT(6),
+};
+
+/**
+ * struct rtl8365mb_extint - external interface info
+ * @port: the port with an external interface
+ * @id: the external interface ID, which is either 0, 1, or 2
+ * @supported_interfaces: a bitmask of supported PHY interface modes
+ *
+ * Represents a mapping: port -> { id, supported_interfaces }. To be embedded
+ * in &struct rtl8365mb_chip_info for every port with an external interface.
+ */
+struct rtl8365mb_extint {
+ int port;
+ int id;
+ unsigned int supported_interfaces;
+};
+
+/**
+ * struct rtl8365mb_chip_info - static chip-specific info
+ * @name: human-readable chip name
+ * @chip_id: chip identifier
+ * @chip_ver: chip silicon revision
+ * @extints: available external interfaces
+ * @jam_table: chip-specific initialization jam table
+ * @jam_size: size of the chip's jam table
+ *
+ * These data are specific to a given chip in the family of switches supported
+ * by this driver. When adding support for another chip in the family, a new
+ * chip info should be added to the rtl8365mb_chip_infos array.
+ */
+struct rtl8365mb_chip_info {
+ const char *name;
+ u32 chip_id;
+ u32 chip_ver;
+ const struct rtl8365mb_extint extints[RTL8365MB_MAX_NUM_EXTINTS];
+ const struct rtl8365mb_jam_tbl_entry *jam_table;
+ size_t jam_size;
+};
+
+/* Chip info for each supported switch in the family */
+#define PHY_INTF(_mode) (RTL8365MB_PHY_INTERFACE_MODE_ ## _mode)
+static const struct rtl8365mb_chip_info rtl8365mb_chip_infos[] = {
+ {
+ .name = "RTL8365MB-VC",
+ .chip_id = 0x6367,
+ .chip_ver = 0x0040,
+ .extints = {
+ { 6, 1, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ },
+ .jam_table = rtl8365mb_init_jam_8365mb_vc,
+ .jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
+ },
+ {
+ .name = "RTL8367S",
+ .chip_id = 0x6367,
+ .chip_ver = 0x00A0,
+ .extints = {
+ { 6, 1, PHY_INTF(SGMII) | PHY_INTF(HSGMII) },
+ { 7, 2, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ },
+ .jam_table = rtl8365mb_init_jam_8365mb_vc,
+ .jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
+ },
+ {
+ .name = "RTL8367RB-VB",
+ .chip_id = 0x6367,
+ .chip_ver = 0x0020,
+ .extints = {
+ { 6, 1, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ { 7, 2, PHY_INTF(MII) | PHY_INTF(TMII) |
+ PHY_INTF(RMII) | PHY_INTF(RGMII) },
+ },
+ .jam_table = rtl8365mb_init_jam_8365mb_vc,
+ .jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc),
+ },
+};
+
enum rtl8365mb_stp_state {
RTL8365MB_STP_STATE_DISABLED = 0,
RTL8365MB_STP_STATE_BLOCKING = 1,
@@ -516,7 +609,7 @@ struct rtl8365mb_cpu {
/**
* struct rtl8365mb_port - private per-port data
- * @smi: pointer to parent realtek_smi data
+ * @priv: pointer to parent realtek_priv data
* @index: DSA port index, same as dsa_port::index
* @stats: link statistics populated by rtl8365mb_stats_poll, ready for atomic
* access via rtl8365mb_get_stats64
@@ -524,7 +617,7 @@ struct rtl8365mb_cpu {
* @mib_work: delayed work for polling MIB counters
*/
struct rtl8365mb_port {
- struct realtek_smi *smi;
+ struct realtek_priv *priv;
unsigned int index;
struct rtnl_link_stats64 stats;
spinlock_t stats_lock;
@@ -532,45 +625,35 @@ struct rtl8365mb_port {
};
/**
- * struct rtl8365mb - private chip-specific driver data
- * @smi: pointer to parent realtek_smi data
+ * struct rtl8365mb - driver private data
+ * @priv: pointer to parent realtek_priv data
* @irq: registered IRQ or zero
- * @chip_id: chip identifier
- * @chip_ver: chip silicon revision
- * @port_mask: mask of all ports
- * @learn_limit_max: maximum number of L2 addresses the chip can learn
+ * @chip_info: chip-specific info about the attached switch
* @cpu: CPU tagging and CPU port configuration for this chip
* @mib_lock: prevent concurrent reads of MIB counters
* @ports: per-port data
- * @jam_table: chip-specific initialization jam table
- * @jam_size: size of the chip's jam table
*
* Private data for this driver.
*/
struct rtl8365mb {
- struct realtek_smi *smi;
+ struct realtek_priv *priv;
int irq;
- u32 chip_id;
- u32 chip_ver;
- u32 port_mask;
- u32 learn_limit_max;
+ const struct rtl8365mb_chip_info *chip_info;
struct rtl8365mb_cpu cpu;
struct mutex mib_lock;
struct rtl8365mb_port ports[RTL8365MB_MAX_NUM_PORTS];
- const struct rtl8365mb_jam_tbl_entry *jam_table;
- size_t jam_size;
};
-static int rtl8365mb_phy_poll_busy(struct realtek_smi *smi)
+static int rtl8365mb_phy_poll_busy(struct realtek_priv *priv)
{
u32 val;
- return regmap_read_poll_timeout(smi->map,
+ return regmap_read_poll_timeout(priv->map_nolock,
RTL8365MB_INDIRECT_ACCESS_STATUS_REG,
val, !val, 10, 100);
}
-static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
+static int rtl8365mb_phy_ocp_prepare(struct realtek_priv *priv, int phy,
u32 ocp_addr)
{
u32 val;
@@ -579,7 +662,7 @@ static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
/* Set OCP prefix */
val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr);
ret = regmap_update_bits(
- smi->map, RTL8365MB_GPHY_OCP_MSB_0_REG,
+ priv->map_nolock, RTL8365MB_GPHY_OCP_MSB_0_REG,
RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK,
FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val));
if (ret)
@@ -592,89 +675,101 @@ static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
ocp_addr >> 1);
val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK,
ocp_addr >> 6);
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG,
- val);
+ ret = regmap_write(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG, val);
if (ret)
return ret;
return 0;
}
-static int rtl8365mb_phy_ocp_read(struct realtek_smi *smi, int phy,
+static int rtl8365mb_phy_ocp_read(struct realtek_priv *priv, int phy,
u32 ocp_addr, u16 *data)
{
u32 val;
int ret;
- ret = rtl8365mb_phy_poll_busy(smi);
+ mutex_lock(&priv->map_lock);
+
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr);
if (ret)
- return ret;
+ goto out;
/* Execute read operation */
val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ);
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
+ val);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_poll_busy(smi);
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
/* Get PHY register data */
- ret = regmap_read(smi->map, RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG,
- &val);
+ ret = regmap_read(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG, &val);
if (ret)
- return ret;
+ goto out;
*data = val & 0xFFFF;
- return 0;
+out:
+ mutex_unlock(&priv->map_lock);
+
+ return ret;
}
-static int rtl8365mb_phy_ocp_write(struct realtek_smi *smi, int phy,
+static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
u32 ocp_addr, u16 data)
{
u32 val;
int ret;
- ret = rtl8365mb_phy_poll_busy(smi);
+ mutex_lock(&priv->map_lock);
+
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ ret = rtl8365mb_phy_ocp_prepare(priv, phy, ocp_addr);
if (ret)
- return ret;
+ goto out;
/* Set PHY register data */
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG,
- data);
+ ret = regmap_write(priv->map_nolock,
+ RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG, data);
if (ret)
- return ret;
+ goto out;
/* Execute write operation */
val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE);
- ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ ret = regmap_write(priv->map_nolock, RTL8365MB_INDIRECT_ACCESS_CTRL_REG,
+ val);
if (ret)
- return ret;
+ goto out;
- ret = rtl8365mb_phy_poll_busy(smi);
+ ret = rtl8365mb_phy_poll_busy(priv);
if (ret)
- return ret;
+ goto out;
+
+out:
+ mutex_unlock(&priv->map_lock);
return 0;
}
-static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+static int rtl8365mb_phy_read(struct realtek_priv *priv, int phy, int regnum)
{
u32 ocp_addr;
u16 val;
@@ -688,21 +783,21 @@ static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
- ret = rtl8365mb_phy_ocp_read(smi, phy, ocp_addr, &val);
+ ret = rtl8365mb_phy_ocp_read(priv, phy, ocp_addr, &val);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to read PHY%d reg %02x @ %04x, ret %d\n", phy,
regnum, ocp_addr, ret);
return ret;
}
- dev_dbg(smi->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
+ dev_dbg(priv->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
phy, regnum, ocp_addr, val);
return val;
}
-static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+static int rtl8365mb_phy_write(struct realtek_priv *priv, int phy, int regnum,
u16 val)
{
u32 ocp_addr;
@@ -716,46 +811,84 @@ static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
- ret = rtl8365mb_phy_ocp_write(smi, phy, ocp_addr, val);
+ ret = rtl8365mb_phy_ocp_write(priv, phy, ocp_addr, val);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to write PHY%d reg %02x @ %04x, ret %d\n", phy,
regnum, ocp_addr, ret);
return ret;
}
- dev_dbg(smi->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
+ dev_dbg(priv->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
phy, regnum, ocp_addr, val);
return 0;
}
+static int rtl8365mb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ return rtl8365mb_phy_read(ds->priv, phy, regnum);
+}
+
+static int rtl8365mb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ return rtl8365mb_phy_write(ds->priv, phy, regnum, val);
+}
+
+static const struct rtl8365mb_extint *
+rtl8365mb_get_port_extint(struct realtek_priv *priv, int port)
+{
+ struct rtl8365mb *mb = priv->chip_data;
+ int i;
+
+ for (i = 0; i < RTL8365MB_MAX_NUM_EXTINTS; i++) {
+ const struct rtl8365mb_extint *extint =
+ &mb->chip_info->extints[i];
+
+ if (!extint->supported_interfaces)
+ continue;
+
+ if (extint->port == port)
+ return extint;
+ }
+
+ return NULL;
+}
+
static enum dsa_tag_protocol
rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct rtl8365mb *mb;
+
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
+
+ if (cpu->position == RTL8365MB_CPU_POS_BEFORE_CRC)
+ return DSA_TAG_PROTO_RTL8_4T;
+
return DSA_TAG_PROTO_RTL8_4;
}
-static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
+static int rtl8365mb_ext_config_rgmii(struct realtek_priv *priv, int port,
phy_interface_t interface)
{
+ const struct rtl8365mb_extint *extint =
+ rtl8365mb_get_port_extint(priv, port);
struct device_node *dn;
struct dsa_port *dp;
int tx_delay = 0;
int rx_delay = 0;
- int ext_port;
u32 val;
int ret;
- if (port == smi->cpu_port) {
- ext_port = 1;
- } else {
- dev_err(smi->dev, "only one EXT port is currently supported\n");
- return -EINVAL;
- }
+ if (!extint)
+ return -ENODEV;
- dp = dsa_to_port(smi->ds, port);
+ dp = dsa_to_port(priv->ds, port);
dn = dp->dn;
/* Set the RGMII TX/RX delay
@@ -767,7 +900,8 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
* 0 = no delay, 1 = 2 ns delay
* RX delay:
* 0 = no delay, 7 = maximum delay
- * No units are specified, but there are a total of 8 steps.
+ * Each step is approximately 0.3 ns, so the maximum delay is about
+ * 2.1 ns.
*
* The vendor driver also states that this must be configured *before*
* forcing the external interface into a particular mode, which is done
@@ -778,10 +912,6 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
* specified. We ignore the detail of the RGMII interface mode
* (RGMII_{RXID, TXID, etc.}), as this is considered to be a PHY-only
* property.
- *
- * For the RX delay, we assume that a register value of 4 corresponds to
- * 2 ns. But this is just an educated guess, so ignore all other values
- * to avoid too much confusion.
*/
if (!of_property_read_u32(dn, "tx-internal-delay-ps", &val)) {
val = val / 1000; /* convert to ns */
@@ -789,22 +919,22 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
if (val == 0 || val == 2)
tx_delay = val / 2;
else
- dev_warn(smi->dev,
- "EXT port TX delay must be 0 or 2 ns\n");
+ dev_warn(priv->dev,
+ "RGMII TX delay must be 0 or 2 ns\n");
}
if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) {
- val = val / 1000; /* convert to ns */
+ val = DIV_ROUND_CLOSEST(val, 300); /* convert to 0.3 ns step */
- if (val == 0 || val == 2)
- rx_delay = val * 2;
+ if (val <= 7)
+ rx_delay = val;
else
- dev_warn(smi->dev,
- "EXT port RX delay must be 0 to 2 ns\n");
+ dev_warn(priv->dev,
+ "RGMII RX delay must be 0 to 2.1 ns\n");
}
ret = regmap_update_bits(
- smi->map, RTL8365MB_EXT_RGMXF_REG(ext_port),
+ priv->map, RTL8365MB_EXT_RGMXF_REG(extint->id),
RTL8365MB_EXT_RGMXF_TXDELAY_MASK |
RTL8365MB_EXT_RGMXF_RXDELAY_MASK,
FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) |
@@ -813,36 +943,33 @@ static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
return ret;
ret = regmap_update_bits(
- smi->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_port),
- RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_port),
+ priv->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(extint->id),
+ RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(extint->id),
RTL8365MB_EXT_PORT_MODE_RGMII
<< RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(
- ext_port));
+ extint->id));
if (ret)
return ret;
return 0;
}
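For clarity, here is the delay conversion the hunk above applies to the devicetree properties, written out as a standalone sketch (the helper names are illustrative, not part of the patch):

/* tx-internal-delay-ps: only 0 ns and 2 ns are supported, giving a register
 * value of 0 or 1. rx-internal-delay-ps: ~0.3 ns per step, 0..7 steps.
 */
static int rgmii_tx_ps_to_reg(unsigned int ps)
{
	unsigned int ns = ps / 1000;

	return (ns == 0 || ns == 2) ? ns / 2 : -1;	/* -1: warn, keep 0 */
}

static int rgmii_rx_ps_to_reg(unsigned int ps)
{
	unsigned int steps = (ps + 150) / 300;	/* DIV_ROUND_CLOSEST(ps, 300) */

	return steps <= 7 ? steps : -1;		/* > ~2.1 ns: warn, keep 0 */
}

/* e.g. 2000 ps -> tx_delay = 1; 900 ps -> rx_delay = 3 (~0.9 ns) */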
-static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
+static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port,
bool link, int speed, int duplex,
bool tx_pause, bool rx_pause)
{
+ const struct rtl8365mb_extint *extint =
+ rtl8365mb_get_port_extint(priv, port);
u32 r_tx_pause;
u32 r_rx_pause;
u32 r_duplex;
u32 r_speed;
u32 r_link;
- int ext_port;
int val;
int ret;
- if (port == smi->cpu_port) {
- ext_port = 1;
- } else {
- dev_err(smi->dev, "only one EXT port is currently supported\n");
- return -EINVAL;
- }
+ if (!extint)
+ return -ENODEV;
if (link) {
/* Force the link up with the desired configuration */
@@ -857,7 +984,7 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
} else if (speed == SPEED_10) {
r_speed = RTL8365MB_PORT_SPEED_10M;
} else {
- dev_err(smi->dev, "unsupported port speed %s\n",
+ dev_err(priv->dev, "unsupported port speed %s\n",
phy_speed_to_str(speed));
return -EINVAL;
}
@@ -867,7 +994,7 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
} else if (duplex == DUPLEX_HALF) {
r_duplex = 0;
} else {
- dev_err(smi->dev, "unsupported duplex %s\n",
+ dev_err(priv->dev, "unsupported duplex %s\n",
phy_duplex_to_str(duplex));
return -EINVAL;
}
@@ -889,8 +1016,8 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK,
r_duplex) |
FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed);
- ret = regmap_write(smi->map,
- RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_port),
+ ret = regmap_write(priv->map,
+ RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(extint->id),
val);
if (ret)
return ret;
@@ -898,81 +1025,54 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
return 0;
}
-static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
- phy_interface_t interface)
+static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- if (dsa_is_user_port(ds, port) &&
- (interface == PHY_INTERFACE_MODE_NA ||
- interface == PHY_INTERFACE_MODE_INTERNAL))
- /* Internal PHY */
- return true;
- else if (dsa_is_cpu_port(ds, port) &&
- phy_interface_mode_is_rgmii(interface))
- /* Extension MAC */
- return true;
-
- return false;
-}
+ const struct rtl8365mb_extint *extint =
+ rtl8365mb_get_port_extint(ds->priv, port);
-static void rtl8365mb_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct realtek_smi *smi = ds->priv;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0 };
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
- /* include/linux/phylink.h says:
- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
- * expects the MAC driver to return all supported link modes.
- */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- !rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
- dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
- phy_modes(state->interface), port);
- linkmode_zero(supported);
+ if (!extint) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+
+ /* GMII is the default interface mode for phylib, so
+ * we have to support it for ports with integrated PHY.
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
return;
}
- phylink_set_port_modes(mask);
-
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
+ /* Populate according to the modes supported by _this driver_,
+ * not necessarily the modes supported by the hardware, some of
+ * which remain unimplemented.
+ */
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ if (extint->supported_interfaces & RTL8365MB_PHY_INTERFACE_MODE_RGMII)
+ phy_interface_set_rgmii(config->supported_interfaces);
}
static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
- dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
- phy_modes(state->interface), port);
- return;
- }
-
if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"port %d supports only conventional PHY or fixed-link\n",
port);
return;
}
if (phy_interface_mode_is_rgmii(state->interface)) {
- ret = rtl8365mb_ext_config_rgmii(smi, port, state->interface);
+ ret = rtl8365mb_ext_config_rgmii(priv, port, state->interface);
if (ret)
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to configure RGMII mode on port %d: %d\n",
port, ret);
return;
@@ -987,20 +1087,20 @@ static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
int ret;
- mb = smi->chip_data;
+ mb = priv->chip_data;
p = &mb->ports[port];
cancel_delayed_work_sync(&p->mib_work);
if (phy_interface_mode_is_rgmii(interface)) {
- ret = rtl8365mb_ext_config_forcemode(smi, port, false, 0, 0,
+ ret = rtl8365mb_ext_config_forcemode(priv, port, false, 0, 0,
false, false);
if (ret)
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to reset forced mode on port %d: %d\n",
port, ret);
@@ -1015,21 +1115,21 @@ static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
int duplex, bool tx_pause,
bool rx_pause)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
int ret;
- mb = smi->chip_data;
+ mb = priv->chip_data;
p = &mb->ports[port];
schedule_delayed_work(&p->mib_work, 0);
if (phy_interface_mode_is_rgmii(interface)) {
- ret = rtl8365mb_ext_config_forcemode(smi, port, true, speed,
+ ret = rtl8365mb_ext_config_forcemode(priv, port, true, speed,
duplex, tx_pause,
rx_pause);
if (ret)
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to force mode on port %d: %d\n", port,
ret);
@@ -1037,10 +1137,39 @@ static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
}
}
+static int rtl8365mb_port_change_mtu(struct dsa_switch *ds, int port,
+ int new_mtu)
+{
+ struct realtek_priv *priv = ds->priv;
+ int frame_size;
+
+ /* When a new MTU is set, DSA always sets the CPU port's MTU to the
+ * largest MTU of the slave ports. Because the switch only has a global
+ * RX length register, it is enough to handle only the CPU port here.
+ */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ frame_size = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ dev_dbg(priv->dev, "changing mtu to %d (frame size: %d)\n",
+ new_mtu, frame_size);
+
+ return regmap_update_bits(priv->map, RTL8365MB_CFG0_MAX_LEN_REG,
+ RTL8365MB_CFG0_MAX_LEN_MASK,
+ FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK,
+ frame_size));
+}
+
+static int rtl8365mb_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return RTL8365MB_CFG0_MAX_LEN_MAX - VLAN_ETH_HLEN - ETH_FCS_LEN;
+}
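A short arithmetic note on the two callbacks above, assuming the standard header lengths from the kernel's if_ether.h/if_vlan.h (a reading aid, not part of the patch):

/*
 * frame_size = new_mtu + VLAN_ETH_HLEN (18) + ETH_FCS_LEN (4)
 *
 * so the default MTU of 1500 (ETH_DATA_LEN, used in rtl8365mb_setup())
 * programs a 1522-byte maximum frame length, and port_max_mtu() reports
 * RTL8365MB_CFG0_MAX_LEN_MAX minus those same 22 bytes of overhead.
 */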
+
static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
u8 state)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
enum rtl8365mb_stp_state val;
int msti = 0;
@@ -1059,36 +1188,34 @@ static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
val = RTL8365MB_STP_STATE_FORWARDING;
break;
default:
- dev_err(smi->dev, "invalid STP state: %u\n", state);
+ dev_err(priv->dev, "invalid STP state: %u\n", state);
return;
}
- regmap_update_bits(smi->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
+ regmap_update_bits(priv->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(port),
val << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(port));
}
-static int rtl8365mb_port_set_learning(struct realtek_smi *smi, int port,
+static int rtl8365mb_port_set_learning(struct realtek_priv *priv, int port,
bool enable)
{
- struct rtl8365mb *mb = smi->chip_data;
-
/* Enable/disable learning by limiting the number of L2 addresses the
* port can learn. Realtek documentation states that a limit of zero
* disables learning. When enabling learning, set it to the chip's
* maximum.
*/
- return regmap_write(smi->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
- enable ? mb->learn_limit_max : 0);
+ return regmap_write(priv->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
+ enable ? RTL8365MB_LEARN_LIMIT_MAX : 0);
}
-static int rtl8365mb_port_set_isolation(struct realtek_smi *smi, int port,
+static int rtl8365mb_port_set_isolation(struct realtek_priv *priv, int port,
u32 mask)
{
- return regmap_write(smi->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
+ return regmap_write(priv->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
}
-static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
+static int rtl8365mb_mib_counter_read(struct realtek_priv *priv, int port,
u32 offset, u32 length, u64 *mibvalue)
{
u64 tmpvalue = 0;
@@ -1100,13 +1227,13 @@ static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
* and then poll the control register before reading the value from some
* counter registers.
*/
- ret = regmap_write(smi->map, RTL8365MB_MIB_ADDRESS_REG,
+ ret = regmap_write(priv->map, RTL8365MB_MIB_ADDRESS_REG,
RTL8365MB_MIB_ADDRESS(port, offset));
if (ret)
return ret;
/* Poll for completion */
- ret = regmap_read_poll_timeout(smi->map, RTL8365MB_MIB_CTRL0_REG, val,
+ ret = regmap_read_poll_timeout(priv->map, RTL8365MB_MIB_CTRL0_REG, val,
!(val & RTL8365MB_MIB_CTRL0_BUSY_MASK),
10, 100);
if (ret)
@@ -1128,7 +1255,7 @@ static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
/* Read the MIB counter 16 bits at a time */
for (i = 0; i < length; i++) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8365MB_MIB_COUNTER_REG(offset - i), &val);
if (ret)
return ret;
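The loop above fetches the counter one 16-bit register at a time; the accumulation itself falls outside this hunk, but it amounts to shifting each successive word in below the previous ones, roughly as in this self-contained sketch (names are illustrative):

static u64 fold_mib_words(const u16 *words, int length)
{
	u64 value = 0;
	int i;

	/* each regmap_read() result is one 16-bit slice of the counter */
	for (i = 0; i < length; i++)
		value = (value << 16) | words[i];

	return value;
}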
@@ -1144,21 +1271,21 @@ static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
static void rtl8365mb_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb *mb;
int ret;
int i;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mutex_lock(&mb->mib_lock);
for (i = 0; i < RTL8365MB_MIB_END; i++) {
struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
- ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ ret = rtl8365mb_mib_counter_read(priv, port, mib->offset,
mib->length, &data[i]);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to read port %d counters: %d\n", port,
ret);
break;
@@ -1192,15 +1319,15 @@ static int rtl8365mb_get_sset_count(struct dsa_switch *ds, int port, int sset)
static void rtl8365mb_get_phy_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_phy_stats *phy_stats)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_mib_counter *mib;
struct rtl8365mb *mb;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3StatsSymbolErrors];
mutex_lock(&mb->mib_lock);
- rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ rtl8365mb_mib_counter_read(priv, port, mib->offset, mib->length,
&phy_stats->SymbolErrorDuringCarrier);
mutex_unlock(&mb->mib_lock);
}
@@ -1228,12 +1355,12 @@ static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
[RTL8365MB_MIB_dot3StatsExcessiveCollisions] = 1,
};
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb *mb;
int ret;
int i;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mutex_lock(&mb->mib_lock);
for (i = 0; i < RTL8365MB_MIB_END; i++) {
@@ -1243,7 +1370,7 @@ static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
if (!cnt[i])
continue;
- ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ ret = rtl8365mb_mib_counter_read(priv, port, mib->offset,
mib->length, &cnt[i]);
if (ret)
break;
@@ -1293,20 +1420,20 @@ static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
static void rtl8365mb_get_ctrl_stats(struct dsa_switch *ds, int port,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_mib_counter *mib;
struct rtl8365mb *mb;
- mb = smi->chip_data;
+ mb = priv->chip_data;
mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3ControlInUnknownOpcodes];
mutex_lock(&mb->mib_lock);
- rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ rtl8365mb_mib_counter_read(priv, port, mib->offset, mib->length,
&ctrl_stats->UnsupportedOpcodesReceived);
mutex_unlock(&mb->mib_lock);
}
-static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
+static void rtl8365mb_stats_update(struct realtek_priv *priv, int port)
{
u64 cnt[RTL8365MB_MIB_END] = {
[RTL8365MB_MIB_ifOutOctets] = 1,
@@ -1325,7 +1452,7 @@ static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
[RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
[RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
};
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
struct rtnl_link_stats64 *stats;
int ret;
int i;
@@ -1340,7 +1467,7 @@ static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
if (!cnt[i])
continue;
- ret = rtl8365mb_mib_counter_read(smi, port, c->offset,
+ ret = rtl8365mb_mib_counter_read(priv, port, c->offset,
c->length, &cnt[i]);
if (ret)
break;
@@ -1390,9 +1517,9 @@ static void rtl8365mb_stats_poll(struct work_struct *work)
struct rtl8365mb_port *p = container_of(to_delayed_work(work),
struct rtl8365mb_port,
mib_work);
- struct realtek_smi *smi = p->smi;
+ struct realtek_priv *priv = p->priv;
- rtl8365mb_stats_update(smi, p->index);
+ rtl8365mb_stats_update(priv, p->index);
schedule_delayed_work(&p->mib_work, RTL8365MB_STATS_INTERVAL_JIFFIES);
}
@@ -1400,11 +1527,11 @@ static void rtl8365mb_stats_poll(struct work_struct *work)
static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
struct rtnl_link_stats64 *s)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
- mb = smi->chip_data;
+ mb = priv->chip_data;
p = &mb->ports[port];
spin_lock(&p->stats_lock);
@@ -1412,9 +1539,9 @@ static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
spin_unlock(&p->stats_lock);
}
-static void rtl8365mb_stats_setup(struct realtek_smi *smi)
+static void rtl8365mb_stats_setup(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
int i;
/* Per-chip global mutex to protect MIB counter access, since doing
@@ -1422,10 +1549,10 @@ static void rtl8365mb_stats_setup(struct realtek_smi *smi)
*/
mutex_init(&mb->mib_lock);
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(smi->ds, i))
+ if (dsa_is_unused_port(priv->ds, i))
continue;
/* Per-port spinlock to protect the stats64 data */
@@ -1438,45 +1565,42 @@ static void rtl8365mb_stats_setup(struct realtek_smi *smi)
}
}
-static void rtl8365mb_stats_teardown(struct realtek_smi *smi)
+static void rtl8365mb_stats_teardown(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
int i;
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(smi->ds, i))
+ if (dsa_is_unused_port(priv->ds, i))
continue;
cancel_delayed_work_sync(&p->mib_work);
}
}
-static int rtl8365mb_get_and_clear_status_reg(struct realtek_smi *smi, u32 reg,
+static int rtl8365mb_get_and_clear_status_reg(struct realtek_priv *priv, u32 reg,
u32 *val)
{
int ret;
- ret = regmap_read(smi->map, reg, val);
+ ret = regmap_read(priv->map, reg, val);
if (ret)
return ret;
- return regmap_write(smi->map, reg, *val);
+ return regmap_write(priv->map, reg, *val);
}
static irqreturn_t rtl8365mb_irq(int irq, void *data)
{
- struct realtek_smi *smi = data;
+ struct realtek_priv *priv = data;
unsigned long line_changes = 0;
- struct rtl8365mb *mb;
u32 stat;
int line;
int ret;
- mb = smi->chip_data;
-
- ret = rtl8365mb_get_and_clear_status_reg(smi, RTL8365MB_INTR_STATUS_REG,
+ ret = rtl8365mb_get_and_clear_status_reg(priv, RTL8365MB_INTR_STATUS_REG,
&stat);
if (ret)
goto out_error;
@@ -1487,27 +1611,27 @@ static irqreturn_t rtl8365mb_irq(int irq, void *data)
u32 val;
ret = rtl8365mb_get_and_clear_status_reg(
- smi, RTL8365MB_PORT_LINKUP_IND_REG, &val);
+ priv, RTL8365MB_PORT_LINKUP_IND_REG, &val);
if (ret)
goto out_error;
linkup_ind = FIELD_GET(RTL8365MB_PORT_LINKUP_IND_MASK, val);
ret = rtl8365mb_get_and_clear_status_reg(
- smi, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
+ priv, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
if (ret)
goto out_error;
linkdown_ind = FIELD_GET(RTL8365MB_PORT_LINKDOWN_IND_MASK, val);
- line_changes = (linkup_ind | linkdown_ind) & mb->port_mask;
+ line_changes = linkup_ind | linkdown_ind;
}
if (!line_changes)
goto out_none;
- for_each_set_bit(line, &line_changes, smi->num_ports) {
- int child_irq = irq_find_mapping(smi->irqdomain, line);
+ for_each_set_bit(line, &line_changes, priv->num_ports) {
+ int child_irq = irq_find_mapping(priv->irqdomain, line);
handle_nested_irq(child_irq);
}
@@ -1515,7 +1639,7 @@ static irqreturn_t rtl8365mb_irq(int irq, void *data)
return IRQ_HANDLED;
out_error:
- dev_err(smi->dev, "failed to read interrupt status: %d\n", ret);
+ dev_err(priv->dev, "failed to read interrupt status: %d\n", ret);
out_none:
return IRQ_NONE;
@@ -1550,27 +1674,27 @@ static const struct irq_domain_ops rtl8365mb_irqdomain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static int rtl8365mb_set_irq_enable(struct realtek_smi *smi, bool enable)
+static int rtl8365mb_set_irq_enable(struct realtek_priv *priv, bool enable)
{
- return regmap_update_bits(smi->map, RTL8365MB_INTR_CTRL_REG,
+ return regmap_update_bits(priv->map, RTL8365MB_INTR_CTRL_REG,
RTL8365MB_INTR_LINK_CHANGE_MASK,
FIELD_PREP(RTL8365MB_INTR_LINK_CHANGE_MASK,
enable ? 1 : 0));
}
-static int rtl8365mb_irq_enable(struct realtek_smi *smi)
+static int rtl8365mb_irq_enable(struct realtek_priv *priv)
{
- return rtl8365mb_set_irq_enable(smi, true);
+ return rtl8365mb_set_irq_enable(priv, true);
}
-static int rtl8365mb_irq_disable(struct realtek_smi *smi)
+static int rtl8365mb_irq_disable(struct realtek_priv *priv)
{
- return rtl8365mb_set_irq_enable(smi, false);
+ return rtl8365mb_set_irq_enable(priv, false);
}
-static int rtl8365mb_irq_setup(struct realtek_smi *smi)
+static int rtl8365mb_irq_setup(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
struct device_node *intc;
u32 irq_trig;
int virq;
@@ -1579,9 +1703,9 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
int ret;
int i;
- intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ intc = of_get_child_by_name(priv->dev->of_node, "interrupt-controller");
if (!intc) {
- dev_err(smi->dev, "missing child interrupt-controller node\n");
+ dev_err(priv->dev, "missing child interrupt-controller node\n");
return -EINVAL;
}
@@ -1589,24 +1713,24 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
irq = of_irq_get(intc, 0);
if (irq <= 0) {
if (irq != -EPROBE_DEFER)
- dev_err(smi->dev, "failed to get parent irq: %d\n",
+ dev_err(priv->dev, "failed to get parent irq: %d\n",
irq);
ret = irq ? irq : -EINVAL;
goto out_put_node;
}
- smi->irqdomain = irq_domain_add_linear(intc, smi->num_ports,
- &rtl8365mb_irqdomain_ops, smi);
- if (!smi->irqdomain) {
- dev_err(smi->dev, "failed to add irq domain\n");
+ priv->irqdomain = irq_domain_add_linear(intc, priv->num_ports,
+ &rtl8365mb_irqdomain_ops, priv);
+ if (!priv->irqdomain) {
+ dev_err(priv->dev, "failed to add irq domain\n");
ret = -ENOMEM;
goto out_put_node;
}
- for (i = 0; i < smi->num_ports; i++) {
- virq = irq_create_mapping(smi->irqdomain, i);
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_create_mapping(priv->irqdomain, i);
if (!virq) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to create irq domain mapping\n");
ret = -EINVAL;
goto out_remove_irqdomain;
@@ -1627,40 +1751,40 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
val = RTL8365MB_INTR_POLARITY_LOW;
break;
default:
- dev_err(smi->dev, "unsupported irq trigger type %u\n",
+ dev_err(priv->dev, "unsupported irq trigger type %u\n",
irq_trig);
ret = -EINVAL;
goto out_remove_irqdomain;
}
- ret = regmap_update_bits(smi->map, RTL8365MB_INTR_POLARITY_REG,
+ ret = regmap_update_bits(priv->map, RTL8365MB_INTR_POLARITY_REG,
RTL8365MB_INTR_POLARITY_MASK,
FIELD_PREP(RTL8365MB_INTR_POLARITY_MASK, val));
if (ret)
goto out_remove_irqdomain;
/* Disable the interrupt in case the chip has it enabled on reset */
- ret = rtl8365mb_irq_disable(smi);
+ ret = rtl8365mb_irq_disable(priv);
if (ret)
goto out_remove_irqdomain;
/* Clear the interrupt status register */
- ret = regmap_write(smi->map, RTL8365MB_INTR_STATUS_REG,
+ ret = regmap_write(priv->map, RTL8365MB_INTR_STATUS_REG,
RTL8365MB_INTR_ALL_MASK);
if (ret)
goto out_remove_irqdomain;
ret = request_threaded_irq(irq, NULL, rtl8365mb_irq, IRQF_ONESHOT,
- "rtl8365mb", smi);
+ "rtl8365mb", priv);
if (ret) {
- dev_err(smi->dev, "failed to request irq: %d\n", ret);
+ dev_err(priv->dev, "failed to request irq: %d\n", ret);
goto out_remove_irqdomain;
}
/* Store the irq so that we know to free it during teardown */
mb->irq = irq;
- ret = rtl8365mb_irq_enable(smi);
+ ret = rtl8365mb_irq_enable(priv);
if (ret)
goto out_free_irq;
@@ -1669,17 +1793,17 @@ static int rtl8365mb_irq_setup(struct realtek_smi *smi)
return 0;
out_free_irq:
- free_irq(mb->irq, smi);
+ free_irq(mb->irq, priv);
mb->irq = 0;
out_remove_irqdomain:
- for (i = 0; i < smi->num_ports; i++) {
- virq = irq_find_mapping(smi->irqdomain, i);
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_find_mapping(priv->irqdomain, i);
irq_dispose_mapping(virq);
}
- irq_domain_remove(smi->irqdomain);
- smi->irqdomain = NULL;
+ irq_domain_remove(priv->irqdomain);
+ priv->irqdomain = NULL;
out_put_node:
of_node_put(intc);
@@ -1687,36 +1811,36 @@ out_put_node:
return ret;
}
-static void rtl8365mb_irq_teardown(struct realtek_smi *smi)
+static void rtl8365mb_irq_teardown(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
int virq;
int i;
if (mb->irq) {
- free_irq(mb->irq, smi);
+ free_irq(mb->irq, priv);
mb->irq = 0;
}
- if (smi->irqdomain) {
- for (i = 0; i < smi->num_ports; i++) {
- virq = irq_find_mapping(smi->irqdomain, i);
+ if (priv->irqdomain) {
+ for (i = 0; i < priv->num_ports; i++) {
+ virq = irq_find_mapping(priv->irqdomain, i);
irq_dispose_mapping(virq);
}
- irq_domain_remove(smi->irqdomain);
- smi->irqdomain = NULL;
+ irq_domain_remove(priv->irqdomain);
+ priv->irqdomain = NULL;
}
}
-static int rtl8365mb_cpu_config(struct realtek_smi *smi)
+static int rtl8365mb_cpu_config(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
struct rtl8365mb_cpu *cpu = &mb->cpu;
u32 val;
int ret;
- ret = regmap_update_bits(smi->map, RTL8365MB_CPU_PORT_MASK_REG,
+ ret = regmap_update_bits(priv->map, RTL8365MB_CPU_PORT_MASK_REG,
RTL8365MB_CPU_PORT_MASK_MASK,
FIELD_PREP(RTL8365MB_CPU_PORT_MASK_MASK,
cpu->mask));
@@ -1728,27 +1852,61 @@ static int rtl8365mb_cpu_config(struct realtek_smi *smi)
FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) |
FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) |
FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) |
- FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port & 0x7) |
FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK,
- cpu->trap_port >> 3);
- ret = regmap_write(smi->map, RTL8365MB_CPU_CTRL_REG, val);
+ cpu->trap_port >> 3 & 0x1);
+ ret = regmap_write(priv->map, RTL8365MB_CPU_CTRL_REG, val);
if (ret)
return ret;
return 0;
}
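The trap-port handling above is worth spelling out: the CPU trap port number is wider than the 3-bit field in CPU_CTRL, so it is split into a low part and a 1-bit extension. A minimal sketch of the split (the helper name is illustrative):

static void split_trap_port(unsigned int port, u32 *low, u32 *ext)
{
	*low = port & 0x7;		/* RTL8365MB_CPU_CTRL_TRAP_PORT_MASK */
	*ext = (port >> 3) & 0x1;	/* RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK */
}

/* e.g. port 6 encodes as (6, 0) and port 8 as (0, 1) */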
-static int rtl8365mb_switch_init(struct realtek_smi *smi)
+static int rtl8365mb_change_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct rtl8365mb *mb;
+
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
+
+ switch (proto) {
+ case DSA_TAG_PROTO_RTL8_4:
+ cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
+ cpu->position = RTL8365MB_CPU_POS_AFTER_SA;
+ break;
+ case DSA_TAG_PROTO_RTL8_4T:
+ cpu->format = RTL8365MB_CPU_FORMAT_8BYTES;
+ cpu->position = RTL8365MB_CPU_POS_BEFORE_CRC;
+ break;
+ /* The switch also supports a 4-byte tag format, similar to rtl4a but
+ * with the same 0x04 8-bit version field and probably 8-bit source and
+ * destination port fields. There is no public documentation for it, so
+ * it is not supported yet and probably never will be.
+ */
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ return rtl8365mb_cpu_config(priv);
+}
+
+static int rtl8365mb_switch_init(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
+ const struct rtl8365mb_chip_info *ci;
int ret;
int i;
+ ci = mb->chip_info;
+
/* Do any chip-specific init jam before getting to the common stuff */
- if (mb->jam_table) {
- for (i = 0; i < mb->jam_size; i++) {
- ret = regmap_write(smi->map, mb->jam_table[i].reg,
- mb->jam_table[i].val);
+ if (ci->jam_table) {
+ for (i = 0; i < ci->jam_size; i++) {
+ ret = regmap_write(priv->map, ci->jam_table[i].reg,
+ ci->jam_table[i].val);
if (ret)
return ret;
}
@@ -1756,7 +1914,7 @@ static int rtl8365mb_switch_init(struct realtek_smi *smi)
/* Common init jam */
for (i = 0; i < ARRAY_SIZE(rtl8365mb_init_jam_common); i++) {
- ret = regmap_write(smi->map, rtl8365mb_init_jam_common[i].reg,
+ ret = regmap_write(priv->map, rtl8365mb_init_jam_common[i].reg,
rtl8365mb_init_jam_common[i].val);
if (ret)
return ret;
@@ -1765,75 +1923,80 @@ static int rtl8365mb_switch_init(struct realtek_smi *smi)
return 0;
}
-static int rtl8365mb_reset_chip(struct realtek_smi *smi)
+static int rtl8365mb_reset_chip(struct realtek_priv *priv)
{
u32 val;
- realtek_smi_write_reg_noack(smi, RTL8365MB_CHIP_RESET_REG,
- FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK,
- 1));
+ priv->write_reg_noack(priv, RTL8365MB_CHIP_RESET_REG,
+ FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK, 1));
/* Realtek documentation says the chip needs 1 second to reset. Sleep
* for 100 ms before accessing any registers to prevent ACK timeouts.
*/
msleep(100);
- return regmap_read_poll_timeout(smi->map, RTL8365MB_CHIP_RESET_REG, val,
+ return regmap_read_poll_timeout(priv->map, RTL8365MB_CHIP_RESET_REG, val,
!(val & RTL8365MB_CHIP_RESET_HW_MASK),
20000, 1e6);
}
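A note on the two bare numbers in the poll above (regmap_read_poll_timeout()'s sleep_us and timeout_us parameters):

/*
 * The reset register is re-read every 20000 us (20 ms) and the poll gives
 * up after 1e6 us (1 s), i.e. the driver waits up to the full second the
 * Realtek documentation quotes for the hardware reset bit to self-clear.
 */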
static int rtl8365mb_setup(struct dsa_switch *ds)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
+ struct rtl8365mb_cpu *cpu;
+ struct dsa_port *cpu_dp;
struct rtl8365mb *mb;
int ret;
int i;
- mb = smi->chip_data;
+ mb = priv->chip_data;
+ cpu = &mb->cpu;
- ret = rtl8365mb_reset_chip(smi);
+ ret = rtl8365mb_reset_chip(priv);
if (ret) {
- dev_err(smi->dev, "failed to reset chip: %d\n", ret);
+ dev_err(priv->dev, "failed to reset chip: %d\n", ret);
goto out_error;
}
/* Configure switch to vendor-defined initial state */
- ret = rtl8365mb_switch_init(smi);
+ ret = rtl8365mb_switch_init(priv);
if (ret) {
- dev_err(smi->dev, "failed to initialize switch: %d\n", ret);
+ dev_err(priv->dev, "failed to initialize switch: %d\n", ret);
goto out_error;
}
/* Set up cascading IRQs */
- ret = rtl8365mb_irq_setup(smi);
+ ret = rtl8365mb_irq_setup(priv);
if (ret == -EPROBE_DEFER)
return ret;
else if (ret)
- dev_info(smi->dev, "no interrupt support\n");
+ dev_info(priv->dev, "no interrupt support\n");
/* Configure CPU tagging */
- ret = rtl8365mb_cpu_config(smi);
+ dsa_switch_for_each_cpu_port(cpu_dp, priv->ds) {
+ cpu->mask |= BIT(cpu_dp->index);
+
+ if (cpu->trap_port == RTL8365MB_MAX_NUM_PORTS)
+ cpu->trap_port = cpu_dp->index;
+ }
+ cpu->enable = cpu->mask > 0;
+ ret = rtl8365mb_cpu_config(priv);
if (ret)
goto out_teardown_irq;
/* Configure ports */
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
struct rtl8365mb_port *p = &mb->ports[i];
- if (dsa_is_unused_port(smi->ds, i))
+ if (dsa_is_unused_port(priv->ds, i))
continue;
- /* Set up per-port private data */
- p->smi = smi;
- p->index = i;
-
/* Forward only to the CPU */
- ret = rtl8365mb_port_set_isolation(smi, i, BIT(smi->cpu_port));
+ ret = rtl8365mb_port_set_isolation(priv, i, cpu->mask);
if (ret)
goto out_teardown_irq;
/* Disable learning */
- ret = rtl8365mb_port_set_learning(smi, i, false);
+ ret = rtl8365mb_port_set_learning(priv, i, false);
if (ret)
goto out_teardown_irq;
@@ -1841,29 +2004,32 @@ static int rtl8365mb_setup(struct dsa_switch *ds)
* ports will still forward frames to the CPU despite being
* administratively down by default.
*/
- rtl8365mb_port_stp_state_set(smi->ds, i, BR_STATE_DISABLED);
+ rtl8365mb_port_stp_state_set(priv->ds, i, BR_STATE_DISABLED);
+
+ /* Set up per-port private data */
+ p->priv = priv;
+ p->index = i;
}
- /* Set maximum packet length to 1536 bytes */
- ret = regmap_update_bits(smi->map, RTL8365MB_CFG0_MAX_LEN_REG,
- RTL8365MB_CFG0_MAX_LEN_MASK,
- FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK, 1536));
+ ret = rtl8365mb_port_change_mtu(ds, cpu->trap_port, ETH_DATA_LEN);
if (ret)
goto out_teardown_irq;
- ret = realtek_smi_setup_mdio(smi);
- if (ret) {
- dev_err(smi->dev, "could not set up MDIO bus\n");
- goto out_teardown_irq;
+ if (priv->setup_interface) {
+ ret = priv->setup_interface(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ goto out_teardown_irq;
+ }
}
/* Start statistics counter polling */
- rtl8365mb_stats_setup(smi);
+ rtl8365mb_stats_setup(priv);
return 0;
out_teardown_irq:
- rtl8365mb_irq_teardown(smi);
+ rtl8365mb_irq_teardown(priv);
out_error:
return ret;
@@ -1871,10 +2037,10 @@ out_error:
static void rtl8365mb_teardown(struct dsa_switch *ds)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
- rtl8365mb_stats_teardown(smi);
- rtl8365mb_irq_teardown(smi);
+ rtl8365mb_stats_teardown(priv);
+ rtl8365mb_irq_teardown(priv);
}
static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
@@ -1904,61 +2070,56 @@ static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
return 0;
}
-static int rtl8365mb_detect(struct realtek_smi *smi)
+static int rtl8365mb_detect(struct realtek_priv *priv)
{
- struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb *mb = priv->chip_data;
u32 chip_id;
u32 chip_ver;
int ret;
+ int i;
- ret = rtl8365mb_get_chip_id_and_ver(smi->map, &chip_id, &chip_ver);
+ ret = rtl8365mb_get_chip_id_and_ver(priv->map, &chip_id, &chip_ver);
if (ret) {
- dev_err(smi->dev, "failed to read chip id and version: %d\n",
+ dev_err(priv->dev, "failed to read chip id and version: %d\n",
ret);
return ret;
}
- switch (chip_id) {
- case RTL8365MB_CHIP_ID_8365MB_VC:
- dev_info(smi->dev,
- "found an RTL8365MB-VC switch (ver=0x%04x)\n",
- chip_ver);
-
- smi->cpu_port = RTL8365MB_CPU_PORT_NUM_8365MB_VC;
- smi->num_ports = smi->cpu_port + 1;
-
- mb->smi = smi;
- mb->chip_id = chip_id;
- mb->chip_ver = chip_ver;
- mb->port_mask = BIT(smi->num_ports) - 1;
- mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC;
- mb->jam_table = rtl8365mb_init_jam_8365mb_vc;
- mb->jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc);
-
- mb->cpu.enable = 1;
- mb->cpu.mask = BIT(smi->cpu_port);
- mb->cpu.trap_port = smi->cpu_port;
- mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
- mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
- mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
- mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
+ for (i = 0; i < ARRAY_SIZE(rtl8365mb_chip_infos); i++) {
+ const struct rtl8365mb_chip_info *ci = &rtl8365mb_chip_infos[i];
- break;
- default:
- dev_err(smi->dev,
- "found an unknown Realtek switch (id=0x%04x, ver=0x%04x)\n",
- chip_id, chip_ver);
+ if (ci->chip_id == chip_id && ci->chip_ver == chip_ver) {
+ mb->chip_info = ci;
+ break;
+ }
+ }
+
+ if (!mb->chip_info) {
+ dev_err(priv->dev,
+ "unrecognized switch (id=0x%04x, ver=0x%04x)\n", chip_id,
+ chip_ver);
return -ENODEV;
}
+ dev_info(priv->dev, "found an %s switch\n", mb->chip_info->name);
+
+ priv->num_ports = RTL8365MB_MAX_NUM_PORTS;
+ mb->priv = priv;
+ mb->cpu.trap_port = RTL8365MB_MAX_NUM_PORTS;
+ mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
+ mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
+ mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
+ mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
+
return 0;
}
-static const struct dsa_switch_ops rtl8365mb_switch_ops = {
+static const struct dsa_switch_ops rtl8365mb_switch_ops_smi = {
.get_tag_protocol = rtl8365mb_get_tag_protocol,
+ .change_tag_protocol = rtl8365mb_change_tag_protocol,
.setup = rtl8365mb_setup,
.teardown = rtl8365mb_teardown,
- .phylink_validate = rtl8365mb_phylink_validate,
+ .phylink_get_caps = rtl8365mb_phylink_get_caps,
.phylink_mac_config = rtl8365mb_phylink_mac_config,
.phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
.phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
@@ -1970,20 +2131,50 @@ static const struct dsa_switch_ops rtl8365mb_switch_ops = {
.get_eth_mac_stats = rtl8365mb_get_mac_stats,
.get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
.get_stats64 = rtl8365mb_get_stats64,
+ .port_change_mtu = rtl8365mb_port_change_mtu,
+ .port_max_mtu = rtl8365mb_port_max_mtu,
};
-static const struct realtek_smi_ops rtl8365mb_smi_ops = {
+static const struct dsa_switch_ops rtl8365mb_switch_ops_mdio = {
+ .get_tag_protocol = rtl8365mb_get_tag_protocol,
+ .change_tag_protocol = rtl8365mb_change_tag_protocol,
+ .setup = rtl8365mb_setup,
+ .teardown = rtl8365mb_teardown,
+ .phylink_get_caps = rtl8365mb_phylink_get_caps,
+ .phylink_mac_config = rtl8365mb_phylink_mac_config,
+ .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
+ .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
+ .phy_read = rtl8365mb_dsa_phy_read,
+ .phy_write = rtl8365mb_dsa_phy_write,
+ .port_stp_state_set = rtl8365mb_port_stp_state_set,
+ .get_strings = rtl8365mb_get_strings,
+ .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
+ .get_sset_count = rtl8365mb_get_sset_count,
+ .get_eth_phy_stats = rtl8365mb_get_phy_stats,
+ .get_eth_mac_stats = rtl8365mb_get_mac_stats,
+ .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
+ .get_stats64 = rtl8365mb_get_stats64,
+ .port_change_mtu = rtl8365mb_port_change_mtu,
+ .port_max_mtu = rtl8365mb_port_max_mtu,
+};
+
+static const struct realtek_ops rtl8365mb_ops = {
.detect = rtl8365mb_detect,
.phy_read = rtl8365mb_phy_read,
.phy_write = rtl8365mb_phy_write,
};
-const struct realtek_smi_variant rtl8365mb_variant = {
- .ds_ops = &rtl8365mb_switch_ops,
- .ops = &rtl8365mb_smi_ops,
+const struct realtek_variant rtl8365mb_variant = {
+ .ds_ops_smi = &rtl8365mb_switch_ops_smi,
+ .ds_ops_mdio = &rtl8365mb_switch_ops_mdio,
+ .ops = &rtl8365mb_ops,
.clk_delay = 10,
.cmd_read = 0xb9,
.cmd_write = 0xb8,
.chip_data_sz = sizeof(struct rtl8365mb),
};
EXPORT_SYMBOL_GPL(rtl8365mb_variant);
+
+MODULE_AUTHOR("Alvin Šipraga <alsi@bang-olufsen.dk>");
+MODULE_DESCRIPTION("Driver for RTL8365MB-VC ethernet switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/realtek/rtl8366-core.c
index bdb8d8d34880..dc5f75be3017 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/realtek/rtl8366-core.c
@@ -11,18 +11,18 @@
#include <linux/if_bridge.h>
#include <net/dsa.h>
-#include "realtek-smi-core.h"
+#include "realtek.h"
-int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
+int rtl8366_mc_is_used(struct realtek_priv *priv, int mc_index, int *used)
{
int ret;
int i;
*used = 0;
- for (i = 0; i < smi->num_ports; i++) {
+ for (i = 0; i < priv->num_ports; i++) {
int index = 0;
- ret = smi->ops->get_mc_index(smi, i, &index);
+ ret = priv->ops->get_mc_index(priv, i, &index);
if (ret)
return ret;
@@ -38,13 +38,13 @@ EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
/**
* rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
- * @smi: the Realtek SMI device instance
+ * @priv: the Realtek SMI device instance
* @vid: the VLAN ID to look up or allocate
* @vlanmc: filled in with a valid member config if successful
* @return: index of a new member config or negative error number
*/
-static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
+static int rtl8366_obtain_mc(struct realtek_priv *priv, int vid,
struct rtl8366_vlan_mc *vlanmc)
{
struct rtl8366_vlan_4k vlan4k;
@@ -52,10 +52,10 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
int i;
/* Try to find an existing member config entry for this VID */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->get_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "error searching for VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
@@ -65,19 +65,19 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
}
/* We have no MC entry for this VID, try to find an empty one */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->get_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "error searching for VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
if (vlanmc->vid == 0 && vlanmc->member == 0) {
/* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret) {
- dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "error looking for 4K VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
@@ -86,30 +86,30 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
vlanmc->member = vlan4k.member;
vlanmc->untag = vlan4k.untag;
vlanmc->fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "unable to set/update VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
- dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
+ dev_dbg(priv->dev, "created new MC at index %d for VID %d\n",
i, vid);
return i;
}
}
/* MC table is full, try to find an unused entry and replace it */
- for (i = 0; i < smi->num_vlan_mc; i++) {
+ for (i = 0; i < priv->num_vlan_mc; i++) {
int used;
- ret = rtl8366_mc_is_used(smi, i, &used);
+ ret = rtl8366_mc_is_used(priv, i, &used);
if (ret)
return ret;
if (!used) {
/* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret)
return ret;
@@ -117,23 +117,23 @@ static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
vlanmc->member = vlan4k.member;
vlanmc->untag = vlan4k.untag;
vlanmc->fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, i, vlanmc);
if (ret) {
- dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ dev_err(priv->dev, "unable to set/update VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
- dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
+ dev_dbg(priv->dev, "recycled MC at index %i for VID %d\n",
i, vid);
return i;
}
}
- dev_err(smi->dev, "all VLAN member configurations are in use\n");
+ dev_err(priv->dev, "all VLAN member configurations are in use\n");
return -ENOSPC;
}
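The function above boils down to a three-pass search over the small member-config (MC) table. A compact standalone sketch of that strategy, assuming kernel types and <linux/errno.h> (the toy names are illustrative only):

struct toy_mc { int vid; unsigned int member; };

static int toy_obtain_mc(struct toy_mc *mc, int n, int vid,
			 bool (*in_use)(int index))
{
	int i;

	for (i = 0; i < n; i++)		/* pass 1: entry already holds this VID */
		if (mc[i].vid == vid)
			return i;

	for (i = 0; i < n; i++)		/* pass 2: completely empty entry */
		if (mc[i].vid == 0 && mc[i].member == 0) {
			mc[i].vid = vid;	/* real code fills from the 4K table */
			return i;
		}

	for (i = 0; i < n; i++)		/* pass 3: recycle an entry no port uses */
		if (!in_use(i)) {
			mc[i].vid = vid;
			return i;
		}

	return -ENOSPC;			/* all member configurations are in use */
}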
-int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
+int rtl8366_set_vlan(struct realtek_priv *priv, int vid, u32 member,
u32 untag, u32 fid)
{
struct rtl8366_vlan_mc vlanmc;
@@ -141,31 +141,31 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
int mc;
int ret;
- if (!smi->ops->is_vlan_valid(smi, vid))
+ if (!priv->ops->is_vlan_valid(priv, vid))
return -EINVAL;
- dev_dbg(smi->dev,
+ dev_dbg(priv->dev,
"setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, member, untag);
/* Update the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ ret = priv->ops->get_vlan_4k(priv, vid, &vlan4k);
if (ret)
return ret;
vlan4k.member |= member;
vlan4k.untag |= untag;
vlan4k.fid = fid;
- ret = smi->ops->set_vlan_4k(smi, &vlan4k);
+ ret = priv->ops->set_vlan_4k(priv, &vlan4k);
if (ret)
return ret;
- dev_dbg(smi->dev,
+ dev_dbg(priv->dev,
"resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, vlan4k.member, vlan4k.untag);
/* Find or allocate a member config for this VID */
- ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ ret = rtl8366_obtain_mc(priv, vid, &vlanmc);
if (ret < 0)
return ret;
mc = ret;
@@ -176,12 +176,12 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
vlanmc.fid = fid;
/* Commit updates to the MC entry */
- ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, mc, &vlanmc);
if (ret)
- dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
+ dev_err(priv->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
mc, vid);
else
- dev_dbg(smi->dev,
+ dev_dbg(priv->dev,
"resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
vid, vlanmc.member, vlanmc.untag);
@@ -189,37 +189,37 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
}
EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
-int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
+int rtl8366_set_pvid(struct realtek_priv *priv, unsigned int port,
unsigned int vid)
{
struct rtl8366_vlan_mc vlanmc;
int mc;
int ret;
- if (!smi->ops->is_vlan_valid(smi, vid))
+ if (!priv->ops->is_vlan_valid(priv, vid))
return -EINVAL;
/* Find or allocate a member config for this VID */
- ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ ret = rtl8366_obtain_mc(priv, vid, &vlanmc);
if (ret < 0)
return ret;
mc = ret;
- ret = smi->ops->set_mc_index(smi, port, mc);
+ ret = priv->ops->set_mc_index(priv, port, mc);
if (ret) {
- dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
+ dev_err(priv->dev, "set PVID: failed to set MC index %d for port %d\n",
mc, port);
return ret;
}
- dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
+ dev_dbg(priv->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
port, vid, mc);
return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
-int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
+int rtl8366_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
int ret;
@@ -229,52 +229,52 @@ int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable)
*/
if (enable) {
/* Make sure VLAN is ON */
- ret = smi->ops->enable_vlan(smi, true);
+ ret = priv->ops->enable_vlan(priv, true);
if (ret)
return ret;
- smi->vlan_enabled = true;
+ priv->vlan_enabled = true;
}
- ret = smi->ops->enable_vlan4k(smi, enable);
+ ret = priv->ops->enable_vlan4k(priv, enable);
if (ret)
return ret;
- smi->vlan4k_enabled = enable;
+ priv->vlan4k_enabled = enable;
return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_enable_vlan4k);
-int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable)
+int rtl8366_enable_vlan(struct realtek_priv *priv, bool enable)
{
int ret;
- ret = smi->ops->enable_vlan(smi, enable);
+ ret = priv->ops->enable_vlan(priv, enable);
if (ret)
return ret;
- smi->vlan_enabled = enable;
+ priv->vlan_enabled = enable;
/* If we turn VLAN off, make sure that we turn off
* 4k VLAN as well, if that happened to be on.
*/
if (!enable) {
- smi->vlan4k_enabled = false;
- ret = smi->ops->enable_vlan4k(smi, false);
+ priv->vlan4k_enabled = false;
+ ret = priv->ops->enable_vlan4k(priv, false);
}
return ret;
}
EXPORT_SYMBOL_GPL(rtl8366_enable_vlan);
-int rtl8366_reset_vlan(struct realtek_smi *smi)
+int rtl8366_reset_vlan(struct realtek_priv *priv)
{
struct rtl8366_vlan_mc vlanmc;
int ret;
int i;
- rtl8366_enable_vlan(smi, false);
- rtl8366_enable_vlan4k(smi, false);
+ rtl8366_enable_vlan(priv, false);
+ rtl8366_enable_vlan4k(priv, false);
/* Clear all VLAN member configurations */
vlanmc.vid = 0;
@@ -282,8 +282,8 @@ int rtl8366_reset_vlan(struct realtek_smi *smi)
vlanmc.member = 0;
vlanmc.untag = 0;
vlanmc.fid = 0;
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ for (i = 0; i < priv->num_vlan_mc; i++) {
+ ret = priv->ops->set_vlan_mc(priv, i, &vlanmc);
if (ret)
return ret;
}
@@ -298,12 +298,12 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
{
bool untagged = !!(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
bool pvid = !!(vlan->flags & BRIDGE_VLAN_INFO_PVID);
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
u32 member = 0;
u32 untag = 0;
int ret;
- if (!smi->ops->is_vlan_valid(smi, vlan->vid)) {
+ if (!priv->ops->is_vlan_valid(priv, vlan->vid)) {
NL_SET_ERR_MSG_MOD(extack, "VLAN ID not valid");
return -EINVAL;
}
@@ -312,13 +312,13 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
* FIXME: what's with this 4k business?
* Just rtl8366_enable_vlan() seems inconclusive.
*/
- ret = rtl8366_enable_vlan4k(smi, true);
+ ret = rtl8366_enable_vlan4k(priv, true);
if (ret) {
NL_SET_ERR_MSG_MOD(extack, "Failed to enable VLAN 4K");
return ret;
}
- dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n",
+ dev_dbg(priv->dev, "add VLAN %d on port %d, %s, %s\n",
vlan->vid, port, untagged ? "untagged" : "tagged",
pvid ? "PVID" : "no PVID");
@@ -327,18 +327,18 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
if (untagged)
untag |= BIT(port);
- ret = rtl8366_set_vlan(smi, vlan->vid, member, untag, 0);
+ ret = rtl8366_set_vlan(priv, vlan->vid, member, untag, 0);
if (ret) {
- dev_err(smi->dev, "failed to set up VLAN %04x", vlan->vid);
+ dev_err(priv->dev, "failed to set up VLAN %04x", vlan->vid);
return ret;
}
if (!pvid)
return 0;
- ret = rtl8366_set_pvid(smi, port, vlan->vid);
+ ret = rtl8366_set_pvid(priv, port, vlan->vid);
if (ret) {
- dev_err(smi->dev, "failed to set PVID on port %d to VLAN %04x",
+ dev_err(priv->dev, "failed to set PVID on port %d to VLAN %04x",
port, vlan->vid);
return ret;
}
@@ -350,15 +350,15 @@ EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
int rtl8366_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret, i;
- dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port);
+ dev_dbg(priv->dev, "del VLAN %d on port %d\n", vlan->vid, port);
- for (i = 0; i < smi->num_vlan_mc; i++) {
+ for (i = 0; i < priv->num_vlan_mc; i++) {
struct rtl8366_vlan_mc vlanmc;
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
+ ret = priv->ops->get_vlan_mc(priv, i, &vlanmc);
if (ret)
return ret;
@@ -376,9 +376,9 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
vlanmc.priority = 0;
vlanmc.fid = 0;
}
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ ret = priv->ops->set_vlan_mc(priv, i, &vlanmc);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to remove VLAN %04x\n",
vlan->vid);
return ret;
@@ -394,15 +394,15 @@ EXPORT_SYMBOL_GPL(rtl8366_vlan_del);
void rtl8366_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8366_mib_counter *mib;
int i;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return;
- for (i = 0; i < smi->num_mib_counters; i++) {
- mib = &smi->mib_counters[i];
+ for (i = 0; i < priv->num_mib_counters; i++) {
+ mib = &priv->mib_counters[i];
strncpy(data + i * ETH_GSTRING_LEN,
mib->name, ETH_GSTRING_LEN);
}
@@ -411,35 +411,35 @@ EXPORT_SYMBOL_GPL(rtl8366_get_strings);
int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
/* We only support SS_STATS */
if (sset != ETH_SS_STATS)
return 0;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return -EINVAL;
- return smi->num_mib_counters;
+ return priv->num_mib_counters;
}
EXPORT_SYMBOL_GPL(rtl8366_get_sset_count);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int i;
int ret;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return;
- for (i = 0; i < smi->num_mib_counters; i++) {
+ for (i = 0; i < priv->num_mib_counters; i++) {
struct rtl8366_mib_counter *mib;
u64 mibvalue = 0;
- mib = &smi->mib_counters[i];
- ret = smi->ops->get_mib_counter(smi, port, mib, &mibvalue);
+ mib = &priv->mib_counters[i];
+ ret = priv->ops->get_mib_counter(priv, port, mib, &mibvalue);
if (ret) {
- dev_err(smi->dev, "error reading MIB counter %s\n",
+ dev_err(priv->dev, "error reading MIB counter %s\n",
mib->name);
}
data[i] = mibvalue;
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index 03deacd83e61..25f88022b9e4 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -21,7 +21,7 @@
#include <linux/of_irq.h>
#include <linux/regmap.h>
-#include "realtek-smi-core.h"
+#include "realtek.h"
#define RTL8366RB_PORT_NUM_CPU 5
#define RTL8366RB_NUM_PORTS 6
@@ -396,7 +396,7 @@ static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
{ 0, 70, 2, "IfOutBroadcastPkts" },
};
-static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
+static int rtl8366rb_get_mib_counter(struct realtek_priv *priv,
int port,
struct rtl8366_mib_counter *mib,
u64 *mibvalue)
@@ -412,12 +412,12 @@ static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
/* Write the accessed counter address first; the ASIC will then prepare
 * the 64-bit counter to be retrieved
 */
- ret = regmap_write(smi->map, addr, 0); /* Write whatever */
+ ret = regmap_write(priv->map, addr, 0); /* Write whatever */
if (ret)
return ret;
/* Read MIB control register */
- ret = regmap_read(smi->map, RTL8366RB_MIB_CTRL_REG, &val);
+ ret = regmap_read(priv->map, RTL8366RB_MIB_CTRL_REG, &val);
if (ret)
return -EIO;
@@ -430,7 +430,7 @@ static int rtl8366rb_get_mib_counter(struct realtek_smi *smi,
/* Read each individual MIB 16 bits at the time */
*mibvalue = 0;
for (i = mib->length; i > 0; i--) {
- ret = regmap_read(smi->map, addr + (i - 1), &val);
+ ret = regmap_read(priv->map, addr + (i - 1), &val);
if (ret)
return ret;
*mibvalue = (*mibvalue << 16) | (val & 0xFFFF);
@@ -455,38 +455,38 @@ static u32 rtl8366rb_get_irqmask(struct irq_data *d)
static void rtl8366rb_mask_irq(struct irq_data *d)
{
- struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+ struct realtek_priv *priv = irq_data_get_irq_chip_data(d);
int ret;
- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
rtl8366rb_get_irqmask(d), 0);
if (ret)
- dev_err(smi->dev, "could not mask IRQ\n");
+ dev_err(priv->dev, "could not mask IRQ\n");
}
static void rtl8366rb_unmask_irq(struct irq_data *d)
{
- struct realtek_smi *smi = irq_data_get_irq_chip_data(d);
+ struct realtek_priv *priv = irq_data_get_irq_chip_data(d);
int ret;
- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_MASK_REG,
rtl8366rb_get_irqmask(d),
rtl8366rb_get_irqmask(d));
if (ret)
- dev_err(smi->dev, "could not unmask IRQ\n");
+ dev_err(priv->dev, "could not unmask IRQ\n");
}
static irqreturn_t rtl8366rb_irq(int irq, void *data)
{
- struct realtek_smi *smi = data;
+ struct realtek_priv *priv = data;
u32 stat;
int ret;
/* This clears the IRQ status register */
- ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
&stat);
if (ret) {
- dev_err(smi->dev, "can't read interrupt status\n");
+ dev_err(priv->dev, "can't read interrupt status\n");
return IRQ_NONE;
}
stat &= RTL8366RB_INTERRUPT_VALID;
@@ -502,7 +502,7 @@ static irqreturn_t rtl8366rb_irq(int irq, void *data)
*/
if (line < 12 && line > 5)
line -= 5;
- child_irq = irq_find_mapping(smi->irqdomain, line);
+ child_irq = irq_find_mapping(priv->irqdomain, line);
handle_nested_irq(child_irq);
}
return IRQ_HANDLED;
@@ -538,7 +538,7 @@ static const struct irq_domain_ops rtl8366rb_irqdomain_ops = {
.xlate = irq_domain_xlate_onecell,
};
-static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
+static int rtl8366rb_setup_cascaded_irq(struct realtek_priv *priv)
{
struct device_node *intc;
unsigned long irq_trig;
@@ -547,24 +547,24 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
u32 val;
int i;
- intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ intc = of_get_child_by_name(priv->dev->of_node, "interrupt-controller");
if (!intc) {
- dev_err(smi->dev, "missing child interrupt-controller node\n");
+ dev_err(priv->dev, "missing child interrupt-controller node\n");
return -EINVAL;
}
/* RTL8366RB IRQs cascade off this one */
irq = of_irq_get(intc, 0);
if (irq <= 0) {
- dev_err(smi->dev, "failed to get parent IRQ\n");
+ dev_err(priv->dev, "failed to get parent IRQ\n");
ret = irq ? irq : -EINVAL;
goto out_put_node;
}
/* This clears the IRQ status register */
- ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG,
+ ret = regmap_read(priv->map, RTL8366RB_INTERRUPT_STATUS_REG,
&val);
if (ret) {
- dev_err(smi->dev, "can't read interrupt status\n");
+ dev_err(priv->dev, "can't read interrupt status\n");
goto out_put_node;
}
@@ -573,48 +573,48 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
switch (irq_trig) {
case IRQF_TRIGGER_RISING:
case IRQF_TRIGGER_HIGH:
- dev_info(smi->dev, "active high/rising IRQ\n");
+ dev_info(priv->dev, "active high/rising IRQ\n");
val = 0;
break;
case IRQF_TRIGGER_FALLING:
case IRQF_TRIGGER_LOW:
- dev_info(smi->dev, "active low/falling IRQ\n");
+ dev_info(priv->dev, "active low/falling IRQ\n");
val = RTL8366RB_INTERRUPT_POLARITY;
break;
}
- ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_CONTROL_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_INTERRUPT_POLARITY,
val);
if (ret) {
- dev_err(smi->dev, "could not configure IRQ polarity\n");
+ dev_err(priv->dev, "could not configure IRQ polarity\n");
goto out_put_node;
}
- ret = devm_request_threaded_irq(smi->dev, irq, NULL,
+ ret = devm_request_threaded_irq(priv->dev, irq, NULL,
rtl8366rb_irq, IRQF_ONESHOT,
- "RTL8366RB", smi);
+ "RTL8366RB", priv);
if (ret) {
- dev_err(smi->dev, "unable to request irq: %d\n", ret);
+ dev_err(priv->dev, "unable to request irq: %d\n", ret);
goto out_put_node;
}
- smi->irqdomain = irq_domain_add_linear(intc,
- RTL8366RB_NUM_INTERRUPT,
- &rtl8366rb_irqdomain_ops,
- smi);
- if (!smi->irqdomain) {
- dev_err(smi->dev, "failed to create IRQ domain\n");
+ priv->irqdomain = irq_domain_add_linear(intc,
+ RTL8366RB_NUM_INTERRUPT,
+ &rtl8366rb_irqdomain_ops,
+ priv);
+ if (!priv->irqdomain) {
+ dev_err(priv->dev, "failed to create IRQ domain\n");
ret = -EINVAL;
goto out_put_node;
}
- for (i = 0; i < smi->num_ports; i++)
- irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
+ for (i = 0; i < priv->num_ports; i++)
+ irq_set_parent(irq_create_mapping(priv->irqdomain, i), irq);
out_put_node:
of_node_put(intc);
return ret;
}
-static int rtl8366rb_set_addr(struct realtek_smi *smi)
+static int rtl8366rb_set_addr(struct realtek_priv *priv)
{
u8 addr[ETH_ALEN];
u16 val;
@@ -622,18 +622,18 @@ static int rtl8366rb_set_addr(struct realtek_smi *smi)
eth_random_addr(addr);
- dev_info(smi->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ dev_info(priv->dev, "set MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
val = addr[0] << 8 | addr[1];
- ret = regmap_write(smi->map, RTL8366RB_SMAR0, val);
+ ret = regmap_write(priv->map, RTL8366RB_SMAR0, val);
if (ret)
return ret;
val = addr[2] << 8 | addr[3];
- ret = regmap_write(smi->map, RTL8366RB_SMAR1, val);
+ ret = regmap_write(priv->map, RTL8366RB_SMAR1, val);
if (ret)
return ret;
val = addr[4] << 8 | addr[5];
- ret = regmap_write(smi->map, RTL8366RB_SMAR2, val);
+ ret = regmap_write(priv->map, RTL8366RB_SMAR2, val);
if (ret)
return ret;
@@ -765,7 +765,7 @@ static const struct rtl8366rb_jam_tbl_entry rtl8366rb_green_jam[] = {
/* Function that jams the tables in the proper registers */
static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
- int jam_size, struct realtek_smi *smi,
+ int jam_size, struct realtek_priv *priv,
bool write_dbg)
{
u32 val;
@@ -774,24 +774,24 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
for (i = 0; i < jam_size; i++) {
if ((jam_table[i].reg & 0xBE00) == 0xBE00) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8366RB_PHY_ACCESS_BUSY_REG,
&val);
if (ret)
return ret;
if (!(val & RTL8366RB_PHY_INT_BUSY)) {
- ret = regmap_write(smi->map,
- RTL8366RB_PHY_ACCESS_CTRL_REG,
- RTL8366RB_PHY_CTRL_WRITE);
+ ret = regmap_write(priv->map,
+ RTL8366RB_PHY_ACCESS_CTRL_REG,
+ RTL8366RB_PHY_CTRL_WRITE);
if (ret)
return ret;
}
}
if (write_dbg)
- dev_dbg(smi->dev, "jam %04x into register %04x\n",
+ dev_dbg(priv->dev, "jam %04x into register %04x\n",
jam_table[i].val,
jam_table[i].reg);
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
jam_table[i].reg,
jam_table[i].val);
if (ret)
@@ -802,7 +802,7 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
static int rtl8366rb_setup(struct dsa_switch *ds)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
const struct rtl8366rb_jam_tbl_entry *jam_table;
struct rtl8366rb *rb;
u32 chip_ver = 0;
@@ -812,11 +812,11 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
int ret;
int i;
- rb = smi->chip_data;
+ rb = priv->chip_data;
- ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id);
+ ret = regmap_read(priv->map, RTL8366RB_CHIP_ID_REG, &chip_id);
if (ret) {
- dev_err(smi->dev, "unable to read chip id\n");
+ dev_err(priv->dev, "unable to read chip id\n");
return ret;
}
@@ -824,18 +824,18 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
case RTL8366RB_CHIP_ID_8366:
break;
default:
- dev_err(smi->dev, "unknown chip id (%04x)\n", chip_id);
+ dev_err(priv->dev, "unknown chip id (%04x)\n", chip_id);
return -ENODEV;
}
- ret = regmap_read(smi->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
+ ret = regmap_read(priv->map, RTL8366RB_CHIP_VERSION_CTRL_REG,
&chip_ver);
if (ret) {
- dev_err(smi->dev, "unable to read chip version\n");
+ dev_err(priv->dev, "unable to read chip version\n");
return ret;
}
- dev_info(smi->dev, "RTL%04x ver %u chip found\n",
+ dev_info(priv->dev, "RTL%04x ver %u chip found\n",
chip_id, chip_ver & RTL8366RB_CHIP_VERSION_MASK);
/* Do the init dance using the right jam table */
@@ -872,20 +872,20 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
jam_size = ARRAY_SIZE(rtl8366rb_init_jam_dgn3500);
}
- ret = rtl8366rb_jam_table(jam_table, jam_size, smi, true);
+ ret = rtl8366rb_jam_table(jam_table, jam_size, priv, true);
if (ret)
return ret;
/* Isolate all user ports so they can only send packets to themselves and the CPU port */
for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
- ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i),
+ ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) |
RTL8366RB_PORT_ISO_EN);
if (ret)
return ret;
}
/* CPU port can send packets to all ports */
- ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
+ ret = regmap_write(priv->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) |
RTL8366RB_PORT_ISO_EN);
if (ret)
@@ -893,26 +893,26 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
/* Set up the "green ethernet" feature */
ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
- ARRAY_SIZE(rtl8366rb_green_jam), smi, false);
+ ARRAY_SIZE(rtl8366rb_green_jam), priv, false);
if (ret)
return ret;
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
RTL8366RB_GREEN_FEATURE_REG,
(chip_ver == 1) ? 0x0007 : 0x0003);
if (ret)
return ret;
/* Vendor driver sets 0x240 in registers 0xc and 0xd (undocumented) */
- ret = regmap_write(smi->map, 0x0c, 0x240);
+ ret = regmap_write(priv->map, 0x0c, 0x240);
if (ret)
return ret;
- ret = regmap_write(smi->map, 0x0d, 0x240);
+ ret = regmap_write(priv->map, 0x0d, 0x240);
if (ret)
return ret;
/* Set some random MAC address */
- ret = rtl8366rb_set_addr(smi);
+ ret = rtl8366rb_set_addr(priv);
if (ret)
return ret;
@@ -921,21 +921,21 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
* If you set RTL8368RB_CPU_NO_TAG (bit 15) in this register
* the custom tag is turned off.
*/
- ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG,
+ ret = regmap_update_bits(priv->map, RTL8368RB_CPU_CTRL_REG,
0xFFFF,
- BIT(smi->cpu_port));
+ BIT(priv->cpu_port));
if (ret)
return ret;
/* Make sure we default-enable the fixed CPU port */
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR,
- BIT(smi->cpu_port),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR,
+ BIT(priv->cpu_port),
0);
if (ret)
return ret;
/* Set maximum packet length to 1536 bytes */
- ret = regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ ret = regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_MAX_LENGTH_MASK,
RTL8366RB_SGCR_MAX_LENGTH_1536);
if (ret)
@@ -945,13 +945,13 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
rb->max_mtu[i] = 1532;
/* Disable learning for all ports */
- ret = regmap_write(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ ret = regmap_write(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
RTL8366RB_PORT_ALL);
if (ret)
return ret;
/* Enable auto ageing for all ports */
- ret = regmap_write(smi->map, RTL8366RB_SECURITY_CTRL, 0);
+ ret = regmap_write(priv->map, RTL8366RB_SECURITY_CTRL, 0);
if (ret)
return ret;
@@ -962,30 +962,30 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
* connected to something exotic such as fiber, then this might
* be worth experimenting with.
*/
- ret = regmap_update_bits(smi->map, RTL8366RB_PMC0,
+ ret = regmap_update_bits(priv->map, RTL8366RB_PMC0,
RTL8366RB_PMC0_P4_IOMODE_MASK,
0 << RTL8366RB_PMC0_P4_IOMODE_SHIFT);
if (ret)
return ret;
/* Accept all packets by default, we enable filtering on-demand */
- ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
0);
if (ret)
return ret;
- ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
0);
if (ret)
return ret;
/* Don't drop packets whose DA has not been learned */
- ret = regmap_update_bits(smi->map, RTL8366RB_SSCR2,
+ ret = regmap_update_bits(priv->map, RTL8366RB_SSCR2,
RTL8366RB_SSCR2_DROP_UNKNOWN_DA, 0);
if (ret)
return ret;
/* Set blinking, TODO: make this configurable */
- ret = regmap_update_bits(smi->map, RTL8366RB_LED_BLINKRATE_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_LED_BLINKRATE_REG,
RTL8366RB_LED_BLINKRATE_MASK,
RTL8366RB_LED_BLINKRATE_56MS);
if (ret)
@@ -996,15 +996,15 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
* behaviour (no individual config) but we can set up each
* LED separately.
*/
- if (smi->leds_disabled) {
+ if (priv->leds_disabled) {
/* Turn everything off */
- regmap_update_bits(smi->map,
+ regmap_update_bits(priv->map,
RTL8366RB_LED_0_1_CTRL_REG,
0x0FFF, 0);
- regmap_update_bits(smi->map,
+ regmap_update_bits(priv->map,
RTL8366RB_LED_2_3_CTRL_REG,
0x0FFF, 0);
- regmap_update_bits(smi->map,
+ regmap_update_bits(priv->map,
RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_P4_RGMII_LED,
0);
@@ -1014,7 +1014,7 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
val = RTL8366RB_LED_FORCE;
}
for (i = 0; i < 4; i++) {
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_CTRL_REG,
0xf << (i * 4),
val << (i * 4));
@@ -1022,18 +1022,20 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return ret;
}
- ret = rtl8366_reset_vlan(smi);
+ ret = rtl8366_reset_vlan(priv);
if (ret)
return ret;
- ret = rtl8366rb_setup_cascaded_irq(smi);
+ ret = rtl8366rb_setup_cascaded_irq(priv);
if (ret)
- dev_info(smi->dev, "no interrupt support\n");
+ dev_info(priv->dev, "no interrupt support\n");
- ret = realtek_smi_setup_mdio(smi);
- if (ret) {
- dev_info(smi->dev, "could not set up MDIO bus\n");
- return -ENODEV;
+ if (priv->setup_interface) {
+ ret = priv->setup_interface(ds);
+ if (ret) {
+ dev_err(priv->dev, "could not set up MDIO bus\n");
+ return -ENODEV;
+ }
}
return 0;
@@ -1052,35 +1054,35 @@ rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface, struct phy_device *phydev,
int speed, int duplex, bool tx_pause, bool rx_pause)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- if (port != smi->cpu_port)
+ if (port != priv->cpu_port)
return;
- dev_dbg(smi->dev, "MAC link up on CPU port (%d)\n", port);
+ dev_dbg(priv->dev, "MAC link up on CPU port (%d)\n", port);
/* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
- ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_MAC_FORCE_CTRL_REG,
BIT(port), BIT(port));
if (ret) {
- dev_err(smi->dev, "failed to force 1Gbit on CPU port\n");
+ dev_err(priv->dev, "failed to force 1Gbit on CPU port\n");
return;
}
- ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2,
+ ret = regmap_update_bits(priv->map, RTL8366RB_PAACR2,
0xFF00U,
RTL8366RB_PAACR_CPU_PORT << 8);
if (ret) {
- dev_err(smi->dev, "failed to set PAACR on CPU port\n");
+ dev_err(priv->dev, "failed to set PAACR on CPU port\n");
return;
}
/* Enable the CPU port */
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
0);
if (ret) {
- dev_err(smi->dev, "failed to enable the CPU port\n");
+ dev_err(priv->dev, "failed to enable the CPU port\n");
return;
}
}
@@ -1089,106 +1091,108 @@ static void
rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- if (port != smi->cpu_port)
+ if (port != priv->cpu_port)
return;
- dev_dbg(smi->dev, "MAC link down on CPU port (%d)\n", port);
+ dev_dbg(priv->dev, "MAC link down on CPU port (%d)\n", port);
/* Disable the CPU port */
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
BIT(port));
if (ret) {
- dev_err(smi->dev, "failed to disable the CPU port\n");
+ dev_err(priv->dev, "failed to disable the CPU port\n");
return;
}
}
-static void rb8366rb_set_port_led(struct realtek_smi *smi,
+static void rb8366rb_set_port_led(struct realtek_priv *priv,
int port, bool enable)
{
u16 val = enable ? 0x3f : 0;
int ret;
- if (smi->leds_disabled)
+ if (priv->leds_disabled)
return;
switch (port) {
case 0:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_0_1_CTRL_REG,
0x3F, val);
break;
case 1:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_0_1_CTRL_REG,
0x3F << RTL8366RB_LED_1_OFFSET,
val << RTL8366RB_LED_1_OFFSET);
break;
case 2:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_2_3_CTRL_REG,
0x3F, val);
break;
case 3:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_LED_2_3_CTRL_REG,
0x3F << RTL8366RB_LED_3_OFFSET,
val << RTL8366RB_LED_3_OFFSET);
break;
case 4:
- ret = regmap_update_bits(smi->map,
+ ret = regmap_update_bits(priv->map,
RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_P4_RGMII_LED,
enable ? RTL8366RB_P4_RGMII_LED : 0);
break;
default:
- dev_err(smi->dev, "no LED for port %d\n", port);
+ dev_err(priv->dev, "no LED for port %d\n", port);
return;
}
if (ret)
- dev_err(smi->dev, "error updating LED on port %d\n", port);
+ dev_err(priv->dev, "error updating LED on port %d\n", port);
}
static int
rtl8366rb_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- dev_dbg(smi->dev, "enable port %d\n", port);
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ dev_dbg(priv->dev, "enable port %d\n", port);
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
0);
if (ret)
return ret;
- rb8366rb_set_port_led(smi, port, true);
+ rb8366rb_set_port_led(priv, port, true);
return 0;
}
static void
rtl8366rb_port_disable(struct dsa_switch *ds, int port)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
- dev_dbg(smi->dev, "disable port %d\n", port);
- ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ dev_dbg(priv->dev, "disable port %d\n", port);
+ ret = regmap_update_bits(priv->map, RTL8366RB_PECR, BIT(port),
BIT(port));
if (ret)
return;
- rb8366rb_set_port_led(smi, port, false);
+ rb8366rb_set_port_led(priv, port, false);
}
static int
rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
unsigned int port_bitmap = 0;
int ret, i;
@@ -1198,29 +1202,29 @@ rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
if (i == port)
continue;
/* Not on this bridge */
- if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Join this port to each other port on the bridge */
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(port)),
RTL8366RB_PORT_ISO_PORTS(BIT(port)));
if (ret)
- dev_err(smi->dev, "failed to join port %d\n", port);
+ dev_err(priv->dev, "failed to join port %d\n", port);
port_bitmap |= BIT(i);
}
/* Set the bits for the ports we can access */
- return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ return regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
RTL8366RB_PORT_ISO_PORTS(port_bitmap),
RTL8366RB_PORT_ISO_PORTS(port_bitmap));
}
static void
rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
unsigned int port_bitmap = 0;
int ret, i;
@@ -1230,31 +1234,33 @@ rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
if (i == port)
continue;
/* Not on this bridge */
- if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Remove this port from any other port on the bridge */
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(i),
RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0);
if (ret)
- dev_err(smi->dev, "failed to leave port %d\n", port);
+ dev_err(priv->dev, "failed to leave port %d\n", port);
port_bitmap |= BIT(i);
}
/* Clear the bits for the ports we can not access, leave ourselves */
- regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ regmap_update_bits(priv->map, RTL8366RB_PORT_ISO(port),
RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0);
}
/**
* rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames
- * @smi: SMI state container
+ * @priv: SMI state container
* @port: the port to drop untagged and C-tagged frames on
* @drop: whether to drop or pass untagged and C-tagged frames
+ *
+ * Return: zero for success, a negative number on error.
*/
-static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop)
+static int rtl8366rb_drop_untagged(struct realtek_priv *priv, int port, bool drop)
{
- return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ return regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port),
drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0);
}
@@ -1263,17 +1269,17 @@ static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering,
struct netlink_ext_ack *extack)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8366rb *rb;
int ret;
- rb = smi->chip_data;
+ rb = priv->chip_data;
- dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port,
+ dev_dbg(priv->dev, "port %d: %s VLAN filtering\n", port,
vlan_filtering ? "enable" : "disable");
/* If the port is not in the member set, the frame will be dropped */
- ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ ret = regmap_update_bits(priv->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
BIT(port), vlan_filtering ? BIT(port) : 0);
if (ret)
return ret;
@@ -1283,9 +1289,9 @@ static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
* filtering on a port, we need to accept any frames.
*/
if (vlan_filtering)
- ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]);
+ ret = rtl8366rb_drop_untagged(priv, port, !rb->pvid_enabled[port]);
else
- ret = rtl8366rb_drop_untagged(smi, port, false);
+ ret = rtl8366rb_drop_untagged(priv, port, false);
return ret;
}
@@ -1307,11 +1313,11 @@ rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
int ret;
if (flags.mask & BR_LEARNING) {
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_LEARNDIS_CTRL,
BIT(port),
(flags.val & BR_LEARNING) ? 0 : BIT(port));
if (ret)
@@ -1324,7 +1330,7 @@ rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
static void
rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
u32 val;
int i;
@@ -1343,13 +1349,13 @@ rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
val = RTL8366RB_STP_STATE_FORWARDING;
break;
default:
- dev_err(smi->dev, "unknown bridge state requested\n");
+ dev_err(priv->dev, "unknown bridge state requested\n");
return;
}
/* Set the same status for the port on all the FIDs */
for (i = 0; i < RTL8366RB_NUM_FIDS; i++) {
- regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + i,
+ regmap_update_bits(priv->map, RTL8366RB_STP_STATE_BASE + i,
RTL8366RB_STP_STATE_MASK(port),
RTL8366RB_STP_STATE(port, val));
}
@@ -1358,26 +1364,26 @@ rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
static void
rtl8366rb_port_fast_age(struct dsa_switch *ds, int port)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
/* This will age out any learned L2 entries */
- regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
BIT(port), BIT(port));
/* Restore the normal state of things */
- regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ regmap_update_bits(priv->map, RTL8366RB_SECURITY_CTRL,
BIT(port), 0);
}
static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
- struct realtek_smi *smi = ds->priv;
+ struct realtek_priv *priv = ds->priv;
struct rtl8366rb *rb;
unsigned int max_mtu;
u32 len;
int i;
/* Cache the per-port MTU setting */
- rb = smi->chip_data;
+ rb = priv->chip_data;
rb->max_mtu[port] = new_mtu;
/* Roof out the MTU for the entire switch to the greatest
@@ -1405,7 +1411,7 @@ static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
else
len = RTL8366RB_SGCR_MAX_LENGTH_16000;
- return regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_MAX_LENGTH_MASK,
len);
}
@@ -1418,7 +1424,7 @@ static int rtl8366rb_max_mtu(struct dsa_switch *ds, int port)
return 15996;
}
-static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
+static int rtl8366rb_get_vlan_4k(struct realtek_priv *priv, u32 vid,
struct rtl8366_vlan_4k *vlan4k)
{
u32 data[3];
@@ -1431,19 +1437,19 @@ static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
return -EINVAL;
/* write VID */
- ret = regmap_write(smi->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
+ ret = regmap_write(priv->map, RTL8366RB_VLAN_TABLE_WRITE_BASE,
vid & RTL8366RB_VLAN_VID_MASK);
if (ret)
return ret;
/* write table access control word */
- ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
RTL8366RB_TABLE_VLAN_READ_CTRL);
if (ret)
return ret;
for (i = 0; i < 3; i++) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8366RB_VLAN_TABLE_READ_BASE + i,
&data[i]);
if (ret)
@@ -1459,7 +1465,7 @@ static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
return 0;
}
-static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
+static int rtl8366rb_set_vlan_4k(struct realtek_priv *priv,
const struct rtl8366_vlan_4k *vlan4k)
{
u32 data[3];
@@ -1479,7 +1485,7 @@ static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
data[2] = vlan4k->fid & RTL8366RB_VLAN_FID_MASK;
for (i = 0; i < 3; i++) {
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
RTL8366RB_VLAN_TABLE_WRITE_BASE + i,
data[i]);
if (ret)
@@ -1487,13 +1493,13 @@ static int rtl8366rb_set_vlan_4k(struct realtek_smi *smi,
}
/* write table access control word */
- ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
+ ret = regmap_write(priv->map, RTL8366RB_TABLE_ACCESS_CTRL_REG,
RTL8366RB_TABLE_VLAN_WRITE_CTRL);
return ret;
}
-static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
+static int rtl8366rb_get_vlan_mc(struct realtek_priv *priv, u32 index,
struct rtl8366_vlan_mc *vlanmc)
{
u32 data[3];
@@ -1506,7 +1512,7 @@ static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
return -EINVAL;
for (i = 0; i < 3; i++) {
- ret = regmap_read(smi->map,
+ ret = regmap_read(priv->map,
RTL8366RB_VLAN_MC_BASE(index) + i,
&data[i]);
if (ret)
@@ -1524,7 +1530,7 @@ static int rtl8366rb_get_vlan_mc(struct realtek_smi *smi, u32 index,
return 0;
}
-static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
+static int rtl8366rb_set_vlan_mc(struct realtek_priv *priv, u32 index,
const struct rtl8366_vlan_mc *vlanmc)
{
u32 data[3];
@@ -1548,7 +1554,7 @@ static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
data[2] = vlanmc->fid & RTL8366RB_VLAN_FID_MASK;
for (i = 0; i < 3; i++) {
- ret = regmap_write(smi->map,
+ ret = regmap_write(priv->map,
RTL8366RB_VLAN_MC_BASE(index) + i,
data[i]);
if (ret)
@@ -1558,15 +1564,15 @@ static int rtl8366rb_set_vlan_mc(struct realtek_smi *smi, u32 index,
return 0;
}
-static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
+static int rtl8366rb_get_mc_index(struct realtek_priv *priv, int port, int *val)
{
u32 data;
int ret;
- if (port >= smi->num_ports)
+ if (port >= priv->num_ports)
return -EINVAL;
- ret = regmap_read(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ ret = regmap_read(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
&data);
if (ret)
return ret;
@@ -1577,22 +1583,22 @@ static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
return 0;
}
-static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
+static int rtl8366rb_set_mc_index(struct realtek_priv *priv, int port, int index)
{
struct rtl8366rb *rb;
bool pvid_enabled;
int ret;
- rb = smi->chip_data;
+ rb = priv->chip_data;
pvid_enabled = !!index;
- if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
+ if (port >= priv->num_ports || index >= RTL8366RB_NUM_VLANS)
return -EINVAL;
- ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
- RTL8366RB_PORT_VLAN_CTRL_MASK <<
+ ret = regmap_update_bits(priv->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ RTL8366RB_PORT_VLAN_CTRL_MASK <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
- (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
+ (index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
if (ret)
return ret;
@@ -1603,17 +1609,17 @@ static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
* not drop any untagged or C-tagged frames. Make sure to update the
* filtering setting.
*/
- if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port)))
- ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled);
+ if (dsa_port_is_vlan_filtering(dsa_to_port(priv->ds, port)))
+ ret = rtl8366rb_drop_untagged(priv, port, !pvid_enabled);
return ret;
}
-static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
+static bool rtl8366rb_is_vlan_valid(struct realtek_priv *priv, unsigned int vlan)
{
unsigned int max = RTL8366RB_NUM_VLANS - 1;
- if (smi->vlan4k_enabled)
+ if (priv->vlan4k_enabled)
max = RTL8366RB_NUM_VIDS - 1;
if (vlan > max)
@@ -1622,23 +1628,23 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
return true;
}
-static int rtl8366rb_enable_vlan(struct realtek_smi *smi, bool enable)
+static int rtl8366rb_enable_vlan(struct realtek_priv *priv, bool enable)
{
- dev_dbg(smi->dev, "%s VLAN\n", enable ? "enable" : "disable");
- return regmap_update_bits(smi->map,
+ dev_dbg(priv->dev, "%s VLAN\n", enable ? "enable" : "disable");
+ return regmap_update_bits(priv->map,
RTL8366RB_SGCR, RTL8366RB_SGCR_EN_VLAN,
enable ? RTL8366RB_SGCR_EN_VLAN : 0);
}
-static int rtl8366rb_enable_vlan4k(struct realtek_smi *smi, bool enable)
+static int rtl8366rb_enable_vlan4k(struct realtek_priv *priv, bool enable)
{
- dev_dbg(smi->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
- return regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ dev_dbg(priv->dev, "%s VLAN 4k\n", enable ? "enable" : "disable");
+ return regmap_update_bits(priv->map, RTL8366RB_SGCR,
RTL8366RB_SGCR_EN_VLAN_4KTB,
enable ? RTL8366RB_SGCR_EN_VLAN_4KTB : 0);
}
-static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+static int rtl8366rb_phy_read(struct realtek_priv *priv, int phy, int regnum)
{
u32 val;
u32 reg;
@@ -1647,32 +1653,40 @@ static int rtl8366rb_phy_read(struct realtek_smi *smi, int phy, int regnum)
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ mutex_lock(&priv->map_lock);
+
+ ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_READ);
if (ret)
- return ret;
+ goto out;
reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
- ret = regmap_write(smi->map, reg, 0);
+ ret = regmap_write(priv->map_nolock, reg, 0);
if (ret) {
- dev_err(smi->dev,
+ dev_err(priv->dev,
"failed to write PHY%d reg %04x @ %04x, ret %d\n",
phy, regnum, reg, ret);
- return ret;
+ goto out;
}
- ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val);
+ ret = regmap_read(priv->map_nolock, RTL8366RB_PHY_ACCESS_DATA_REG,
+ &val);
if (ret)
- return ret;
+ goto out;
- dev_dbg(smi->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
+ ret = val;
+
+ dev_dbg(priv->dev, "read PHY%d register 0x%04x @ %08x, val <- %04x\n",
phy, regnum, reg, val);
- return val;
+out:
+ mutex_unlock(&priv->map_lock);
+
+ return ret;
}
-static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+static int rtl8366rb_phy_write(struct realtek_priv *priv, int phy, int regnum,
u16 val)
{
u32 reg;
@@ -1681,34 +1695,50 @@ static int rtl8366rb_phy_write(struct realtek_smi *smi, int phy, int regnum,
if (phy > RTL8366RB_PHY_NO_MAX)
return -EINVAL;
- ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG,
+ mutex_lock(&priv->map_lock);
+
+ ret = regmap_write(priv->map_nolock, RTL8366RB_PHY_ACCESS_CTRL_REG,
RTL8366RB_PHY_CTRL_WRITE);
if (ret)
- return ret;
+ goto out;
reg = 0x8000 | (1 << (phy + RTL8366RB_PHY_NO_OFFSET)) | regnum;
- dev_dbg(smi->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
+ dev_dbg(priv->dev, "write PHY%d register 0x%04x @ %04x, val -> %04x\n",
phy, regnum, reg, val);
- ret = regmap_write(smi->map, reg, val);
+ ret = regmap_write(priv->map_nolock, reg, val);
if (ret)
- return ret;
+ goto out;
- return 0;
+out:
+ mutex_unlock(&priv->map_lock);
+
+ return ret;
+}
+
+static int rtl8366rb_dsa_phy_read(struct dsa_switch *ds, int phy, int regnum)
+{
+ return rtl8366rb_phy_read(ds->priv, phy, regnum);
+}
+
+static int rtl8366rb_dsa_phy_write(struct dsa_switch *ds, int phy, int regnum,
+ u16 val)
+{
+ return rtl8366rb_phy_write(ds->priv, phy, regnum, val);
}
-static int rtl8366rb_reset_chip(struct realtek_smi *smi)
+static int rtl8366rb_reset_chip(struct realtek_priv *priv)
{
int timeout = 10;
u32 val;
int ret;
- realtek_smi_write_reg_noack(smi, RTL8366RB_RESET_CTRL_REG,
- RTL8366RB_CHIP_CTRL_RESET_HW);
+ priv->write_reg_noack(priv, RTL8366RB_RESET_CTRL_REG,
+ RTL8366RB_CHIP_CTRL_RESET_HW);
do {
usleep_range(20000, 25000);
- ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val);
+ ret = regmap_read(priv->map, RTL8366RB_RESET_CTRL_REG, &val);
if (ret)
return ret;
@@ -1717,21 +1747,21 @@ static int rtl8366rb_reset_chip(struct realtek_smi *smi)
} while (--timeout);
if (!timeout) {
- dev_err(smi->dev, "timeout waiting for the switch to reset\n");
+ dev_err(priv->dev, "timeout waiting for the switch to reset\n");
return -EIO;
}
return 0;
}
-static int rtl8366rb_detect(struct realtek_smi *smi)
+static int rtl8366rb_detect(struct realtek_priv *priv)
{
- struct device *dev = smi->dev;
+ struct device *dev = priv->dev;
int ret;
u32 val;
/* Detect device */
- ret = regmap_read(smi->map, 0x5c, &val);
+ ret = regmap_read(priv->map, 0x5c, &val);
if (ret) {
dev_err(dev, "can't get chip ID (%d)\n", ret);
return ret;
@@ -1744,11 +1774,11 @@ static int rtl8366rb_detect(struct realtek_smi *smi)
return -ENODEV;
case 0x5937:
dev_info(dev, "found an RTL8366RB switch\n");
- smi->cpu_port = RTL8366RB_PORT_NUM_CPU;
- smi->num_ports = RTL8366RB_NUM_PORTS;
- smi->num_vlan_mc = RTL8366RB_NUM_VLANS;
- smi->mib_counters = rtl8366rb_mib_counters;
- smi->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
+ priv->cpu_port = RTL8366RB_PORT_NUM_CPU;
+ priv->num_ports = RTL8366RB_NUM_PORTS;
+ priv->num_vlan_mc = RTL8366RB_NUM_VLANS;
+ priv->mib_counters = rtl8366rb_mib_counters;
+ priv->num_mib_counters = ARRAY_SIZE(rtl8366rb_mib_counters);
break;
default:
dev_info(dev, "found an Unknown Realtek switch (id=0x%04x)\n",
@@ -1756,16 +1786,41 @@ static int rtl8366rb_detect(struct realtek_smi *smi)
break;
}
- ret = rtl8366rb_reset_chip(smi);
+ ret = rtl8366rb_reset_chip(priv);
if (ret)
return ret;
return 0;
}
-static const struct dsa_switch_ops rtl8366rb_switch_ops = {
+static const struct dsa_switch_ops rtl8366rb_switch_ops_smi = {
+ .get_tag_protocol = rtl8366_get_tag_protocol,
+ .setup = rtl8366rb_setup,
+ .phylink_mac_link_up = rtl8366rb_mac_link_up,
+ .phylink_mac_link_down = rtl8366rb_mac_link_down,
+ .get_strings = rtl8366_get_strings,
+ .get_ethtool_stats = rtl8366_get_ethtool_stats,
+ .get_sset_count = rtl8366_get_sset_count,
+ .port_bridge_join = rtl8366rb_port_bridge_join,
+ .port_bridge_leave = rtl8366rb_port_bridge_leave,
+ .port_vlan_filtering = rtl8366rb_vlan_filtering,
+ .port_vlan_add = rtl8366_vlan_add,
+ .port_vlan_del = rtl8366_vlan_del,
+ .port_enable = rtl8366rb_port_enable,
+ .port_disable = rtl8366rb_port_disable,
+ .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
+ .port_bridge_flags = rtl8366rb_port_bridge_flags,
+ .port_stp_state_set = rtl8366rb_port_stp_state_set,
+ .port_fast_age = rtl8366rb_port_fast_age,
+ .port_change_mtu = rtl8366rb_change_mtu,
+ .port_max_mtu = rtl8366rb_max_mtu,
+};
+
+static const struct dsa_switch_ops rtl8366rb_switch_ops_mdio = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
+ .phy_read = rtl8366rb_dsa_phy_read,
+ .phy_write = rtl8366rb_dsa_phy_write,
.phylink_mac_link_up = rtl8366rb_mac_link_up,
.phylink_mac_link_down = rtl8366rb_mac_link_down,
.get_strings = rtl8366_get_strings,
@@ -1786,7 +1841,7 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.port_max_mtu = rtl8366rb_max_mtu,
};
-static const struct realtek_smi_ops rtl8366rb_smi_ops = {
+static const struct realtek_ops rtl8366rb_ops = {
.detect = rtl8366rb_detect,
.get_vlan_mc = rtl8366rb_get_vlan_mc,
.set_vlan_mc = rtl8366rb_set_vlan_mc,
@@ -1802,12 +1857,17 @@ static const struct realtek_smi_ops rtl8366rb_smi_ops = {
.phy_write = rtl8366rb_phy_write,
};
-const struct realtek_smi_variant rtl8366rb_variant = {
- .ds_ops = &rtl8366rb_switch_ops,
- .ops = &rtl8366rb_smi_ops,
+const struct realtek_variant rtl8366rb_variant = {
+ .ds_ops_smi = &rtl8366rb_switch_ops_smi,
+ .ds_ops_mdio = &rtl8366rb_switch_ops_mdio,
+ .ops = &rtl8366rb_ops,
.clk_delay = 10,
.cmd_read = 0xa9,
.cmd_write = 0xa8,
.chip_data_sz = sizeof(struct rtl8366rb),
};
EXPORT_SYMBOL_GPL(rtl8366rb_variant);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Driver for RTL8366RB ethernet switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
new file mode 100644
index 000000000000..919027cf2012
--- /dev/null
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Schneider-Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <net/dsa.h>
+
+#include "rzn1_a5psw.h"
+
+struct a5psw_stats {
+ u16 offset;
+ const char name[ETH_GSTRING_LEN];
+};
+
+#define STAT_DESC(_offset) { \
+ .offset = A5PSW_##_offset, \
+ .name = __stringify(_offset), \
+}
+
+static const struct a5psw_stats a5psw_stats[] = {
+ STAT_DESC(aFramesTransmittedOK),
+ STAT_DESC(aFramesReceivedOK),
+ STAT_DESC(aFrameCheckSequenceErrors),
+ STAT_DESC(aAlignmentErrors),
+ STAT_DESC(aOctetsTransmittedOK),
+ STAT_DESC(aOctetsReceivedOK),
+ STAT_DESC(aTxPAUSEMACCtrlFrames),
+ STAT_DESC(aRxPAUSEMACCtrlFrames),
+ STAT_DESC(ifInErrors),
+ STAT_DESC(ifOutErrors),
+ STAT_DESC(ifInUcastPkts),
+ STAT_DESC(ifInMulticastPkts),
+ STAT_DESC(ifInBroadcastPkts),
+ STAT_DESC(ifOutDiscards),
+ STAT_DESC(ifOutUcastPkts),
+ STAT_DESC(ifOutMulticastPkts),
+ STAT_DESC(ifOutBroadcastPkts),
+ STAT_DESC(etherStatsDropEvents),
+ STAT_DESC(etherStatsOctets),
+ STAT_DESC(etherStatsPkts),
+ STAT_DESC(etherStatsUndersizePkts),
+ STAT_DESC(etherStatsOversizePkts),
+ STAT_DESC(etherStatsPkts64Octets),
+ STAT_DESC(etherStatsPkts65to127Octets),
+ STAT_DESC(etherStatsPkts128to255Octets),
+ STAT_DESC(etherStatsPkts256to511Octets),
+ STAT_DESC(etherStatsPkts512to1023Octets),
+ STAT_DESC(etherStatsPkts1024to1518Octets),
+ STAT_DESC(etherStatsPkts1519toXOctets),
+ STAT_DESC(etherStatsJabbers),
+ STAT_DESC(etherStatsFragments),
+ STAT_DESC(VLANReceived),
+ STAT_DESC(VLANTransmitted),
+ STAT_DESC(aDeferred),
+ STAT_DESC(aMultipleCollisions),
+ STAT_DESC(aSingleCollisions),
+ STAT_DESC(aLateCollisions),
+ STAT_DESC(aExcessiveCollisions),
+ STAT_DESC(aCarrierSenseErrors),
+};
+
+static void a5psw_reg_writel(struct a5psw *a5psw, int offset, u32 value)
+{
+ writel(value, a5psw->base + offset);
+}
+
+static u32 a5psw_reg_readl(struct a5psw *a5psw, int offset)
+{
+ return readl(a5psw->base + offset);
+}
+
+static void a5psw_reg_rmw(struct a5psw *a5psw, int offset, u32 mask, u32 val)
+{
+ u32 reg;
+
+ spin_lock(&a5psw->reg_lock);
+
+ reg = a5psw_reg_readl(a5psw, offset);
+ reg &= ~mask;
+ reg |= val;
+ a5psw_reg_writel(a5psw, offset, reg);
+
+ spin_unlock(&a5psw->reg_lock);
+}
+
+static enum dsa_tag_protocol a5psw_get_tag_protocol(struct dsa_switch *ds,
+ int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_RZN1_A5PSW;
+}
+
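+/* Enable or disable matching of rule @pattern in the RX match
+ * configuration of @port.
+ */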
+static void a5psw_port_pattern_set(struct a5psw *a5psw, int port, int pattern,
+ bool enable)
+{
+ u32 rx_match = 0;
+
+ if (enable)
+ rx_match |= A5PSW_RXMATCH_CONFIG_PATTERN(pattern);
+
+ a5psw_reg_rmw(a5psw, A5PSW_RXMATCH_CONFIG(port),
+ A5PSW_RXMATCH_CONFIG_PATTERN(pattern), rx_match);
+}
+
+static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
+{
+ /* Enable "management forward" pattern matching; this will forward
+ * packets from this port only towards the management port and thus
+ * isolate the port.
+ */
+ a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
+}
+
+static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
+{
+ u32 port_ena = 0;
+
+ if (enable)
+ port_ena |= A5PSW_PORT_ENA_TX_RX(port);
+
+ a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, A5PSW_PORT_ENA_TX_RX(port),
+ port_ena);
+}
+
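+/* Issue a command to the FDB lookup engine and wait for its BUSY bit to
+ * clear; the control word read back is returned through @ctrl.
+ */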
+static int a5psw_lk_execute_ctrl(struct a5psw *a5psw, u32 *ctrl)
+{
+ int ret;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_ADDR_CTRL, *ctrl);
+
+ ret = readl_poll_timeout(a5psw->base + A5PSW_LK_ADDR_CTRL, *ctrl,
+ !(*ctrl & A5PSW_LK_ADDR_CTRL_BUSY),
+ A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
+ if (ret)
+ dev_err(a5psw->dev, "LK_CTRL timeout waiting for BUSY bit\n");
+
+ return ret;
+}
+
+static void a5psw_port_fdb_flush(struct a5psw *a5psw, int port)
+{
+ u32 ctrl = A5PSW_LK_ADDR_CTRL_DELETE_PORT | BIT(port);
+
+ mutex_lock(&a5psw->lk_lock);
+ a5psw_lk_execute_ctrl(a5psw, &ctrl);
+ mutex_unlock(&a5psw->lk_lock);
+}
+
+static void a5psw_port_authorize_set(struct a5psw *a5psw, int port,
+ bool authorize)
+{
+ u32 reg = a5psw_reg_readl(a5psw, A5PSW_AUTH_PORT(port));
+
+ if (authorize)
+ reg |= A5PSW_AUTH_PORT_AUTHORIZED;
+ else
+ reg &= ~A5PSW_AUTH_PORT_AUTHORIZED;
+
+ a5psw_reg_writel(a5psw, A5PSW_AUTH_PORT(port), reg);
+}
+
+static void a5psw_port_disable(struct dsa_switch *ds, int port)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_authorize_set(a5psw, port, false);
+ a5psw_port_enable_set(a5psw, port, false);
+}
+
+static int a5psw_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phy)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_authorize_set(a5psw, port, true);
+ a5psw_port_enable_set(a5psw, port, true);
+
+ return 0;
+}
+
+static int a5psw_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ new_mtu += ETH_HLEN + A5PSW_EXTRA_MTU_LEN + ETH_FCS_LEN;
+ a5psw_reg_writel(a5psw, A5PSW_FRM_LENGTH(port), new_mtu);
+
+ return 0;
+}
+
+static int a5psw_port_max_mtu(struct dsa_switch *ds, int port)
+{
+ return A5PSW_MAX_MTU;
+}
+
+static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ unsigned long *intf = config->supported_interfaces;
+
+ config->mac_capabilities = MAC_1000FD;
+
+ if (dsa_is_cpu_port(ds, port)) {
+ /* GMII is used internally and GMAC2 is connected to the switch
+ * using 1000Mbps Full-Duplex mode only (cf ethernet manual)
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII, intf);
+ } else {
+ config->mac_capabilities |= MAC_100 | MAC_10;
+ phy_interface_set_rgmii(intf);
+ __set_bit(PHY_INTERFACE_MODE_RMII, intf);
+ __set_bit(PHY_INTERFACE_MODE_MII, intf);
+ }
+}
+
+static struct phylink_pcs *
+a5psw_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct a5psw *a5psw = ds->priv;
+
+ if (!dsa_port_is_cpu(dp) && a5psw->pcs[port])
+ return a5psw->pcs[port];
+
+ return NULL;
+}
+
+static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct a5psw *a5psw = ds->priv;
+ u32 cmd_cfg;
+
+ cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
+ cmd_cfg &= ~(A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA);
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
+}
+
+static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
+ A5PSW_CMD_CFG_TX_CRC_APPEND;
+ struct a5psw *a5psw = ds->priv;
+
+ if (speed == SPEED_1000)
+ cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;
+
+ if (duplex == DUPLEX_HALF)
+ cmd_cfg |= A5PSW_CMD_CFG_HD_ENA;
+
+ cmd_cfg |= A5PSW_CMD_CFG_CNTL_FRM_ENA;
+
+ if (!rx_pause)
+ cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;
+
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
+}
+
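+/* Convert the requested ageing time in milliseconds to the hardware
+ * AGETIME value, which counts in units of 1024 * A5PSW_TABLE_ENTRIES
+ * clock cycles.
+ */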
+static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct a5psw *a5psw = ds->priv;
+ unsigned long rate;
+ u64 max, tmp;
+ u32 agetime;
+
+ rate = clk_get_rate(a5psw->clk);
+ max = div64_ul(((u64)A5PSW_LK_AGETIME_MASK * A5PSW_TABLE_ENTRIES * 1024),
+ rate) * 1000;
+ if (msecs > max)
+ return -EINVAL;
+
+ tmp = div_u64(rate, MSEC_PER_SEC);
+ agetime = div_u64(msecs * tmp, 1024 * A5PSW_TABLE_ENTRIES);
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_AGETIME, agetime);
+
+ return 0;
+}
+
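+/* Include or exclude @port in the unicast/broadcast/multicast default
+ * masks that resolve where flooded frames are forwarded.
+ */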
+static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
+ bool set)
+{
+ u8 offsets[] = {A5PSW_UCAST_DEF_MASK, A5PSW_BCAST_DEF_MASK,
+ A5PSW_MCAST_DEF_MASK};
+ int i;
+
+ if (set)
+ a5psw->bridged_ports |= BIT(port);
+ else
+ a5psw->bridged_ports &= ~BIT(port);
+
+ for (i = 0; i < ARRAY_SIZE(offsets); i++)
+ a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
+}
+
+static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ /* We only support 1 bridge device */
+ if (a5psw->br_dev && bridge.dev != a5psw->br_dev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Forwarding offload supported for a single bridge");
+ return -EOPNOTSUPP;
+ }
+
+ a5psw->br_dev = bridge.dev;
+ a5psw_flooding_set_resolution(a5psw, port, true);
+ a5psw_port_mgmtfwd_set(a5psw, port, false);
+
+ return 0;
+}
+
+static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct dsa_bridge bridge)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_flooding_set_resolution(a5psw, port, false);
+ a5psw_port_mgmtfwd_set(a5psw, port, true);
+
+ /* No more ports bridged */
+ if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
+ a5psw->br_dev = NULL;
+}
+
+static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ u32 mask = A5PSW_INPUT_LEARN_DIS(port) | A5PSW_INPUT_LEARN_BLOCK(port);
+ struct a5psw *a5psw = ds->priv;
+ u32 reg = 0;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ reg |= A5PSW_INPUT_LEARN_DIS(port);
+ reg |= A5PSW_INPUT_LEARN_BLOCK(port);
+ break;
+ case BR_STATE_LISTENING:
+ reg |= A5PSW_INPUT_LEARN_DIS(port);
+ break;
+ case BR_STATE_LEARNING:
+ reg |= A5PSW_INPUT_LEARN_BLOCK(port);
+ break;
+ case BR_STATE_FORWARDING:
+ default:
+ break;
+ }
+
+ a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
+static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct a5psw *a5psw = ds->priv;
+
+ a5psw_port_fdb_flush(a5psw, port);
+}
+
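+/* Look up the FDB entry described by @lk_data and return the matching
+ * table index through @entry.
+ */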
+static int a5psw_lk_execute_lookup(struct a5psw *a5psw, union lk_data *lk_data,
+ u16 *entry)
+{
+ u32 ctrl;
+ int ret;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_LO, lk_data->lo);
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data->hi);
+
+ ctrl = A5PSW_LK_ADDR_CTRL_LOOKUP;
+ ret = a5psw_lk_execute_ctrl(a5psw, &ctrl);
+ if (ret)
+ return ret;
+
+ *entry = ctrl & A5PSW_LK_ADDR_CTRL_ADDRESS;
+
+ return 0;
+}
+
+static int a5psw_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data = {0};
+ bool inc_learncount = false;
+ int ret = 0;
+ u16 entry;
+ u32 reg;
+
+ ether_addr_copy(lk_data.entry.mac, addr);
+ lk_data.entry.port_mask = BIT(port);
+
+ mutex_lock(&a5psw->lk_lock);
+
+ /* Set the value to be written in the lookup table */
+ ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
+ if (ret)
+ goto lk_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+ if (!lk_data.entry.valid) {
+ inc_learncount = true;
+ /* port_mask set to 0x1f when entry is not valid, clear it */
+ lk_data.entry.port_mask = 0;
+ lk_data.entry.prio = 0;
+ }
+
+ lk_data.entry.port_mask |= BIT(port);
+ lk_data.entry.is_static = 1;
+ lk_data.entry.valid = 1;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
+
+ reg = A5PSW_LK_ADDR_CTRL_WRITE | entry;
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto lk_unlock;
+
+ if (inc_learncount) {
+ reg = A5PSW_LK_LEARNCOUNT_MODE_INC;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+ }
+
+lk_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static int a5psw_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data = {0};
+ bool clear = false;
+ u16 entry;
+ u32 reg;
+ int ret;
+
+ ether_addr_copy(lk_data.entry.mac, addr);
+
+ mutex_lock(&a5psw->lk_lock);
+
+ ret = a5psw_lk_execute_lookup(a5psw, &lk_data, &entry);
+ if (ret)
+ goto lk_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+
+ /* Our hardware does not associate any VID with the FDB entries, so if
+ * two entries were added for the same MAC but different VIDs, deleting
+ * the first one would also delete the second one. Since there is
+ * unfortunately nothing we can do about that, do not return an error...
+ */
+ if (!lk_data.entry.valid)
+ goto lk_unlock;
+
+ lk_data.entry.port_mask &= ~BIT(port);
+ /* If there is no more port in the mask, clear the entry */
+ if (lk_data.entry.port_mask == 0)
+ clear = true;
+
+ a5psw_reg_writel(a5psw, A5PSW_LK_DATA_HI, lk_data.hi);
+
+ reg = entry;
+ if (clear)
+ reg |= A5PSW_LK_ADDR_CTRL_CLEAR;
+ else
+ reg |= A5PSW_LK_ADDR_CTRL_WRITE;
+
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto lk_unlock;
+
+ /* Decrement LEARNCOUNT */
+ if (clear) {
+ reg = A5PSW_LK_LEARNCOUNT_MODE_DEC;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+ }
+
+lk_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static int a5psw_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct a5psw *a5psw = ds->priv;
+ union lk_data lk_data;
+ int i = 0, ret = 0;
+ u32 reg;
+
+ mutex_lock(&a5psw->lk_lock);
+
+ for (i = 0; i < A5PSW_TABLE_ENTRIES; i++) {
+ reg = A5PSW_LK_ADDR_CTRL_READ | A5PSW_LK_ADDR_CTRL_WAIT | i;
+
+ ret = a5psw_lk_execute_ctrl(a5psw, &reg);
+ if (ret)
+ goto out_unlock;
+
+ lk_data.hi = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_HI);
+ /* If entry is not valid or does not contain the port, skip */
+ if (!lk_data.entry.valid ||
+ !(lk_data.entry.port_mask & BIT(port)))
+ continue;
+
+ lk_data.lo = a5psw_reg_readl(a5psw, A5PSW_LK_DATA_LO);
+
+ ret = cb(lk_data.entry.mac, 0, lk_data.entry.is_static, data);
+ if (ret)
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&a5psw->lk_lock);
+
+ return ret;
+}
+
+static u64 a5psw_read_stat(struct a5psw *a5psw, u32 offset, int port)
+{
+ u32 reg_lo, reg_hi;
+
+ reg_lo = a5psw_reg_readl(a5psw, offset + A5PSW_PORT_OFFSET(port));
+ /* A5PSW_STATS_HIWORD is latched on stat read */
+ reg_hi = a5psw_reg_readl(a5psw, A5PSW_STATS_HIWORD);
+
+ return ((u64)reg_hi << 32) | reg_lo;
+}
+
+static void a5psw_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+{
+ unsigned int u;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++) {
+ memcpy(data + u * ETH_GSTRING_LEN, a5psw_stats[u].name,
+ ETH_GSTRING_LEN);
+ }
+}
+
+static void a5psw_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
+{
+ struct a5psw *a5psw = ds->priv;
+ unsigned int u;
+
+ for (u = 0; u < ARRAY_SIZE(a5psw_stats); u++)
+ data[u] = a5psw_read_stat(a5psw, a5psw_stats[u].offset, port);
+}
+
+static int a5psw_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return ARRAY_SIZE(a5psw_stats);
+}
+
+static void a5psw_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct a5psw *a5psw = ds->priv;
+
+#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
+ mac_stats->FramesTransmittedOK = RD(aFramesTransmittedOK);
+ mac_stats->SingleCollisionFrames = RD(aSingleCollisions);
+ mac_stats->MultipleCollisionFrames = RD(aMultipleCollisions);
+ mac_stats->FramesReceivedOK = RD(aFramesReceivedOK);
+ mac_stats->FrameCheckSequenceErrors = RD(aFrameCheckSequenceErrors);
+ mac_stats->AlignmentErrors = RD(aAlignmentErrors);
+ mac_stats->OctetsTransmittedOK = RD(aOctetsTransmittedOK);
+ mac_stats->FramesWithDeferredXmissions = RD(aDeferred);
+ mac_stats->LateCollisions = RD(aLateCollisions);
+ mac_stats->FramesAbortedDueToXSColls = RD(aExcessiveCollisions);
+ mac_stats->FramesLostDueToIntMACXmitError = RD(ifOutErrors);
+ mac_stats->CarrierSenseErrors = RD(aCarrierSenseErrors);
+ mac_stats->OctetsReceivedOK = RD(aOctetsReceivedOK);
+ mac_stats->FramesLostDueToIntMACRcvError = RD(ifInErrors);
+ mac_stats->MulticastFramesXmittedOK = RD(ifOutMulticastPkts);
+ mac_stats->BroadcastFramesXmittedOK = RD(ifOutBroadcastPkts);
+ mac_stats->FramesWithExcessiveDeferral = RD(aDeferred);
+ mac_stats->MulticastFramesReceivedOK = RD(ifInMulticastPkts);
+ mac_stats->BroadcastFramesReceivedOK = RD(ifInBroadcastPkts);
+#undef RD
+}
+
+static const struct ethtool_rmon_hist_range a5psw_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, A5PSW_MAX_MTU },
+ {}
+};
+
+static void a5psw_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct a5psw *a5psw = ds->priv;
+
+#define RD(name) a5psw_read_stat(a5psw, A5PSW_##name, port)
+ rmon_stats->undersize_pkts = RD(etherStatsUndersizePkts);
+ rmon_stats->oversize_pkts = RD(etherStatsOversizePkts);
+ rmon_stats->fragments = RD(etherStatsFragments);
+ rmon_stats->jabbers = RD(etherStatsJabbers);
+ rmon_stats->hist[0] = RD(etherStatsPkts64Octets);
+ rmon_stats->hist[1] = RD(etherStatsPkts65to127Octets);
+ rmon_stats->hist[2] = RD(etherStatsPkts128to255Octets);
+ rmon_stats->hist[3] = RD(etherStatsPkts256to511Octets);
+ rmon_stats->hist[4] = RD(etherStatsPkts512to1023Octets);
+ rmon_stats->hist[5] = RD(etherStatsPkts1024to1518Octets);
+ rmon_stats->hist[6] = RD(etherStatsPkts1519toXOctets);
+#undef RD
+
+ *ranges = a5psw_rmon_ranges;
+}
+
+static void a5psw_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct a5psw *a5psw = ds->priv;
+ u64 stat;
+
+ stat = a5psw_read_stat(a5psw, A5PSW_aTxPAUSEMACCtrlFrames, port);
+ ctrl_stats->MACControlFramesTransmitted = stat;
+ stat = a5psw_read_stat(a5psw, A5PSW_aRxPAUSEMACCtrlFrames, port);
+ ctrl_stats->MACControlFramesReceived = stat;
+}
+
+static int a5psw_setup(struct dsa_switch *ds)
+{
+ struct a5psw *a5psw = ds->priv;
+ int port, vlan, ret;
+ struct dsa_port *dp;
+ u32 reg;
+
+ /* Validate that there is only 1 CPU port with index A5PSW_CPU_PORT */
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ if (dp->index != A5PSW_CPU_PORT) {
+ dev_err(a5psw->dev, "Invalid CPU port\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Configure management port */
+ reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_DISCARD;
+ a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);
+
+ /* Set pattern 0 to forward all frames to the mgmt port */
+ a5psw_reg_writel(a5psw, A5PSW_PATTERN_CTRL(A5PSW_PATTERN_MGMTFWD),
+ A5PSW_PATTERN_CTRL_MGMTFWD);
+
+ /* Enable port tagging */
+ reg = FIELD_PREP(A5PSW_MGMT_TAG_CFG_TAGFIELD, ETH_P_DSA_A5PSW);
+ reg |= A5PSW_MGMT_TAG_CFG_ENABLE | A5PSW_MGMT_TAG_CFG_ALL_FRAMES;
+ a5psw_reg_writel(a5psw, A5PSW_MGMT_TAG_CFG, reg);
+
+ /* Enable normal switch operation */
+ reg = A5PSW_LK_ADDR_CTRL_BLOCKING | A5PSW_LK_ADDR_CTRL_LEARNING |
+ A5PSW_LK_ADDR_CTRL_AGEING | A5PSW_LK_ADDR_CTRL_ALLOW_MIGR |
+ A5PSW_LK_ADDR_CTRL_CLEAR_TABLE;
+ a5psw_reg_writel(a5psw, A5PSW_LK_CTRL, reg);
+
+ ret = readl_poll_timeout(a5psw->base + A5PSW_LK_CTRL, reg,
+ !(reg & A5PSW_LK_ADDR_CTRL_CLEAR_TABLE),
+ A5PSW_LK_BUSY_USEC_POLL, A5PSW_CTRL_TIMEOUT);
+ if (ret) {
+ dev_err(a5psw->dev, "Failed to clear lookup table\n");
+ return ret;
+ }
+
+ /* Reset learn count to 0 */
+ reg = A5PSW_LK_LEARNCOUNT_MODE_SET;
+ a5psw_reg_writel(a5psw, A5PSW_LK_LEARNCOUNT, reg);
+
+ /* Clear VLAN resource table */
+ reg = A5PSW_VLAN_RES_WR_PORTMASK | A5PSW_VLAN_RES_WR_TAGMASK;
+ for (vlan = 0; vlan < A5PSW_VLAN_COUNT; vlan++)
+ a5psw_reg_writel(a5psw, A5PSW_VLAN_RES(vlan), reg);
+
+ /* Reset all ports */
+ dsa_switch_for_each_port(dp, ds) {
+ port = dp->index;
+
+ /* Reset the port */
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port),
+ A5PSW_CMD_CFG_SW_RESET);
+
+ /* Enable only CPU port */
+ a5psw_port_enable_set(a5psw, port, dsa_port_is_cpu(dp));
+
+ if (dsa_port_is_unused(dp))
+ continue;
+
+ /* Enable egress flooding for CPU port */
+ if (dsa_port_is_cpu(dp))
+ a5psw_flooding_set_resolution(a5psw, port, true);
+
+ /* Enable management forward only for user ports */
+ if (dsa_port_is_user(dp))
+ a5psw_port_mgmtfwd_set(a5psw, port, true);
+ }
+
+ return 0;
+}
+
+static const struct dsa_switch_ops a5psw_switch_ops = {
+ .get_tag_protocol = a5psw_get_tag_protocol,
+ .setup = a5psw_setup,
+ .port_disable = a5psw_port_disable,
+ .port_enable = a5psw_port_enable,
+ .phylink_get_caps = a5psw_phylink_get_caps,
+ .phylink_mac_select_pcs = a5psw_phylink_mac_select_pcs,
+ .phylink_mac_link_down = a5psw_phylink_mac_link_down,
+ .phylink_mac_link_up = a5psw_phylink_mac_link_up,
+ .port_change_mtu = a5psw_port_change_mtu,
+ .port_max_mtu = a5psw_port_max_mtu,
+ .get_sset_count = a5psw_get_sset_count,
+ .get_strings = a5psw_get_strings,
+ .get_ethtool_stats = a5psw_get_ethtool_stats,
+ .get_eth_mac_stats = a5psw_get_eth_mac_stats,
+ .get_eth_ctrl_stats = a5psw_get_eth_ctrl_stats,
+ .get_rmon_stats = a5psw_get_rmon_stats,
+ .set_ageing_time = a5psw_set_ageing_time,
+ .port_bridge_join = a5psw_port_bridge_join,
+ .port_bridge_leave = a5psw_port_bridge_leave,
+ .port_stp_state_set = a5psw_port_stp_state_set,
+ .port_fast_age = a5psw_port_fast_age,
+ .port_fdb_add = a5psw_port_fdb_add,
+ .port_fdb_del = a5psw_port_fdb_del,
+ .port_fdb_dump = a5psw_port_fdb_dump,
+};
+
+static int a5psw_mdio_wait_busy(struct a5psw *a5psw)
+{
+ u32 status;
+ int err;
+
+ err = readl_poll_timeout(a5psw->base + A5PSW_MDIO_CFG_STATUS, status,
+ !(status & A5PSW_MDIO_CFG_STATUS_BUSY), 10,
+ 1000 * USEC_PER_MSEC);
+ if (err)
+ dev_err(a5psw->dev, "MDIO command timeout\n");
+
+ return err;
+}
+
+static int a5psw_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
+{
+ struct a5psw *a5psw = bus->priv;
+ u32 cmd, status;
+ int ret;
+
+ cmd = A5PSW_MDIO_COMMAND_READ;
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
+
+ ret = a5psw_mdio_wait_busy(a5psw);
+ if (ret)
+ return ret;
+
+ ret = a5psw_reg_readl(a5psw, A5PSW_MDIO_DATA) & A5PSW_MDIO_DATA_MASK;
+
+ status = a5psw_reg_readl(a5psw, A5PSW_MDIO_CFG_STATUS);
+ if (status & A5PSW_MDIO_CFG_STATUS_READERR)
+ return -EIO;
+
+ return ret;
+}
+
+static int a5psw_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
+ u16 phy_data)
+{
+ struct a5psw *a5psw = bus->priv;
+ u32 cmd;
+
+ cmd = FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
+ cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_COMMAND, cmd);
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_DATA, phy_data);
+
+ return a5psw_mdio_wait_busy(a5psw);
+}
+
+static int a5psw_mdio_config(struct a5psw *a5psw, u32 mdio_freq)
+{
+ unsigned long rate;
+ unsigned long div;
+ u32 cfgstatus;
+
+ rate = clk_get_rate(a5psw->hclk);
+ div = ((rate / mdio_freq) / 2);
+ if (div > FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV) ||
+ div < A5PSW_MDIO_CLK_DIV_MIN) {
+ dev_err(a5psw->dev, "MDIO clock div %ld out of range\n", div);
+ return -ERANGE;
+ }
+
+ cfgstatus = FIELD_PREP(A5PSW_MDIO_CFG_STATUS_CLKDIV, div);
+
+ a5psw_reg_writel(a5psw, A5PSW_MDIO_CFG_STATUS, cfgstatus);
+
+ return 0;
+}
+
+static int a5psw_probe_mdio(struct a5psw *a5psw, struct device_node *node)
+{
+ struct device *dev = a5psw->dev;
+ struct mii_bus *bus;
+ u32 mdio_freq;
+ int ret;
+
+ if (of_property_read_u32(node, "clock-frequency", &mdio_freq))
+ mdio_freq = A5PSW_MDIO_DEF_FREQ;
+
+ ret = a5psw_mdio_config(a5psw, mdio_freq);
+ if (ret)
+ return ret;
+
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "a5psw_mdio";
+ bus->read = a5psw_mdio_read;
+ bus->write = a5psw_mdio_write;
+ bus->priv = a5psw;
+ bus->parent = dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ a5psw->mii_bus = bus;
+
+ return devm_of_mdiobus_register(dev, bus, node);
+}
+
+static void a5psw_pcs_free(struct a5psw *a5psw)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5psw->pcs); i++) {
+ if (a5psw->pcs[i])
+ miic_destroy(a5psw->pcs[i]);
+ }
+}
+
+static int a5psw_pcs_get(struct a5psw *a5psw)
+{
+ struct device_node *ports, *port, *pcs_node;
+ struct phylink_pcs *pcs;
+ int ret;
+ u32 reg;
+
+ ports = of_get_child_by_name(a5psw->dev->of_node, "ethernet-ports");
+ if (!ports)
+ return -EINVAL;
+
+ for_each_available_child_of_node(ports, port) {
+ pcs_node = of_parse_phandle(port, "pcs-handle", 0);
+ if (!pcs_node)
+ continue;
+
+ if (of_property_read_u32(port, "reg", &reg)) {
+ ret = -EINVAL;
+ goto free_pcs;
+ }
+
+ if (reg >= ARRAY_SIZE(a5psw->pcs)) {
+ ret = -ENODEV;
+ goto free_pcs;
+ }
+
+ pcs = miic_create(a5psw->dev, pcs_node);
+ if (IS_ERR(pcs)) {
+ dev_err(a5psw->dev, "Failed to create PCS for port %d\n",
+ reg);
+ ret = PTR_ERR(pcs);
+ goto free_pcs;
+ }
+
+ a5psw->pcs[reg] = pcs;
+ of_node_put(pcs_node);
+ }
+ of_node_put(ports);
+
+ return 0;
+
+free_pcs:
+ of_node_put(pcs_node);
+ of_node_put(port);
+ of_node_put(ports);
+ a5psw_pcs_free(a5psw);
+
+ return ret;
+}
+
+static int a5psw_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *mdio;
+ struct dsa_switch *ds;
+ struct a5psw *a5psw;
+ int ret;
+
+ a5psw = devm_kzalloc(dev, sizeof(*a5psw), GFP_KERNEL);
+ if (!a5psw)
+ return -ENOMEM;
+
+ a5psw->dev = dev;
+ mutex_init(&a5psw->lk_lock);
+ spin_lock_init(&a5psw->reg_lock);
+ a5psw->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(a5psw->base))
+ return PTR_ERR(a5psw->base);
+
+ ret = a5psw_pcs_get(a5psw);
+ if (ret)
+ return ret;
+
+ a5psw->hclk = devm_clk_get(dev, "hclk");
+ if (IS_ERR(a5psw->hclk)) {
+ dev_err(dev, "failed get hclk clock\n");
+ ret = PTR_ERR(a5psw->hclk);
+ goto free_pcs;
+ }
+
+ a5psw->clk = devm_clk_get(dev, "clk");
+ if (IS_ERR(a5psw->clk)) {
+ dev_err(dev, "failed get clk_switch clock\n");
+ ret = PTR_ERR(a5psw->clk);
+ goto free_pcs;
+ }
+
+ ret = clk_prepare_enable(a5psw->clk);
+ if (ret)
+ goto free_pcs;
+
+ ret = clk_prepare_enable(a5psw->hclk);
+ if (ret)
+ goto clk_disable;
+
+ mdio = of_get_child_by_name(dev->of_node, "mdio");
+ if (of_device_is_available(mdio)) {
+ ret = a5psw_probe_mdio(a5psw, mdio);
+ if (ret) {
+ of_node_put(mdio);
+ dev_err(dev, "Failed to register MDIO: %d\n", ret);
+ goto hclk_disable;
+ }
+ }
+
+ of_node_put(mdio);
+
+ ds = &a5psw->ds;
+ ds->dev = dev;
+ ds->num_ports = A5PSW_PORTS_NUM;
+ ds->ops = &a5psw_switch_ops;
+ ds->priv = a5psw;
+
+ ret = dsa_register_switch(ds);
+ if (ret) {
+ dev_err(dev, "Failed to register DSA switch: %d\n", ret);
+ goto hclk_disable;
+ }
+
+ return 0;
+
+hclk_disable:
+ clk_disable_unprepare(a5psw->hclk);
+clk_disable:
+ clk_disable_unprepare(a5psw->clk);
+free_pcs:
+ a5psw_pcs_free(a5psw);
+
+ return ret;
+}
+
+static int a5psw_remove(struct platform_device *pdev)
+{
+ struct a5psw *a5psw = platform_get_drvdata(pdev);
+
+ if (!a5psw)
+ return 0;
+
+ dsa_unregister_switch(&a5psw->ds);
+ a5psw_pcs_free(a5psw);
+ clk_disable_unprepare(a5psw->hclk);
+ clk_disable_unprepare(a5psw->clk);
+
+ return 0;
+}
+
+static void a5psw_shutdown(struct platform_device *pdev)
+{
+ struct a5psw *a5psw = platform_get_drvdata(pdev);
+
+ if (!a5psw)
+ return;
+
+ dsa_switch_shutdown(&a5psw->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id a5psw_of_mtable[] = {
+ { .compatible = "renesas,rzn1-a5psw", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, a5psw_of_mtable);
+
+static struct platform_driver a5psw_driver = {
+ .driver = {
+ .name = "rzn1_a5psw",
+ .of_match_table = of_match_ptr(a5psw_of_mtable),
+ },
+ .probe = a5psw_probe,
+ .remove = a5psw_remove,
+ .shutdown = a5psw_shutdown,
+};
+module_platform_driver(a5psw_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Renesas RZ/N1 Advanced 5-port Switch driver");
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
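
A quick standalone sketch of the clock-divider computation performed by a5psw_mdio_config() above; the 125 MHz hclk rate is an assumed example value (the driver reads the real rate with clk_get_rate()), and the bounds check mirrors the A5PSW_MDIO_CLK_DIV_MIN / FIELD_MAX(A5PSW_MDIO_CFG_STATUS_CLKDIV) test in the patch:

#include <stdio.h>

int main(void)
{
	/* Assumed example rate; the driver uses clk_get_rate(a5psw->hclk) */
	unsigned long hclk = 125000000UL;
	/* A5PSW_MDIO_DEF_FREQ, used when "clock-frequency" is absent */
	unsigned long mdio_freq = 2500000UL;

	/* Same formula as a5psw_mdio_config(): div = rate / mdio_freq / 2 */
	unsigned long div = (hclk / mdio_freq) / 2;

	/* The driver rejects div < 5 or div above the 9-bit CLKDIV field */
	if (div < 5 || div > 0x1ff)
		printf("div %lu out of range\n", div);
	else
		printf("clkdiv = %lu\n", div); /* 25 for these inputs */

	return 0;
}
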
diff --git a/drivers/net/dsa/rzn1_a5psw.h b/drivers/net/dsa/rzn1_a5psw.h
new file mode 100644
index 000000000000..c67abd49c013
--- /dev/null
+++ b/drivers/net/dsa/rzn1_a5psw.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 Schneider Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/pcs-rzn1-miic.h>
+#include <net/dsa.h>
+
+#define A5PSW_REVISION 0x0
+#define A5PSW_PORT_OFFSET(port) (0x400 * (port))
+
+#define A5PSW_PORT_ENA 0x8
+#define A5PSW_PORT_ENA_RX_SHIFT 16
+#define A5PSW_PORT_ENA_TX_RX(port) (BIT((port) + A5PSW_PORT_ENA_RX_SHIFT) | \
+ BIT(port))
+#define A5PSW_UCAST_DEF_MASK 0xC
+
+#define A5PSW_VLAN_VERIFY 0x10
+#define A5PSW_VLAN_VERI_SHIFT 0
+#define A5PSW_VLAN_DISC_SHIFT 16
+
+#define A5PSW_BCAST_DEF_MASK 0x14
+#define A5PSW_MCAST_DEF_MASK 0x18
+
+#define A5PSW_INPUT_LEARN 0x1C
+#define A5PSW_INPUT_LEARN_DIS(p) BIT((p) + 16)
+#define A5PSW_INPUT_LEARN_BLOCK(p) BIT(p)
+
+#define A5PSW_MGMT_CFG 0x20
+#define A5PSW_MGMT_CFG_DISCARD BIT(7)
+
+#define A5PSW_MODE_CFG 0x24
+#define A5PSW_MODE_STATS_RESET BIT(31)
+
+#define A5PSW_VLAN_IN_MODE 0x28
+#define A5PSW_VLAN_IN_MODE_PORT_SHIFT(port) ((port) * 2)
+#define A5PSW_VLAN_IN_MODE_PORT(port) (GENMASK(1, 0) << \
+ A5PSW_VLAN_IN_MODE_PORT_SHIFT(port))
+#define A5PSW_VLAN_IN_MODE_SINGLE_PASSTHROUGH 0x0
+#define A5PSW_VLAN_IN_MODE_SINGLE_REPLACE 0x1
+#define A5PSW_VLAN_IN_MODE_TAG_ALWAYS 0x2
+
+#define A5PSW_VLAN_OUT_MODE 0x2C
+#define A5PSW_VLAN_OUT_MODE_PORT(port) (GENMASK(1, 0) << ((port) * 2))
+#define A5PSW_VLAN_OUT_MODE_DIS 0x0
+#define A5PSW_VLAN_OUT_MODE_STRIP 0x1
+#define A5PSW_VLAN_OUT_MODE_TAG_THROUGH 0x2
+#define A5PSW_VLAN_OUT_MODE_TRANSPARENT 0x3
+
+#define A5PSW_VLAN_IN_MODE_ENA 0x30
+#define A5PSW_VLAN_TAG_ID 0x34
+
+#define A5PSW_SYSTEM_TAGINFO(port) (0x200 + A5PSW_PORT_OFFSET(port))
+
+#define A5PSW_AUTH_PORT(port) (0x240 + 4 * (port))
+#define A5PSW_AUTH_PORT_AUTHORIZED BIT(0)
+
+#define A5PSW_VLAN_RES(entry) (0x280 + 4 * (entry))
+#define A5PSW_VLAN_RES_WR_PORTMASK BIT(30)
+#define A5PSW_VLAN_RES_WR_TAGMASK BIT(29)
+#define A5PSW_VLAN_RES_RD_TAGMASK BIT(28)
+#define A5PSW_VLAN_RES_ID GENMASK(16, 5)
+#define A5PSW_VLAN_RES_PORTMASK GENMASK(4, 0)
+
+#define A5PSW_RXMATCH_CONFIG(port) (0x3e80 + 4 * (port))
+#define A5PSW_RXMATCH_CONFIG_PATTERN(p) BIT(p)
+
+#define A5PSW_PATTERN_CTRL(p) (0x3eb0 + 4 * (p))
+#define A5PSW_PATTERN_CTRL_MGMTFWD BIT(1)
+
+#define A5PSW_LK_CTRL 0x400
+#define A5PSW_LK_ADDR_CTRL_BLOCKING BIT(0)
+#define A5PSW_LK_ADDR_CTRL_LEARNING BIT(1)
+#define A5PSW_LK_ADDR_CTRL_AGEING BIT(2)
+#define A5PSW_LK_ADDR_CTRL_ALLOW_MIGR BIT(3)
+#define A5PSW_LK_ADDR_CTRL_CLEAR_TABLE BIT(6)
+
+#define A5PSW_LK_ADDR_CTRL 0x408
+#define A5PSW_LK_ADDR_CTRL_BUSY BIT(31)
+#define A5PSW_LK_ADDR_CTRL_DELETE_PORT BIT(30)
+#define A5PSW_LK_ADDR_CTRL_CLEAR BIT(29)
+#define A5PSW_LK_ADDR_CTRL_LOOKUP BIT(28)
+#define A5PSW_LK_ADDR_CTRL_WAIT BIT(27)
+#define A5PSW_LK_ADDR_CTRL_READ BIT(26)
+#define A5PSW_LK_ADDR_CTRL_WRITE BIT(25)
+#define A5PSW_LK_ADDR_CTRL_ADDRESS GENMASK(12, 0)
+
+#define A5PSW_LK_DATA_LO 0x40C
+#define A5PSW_LK_DATA_HI 0x410
+#define A5PSW_LK_DATA_HI_VALID BIT(16)
+#define A5PSW_LK_DATA_HI_PORT BIT(16)
+
+#define A5PSW_LK_LEARNCOUNT 0x418
+#define A5PSW_LK_LEARNCOUNT_COUNT GENMASK(13, 0)
+#define A5PSW_LK_LEARNCOUNT_MODE GENMASK(31, 30)
+#define A5PSW_LK_LEARNCOUNT_MODE_SET 0x0
+#define A5PSW_LK_LEARNCOUNT_MODE_INC 0x1
+#define A5PSW_LK_LEARNCOUNT_MODE_DEC 0x2
+
+#define A5PSW_MGMT_TAG_CFG 0x480
+#define A5PSW_MGMT_TAG_CFG_TAGFIELD GENMASK(31, 16)
+#define A5PSW_MGMT_TAG_CFG_ALL_FRAMES BIT(1)
+#define A5PSW_MGMT_TAG_CFG_ENABLE BIT(0)
+
+#define A5PSW_LK_AGETIME 0x41C
+#define A5PSW_LK_AGETIME_MASK GENMASK(23, 0)
+
+#define A5PSW_MDIO_CFG_STATUS 0x700
+#define A5PSW_MDIO_CFG_STATUS_CLKDIV GENMASK(15, 7)
+#define A5PSW_MDIO_CFG_STATUS_READERR BIT(1)
+#define A5PSW_MDIO_CFG_STATUS_BUSY BIT(0)
+
+#define A5PSW_MDIO_COMMAND 0x704
+/* Register is named TRAININIT in datasheet and should be set when reading */
+#define A5PSW_MDIO_COMMAND_READ BIT(15)
+#define A5PSW_MDIO_COMMAND_PHY_ADDR GENMASK(9, 5)
+#define A5PSW_MDIO_COMMAND_REG_ADDR GENMASK(4, 0)
+
+#define A5PSW_MDIO_DATA 0x708
+#define A5PSW_MDIO_DATA_MASK GENMASK(15, 0)
+
+#define A5PSW_CMD_CFG(port) (0x808 + A5PSW_PORT_OFFSET(port))
+#define A5PSW_CMD_CFG_CNTL_FRM_ENA BIT(23)
+#define A5PSW_CMD_CFG_SW_RESET BIT(13)
+#define A5PSW_CMD_CFG_TX_CRC_APPEND BIT(11)
+#define A5PSW_CMD_CFG_HD_ENA BIT(10)
+#define A5PSW_CMD_CFG_PAUSE_IGNORE BIT(8)
+#define A5PSW_CMD_CFG_CRC_FWD BIT(6)
+#define A5PSW_CMD_CFG_ETH_SPEED BIT(3)
+#define A5PSW_CMD_CFG_RX_ENA BIT(1)
+#define A5PSW_CMD_CFG_TX_ENA BIT(0)
+
+#define A5PSW_FRM_LENGTH(port) (0x814 + A5PSW_PORT_OFFSET(port))
+#define A5PSW_FRM_LENGTH_MASK GENMASK(13, 0)
+
+#define A5PSW_STATUS(port) (0x840 + A5PSW_PORT_OFFSET(port))
+
+#define A5PSW_STATS_HIWORD 0x900
+
+/* Stats */
+#define A5PSW_aFramesTransmittedOK 0x868
+#define A5PSW_aFramesReceivedOK 0x86C
+#define A5PSW_aFrameCheckSequenceErrors 0x870
+#define A5PSW_aAlignmentErrors 0x874
+#define A5PSW_aOctetsTransmittedOK 0x878
+#define A5PSW_aOctetsReceivedOK 0x87C
+#define A5PSW_aTxPAUSEMACCtrlFrames 0x880
+#define A5PSW_aRxPAUSEMACCtrlFrames 0x884
+/* If */
+#define A5PSW_ifInErrors 0x888
+#define A5PSW_ifOutErrors 0x88C
+#define A5PSW_ifInUcastPkts 0x890
+#define A5PSW_ifInMulticastPkts 0x894
+#define A5PSW_ifInBroadcastPkts 0x898
+#define A5PSW_ifOutDiscards 0x89C
+#define A5PSW_ifOutUcastPkts 0x8A0
+#define A5PSW_ifOutMulticastPkts 0x8A4
+#define A5PSW_ifOutBroadcastPkts 0x8A8
+/* Ether */
+#define A5PSW_etherStatsDropEvents 0x8AC
+#define A5PSW_etherStatsOctets 0x8B0
+#define A5PSW_etherStatsPkts 0x8B4
+#define A5PSW_etherStatsUndersizePkts 0x8B8
+#define A5PSW_etherStatsOversizePkts 0x8BC
+#define A5PSW_etherStatsPkts64Octets 0x8C0
+#define A5PSW_etherStatsPkts65to127Octets 0x8C4
+#define A5PSW_etherStatsPkts128to255Octets 0x8C8
+#define A5PSW_etherStatsPkts256to511Octets 0x8CC
+#define A5PSW_etherStatsPkts512to1023Octets 0x8D0
+#define A5PSW_etherStatsPkts1024to1518Octets 0x8D4
+#define A5PSW_etherStatsPkts1519toXOctets 0x8D8
+#define A5PSW_etherStatsJabbers 0x8DC
+#define A5PSW_etherStatsFragments 0x8E0
+
+#define A5PSW_VLANReceived 0x8E8
+#define A5PSW_VLANTransmitted 0x8EC
+
+#define A5PSW_aDeferred 0x910
+#define A5PSW_aMultipleCollisions 0x914
+#define A5PSW_aSingleCollisions 0x918
+#define A5PSW_aLateCollisions 0x91C
+#define A5PSW_aExcessiveCollisions 0x920
+#define A5PSW_aCarrierSenseErrors 0x924
+
+#define A5PSW_VLAN_TAG(prio, id) (((prio) << 12) | (id))
+#define A5PSW_PORTS_NUM 5
+#define A5PSW_CPU_PORT (A5PSW_PORTS_NUM - 1)
+#define A5PSW_MDIO_DEF_FREQ 2500000
+#define A5PSW_MDIO_TIMEOUT 100
+#define A5PSW_JUMBO_LEN (10 * SZ_1K)
+#define A5PSW_MDIO_CLK_DIV_MIN 5
+#define A5PSW_TAG_LEN 8
+#define A5PSW_VLAN_COUNT 32
+
+/* Ensure enough space for 2 VLAN tags */
+#define A5PSW_EXTRA_MTU_LEN (A5PSW_TAG_LEN + 8)
+#define A5PSW_MAX_MTU (A5PSW_JUMBO_LEN - A5PSW_EXTRA_MTU_LEN)
+
+#define A5PSW_PATTERN_MGMTFWD 0
+
+#define A5PSW_LK_BUSY_USEC_POLL 10
+#define A5PSW_CTRL_TIMEOUT 1000
+#define A5PSW_TABLE_ENTRIES 8192
+
+struct fdb_entry {
+ u8 mac[ETH_ALEN];
+ u16 valid:1;
+ u16 is_static:1;
+ u16 prio:3;
+ u16 port_mask:5;
+ u16 reserved:6;
+} __packed;
+
+union lk_data {
+ struct {
+ u32 lo;
+ u32 hi;
+ };
+ struct fdb_entry entry;
+};
+
+/**
+ * struct a5psw - switch struct
+ * @base: Base address of the switch
+ * @hclk: hclk_switch clock
+ * @clk: clk_switch clock
+ * @dev: Device associated to the switch
+ * @mii_bus: MDIO bus struct
+ * @pcs: Array of PCS connected to the switch ports (not for the CPU)
+ * @ds: DSA switch struct
+ * @lk_lock: Lock for the lookup table
+ * @reg_lock: Lock for register read-modify-write operation
+ * @bridged_ports: Mask of ports that are bridged and should be flooded
+ * @br_dev: Bridge net device
+ */
+struct a5psw {
+ void __iomem *base;
+ struct clk *hclk;
+ struct clk *clk;
+ struct device *dev;
+ struct mii_bus *mii_bus;
+ struct phylink_pcs *pcs[A5PSW_PORTS_NUM - 1];
+ struct dsa_switch ds;
+ struct mutex lk_lock;
+ spinlock_t reg_lock;
+ u32 bridged_ports;
+ struct net_device *br_dev;
+};
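
For illustration, a small host-side sketch of how the fdb_entry/lk_data pair above lets a lookup entry be composed field by field and then handed to the A5PSW_LK_DATA_LO/HI registers as two 32-bit words; exact bit placement depends on the compiler's bitfield layout, so treat this as an illustration of the union's purpose, not a layout guarantee:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct fdb_entry {
	uint8_t mac[ETH_ALEN];
	uint16_t valid:1;
	uint16_t is_static:1;
	uint16_t prio:3;
	uint16_t port_mask:5;
	uint16_t reserved:6;
} __attribute__((packed));

union lk_data {
	struct {
		uint32_t lo;
		uint32_t hi;
	};
	struct fdb_entry entry;
};

int main(void)
{
	const uint8_t mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	union lk_data lk = { 0 };

	memcpy(lk.entry.mac, mac, ETH_ALEN);
	lk.entry.valid = 1;
	lk.entry.port_mask = 1 << 4;	/* hypothetical: last of the 5 ports */

	/* These two words are what the driver pushes to LK_DATA_LO/HI */
	printf("lo=0x%08x hi=0x%08x\n",
	       (unsigned int)lk.lo, (unsigned int)lk.hi);
	return 0;
}
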
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 21dba16af097..fb1549a5fe32 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -149,8 +149,10 @@ struct sja1105_info {
bool (*rxtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
void (*txtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
int (*clocking_setup)(struct sja1105_private *priv);
- int (*pcs_mdio_read)(struct mii_bus *bus, int phy, int reg);
- int (*pcs_mdio_write)(struct mii_bus *bus, int phy, int reg, u16 val);
+ int (*pcs_mdio_read_c45)(struct mii_bus *bus, int phy, int mmd,
+ int reg);
+ int (*pcs_mdio_write_c45)(struct mii_bus *bus, int phy, int mmd,
+ int reg, u16 val);
int (*disable_microcontroller)(struct sja1105_private *priv);
const char *name;
bool supports_mii[SJA1105_MAX_NUM_PORTS];
@@ -249,6 +251,7 @@ struct sja1105_private {
bool fixed_link[SJA1105_MAX_NUM_PORTS];
unsigned long ucast_egress_floods;
unsigned long bcast_egress_floods;
+ unsigned long hwts_tx_en;
const struct sja1105_info *info;
size_t max_xfer_len;
struct spi_device *spidev;
@@ -256,11 +259,13 @@ struct sja1105_private {
u16 bridge_pvid[SJA1105_MAX_NUM_PORTS];
u16 tag_8021q_pvid[SJA1105_MAX_NUM_PORTS];
struct sja1105_flow_block flow_block;
- struct sja1105_port ports[SJA1105_MAX_NUM_PORTS];
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
+ /* PTP two-step TX timestamp ID, and its serialization lock */
+ spinlock_t ts_id_lock;
+ u8 ts_id;
/* Serializes access to the dynamic config interface */
struct mutex dynamic_config_lock;
struct devlink_region **regions;
@@ -269,7 +274,6 @@ struct sja1105_private {
struct mii_bus *mdio_base_tx;
struct mii_bus *mdio_pcs;
struct dw_xpcs *xpcs[SJA1105_MAX_NUM_PORTS];
- struct sja1105_tagger_data tagger_data;
struct sja1105_ptp_data ptp_data;
struct sja1105_tas_data tas_data;
};
@@ -301,10 +305,12 @@ void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
/* From sja1105_mdio.c */
int sja1105_mdiobus_register(struct dsa_switch *ds);
void sja1105_mdiobus_unregister(struct dsa_switch *ds);
-int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
-int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
-int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
-int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
+int sja1105_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg);
+int sja1105_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val);
+int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg);
+int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val);
/* From sja1105_devlink.c */
int sja1105_devlink_setup(struct dsa_switch *ds);
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 0569ff066634..30b1f1ba762f 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -93,8 +93,10 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
region = dsa_devlink_region_create(ds, ops, 1, size);
if (IS_ERR(region)) {
- while (i-- >= 0)
+ while (--i >= 0)
dsa_devlink_region_destroy(priv->regions[i]);
+
+ kfree(priv->regions);
return PTR_ERR(region);
}
@@ -120,16 +122,10 @@ int sja1105_devlink_info_get(struct dsa_switch *ds,
struct netlink_ext_ack *extack)
{
struct sja1105_private *priv = ds->priv;
- int rc;
-
- rc = devlink_info_driver_name_put(req, "sja1105");
- if (rc)
- return rc;
- rc = devlink_info_version_fixed_put(req,
- DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
- priv->info->name);
- return rc;
+ return devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ priv->info->name);
}
int sja1105_devlink_setup(struct dsa_switch *ds)
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 72b9b39b0989..fad5afe3819c 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -300,6 +300,46 @@ static int sja1105_flower_parse_key(struct sja1105_private *priv,
return -EOPNOTSUPP;
}
+static int sja1105_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
struct flow_cls_offload *cls, bool ingress)
{
@@ -321,12 +361,9 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_POLICE:
- if (act->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- rc = -EOPNOTSUPP;
+ rc = sja1105_policer_validate(&rule->action, act, extack);
+ if (rc)
goto out;
- }
rc = sja1105_flower_policer(priv, port, extack, cookie,
&key,
@@ -379,7 +416,7 @@ int sja1105_cls_flower_add(struct dsa_switch *ds, int port,
vl_rule = true;
rc = sja1105_vl_gate(priv, port, extack, cookie,
- &key, act->gate.index,
+ &key, act->hw_index,
act->gate.prio,
act->gate.basetime,
act->gate.cycletime,
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index c343effe2e96..b70dcf32a26d 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -118,13 +118,14 @@ static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
static int sja1105_commit_pvid(struct dsa_switch *ds, int port)
{
struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct sja1105_private *priv = ds->priv;
struct sja1105_vlan_lookup_entry *vlan;
bool drop_untagged = false;
int match, rc;
u16 pvid;
- if (dp->bridge_dev && br_vlan_enabled(dp->bridge_dev))
+ if (br && br_vlan_enabled(br))
pvid = priv->bridge_pvid[port];
else
pvid = priv->tag_8021q_pvid[port];
@@ -392,10 +393,8 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
.start_dynspc = 0,
/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
.poly = 0x97,
- /* This selects between Independent VLAN Learning (IVL) and
- * Shared VLAN Learning (SVL)
- */
- .shared_learn = true,
+ /* Always use Independent VLAN Learning (IVL) */
+ .shared_learn = false,
/* Don't discard management traffic based on ENFPORT -
* we don't perform SMAC port enforcement anyway, so
* what we are setting here doesn't matter.
@@ -1039,7 +1038,7 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
policing[bcast].sharindx = port;
/* Only SJA1110 has multicast policers */
- if (mcast <= table->ops->max_entry_count)
+ if (mcast < table->ops->max_entry_count)
policing[mcast].sharindx = port;
}
@@ -1357,37 +1356,16 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
return sja1105_clocking_setup_port(priv, port);
}
-/* The SJA1105 MAC programming model is through the static config (the xMII
- * Mode table cannot be dynamically reconfigured), and we have to program
- * that early (earlier than PHYLINK calls us, anyway).
- * So just error out in case the connected PHY attempts to change the initial
- * system interface MII protocol from what is defined in the DT, at least for
- * now.
- */
-static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
- phy_interface_t interface)
+static struct phylink_pcs *
+sja1105_mac_select_pcs(struct dsa_switch *ds, int port, phy_interface_t iface)
{
- return priv->phy_mode[port] != interface;
-}
-
-static void sja1105_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state)
-{
- struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
- struct dw_xpcs *xpcs;
-
- if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
- dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
- phy_modes(state->interface));
- return;
- }
-
- xpcs = priv->xpcs[port];
+ struct dw_xpcs *xpcs = priv->xpcs[port];
if (xpcs)
- phylink_set_pcs(dp->pl, &xpcs->pcs);
+ return &xpcs->pcs;
+
+ return NULL;
}
static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
@@ -1411,48 +1389,53 @@ static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
sja1105_inhibit_tx(priv, BIT(port), false);
}
-static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void sja1105_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- /* Construct a new mask which exhaustively contains all link features
- * supported by the MAC, and then apply that (logical AND) to what will
- * be sent to the PHY for "marketing".
- */
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct sja1105_private *priv = ds->priv;
struct sja1105_xmii_params_entry *mii;
+ phy_interface_t phy_mode;
- mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
-
- /* include/linux/phylink.h says:
- * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
- * expects the MAC driver to return all supported link modes.
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
*/
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- sja1105_phy_mode_mismatch(priv, port, state->interface)) {
- linkmode_zero(supported);
- return;
+ config->legacy_pre_march2020 = false;
+
+ phy_mode = priv->phy_mode[port];
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
+ /* Changing the PHY mode on SERDES ports is possible and makes
+ * sense, because that is done through the XPCS. We allow
+ * changes between SGMII and 2500base-X.
+ */
+ if (priv->info->supports_sgmii[port])
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ config->supported_interfaces);
+
+ if (priv->info->supports_2500basex[port])
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ config->supported_interfaces);
+ } else {
+ /* The SJA1105 MAC programming model is through the static
+ * config (the xMII Mode table cannot be dynamically
+ * reconfigured), and we have to program that early.
+ */
+ __set_bit(phy_mode, config->supported_interfaces);
}
/* The MAC does not support pause frames, and also doesn't
* support half-duplex traffic modes.
*/
- phylink_set(mask, Autoneg);
- phylink_set(mask, MII);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 100baseT1_Full);
+ config->mac_capabilities = MAC_10FD | MAC_100FD;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
mii->xmii_mode[port] == XMII_MODE_SGMII)
- phylink_set(mask, 1000baseT_Full);
- if (priv->info->supports_2500basex[port]) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
+ config->mac_capabilities |= MAC_1000FD;
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ if (priv->info->supports_2500basex[port])
+ config->mac_capabilities |= MAC_2500FD;
}
static int
@@ -1818,25 +1801,52 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
}
static int sja1105_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
return priv->info->fdb_add_cmd(ds, port, addr, vid);
}
static int sja1105_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
+ if (!vid) {
+ switch (db.type) {
+ case DSA_DB_PORT:
+ vid = dsa_tag_8021q_standalone_vid(db.dp);
+ break;
+ case DSA_DB_BRIDGE:
+ vid = dsa_tag_8021q_bridge_vid(db.bridge.num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
int i;
@@ -1873,7 +1883,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
- if (!dsa_port_is_vlan_filtering(dp))
+ if (vid_is_dsa_8021q(l2_lookup.vlanid))
l2_lookup.vlanid = 0;
rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
if (rc)
@@ -1884,7 +1894,15 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
static void sja1105_fast_age(struct dsa_switch *ds, int port)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
+ struct dsa_db db = {
+ .type = DSA_DB_BRIDGE,
+ .bridge = {
+ .dev = dsa_port_bridge_dev_get(dp),
+ .num = dsa_port_bridge_num_get(dp),
+ },
+ };
int i;
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
@@ -1912,7 +1930,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
- rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid);
+ rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
if (rc) {
dev_err(ds->dev,
"Failed to delete FDB entry %pM vid %lld: %pe\n",
@@ -1923,15 +1941,17 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
}
static int sja1105_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
- return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
+ return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid, db);
}
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+ const struct switchdev_obj_port_mdb *mdb,
+ struct dsa_db db)
{
- return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
+ return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid, db);
}
/* Common function for unicast and broadcast flood configuration.
@@ -1979,7 +1999,7 @@ static int sja1105_manage_flood_domains(struct sja1105_private *priv)
}
static int sja1105_bridge_member(struct dsa_switch *ds, int port,
- struct net_device *br, bool member)
+ struct dsa_bridge bridge, bool member)
{
struct sja1105_l2_forwarding_entry *l2_fwd;
struct sja1105_private *priv = ds->priv;
@@ -2004,7 +2024,7 @@ static int sja1105_bridge_member(struct dsa_switch *ds, int port,
*/
if (i == port)
continue;
- if (dsa_to_port(ds, i)->bridge_dev != br)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
sja1105_port_allow_traffic(l2_fwd, i, port, member);
sja1105_port_allow_traffic(l2_fwd, port, i, member);
@@ -2073,15 +2093,32 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
}
static int sja1105_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge,
+ bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
- return sja1105_bridge_member(ds, port, br, true);
+ int rc;
+
+ rc = sja1105_bridge_member(ds, port, bridge, true);
+ if (rc)
+ return rc;
+
+ rc = dsa_tag_8021q_bridge_join(ds, port, bridge);
+ if (rc) {
+ sja1105_bridge_member(ds, port, bridge, false);
+ return rc;
+ }
+
+ *tx_fwd_offload = true;
+
+ return 0;
}
static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *br)
+ struct dsa_bridge bridge)
{
- sja1105_bridge_member(ds, port, br, false);
+ dsa_tag_8021q_bridge_leave(ds, port, bridge);
+ sja1105_bridge_member(ds, port, bridge, false);
}
#define BYTES_PER_KBIT (1000LL / 8)
@@ -2215,14 +2252,13 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
* change it through the dynamic interface later.
*/
for (i = 0; i < ds->num_ports; i++) {
- u32 reg_addr = mdiobus_c45_addr(MDIO_MMD_VEND2, MDIO_CTRL1);
-
speed_mbps[i] = sja1105_port_speed_to_ethtool(priv,
mac[i].speed);
mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
if (priv->xpcs[i])
- bmcr[i] = mdiobus_read(priv->mdio_pcs, i, reg_addr);
+ bmcr[i] = mdiobus_c45_read(priv->mdio_pcs, i,
+ MDIO_MMD_VEND2, MDIO_CTRL1);
}
/* No PTP operations can run right now */
@@ -2294,7 +2330,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
else
mode = MLO_AN_PHY;
- rc = xpcs_do_config(xpcs, priv->phy_mode[i], mode);
+ rc = xpcs_do_config(xpcs, priv->phy_mode[i], mode, NULL);
if (rc < 0)
goto out;
@@ -2340,7 +2376,6 @@ sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
struct netlink_ext_ack *extack)
{
- struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
struct sja1105_private *priv = ds->priv;
struct sja1105_table *table;
@@ -2378,28 +2413,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
general_params->incl_srcpt1 = enabled;
general_params->incl_srcpt0 = enabled;
- /* VLAN filtering => independent VLAN learning.
- * No VLAN filtering (or best effort) => shared VLAN learning.
- *
- * In shared VLAN learning mode, untagged traffic still gets
- * pvid-tagged, and the FDB table gets populated with entries
- * containing the "real" (pvid or from VLAN tag) VLAN ID.
- * However the switch performs a masked L2 lookup in the FDB,
- * effectively only looking up a frame's DMAC (and not VID) for the
- * forwarding decision.
- *
- * This is extremely convenient for us, because in modes with
- * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
- * each front panel port. This is good for identification but breaks
- * learning badly - the VID of the learnt FDB entry is unique, aka
- * no frames coming from any other port are going to have it. So
- * for forwarding purposes, this is as though learning was broken
- * (all frames get flooded).
- */
- table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
- l2_lookup_params = table->entries;
- l2_lookup_params->shared_learn = !enabled;
-
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
continue;
@@ -2508,7 +2521,7 @@ static int sja1105_bridge_vlan_add(struct dsa_switch *ds, int port,
*/
if (vid_is_dsa_8021q(vlan->vid)) {
NL_SET_ERR_MSG_MOD(extack,
- "Range 1024-3071 reserved for dsa_8021q operation");
+ "Range 3072-4095 reserved for dsa_8021q operation");
return -EBUSY;
}
@@ -2587,8 +2600,9 @@ static int sja1105_prechangeupper(struct dsa_switch *ds, int port,
if (netif_is_bridge_master(upper)) {
list_for_each_entry(dp, &dst->ports, list) {
- if (dp->bridge_dev && dp->bridge_dev != upper &&
- br_vlan_enabled(dp->bridge_dev)) {
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+
+ if (br && br != upper && br_vlan_enabled(br)) {
NL_SET_ERR_MSG_MOD(extack,
"Only one VLAN-aware bridge is supported");
return -EBUSY;
@@ -2599,18 +2613,6 @@ static int sja1105_prechangeupper(struct dsa_switch *ds, int port,
return 0;
}
-static void sja1105_port_disable(struct dsa_switch *ds, int port)
-{
- struct sja1105_private *priv = ds->priv;
- struct sja1105_port *sp = &priv->ports[port];
-
- if (!dsa_is_user_port(ds, port))
- return;
-
- kthread_cancel_work_sync(&sp->xmit_work);
- skb_queue_purge(&sp->xmit_queue);
-}
-
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
struct sk_buff *skb, bool takets)
{
@@ -2669,10 +2671,8 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
return NETDEV_TX_OK;
}
-#define work_to_port(work) \
- container_of((work), struct sja1105_port, xmit_work)
-#define tagger_to_sja1105(t) \
- container_of((t), struct sja1105_private, tagger_data)
+#define work_to_xmit_work(w) \
+ container_of((w), struct sja1105_deferred_xmit_work, work)
/* Deferred work is unfortunately necessary because setting up the management
* route cannot be done from atomic context (SPI transfer takes a sleepable
@@ -2680,25 +2680,41 @@ static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
*/
static void sja1105_port_deferred_xmit(struct kthread_work *work)
{
- struct sja1105_port *sp = work_to_port(work);
- struct sja1105_tagger_data *tagger_data = sp->data;
- struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
- int port = sp - priv->ports;
- struct sk_buff *skb;
+ struct sja1105_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+ struct sk_buff *clone, *skb = xmit_work->skb;
+ struct dsa_switch *ds = xmit_work->dp->ds;
+ struct sja1105_private *priv = ds->priv;
+ int port = xmit_work->dp->index;
- while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
- struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
+ clone = SJA1105_SKB_CB(skb)->clone;
- mutex_lock(&priv->mgmt_lock);
+ mutex_lock(&priv->mgmt_lock);
- sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
+ sja1105_mgmt_xmit(ds, port, 0, skb, !!clone);
- /* The clone, if there, was made by dsa_skb_tx_timestamp */
- if (clone)
- sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
+ /* The clone, if there, was made by dsa_skb_tx_timestamp */
+ if (clone)
+ sja1105_ptp_txtstamp_skb(ds, port, clone);
- mutex_unlock(&priv->mgmt_lock);
- }
+ mutex_unlock(&priv->mgmt_lock);
+
+ kfree(xmit_work);
+}
+
+static int sja1105_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_tagger_data *tagger_data;
+
+ if (proto != priv->info->tag_proto)
+ return -EPROTONOSUPPORT;
+
+ tagger_data = sja1105_tagger_data(ds);
+ tagger_data->xmit_work_fn = sja1105_port_deferred_xmit;
+ tagger_data->meta_tstamp_handler = sja1110_process_meta_tstamp;
+
+ return 0;
}
/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
@@ -2830,7 +2846,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
static int sja1105_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+ bool ingress, struct netlink_ext_ack *extack)
{
return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
ingress, true);
@@ -3001,58 +3017,6 @@ static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
return 0;
}
-static void sja1105_teardown_ports(struct sja1105_private *priv)
-{
- struct dsa_switch *ds = priv->ds;
- int port;
-
- for (port = 0; port < ds->num_ports; port++) {
- struct sja1105_port *sp = &priv->ports[port];
-
- if (sp->xmit_worker)
- kthread_destroy_worker(sp->xmit_worker);
- }
-}
-
-static int sja1105_setup_ports(struct sja1105_private *priv)
-{
- struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
- struct dsa_switch *ds = priv->ds;
- int port, rc;
-
- /* Connections between dsa_port and sja1105_port */
- for (port = 0; port < ds->num_ports; port++) {
- struct sja1105_port *sp = &priv->ports[port];
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct kthread_worker *worker;
- struct net_device *slave;
-
- if (!dsa_port_is_user(dp))
- continue;
-
- dp->priv = sp;
- sp->data = tagger_data;
- slave = dp->slave;
- kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
- worker = kthread_create_worker(0, "%s_xmit", slave->name);
- if (IS_ERR(worker)) {
- rc = PTR_ERR(worker);
- dev_err(ds->dev,
- "failed to create deferred xmit thread: %d\n",
- rc);
- goto out_destroy_workers;
- }
- sp->xmit_worker = worker;
- skb_queue_head_init(&sp->xmit_queue);
- }
-
- return 0;
-
-out_destroy_workers:
- sja1105_teardown_ports(priv);
- return rc;
-}
-
/* The programming model for the SJA1105 switch is "all-at-once" via static
* configuration tables. Some of these can be dynamically modified at runtime,
* but not the xMII mode parameters table.
@@ -3098,10 +3062,6 @@ static int sja1105_setup(struct dsa_switch *ds)
}
}
- rc = sja1105_setup_ports(priv);
- if (rc)
- goto out_static_config_free;
-
sja1105_tas_setup(ds);
sja1105_flower_setup(ds);
@@ -3138,8 +3098,9 @@ static int sja1105_setup(struct dsa_switch *ds)
*/
ds->vlan_filtering_is_global = true;
ds->untag_bridge_pvid = true;
+ ds->fdb_isolation = true;
/* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */
- ds->num_fwd_offloading_bridges = 7;
+ ds->max_num_bridges = 7;
/* Advertise the 8 egress queues */
ds->num_tx_queues = SJA1105_NUM_TC;
@@ -3158,7 +3119,6 @@ out_ptp_clock_unregister:
out_flower_teardown:
sja1105_flower_teardown(ds);
sja1105_tas_teardown(ds);
- sja1105_teardown_ports(priv);
out_static_config_free:
sja1105_static_config_free(&priv->static_config);
@@ -3178,26 +3138,25 @@ static void sja1105_teardown(struct dsa_switch *ds)
sja1105_ptp_clock_unregister(ds);
sja1105_flower_teardown(ds);
sja1105_tas_teardown(ds);
- sja1105_teardown_ports(priv);
sja1105_static_config_free(&priv->static_config);
}
static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
+ .connect_tag_protocol = sja1105_connect_tag_protocol,
.setup = sja1105_setup,
.teardown = sja1105_teardown,
.set_ageing_time = sja1105_set_ageing_time,
.port_change_mtu = sja1105_change_mtu,
.port_max_mtu = sja1105_get_max_mtu,
- .phylink_validate = sja1105_phylink_validate,
- .phylink_mac_config = sja1105_mac_config,
+ .phylink_get_caps = sja1105_phylink_get_caps,
+ .phylink_mac_select_pcs = sja1105_mac_select_pcs,
.phylink_mac_link_up = sja1105_mac_link_up,
.phylink_mac_link_down = sja1105_mac_link_down,
.get_strings = sja1105_get_strings,
.get_ethtool_stats = sja1105_get_ethtool_stats,
.get_sset_count = sja1105_get_sset_count,
.get_ts_info = sja1105_get_ts_info,
- .port_disable = sja1105_port_disable,
.port_fdb_dump = sja1105_fdb_dump,
.port_fdb_add = sja1105_fdb_add,
.port_fdb_del = sja1105_fdb_del,
@@ -3228,8 +3187,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.tag_8021q_vlan_add = sja1105_dsa_8021q_vlan_add,
.tag_8021q_vlan_del = sja1105_dsa_8021q_vlan_del,
.port_prechangeupper = sja1105_prechangeupper,
- .port_bridge_tx_fwd_offload = dsa_tag_8021q_bridge_tx_fwd_offload,
- .port_bridge_tx_fwd_unoffload = dsa_tag_8021q_bridge_tx_fwd_unoffload,
};
static const struct of_device_id sja1105_dt_ids[];
@@ -3367,6 +3324,7 @@ static int sja1105_probe(struct spi_device *spi)
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->dynamic_config_lock);
mutex_init(&priv->mgmt_lock);
+ spin_lock_init(&priv->ts_id_lock);
rc = sja1105_parse_dt(priv);
if (rc < 0) {
@@ -3385,18 +3343,14 @@ static int sja1105_probe(struct spi_device *spi)
return dsa_register_switch(priv->ds);
}
-static int sja1105_remove(struct spi_device *spi)
+static void sja1105_remove(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
if (!priv)
- return 0;
+ return;
dsa_unregister_switch(priv->ds);
-
- spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void sja1105_shutdown(struct spi_device *spi)
@@ -3426,12 +3380,28 @@ static const struct of_device_id sja1105_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
+static const struct spi_device_id sja1105_spi_ids[] = {
+ { "sja1105e" },
+ { "sja1105t" },
+ { "sja1105p" },
+ { "sja1105q" },
+ { "sja1105r" },
+ { "sja1105s" },
+ { "sja1110a" },
+ { "sja1110b" },
+ { "sja1110c" },
+ { "sja1110d" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, sja1105_spi_ids);
+
static struct spi_driver sja1105_driver = {
.driver = {
.name = "sja1105",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(sja1105_dt_ids),
},
+ .id_table = sja1105_spi_ids,
.probe = sja1105_probe,
.remove = sja1105_remove,
.shutdown = sja1105_shutdown,
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 215dd17ca790..01f1cb719042 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -7,20 +7,15 @@
#define SJA1110_PCS_BANK_REG SJA1110_SPI_ADDR(0x3fc)
-int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+int sja1105_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
- u16 mmd;
int rc;
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
-
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
return 0xffff;
@@ -37,19 +32,15 @@ int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
return tmp & 0xffff;
}
-int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int sja1105_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd,
+ int reg, u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
- u16 mmd;
-
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
tmp = val;
if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
@@ -58,7 +49,7 @@ int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
}
-int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
@@ -66,17 +57,12 @@ int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
int offset, bank;
u64 addr;
u32 tmp;
- u16 mmd;
int rc;
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
-
if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
return -ENODEV;
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID1)
return NXP_SJA1110_XPCS_ID >> 16;
@@ -108,7 +94,8 @@ int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
return tmp & 0xffff;
}
-int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd,
+ int reg, u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
@@ -116,17 +103,12 @@ int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
int offset, bank;
u64 addr;
u32 tmp;
- u16 mmd;
int rc;
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
-
if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
return -ENODEV;
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
bank = addr >> 8;
offset = addr & GENMASK(7, 0);
@@ -167,7 +149,7 @@ static u64 sja1105_base_t1_encode_addr(struct sja1105_private *priv,
return regs->mdio_100base_t1 | (phy << 7) | (op << 5) | (xad << 0);
}
-static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
+static int sja1105_base_t1_mdio_read_c22(struct mii_bus *bus, int phy, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
@@ -175,30 +157,31 @@ static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
u32 tmp;
int rc;
- if (reg & MII_ADDR_C45) {
- u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
-
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
- mmd);
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
- tmp = reg & MII_REGADDR_C45_MASK;
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
- rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ return tmp & 0xffff;
+}
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
- mmd);
+static int sja1105_base_t1_mdio_read_c45(struct mii_bus *bus, int phy,
+ int mmd, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
- rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR, mmd);
- return tmp & 0xffff;
- }
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &reg, NULL);
+ if (rc < 0)
+ return rc;
- /* Clause 22 read */
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA, mmd);
rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
if (rc < 0)
@@ -207,41 +190,37 @@ static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
return tmp & 0xffff;
}
-static int sja1105_base_t1_mdio_write(struct mii_bus *bus, int phy, int reg,
- u16 val)
+static int sja1105_base_t1_mdio_write_c22(struct mii_bus *bus, int phy, int reg,
+ u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
- int rc;
-
- if (reg & MII_ADDR_C45) {
- u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
- mmd);
-
- tmp = reg & MII_REGADDR_C45_MASK;
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
- rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ tmp = val & 0xffff;
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
- mmd);
+ return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+}
- tmp = val & 0xffff;
+static int sja1105_base_t1_mdio_write_c45(struct mii_bus *bus, int phy,
+ int mmd, int reg, u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
- rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR, mmd);
- return 0;
- }
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &reg, NULL);
+ if (rc < 0)
+ return rc;
- /* Clause 22 write */
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA, mmd);
tmp = val & 0xffff;
@@ -354,8 +333,10 @@ static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv,
bus->name = "SJA1110 100base-T1 MDIO bus";
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-t1",
dev_name(priv->ds->dev));
- bus->read = sja1105_base_t1_mdio_read;
- bus->write = sja1105_base_t1_mdio_write;
+ bus->read = sja1105_base_t1_mdio_read_c22;
+ bus->write = sja1105_base_t1_mdio_write_c22;
+ bus->read_c45 = sja1105_base_t1_mdio_read_c45;
+ bus->write_c45 = sja1105_base_t1_mdio_write_c45;
bus->parent = priv->ds->dev;
mdio_priv = bus->priv;
mdio_priv->priv = priv;
@@ -392,7 +373,7 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
int rc = 0;
int port;
- if (!priv->info->pcs_mdio_read || !priv->info->pcs_mdio_write)
+ if (!priv->info->pcs_mdio_read_c45 || !priv->info->pcs_mdio_write_c45)
return 0;
bus = mdiobus_alloc_size(sizeof(*mdio_priv));
@@ -402,8 +383,8 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
bus->name = "SJA1105 PCS MDIO bus";
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs",
dev_name(ds->dev));
- bus->read = priv->info->pcs_mdio_read;
- bus->write = priv->info->pcs_mdio_write;
+ bus->read_c45 = priv->info->pcs_mdio_read_c45;
+ bus->write_c45 = priv->info->pcs_mdio_write_c45;
bus->parent = ds->dev;
/* There is no PHY on this MDIO bus => mask out all PHY addresses
* from auto probing.
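
To show what this conversion replaces (illustration only, not part of the patch): previously, clause-45 accesses arrived through the clause-22 .read/.write ops with the MMD folded into the reg argument via the MII_ADDR_C45 encoding visible in the removed lines; now the MDIO core calls dedicated .read_c45/.write_c45 ops with mmd passed separately, so the driver only builds addr = (mmd << 16) | reg. A minimal sketch of the two forms, using constants taken from the removed code paths:

#include <stdint.h>
#include <stdio.h>

/* Historical encoding dropped by this patch (from the old linux/mdio.h) */
#define MII_ADDR_C45		(1U << 30)
#define MII_DEVADDR_C45_SHIFT	16

int main(void)
{
	/* Old style: one 'reg' argument carried both the MMD and the register.
	 * 0x1f is MDIO_MMD_VEND2, 0x0000 is MDIO_CTRL1.
	 */
	uint32_t old_reg = MII_ADDR_C45 |
			   (0x1fU << MII_DEVADDR_C45_SHIFT) | 0x0000;
	unsigned int mmd = (old_reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
	unsigned int reg = old_reg & 0xffff;

	/* New style: mmd and reg come in as separate arguments, and the
	 * driver forms the SPI-visible address exactly as in the patch.
	 */
	printf("mmd=0x%x reg=0x%x addr=0x%x\n", mmd, reg, (mmd << 16) | reg);
	return 0;
}
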
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 54396992a919..30fb2cc40164 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -58,13 +58,12 @@ enum sja1105_ptp_clk_mode {
#define ptp_data_to_sja1105(d) \
container_of((d), struct sja1105_private, ptp_data)
-/* Must be called only with priv->tagger_data.state bit
- * SJA1105_HWTS_RX_EN cleared
+/* Must be called only while the RX timestamping state of the tagger
+ * is turned off
*/
static int sja1105_change_rxtstamping(struct sja1105_private *priv,
bool on)
{
- struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
struct sja1105_general_params_entry *general_params;
struct sja1105_table *table;
@@ -74,13 +73,8 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
general_params->send_meta1 = on;
general_params->send_meta0 = on;
- /* Initialize the meta state machine to a known state */
- if (priv->tagger_data.stampable_skb) {
- kfree_skb(priv->tagger_data.stampable_skb);
- priv->tagger_data.stampable_skb = NULL;
- }
ptp_cancel_worker_sync(ptp_data->clock);
- skb_queue_purge(&tagger_data->skb_txtstamp_queue);
+ skb_queue_purge(&ptp_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
@@ -88,6 +82,7 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
{
+ struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
struct sja1105_private *priv = ds->priv;
struct hwtstamp_config config;
bool rx_on;
@@ -98,10 +93,10 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- priv->ports[port].hwts_tx_en = false;
+ priv->hwts_tx_en &= ~BIT(port);
break;
case HWTSTAMP_TX_ON:
- priv->ports[port].hwts_tx_en = true;
+ priv->hwts_tx_en |= BIT(port);
break;
default:
return -ERANGE;
@@ -116,8 +111,8 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
break;
}
- if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
- clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+ if (rx_on != tagger_data->rxtstamp_get_state(ds)) {
+ tagger_data->rxtstamp_set_state(ds, false);
rc = sja1105_change_rxtstamping(priv, rx_on);
if (rc < 0) {
@@ -126,7 +121,7 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
return rc;
}
if (rx_on)
- set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+ tagger_data->rxtstamp_set_state(ds, true);
}
if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
@@ -136,15 +131,16 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
{
+ struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
struct sja1105_private *priv = ds->priv;
struct hwtstamp_config config;
config.flags = 0;
- if (priv->ports[port].hwts_tx_en)
+ if (priv->hwts_tx_en & BIT(port))
config.tx_type = HWTSTAMP_TX_ON;
else
config.tx_type = HWTSTAMP_TX_OFF;
- if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
+ if (tagger_data->rxtstamp_get_state(ds))
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
else
config.rx_filter = HWTSTAMP_FILTER_NONE;
@@ -403,7 +399,7 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
- netif_rx_ni(skb);
+ netif_rx(skb);
}
if (ptp_data->extts_enabled)
@@ -417,10 +413,11 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
+ struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(ds);
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
- if (!test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
+ if (!tagger_data->rxtstamp_get_state(ds))
return false;
/* We need to read the full PTP clock to reconstruct the Rx
@@ -453,6 +450,39 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
return priv->info->rxtstamp(ds, port, skb);
}
+void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shwt = {0};
+
+ /* We don't care about RX timestamps on the CPU port */
+ if (dir == SJA1110_META_TSTAMP_RX)
+ return;
+
+ spin_lock(&ptp_data->skb_txtstamp_queue.lock);
+
+ skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) {
+ if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
+ continue;
+
+ __skb_unlink(skb, &ptp_data->skb_txtstamp_queue);
+ skb_match = skb;
+
+ break;
+ }
+
+ spin_unlock(&ptp_data->skb_txtstamp_queue.lock);
+
+ if (WARN_ON(!skb_match))
+ return;
+
+ shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
+ skb_complete_tx_timestamp(skb_match, &shwt);
+}
+
/* In addition to cloning the skb which is done by the common
* sja1105_port_txtstamp, we need to generate a timestamp ID and save the
* packet to the TX timestamping queue.
@@ -461,22 +491,22 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
struct sja1105_private *priv = ds->priv;
- struct sja1105_port *sp = &priv->ports[port];
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
u8 ts_id;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- spin_lock(&sp->data->meta_lock);
+ spin_lock(&priv->ts_id_lock);
- ts_id = sp->data->ts_id;
+ ts_id = priv->ts_id;
/* Deal automatically with 8-bit wraparound */
- sp->data->ts_id++;
+ priv->ts_id++;
SJA1105_SKB_CB(clone)->ts_id = ts_id;
- spin_unlock(&sp->data->meta_lock);
+ spin_unlock(&priv->ts_id_lock);
- skb_queue_tail(&sp->data->skb_txtstamp_queue, clone);
+ skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone);
}
/* Called from dsa_skb_tx_timestamp. This callback is just to clone
@@ -486,10 +516,9 @@ void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
void sja1105_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_port *sp = &priv->ports[port];
struct sk_buff *clone;
- if (!sp->hwts_tx_en)
+ if (!(priv->hwts_tx_en & BIT(port)))
return;
clone = skb_clone_sk(skb);
@@ -896,7 +925,6 @@ static struct ptp_pin_desc sja1105_ptp_pin = {
int sja1105_ptp_clock_register(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
ptp_data->caps = (struct ptp_clock_info) {
@@ -919,8 +947,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
/* Only used on SJA1105 */
skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
/* Only used on SJA1110 */
- skb_queue_head_init(&tagger_data->skb_txtstamp_queue);
- spin_lock_init(&tagger_data->meta_lock);
+ skb_queue_head_init(&ptp_data->skb_txtstamp_queue);
ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
if (IS_ERR_OR_NULL(ptp_data->clock))
@@ -937,7 +964,6 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
- struct sja1105_tagger_data *tagger_data = &priv->tagger_data;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
if (IS_ERR_OR_NULL(ptp_data->clock))
@@ -945,7 +971,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
del_timer_sync(&ptp_data->extts_timer);
ptp_cancel_worker_sync(ptp_data->clock);
- skb_queue_purge(&tagger_data->skb_txtstamp_queue);
+ skb_queue_purge(&ptp_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
ptp_clock_unregister(ptp_data->clock);
ptp_data->clock = NULL;
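
For context, the hwtstamp_set/get handlers above service the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls reached through the DSA port_hwtstamp_set op. A minimal user-space sketch of the request that exercises this path (illustrative only, not part of the patch; "eth0" and the socket are placeholders):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    static int enable_hw_ptp_timestamping(int sock, const char *ifname)
    {
            struct hwtstamp_config cfg = {
                    .tx_type   = HWTSTAMP_TX_ON,
                    .rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT,
            };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&cfg;

            /* serviced by sja1105_hwtstamp_set() on sja1105 switch ports */
            return ioctl(sock, SIOCSHWTSTAMP, &ifr);
    }
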
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 3ae6b9fdd492..416461ee95d2 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -8,6 +8,21 @@
#if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP)
+/* Timestamps are in units of 8 ns clock ticks (equivalent to
+ * a fixed 125 MHz clock).
+ */
+#define SJA1105_TICK_NS 8
+
+static inline s64 ns_to_sja1105_ticks(s64 ns)
+{
+ return ns / SJA1105_TICK_NS;
+}
+
+static inline s64 sja1105_ticks_to_ns(s64 ticks)
+{
+ return ticks * SJA1105_TICK_NS;
+}
+
/* Calculate the first base_time in the future that satisfies this
* relationship:
*
@@ -62,6 +77,10 @@ struct sja1105_ptp_data {
struct timer_list extts_timer;
/* Used only on SJA1105 to reconstruct partial timestamps */
struct sk_buff_head skb_rxtstamp_queue;
+ /* Used on SJA1110 where meta frames are generated only for
+ * 2-step TX timestamps
+ */
+ struct sk_buff_head skb_txtstamp_queue;
struct ptp_clock_info caps;
struct ptp_clock *clock;
struct sja1105_ptp_cmd cmd;
@@ -112,6 +131,9 @@ bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp);
+
#else
struct sja1105_ptp_cmd;
@@ -178,6 +200,8 @@ static inline int sja1105_ptp_commit(struct dsa_switch *ds,
#define sja1110_rxtstamp NULL
#define sja1110_txtstamp NULL
+#define sja1110_process_meta_tstamp NULL
+
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
#endif /* _SJA1105_PTP_H */
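
A quick standalone sanity check of the tick/ns helpers added above (plain user-space C, illustrative only, not kernel code):

    #include <stdio.h>

    #define SJA1105_TICK_NS 8

    static long long ns_to_sja1105_ticks(long long ns)    { return ns / SJA1105_TICK_NS; }
    static long long sja1105_ticks_to_ns(long long ticks) { return ticks * SJA1105_TICK_NS; }

    int main(void)
    {
            /* 125,000,000 ticks of an 8 ns (125 MHz) clock are exactly one second */
            printf("%lld ns\n", sja1105_ticks_to_ns(125000000LL));     /* 1000000000 */
            printf("%lld ticks\n", ns_to_sja1105_ticks(1000000000LL)); /* 125000000  */
            return 0;
    }
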
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index d3c9ad6d39d4..5ce29c8057a4 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -719,8 +719,8 @@ const struct sja1105_info sja1105r_info = {
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
- .pcs_mdio_read = sja1105_pcs_mdio_read,
- .pcs_mdio_write = sja1105_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1105_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1105_pcs_mdio_write_c45,
.regs = &sja1105pqrs_regs,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
@@ -756,8 +756,8 @@ const struct sja1105_info sja1105s_info = {
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
- .pcs_mdio_read = sja1105_pcs_mdio_read,
- .pcs_mdio_write = sja1105_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1105_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1105_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 3,
@@ -794,8 +794,8 @@ const struct sja1105_info sja1110a_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
@@ -844,8 +844,8 @@ const struct sja1105_info sja1110b_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
@@ -894,8 +894,8 @@ const struct sja1105_info sja1110c_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
@@ -944,8 +944,8 @@ const struct sja1105_info sja1110d_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index f5dca6a9b0f9..b7e95d60a6e4 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -296,6 +296,19 @@ static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
return false;
}
+/* FIXME: this should change when the bridge upper of the port changes. */
+static u16 sja1105_port_get_tag_8021q_vid(struct dsa_port *dp)
+{
+ unsigned long bridge_num;
+
+ if (!dp->bridge)
+ return dsa_tag_8021q_standalone_vid(dp);
+
+ bridge_num = dsa_port_bridge_num_get(dp);
+
+ return dsa_tag_8021q_bridge_vid(bridge_num);
+}
+
static int sja1105_init_virtual_links(struct sja1105_private *priv,
struct netlink_ext_ack *extack)
{
@@ -394,8 +407,9 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv,
vl_lookup[k].vlanid = rule->key.vl.vid;
vl_lookup[k].vlanprior = rule->key.vl.pcp;
} else {
+ /* FIXME */
struct dsa_port *dp = dsa_to_port(priv->ds, port);
- u16 vid = dsa_tag_8021q_rx_vid(dp);
+ u16 vid = sja1105_port_get_tag_8021q_vid(dp);
vl_lookup[k].vlanid = vid;
vl_lookup[k].vlanprior = 0;
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index a4b1447ff055..ae55167ce0a6 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -1122,9 +1122,6 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc)
vsc->gc.ngpio = 4;
vsc->gc.owner = THIS_MODULE;
vsc->gc.parent = vsc->dev;
-#if IS_ENABLED(CONFIG_OF_GPIO)
- vsc->gc.of_node = vsc->dev->of_node;
-#endif
vsc->gc.base = -1;
vsc->gc.get = vsc73xx_gpio_get;
vsc->gc.set = vsc73xx_gpio_set;
@@ -1216,12 +1213,10 @@ int vsc73xx_probe(struct vsc73xx *vsc)
}
EXPORT_SYMBOL(vsc73xx_probe);
-int vsc73xx_remove(struct vsc73xx *vsc)
+void vsc73xx_remove(struct vsc73xx *vsc)
{
dsa_unregister_switch(vsc->ds);
gpiod_set_value(vsc->reset, 1);
-
- return 0;
}
EXPORT_SYMBOL(vsc73xx_remove);
diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c
index fe4b154a0a57..bd4206e8f9af 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-platform.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c
@@ -121,8 +121,6 @@ static int vsc73xx_platform_remove(struct platform_device *pdev)
vsc73xx_remove(&vsc_platform->vsc);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
index 645398901e05..85b9a0f51dd8 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-spi.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
@@ -159,18 +159,14 @@ static int vsc73xx_spi_probe(struct spi_device *spi)
return vsc73xx_probe(&vsc_spi->vsc);
}
-static int vsc73xx_spi_remove(struct spi_device *spi)
+static void vsc73xx_spi_remove(struct spi_device *spi)
{
struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
if (!vsc_spi)
- return 0;
+ return;
vsc73xx_remove(&vsc_spi->vsc);
-
- spi_set_drvdata(spi, NULL);
-
- return 0;
}
static void vsc73xx_spi_shutdown(struct spi_device *spi)
@@ -207,10 +203,20 @@ static const struct of_device_id vsc73xx_of_match[] = {
};
MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
+static const struct spi_device_id vsc73xx_spi_ids[] = {
+ { "vsc7385" },
+ { "vsc7388" },
+ { "vsc7395" },
+ { "vsc7398" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, vsc73xx_spi_ids);
+
static struct spi_driver vsc73xx_spi_driver = {
.probe = vsc73xx_spi_probe,
.remove = vsc73xx_spi_remove,
.shutdown = vsc73xx_spi_shutdown,
+ .id_table = vsc73xx_spi_ids,
.driver = {
.name = "vsc73xx-spi",
.of_match_table = vsc73xx_of_match,
diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h
index 30b951504e65..30b1f0a36566 100644
--- a/drivers/net/dsa/vitesse-vsc73xx.h
+++ b/drivers/net/dsa/vitesse-vsc73xx.h
@@ -26,5 +26,5 @@ struct vsc73xx_ops {
int vsc73xx_is_addr_valid(u8 block, u8 subblock);
int vsc73xx_probe(struct vsc73xx *vsc);
-int vsc73xx_remove(struct vsc73xx *vsc);
+void vsc73xx_remove(struct vsc73xx *vsc);
void vsc73xx_shutdown(struct vsc73xx *vsc);
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 910fcb3b252b..fa622639d640 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -5,6 +5,7 @@
*/
#include <net/dsa.h>
+#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/of_device.h>
#include <linux/netdev_features.h>
@@ -108,6 +109,7 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
{
struct xrs700x_port *p = &priv->ports[port];
struct rtnl_link_stats64 stats;
+ unsigned long flags;
int i;
memset(&stats, 0, sizeof(stats));
@@ -137,9 +139,9 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
*/
stats.rx_packets += stats.multicast;
- u64_stats_update_begin(&p->syncp);
+ flags = u64_stats_update_begin_irqsave(&p->syncp);
p->stats64 = stats;
- u64_stats_update_end(&p->syncp);
+ u64_stats_update_end_irqrestore(&p->syncp, flags);
mutex_unlock(&p->mib_mutex);
}
@@ -441,34 +443,27 @@ static void xrs700x_teardown(struct dsa_switch *ds)
cancel_delayed_work_sync(&priv->mib_work);
}
-static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void xrs700x_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
switch (port) {
case 0:
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ config->mac_capabilities = MAC_10FD | MAC_100FD;
break;
+
case 1:
case 2:
case 3:
- phylink_set(mask, 1000baseT_Full);
+ phy_interface_set_rgmii(config->supported_interfaces);
+ config->mac_capabilities = MAC_10FD | MAC_100FD | MAC_1000FD;
break;
+
default:
- linkmode_zero(supported);
dev_err(ds->dev, "Unsupported port: %i\n", port);
- return;
+ break;
}
-
- phylink_set_port_modes(mask);
-
- /* The switch only supports full duplex. */
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
}
static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
@@ -501,7 +496,7 @@ static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
}
static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
- struct net_device *bridge, bool join)
+ struct dsa_bridge bridge, bool join)
{
unsigned int i, cpu_mask = 0, mask = 0;
struct xrs700x *priv = ds->priv;
@@ -513,14 +508,14 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
cpu_mask |= BIT(i);
- if (dsa_to_port(ds, i)->bridge_dev == bridge)
+ if (dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
mask |= BIT(i);
}
for (i = 0; i < ds->num_ports; i++) {
- if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* 1 = Disable forwarding to the port */
@@ -540,13 +535,14 @@ static int xrs700x_bridge_common(struct dsa_switch *ds, int port,
}
static int xrs700x_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge, bool *tx_fwd_offload,
+ struct netlink_ext_ack *extack)
{
return xrs700x_bridge_common(ds, port, bridge, true);
}
static void xrs700x_bridge_leave(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct dsa_bridge bridge)
{
xrs700x_bridge_common(ds, port, bridge, false);
}
@@ -702,7 +698,7 @@ static const struct dsa_switch_ops xrs700x_ops = {
.setup = xrs700x_setup,
.teardown = xrs700x_teardown,
.port_stp_state_set = xrs700x_port_stp_state_set,
- .phylink_validate = xrs700x_phylink_validate,
+ .phylink_get_caps = xrs700x_phylink_get_caps,
.phylink_mac_link_up = xrs700x_mac_link_up,
.get_strings = xrs700x_get_strings,
.get_sset_count = xrs700x_get_sset_count,
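
The switch to the _irqsave/_irqrestore writer variants above pairs with the usual seqcount-style reader of the u64_stats API. A minimal sketch of that reader side, reusing the xrs700x field names for clarity (illustrative only, not part of the patch):

    static void xrs700x_example_read_stats64(struct xrs700x_port *p,
                                             struct rtnl_link_stats64 *out)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin(&p->syncp);
                    *out = p->stats64;           /* copy the writer-updated snapshot */
            } while (u64_stats_fetch_retry(&p->syncp, start));
    }
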
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
index 6deae388a0d6..14ff6887a225 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_i2c.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -76,8 +76,7 @@ static const struct regmap_config xrs700x_i2c_regmap_config = {
.val_format_endian = REGMAP_ENDIAN_BIG
};
-static int xrs700x_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *i2c_id)
+static int xrs700x_i2c_probe(struct i2c_client *i2c)
{
struct xrs700x *priv;
int ret;
@@ -105,18 +104,14 @@ static int xrs700x_i2c_probe(struct i2c_client *i2c,
return 0;
}
-static int xrs700x_i2c_remove(struct i2c_client *i2c)
+static void xrs700x_i2c_remove(struct i2c_client *i2c)
{
struct xrs700x *priv = i2c_get_clientdata(i2c);
if (!priv)
- return 0;
+ return;
xrs700x_switch_remove(priv);
-
- i2c_set_clientdata(i2c, NULL);
-
- return 0;
}
static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
@@ -152,7 +147,7 @@ static struct i2c_driver xrs700x_i2c_driver = {
.name = "xrs700x-i2c",
.of_match_table = of_match_ptr(xrs700x_i2c_dt_ids),
},
- .probe = xrs700x_i2c_probe,
+ .probe_new = xrs700x_i2c_probe,
.remove = xrs700x_i2c_remove,
.shutdown = xrs700x_i2c_shutdown,
.id_table = xrs700x_i2c_id,
diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
index 127a677d1f39..5f7d344b5d73 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_mdio.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
@@ -140,8 +140,6 @@ static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
return;
xrs700x_switch_remove(priv);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void xrs700x_mdio_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index f82ad7419508..c4b1b0aa438a 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -99,14 +99,7 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_change_carrier = dummy_change_carrier,
};
-static void dummy_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
-}
-
static const struct ethtool_ops dummy_ethtool_ops = {
- .get_drvinfo = dummy_get_drvinfo,
.get_ts_info = ethtool_op_get_ts_info,
};
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index 8ef34901c2d8..ca3e4700a813 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -225,7 +225,7 @@ static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
list_del(&slave->list);
queue->num_slaves--;
slave->dev->flags &= ~IFF_SLAVE;
- dev_put(slave->dev);
+ netdev_put(slave->dev, &slave->dev_tracker);
kfree(slave);
}
@@ -399,7 +399,7 @@ static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
if (duplicate_slave)
eql_kill_one_slave(queue, duplicate_slave);
- dev_hold(slave->dev);
+ netdev_hold(slave->dev, &slave->dev_tracker, GFP_ATOMIC);
list_add(&slave->list, &queue->all_slaves);
queue->num_slaves++;
slave->dev->flags |= IFF_SLAVE;
@@ -425,14 +425,13 @@ static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *
if ((master_dev->flags & IFF_UP) == IFF_UP) {
/* slave is not a master & not already a slave: */
if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
- slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
+ slave_t *s = kzalloc(sizeof(*s), GFP_KERNEL);
equalizer_t *eql = netdev_priv(master_dev);
int ret;
if (!s)
return -ENOMEM;
- memset(s, 0, sizeof(*s));
s->dev = slave_dev;
s->priority = srq.priority;
s->priority_bps = srq.priority;
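
netdev_hold()/netdev_put() above take a per-reference tracker cookie so leaked references can be attributed to their holder. A minimal sketch of the pattern with hypothetical names (not part of the patch):

    struct example_slave {
            struct net_device *dev;
            netdevice_tracker dev_tracker;  /* one tracker per held reference */
    };

    static void example_slave_attach(struct example_slave *s, struct net_device *dev)
    {
            s->dev = dev;
            netdev_hold(dev, &s->dev_tracker, GFP_ATOMIC);
    }

    static void example_slave_detach(struct example_slave *s)
    {
            netdev_put(s->dev, &s->dev_tracker);
            s->dev = NULL;
    }
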
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 846fa3af4504..fb68339e1511 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1135,7 +1135,7 @@ el3_netdev_set_ecmd(struct net_device *dev,
static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static int el3_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 1d124b0f65e7..d2f4358cc550 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1527,7 +1527,7 @@ static void set_rx_mode(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info), "ISA 0x%lx",
dev->base_addr);
}
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 4673bc1604e7..82f94b1635bf 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -480,7 +480,7 @@ static void tc589_reset(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index ccf07667aa5e..082388bb6169 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2959,13 +2959,13 @@ static void vortex_get_drvinfo(struct net_device *dev,
{
struct vortex_private *vp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
if (VORTEX_PCI(vp)) {
- strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
+ strscpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
sizeof(info->bus_info));
} else {
if (VORTEX_EISA(vp))
- strlcpy(info->bus_info, dev_name(vp->gendev),
+ strscpy(info->bus_info, dev_name(vp->gendev),
sizeof(info->bus_info));
else
snprintf(info->bus_info, sizeof(info->bus_info),
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 05e15b6e5e2c..aaaff3ba43ef 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -138,11 +138,6 @@ MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);
-#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
-#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
-#undef NETIF_F_TSO
-#endif
-
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
@@ -974,12 +969,12 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
smp_rmb();
if (tp->card_state == Sleeping) {
- strlcpy(info->fw_version, "Sleep image",
+ strscpy(info->fw_version, "Sleep image",
sizeof(info->fw_version));
} else {
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
if (typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
- strlcpy(info->fw_version, "Unknown runtime",
+ strscpy(info->fw_version, "Unknown runtime",
sizeof(info->fw_version));
} else {
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
@@ -989,8 +984,8 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
}
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int
@@ -1138,7 +1133,9 @@ typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
static void
-typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
ering->rx_max_pending = RXENT_ENTRIES;
ering->tx_max_pending = TXLO_ENTRIES - 1;
@@ -2259,9 +2256,28 @@ out:
return mode;
}
+#if MAX_SKB_FRAGS > 32
+
+#include <net/vxlan.h>
+
+static netdev_features_t typhoon_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb_shinfo(skb)->nr_frags > 32 && skb_is_gso(skb))
+ features &= ~NETIF_F_GSO_MASK;
+
+ features = vlan_features_check(skb, features);
+ return vxlan_features_check(skb, features);
+}
+#endif
+
static const struct net_device_ops typhoon_netdev_ops = {
.ndo_open = typhoon_open,
.ndo_stop = typhoon_close,
+#if MAX_SKB_FRAGS > 32
+ .ndo_features_check = typhoon_features_check,
+#endif
.ndo_start_xmit = typhoon_start_tx,
.ndo_set_rx_mode = typhoon_set_rx_mode,
.ndo_tx_timeout = typhoon_tx_timeout,
@@ -2276,6 +2292,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *dev;
struct typhoon *tp;
int card_id = (int) ent->driver_data;
+ u8 addr[ETH_ALEN] __aligned(4);
void __iomem *ioaddr;
void *shared;
dma_addr_t shared_dma;
@@ -2407,8 +2424,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto error_out_reset;
}
- *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
- *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+ *(__be16 *)&addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
+ *(__be32 *)&addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+ eth_hw_addr_set(dev, addr);
if (!is_valid_ether_addr(dev->dev_addr)) {
err_msg = "Could not obtain valid ethernet address, aborting";
@@ -2446,7 +2464,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* The chip-specific entries in the device structure. */
dev->netdev_ops = &typhoon_netdev_ops;
- netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
+ netif_napi_add_weight(dev, &tp->napi, typhoon_poll, 16);
dev->watchdog_timeo = TX_TIMEOUT;
dev->ethtool_ops = &typhoon_ethtool_ops;
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 1f8acbba5b6b..af603256b724 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -579,9 +579,9 @@ static void ax_get_drvinfo(struct net_device *dev,
{
struct platform_device *pdev = to_platform_device(dev->dev.parent);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static u32 ax_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 3aef959fc25b..78f985885547 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -650,7 +650,6 @@ static void block_input(struct net_device *dev, int count,
{
unsigned int nic_base = dev->base_addr;
struct ei_device *ei_local = netdev_priv(dev);
- int xfer_count = count;
char *buf = skb->data;
if ((netif_msg_rx_status(ei_local)) && (count != 4))
@@ -662,9 +661,7 @@ static void block_input(struct net_device *dev, int count,
insw(nic_base + AXNET_DATAPORT,buf,count>>1);
if (count & 0x01) {
buf[count-1] = inb(nic_base + AXNET_DATAPORT);
- xfer_count++;
}
-
}
/*====================================================================*/
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index bd22a534b1c0..05d39ecb97ff 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -555,9 +555,9 @@ static int __init etherm_addr(char *addr)
static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
@@ -655,6 +655,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
struct ei_device *ei_local;
struct net_device *dev;
struct etherh_priv *eh;
+ u8 addr[ETH_ALEN];
int ret;
ret = ecard_request_resources(ec);
@@ -724,12 +725,13 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
spin_lock_init(&ei_local->page_lock);
if (ec->cid.product == PROD_ANT_ETHERM) {
- etherm_addr(dev->dev_addr);
+ etherm_addr(addr);
ei_local->reg_offset = etherm_regoffsets;
} else {
- etherh_addr(dev->dev_addr, ec);
+ etherh_addr(addr, ec);
ei_local->reg_offset = etherh_regoffsets;
}
+ eth_hw_addr_set(dev, addr);
ei_local->name = dev->name;
ei_local->word16 = 1;
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 941754ea78ec..1df7601af86a 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -116,6 +116,7 @@ static int hydra_init(struct zorro_dev *z)
unsigned long ioaddr = board+HYDRA_NIC_BASE;
const char name[] = "NE2000";
int start_page, stop_page;
+ u8 macaddr[ETH_ALEN];
int j;
int err;
@@ -129,7 +130,8 @@ static int hydra_init(struct zorro_dev *z)
return -ENOMEM;
for (j = 0; j < ETH_ALEN; j++)
- dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
+ macaddr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
+ eth_hw_addr_set(dev, macaddr);
/* We must set the 8390 for word mode. */
z_writeb(0x4b, ioaddr + NE_EN0_DCFG);
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 91b04abfd687..7fb819b9b89a 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -292,6 +292,7 @@ static bool mac8390_rsrc_init(struct net_device *dev,
struct nubus_dirent ent;
int offset;
volatile unsigned short *i;
+ u8 addr[ETH_ALEN];
dev->irq = SLOT2IRQ(board->slot);
/* This is getting to be a habit */
@@ -314,7 +315,8 @@ static bool mac8390_rsrc_init(struct net_device *dev,
return false;
}
- nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
+ nubus_get_rsrc_mem(addr, &ent, 6);
+ eth_hw_addr_set(dev, addr);
if (useresources[cardtype] == 1) {
nubus_rewinddir(&dir);
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index e320cccba61a..8a7918d33419 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -405,15 +405,13 @@ static int mcf8390_init(struct net_device *dev)
static int mcf8390_probe(struct platform_device *pdev)
{
struct net_device *dev;
- struct resource *mem, *irq;
+ struct resource *mem;
resource_size_t msize;
- int ret;
+ int ret, irq;
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (irq == NULL) {
- dev_err(&pdev->dev, "no IRQ specified?\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
return -ENXIO;
- }
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (mem == NULL) {
@@ -433,7 +431,7 @@ static int mcf8390_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
platform_set_drvdata(pdev, dev);
- dev->irq = irq->start;
+ dev->irq = irq;
dev->base_addr = mem->start;
ret = mcf8390_init(dev);
@@ -452,8 +450,7 @@ static int mcf8390_remove(struct platform_device *pdev)
unregister_netdev(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem)
- release_mem_region(mem->start, resource_size(mem));
+ release_mem_region(mem->start, resource_size(mem));
free_netdev(dev);
return 0;
}
diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c
index 0890fa493f70..6e62c37c9400 100644
--- a/drivers/net/ethernet/8390/smc-ultra.c
+++ b/drivers/net/ethernet/8390/smc-ultra.c
@@ -204,6 +204,7 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
{
int i, retval;
int checksum = 0;
+ u8 macaddr[ETH_ALEN];
const char *model_name;
unsigned char eeprom_irq = 0;
static unsigned version_printed;
@@ -239,7 +240,8 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + 8 + i);
+ macaddr[i] = inb(ioaddr + 8 + i);
+ eth_hw_addr_set(dev, macaddr);
netdev_info(dev, "%s at %#3x, %pM", model_name,
ioaddr, dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index 263a942d81fa..5b00c452bede 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -168,6 +168,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
int checksum = 0;
int ancient = 0; /* An old card without config registers. */
int word16 = 0; /* 0 = 8 bit, 1 = 16 bit */
+ u8 addr[ETH_ALEN];
const char *model_name;
static unsigned version_printed;
struct ei_device *ei_local = netdev_priv(dev);
@@ -191,7 +192,8 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
netdev_info(dev, version);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + 8 + i);
+ addr[i] = inb(ioaddr + 8 + i);
+ eth_hw_addr_set(dev, addr);
netdev_info(dev, "WD80x3 at %#3x, %pM", ioaddr, dev->dev_addr);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 4601b38f532a..5a274b99f299 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -35,15 +35,6 @@ source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/asix/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
-source "drivers/net/ethernet/broadcom/Kconfig"
-source "drivers/net/ethernet/brocade/Kconfig"
-source "drivers/net/ethernet/cadence/Kconfig"
-source "drivers/net/ethernet/calxeda/Kconfig"
-source "drivers/net/ethernet/cavium/Kconfig"
-source "drivers/net/ethernet/chelsio/Kconfig"
-source "drivers/net/ethernet/cirrus/Kconfig"
-source "drivers/net/ethernet/cisco/Kconfig"
-source "drivers/net/ethernet/cortina/Kconfig"
config CX_ECAT
tristate "Beckhoff CX5020 EtherCAT master support"
@@ -57,6 +48,14 @@ config CX_ECAT
To compile this driver as a module, choose M here. The module
will be called ec_bhf.
+source "drivers/net/ethernet/broadcom/Kconfig"
+source "drivers/net/ethernet/cadence/Kconfig"
+source "drivers/net/ethernet/calxeda/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
+source "drivers/net/ethernet/chelsio/Kconfig"
+source "drivers/net/ethernet/cirrus/Kconfig"
+source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/cortina/Kconfig"
source "drivers/net/ethernet/davicom/Kconfig"
config DNET
@@ -73,17 +72,18 @@ config DNET
source "drivers/net/ethernet/dec/Kconfig"
source "drivers/net/ethernet/dlink/Kconfig"
source "drivers/net/ethernet/emulex/Kconfig"
+source "drivers/net/ethernet/engleder/Kconfig"
source "drivers/net/ethernet/ezchip/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
+source "drivers/net/ethernet/fungible/Kconfig"
source "drivers/net/ethernet/google/Kconfig"
source "drivers/net/ethernet/hisilicon/Kconfig"
source "drivers/net/ethernet/huawei/Kconfig"
source "drivers/net/ethernet/i825xx/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/microsoft/Kconfig"
source "drivers/net/ethernet/xscale/Kconfig"
config JME
@@ -120,14 +120,16 @@ config LANTIQ_XRX200
Support for the PMAC of the Gigabit switch (GSWIP) inside the
Lantiq / Intel VRX200 VDSL SoC
+source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/litex/Kconfig"
source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mediatek/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
-source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/mscc/Kconfig"
+source "drivers/net/ethernet/microsoft/Kconfig"
+source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
@@ -139,10 +141,10 @@ config FEALNX
Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
cards. <http://www.myson.com.tw/>
+source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/natsemi/Kconfig"
source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/netronome/Kconfig"
-source "drivers/net/ethernet/ni/Kconfig"
source "drivers/net/ethernet/8390/Kconfig"
source "drivers/net/ethernet/nvidia/Kconfig"
source "drivers/net/ethernet/nxp/Kconfig"
@@ -162,6 +164,7 @@ source "drivers/net/ethernet/packetengines/Kconfig"
source "drivers/net/ethernet/pasemi/Kconfig"
source "drivers/net/ethernet/pensando/Kconfig"
source "drivers/net/ethernet/qlogic/Kconfig"
+source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/qualcomm/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/realtek/Kconfig"
@@ -169,20 +172,23 @@ source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rocker/Kconfig"
source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
-source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
source "drivers/net/ethernet/sis/Kconfig"
+source "drivers/net/ethernet/sfc/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/socionext/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/sunplus/Kconfig"
source "drivers/net/ethernet/synopsys/Kconfig"
source "drivers/net/ethernet/tehuti/Kconfig"
source "drivers/net/ethernet/ti/Kconfig"
source "drivers/net/ethernet/toshiba/Kconfig"
source "drivers/net/ethernet/tundra/Kconfig"
+source "drivers/net/ethernet/vertexcom/Kconfig"
source "drivers/net/ethernet/via/Kconfig"
+source "drivers/net/ethernet/wangxun/Kconfig"
source "drivers/net/ethernet/wiznet/Kconfig"
source "drivers/net/ethernet/xilinx/Kconfig"
source "drivers/net/ethernet/xircom/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index fdd8c6c17451..0d872d4efcd1 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_NET_VENDOR_8390) += 8390/
obj-$(CONFIG_NET_VENDOR_ACTIONS) += actions/
obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
+obj-$(CONFIG_NET_VENDOR_ADI) += adi/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
@@ -36,10 +37,12 @@ obj-$(CONFIG_DNET) += dnet.o
obj-$(CONFIG_NET_VENDOR_DEC) += dec/
obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/
obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/
+obj-$(CONFIG_NET_VENDOR_ENGLEDER) += engleder/
obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
+obj-$(CONFIG_NET_VENDOR_FUNGIBLE) += fungible/
obj-$(CONFIG_NET_VENDOR_GOOGLE) += google/
obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/
@@ -88,11 +91,14 @@ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
+obj-$(CONFIG_NET_VENDOR_SUNPLUS) += sunplus/
obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
obj-$(CONFIG_NET_VENDOR_TI) += ti/
obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
+obj-$(CONFIG_NET_VENDOR_VERTEXCOM) += vertexcom/
obj-$(CONFIG_NET_VENDOR_VIA) += via/
+obj-$(CONFIG_NET_VENDOR_WANGXUN) += wangxun/
obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index 1cfdd01b4c2e..c6f8f852bff1 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1275,9 +1275,6 @@ static int owl_emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
u32 data, tmp;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
data = OWL_EMAC_BIT_MAC_CSR10_SB;
data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_RD << OWL_EMAC_OFF_MAC_CSR10_OPCODE;
@@ -1305,9 +1302,6 @@ owl_emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
struct owl_emac_priv *priv = bus->priv;
u32 data, tmp;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
data = OWL_EMAC_BIT_MAC_CSR10_SB;
data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_WR << OWL_EMAC_OFF_MAC_CSR10_OPCODE;
@@ -1576,7 +1570,7 @@ static int owl_emac_probe(struct platform_device *pdev)
netdev->watchdog_timeo = OWL_EMAC_TX_TIMEOUT;
netdev->netdev_ops = &owl_emac_netdev_ops;
netdev->ethtool_ops = &owl_emac_ethtool_ops;
- netif_napi_add(netdev, &priv->napi, owl_emac_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &priv->napi, owl_emac_poll);
ret = devm_register_netdev(dev, netdev);
if (ret) {
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index c6982f7caf9b..857361c74f5d 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -772,7 +772,7 @@ static int starfire_init_one(struct pci_dev *pdev,
dev->watchdog_timeo = TX_TIMEOUT;
dev->ethtool_ops = &ethtool_ops;
- netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
+ netif_napi_add_weight(dev, &np->napi, netdev_poll, max_interrupt_work);
if (mtu)
dev->mtu = mtu;
@@ -1844,8 +1844,8 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
new file mode 100644
index 000000000000..da3bdd302502
--- /dev/null
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+#
+# Analog Devices device configuration
+#
+
+config NET_VENDOR_ADI
+ bool "Analog Devices devices"
+ default y
+ depends on SPI
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about ADI devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_ADI
+
+config ADIN1110
+ tristate "Analog Devices ADIN1110 MAC-PHY"
+ depends on SPI && NET_SWITCHDEV
+ select CRC8
+ help
+ Say yes here to build support for Analog Devices ADIN1110
+ Low Power 10BASE-T1L Ethernet MAC-PHY.
+
+endif # NET_VENDOR_ADI
diff --git a/drivers/net/ethernet/adi/Makefile b/drivers/net/ethernet/adi/Makefile
new file mode 100644
index 000000000000..d0383d94303c
--- /dev/null
+++ b/drivers/net/ethernet/adi/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+#
+# Makefile for the Analog Devices network device drivers.
+#
+
+obj-$(CONFIG_ADIN1110) += adin1110.o
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
new file mode 100644
index 000000000000..f5c2d7a9abc1
--- /dev/null
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -0,0 +1,1735 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/* ADIN1110 Low Power 10BASE-T1L Ethernet MAC-PHY
+ * ADIN2111 2-Port Ethernet Switch with Integrated 10BASE-T1L PHY
+ *
+ * Copyright 2021 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cache.h>
+#include <linux/crc8.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_bridge.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/regulator/consumer.h>
+#include <linux/phy.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <net/switchdev.h>
+
+#include <asm/unaligned.h>
+
+#define ADIN1110_PHY_ID 0x1
+
+#define ADIN1110_RESET 0x03
+#define ADIN1110_SWRESET BIT(0)
+
+#define ADIN1110_CONFIG1 0x04
+#define ADIN1110_CONFIG1_SYNC BIT(15)
+
+#define ADIN1110_CONFIG2 0x06
+#define ADIN2111_P2_FWD_UNK2HOST BIT(12)
+#define ADIN2111_PORT_CUT_THRU_EN BIT(11)
+#define ADIN1110_CRC_APPEND BIT(5)
+#define ADIN1110_FWD_UNK2HOST BIT(2)
+
+#define ADIN1110_STATUS0 0x08
+
+#define ADIN1110_STATUS1 0x09
+#define ADIN2111_P2_RX_RDY BIT(17)
+#define ADIN1110_SPI_ERR BIT(10)
+#define ADIN1110_RX_RDY BIT(4)
+
+#define ADIN1110_IMASK1 0x0D
+#define ADIN2111_RX_RDY_IRQ BIT(17)
+#define ADIN1110_SPI_ERR_IRQ BIT(10)
+#define ADIN1110_RX_RDY_IRQ BIT(4)
+#define ADIN1110_TX_RDY_IRQ BIT(3)
+
+#define ADIN1110_MDIOACC 0x20
+#define ADIN1110_MDIO_TRDONE BIT(31)
+#define ADIN1110_MDIO_ST GENMASK(29, 28)
+#define ADIN1110_MDIO_OP GENMASK(27, 26)
+#define ADIN1110_MDIO_PRTAD GENMASK(25, 21)
+#define ADIN1110_MDIO_DEVAD GENMASK(20, 16)
+#define ADIN1110_MDIO_DATA GENMASK(15, 0)
+
+#define ADIN1110_TX_FSIZE 0x30
+#define ADIN1110_TX 0x31
+#define ADIN1110_TX_SPACE 0x32
+
+#define ADIN1110_MAC_ADDR_FILTER_UPR 0x50
+#define ADIN2111_MAC_ADDR_APPLY2PORT2 BIT(31)
+#define ADIN1110_MAC_ADDR_APPLY2PORT BIT(30)
+#define ADIN2111_MAC_ADDR_TO_OTHER_PORT BIT(17)
+#define ADIN1110_MAC_ADDR_TO_HOST BIT(16)
+
+#define ADIN1110_MAC_ADDR_FILTER_LWR 0x51
+
+#define ADIN1110_MAC_ADDR_MASK_UPR 0x70
+#define ADIN1110_MAC_ADDR_MASK_LWR 0x71
+
+#define ADIN1110_RX_FSIZE 0x90
+#define ADIN1110_RX 0x91
+
+#define ADIN2111_RX_P2_FSIZE 0xC0
+#define ADIN2111_RX_P2 0xC1
+
+#define ADIN1110_CLEAR_STATUS0 0xFFF
+
+/* MDIO_OP codes */
+#define ADIN1110_MDIO_OP_WR 0x1
+#define ADIN1110_MDIO_OP_RD 0x3
+
+#define ADIN1110_CD BIT(7)
+#define ADIN1110_WRITE BIT(5)
+
+#define ADIN1110_MAX_BUFF 2048
+#define ADIN1110_MAX_FRAMES_READ 64
+#define ADIN1110_WR_HEADER_LEN 2
+#define ADIN1110_FRAME_HEADER_LEN 2
+#define ADIN1110_INTERNAL_SIZE_HEADER_LEN 2
+#define ADIN1110_RD_HEADER_LEN 3
+#define ADIN1110_REG_LEN 4
+#define ADIN1110_FEC_LEN 4
+
+#define ADIN1110_PHY_ID_VAL 0x0283BC91
+#define ADIN2111_PHY_ID_VAL 0x0283BCA1
+
+#define ADIN_MAC_MAX_PORTS 2
+#define ADIN_MAC_MAX_ADDR_SLOTS 16
+
+#define ADIN_MAC_MULTICAST_ADDR_SLOT 0
+#define ADIN_MAC_BROADCAST_ADDR_SLOT 1
+#define ADIN_MAC_P1_ADDR_SLOT 2
+#define ADIN_MAC_P2_ADDR_SLOT 3
+#define ADIN_MAC_FDB_ADDR_SLOT 4
+
+DECLARE_CRC8_TABLE(adin1110_crc_table);
+
+enum adin1110_chips_id {
+ ADIN1110_MAC = 0,
+ ADIN2111_MAC,
+};
+
+struct adin1110_cfg {
+ enum adin1110_chips_id id;
+ char name[MDIO_NAME_SIZE];
+ u32 phy_ids[PHY_MAX_ADDR];
+ u32 ports_nr;
+ u32 phy_id_val;
+};
+
+struct adin1110_port_priv {
+ struct adin1110_priv *priv;
+ struct net_device *netdev;
+ struct net_device *bridge;
+ struct phy_device *phydev;
+ struct work_struct tx_work;
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ struct work_struct rx_mode_work;
+ u32 flags;
+ struct sk_buff_head txq;
+ u32 nr;
+ u32 state;
+ struct adin1110_cfg *cfg;
+};
+
+struct adin1110_priv {
+ struct mutex lock; /* protect spi */
+ spinlock_t state_lock; /* protect RX mode */
+ struct mii_bus *mii_bus;
+ struct spi_device *spidev;
+ bool append_crc;
+ struct adin1110_cfg *cfg;
+ u32 tx_space;
+ u32 irq_mask;
+ bool forwarding;
+ int irq;
+ struct adin1110_port_priv *ports[ADIN_MAC_MAX_PORTS];
+ char mii_bus_name[MII_BUS_ID_SIZE];
+ u8 data[ADIN1110_MAX_BUFF] ____cacheline_aligned;
+};
+
+struct adin1110_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct adin1110_port_priv *port_priv;
+ unsigned long event;
+};
+
+static struct adin1110_cfg adin1110_cfgs[] = {
+ {
+ .id = ADIN1110_MAC,
+ .name = "adin1110",
+ .phy_ids = {1},
+ .ports_nr = 1,
+ .phy_id_val = ADIN1110_PHY_ID_VAL,
+ },
+ {
+ .id = ADIN2111_MAC,
+ .name = "adin2111",
+ .phy_ids = {1, 2},
+ .ports_nr = 2,
+ .phy_id_val = ADIN2111_PHY_ID_VAL,
+ },
+};
+
+static u8 adin1110_crc_data(u8 *data, u32 len)
+{
+ return crc8(adin1110_crc_table, data, len, 0);
+}
+
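
DECLARE_CRC8_TABLE() above only reserves the lookup table; it still has to be populated once before any append_crc transfer, typically in probe, which is outside this excerpt. A hedged sketch of that step — the 0x7 polynomial is an assumption, not something shown in this hunk:

    /* Illustrative only: fill the CRC-8 table once during probe. */
    crc8_populate_msb(adin1110_crc_table, 0x7);   /* assumed SPI CRC-8 polynomial */
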
+static int adin1110_read_reg(struct adin1110_priv *priv, u16 reg, u32 *val)
+{
+ u32 header_len = ADIN1110_RD_HEADER_LEN;
+ u32 read_len = ADIN1110_REG_LEN;
+ struct spi_transfer t = {0};
+ int ret;
+
+ priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+ priv->data[2] = 0x00;
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ priv->data[3] = 0x00;
+ header_len++;
+ }
+
+ if (priv->append_crc)
+ read_len++;
+
+ memset(&priv->data[header_len], 0, read_len);
+ t.tx_buf = &priv->data[0];
+ t.rx_buf = &priv->data[0];
+ t.len = read_len + header_len;
+
+ ret = spi_sync_transfer(priv->spidev, &t, 1);
+ if (ret)
+ return ret;
+
+ if (priv->append_crc) {
+ u8 recv_crc;
+ u8 crc;
+
+ crc = adin1110_crc_data(&priv->data[header_len],
+ ADIN1110_REG_LEN);
+ recv_crc = priv->data[header_len + ADIN1110_REG_LEN];
+
+ if (crc != recv_crc) {
+ dev_err_ratelimited(&priv->spidev->dev, "CRC error.");
+ return -EBADMSG;
+ }
+ }
+
+ *val = get_unaligned_be32(&priv->data[header_len]);
+
+ return ret;
+}
+
+static int adin1110_write_reg(struct adin1110_priv *priv, u16 reg, u32 val)
+{
+ u32 header_len = ADIN1110_WR_HEADER_LEN;
+ u32 write_len = ADIN1110_REG_LEN;
+
+ priv->data[0] = ADIN1110_CD | ADIN1110_WRITE | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], header_len);
+ header_len++;
+ }
+
+ put_unaligned_be32(val, &priv->data[header_len]);
+ if (priv->append_crc) {
+ priv->data[header_len + write_len] = adin1110_crc_data(&priv->data[header_len],
+ write_len);
+ write_len++;
+ }
+
+ return spi_write(priv->spidev, &priv->data[0], header_len + write_len);
+}
+
+static int adin1110_set_bits(struct adin1110_priv *priv, u16 reg,
+ unsigned long mask, unsigned long val)
+{
+ u32 write_val;
+ int ret;
+
+ ret = adin1110_read_reg(priv, reg, &write_val);
+ if (ret < 0)
+ return ret;
+
+ set_mask_bits(&write_val, mask, val);
+
+ return adin1110_write_reg(priv, reg, write_val);
+}
+
+static int adin1110_round_len(int len)
+{
+ /* can read/write only multiples of 4 bytes of payload */
+ len = ALIGN(len, 4);
+
+ /* NOTE: ADIN1110_WR_HEADER_LEN should be used for write ops. */
+ if (len + ADIN1110_RD_HEADER_LEN > ADIN1110_MAX_BUFF)
+ return -EINVAL;
+
+ return len;
+}
+
+static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 header_len = ADIN1110_RD_HEADER_LEN;
+ struct spi_transfer t;
+ u32 frame_size_no_fcs;
+ struct sk_buff *rxb;
+ u32 frame_size;
+ int round_len;
+ u16 reg;
+ int ret;
+
+ if (!port_priv->nr) {
+ reg = ADIN1110_RX;
+ ret = adin1110_read_reg(priv, ADIN1110_RX_FSIZE, &frame_size);
+ } else {
+ reg = ADIN2111_RX_P2;
+ ret = adin1110_read_reg(priv, ADIN2111_RX_P2_FSIZE,
+ &frame_size);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ /* The read frame size includes the extra 2 bytes
+ * from the ADIN1110 frame header.
+ */
+ if (frame_size < ADIN1110_FRAME_HEADER_LEN + ADIN1110_FEC_LEN)
+ return ret;
+
+ round_len = adin1110_round_len(frame_size);
+ if (round_len < 0)
+ return ret;
+
+ frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN;
+ memset(priv->data, 0, ADIN1110_RD_HEADER_LEN);
+
+ priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ header_len++;
+ }
+
+ rxb = netdev_alloc_skb(port_priv->netdev, round_len + header_len);
+ if (!rxb)
+ return -ENOMEM;
+
+ skb_put(rxb, frame_size_no_fcs + header_len + ADIN1110_FRAME_HEADER_LEN);
+
+ t.tx_buf = &priv->data[0];
+ t.rx_buf = &rxb->data[0];
+ t.len = header_len + round_len;
+
+ ret = spi_sync_transfer(priv->spidev, &t, 1);
+ if (ret) {
+ kfree_skb(rxb);
+ return ret;
+ }
+
+ skb_pull(rxb, header_len + ADIN1110_FRAME_HEADER_LEN);
+ rxb->protocol = eth_type_trans(rxb, port_priv->netdev);
+
+ if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
+ (port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST))
+ rxb->offload_fwd_mark = port_priv->priv->forwarding;
+
+ netif_rx(rxb);
+
+ port_priv->rx_bytes += frame_size - ADIN1110_FRAME_HEADER_LEN;
+ port_priv->rx_packets++;
+
+ return 0;
+}
+
+static int adin1110_write_fifo(struct adin1110_port_priv *port_priv,
+ struct sk_buff *txb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 header_len = ADIN1110_WR_HEADER_LEN;
+ __be16 frame_header;
+ int padding = 0;
+ int padded_len;
+ int round_len;
+ int ret;
+
+ /* Pad the frame to a 64-byte length; neither the MAC nor the
+ * PHY will otherwise add the required padding.
+ * The FEC will be added by the MAC internally.
+ */
+ if (txb->len + ADIN1110_FEC_LEN < 64)
+ padding = 64 - (txb->len + ADIN1110_FEC_LEN);
+
+ padded_len = txb->len + padding + ADIN1110_FRAME_HEADER_LEN;
+
+ round_len = adin1110_round_len(padded_len);
+ if (round_len < 0)
+ return round_len;
+
+ ret = adin1110_write_reg(priv, ADIN1110_TX_FSIZE, padded_len);
+ if (ret < 0)
+ return ret;
+
+ memset(priv->data, 0, round_len + ADIN1110_WR_HEADER_LEN);
+
+ priv->data[0] = ADIN1110_CD | ADIN1110_WRITE;
+ priv->data[0] |= FIELD_GET(GENMASK(12, 8), ADIN1110_TX);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), ADIN1110_TX);
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ header_len++;
+ }
+
+ /* indicate in the frame header which port the frame should be sent on */
+ frame_header = cpu_to_be16(port_priv->nr);
+ memcpy(&priv->data[header_len], &frame_header,
+ ADIN1110_FRAME_HEADER_LEN);
+
+ memcpy(&priv->data[header_len + ADIN1110_FRAME_HEADER_LEN],
+ txb->data, txb->len);
+
+ ret = spi_write(priv->spidev, &priv->data[0], round_len + header_len);
+ if (ret < 0)
+ return ret;
+
+ port_priv->tx_bytes += txb->len;
+ port_priv->tx_packets++;
+
+ return 0;
+}
+
+static int adin1110_read_mdio_acc(struct adin1110_priv *priv)
+{
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_read_reg(priv, ADIN1110_MDIOACC, &val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return 0;
+
+ return val;
+}
+
+static int adin1110_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct adin1110_priv *priv = bus->priv;
+ u32 val = 0;
+ int ret;
+
+ if (mdio_phy_id_is_c45(phy_id))
+ return -EOPNOTSUPP;
+
+ val |= FIELD_PREP(ADIN1110_MDIO_OP, ADIN1110_MDIO_OP_RD);
+ val |= FIELD_PREP(ADIN1110_MDIO_ST, 0x1);
+ val |= FIELD_PREP(ADIN1110_MDIO_PRTAD, phy_id);
+ val |= FIELD_PREP(ADIN1110_MDIO_DEVAD, reg);
+
+ /* write the clause 22 read command to the chip */
+ mutex_lock(&priv->lock);
+ ret = adin1110_write_reg(priv, ADIN1110_MDIOACC, val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ /* The ADIN1110_MDIO_TRDONE bit of the ADIN1110_MDIOACC
+ * register is set when the read is done.
+ * After the transaction completes, the ADIN1110_MDIO_DATA
+ * bitfield of the ADIN1110_MDIOACC register will contain
+ * the requested register value.
+ */
+ ret = readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+ if (ret < 0)
+ return ret;
+
+ return (val & ADIN1110_MDIO_DATA);
+}
+
+static int adin1110_mdio_write(struct mii_bus *bus, int phy_id,
+ int reg, u16 reg_val)
+{
+ struct adin1110_priv *priv = bus->priv;
+ u32 val = 0;
+ int ret;
+
+ if (mdio_phy_id_is_c45(phy_id))
+ return -EOPNOTSUPP;
+
+ val |= FIELD_PREP(ADIN1110_MDIO_OP, ADIN1110_MDIO_OP_WR);
+ val |= FIELD_PREP(ADIN1110_MDIO_ST, 0x1);
+ val |= FIELD_PREP(ADIN1110_MDIO_PRTAD, phy_id);
+ val |= FIELD_PREP(ADIN1110_MDIO_DEVAD, reg);
+ val |= FIELD_PREP(ADIN1110_MDIO_DATA, reg_val);
+
+ /* write the clause 22 write command to the chip */
+ mutex_lock(&priv->lock);
+ ret = adin1110_write_reg(priv, ADIN1110_MDIOACC, val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ return readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+}
+
+/* The ADIN1110 MAC-PHY contains one ADIN1100 PHY;
+ * the ADIN2111 MAC-PHY contains two ADIN1100 PHYs.
+ * By registering a new MDIO bus we allow the PAL (PHY abstraction
+ * layer) to discover the encapsulated PHY and probe the ADIN1100 driver.
+ */
+static int adin1110_register_mdiobus(struct adin1110_priv *priv,
+ struct device *dev)
+{
+ struct mii_bus *mii_bus;
+ int ret;
+
+ mii_bus = devm_mdiobus_alloc(dev);
+ if (!mii_bus)
+ return -ENOMEM;
+
+ snprintf(priv->mii_bus_name, MII_BUS_ID_SIZE, "%s-%u",
+ priv->cfg->name, spi_get_chipselect(priv->spidev, 0));
+
+ mii_bus->name = priv->mii_bus_name;
+ mii_bus->read = adin1110_mdio_read;
+ mii_bus->write = adin1110_mdio_write;
+ mii_bus->priv = priv;
+ mii_bus->parent = dev;
+ mii_bus->phy_mask = ~((u32)GENMASK(2, 0));
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ ret = devm_mdiobus_register(dev, mii_bus);
+ if (ret)
+ return ret;
+
+ priv->mii_bus = mii_bus;
+
+ return 0;
+}
+
+static bool adin1110_port_rx_ready(struct adin1110_port_priv *port_priv,
+ u32 status)
+{
+ if (!netif_oper_up(port_priv->netdev))
+ return false;
+
+ if (!port_priv->nr)
+ return !!(status & ADIN1110_RX_RDY);
+ else
+ return !!(status & ADIN2111_P2_RX_RDY);
+}
+
+static void adin1110_read_frames(struct adin1110_port_priv *port_priv,
+ unsigned int budget)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 status1;
+ int ret;
+
+ while (budget) {
+ ret = adin1110_read_reg(priv, ADIN1110_STATUS1, &status1);
+ if (ret < 0)
+ return;
+
+ if (!adin1110_port_rx_ready(port_priv, status1))
+ break;
+
+ ret = adin1110_read_fifo(port_priv);
+ if (ret < 0)
+ return;
+
+ budget--;
+ }
+}
+
+static void adin1110_wake_queues(struct adin1110_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++)
+ netif_wake_queue(priv->ports[i]->netdev);
+}
+
+static irqreturn_t adin1110_irq(int irq, void *p)
+{
+ struct adin1110_priv *priv = p;
+ u32 status1;
+ u32 val;
+ int ret;
+ int i;
+
+ mutex_lock(&priv->lock);
+
+ ret = adin1110_read_reg(priv, ADIN1110_STATUS1, &status1);
+ if (ret < 0)
+ goto out;
+
+ if (priv->append_crc && (status1 & ADIN1110_SPI_ERR))
+ dev_warn_ratelimited(&priv->spidev->dev,
+ "SPI CRC error on write.\n");
+
+ ret = adin1110_read_reg(priv, ADIN1110_TX_SPACE, &val);
+ if (ret < 0)
+ goto out;
+
+ /* TX FIFO space is expressed in half-words */
+ priv->tx_space = 2 * val;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ if (adin1110_port_rx_ready(priv->ports[i], status1))
+ adin1110_read_frames(priv->ports[i],
+ ADIN1110_MAX_FRAMES_READ);
+ }
+
+ /* clear IRQ sources */
+ adin1110_write_reg(priv, ADIN1110_STATUS0, ADIN1110_CLEAR_STATUS0);
+ adin1110_write_reg(priv, ADIN1110_STATUS1, priv->irq_mask);
+
+out:
+ mutex_unlock(&priv->lock);
+
+ if (priv->tx_space > 0 && ret >= 0)
+ adin1110_wake_queues(priv);
+
+ return IRQ_HANDLED;
+}
+
+/* The ADIN1110 can filter up to 16 MAC addresses; mac_nr selects the filter slot to use. */
+static int adin1110_write_mac_address(struct adin1110_port_priv *port_priv,
+ int mac_nr, const u8 *addr,
+ u8 *mask, u32 port_rules)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 offset = mac_nr * 2;
+ u32 port_rules_mask;
+ int ret;
+ u32 val;
+
+ if (!port_priv->nr)
+ port_rules_mask = ADIN1110_MAC_ADDR_APPLY2PORT;
+ else
+ port_rules_mask = ADIN2111_MAC_ADDR_APPLY2PORT2;
+
+ if (port_rules & port_rules_mask)
+ port_rules_mask |= ADIN1110_MAC_ADDR_TO_HOST | ADIN2111_MAC_ADDR_TO_OTHER_PORT;
+
+ port_rules_mask |= GENMASK(15, 0);
+ val = port_rules | get_unaligned_be16(&addr[0]);
+ ret = adin1110_set_bits(priv, ADIN1110_MAC_ADDR_FILTER_UPR + offset,
+ port_rules_mask, val);
+ if (ret < 0)
+ return ret;
+
+ val = get_unaligned_be32(&addr[2]);
+ ret = adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_FILTER_LWR + offset, val);
+ if (ret < 0)
+ return ret;
+
+ /* Only the first two MAC address slots support masking. */
+ if (mac_nr < ADIN_MAC_P1_ADDR_SLOT) {
+ val = get_unaligned_be16(&mask[0]);
+ ret = adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_MASK_UPR + offset,
+ val);
+ if (ret < 0)
+ return ret;
+
+ val = get_unaligned_be32(&mask[2]);
+ return adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_MASK_LWR + offset,
+ val);
+ }
+
+ return 0;
+}
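A worked example of how a filter entry is split across the UPR/LWR register pair, taken directly from the field accesses above (the slot-numbering macros themselves are treated as opaque here):

/*
 * For address 00:11:22:33:44:55 written to slot mac_nr:
 *
 *   offset    = mac_nr * 2          (each slot uses an UPR/LWR pair)
 *   UPR value = port_rules | 0x0011 (get_unaligned_be16(&addr[0]))
 *   LWR value = 0x22334455          (get_unaligned_be32(&addr[2]))
 *
 * Only the first two slots also get a MAC_ADDR_MASK_UPR/LWR pair, which
 * is what lets the multicast and broadcast filters match on a prefix
 * rather than on a full address.
 */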
+
+static int adin1110_clear_mac_address(struct adin1110_priv *priv, int mac_nr)
+{
+ u32 offset = mac_nr * 2;
+ int ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_FILTER_LWR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ /* only the first two MAC address slots are maskable */
+ if (mac_nr <= 1) {
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_UPR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_LWR + offset, 0);
+ }
+
+ return ret;
+}
+
+static u32 adin1110_port_rules(struct adin1110_port_priv *port_priv,
+ bool fw_to_host,
+ bool fw_to_other_port)
+{
+ u32 port_rules = 0;
+
+ if (!port_priv->nr)
+ port_rules |= ADIN1110_MAC_ADDR_APPLY2PORT;
+ else
+ port_rules |= ADIN2111_MAC_ADDR_APPLY2PORT2;
+
+ if (fw_to_host)
+ port_rules |= ADIN1110_MAC_ADDR_TO_HOST;
+
+ if (fw_to_other_port && port_priv->priv->forwarding)
+ port_rules |= ADIN2111_MAC_ADDR_TO_OTHER_PORT;
+
+ return port_rules;
+}
+
+static int adin1110_multicast_filter(struct adin1110_port_priv *port_priv,
+ int mac_nr, bool accept_multicast)
+{
+ u8 mask[ETH_ALEN] = {0};
+ u8 mac[ETH_ALEN] = {0};
+ u32 port_rules = 0;
+
+ mask[0] = BIT(0);
+ mac[0] = BIT(0);
+
+ if (accept_multicast && port_priv->state == BR_STATE_FORWARDING)
+ port_rules = adin1110_port_rules(port_priv, true, true);
+
+ return adin1110_write_mac_address(port_priv, mac_nr, mac,
+ mask, port_rules);
+}
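The single-bit mask programmed above is enough to catch all multicast traffic, assuming the hardware compares (dst & mask) against the stored address, which is the usual semantics for such filter/mask register pairs:

/*
 *   mask = 01:00:00:00:00:00, mac = 01:00:00:00:00:00
 *
 *   01:00:5e:00:00:fb  (IPv4 mDNS group)   -> multicast bit set, match
 *   33:33:00:00:00:01  (IPv6 all-nodes)    -> multicast bit set, match
 *   00:11:22:33:44:55  (unicast)           -> no match
 */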
+
+static int adin1110_broadcasts_filter(struct adin1110_port_priv *port_priv,
+ int mac_nr, bool accept_broadcast)
+{
+ u32 port_rules = 0;
+ u8 mask[ETH_ALEN];
+
+ memset(mask, 0xFF, ETH_ALEN);
+
+ if (accept_broadcast && port_priv->state == BR_STATE_FORWARDING)
+ port_rules = adin1110_port_rules(port_priv, true, true);
+
+ return adin1110_write_mac_address(port_priv, mac_nr, mask,
+ mask, port_rules);
+}
+
+static int adin1110_set_mac_address(struct net_device *netdev,
+ const unsigned char *dev_addr)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(netdev);
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ u32 mac_slot;
+
+ if (!is_valid_ether_addr(dev_addr))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(netdev, dev_addr);
+ memset(mask, 0xFF, ETH_ALEN);
+
+ mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
+ port_rules = adin1110_port_rules(port_priv, true, false);
+
+ return adin1110_write_mac_address(port_priv, mac_slot, netdev->dev_addr,
+ mask, port_rules);
+}
+
+static int adin1110_ndo_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(netdev, addr);
+ if (ret < 0)
+ return ret;
+
+ return adin1110_set_mac_address(netdev, sa->sa_data);
+}
+
+static int adin1110_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ return phy_do_ioctl(netdev, rq, cmd);
+}
+
+static int adin1110_set_promisc_mode(struct adin1110_port_priv *port_priv,
+ bool promisc)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 mask;
+
+ if (port_priv->state != BR_STATE_FORWARDING)
+ promisc = false;
+
+ if (!port_priv->nr)
+ mask = ADIN1110_FWD_UNK2HOST;
+ else
+ mask = ADIN2111_P2_FWD_UNK2HOST;
+
+ return adin1110_set_bits(priv, ADIN1110_CONFIG2,
+ mask, promisc ? mask : 0);
+}
+
+static int adin1110_setup_rx_mode(struct adin1110_port_priv *port_priv)
+{
+ int ret;
+
+ ret = adin1110_set_promisc_mode(port_priv,
+ !!(port_priv->flags & IFF_PROMISC));
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_multicast_filter(port_priv, ADIN_MAC_MULTICAST_ADDR_SLOT,
+ !!(port_priv->flags & IFF_ALLMULTI));
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_broadcasts_filter(port_priv,
+ ADIN_MAC_BROADCAST_ADDR_SLOT,
+ !!(port_priv->flags & IFF_BROADCAST));
+ if (ret < 0)
+ return ret;
+
+ return adin1110_set_bits(port_priv->priv, ADIN1110_CONFIG1,
+ ADIN1110_CONFIG1_SYNC, ADIN1110_CONFIG1_SYNC);
+}
+
+static bool adin1110_can_offload_forwarding(struct adin1110_priv *priv)
+{
+ int i;
+
+ if (priv->cfg->id != ADIN2111_MAC)
+ return false;
+
+ /* Can't enable forwarding if ports do not belong to the same bridge */
+ if (priv->ports[0]->bridge != priv->ports[1]->bridge || !priv->ports[0]->bridge)
+ return false;
+
+ /* Can't enable forwarding if there is a port
+ * that has been blocked by STP.
+ */
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ if (priv->ports[i]->state != BR_STATE_FORWARDING)
+ return false;
+ }
+
+ return true;
+}
+
+static void adin1110_rx_mode_work(struct work_struct *work)
+{
+ struct adin1110_port_priv *port_priv;
+ struct adin1110_priv *priv;
+
+ port_priv = container_of(work, struct adin1110_port_priv, rx_mode_work);
+ priv = port_priv->priv;
+
+ mutex_lock(&priv->lock);
+ adin1110_setup_rx_mode(port_priv);
+ mutex_unlock(&priv->lock);
+}
+
+static void adin1110_set_rx_mode(struct net_device *dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+
+ spin_lock(&priv->state_lock);
+
+ port_priv->flags = dev->flags;
+ schedule_work(&port_priv->rx_mode_work);
+
+ spin_unlock(&priv->state_lock);
+}
+
+static int adin1110_net_open(struct net_device *net_dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(net_dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ /* Configure MAC to compute and append the FCS itself. */
+ ret = adin1110_write_reg(priv, ADIN1110_CONFIG2, ADIN1110_CRC_APPEND);
+ if (ret < 0)
+ goto out;
+
+ val = ADIN1110_TX_RDY_IRQ | ADIN1110_RX_RDY_IRQ | ADIN1110_SPI_ERR_IRQ;
+ if (priv->cfg->id == ADIN2111_MAC)
+ val |= ADIN2111_RX_RDY_IRQ;
+
+ priv->irq_mask = val;
+ ret = adin1110_write_reg(priv, ADIN1110_IMASK1, ~val);
+ if (ret < 0) {
+ netdev_err(net_dev, "Failed to enable chip IRQs: %d\n", ret);
+ goto out;
+ }
+
+ ret = adin1110_read_reg(priv, ADIN1110_TX_SPACE, &val);
+ if (ret < 0) {
+ netdev_err(net_dev, "Failed to read TX FIFO space: %d\n", ret);
+ goto out;
+ }
+
+ priv->tx_space = 2 * val;
+
+ port_priv->state = BR_STATE_FORWARDING;
+ ret = adin1110_set_mac_address(net_dev, net_dev->dev_addr);
+ if (ret < 0) {
+ netdev_err(net_dev, "Could not set MAC address: %pM, %d\n",
+ net_dev->dev_addr, ret);
+ goto out;
+ }
+
+ ret = adin1110_set_bits(priv, ADIN1110_CONFIG1, ADIN1110_CONFIG1_SYNC,
+ ADIN1110_CONFIG1_SYNC);
+
+out:
+ mutex_unlock(&priv->lock);
+
+ if (ret < 0)
+ return ret;
+
+ phy_start(port_priv->phydev);
+
+ netif_start_queue(net_dev);
+
+ return 0;
+}
+
+static int adin1110_net_stop(struct net_device *net_dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(net_dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 mask;
+ int ret;
+
+ mask = !port_priv->nr ? ADIN2111_RX_RDY_IRQ : ADIN1110_RX_RDY_IRQ;
+
+ /* Disable RX RDY IRQs */
+ mutex_lock(&priv->lock);
+ ret = adin1110_set_bits(priv, ADIN1110_IMASK1, mask, mask);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ netif_stop_queue(port_priv->netdev);
+ flush_work(&port_priv->tx_work);
+ phy_stop(port_priv->phydev);
+
+ return 0;
+}
+
+static void adin1110_tx_work(struct work_struct *work)
+{
+ struct adin1110_port_priv *port_priv;
+ struct adin1110_priv *priv;
+ struct sk_buff *txb;
+ int ret;
+
+ port_priv = container_of(work, struct adin1110_port_priv, tx_work);
+ priv = port_priv->priv;
+
+ mutex_lock(&priv->lock);
+
+ while ((txb = skb_dequeue(&port_priv->txq))) {
+ ret = adin1110_write_fifo(port_priv, txb);
+ if (ret < 0)
+ dev_err_ratelimited(&priv->spidev->dev,
+ "Frame write error: %d\n", ret);
+
+ dev_kfree_skb(txb);
+ }
+
+ mutex_unlock(&priv->lock);
+}
+
+static netdev_tx_t adin1110_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ netdev_tx_t netdev_ret = NETDEV_TX_OK;
+ u32 tx_space_needed;
+
+ tx_space_needed = skb->len + ADIN1110_FRAME_HEADER_LEN + ADIN1110_INTERNAL_SIZE_HEADER_LEN;
+ if (tx_space_needed > priv->tx_space) {
+ netif_stop_queue(dev);
+ netdev_ret = NETDEV_TX_BUSY;
+ } else {
+ priv->tx_space -= tx_space_needed;
+ skb_queue_tail(&port_priv->txq, skb);
+ }
+
+ schedule_work(&port_priv->tx_work);
+
+ return netdev_ret;
+}
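A worked example of the space check, assuming ADIN1110_FRAME_HEADER_LEN and ADIN1110_INTERNAL_SIZE_HEADER_LEN are both 2 bytes (the __be16 frame header in adin1110_write_fifo() suggests as much):

/*
 *   skb->len = 1514  ->  tx_space_needed = 1514 + 2 + 2 = 1518 bytes
 *
 * priv->tx_space is refreshed in the IRQ handler as 2 * ADIN1110_TX_SPACE
 * (the register counts half-words). If the frame does not fit, the queue
 * is stopped and NETDEV_TX_BUSY is returned so the stack retries later;
 * adin1110_wake_queues() restarts the queue once the IRQ handler sees
 * free TX space again.
 */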
+
+static void adin1110_ndo_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+
+ storage->rx_packets = port_priv->rx_packets;
+ storage->tx_packets = port_priv->tx_packets;
+
+ storage->rx_bytes = port_priv->rx_bytes;
+ storage->tx_bytes = port_priv->tx_bytes;
+}
+
+static int adin1110_port_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+
+ ppid->id_len = strnlen(priv->mii_bus_name, MAX_PHYS_ITEM_ID_LEN);
+ memcpy(ppid->id, priv->mii_bus_name, ppid->id_len);
+
+ return 0;
+}
+
+static int adin1110_ndo_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ int err;
+
+ err = snprintf(name, len, "p%d", port_priv->nr);
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct net_device_ops adin1110_netdev_ops = {
+ .ndo_open = adin1110_net_open,
+ .ndo_stop = adin1110_net_stop,
+ .ndo_eth_ioctl = adin1110_ioctl,
+ .ndo_start_xmit = adin1110_start_xmit,
+ .ndo_set_mac_address = adin1110_ndo_set_mac_address,
+ .ndo_set_rx_mode = adin1110_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = adin1110_ndo_get_stats64,
+ .ndo_get_port_parent_id = adin1110_port_get_port_parent_id,
+ .ndo_get_phys_port_name = adin1110_ndo_get_phys_port_name,
+};
+
+static void adin1110_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *di)
+{
+ strscpy(di->driver, "ADIN1110", sizeof(di->driver));
+ strscpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
+}
+
+static const struct ethtool_ops adin1110_ethtool_ops = {
+ .get_drvinfo = adin1110_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static void adin1110_adjust_link(struct net_device *dev)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ if (!phydev->link)
+ phy_print_status(phydev);
+}
+
+/* The PHY ID is also stored in the MAC registers,
+ * so check the SPI connection by reading it.
+ */
+static int adin1110_check_spi(struct adin1110_priv *priv)
+{
+ struct gpio_desc *reset_gpio;
+ int ret;
+ u32 val;
+
+ reset_gpio = devm_gpiod_get_optional(&priv->spidev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (reset_gpio) {
+ /* The MISO pin is used for internal configuration during reset,
+ * so nothing else may disturb the SDO line.
+ */
+ spi_bus_lock(priv->spidev->controller);
+
+ gpiod_set_value(reset_gpio, 1);
+ fsleep(10000);
+ gpiod_set_value(reset_gpio, 0);
+
+ /* Need to wait 90 ms before interacting with
+ * the MAC after a HW reset.
+ */
+ fsleep(90000);
+
+ spi_bus_unlock(priv->spidev->controller);
+ }
+
+ ret = adin1110_read_reg(priv, ADIN1110_PHY_ID, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != priv->cfg->phy_id_val) {
+ dev_err(&priv->spidev->dev, "PHY ID expected: %x, read: %x\n",
+ priv->cfg->phy_id_val, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int adin1110_hw_forwarding(struct adin1110_priv *priv, bool enable)
+{
+ int ret;
+ int i;
+
+ priv->forwarding = enable;
+
+ if (!priv->forwarding) {
+ for (i = ADIN_MAC_FDB_ADDR_SLOT; i < ADIN_MAC_MAX_ADDR_SLOTS; i++) {
+ ret = adin1110_clear_mac_address(priv, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Forwarding is optimised when MAC runs in Cut Through mode. */
+ ret = adin1110_set_bits(priv, ADIN1110_CONFIG2,
+ ADIN2111_PORT_CUT_THRU_EN,
+ priv->forwarding ? ADIN2111_PORT_CUT_THRU_EN : 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ ret = adin1110_setup_rx_mode(priv->ports[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int adin1110_port_bridge_join(struct adin1110_port_priv *port_priv,
+ struct net_device *bridge)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->bridge = bridge;
+
+ if (adin1110_can_offload_forwarding(priv)) {
+ mutex_lock(&priv->lock);
+ ret = adin1110_hw_forwarding(priv, true);
+ mutex_unlock(&priv->lock);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return adin1110_set_mac_address(port_priv->netdev, bridge->dev_addr);
+}
+
+static int adin1110_port_bridge_leave(struct adin1110_port_priv *port_priv,
+ struct net_device *bridge)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->bridge = NULL;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_hw_forwarding(priv, false);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static bool adin1110_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &adin1110_netdev_ops;
+}
+
+static int adin1110_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ int ret = 0;
+
+ if (!adin1110_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = adin1110_port_bridge_join(port_priv, info->upper_dev);
+ else
+ ret = adin1110_port_bridge_leave(port_priv, info->upper_dev);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block adin1110_netdevice_nb = {
+ .notifier_call = adin1110_netdevice_event,
+};
+
+static void adin1110_disconnect_phy(void *data)
+{
+ phy_disconnect(data);
+}
+
+static int adin1110_port_set_forwarding_state(struct adin1110_port_priv *port_priv)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->state = BR_STATE_FORWARDING;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_set_mac_address(port_priv->netdev,
+ port_priv->netdev->dev_addr);
+ if (ret < 0)
+ goto out;
+
+ if (adin1110_can_offload_forwarding(priv))
+ ret = adin1110_hw_forwarding(priv, true);
+ else
+ ret = adin1110_setup_rx_mode(port_priv);
+out:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int adin1110_port_set_blocking_state(struct adin1110_port_priv *port_priv)
+{
+ u8 mac[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x00};
+ struct adin1110_priv *priv = port_priv->priv;
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ int mac_slot;
+ int ret;
+
+ port_priv->state = BR_STATE_BLOCKING;
+
+ mutex_lock(&priv->lock);
+
+ mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
+ ret = adin1110_clear_mac_address(priv, mac_slot);
+ if (ret < 0)
+ goto out;
+
+ ret = adin1110_hw_forwarding(priv, false);
+ if (ret < 0)
+ goto out;
+
+ /* Allow only BPDUs to be passed to the CPU */
+ memset(mask, 0xFF, ETH_ALEN);
+ port_rules = adin1110_port_rules(port_priv, true, false);
+ ret = adin1110_write_mac_address(port_priv, mac_slot, mac,
+ mask, port_rules);
+out:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+/* The ADIN1110/2111 has no native STP support.
+ * Listen for bridge core state changes and allow
+ * either all frames or only BPDUs to pass.
+ */
+static int adin1110_port_attr_stp_state_set(struct adin1110_port_priv *port_priv,
+ u8 state)
+{
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ return adin1110_port_set_forwarding_state(port_priv);
+ case BR_STATE_LEARNING:
+ case BR_STATE_LISTENING:
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ return adin1110_port_set_blocking_state(port_priv);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adin1110_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ return adin1110_port_attr_stp_state_set(port_priv,
+ attr->u.stp_state);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adin1110_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *netdev = switchdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ ret = switchdev_handle_port_attr_set(netdev, ptr,
+ adin1110_port_dev_check,
+ adin1110_port_attr_set);
+
+ return notifier_from_errno(ret);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block adin1110_switchdev_blocking_notifier = {
+ .notifier_call = adin1110_switchdev_blocking_event,
+};
+
+static void adin1110_fdb_offload_notify(struct net_device *netdev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info = {};
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ netdev, &info.info, NULL);
+}
+
+static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ struct adin1110_port_priv *other_port;
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ int mac_nr;
+ u32 val;
+ int ret;
+
+ netdev_dbg(port_priv->netdev,
+ "DEBUG: %s: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ __func__, fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_priv->nr);
+
+ if (!priv->forwarding)
+ return 0;
+
+ if (fdb->is_local)
+ return -EINVAL;
+
+ /* Find free FDB slot on device. */
+ for (mac_nr = ADIN_MAC_FDB_ADDR_SLOT; mac_nr < ADIN_MAC_MAX_ADDR_SLOTS; mac_nr++) {
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+ if (!val)
+ break;
+ }
+
+ if (mac_nr == ADIN_MAC_MAX_ADDR_SLOTS)
+ return -ENOMEM;
+
+ other_port = priv->ports[!port_priv->nr];
+ port_rules = adin1110_port_rules(port_priv, false, true);
+ memset(mask, 0xFF, ETH_ALEN);
+
+ return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
+ mask, port_rules);
+}
+
+static int adin1110_read_mac(struct adin1110_priv *priv, int mac_nr, u8 *addr)
+{
+ u32 val;
+ int ret;
+
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+
+ put_unaligned_be16(val, addr);
+
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_LWR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+
+ put_unaligned_be32(val, addr + 2);
+
+ return 0;
+}
+
+static int adin1110_fdb_del(struct adin1110_port_priv *port_priv,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u8 addr[ETH_ALEN];
+ int mac_nr;
+ int ret;
+
+ netdev_dbg(port_priv->netdev,
+ "DEBUG: %s: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ __func__, fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_priv->nr);
+
+ if (fdb->is_local)
+ return -EINVAL;
+
+ for (mac_nr = ADIN_MAC_FDB_ADDR_SLOT; mac_nr < ADIN_MAC_MAX_ADDR_SLOTS; mac_nr++) {
+ ret = adin1110_read_mac(priv, mac_nr, addr);
+ if (ret < 0)
+ return ret;
+
+ if (ether_addr_equal(addr, fdb->addr)) {
+ ret = adin1110_clear_mac_address(priv, mac_nr);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void adin1110_switchdev_event_work(struct work_struct *work)
+{
+ struct adin1110_switchdev_event_work *switchdev_work;
+ struct adin1110_port_priv *port_priv;
+ int ret;
+
+ switchdev_work = container_of(work, struct adin1110_switchdev_event_work, work);
+ port_priv = switchdev_work->port_priv;
+
+ mutex_lock(&port_priv->priv->lock);
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ ret = adin1110_fdb_add(port_priv, &switchdev_work->fdb_info);
+ if (!ret)
+ adin1110_fdb_offload_notify(port_priv->netdev,
+ &switchdev_work->fdb_info);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ adin1110_fdb_del(port_priv, &switchdev_work->fdb_info);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&port_priv->priv->lock);
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(port_priv->netdev);
+}
+
+/* called under rcu_read_lock() */
+static int adin1110_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = switchdev_notifier_info_to_dev(ptr);
+ struct adin1110_port_priv *port_priv = netdev_priv(netdev);
+ struct adin1110_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+
+ if (!adin1110_port_dev_check(netdev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, adin1110_switchdev_event_work);
+ switchdev_work->port_priv = port_priv;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(netdev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block adin1110_switchdev_notifier = {
+ .notifier_call = adin1110_switchdev_event,
+};
+
+static void adin1110_unregister_notifiers(void)
+{
+ unregister_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier);
+ unregister_switchdev_notifier(&adin1110_switchdev_notifier);
+ unregister_netdevice_notifier(&adin1110_netdevice_nb);
+}
+
+static int adin1110_setup_notifiers(void)
+{
+ int ret;
+
+ ret = register_netdevice_notifier(&adin1110_netdevice_nb);
+ if (ret < 0)
+ return ret;
+
+ ret = register_switchdev_notifier(&adin1110_switchdev_notifier);
+ if (ret < 0)
+ goto err_netdev;
+
+ ret = register_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier);
+ if (ret < 0)
+ goto err_sdev;
+
+ return 0;
+
+err_sdev:
+ unregister_switchdev_notifier(&adin1110_switchdev_notifier);
+
+err_netdev:
+ unregister_netdevice_notifier(&adin1110_netdevice_nb);
+
+ return ret;
+}
+
+static int adin1110_probe_netdevs(struct adin1110_priv *priv)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct adin1110_port_priv *port_priv;
+ struct net_device *netdev;
+ int ret;
+ int i;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ netdev = devm_alloc_etherdev(dev, sizeof(*port_priv));
+ if (!netdev)
+ return -ENOMEM;
+
+ port_priv = netdev_priv(netdev);
+ port_priv->netdev = netdev;
+ port_priv->priv = priv;
+ port_priv->cfg = priv->cfg;
+ port_priv->nr = i;
+ priv->ports[i] = port_priv;
+ SET_NETDEV_DEV(netdev, dev);
+
+ ret = device_get_ethdev_address(dev, netdev);
+ if (ret < 0)
+ return ret;
+
+ netdev->irq = priv->spidev->irq;
+ INIT_WORK(&port_priv->tx_work, adin1110_tx_work);
+ INIT_WORK(&port_priv->rx_mode_work, adin1110_rx_mode_work);
+ skb_queue_head_init(&port_priv->txq);
+
+ netif_carrier_off(netdev);
+
+ netdev->if_port = IF_PORT_10BASET;
+ netdev->netdev_ops = &adin1110_netdev_ops;
+ netdev->ethtool_ops = &adin1110_ethtool_ops;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->features |= NETIF_F_NETNS_LOCAL;
+
+ port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false);
+ if (IS_ERR(port_priv->phydev)) {
+ netdev_err(netdev, "Could not find PHY with device address: %d.\n", i);
+ return PTR_ERR(port_priv->phydev);
+ }
+
+ port_priv->phydev = phy_connect(netdev,
+ phydev_name(port_priv->phydev),
+ adin1110_adjust_link,
+ PHY_INTERFACE_MODE_INTERNAL);
+ if (IS_ERR(port_priv->phydev)) {
+ netdev_err(netdev, "Could not connect PHY with device address: %d.\n", i);
+ return PTR_ERR(port_priv->phydev);
+ }
+
+ ret = devm_add_action_or_reset(dev, adin1110_disconnect_phy,
+ port_priv->phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* ADIN1110 INT_N pin will be used to signal the host */
+ ret = devm_request_threaded_irq(dev, priv->spidev->irq, NULL,
+ adin1110_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ dev_name(dev), priv);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ ret = devm_register_netdev(dev, priv->ports[i]->netdev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register network device.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int adin1110_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *dev_id = spi_get_device_id(spi);
+ struct device *dev = &spi->dev;
+ struct adin1110_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct adin1110_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->spidev = spi;
+ priv->cfg = &adin1110_cfgs[dev_id->driver_data];
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+
+ mutex_init(&priv->lock);
+ spin_lock_init(&priv->state_lock);
+
+ /* use of CRC on control and data transactions is pin dependent */
+ priv->append_crc = device_property_read_bool(dev, "adi,spi-crc");
+ if (priv->append_crc)
+ crc8_populate_msb(adin1110_crc_table, 0x7);
+
+ ret = adin1110_check_spi(priv);
+ if (ret < 0) {
+ dev_err(dev, "Probe SPI Read check failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = adin1110_write_reg(priv, ADIN1110_RESET, ADIN1110_SWRESET);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_register_mdiobus(priv, dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not register MDIO bus %d\n", ret);
+ return ret;
+ }
+
+ return adin1110_probe_netdevs(priv);
+}
+
+static const struct of_device_id adin1110_match_table[] = {
+ { .compatible = "adi,adin1110" },
+ { .compatible = "adi,adin2111" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adin1110_match_table);
+
+static const struct spi_device_id adin1110_spi_id[] = {
+ { .name = "adin1110", .driver_data = ADIN1110_MAC },
+ { .name = "adin2111", .driver_data = ADIN2111_MAC },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adin1110_spi_id);
+
+static struct spi_driver adin1110_driver = {
+ .driver = {
+ .name = "adin1110",
+ .of_match_table = adin1110_match_table,
+ },
+ .probe = adin1110_probe,
+ .id_table = adin1110_spi_id,
+};
+
+static int __init adin1110_driver_init(void)
+{
+ int ret;
+
+ ret = adin1110_setup_notifiers();
+ if (ret < 0)
+ return ret;
+
+ ret = spi_register_driver(&adin1110_driver);
+ if (ret < 0) {
+ adin1110_unregister_notifiers();
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit adin1110_exit(void)
+{
+ adin1110_unregister_notifiers();
+ spi_unregister_driver(&adin1110_driver);
+}
+module_init(adin1110_driver_init);
+module_exit(adin1110_exit);
+
+MODULE_DESCRIPTION("ADIN1110 Network driver");
+MODULE_AUTHOR("Alexandru Tachici <alexandru.tachici@analog.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 447dc64a17e5..aa0d2f3aaeaa 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -258,6 +258,7 @@ static int greth_init_rings(struct greth_private *greth)
if (dma_mapping_error(greth->dev, dma_addr)) {
if (netif_msg_ifup(greth))
dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ dev_kfree_skb(skb);
goto cleanup;
}
greth->rx_skbuff[i] = skb;
@@ -1112,9 +1113,9 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
{
struct greth_private *greth = netdev_priv(dev);
- strlcpy(info->driver, dev_driver_string(greth->dev),
+ strscpy(info->driver, dev_driver_string(greth->dev),
sizeof(info->driver));
- strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
+ strscpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
}
static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
@@ -1507,7 +1508,7 @@ static int greth_of_probe(struct platform_device *ofdev)
}
/* setup NAPI */
- netif_napi_add(dev, &greth->napi, greth_poll, 64);
+ netif_napi_add(dev, &greth->napi, greth_poll);
return 0;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index f4edc616388c..5fab589b3ddf 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -1106,7 +1106,7 @@ static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
writel(0, &rxmac->mif_ctrl);
writel(0, &rxmac->space_avail);
- /* Initialize the the mif_ctrl register
+ /* Initialize the mif_ctrl register
* bit 3: Receive code error. One or more nibbles were signaled as
* errors during the reception of the packet. Clear this
* bit in Gigabit, set it in 100Mbit. This was derived
@@ -2413,11 +2413,13 @@ static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
kfree(tx_ring->tcb_ring);
}
+#define MAX_TX_DESC_PER_PKT 24
+
/* nic_send_packet - NIC specific send handler for version B silicon. */
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
u32 i;
- struct tx_desc desc[24];
+ struct tx_desc desc[MAX_TX_DESC_PER_PKT];
u32 frag = 0;
u32 thiscopy, remainder;
struct sk_buff *skb = tcb->skb;
@@ -2432,9 +2434,6 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
* more than 5 fragments.
*/
- /* nr_frags should be no more than 18. */
- BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);
-
memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
for (i = 0; i < nr_frags; i++) {
@@ -2953,8 +2952,8 @@ static void et131x_get_drvinfo(struct net_device *netdev,
{
struct et131x_adapter *adapter = netdev_priv(netdev);
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -3762,6 +3761,13 @@ static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
struct tx_ring *tx_ring = &adapter->tx_ring;
+ /* This driver does not support TSO, so it is very unlikely
+ * that this condition is true.
+ */
+ if (unlikely(skb_shinfo(skb)->nr_frags > MAX_TX_DESC_PER_PKT - 2)) {
+ if (skb_linearize(skb))
+ goto drop_err;
+ }
/* stop the queue if it's getting full */
if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
netif_stop_queue(netdev);
@@ -3914,10 +3920,9 @@ static int et131x_pci_setup(struct pci_dev *pdev,
pci_set_master(pdev);
/* Check the DMA addressing support of this device */
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
dev_err(&pdev->dev, "No usable DMA addressing method\n");
- rc = -EIO;
goto err_release_res;
}
@@ -3964,7 +3969,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
et131x_init_send(adapter);
- netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
+ netif_napi_add(netdev, &adapter->napi, et131x_poll);
eth_hw_addr_set(netdev, adapter->addr);
diff --git a/drivers/net/ethernet/alacritech/slic.h b/drivers/net/ethernet/alacritech/slic.h
index 3add305d34b4..82071d0e5f7f 100644
--- a/drivers/net/ethernet/alacritech/slic.h
+++ b/drivers/net/ethernet/alacritech/slic.h
@@ -265,8 +265,6 @@
#define SLIC_NUM_STAT_DESC_ARRAYS 4
#define SLIC_INVALID_STAT_DESC_IDX 0xffffffff
-#define SLIC_NAPI_WEIGHT 64
-
#define SLIC_UPR_LSTAT 0
#define SLIC_UPR_CONFIG 1
@@ -290,13 +288,13 @@ do { \
u64_stats_update_end(&(st)->syncp); \
} while (0)
-#define SLIC_GET_STATS_COUNTER(newst, st, counter) \
-{ \
- unsigned int start; \
+#define SLIC_GET_STATS_COUNTER(newst, st, counter) \
+{ \
+ unsigned int start; \
do { \
- start = u64_stats_fetch_begin_irq(&(st)->syncp); \
- newst = (st)->counter; \
- } while (u64_stats_fetch_retry_irq(&(st)->syncp, start)); \
+ start = u64_stats_fetch_begin(&(st)->syncp); \
+ newst = (st)->counter; \
+ } while (u64_stats_fetch_retry(&(st)->syncp, start)); \
}
struct slic_upr {
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 1fc9a1cd3ef8..a30d0f172986 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1531,8 +1531,8 @@ static void slic_get_drvinfo(struct net_device *dev,
{
struct slic_device *sdev = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
}
static const struct ethtool_ops slic_ethtool_ops = {
@@ -1803,7 +1803,7 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto unmap;
}
- netif_napi_add(dev, &sdev->napi, slic_poll, SLIC_NAPI_WEIGHT);
+ netif_napi_add(dev, &sdev->napi, slic_poll);
netif_carrier_off(dev);
err = register_netdev(dev);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 800ee022388f..a94c62956eed 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/soc/sunxi/sunxi_sram.h>
+#include <linux/dmaengine.h>
#include "sun4i-emac.h"
@@ -76,7 +77,6 @@ struct emac_board_info {
void __iomem *membase;
u32 msg_enable;
struct net_device *ndev;
- struct sk_buff *skb_last;
u16 tx_fifo_stat;
int emacrx_completed_flag;
@@ -87,6 +87,16 @@ struct emac_board_info {
unsigned int duplex;
phy_interface_t phy_interface;
+ struct dma_chan *rx_chan;
+ phys_addr_t emac_rx_fifo;
+};
+
+struct emac_dma_req {
+ struct emac_board_info *db;
+ struct dma_async_tx_descriptor *desc;
+ struct sk_buff *skb;
+ dma_addr_t rxbuf;
+ int count;
};
static void emac_update_speed(struct net_device *dev)
@@ -96,9 +106,9 @@ static void emac_update_speed(struct net_device *dev)
/* set EMAC SPEED, depend on PHY */
reg_val = readl(db->membase + EMAC_MAC_SUPP_REG);
- reg_val &= ~(0x1 << 8);
+ reg_val &= ~EMAC_MAC_SUPP_100M;
if (db->speed == SPEED_100)
- reg_val |= 1 << 8;
+ reg_val |= EMAC_MAC_SUPP_100M;
writel(reg_val, db->membase + EMAC_MAC_SUPP_REG);
}
@@ -206,12 +216,123 @@ static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
readsl(reg, data, round_up(count, 4) / 4);
}
+static struct emac_dma_req *
+emac_alloc_dma_req(struct emac_board_info *db,
+ struct dma_async_tx_descriptor *desc, struct sk_buff *skb,
+ dma_addr_t rxbuf, int count)
+{
+ struct emac_dma_req *req;
+
+ req = kzalloc(sizeof(struct emac_dma_req), GFP_ATOMIC);
+ if (!req)
+ return NULL;
+
+ req->db = db;
+ req->desc = desc;
+ req->skb = skb;
+ req->rxbuf = rxbuf;
+ req->count = count;
+ return req;
+}
+
+static void emac_free_dma_req(struct emac_dma_req *req)
+{
+ kfree(req);
+}
+
+static void emac_dma_done_callback(void *arg)
+{
+ struct emac_dma_req *req = arg;
+ struct emac_board_info *db = req->db;
+ struct sk_buff *skb = req->skb;
+ struct net_device *dev = db->ndev;
+ int rxlen = req->count;
+ u32 reg_val;
+
+ dma_unmap_single(db->dev, req->rxbuf, rxlen, DMA_FROM_DEVICE);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_bytes += rxlen;
+ /* Pass to upper layer */
+ dev->stats.rx_packets++;
+
+ /* re-enable CPU receive */
+ reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+ reg_val &= ~EMAC_RX_CTL_DMA_EN;
+ writel(reg_val, db->membase + EMAC_RX_CTL_REG);
+
+ /* re-enable interrupt */
+ reg_val = readl(db->membase + EMAC_INT_CTL_REG);
+ reg_val |= EMAC_INT_CTL_RX_EN;
+ writel(reg_val, db->membase + EMAC_INT_CTL_REG);
+
+ db->emacrx_completed_flag = 1;
+ emac_free_dma_req(req);
+}
+
+static int emac_dma_inblk_32bit(struct emac_board_info *db,
+ struct sk_buff *skb, void *rdptr, int count)
+{
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ dma_addr_t rxbuf;
+ struct emac_dma_req *req;
+ int ret = 0;
+
+ rxbuf = dma_map_single(db->dev, rdptr, count, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(db->dev, rxbuf);
+ if (ret) {
+ dev_err(db->dev, "dma mapping error.\n");
+ return ret;
+ }
+
+ desc = dmaengine_prep_slave_single(db->rx_chan, rxbuf, count,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(db->dev, "prepare slave single failed\n");
+ ret = -ENOMEM;
+ goto prepare_err;
+ }
+
+ req = emac_alloc_dma_req(db, desc, skb, rxbuf, count);
+ if (!req) {
+ dev_err(db->dev, "alloc emac dma req error.\n");
+ ret = -ENOMEM;
+ goto alloc_req_err;
+ }
+
+ desc->callback_param = req;
+ desc->callback = emac_dma_done_callback;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dev_err(db->dev, "dma submit error.\n");
+ goto submit_err;
+ }
+
+ dma_async_issue_pending(db->rx_chan);
+ return ret;
+
+submit_err:
+ emac_free_dma_req(req);
+
+alloc_req_err:
+ dmaengine_desc_free(desc);
+
+prepare_err:
+ dma_unmap_single(db->dev, rxbuf, count, DMA_FROM_DEVICE);
+ return ret;
+}
+
/* ethtool ops */
static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
static u32 emac_get_msglevel(struct net_device *dev)
@@ -308,7 +429,7 @@ static unsigned int emac_powerup(struct net_device *ndev)
/* initial EMAC */
/* flush RX FIFO */
reg_val = readl(db->membase + EMAC_RX_CTL_REG);
- reg_val |= 0x8;
+ reg_val |= EMAC_RX_CTL_FLUSH_FIFO;
writel(reg_val, db->membase + EMAC_RX_CTL_REG);
udelay(1);
@@ -320,8 +441,8 @@ static unsigned int emac_powerup(struct net_device *ndev)
/* set MII clock */
reg_val = readl(db->membase + EMAC_MAC_MCFG_REG);
- reg_val &= (~(0xf << 2));
- reg_val |= (0xD << 2);
+ reg_val &= ~EMAC_MAC_MCFG_MII_CLKD_MASK;
+ reg_val |= EMAC_MAC_MCFG_MII_CLKD_72;
writel(reg_val, db->membase + EMAC_MAC_MCFG_REG);
/* clear RX counter */
@@ -385,7 +506,7 @@ static void emac_init_device(struct net_device *dev)
/* enable RX/TX0/RX Hlevel interrup */
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN | EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
spin_unlock_irqrestore(&db->lock, flags);
@@ -499,7 +620,6 @@ static void emac_rx(struct net_device *dev)
struct sk_buff *skb;
u8 *rdptr;
bool good_packet;
- static int rxlen_last;
unsigned int reg_val;
u32 rxhdr, rxstatus, rxcount, rxlen;
@@ -514,26 +634,12 @@ static void emac_rx(struct net_device *dev)
if (netif_msg_rx_status(db))
dev_dbg(db->dev, "RXCount: %x\n", rxcount);
- if ((db->skb_last != NULL) && (rxlen_last > 0)) {
- dev->stats.rx_bytes += rxlen_last;
-
- /* Pass to upper layer */
- db->skb_last->protocol = eth_type_trans(db->skb_last,
- dev);
- netif_rx(db->skb_last);
- dev->stats.rx_packets++;
- db->skb_last = NULL;
- rxlen_last = 0;
-
- reg_val = readl(db->membase + EMAC_RX_CTL_REG);
- reg_val &= ~EMAC_RX_CTL_DMA_EN;
- writel(reg_val, db->membase + EMAC_RX_CTL_REG);
- }
-
if (!rxcount) {
db->emacrx_completed_flag = 1;
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN |
+ EMAC_INT_CTL_TX_ABRT_EN |
+ EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
/* had one stuck? */
@@ -565,7 +671,9 @@ static void emac_rx(struct net_device *dev)
writel(reg_val | EMAC_CTL_RX_EN,
db->membase + EMAC_CTL_REG);
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN |
+ EMAC_INT_CTL_TX_ABRT_EN |
+ EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
db->emacrx_completed_flag = 1;
@@ -623,6 +731,19 @@ static void emac_rx(struct net_device *dev)
if (netif_msg_rx_status(db))
dev_dbg(db->dev, "RxLen %x\n", rxlen);
+ if (rxlen >= dev->mtu && db->rx_chan) {
+ reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+ reg_val |= EMAC_RX_CTL_DMA_EN;
+ writel(reg_val, db->membase + EMAC_RX_CTL_REG);
+ if (!emac_dma_inblk_32bit(db, skb, rdptr, rxlen))
+ break;
+
+ /* re-enable CPU receive, then try to receive via emac_inblk_32bit */
+ reg_val = readl(db->membase + EMAC_RX_CTL_REG);
+ reg_val &= ~EMAC_RX_CTL_DMA_EN;
+ writel(reg_val, db->membase + EMAC_RX_CTL_REG);
+ }
+
emac_inblk_32bit(db->membase + EMAC_RX_IO_DATA_REG,
rdptr, rxlen);
dev->stats.rx_bytes += rxlen;
@@ -666,18 +787,23 @@ static irqreturn_t emac_interrupt(int irq, void *dev_id)
}
/* Transmit Interrupt check */
- if (int_status & (0x01 | 0x02))
+ if (int_status & EMAC_INT_STA_TX_COMPLETE)
emac_tx_done(dev, db, int_status);
- if (int_status & (0x04 | 0x08))
+ if (int_status & EMAC_INT_STA_TX_ABRT)
netdev_info(dev, " ab : %x\n", int_status);
/* Re-enable interrupt mask */
if (db->emacrx_completed_flag == 1) {
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN | EMAC_INT_CTL_RX_EN);
+ writel(reg_val, db->membase + EMAC_INT_CTL_REG);
+ } else {
+ reg_val = readl(db->membase + EMAC_INT_CTL_REG);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
}
+
spin_unlock(&db->lock);
return IRQ_HANDLED;
@@ -782,6 +908,58 @@ static const struct net_device_ops emac_netdev_ops = {
#endif
};
+static int emac_configure_dma(struct emac_board_info *db)
+{
+ struct platform_device *pdev = db->pdev;
+ struct net_device *ndev = db->ndev;
+ struct dma_slave_config conf = {};
+ struct resource *regs;
+ int err = 0;
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ netdev_err(ndev, "get io resource from device failed.\n");
+ err = -ENOMEM;
+ goto out_clear_chan;
+ }
+
+ netdev_info(ndev, "get io resource from device: %pa, size = %u\n",
+ &regs->start, (unsigned int)resource_size(regs));
+ db->emac_rx_fifo = regs->start + EMAC_RX_IO_DATA_REG;
+
+ db->rx_chan = dma_request_chan(&pdev->dev, "rx");
+ if (IS_ERR(db->rx_chan)) {
+ netdev_err(ndev,
+ "failed to request dma channel. dma is disabled\n");
+ err = PTR_ERR(db->rx_chan);
+ goto out_clear_chan;
+ }
+
+ conf.direction = DMA_DEV_TO_MEM;
+ conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ conf.src_addr = db->emac_rx_fifo;
+ conf.dst_maxburst = 4;
+ conf.src_maxburst = 4;
+ conf.device_fc = false;
+
+ err = dmaengine_slave_config(db->rx_chan, &conf);
+ if (err) {
+ netdev_err(ndev, "config dma slave failed\n");
+ err = -EINVAL;
+ goto out_slave_configure_err;
+ }
+
+ return err;
+
+out_slave_configure_err:
+ dma_release_channel(db->rx_chan);
+
+out_clear_chan:
+ db->rx_chan = NULL;
+ return err;
+}
+
/* Search EMAC board, allocate space and register it
*/
static int emac_probe(struct platform_device *pdev)
@@ -824,6 +1002,9 @@ static int emac_probe(struct platform_device *pdev)
goto out_iounmap;
}
+ if (emac_configure_dma(db))
+ netdev_info(ndev, "configure dma failed. disable dma.\n");
+
db->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(db->clk)) {
ret = PTR_ERR(db->clk);
@@ -891,6 +1072,7 @@ out_clk_disable_unprepare:
clk_disable_unprepare(db->clk);
out_dispose_mapping:
irq_dispose_mapping(ndev->irq);
+ dma_release_channel(db->rx_chan);
out_iounmap:
iounmap(db->membase);
out:
@@ -906,6 +1088,11 @@ static int emac_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct emac_board_info *db = netdev_priv(ndev);
+ if (db->rx_chan) {
+ dmaengine_terminate_all(db->rx_chan);
+ dma_release_channel(db->rx_chan);
+ }
+
unregister_netdev(ndev);
sunxi_sram_release(&pdev->dev);
clk_disable_unprepare(db->clk);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.h b/drivers/net/ethernet/allwinner/sun4i-emac.h
index 38c72d9ec600..90bd9ad77607 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.h
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.h
@@ -38,6 +38,7 @@
#define EMAC_RX_CTL_REG (0x3c)
#define EMAC_RX_CTL_AUTO_DRQ_EN (1 << 1)
#define EMAC_RX_CTL_DMA_EN (1 << 2)
+#define EMAC_RX_CTL_FLUSH_FIFO (1 << 3)
#define EMAC_RX_CTL_PASS_ALL_EN (1 << 4)
#define EMAC_RX_CTL_PASS_CTL_EN (1 << 5)
#define EMAC_RX_CTL_PASS_CRC_ERR_EN (1 << 6)
@@ -61,7 +62,21 @@
#define EMAC_RX_IO_DATA_STATUS_OK (1 << 7)
#define EMAC_RX_FBC_REG (0x50)
#define EMAC_INT_CTL_REG (0x54)
+#define EMAC_INT_CTL_RX_EN (1 << 8)
+#define EMAC_INT_CTL_TX0_EN (1)
+#define EMAC_INT_CTL_TX1_EN (1 << 1)
+#define EMAC_INT_CTL_TX_EN (EMAC_INT_CTL_TX0_EN | EMAC_INT_CTL_TX1_EN)
+#define EMAC_INT_CTL_TX0_ABRT_EN (0x1 << 2)
+#define EMAC_INT_CTL_TX1_ABRT_EN (0x1 << 3)
+#define EMAC_INT_CTL_TX_ABRT_EN (EMAC_INT_CTL_TX0_ABRT_EN | EMAC_INT_CTL_TX1_ABRT_EN)
#define EMAC_INT_STA_REG (0x58)
+#define EMAC_INT_STA_TX0_COMPLETE (0x1)
+#define EMAC_INT_STA_TX1_COMPLETE (0x1 << 1)
+#define EMAC_INT_STA_TX_COMPLETE (EMAC_INT_STA_TX0_COMPLETE | EMAC_INT_STA_TX1_COMPLETE)
+#define EMAC_INT_STA_TX0_ABRT (0x1 << 2)
+#define EMAC_INT_STA_TX1_ABRT (0x1 << 3)
+#define EMAC_INT_STA_TX_ABRT (EMAC_INT_STA_TX0_ABRT | EMAC_INT_STA_TX1_ABRT)
+#define EMAC_INT_STA_RX_COMPLETE (0x1 << 8)
#define EMAC_MAC_CTL0_REG (0x5c)
#define EMAC_MAC_CTL0_RX_FLOW_CTL_EN (1 << 2)
#define EMAC_MAC_CTL0_TX_FLOW_CTL_EN (1 << 3)
@@ -87,8 +102,11 @@
#define EMAC_MAC_CLRT_RM (0x0f)
#define EMAC_MAC_MAXF_REG (0x70)
#define EMAC_MAC_SUPP_REG (0x74)
+#define EMAC_MAC_SUPP_100M (0x1 << 8)
#define EMAC_MAC_TEST_REG (0x78)
#define EMAC_MAC_MCFG_REG (0x7c)
+#define EMAC_MAC_MCFG_MII_CLKD_MASK (0xff << 2)
+#define EMAC_MAC_MCFG_MII_CLKD_72 (0x0d << 2)
#define EMAC_MAC_A0_REG (0x98)
#define EMAC_MAC_A1_REG (0x9c)
#define EMAC_MAC_A2_REG (0xa0)
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 732da15a3827..eafef84fe3be 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -589,8 +589,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
}
ap->name = dev->name;
- if (ap->pci_using_dac)
- dev->features |= NETIF_F_HIGHDMA;
+ dev->features |= NETIF_F_HIGHDMA;
pci_set_drvdata(pdev, dev);
@@ -1130,11 +1129,7 @@ static int ace_init(struct net_device *dev)
/*
* Configure DMA attributes.
*/
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- ap->pci_using_dac = 1;
- } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- ap->pci_using_dac = 0;
- } else {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
ecode = -ENODEV;
goto init_error;
}
@@ -2440,7 +2435,7 @@ restart:
} else {
dma_addr_t mapping;
u32 vlan_tag = 0;
- int i, len = 0;
+ int i;
mapping = ace_map_tx_skb(ap, skb, NULL, idx);
flagsize = (skb_headlen(skb) << 16);
@@ -2459,7 +2454,6 @@ restart:
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
struct tx_ring_info *info;
- len += skb_frag_size(frag);
info = ap->skb->tx_skbuff + idx;
desc = ap->tx_ring + idx;
@@ -2696,12 +2690,12 @@ static void ace_get_drvinfo(struct net_device *dev,
{
struct ace_private *ap = netdev_priv(dev);
- strlcpy(info->driver, "acenic", sizeof(info->driver));
+ strscpy(info->driver, "acenic", sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->version), "%i.%i.%i",
ap->firmware_major, ap->firmware_minor, ap->firmware_fix);
if (ap->pdev)
- strlcpy(info->bus_info, pci_name(ap->pdev),
+ strscpy(info->bus_info, pci_name(ap->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h
index 265fa601a258..ca5ce0cbbad1 100644
--- a/drivers/net/ethernet/alteon/acenic.h
+++ b/drivers/net/ethernet/alteon/acenic.h
@@ -692,7 +692,6 @@ struct ace_private
__attribute__ ((aligned (SMP_CACHE_BYTES)));
u32 last_tx, last_std_rx, last_mini_rx;
#endif
- int pci_using_dac;
u8 firmware_major;
u8 firmware_minor;
u8 firmware_fix;
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
index 914e56b91467..dd7fd41ccde5 100644
--- a/drivers/net/ethernet/altera/Kconfig
+++ b/drivers/net/ethernet/altera/Kconfig
@@ -3,6 +3,8 @@ config ALTERA_TSE
tristate "Altera Triple-Speed Ethernet MAC support"
depends on HAS_DMA
select PHYLIB
+ select PHYLINK
+ select PCS_ALTERA_TSE
help
This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index db97170da8c7..7f247ccbe6ba 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -513,7 +513,7 @@ static int sgdma_txbusy(struct altera_tse_private *priv)
{
int delay = 0;
- /* if DMA is busy, wait for current transactino to finish */
+ /* if DMA is busy, wait for current transaction to finish */
while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
& SGDMA_STSREG_BUSY) && (delay++ < 100))
udelay(1);
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index f17acfb579a0..db5eed06e92d 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR 10000
#define ALTERA_TSE_MAC_FIFO_WIDTH 4 /* TX/RX FIFO width in
@@ -109,17 +110,6 @@
#define MAC_CMDCFG_DISABLE_READ_TIMEOUT_GET(v) GET_BIT_VALUE(v, 27)
#define MAC_CMDCFG_CNT_RESET_GET(v) GET_BIT_VALUE(v, 31)
-/* SGMII PCS register addresses
- */
-#define SGMII_PCS_SCRATCH 0x10
-#define SGMII_PCS_REV 0x11
-#define SGMII_PCS_LINK_TIMER_0 0x12
-#define SGMII_PCS_LINK_TIMER_1 0x13
-#define SGMII_PCS_IF_MODE 0x14
-#define SGMII_PCS_DIS_READ_TO 0x15
-#define SGMII_PCS_READ_TO 0x16
-#define SGMII_PCS_SW_RESET_TIMEOUT 100 /* usecs */
-
/* MDIO registers within MAC register Space
*/
struct altera_tse_mdio {
@@ -423,6 +413,9 @@ struct altera_tse_private {
void __iomem *tx_dma_csr;
void __iomem *tx_dma_desc;
+ /* SGMII PCS address space */
+ void __iomem *pcs_base;
+
/* Rx buffers queue */
struct tse_buffer *rx_ring;
u32 rx_cons;
@@ -480,6 +473,10 @@ struct altera_tse_private {
u32 msg_enable;
struct altera_dmaops *dmaops;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct phylink_pcs *pcs;
};
/* Function prototypes
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 4299f1301149..81313c85833e 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -199,9 +199,9 @@ static int tse_reglen(struct net_device *dev)
static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *regbuf)
{
- int i;
struct altera_tse_private *priv = netdev_priv(dev);
u32 *buf = regbuf;
+ int i;
/* Set version to a known value, so ethtool knows
* how to do any special formatting of this data.
@@ -221,6 +221,22 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
buf[i] = csrrd32(priv->mac_dev, i * 4);
}
+static int tse_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+}
+
+static int tse_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
static const struct ethtool_ops tse_ethtool_ops = {
.get_drvinfo = tse_get_drvinfo,
.get_regs_len = tse_reglen,
@@ -231,8 +247,9 @@ static const struct ethtool_ops tse_ethtool_ops = {
.get_ethtool_stats = tse_fill_stats,
.get_msglevel = tse_get_msglevel,
.set_msglevel = tse_set_msglevel,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = tse_ethtool_get_link_ksettings,
+ .set_link_ksettings = tse_ethtool_set_link_ksettings,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void altera_tse_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 993b2fb42961..66e3af73ec41 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -32,6 +32,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
+#include <linux/pcs-altera-tse.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
@@ -72,7 +73,7 @@ MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
*/
#define ALTERA_RXDMABUFFER_SIZE 2048
-/* Allow network stack to resume queueing packets after we've
+/* Allow network stack to resume queuing packets after we've
* finished transmitting at least 1/4 of the packets in the queue.
*/
#define TSE_TX_THRESH(x) (x->tx_ring_size / 4)
@@ -86,27 +87,6 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
}
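
The helper above computes free TX descriptors from free-running producer/consumer counters, and TSE_TX_THRESH() wakes the queue once a quarter of the ring is free again. A minimal user-space sketch of the same arithmetic (ring size and counter values are illustrative, not taken from the driver):

/* Illustrative sketch of the TSE TX ring occupancy math; not driver code. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256u                  /* assumed power-of-two ring size */
#define TX_THRESH (RING_SIZE / 4)       /* mirrors TSE_TX_THRESH() */

/* Free descriptors for free-running producer/consumer counters. */
static uint32_t tx_avail(uint32_t prod, uint32_t cons)
{
	return cons + RING_SIZE - prod - 1;  /* same form as tse_tx_avail() */
}

int main(void)
{
	uint32_t prod = 300, cons = 70;      /* counters may exceed RING_SIZE */

	printf("free descriptors: %u\n", tx_avail(prod, cons));
	printf("wake queue when free >= %u\n", TX_THRESH);
	return 0;
}
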
-/* PCS Register read/write functions
- */
-static u16 sgmii_pcs_read(struct altera_tse_private *priv, int regnum)
-{
- return csrrd32(priv->mac_dev,
- tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
-}
-
-static void sgmii_pcs_write(struct altera_tse_private *priv, int regnum,
- u16 value)
-{
- csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
-}
-
-/* Check PCS scratch memory */
-static int sgmii_pcs_scratch_test(struct altera_tse_private *priv, u16 value)
-{
- sgmii_pcs_write(priv, SGMII_PCS_SCRATCH, value);
- return (sgmii_pcs_read(priv, SGMII_PCS_SCRATCH) == value);
-}
-
/* MDIO specific functions
*/
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
@@ -141,10 +121,10 @@ static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int ret;
struct device_node *mdio_node = NULL;
- struct mii_bus *mdio = NULL;
struct device_node *child_node = NULL;
+ struct mii_bus *mdio = NULL;
+ int ret;
for_each_child_of_node(priv->device->of_node, child_node) {
if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
@@ -163,7 +143,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio = mdiobus_alloc();
if (mdio == NULL) {
netdev_err(dev, "Error allocating MDIO bus\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto put_node;
}
mdio->name = ALTERA_TSE_RESOURCE_NAME;
@@ -180,6 +161,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio->id);
goto out_free_mdio;
}
+ of_node_put(mdio_node);
if (netif_msg_drv(priv))
netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
@@ -189,6 +171,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
out_free_mdio:
mdiobus_free(mdio);
mdio = NULL;
+put_node:
+ of_node_put(mdio_node);
return ret;
}
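
The hunk above fixes a device-node reference leak: the node obtained while walking the MDIO children is now released through a put_node label on the allocation-failure path as well as after successful registration. A generic sketch of that acquire/goto-unwind shape, with stand-in names (node_get/node_put are not kernel APIs):

/* Generic acquire/goto-unwind pattern the MDIO fix follows; stand-in names. */
#include <stdio.h>
#include <stdlib.h>

struct node { int refs; };

static struct node *node_get(void) { struct node *n = calloc(1, sizeof(*n)); if (n) n->refs = 1; return n; }
static void node_put(struct node *n) { if (n && --n->refs == 0) free(n); }

static int create_bus(int fail_alloc)
{
	struct node *node = node_get();          /* reference taken early ... */
	void *bus;
	int ret;

	if (!node)
		return -1;

	bus = fail_alloc ? NULL : malloc(64);
	if (!bus) {
		ret = -1;
		goto put_node;                   /* ... so every exit drops it */
	}

	free(bus);
	ret = 0;
put_node:
	node_put(node);
	return ret;
}

int main(void)
{
	printf("ok path: %d, failure path: %d\n", create_bus(0), create_bus(1));
	return 0;
}
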
@@ -232,8 +216,8 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv,
static void tse_free_rx_buffer(struct altera_tse_private *priv,
struct tse_buffer *rxbuffer)
{
- struct sk_buff *skb = rxbuffer->skb;
dma_addr_t dma_addr = rxbuffer->dma_addr;
+ struct sk_buff *skb = rxbuffer->skb;
if (skb != NULL) {
if (dma_addr)
@@ -354,6 +338,7 @@ static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
struct ethhdr *eth_hdr;
u16 vid;
+
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
!__vlan_get_tag(skb, &vid)) {
eth_hdr = (struct ethhdr *)skb->data;
@@ -367,10 +352,10 @@ static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
*/
static int tse_rx(struct altera_tse_private *priv, int limit)
{
- unsigned int count = 0;
+ unsigned int entry = priv->rx_cons % priv->rx_ring_size;
unsigned int next_entry;
+ unsigned int count = 0;
struct sk_buff *skb;
- unsigned int entry = priv->rx_cons % priv->rx_ring_size;
u32 rxstatus;
u16 pktlength;
u16 pktstatus;
@@ -390,7 +375,7 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
"RCV pktstatus %08X pktlength %08X\n",
pktstatus, pktlength);
- /* DMA trasfer from TSE starts with 2 aditional bytes for
+ /* DMA transfer from TSE starts with 2 additional bytes for
* IP payload alignment. Status returned by get_rx_status()
* contains DMA transfer length. Packet is 2 bytes shorter.
*/
@@ -444,10 +429,10 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
static int tse_tx_complete(struct altera_tse_private *priv)
{
unsigned int txsize = priv->tx_ring_size;
- u32 ready;
- unsigned int entry;
struct tse_buffer *tx_buff;
+ unsigned int entry;
int txcomplete = 0;
+ u32 ready;
spin_lock(&priv->tx_lock);
@@ -493,8 +478,8 @@ static int tse_poll(struct napi_struct *napi, int budget)
{
struct altera_tse_private *priv =
container_of(napi, struct altera_tse_private, napi);
- int rxcomplete = 0;
unsigned long int flags;
+ int rxcomplete = 0;
tse_tx_complete(priv);
@@ -557,13 +542,13 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned int nopaged_len = skb_headlen(skb);
unsigned int txsize = priv->tx_ring_size;
- unsigned int entry;
- struct tse_buffer *buffer = NULL;
int nfrags = skb_shinfo(skb)->nr_frags;
- unsigned int nopaged_len = skb_headlen(skb);
+ struct tse_buffer *buffer = NULL;
netdev_tx_t ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
+ unsigned int entry;
spin_lock_bh(&priv->tx_lock);
@@ -615,117 +600,6 @@ out:
return ret;
}
-/* Called every time the controller might need to be made
- * aware of new link state. The PHY code conveys this
- * information through variables in the phydev structure, and this
- * function converts those variables into the appropriate
- * register values, and can bring down the device if needed.
- */
-static void altera_tse_adjust_link(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- int new_state = 0;
-
- /* only change config if there is a link */
- spin_lock(&priv->mac_cfg_lock);
- if (phydev->link) {
- /* Read old config */
- u32 cfg_reg = ioread32(&priv->mac_dev->command_config);
-
- /* Check duplex */
- if (phydev->duplex != priv->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
- cfg_reg |= MAC_CMDCFG_HD_ENA;
- else
- cfg_reg &= ~MAC_CMDCFG_HD_ENA;
-
- netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
- dev->name, phydev->duplex);
-
- priv->oldduplex = phydev->duplex;
- }
-
- /* Check speed */
- if (phydev->speed != priv->oldspeed) {
- new_state = 1;
- switch (phydev->speed) {
- case 1000:
- cfg_reg |= MAC_CMDCFG_ETH_SPEED;
- cfg_reg &= ~MAC_CMDCFG_ENA_10;
- break;
- case 100:
- cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
- cfg_reg &= ~MAC_CMDCFG_ENA_10;
- break;
- case 10:
- cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
- cfg_reg |= MAC_CMDCFG_ENA_10;
- break;
- default:
- if (netif_msg_link(priv))
- netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
- phydev->speed);
- break;
- }
- priv->oldspeed = phydev->speed;
- }
- iowrite32(cfg_reg, &priv->mac_dev->command_config);
-
- if (!priv->oldlink) {
- new_state = 1;
- priv->oldlink = 1;
- }
- } else if (priv->oldlink) {
- new_state = 1;
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
- }
-
- if (new_state && netif_msg_link(priv))
- phy_print_status(phydev);
-
- spin_unlock(&priv->mac_cfg_lock);
-}
-static struct phy_device *connect_local_phy(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = NULL;
- char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-
- if (priv->phy_addr != POLL_PHY) {
- snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
- priv->mdio->id, priv->phy_addr);
-
- netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);
-
- phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
- priv->phy_iface);
- if (IS_ERR(phydev)) {
- netdev_err(dev, "Could not attach to PHY\n");
- phydev = NULL;
- }
-
- } else {
- int ret;
- phydev = phy_find_first(priv->mdio);
- if (phydev == NULL) {
- netdev_err(dev, "No PHY found\n");
- return phydev;
- }
-
- ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
- priv->phy_iface);
- if (ret != 0) {
- netdev_err(dev, "Could not attach to PHY\n");
- phydev = NULL;
- }
- }
- return phydev;
-}
-
static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
@@ -764,91 +638,6 @@ static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
return 0;
}
-/* Initialize driver's PHY state, and attach to the PHY
- */
-static int init_phy(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev;
- struct device_node *phynode;
- bool fixed_link = false;
- int rc = 0;
-
- /* Avoid init phy in case of no phy present */
- if (!priv->phy_iface)
- return 0;
-
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
-
- phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
-
- if (!phynode) {
- /* check if a fixed-link is defined in device-tree */
- if (of_phy_is_fixed_link(priv->device->of_node)) {
- rc = of_phy_register_fixed_link(priv->device->of_node);
- if (rc < 0) {
- netdev_err(dev, "cannot register fixed PHY\n");
- return rc;
- }
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- phynode = of_node_get(priv->device->of_node);
- fixed_link = true;
-
- netdev_dbg(dev, "fixed-link detected\n");
- phydev = of_phy_connect(dev, phynode,
- &altera_tse_adjust_link,
- 0, priv->phy_iface);
- } else {
- netdev_dbg(dev, "no phy-handle found\n");
- if (!priv->mdio) {
- netdev_err(dev, "No phy-handle nor local mdio specified\n");
- return -ENODEV;
- }
- phydev = connect_local_phy(dev);
- }
- } else {
- netdev_dbg(dev, "phy-handle found\n");
- phydev = of_phy_connect(dev, phynode,
- &altera_tse_adjust_link, 0, priv->phy_iface);
- }
- of_node_put(phynode);
-
- if (!phydev) {
- netdev_err(dev, "Could not find the PHY\n");
- if (fixed_link)
- of_phy_deregister_fixed_link(priv->device->of_node);
- return -ENODEV;
- }
-
- /* Stop Advertising 1000BASE Capability if interface is not GMII
- */
- if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
- (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
- phy_set_max_speed(phydev, SPEED_100);
-
- /* Broken HW is sometimes missing the pull-up resistor on the
- * MDIO line, which results in reads to non-existent devices returning
- * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
- * device as well. If a fixed-link is used the phy_id is always 0.
- * Note: phydev->phy_id is the result of reading the UID PHY registers.
- */
- if ((phydev->phy_id == 0) && !fixed_link) {
- netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
- phy_disconnect(phydev);
- return -ENODEV;
- }
-
- netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
- phydev->mdio.addr, phydev->phy_id, phydev->link);
-
- return 0;
-}
-
static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr)
{
u32 msb;
@@ -1008,8 +797,8 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
static void altera_tse_set_mcfilter(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int i;
struct netdev_hw_addr *ha;
+ int i;
/* clear the hash filter */
for (i = 0; i < 64; i++)
@@ -1044,7 +833,7 @@ static void altera_tse_set_mcfilterall(struct net_device *dev)
csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}
-/* Set or clear the multicast filter for this adaptor
+/* Set or clear the multicast filter for this adapter
*/
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
@@ -1064,7 +853,7 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev)
spin_unlock(&priv->mac_cfg_lock);
}
-/* Set or clear the multicast filter for this adaptor
+/* Set or clear the multicast filter for this adapter
*/
static void tse_set_rx_mode(struct net_device *dev)
{
@@ -1083,74 +872,14 @@ static void tse_set_rx_mode(struct net_device *dev)
spin_unlock(&priv->mac_cfg_lock);
}
-/* Initialise (if necessary) the SGMII PCS component
- */
-static int init_sgmii_pcs(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- int n;
- unsigned int tmp_reg = 0;
-
- if (priv->phy_iface != PHY_INTERFACE_MODE_SGMII)
- return 0; /* Nothing to do, not in SGMII mode */
-
- /* The TSE SGMII PCS block looks a little like a PHY, it is
- * mapped into the zeroth MDIO space of the MAC and it has
- * ID registers like a PHY would. Sadly this is often
- * configured to zeroes, so don't be surprised if it does
- * show 0x00000000.
- */
-
- if (sgmii_pcs_scratch_test(priv, 0x0000) &&
- sgmii_pcs_scratch_test(priv, 0xffff) &&
- sgmii_pcs_scratch_test(priv, 0xa5a5) &&
- sgmii_pcs_scratch_test(priv, 0x5a5a)) {
- netdev_info(dev, "PCS PHY ID: 0x%04x%04x\n",
- sgmii_pcs_read(priv, MII_PHYSID1),
- sgmii_pcs_read(priv, MII_PHYSID2));
- } else {
- netdev_err(dev, "SGMII PCS Scratch memory test failed.\n");
- return -ENOMEM;
- }
-
- /* Starting on page 5-29 of the MegaCore Function User Guide
- * Set SGMII Link timer to 1.6ms
- */
- sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_0, 0x0D40);
- sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_1, 0x03);
-
- /* Enable SGMII Interface and Enable SGMII Auto Negotiation */
- sgmii_pcs_write(priv, SGMII_PCS_IF_MODE, 0x3);
-
- /* Enable Autonegotiation */
- tmp_reg = sgmii_pcs_read(priv, MII_BMCR);
- tmp_reg |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
- sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
-
- /* Reset PCS block */
- tmp_reg |= BMCR_RESET;
- sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
- for (n = 0; n < SGMII_PCS_SW_RESET_TIMEOUT; n++) {
- if (!(sgmii_pcs_read(priv, MII_BMCR) & BMCR_RESET)) {
- netdev_info(dev, "SGMII PCS block initialised OK\n");
- return 0;
- }
- udelay(1);
- }
-
- /* We failed to reset the block, return a timeout */
- netdev_err(dev, "SGMII PCS block reset failed.\n");
- return -ETIMEDOUT;
-}
-
/* Open and initialize the interface
*/
static int tse_open(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned long flags;
int ret = 0;
int i;
- unsigned long int flags;
/* Reset and configure TSE MAC and probe associated PHY */
ret = priv->dmaops->init_dma(priv);
@@ -1167,14 +896,6 @@ static int tse_open(struct net_device *dev)
netdev_warn(dev, "TSE revision %x\n", priv->revision);
spin_lock(&priv->mac_cfg_lock);
- /* no-op if MAC not operating in SGMII mode*/
- ret = init_sgmii_pcs(dev);
- if (ret) {
- netdev_err(dev,
- "Cannot init the SGMII PCS (error: %d)\n", ret);
- spin_unlock(&priv->mac_cfg_lock);
- goto phy_error;
- }
ret = reset_mac(priv);
/* Note that reset_mac will fail if the clocks are gated by the PHY
@@ -1232,8 +953,12 @@ static int tse_open(struct net_device *dev)
spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
- if (dev->phydev)
- phy_start(dev->phydev);
+ ret = phylink_of_phy_connect(priv->phylink, priv->device->of_node, 0);
+ if (ret) {
+ netdev_err(dev, "could not connect phylink (%d)\n", ret);
+ goto tx_request_irq_error;
+ }
+ phylink_start(priv->phylink);
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -1261,13 +986,11 @@ phy_error:
static int tse_shutdown(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int ret;
unsigned long int flags;
+ int ret;
- /* Stop the PHY */
- if (dev->phydev)
- phy_stop(dev->phydev);
-
+ phylink_stop(priv->phylink);
+ phylink_disconnect_phy(priv->phylink);
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1313,11 +1036,78 @@ static struct net_device_ops altera_tse_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+static void alt_tse_mac_an_restart(struct phylink_config *config)
+{
+}
+
+static void alt_tse_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ spin_lock(&priv->mac_cfg_lock);
+ reset_mac(priv);
+ tse_set_mac(priv, true);
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+static void alt_tse_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+}
+
+static void alt_tse_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+ u32 ctrl;
+
+ ctrl = csrrd32(priv->mac_dev, tse_csroffs(command_config));
+ ctrl &= ~(MAC_CMDCFG_ENA_10 | MAC_CMDCFG_ETH_SPEED | MAC_CMDCFG_HD_ENA);
+
+ if (duplex == DUPLEX_HALF)
+ ctrl |= MAC_CMDCFG_HD_ENA;
+
+ if (speed == SPEED_1000)
+ ctrl |= MAC_CMDCFG_ETH_SPEED;
+ else if (speed == SPEED_10)
+ ctrl |= MAC_CMDCFG_ENA_10;
+
+ spin_lock(&priv->mac_cfg_lock);
+ csrwr32(ctrl, priv->mac_dev, tse_csroffs(command_config));
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+static struct phylink_pcs *alt_tse_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_1000BASEX)
+ return priv->pcs;
+ else
+ return NULL;
+}
+
+static const struct phylink_mac_ops alt_tse_phylink_ops = {
+ .mac_an_restart = alt_tse_mac_an_restart,
+ .mac_config = alt_tse_mac_config,
+ .mac_link_down = alt_tse_mac_link_down,
+ .mac_link_up = alt_tse_mac_link_up,
+ .mac_select_pcs = alt_tse_select_pcs,
+};
+
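
The phylink_mac_ops added above let phylink drive the MAC: mac_link_up re-encodes the negotiated speed and duplex into command_config, with ETH_SPEED set for 1000 Mb/s, ENA_10 set for 10 Mb/s, both clear for 100 Mb/s, and HD_ENA for half duplex. A standalone model of that encoding; the bit positions below are placeholders, not the real TSE register layout:

/* Standalone model of the speed/duplex encoding done in alt_tse_mac_link_up();
 * the bit positions are placeholders, not the TSE register layout.
 */
#include <stdint.h>
#include <stdio.h>

#define CFG_ETH_SPEED (1u << 0)   /* placeholder for MAC_CMDCFG_ETH_SPEED */
#define CFG_ENA_10    (1u << 1)   /* placeholder for MAC_CMDCFG_ENA_10    */
#define CFG_HD_ENA    (1u << 2)   /* placeholder for MAC_CMDCFG_HD_ENA    */

static uint32_t encode_link(uint32_t ctrl, int speed, int full_duplex)
{
	ctrl &= ~(CFG_ETH_SPEED | CFG_ENA_10 | CFG_HD_ENA);

	if (!full_duplex)
		ctrl |= CFG_HD_ENA;
	if (speed == 1000)
		ctrl |= CFG_ETH_SPEED;
	else if (speed == 10)
		ctrl |= CFG_ENA_10;
	/* 100 Mb/s leaves both speed bits clear */
	return ctrl;
}

int main(void)
{
	printf("1000/full -> %#x\n", encode_link(0, 1000, 1));
	printf("100/half  -> %#x\n", encode_link(0, 100, 0));
	printf("10/full   -> %#x\n", encode_link(0, 10, 1));
	return 0;
}
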
static int request_and_map(struct platform_device *pdev, const char *name,
struct resource **res, void __iomem **ptr)
{
- struct resource *region;
struct device *device = &pdev->dev;
+ struct resource *region;
*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
if (*res == NULL) {
@@ -1346,13 +1136,15 @@ static int request_and_map(struct platform_device *pdev, const char *name,
*/
static int altera_tse_probe(struct platform_device *pdev)
{
- struct net_device *ndev;
- int ret = -ENODEV;
+ const struct of_device_id *of_id = NULL;
+ struct altera_tse_private *priv;
struct resource *control_port;
struct resource *dma_res;
- struct altera_tse_private *priv;
+ struct resource *pcs_res;
+ struct net_device *ndev;
void __iomem *descmap;
- const struct of_device_id *of_id = NULL;
+ int pcs_reg_width = 2;
+ int ret = -ENODEV;
ndev = alloc_etherdev(sizeof(struct altera_tse_private));
if (!ndev) {
@@ -1463,6 +1255,17 @@ static int altera_tse_probe(struct platform_device *pdev)
if (ret)
goto err_free_netdev;
+ /* SGMII PCS address space. The location can vary depending on how the
+ * IP is integrated. We can have a resource dedicated to it in a specific
+ * address space, but if that's not the case, we fall back to the mdio_phy0
+ * window in the MAC's address space.
+ */
+ ret = request_and_map(pdev, "pcs", &pcs_res,
+ &priv->pcs_base);
+ if (ret) {
+ priv->pcs_base = priv->mac_dev + tse_csroffs(mdio_phy0);
+ pcs_reg_width = 4;
+ }
/* Rx IRQ */
priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
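
The pcs_reg_width chosen above (2 for a dedicated "pcs" resource, 4 for the mdio_phy0 fallback) only changes how a 16-bit PCS register index maps to a byte offset. A hypothetical accessor illustrating the two strides, using the SGMII IF_MODE register index from the removed defines; this is not the pcs-altera-tse implementation:

/* Sketch of register index -> byte offset for the two PCS register widths. */
#include <stdio.h>

static unsigned int pcs_offset(unsigned int regnum, unsigned int reg_width)
{
	return regnum * reg_width;
}

int main(void)
{
	unsigned int regnum = 0x14;	/* e.g. the SGMII IF_MODE register */

	printf("dedicated map: +0x%x\n", pcs_offset(regnum, 2));
	printf("mdio_phy0 map: +0x%x\n", pcs_offset(regnum, 4));
	return 0;
}
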
@@ -1562,7 +1365,7 @@ static int altera_tse_probe(struct platform_device *pdev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
/* setup NAPI interface */
- netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, tse_poll);
spin_lock_init(&priv->mac_cfg_lock);
spin_lock_init(&priv->tx_lock);
@@ -1586,11 +1389,32 @@ static int altera_tse_probe(struct platform_device *pdev)
(unsigned long) control_port->start, priv->rx_irq,
priv->tx_irq);
- ret = init_phy(ndev);
- if (ret != 0) {
- netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
+ priv->pcs = alt_tse_pcs_create(ndev, priv->pcs_base, pcs_reg_width);
+
+ priv->phylink_config.dev = &ndev->dev;
+ priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
+ MAC_100 | MAC_1000FD;
+
+ phy_interface_set_rgmii(priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ priv->phylink_config.supported_interfaces);
+
+ priv->phylink = phylink_create(&priv->phylink_config,
+ of_fwnode_handle(priv->device->of_node),
+ priv->phy_iface, &alt_tse_phylink_ops);
+ if (IS_ERR(priv->phylink)) {
+ dev_err(&pdev->dev, "failed to create phylink\n");
+ ret = PTR_ERR(priv->phylink);
goto err_init_phy;
}
+
return 0;
err_init_phy:
@@ -1610,16 +1434,10 @@ static int altera_tse_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct altera_tse_private *priv = netdev_priv(ndev);
- if (ndev->phydev) {
- phy_disconnect(ndev->phydev);
-
- if (of_phy_is_fixed_link(priv->device->of_node))
- of_phy_deregister_fixed_link(priv->device->of_node);
- }
-
platform_set_drvdata(pdev, NULL);
altera_tse_mdio_destroy(ndev);
unregister_netdev(ndev);
+ phylink_destroy(priv->phylink);
free_netdev(ndev);
return 0;
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
index b7d772f2dcbb..3c2e32fb7389 100644
--- a/drivers/net/ethernet/altera/altera_utils.h
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -3,11 +3,12 @@
* Copyright (C) 2014 Altera Corporation. All rights reserved
*/
-#include <linux/kernel.h>
-
#ifndef __ALTERA_UTILS_H__
#define __ALTERA_UTILS_H__
+#include <linux/compiler.h>
+#include <linux/types.h>
+
void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask);
int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask);
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index f5ec35fa4c63..466ad9470d1f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -48,6 +48,11 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
};
+/* device capabilities */
+enum ena_admin_aq_caps_id {
+ ENA_ADMIN_ENI_STATS = 0,
+};
+
enum ena_admin_placement_policy_type {
/* descriptors and headers are in host memory */
ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
@@ -455,7 +460,10 @@ struct ena_admin_device_attr_feature_desc {
*/
u32 supported_features;
- u32 reserved3;
+ /* bitmap of ena_admin_aq_caps_id, which represents device
+ * capabilities.
+ */
+ u32 capabilities;
/* Indicates how many bits are used physical address access. */
u32 phys_addr_width;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index ab413fc1f68e..451c3a1b6255 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1971,6 +1971,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
sizeof(get_resp.u.dev_attr));
ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
+ ena_dev->capabilities = get_resp.u.dev_attr.capabilities;
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
rc = ena_com_get_feature(ena_dev, &get_resp,
@@ -2223,6 +2224,13 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
struct ena_com_stats_ctx ctx;
int ret;
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+ netdev_err(ena_dev->net_device,
+ "Capability %d isn't supported\n",
+ ENA_ADMIN_ENI_STATS);
+ return -EOPNOTSUPP;
+ }
+
memset(&ctx, 0x0, sizeof(ctx));
ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
if (likely(ret == 0))
@@ -2392,29 +2400,18 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return -EOPNOTSUPP;
}
- switch (func) {
- case ENA_ADMIN_TOEPLITZ:
- if (key) {
- if (key_len != sizeof(hash_key->key)) {
- netdev_err(ena_dev->net_device,
- "key len (%u) doesn't equal the supported size (%zu)\n",
- key_len, sizeof(hash_key->key));
- return -EINVAL;
- }
- memcpy(hash_key->key, key, key_len);
- rss->hash_init_val = init_val;
- hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
+ if ((func == ENA_ADMIN_TOEPLITZ) && key) {
+ if (key_len != sizeof(hash_key->key)) {
+ netdev_err(ena_dev->net_device,
+ "key len (%u) doesn't equal the supported size (%zu)\n",
+ key_len, sizeof(hash_key->key));
+ return -EINVAL;
}
- break;
- case ENA_ADMIN_CRC32:
- rss->hash_init_val = init_val;
- break;
- default:
- netdev_err(ena_dev->net_device, "Invalid hash function (%d)\n",
- func);
- return -EINVAL;
+ memcpy(hash_key->key, key, key_len);
+ hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
}
+ rss->hash_init_val = init_val;
old_func = rss->hash_func;
rss->hash_func = func;
rc = ena_com_set_hash_function(ena_dev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 73b03ce59412..3c5081d9d25d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -314,6 +314,7 @@ struct ena_com_dev {
struct ena_rss rss;
u32 supported_features;
+ u32 capabilities;
u32 dma_addr_bits;
struct ena_host_attribute host_attr;
@@ -967,6 +968,18 @@ static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_d
ena_dev->adaptive_coalescing = false;
}
+/* ena_com_get_cap - query whether device supports a capability.
+ * @ena_dev: ENA communication layer struct
+ * @cap_id: enum value representing the capability
+ *
+ * @return - true if capability is supported or false otherwise
+ */
+static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
+ enum ena_admin_aq_caps_id cap_id)
+{
+ return !!(ena_dev->capabilities & BIT(cap_id));
+}
+
/* ena_com_update_intr_reg - Prepare interrupt register
* @intr_reg: interrupt register to update.
* @rx_delay_interval: Rx interval in usecs
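
ena_com_get_cap() above simply tests one bit of the capabilities word the device returned during feature negotiation. A user-space model of the same check (mirrors the semantics, not driver code):

/* User-space model of the new capability bitmap query. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(nr) (1u << (nr))

enum caps_id { CAP_ENI_STATS = 0 };     /* mirrors ena_admin_aq_caps_id */

static bool has_cap(uint32_t capabilities, enum caps_id cap)
{
	return !!(capabilities & BIT(cap));
}

int main(void)
{
	uint32_t capabilities = BIT(CAP_ENI_STATS);   /* as reported by the device */

	printf("ENI stats supported: %d\n", has_cap(capabilities, CAP_ENI_STATS));
	printf("no capabilities:     %d\n", has_cap(0, CAP_ENI_STATS));
	return 0;
}
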
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 689313ee25a8..372b259279ec 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -10,6 +10,10 @@
/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
#define ENA_COMP_HEAD_THRESH 4
+/* we allow 2 DMA descriptors per LLQ entry */
+#define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE (2 * sizeof(struct ena_eth_io_tx_desc))
+#define ENA_LLQ_HEADER (128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
+#define ENA_LLQ_LARGE_HEADER (256UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
struct ena_com_tx_ctx {
struct ena_com_tx_meta ena_meta;
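
The new defines reserve room for two DMA descriptors at the head of each LLQ entry and treat the rest as pushed-header space. A worked example of that arithmetic, assuming a 16-byte TX descriptor (sizeof(struct ena_eth_io_tx_desc) in the real driver; the value here is an assumption for illustration):

/* Worked example of the ENA_LLQ_HEADER arithmetic. */
#include <stdio.h>

#define TX_DESC_SIZE     16UL                          /* assumed descriptor size */
#define DESC_CHUNK_SIZE  (2 * TX_DESC_SIZE)            /* 2 descriptors per entry */
#define LLQ_HEADER       (128UL - DESC_CHUNK_SIZE)     /* header room, 128B entry */
#define LLQ_LARGE_HEADER (256UL - DESC_CHUNK_SIZE)     /* header room, 256B entry */

int main(void)
{
	printf("128B LLQ entry leaves %lu bytes for pushed headers\n", LLQ_HEADER);
	printf("256B LLQ entry leaves %lu bytes for pushed headers\n", LLQ_LARGE_HEADER);
	return 0;
}
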
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 13e745cf3781..d671df4b76bc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -82,7 +82,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
ENA_STAT_RX_ENTRY(csum_good),
ENA_STAT_RX_ENTRY(refil_partial),
- ENA_STAT_RX_ENTRY(bad_csum),
+ ENA_STAT_RX_ENTRY(csum_bad),
ENA_STAT_RX_ENTRY(page_alloc_fail),
ENA_STAT_RX_ENTRY(skb_alloc_fail),
ENA_STAT_RX_ENTRY(dma_mapping_err),
@@ -110,8 +110,7 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
-#define ENA_STATS_ARRAY_ENI(adapter) \
- (ARRAY_SIZE(ena_stats_eni_strings) * (adapter)->eni_stats_supported)
+#define ENA_STATS_ARRAY_ENI(adapter) ARRAY_SIZE(ena_stats_eni_strings)
static void ena_safe_update_stat(u64 *src, u64 *dst,
struct u64_stats_sync *syncp)
@@ -119,9 +118,9 @@ static void ena_safe_update_stat(u64 *src, u64 *dst,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(syncp);
+ start = u64_stats_fetch_begin(syncp);
*(dst) = *src;
- } while (u64_stats_fetch_retry_irq(syncp, start));
+ } while (u64_stats_fetch_retry(syncp, start));
}
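
The hunk above swaps the _irq stats-fetch helpers for u64_stats_fetch_begin()/u64_stats_fetch_retry(): the reader re-reads the counter until it observes a stable sequence count. A simplified, single-threaded model of that retry read (not the kernel primitives):

/* Simplified model of the seqcount-style retry read; illustration only. */
#include <stdint.h>
#include <stdio.h>

struct stat { uint32_t seq; uint64_t value; };

static uint32_t fetch_begin(const struct stat *s) { return s->seq; }
static int fetch_retry(const struct stat *s, uint32_t start)
{
	/* retry if a writer was active (odd seq) or the seq moved */
	return (start & 1) || s->seq != start;
}

int main(void)
{
	struct stat s = { .seq = 4, .value = 1234 };
	uint64_t snapshot;
	uint32_t start;

	do {
		start = fetch_begin(&s);
		snapshot = s.value;
	} while (fetch_retry(&s, start));

	printf("stable snapshot: %llu\n", (unsigned long long)snapshot);
	return 0;
}
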
static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
@@ -213,8 +212,9 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *dev = adapter->ena_dev;
- ena_get_stats(adapter, data, adapter->eni_stats_supported);
+ ena_get_stats(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
}
static int ena_get_sw_stats_count(struct ena_adapter *adapter)
@@ -226,7 +226,9 @@ static int ena_get_sw_stats_count(struct ena_adapter *adapter)
static int ena_get_hw_stats_count(struct ena_adapter *adapter)
{
- return ENA_STATS_ARRAY_ENI(adapter);
+ bool supported = ena_com_get_cap(adapter->ena_dev, ENA_ADMIN_ENI_STATS);
+
+ return ENA_STATS_ARRAY_ENI(adapter) * supported;
}
int ena_get_sset_count(struct net_device *netdev, int sset)
@@ -316,10 +318,11 @@ static void ena_get_ethtool_strings(struct net_device *netdev,
u8 *data)
{
struct ena_adapter *adapter = netdev_priv(netdev);
+ struct ena_com_dev *dev = adapter->ena_dev;
switch (sset) {
case ETH_SS_STATS:
- ena_get_strings(adapter, data, adapter->eni_stats_supported);
+ ena_get_strings(adapter, data, ena_com_get_cap(dev, ENA_ADMIN_ENI_STATS));
break;
}
}
@@ -459,27 +462,47 @@ static void ena_get_drvinfo(struct net_device *dev,
{
struct ena_adapter *adapter = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
static void ena_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ena_adapter *adapter = netdev_priv(netdev);
ring->tx_max_pending = adapter->max_tx_ring_size;
ring->rx_max_pending = adapter->max_rx_ring_size;
+ if (adapter->ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ bool large_llq_supported = adapter->large_llq_header_supported;
+
+ kernel_ring->tx_push = true;
+ kernel_ring->tx_push_buf_len = adapter->ena_dev->tx_max_header_size;
+ if (large_llq_supported)
+ kernel_ring->tx_push_buf_max_len = ENA_LLQ_LARGE_HEADER;
+ else
+ kernel_ring->tx_push_buf_max_len = ENA_LLQ_HEADER;
+ } else {
+ kernel_ring->tx_push = false;
+ kernel_ring->tx_push_buf_max_len = 0;
+ kernel_ring->tx_push_buf_len = 0;
+ }
+
ring->tx_pending = adapter->tx_ring[0].ring_size;
ring->rx_pending = adapter->rx_ring[0].ring_size;
}
static int ena_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- u32 new_tx_size, new_rx_size;
+ u32 new_tx_size, new_rx_size, new_tx_push_buf_len;
+ bool changed = false;
new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
ENA_MIN_RING_SIZE : ring->tx_pending;
@@ -489,11 +512,51 @@ static int ena_set_ringparam(struct net_device *netdev,
ENA_MIN_RING_SIZE : ring->rx_pending;
new_rx_size = rounddown_pow_of_two(new_rx_size);
- if (new_tx_size == adapter->requested_tx_ring_size &&
- new_rx_size == adapter->requested_rx_ring_size)
+ changed |= new_tx_size != adapter->requested_tx_ring_size ||
+ new_rx_size != adapter->requested_rx_ring_size;
+
+ /* This value is ignored if LLQ is not supported */
+ new_tx_push_buf_len = adapter->ena_dev->tx_max_header_size;
+
+ if ((adapter->ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) !=
+ kernel_ring->tx_push) {
+ NL_SET_ERR_MSG_MOD(extack, "Push mode state cannot be modified");
+ return -EINVAL;
+ }
+
+ /* Validate that the push buffer is supported on the underlying device */
+ if (kernel_ring->tx_push_buf_len) {
+ enum ena_admin_placement_policy_type placement;
+
+ new_tx_push_buf_len = kernel_ring->tx_push_buf_len;
+
+ placement = adapter->ena_dev->tx_mem_queue_type;
+ if (placement == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return -EOPNOTSUPP;
+
+ if (new_tx_push_buf_len != ENA_LLQ_HEADER &&
+ new_tx_push_buf_len != ENA_LLQ_LARGE_HEADER) {
+ bool large_llq_sup = adapter->large_llq_header_supported;
+ char large_llq_size_str[40];
+
+ snprintf(large_llq_size_str, 40, ", %lu", ENA_LLQ_LARGE_HEADER);
+
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Supported tx push buff values: [%lu%s]",
+ ENA_LLQ_HEADER,
+ large_llq_sup ? large_llq_size_str : "");
+
+ return -EINVAL;
+ }
+
+ changed |= new_tx_push_buf_len != adapter->ena_dev->tx_max_header_size;
+ }
+
+ if (!changed)
return 0;
- return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size);
+ return ena_update_queue_params(adapter, new_tx_size, new_rx_size,
+ new_tx_push_buf_len);
}
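
The ring-param change above only accepts a tx_push_buf_len when LLQ placement is in device memory and the length matches one of the two supported entry budgets. A pure-function sketch of that validation; the 96/224 values follow the ENA_LLQ_HEADER arithmetic shown earlier and are illustrative:

/* Pure-function sketch of the push-buffer length validation. */
#include <stdbool.h>
#include <stdio.h>

#define LLQ_HEADER       96u
#define LLQ_LARGE_HEADER 224u

static int validate_push_buf_len(unsigned int len, bool llq_enabled)
{
	if (!llq_enabled)
		return -1;                      /* -EOPNOTSUPP in the driver */
	if (len != LLQ_HEADER && len != LLQ_LARGE_HEADER)
		return -2;                      /* -EINVAL in the driver */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       validate_push_buf_len(96, true),
	       validate_push_buf_len(200, true),
	       validate_push_buf_len(224, false));
	return 0;
}
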
static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
@@ -843,11 +906,20 @@ static int ena_set_channels(struct net_device *netdev,
struct ena_adapter *adapter = netdev_priv(netdev);
u32 count = channels->combined_count;
/* The check for max value is already done in ethtool */
- if (count < ENA_MIN_NUM_IO_QUEUES ||
- (ena_xdp_present(adapter) &&
- !ena_xdp_legal_queue_count(adapter, count)))
+ if (count < ENA_MIN_NUM_IO_QUEUES)
return -EINVAL;
+ if (!ena_xdp_legal_queue_count(adapter, count)) {
+ if (ena_xdp_present(adapter))
+ return -EINVAL;
+
+ xdp_clear_features_flag(netdev);
+ } else {
+ xdp_set_features_flag(netdev,
+ NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT);
+ }
+
return ena_update_queue_count(adapter, count);
}
@@ -880,11 +952,7 @@ static int ena_set_tunable(struct net_device *netdev,
switch (tuna->id) {
case ETHTOOL_RX_COPYBREAK:
len = *(u32 *)data;
- if (len > adapter->netdev->mtu) {
- ret = -EINVAL;
- break;
- }
- adapter->rx_copybreak = len;
+ ret = ena_set_rx_copybreak(adapter, len);
break;
default:
ret = -EINVAL;
@@ -897,6 +965,8 @@ static int ena_set_tunable(struct net_device *netdev,
static const struct ethtool_ops ena_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ .supported_ring_params = ETHTOOL_RING_USE_TX_PUSH_BUF_LEN |
+ ETHTOOL_RING_USE_TX_PUSH,
.get_link_ksettings = ena_get_link_ksettings,
.get_drvinfo = ena_get_drvinfo,
.get_msglevel = ena_get_msglevel,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 7d5d885d85d5..e6a6efaeb87c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -31,8 +31,6 @@ MODULE_LICENSE("GPL");
#define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())
-#define ENA_NAPI_BUDGET 64
-
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
@@ -103,7 +101,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return;
- adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
+ ena_reset_device(adapter, ENA_REGS_RESET_OS_NETDEV_WD);
ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
netif_err(adapter, tx_err, dev, "Transmit time out\n");
@@ -166,11 +164,9 @@ static int ena_xmit_common(struct net_device *dev,
"Failed to prepare tx bufs\n");
ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
&ring->syncp);
- if (rc != -ENOMEM) {
- adapter->reset_reason =
- ENA_REGS_RESET_DRIVER_INVALID_STATE;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
- }
+ if (rc != -ENOMEM)
+ ena_reset_device(adapter,
+ ENA_REGS_RESET_DRIVER_INVALID_STATE);
return rc;
}
@@ -378,9 +374,9 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
+ u32 verdict = ENA_XDP_PASS;
struct bpf_prog *xdp_prog;
struct ena_ring *xdp_ring;
- u32 verdict = XDP_PASS;
struct xdp_frame *xdpf;
u64 *xdp_stat;
@@ -397,7 +393,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
if (unlikely(!xdpf)) {
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- verdict = XDP_ABORTED;
+ verdict = ENA_XDP_DROP;
break;
}
@@ -413,29 +409,35 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
spin_unlock(&xdp_ring->xdp_tx_lock);
xdp_stat = &rx_ring->rx_stats.xdp_tx;
+ verdict = ENA_XDP_TX;
break;
case XDP_REDIRECT:
if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
xdp_stat = &rx_ring->rx_stats.xdp_redirect;
+ verdict = ENA_XDP_REDIRECT;
break;
}
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_aborted;
- verdict = XDP_ABORTED;
+ verdict = ENA_XDP_DROP;
break;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = ENA_XDP_DROP;
break;
case XDP_DROP:
xdp_stat = &rx_ring->rx_stats.xdp_drop;
+ verdict = ENA_XDP_DROP;
break;
case XDP_PASS:
xdp_stat = &rx_ring->rx_stats.xdp_pass;
+ verdict = ENA_XDP_PASS;
break;
default:
- bpf_warn_invalid_xdp_action(verdict);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_invalid;
+ verdict = ENA_XDP_DROP;
}
ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
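
The verdict remap above replaces raw XDP_* action codes with driver-private flag values so per-packet verdicts can be OR-ed into xdp_flags and tested as a group (see the ENA_XDP_FORWARDED check later in the RX path). An illustration of why bit-flag verdicts compose; the exact bit layout below is an assumption, not necessarily the driver's ENA_XDP_* definitions:

/* Illustration of composable bit-flag XDP verdicts; assumed layout. */
#include <stdio.h>

#define XDPV_PASS      0u
#define XDPV_TX        (1u << 0)
#define XDPV_REDIRECT  (1u << 1)
#define XDPV_DROP      (1u << 2)
#define XDPV_FORWARDED (XDPV_TX | XDPV_REDIRECT)

int main(void)
{
	unsigned int flags = 0;
	unsigned int verdicts[] = { XDPV_PASS, XDPV_TX, XDPV_DROP, XDPV_REDIRECT };

	for (unsigned int i = 0; i < sizeof(verdicts) / sizeof(verdicts[0]); i++)
		flags |= verdicts[i];            /* accumulate per-packet verdicts */

	printf("any packet forwarded: %s\n", (flags & XDPV_FORWARDED) ? "yes" : "no");
	return 0;
}
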
@@ -516,16 +518,18 @@ static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
struct bpf_prog *prog,
int first, int count)
{
+ struct bpf_prog *old_bpf_prog;
struct ena_ring *rx_ring;
int i = 0;
for (i = first; i < count; i++) {
rx_ring = &adapter->rx_ring[i];
- xchg(&rx_ring->xdp_bpf_prog, prog);
- if (prog) {
+ old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
+
+ if (!old_bpf_prog && prog) {
ena_xdp_register_rxq_info(rx_ring);
rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
- } else {
+ } else if (old_bpf_prog && !prog) {
ena_xdp_unregister_rxq_info(rx_ring);
rx_ring->rx_headroom = NET_SKB_PAD;
}
@@ -593,7 +597,9 @@ static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
if (rc)
return rc;
}
+ xdp_features_set_redirect_target(netdev, false);
} else if (old_bpf_prog) {
+ xdp_features_clear_redirect_target(netdev);
rc = ena_destroy_and_free_all_xdp_queues(adapter);
if (rc)
return rc;
@@ -676,6 +682,7 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
ring->ena_dev = adapter->ena_dev;
ring->per_napi_packets = 0;
ring->cpu = 0;
+ ring->numa_node = 0;
ring->no_interrupt_event_cnt = 0;
u64_stats_init(&ring->syncp);
}
@@ -779,6 +786,7 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
tx_ring->cpu = ena_irq->cpu;
+ tx_ring->numa_node = node;
return 0;
err_push_buf_intermediate_buf:
@@ -911,6 +919,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
rx_ring->cpu = ena_irq->cpu;
+ rx_ring->numa_node = node;
return 0;
}
@@ -1269,45 +1278,39 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
netif_err(ring->adapter,
tx_done,
ring->netdev,
- "tx_info doesn't have valid %s",
- is_xdp ? "xdp frame" : "skb");
+ "tx_info doesn't have valid %s. qid %u req_id %u",
+ is_xdp ? "xdp frame" : "skb", ring->qid, req_id);
else
netif_err(ring->adapter,
tx_done,
ring->netdev,
- "Invalid req_id: %hu\n",
- req_id);
+ "Invalid req_id %u in qid %u\n",
+ req_id, ring->qid);
ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
+ ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
- /* Trigger device reset */
- ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
- set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
return -EFAULT;
}
static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
- struct ena_tx_buffer *tx_info = NULL;
+ struct ena_tx_buffer *tx_info;
- if (likely(req_id < tx_ring->ring_size)) {
- tx_info = &tx_ring->tx_buffer_info[req_id];
- if (likely(tx_info->skb))
- return 0;
- }
+ tx_info = &tx_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->skb))
+ return 0;
return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}
static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
{
- struct ena_tx_buffer *tx_info = NULL;
+ struct ena_tx_buffer *tx_info;
- if (likely(req_id < xdp_ring->ring_size)) {
- tx_info = &xdp_ring->tx_buffer_info[req_id];
- if (likely(tx_info->xdpf))
- return 0;
- }
+ tx_info = &xdp_ring->tx_buffer_info[req_id];
+ if (likely(tx_info->xdpf))
+ return 0;
return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}
@@ -1332,9 +1335,14 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
&req_id);
- if (rc)
+ if (rc) {
+ if (unlikely(rc == -EINVAL))
+ handle_invalid_req_id(tx_ring, req_id, NULL,
+ false);
break;
+ }
+ /* validate that the request id points to a valid skb */
rc = validate_tx_req_id(tx_ring, req_id);
if (rc)
break;
@@ -1403,10 +1411,9 @@ static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
struct sk_buff *skb;
if (!first_frag)
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_copybreak);
+ skb = napi_alloc_skb(rx_ring->napi, rx_ring->rx_copybreak);
else
- skb = build_skb(first_frag, ENA_PAGE_SIZE);
+ skb = napi_build_skb(first_frag, ENA_PAGE_SIZE);
if (unlikely(!skb)) {
ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
@@ -1427,6 +1434,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
u16 *next_to_clean)
{
struct ena_rx_buffer *rx_info;
+ struct ena_adapter *adapter;
u16 len, req_id, buf = 0;
struct sk_buff *skb;
void *page_addr;
@@ -1439,8 +1447,11 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
rx_info = &rx_ring->rx_buffer_info[req_id];
if (unlikely(!rx_info->page)) {
- netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
- "Page is NULL\n");
+ adapter = rx_ring->adapter;
+ netif_err(adapter, rx_err, rx_ring->netdev,
+ "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
+ ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
+ ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
return NULL;
}
@@ -1550,7 +1561,7 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
(ena_rx_ctx->l3_csum_err))) {
/* ipv4 checksum error */
skb->ip_summed = CHECKSUM_NONE;
- ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
+ ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
&rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX IPv4 header checksum error\n");
@@ -1562,7 +1573,7 @@ static void ena_rx_checksum(struct ena_ring *rx_ring,
(ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
if (unlikely(ena_rx_ctx->l4_csum_err)) {
/* TCP/UDP checksum error */
- ena_increase_stat(&rx_ring->rx_stats.bad_csum, 1,
+ ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
&rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX L4 checksum error\n");
@@ -1621,12 +1632,12 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
* we expect, then we simply drop it
*/
if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
- return XDP_DROP;
+ return ENA_XDP_DROP;
ret = ena_xdp_execute(rx_ring, xdp);
/* The xdp program might expand the headers */
- if (ret == XDP_PASS) {
+ if (ret == ENA_XDP_PASS) {
rx_info->page_offset = xdp->data - xdp->data_hard_start;
rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
}
@@ -1665,7 +1676,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
do {
- xdp_verdict = XDP_PASS;
+ xdp_verdict = ENA_XDP_PASS;
skb = NULL;
ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
ena_rx_ctx.max_bufs = rx_ring->sgl_size;
@@ -1693,7 +1704,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
/* allocate skb and fill it */
- if (xdp_verdict == XDP_PASS)
+ if (xdp_verdict == ENA_XDP_PASS)
skb = ena_rx_skb(rx_ring,
rx_ring->ena_bufs,
ena_rx_ctx.descs,
@@ -1711,14 +1722,15 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
/* Packet was passed for transmission, unmap it
* from RX side.
*/
- if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) {
+ if (xdp_verdict & ENA_XDP_FORWARDED) {
ena_unmap_rx_buff(rx_ring,
&rx_ring->rx_buffer_info[req_id]);
rx_ring->rx_buffer_info[req_id].page = NULL;
}
}
- if (xdp_verdict != XDP_PASS) {
+ if (xdp_verdict != ENA_XDP_PASS) {
xdp_flags |= xdp_verdict;
+ total_len += ena_rx_ctx.ena_bufs[0].len;
res_budget--;
continue;
}
@@ -1762,7 +1774,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
ena_refill_rx_bufs(rx_ring, refill_required);
}
- if (xdp_flags & XDP_REDIRECT)
+ if (xdp_flags & ENA_XDP_REDIRECT)
xdp_do_flush_map();
return work_done;
@@ -1773,15 +1785,12 @@ error:
if (rc == -ENOSPC) {
ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
&rx_ring->syncp);
- adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
+ ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
} else {
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
&rx_ring->syncp);
- adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+ ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
}
-
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
-
return 0;
}
@@ -1819,8 +1828,9 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
struct ena_ring *rx_ring)
{
+ u32 rx_interval = tx_ring->smoothed_interval;
struct ena_eth_io_intr_reg intr_reg;
- u32 rx_interval = 0;
+
/* Rx ring can be NULL for XDP tx queues which don't have an
* accompanying rx_ring pair.
*/
@@ -1858,20 +1868,27 @@ static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
if (likely(tx_ring->cpu == cpu))
goto out;
+ tx_ring->cpu = cpu;
+ if (rx_ring)
+ rx_ring->cpu = cpu;
+
numa_node = cpu_to_node(cpu);
+
+ if (likely(tx_ring->numa_node == numa_node))
+ goto out;
+
put_cpu();
if (numa_node != NUMA_NO_NODE) {
ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
- if (rx_ring)
+ tx_ring->numa_node = numa_node;
+ if (rx_ring) {
+ rx_ring->numa_node = numa_node;
ena_com_update_numa_node(rx_ring->ena_com_io_cq,
numa_node);
+ }
}
- tx_ring->cpu = cpu;
- if (rx_ring)
- rx_ring->cpu = cpu;
-
return;
out:
put_cpu();
@@ -1881,7 +1898,6 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
{
u32 total_done = 0;
u16 next_to_clean;
- u32 tx_bytes = 0;
int tx_pkts = 0;
u16 req_id;
int rc;
@@ -1896,9 +1912,14 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
&req_id);
- if (rc)
+ if (rc) {
+ if (unlikely(rc == -EINVAL))
+ handle_invalid_req_id(xdp_ring, req_id, NULL,
+ true);
break;
+ }
+ /* validate that the request id points to a valid xdp_frame */
rc = validate_xdp_req_id(xdp_ring, req_id);
if (rc)
break;
@@ -1914,7 +1935,6 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
"tx_poll: q %d skb %p completed\n", xdp_ring->qid,
xdpf);
- tx_bytes += xdpf->len;
tx_pkts++;
total_done += tx_info->tx_descs;
@@ -1987,11 +2007,10 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
ena_adjust_adaptive_rx_intr_moderation(ena_napi);
+ ena_update_ring_numa_node(tx_ring, rx_ring);
ena_unmask_interrupt(tx_ring, rx_ring);
}
- ena_update_ring_numa_node(tx_ring, rx_ring);
-
ret = rx_work_done;
} else {
ret = budget;
@@ -2265,10 +2284,8 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter,
for (i = first_index; i < first_index + count; i++) {
struct ena_napi *napi = &adapter->ena_napi[i];
- netif_napi_add(adapter->netdev,
- &napi->napi,
- ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
- ENA_NAPI_BUDGET);
+ netif_napi_add(adapter->netdev, &napi->napi,
+ ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll);
if (!ENA_IS_XDP_INDEX(adapter, i)) {
napi->rx_ring = &adapter->rx_ring[i];
@@ -2378,7 +2395,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
ctx.msix_vector = msix_vector;
ctx.queue_size = tx_ring->ring_size;
- ctx.numa_node = cpu_to_node(tx_ring->cpu);
+ ctx.numa_node = tx_ring->numa_node;
rc = ena_com_create_io_queue(ena_dev, &ctx);
if (rc) {
@@ -2446,7 +2463,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
ctx.msix_vector = msix_vector;
ctx.queue_size = rx_ring->ring_size;
- ctx.numa_node = cpu_to_node(rx_ring->cpu);
+ ctx.numa_node = rx_ring->numa_node;
rc = ena_com_create_io_queue(ena_dev, &ctx);
if (rc) {
@@ -2790,11 +2807,13 @@ static int ena_close(struct net_device *netdev)
return 0;
}
-int ena_update_queue_sizes(struct ena_adapter *adapter,
- u32 new_tx_size,
- u32 new_rx_size)
+int ena_update_queue_params(struct ena_adapter *adapter,
+ u32 new_tx_size,
+ u32 new_rx_size,
+ u32 new_llq_header_len)
{
- bool dev_was_up;
+ bool dev_was_up, large_llq_changed = false;
+ int rc = 0;
dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
ena_close(adapter->netdev);
@@ -2804,7 +2823,39 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
0,
adapter->xdp_num_queues +
adapter->num_io_queues);
- return dev_was_up ? ena_up(adapter) : 0;
+
+ large_llq_changed = adapter->ena_dev->tx_mem_queue_type ==
+ ENA_ADMIN_PLACEMENT_POLICY_DEV;
+ large_llq_changed &=
+ new_llq_header_len != adapter->ena_dev->tx_max_header_size;
+
+ /* a check that the configuration is valid is done by caller */
+ if (large_llq_changed) {
+ adapter->large_llq_header_enabled = !adapter->large_llq_header_enabled;
+
+ ena_destroy_device(adapter, false);
+ rc = ena_restore_device(adapter);
+ }
+
+ return dev_was_up && !rc ? ena_up(adapter) : rc;
+}
+
+int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak)
+{
+ struct ena_ring *rx_ring;
+ int i;
+
+ if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE))
+ return -EINVAL;
+
+ adapter->rx_copybreak = rx_copybreak;
+
+ for (i = 0; i < adapter->num_io_queues; i++) {
+ rx_ring = &adapter->rx_ring[i];
+ rx_ring->rx_copybreak = rx_copybreak;
+ }
+
+ return 0;
}
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
@@ -3166,7 +3217,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
- strlcpy(host_info->kernel_ver_str, utsname()->version,
+ strscpy(host_info->kernel_ver_str, utsname()->version,
sizeof(host_info->kernel_ver_str) - 1);
host_info->os_dist = 0;
strncpy(host_info->os_dist_str, utsname()->release,
@@ -3240,11 +3291,11 @@ err:
int ena_update_hw_stats(struct ena_adapter *adapter)
{
- int rc = 0;
+ int rc;
rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
if (rc) {
- dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n");
+ netdev_err(adapter->netdev, "Failed to get ENI stats\n");
return rc;
}
@@ -3270,10 +3321,10 @@ static void ena_get_stats64(struct net_device *netdev,
tx_ring = &adapter->tx_ring[i];
do {
- start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
+ start = u64_stats_fetch_begin(&tx_ring->syncp);
packets = tx_ring->tx_stats.cnt;
bytes = tx_ring->tx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+ } while (u64_stats_fetch_retry(&tx_ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
@@ -3281,20 +3332,20 @@ static void ena_get_stats64(struct net_device *netdev,
rx_ring = &adapter->rx_ring[i];
do {
- start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
+ start = u64_stats_fetch_begin(&rx_ring->syncp);
packets = rx_ring->rx_stats.cnt;
bytes = rx_ring->rx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+ } while (u64_stats_fetch_retry(&rx_ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
}
do {
- start = u64_stats_fetch_begin_irq(&adapter->syncp);
+ start = u64_stats_fetch_begin(&adapter->syncp);
rx_drops = adapter->dev_stats.rx_drops;
tx_drops = adapter->dev_stats.tx_drops;
- } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
+ } while (u64_stats_fetch_retry(&adapter->syncp, start));
stats->rx_dropped = rx_drops;
stats->tx_dropped = tx_drops;
@@ -3327,6 +3378,98 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_xdp_xmit = ena_xdp_xmit,
};
+static void ena_calc_io_queue_size(struct ena_adapter *adapter,
+ struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+ struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
+ u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
+ u32 max_tx_queue_size;
+ u32 max_rx_queue_size;
+
+ /* If this function is called after driver load, the ring sizes have already
+ * been configured. Take it into account when recalculating ring size.
+ */
+ if (adapter->tx_ring->ring_size)
+ tx_queue_size = adapter->tx_ring->ring_size;
+
+ if (adapter->rx_ring->ring_size)
+ rx_queue_size = adapter->rx_ring->ring_size;
+
+ if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+ struct ena_admin_queue_ext_feature_fields *max_queue_ext =
+ &get_feat_ctx->max_queue_ext.max_queue_ext;
+ max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
+ max_queue_ext->max_rx_sq_depth);
+ max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
+
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ max_tx_queue_size = min_t(u32, max_tx_queue_size,
+ llq->max_llq_depth);
+ else
+ max_tx_queue_size = min_t(u32, max_tx_queue_size,
+ max_queue_ext->max_tx_sq_depth);
+
+ adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queue_ext->max_per_packet_tx_descs);
+ adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queue_ext->max_per_packet_rx_descs);
+ } else {
+ struct ena_admin_queue_feature_desc *max_queues =
+ &get_feat_ctx->max_queues;
+ max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
+ max_queues->max_sq_depth);
+ max_tx_queue_size = max_queues->max_cq_depth;
+
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+ max_tx_queue_size = min_t(u32, max_tx_queue_size,
+ llq->max_llq_depth);
+ else
+ max_tx_queue_size = min_t(u32, max_tx_queue_size,
+ max_queues->max_sq_depth);
+
+ adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queues->max_packet_tx_descs);
+ adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
+ max_queues->max_packet_rx_descs);
+ }
+
+ max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
+ max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
+
+ /* When forcing large headers, we multiply the entry size by 2, and therefore divide
+ * the queue size by 2, leaving the amount of memory used by the queues unchanged.
+ */
+ if (adapter->large_llq_header_enabled) {
+ if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
+ ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ max_tx_queue_size /= 2;
+ dev_info(&adapter->pdev->dev,
+ "Forcing large headers and decreasing maximum TX queue size to %d\n",
+ max_tx_queue_size);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
+
+ adapter->large_llq_header_enabled = false;
+ }
+ }
+
+ tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
+ max_tx_queue_size);
+ rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
+ max_rx_queue_size);
+
+ tx_queue_size = rounddown_pow_of_two(tx_queue_size);
+ rx_queue_size = rounddown_pow_of_two(rx_queue_size);
+
+ adapter->max_tx_ring_size = max_tx_queue_size;
+ adapter->max_rx_ring_size = max_rx_queue_size;
+ adapter->requested_tx_ring_size = tx_queue_size;
+ adapter->requested_rx_ring_size = rx_queue_size;
+}
+
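
ena_calc_io_queue_size() above takes the device maxima, halves the TX maximum when 256-byte LLQ entries are forced (so total queue memory stays the same), clamps the requested size, and rounds everything down to a power of two. A short sketch of that arithmetic, with rounddown_pow_of_two reimplemented and ENA_MIN_RING_SIZE assumed to be 256 for the example:

/* Sketch of the clamp + power-of-two rounding used for ring sizing. */
#include <stdint.h>
#include <stdio.h>

#define ENA_MIN_RING_SIZE 256u

static uint32_t rounddown_pow_of_two(uint32_t n)
{
	uint32_t p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

static uint32_t clamp_val(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	uint32_t max_tx = 1500, requested = 3000;
	int force_large_headers = 1;

	max_tx = rounddown_pow_of_two(max_tx);           /* 1024 */
	if (force_large_headers)
		max_tx /= 2;                             /* 512: 256B entries halve depth */

	requested = clamp_val(requested, ENA_MIN_RING_SIZE, max_tx);
	requested = rounddown_pow_of_two(requested);     /* 512 */

	printf("max %u, requested %u\n", max_tx, requested);
	return 0;
}
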
static int ena_device_validate_params(struct ena_adapter *adapter,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
@@ -3350,13 +3493,30 @@ static int ena_device_validate_params(struct ena_adapter *adapter,
return 0;
}
-static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+static void set_default_llq_configurations(struct ena_adapter *adapter,
+ struct ena_llq_configurations *llq_config,
+ struct ena_admin_feature_llq_desc *llq)
{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+
llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
- llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
- llq_config->llq_ring_entry_size_value = 128;
+
+ adapter->large_llq_header_supported =
+ !!(ena_dev->supported_features & BIT(ENA_ADMIN_LLQ));
+ adapter->large_llq_header_supported &=
+ !!(llq->entry_size_ctrl_supported &
+ ENA_ADMIN_LIST_ENTRY_SIZE_256B);
+
+ if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
+ adapter->large_llq_header_enabled) {
+ llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+ llq_config->llq_ring_entry_size_value = 256;
+ } else {
+ llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+ llq_config->llq_ring_entry_size_value = 128;
+ }
}
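[Editor's note: illustrative sketch, not part of the patch.] set_default_llq_configurations() now selects a 256B LLQ ring entry only when the device advertises ENA_ADMIN_LIST_ENTRY_SIZE_256B and large LLQ headers were requested, falling back to 128B otherwise. A small standalone sketch of that capability-and-request gating; the bit values and struct are simplified stand-ins, not the real ena_admin definitions:

/* Sketch of the entry-size decision above. */
#include <stdbool.h>
#include <stdio.h>

#define LIST_ENTRY_SIZE_128B (1u << 0)	/* hypothetical bit values */
#define LIST_ENTRY_SIZE_256B (1u << 1)

struct llq_cfg {
	unsigned int entry_size_flag;
	unsigned int entry_size_value;
};

static void pick_entry_size(struct llq_cfg *cfg, unsigned int supported,
			    bool large_headers_requested)
{
	if ((supported & LIST_ENTRY_SIZE_256B) && large_headers_requested) {
		cfg->entry_size_flag = LIST_ENTRY_SIZE_256B;
		cfg->entry_size_value = 256;
	} else {
		cfg->entry_size_flag = LIST_ENTRY_SIZE_128B;
		cfg->entry_size_value = 128;
	}
}

int main(void)
{
	struct llq_cfg cfg;

	pick_entry_size(&cfg, LIST_ENTRY_SIZE_128B | LIST_ENTRY_SIZE_256B, true);
	printf("entry size: %u bytes\n", cfg.entry_size_value);	/* 256 */
	return 0;
}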
static int ena_set_queues_placement_policy(struct pci_dev *pdev,
@@ -3375,6 +3535,13 @@ static int ena_set_queues_placement_policy(struct pci_dev *pdev,
return 0;
}
+ if (!ena_dev->mem_bar) {
+ netdev_err(ena_dev->net_device,
+ "LLQ is advertised as supported but device doesn't expose mem bar\n");
+ ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+ return 0;
+ }
+
rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
if (unlikely(rc)) {
dev_err(&pdev->dev,
@@ -3390,15 +3557,8 @@ static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev
{
bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));
- if (!has_mem_bar) {
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- dev_err(&pdev->dev,
- "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
- ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
- }
-
+ if (!has_mem_bar)
return 0;
- }
ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
pci_resource_start(pdev, ENA_MEM_BAR),
@@ -3410,10 +3570,11 @@ static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev
return 0;
}
-static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
+static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
struct ena_com_dev_get_features_ctx *get_feat_ctx,
bool *wd_state)
{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
struct ena_llq_configurations llq_config;
struct device *dev = &pdev->dev;
bool readless_supported;
@@ -3498,7 +3659,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
- set_default_llq_configurations(&llq_config);
+ set_default_llq_configurations(adapter, &llq_config, &get_feat_ctx->llq);
rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
&llq_config);
@@ -3507,6 +3668,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
goto err_admin_init;
}
+ ena_calc_io_queue_size(adapter, get_feat_ctx);
+
return 0;
err_admin_init:
@@ -3601,17 +3764,25 @@ static int ena_restore_device(struct ena_adapter *adapter)
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = adapter->ena_dev;
struct pci_dev *pdev = adapter->pdev;
+ struct ena_ring *txr;
+ int rc, count, i;
bool wd_state;
- int rc;
set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
- rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
+ rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, &wd_state);
if (rc) {
dev_err(&pdev->dev, "Can not initialize device\n");
goto err;
}
adapter->wd_state = wd_state;
+ count = adapter->xdp_num_queues + adapter->num_io_queues;
+ for (i = 0 ; i < count; i++) {
+ txr = &adapter->tx_ring[i];
+ txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
+ txr->tx_max_header_size = ena_dev->tx_max_header_size;
+ }
+
rc = ena_device_validate_params(adapter, &get_feat_ctx);
if (rc) {
dev_err(&pdev->dev, "Validation of device parameters failed\n");
@@ -3641,8 +3812,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
adapter->last_keep_alive_jiffies = jiffies;
- dev_err(&pdev->dev, "Device reset completed successfully\n");
-
return rc;
err_disable_msix:
ena_free_mgmnt_irq(adapter);
@@ -3672,6 +3841,8 @@ static void ena_fw_reset_device(struct work_struct *work)
if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
ena_destroy_device(adapter, false);
ena_restore_device(adapter);
+
+ dev_err(&adapter->pdev->dev, "Device reset completed successfully\n");
}
rtnl_unlock();
@@ -3694,9 +3865,8 @@ static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
netif_err(adapter, rx_err, adapter->netdev,
"Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
rx_ring->qid);
- adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
- smp_mb__before_atomic();
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+
+ ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
return -EIO;
}
@@ -3733,9 +3903,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
netif_err(adapter, tx_err, adapter->netdev,
"Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
tx_ring->qid);
- adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
- smp_mb__before_atomic();
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
return -EIO;
}
@@ -3761,9 +3929,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
"The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
missed_tx,
adapter->missing_tx_completion_threshold);
- adapter->reset_reason =
- ENA_REGS_RESET_MISS_TX_CMPL;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
rc = -EIO;
}
@@ -3884,8 +4050,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
"Keep alive watchdog timeout.\n");
ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
&adapter->syncp);
- adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
}
}
@@ -3896,8 +4061,7 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
"ENA admin queue is not in running state!\n");
ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
&adapter->syncp);
- adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO);
}
}
@@ -4013,10 +4177,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
/* 1 IRQ for mgmnt and 1 IRQs for each IO direction */
max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
- if (unlikely(!max_num_io_queues)) {
- dev_err(&pdev->dev, "The device doesn't have io queues\n");
- return -EFAULT;
- }
return max_num_io_queues;
}
@@ -4101,7 +4261,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(val));
- if (unlikely(rc && (rc != -EOPNOTSUPP))) {
+ if (unlikely(rc)) {
dev_err(dev, "Cannot fill indirect table\n");
goto err_fill_indir;
}
@@ -4136,73 +4296,6 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
pci_release_selected_regions(pdev, release_bars);
}
-
-static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
-{
- struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
- struct ena_com_dev *ena_dev = ctx->ena_dev;
- u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
- u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
- u32 max_tx_queue_size;
- u32 max_rx_queue_size;
-
- if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
- struct ena_admin_queue_ext_feature_fields *max_queue_ext =
- &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
- max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
- max_queue_ext->max_rx_sq_depth);
- max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
-
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
- max_tx_queue_size = min_t(u32, max_tx_queue_size,
- llq->max_llq_depth);
- else
- max_tx_queue_size = min_t(u32, max_tx_queue_size,
- max_queue_ext->max_tx_sq_depth);
-
- ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queue_ext->max_per_packet_tx_descs);
- ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queue_ext->max_per_packet_rx_descs);
- } else {
- struct ena_admin_queue_feature_desc *max_queues =
- &ctx->get_feat_ctx->max_queues;
- max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
- max_queues->max_sq_depth);
- max_tx_queue_size = max_queues->max_cq_depth;
-
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
- max_tx_queue_size = min_t(u32, max_tx_queue_size,
- llq->max_llq_depth);
- else
- max_tx_queue_size = min_t(u32, max_tx_queue_size,
- max_queues->max_sq_depth);
-
- ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queues->max_packet_tx_descs);
- ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
- max_queues->max_packet_rx_descs);
- }
-
- max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
- max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
-
- tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
- max_tx_queue_size);
- rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
- max_rx_queue_size);
-
- tx_queue_size = rounddown_pow_of_two(tx_queue_size);
- rx_queue_size = rounddown_pow_of_two(rx_queue_size);
-
- ctx->max_tx_queue_size = max_tx_queue_size;
- ctx->max_rx_queue_size = max_rx_queue_size;
- ctx->tx_queue_size = tx_queue_size;
- ctx->rx_queue_size = rx_queue_size;
-
- return 0;
-}
-
/* ena_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in ena_pci_tbl
@@ -4215,7 +4308,6 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
*/
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ena_calc_queue_size_ctx calc_queue_ctx = {};
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = NULL;
struct ena_adapter *adapter;
@@ -4286,24 +4378,20 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, adapter);
- rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
+ rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
if (rc) {
- dev_err(&pdev->dev, "ENA device init failed\n");
- if (rc == -ETIME)
- rc = -EPROBE_DEFER;
+ dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n");
goto err_netdev_destroy;
}
- rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
+ rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
if (rc) {
- dev_err(&pdev->dev, "ENA llq bar mapping failed\n");
- goto err_device_destroy;
+ dev_err(&pdev->dev, "ENA device init failed\n");
+ if (rc == -ETIME)
+ rc = -EPROBE_DEFER;
+ goto err_netdev_destroy;
}
- calc_queue_ctx.ena_dev = ena_dev;
- calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
- calc_queue_ctx.pdev = pdev;
-
/* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
* Updated during device initialization with the real granularity
*/
@@ -4311,8 +4399,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
- rc = ena_calc_io_queue_size(&calc_queue_ctx);
- if (rc || !max_num_io_queues) {
+ if (unlikely(!max_num_io_queues)) {
rc = -EFAULT;
goto err_device_destroy;
}
@@ -4321,13 +4408,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
- adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
- adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
- adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
- adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
- adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
- adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
-
adapter->num_io_queues = max_num_io_queues;
adapter->max_num_io_queues = max_num_io_queues;
adapter->last_monitored_tx_qid = 0;
@@ -4351,6 +4431,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Failed to query interrupt moderation feature\n");
goto err_device_destroy;
}
+
ena_init_io_rings(adapter,
0,
adapter->xdp_num_queues +
@@ -4378,10 +4459,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_config_debug_area(adapter);
- if (!ena_update_hw_stats(adapter))
- adapter->eni_stats_supported = true;
- else
- adapter->eni_stats_supported = false;
+ if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT;
memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
@@ -4476,6 +4556,7 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
rtnl_lock(); /* lock released inside the below if-else block */
adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
ena_destroy_device(adapter, true);
+
if (shutdown) {
netif_device_detach(netdev);
dev_close(netdev);
@@ -4574,13 +4655,19 @@ static struct pci_driver ena_pci_driver = {
static int __init ena_init(void)
{
+ int ret;
+
ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
if (!ena_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
}
- return pci_register_driver(&ena_pci_driver);
+ ret = pci_register_driver(&ena_pci_driver);
+ if (ret)
+ destroy_workqueue(ena_wq);
+
+ return ret;
}
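[Editor's note: illustrative sketch, not part of the patch.] ena_init() now destroys the workqueue when pci_register_driver() fails, so a failed module load no longer leaks it. The general unwind-on-failure pattern, sketched with hypothetical stand-ins (step_a/step_b play the roles of create_singlethread_workqueue() and pci_register_driver()):

/* Generic sketch of the unwind pattern used above: any resource acquired
 * before a failing step is released before the error is returned. */
#include <errno.h>
#include <stdio.h>

static int step_a_held;

static int step_a(void) { step_a_held = 1; return 0; }
static void undo_a(void) { step_a_held = 0; }
static int step_b(void) { return -EIO; }	/* pretend registration fails */

static int init_sketch(void)
{
	int ret = step_a();

	if (ret)
		return ret;

	ret = step_b();
	if (ret)
		undo_a();	/* don't leak step_a on failure */

	return ret;
}

int main(void)
{
	printf("init: %d, step_a still held: %d\n", init_sketch(), step_a_held);
	return 0;
}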
static void __exit ena_cleanup(void)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 0c39fc2fa345..5a0d4ee76172 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
+#include <uapi/linux/bpf.h>
#include "ena_com.h"
#include "ena_eth_com.h"
@@ -139,18 +140,6 @@ struct ena_napi {
struct dim dim;
};
-struct ena_calc_queue_size_ctx {
- struct ena_com_dev_get_features_ctx *get_feat_ctx;
- struct ena_com_dev *ena_dev;
- struct pci_dev *pdev;
- u32 tx_queue_size;
- u32 rx_queue_size;
- u32 max_tx_queue_size;
- u32 max_rx_queue_size;
- u16 max_tx_sgl_size;
- u16 max_rx_sgl_size;
-};
-
struct ena_tx_buffer {
struct sk_buff *skb;
/* num of ena desc for this specific skb
@@ -215,7 +204,7 @@ struct ena_stats_rx {
u64 rx_copybreak_pkt;
u64 csum_good;
u64 refil_partial;
- u64 bad_csum;
+ u64 csum_bad;
u64 page_alloc_fail;
u64 skb_alloc_fail;
u64 dma_mapping_err;
@@ -273,9 +262,11 @@ struct ena_ring {
bool disable_meta_caching;
u16 no_interrupt_event_cnt;
- /* cpu for TPH */
+ /* cpu and NUMA for TPH */
int cpu;
- /* number of tx/rx_buffer_info's entries */
+ int numa_node;
+
+ /* number of tx/rx_buffer_info's entries */
int ring_size;
enum ena_admin_placement_policy_type tx_mem_queue_type;
@@ -343,6 +334,14 @@ struct ena_adapter {
u32 msg_enable;
+ /* large_llq_header_enabled is used for two purposes:
+ * 1. Indicates that large LLQ has been requested.
+ * 2. Indicates whether large LLQ is set or not after device
+ * initialization / configuration.
+ */
+ bool large_llq_header_enabled;
+ bool large_llq_header_supported;
+
u16 max_tx_sgl_size;
u16 max_rx_sgl_size;
@@ -378,7 +377,6 @@ struct ena_adapter {
struct u64_stats_sync syncp;
struct ena_stats_dev dev_stats;
struct ena_admin_eni_stats eni_stats;
- bool eni_stats_supported;
/* last queue index that was checked for uncompleted tx packets */
u32 last_monitored_tx_qid;
@@ -398,20 +396,41 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
int ena_update_hw_stats(struct ena_adapter *adapter);
-int ena_update_queue_sizes(struct ena_adapter *adapter,
- u32 new_tx_size,
- u32 new_rx_size);
+int ena_update_queue_params(struct ena_adapter *adapter,
+ u32 new_tx_size,
+ u32 new_rx_size,
+ u32 new_llq_header_len);
int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
+int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak);
+
int ena_get_sset_count(struct net_device *netdev, int sset);
+static inline void ena_reset_device(struct ena_adapter *adapter,
+ enum ena_regs_reset_reason_types reset_reason)
+{
+ adapter->reset_reason = reset_reason;
+ /* Make sure reset reason is set before triggering the reset */
+ smp_mb__before_atomic();
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+}
+
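[Editor's note: illustrative sketch, not part of the patch.] The new ena_reset_device() helper centralizes the "record a reset reason, then raise the trigger flag" sequence that the call sites above used to open-code; the smp_mb__before_atomic() keeps the reason store visible before the flag is observed by the reset worker. A userspace analogue of that publish-then-flag ordering, using C11 release/acquire atomics rather than the kernel primitives:

/* Userspace analogue of the ordering above, for illustration only. */
#include <stdatomic.h>
#include <stdio.h>

static int reset_reason;		/* plain data, published via the flag */
static atomic_bool trigger_reset;

static void request_reset(int reason)
{
	reset_reason = reason;
	/* release: make the reason visible before the flag is set */
	atomic_store_explicit(&trigger_reset, true, memory_order_release);
}

static void reset_worker(void)
{
	/* acquire: if the flag is seen, the reason is seen too */
	if (atomic_load_explicit(&trigger_reset, memory_order_acquire))
		printf("resetting, reason=%d\n", reset_reason);
}

int main(void)
{
	request_reset(42);	/* hypothetical reason code */
	reset_worker();
	return 0;
}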
enum ena_xdp_errors_t {
ENA_XDP_ALLOWED = 0,
ENA_XDP_CURRENT_MTU_TOO_LARGE,
ENA_XDP_NO_ENOUGH_QUEUES,
};
+enum ENA_XDP_ACTIONS {
+ ENA_XDP_PASS = 0,
+ ENA_XDP_TX = BIT(0),
+ ENA_XDP_REDIRECT = BIT(1),
+ ENA_XDP_DROP = BIT(2)
+};
+
+#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
+
static inline bool ena_xdp_present(struct ena_adapter *adapter)
{
return !!adapter->xdp_bpf_prog;
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 899c8a2a34b6..f8cc8925161c 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -130,16 +130,6 @@ config PCMCIA_NMCLAN
To compile this driver as a module, choose M here: the module will be
called nmclan_cs. If unsure, say N.
-config NI65
- tristate "NI6510 support"
- depends on ISA && ISA_DMA_API && !ARM && !PPC32
- select NETDEV_LEGACY_INIT
- help
- If you have a network (Ethernet) card of this type, say Y here.
-
- To compile this driver as a module, choose M here. The module
- will be called ni65.
-
config SUN3LANCE
tristate "Sun3/Sun3x on-board LANCE support"
depends on (SUN3 || SUN3X)
@@ -196,4 +186,18 @@ config AMD_XGBE_HAVE_ECC
bool
default n
+config PDS_CORE
+ tristate "AMD/Pensando Data Systems Core Device Support"
+ depends on 64BIT && PCI
+ select AUXILIARY_BUS
+ select NET_DEVLINK
+ help
+ This enables the support for the AMD/Pensando Core device family of
+ adapters. More specific information on this driver can be
+ found in
+ <file:Documentation/networking/device_drivers/ethernet/amd/pds_core.rst>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called pds_core.
+
endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index 0d2f478b49a5..2dcfb84731e1 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -13,8 +13,8 @@ obj-$(CONFIG_LANCE) += lance.o
obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
obj-$(CONFIG_PCMCIA_NMCLAN) += nmclan_cs.o
-obj-$(CONFIG_NI65) += ni65.o
obj-$(CONFIG_PCNET32) += pcnet32.o
obj-$(CONFIG_SUN3LANCE) += sun3lance.o
obj-$(CONFIG_SUNLANCE) += sunlance.o
obj-$(CONFIG_AMD_XGBE) += xgbe/
+obj-$(CONFIG_PDS_CORE) += pds_core/
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 2f808dbc8b0e..68983b717145 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -680,6 +680,7 @@ static int a2065_init_one(struct zorro_dev *z,
unsigned long base_addr = board + A2065_LANCE;
unsigned long mem_start = board + A2065_RAM;
struct resource *r1, *r2;
+ u8 addr[ETH_ALEN];
u32 serial;
int err;
@@ -694,7 +695,7 @@ static int a2065_init_one(struct zorro_dev *z,
}
dev = alloc_etherdev(sizeof(struct lance_private));
- if (dev == NULL) {
+ if (!dev) {
release_mem_region(base_addr, sizeof(struct lance_regs));
release_mem_region(mem_start, A2065_RAM_SIZE);
return -ENOMEM;
@@ -706,17 +707,18 @@ static int a2065_init_one(struct zorro_dev *z,
r2->name = dev->name;
serial = be32_to_cpu(z->rom.er_SerialNumber);
- dev->dev_addr[0] = 0x00;
+ addr[0] = 0x00;
if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */
- dev->dev_addr[1] = 0x80;
- dev->dev_addr[2] = 0x10;
+ addr[1] = 0x80;
+ addr[2] = 0x10;
} else { /* Ameristar */
- dev->dev_addr[1] = 0x00;
- dev->dev_addr[2] = 0x9f;
+ addr[1] = 0x00;
+ addr[2] = 0x9f;
}
- dev->dev_addr[3] = (serial >> 16) & 0xff;
- dev->dev_addr[4] = (serial >> 8) & 0xff;
- dev->dev_addr[5] = serial & 0xff;
+ addr[3] = (serial >> 16) & 0xff;
+ addr[4] = (serial >> 8) & 0xff;
+ addr[5] = serial & 0xff;
+ eth_hw_addr_set(dev, addr);
dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
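[Editor's note: illustrative sketch, not part of the patch.] The a2065 hunk above (and the similar LANCE-family hunks below) stop writing dev->dev_addr bytes in place and instead assemble the address in a local buffer and hand it to eth_hw_addr_set(), since the networking core no longer lets drivers modify dev_addr directly. A minimal sketch of the buffer-assembly side; the OUI bytes and serial value are hypothetical, and in the driver the result is passed to eth_hw_addr_set(dev, addr):

/* Sketch of building a MAC address from a board serial number. */
#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static void build_addr(uint8_t addr[ETH_ALEN], uint32_t serial)
{
	addr[0] = 0x00;			/* vendor OUI, hypothetical */
	addr[1] = 0x60;
	addr[2] = 0x30;
	addr[3] = (serial >> 16) & 0xff;
	addr[4] = (serial >> 8) & 0xff;
	addr[5] = serial & 0xff;
}

int main(void)
{
	uint8_t addr[ETH_ALEN];

	build_addr(addr, 0x123456);	/* hypothetical board serial */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}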
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 9421afb950f7..ea6cfc2095e1 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -43,7 +43,7 @@ Revision History:
3.0.4 12/09/2003
1. Added set_mac_address routine for bonding driver support.
2. Tested the driver for bonding support
- 3. Bug fix: Fixed mismach in actual receive buffer lenth and lenth
+ 3. Bug fix: Fixed mismach in actual receive buffer length and length
indicated to the h/w.
4. Modified amd8111e_rx() routine to receive all the received packets
in the first interrupt.
@@ -185,24 +185,23 @@ static void amd8111e_set_ext_phy(struct net_device *dev)
advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
switch (lp->ext_phy_option) {
-
- default:
- case SPEED_AUTONEG: /* advertise all values */
- tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
- ADVERTISE_100HALF | ADVERTISE_100FULL);
- break;
- case SPEED10_HALF:
- tmp |= ADVERTISE_10HALF;
- break;
- case SPEED10_FULL:
- tmp |= ADVERTISE_10FULL;
- break;
- case SPEED100_HALF:
- tmp |= ADVERTISE_100HALF;
- break;
- case SPEED100_FULL:
- tmp |= ADVERTISE_100FULL;
- break;
+ default:
+ case SPEED_AUTONEG: /* advertise all values */
+ tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL);
+ break;
+ case SPEED10_HALF:
+ tmp |= ADVERTISE_10HALF;
+ break;
+ case SPEED10_FULL:
+ tmp |= ADVERTISE_10FULL;
+ break;
+ case SPEED100_HALF:
+ tmp |= ADVERTISE_100HALF;
+ break;
+ case SPEED100_FULL:
+ tmp |= ADVERTISE_100FULL;
+ break;
}
if(advert != tmp)
@@ -237,7 +236,7 @@ static int amd8111e_free_skbs(struct net_device *dev)
/* Freeing previously allocated receive buffers */
for (i = 0; i < NUM_RX_BUFFERS; i++) {
rx_skbuff = lp->rx_skbuff[i];
- if (rx_skbuff != NULL) {
+ if (rx_skbuff) {
dma_unmap_single(&lp->pci_dev->dev,
lp->rx_dma_addr[i],
lp->rx_buff_len - 2, DMA_FROM_DEVICE);
@@ -1084,7 +1083,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
unsigned int intr0, intren0;
unsigned int handled = 1;
- if (unlikely(dev == NULL))
+ if (unlikely(!dev))
return IRQ_NONE;
spin_lock(&lp->lock);
@@ -1109,7 +1108,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
/* Check if Receive Interrupt has occurred. */
if (intr0 & RINT0) {
if (napi_schedule_prep(&lp->napi)) {
- /* Disable receive interupts */
+ /* Disable receive interrupts */
writel(RINTEN0, mmio + INTEN0);
/* Schedule a polling routine */
__napi_schedule(&lp->napi);
@@ -1364,10 +1363,10 @@ static void amd8111e_get_drvinfo(struct net_device *dev,
{
struct amd8111e_priv *lp = netdev_priv(dev);
struct pci_dev *pci_dev = lp->pci_dev;
- strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, MODULE_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version),
"%u", chip_version);
- strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int amd8111e_get_regs_len(struct net_device *dev)
@@ -1554,7 +1553,7 @@ static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
{
- /* Adapter is already stoped/suspended/interrupt-disabled */
+ /* Adapter is already stopped/suspended/interrupt-disabled */
writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);
/* To eliminate PCI posting bug */
@@ -1828,11 +1827,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
dev->min_mtu = AMD8111E_MIN_MTU;
dev->max_mtu = AMD8111E_MAX_MTU;
- netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
+ netif_napi_add_weight(dev, &lp->napi, amd8111e_rx_poll, 32);
-#if AMD8111E_VLAN_TAG_USED
- dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
-#endif
/* Probe the external PHY */
amd8111e_probe_ext_phy(dev);
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 37da79da5f5e..9d570adb295b 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -600,7 +600,7 @@ typedef enum {
#define CSTATE 1
#define SSTATE 2
-/* Assume contoller gets data 10 times the maximum processing time */
+/* Assume controller gets data 10 times the maximum processing time */
#define REPEAT_CNT 10
/* amd8111e descriptor flag definitions */
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 5e0f645f5bde..38153e633231 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -193,7 +193,7 @@ static int ariadne_rx(struct net_device *dev)
struct sk_buff *skb;
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
for (i = 0; i < RX_RING_SIZE; i++)
if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
break;
@@ -441,11 +441,11 @@ static int ariadne_open(struct net_device *dev)
/* Set the Ethernet Hardware Address */
lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[0];
+ lance->RDP = ((const u_short *)&dev->dev_addr[0])[0];
lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[1];
+ lance->RDP = ((const u_short *)&dev->dev_addr[0])[1];
lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */
- lance->RDP = ((u_short *)&dev->dev_addr[0])[2];
+ lance->RDP = ((const u_short *)&dev->dev_addr[0])[2];
/* Set the Init Block Mode */
lance->RAP = CSR15; /* Mode Register */
@@ -717,6 +717,7 @@ static int ariadne_init_one(struct zorro_dev *z,
unsigned long mem_start = board + ARIADNE_RAM;
struct resource *r1, *r2;
struct net_device *dev;
+ u8 addr[ETH_ALEN];
u32 serial;
int err;
@@ -730,7 +731,7 @@ static int ariadne_init_one(struct zorro_dev *z,
}
dev = alloc_etherdev(sizeof(struct ariadne_private));
- if (dev == NULL) {
+ if (!dev) {
release_mem_region(base_addr, sizeof(struct Am79C960));
release_mem_region(mem_start, ARIADNE_RAM_SIZE);
return -ENOMEM;
@@ -740,12 +741,13 @@ static int ariadne_init_one(struct zorro_dev *z,
r2->name = dev->name;
serial = be32_to_cpu(z->rom.er_SerialNumber);
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x60;
- dev->dev_addr[2] = 0x30;
- dev->dev_addr[3] = (serial >> 16) & 0xff;
- dev->dev_addr[4] = (serial >> 8) & 0xff;
- dev->dev_addr[5] = serial & 0xff;
+ addr[0] = 0x00;
+ addr[1] = 0x60;
+ addr[2] = 0x30;
+ addr[3] = (serial >> 16) & 0xff;
+ addr[4] = (serial >> 8) & 0xff;
+ addr[5] = serial & 0xff;
+ eth_hw_addr_set(dev, addr);
dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 9c7d9690d00c..ec704222925d 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -471,6 +471,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
int i;
static int did_version;
unsigned short save1, save2;
+ u8 addr[ETH_ALEN];
PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n",
(long)memaddr, (long)ioaddr ));
@@ -580,19 +581,21 @@ static unsigned long __init lance_probe1( struct net_device *dev,
/* Get the ethernet address */
switch( lp->cardtype ) {
- case OLD_RIEBL:
+ case OLD_RIEBL:
/* No ethernet address! (Set some default address) */
eth_hw_addr_set(dev, OldRieblDefHwaddr);
break;
- case NEW_RIEBL:
- lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
+ case NEW_RIEBL:
+ lp->memcpy_f(addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
break;
- case PAM_CARD:
+ case PAM_CARD:
i = IO->eeprom;
for( i = 0; i < 6; ++i )
- dev->dev_addr[i] =
+ addr[i] =
((((unsigned short *)MEM)[i*2] & 0x0f) << 4) |
((((unsigned short *)MEM)[i*2+1] & 0x0f));
+ eth_hw_addr_set(dev, addr);
i = IO->mem;
break;
}
@@ -821,7 +824,7 @@ lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
dev->stats.tx_bytes += skb->len;
- dev_kfree_skb( skb );
+ dev_consume_skb_irq(skb);
lp->cur_tx++;
while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
lp->cur_tx -= TX_RING_SIZE;
@@ -851,7 +854,7 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id )
int csr0, boguscnt = 10;
int handled = 0;
- if (dev == NULL) {
+ if (!dev) {
DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" ));
return IRQ_NONE;
}
@@ -992,7 +995,7 @@ static int lance_rx( struct net_device *dev )
}
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
for( i = 0; i < RX_RING_SIZE; i++ )
if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
RMD1_OWN_CHIP)
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index c6f003975621..c5cec4e79489 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -650,7 +650,7 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct au1000_private *aup = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
aup->mac_id);
}
@@ -786,7 +786,7 @@ static int au1000_rx(struct net_device *dev)
frmlen = (status & RX_FRAME_LEN_MASK);
frmlen -= 4; /* Remove FCS */
skb = netdev_alloc_skb(dev, frmlen + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
continue;
}
@@ -820,7 +820,7 @@ static int au1000_rx(struct net_device *dev)
pr_cont("\n");
}
}
- prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
+ prxd->buff_stat = lower_32_bits(pDB->dma_addr) | RX_DMA_ENABLE;
aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
wmb(); /* drain writebuffer */
@@ -996,7 +996,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
ps->tx_packets++;
ps->tx_bytes += ptxd->len;
- ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
+ ptxd->buff_stat = lower_32_bits(pDB->dma_addr) | TX_DMA_ENABLE;
wmb(); /* drain writebuffer */
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
@@ -1131,9 +1131,9 @@ static int au1000_probe(struct platform_device *pdev)
/* Allocate the data buffers
* Snooping works fine with eth on all au1xxx
*/
- aup->vaddr = (u32)dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
- (NUM_TX_BUFFS + NUM_RX_BUFFS),
- &aup->dma_addr, 0);
+ aup->vaddr = dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ &aup->dma_addr, 0);
if (!aup->vaddr) {
dev_err(&pdev->dev, "failed to allocate data buffers\n");
err = -ENOMEM;
@@ -1199,7 +1199,7 @@ static int au1000_probe(struct platform_device *pdev)
}
aup->mii_bus = mdiobus_alloc();
- if (aup->mii_bus == NULL) {
+ if (!aup->mii_bus) {
dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
err = -ENOMEM;
goto err_mdiobus_alloc;
@@ -1234,8 +1234,8 @@ static int au1000_probe(struct platform_device *pdev)
for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
pDB->pnext = pDBfree;
pDBfree = pDB;
- pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
- pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+ pDB->vaddr = aup->vaddr + MAX_BUF_SIZE * i;
+ pDB->dma_addr = aup->dma_addr + MAX_BUF_SIZE * i;
pDB++;
}
aup->pDBfree = pDBfree;
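[Editor's note: illustrative sketch, not part of the patch.] In the au1000 hunk above, each buffer's DMA address is now derived by offsetting from the base returned by dma_alloc_coherent() instead of calling virt_to_bus() on the virtual pointer, and only the low 32 bits are written into the descriptor. A standalone sketch of carving one allocation into fixed-size buffers this way; the pool, base bus address, and sizes are hypothetical:

/* Sketch of splitting a single coherent allocation into per-descriptor
 * buffers, keeping the CPU and DMA views in lockstep. */
#include <stdint.h>
#include <stdio.h>

#define MAX_BUF_SIZE	2048
#define NUM_BUFFS	4

int main(void)
{
	/* stand-ins for the (vaddr, dma_addr) pair from the allocator */
	static unsigned char pool[MAX_BUF_SIZE * NUM_BUFFS];
	void *vaddr = pool;
	uint64_t dma_addr = 0x80000000ull;	/* hypothetical bus address */

	for (int i = 0; i < NUM_BUFFS; i++) {
		void *buf_v = (unsigned char *)vaddr + MAX_BUF_SIZE * i;
		uint64_t buf_d = dma_addr + MAX_BUF_SIZE * i;

		/* the descriptor field only holds the low 32 bits */
		printf("buf %d: cpu=%p dma=0x%08x\n", i, buf_v,
		       (uint32_t)(buf_d & 0xffffffffu));
	}
	return 0;
}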
@@ -1246,7 +1246,7 @@ static int au1000_probe(struct platform_device *pdev)
if (!pDB)
goto err_out;
- aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->rx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr);
aup->rx_db_inuse[i] = pDB;
}
@@ -1255,7 +1255,7 @@ static int au1000_probe(struct platform_device *pdev)
if (!pDB)
goto err_out;
- aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->tx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr);
aup->tx_dma_ring[i]->len = 0;
aup->tx_db_inuse[i] = pDB;
}
@@ -1284,7 +1284,7 @@ static int au1000_probe(struct platform_device *pdev)
return 0;
err_out:
- if (aup->mii_bus != NULL)
+ if (aup->mii_bus)
mdiobus_unregister(aup->mii_bus);
/* here we should have a valid dev plus aup-> register addresses
@@ -1310,7 +1310,7 @@ err_remap2:
iounmap(aup->mac);
err_remap1:
dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr);
+ aup->vaddr, aup->dma_addr);
err_vaddr:
free_netdev(dev);
err_alloc:
@@ -1343,7 +1343,7 @@ static int au1000_remove(struct platform_device *pdev)
au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr);
+ aup->vaddr, aup->dma_addr);
iounmap(aup->macdma);
iounmap(aup->mac);
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
index e3a3ed29db61..2489c2f4fd8a 100644
--- a/drivers/net/ethernet/amd/au1000_eth.h
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -106,8 +106,8 @@ struct au1000_private {
struct mac_reg *mac; /* mac registers */
u32 *enable; /* address of MAC Enable Register */
void __iomem *macdma; /* base of MAC DMA port */
- u32 vaddr; /* virtual address of rx/tx buffers */
- dma_addr_t dma_addr; /* dma address of rx/tx buffers */
+ void *vaddr; /* virtual address of rx/tx buffers */
+ dma_addr_t dma_addr; /* dma address of rx/tx buffers */
spinlock_t lock; /* Serialise access to device */
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 493b0cefcc2a..ec8df05e7bf6 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1032,6 +1032,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
int i, ret;
unsigned long esar_base;
unsigned char *esar;
+ u8 addr[ETH_ALEN];
const char *desc;
if (dec_lance_debug && version_printed++ == 0)
@@ -1228,7 +1229,8 @@ static int dec_lance_probe(struct device *bdev, const int type)
break;
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = esar[i * 4];
+ addr[i] = esar[i * 4];
+ eth_hw_addr_set(dev, addr);
printk("%s: %s, addr = %pM, irq = %d\n",
name, desc, dev->dev_addr, dev->irq);
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 6784f8748638..055fda11c572 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -129,6 +129,7 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d)
{
unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
struct hplance_private *lp;
+ u8 addr[ETH_ALEN];
int i;
/* reset the board */
@@ -144,9 +145,10 @@ static void hplance_init(struct net_device *dev, struct dio_dev *d)
/* The NVRAM holds our ethernet address, one nibble per byte,
* at bytes NVRAMOFF+1,3,5,7,9...
*/
- dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
+ addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
| (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
}
+ eth_hw_addr_set(dev, addr);
lp = netdev_priv(dev);
lp->lance.name = d->name;
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 945bf1d87507..8971665a4b2a 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -480,6 +480,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
unsigned long flags;
int err = -ENOMEM;
void __iomem *bios;
+ u8 addr[ETH_ALEN];
/* First we look for special cases.
Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
@@ -541,7 +542,8 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
/* There is a 16 byte station address PROM at the base address.
The first six bytes are the station address. */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
+ addr[i] = inb(ioaddr + i);
+ eth_hw_addr_set(dev, addr);
printk("%pM", dev->dev_addr);
dev->base_addr = ioaddr;
@@ -878,7 +880,7 @@ lance_init_ring(struct net_device *dev, gfp_t gfp)
rx_buff = skb->data;
else
rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
- if (rx_buff == NULL)
+ if (!rx_buff)
lp->rx_ring[i].base = 0;
else
lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
@@ -999,7 +1001,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
lp->tx_ring[entry].base =
((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
- dev_kfree_skb(skb);
+ dev_consume_skb_irq(skb);
} else {
lp->tx_skbuff[entry] = skb;
lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
@@ -1184,7 +1186,7 @@ lance_rx(struct net_device *dev)
else
{
skb = dev_alloc_skb(pkt_len+2);
- if (skb == NULL)
+ if (!skb)
{
printk("%s: Memory squeeze, deferring packet.\n", dev->name);
for (i=0; i < RX_RING_SIZE; i++)
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index da97fccea9ea..410c7b67eba4 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -74,6 +74,7 @@ static struct net_device * __init mvme147lance_probe(void)
static int called;
static const char name[] = "MVME147 LANCE";
struct m147lance_private *lp;
+ u8 macaddr[ETH_ALEN];
u_long *addr;
u_long address;
int err;
@@ -93,15 +94,16 @@ static struct net_device * __init mvme147lance_probe(void)
addr = (u_long *)ETHERNET_ADDRESS;
address = *addr;
- dev->dev_addr[0] = 0x08;
- dev->dev_addr[1] = 0x00;
- dev->dev_addr[2] = 0x3e;
+ macaddr[0] = 0x08;
+ macaddr[1] = 0x00;
+ macaddr[2] = 0x3e;
address = address >> 8;
- dev->dev_addr[5] = address&0xff;
+ macaddr[5] = address&0xff;
address = address >> 8;
- dev->dev_addr[4] = address&0xff;
+ macaddr[4] = address&0xff;
address = address >> 8;
- dev->dev_addr[3] = address&0xff;
+ macaddr[3] = address&0xff;
+ eth_hw_addr_set(dev, macaddr);
printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
dev->name, dev->base_addr, MVME147_LANCE_IRQ,
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
deleted file mode 100644
index 032e8922b482..000000000000
--- a/drivers/net/ethernet/amd/ni65.c
+++ /dev/null
@@ -1,1249 +0,0 @@
-/*
- * ni6510 (am7990 'lance' chip) driver for Linux-net-3
- * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
- * copyrights (c) 1994,1995,1996 by M.Hipp
- *
- * This driver can handle the old ni6510 board and the newer ni6510
- * EtherBlaster. (probably it also works with every full NE2100
- * compatible card)
- *
- * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
- *
- * This is an extension to the Linux operating system, and is covered by the
- * same GNU General Public License that covers the Linux-kernel.
- *
- * comments/bugs/suggestions can be sent to:
- * Michael Hipp
- * email: hippm@informatik.uni-tuebingen.de
- *
- * sources:
- * some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
- * and from the original drivers by D.Becker
- *
- * known problems:
- * - on some PCI boards (including my own) the card/board/ISA-bridge has
- * problems with bus master DMA. This results in lotsa overruns.
- * It may help to '#define RCV_PARANOIA_CHECK' or try to #undef
- * the XMT and RCV_VIA_SKB option .. this reduces driver performance.
- * Or just play with your BIOS options to optimize ISA-DMA access.
- * Maybe you also wanna play with the LOW_PERFORAMCE and MID_PERFORMANCE
- * defines -> please report me your experience then
- * - Harald reported for ASUS SP3G mainboards, that you should use
- * the 'optimal settings' from the user's manual on page 3-12!
- *
- * credits:
- * thanx to Jason Sullivan for sending me a ni6510 card!
- * lot of debug runs with ASUS SP3G Boards (Intel Saturn) by Harald Koenig
- *
- * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
- * average: FTP -> 8384421 bytes received in 8.5 seconds
- * (no RCV_VIA_SKB,no XMT_VIA_SKB,PARANOIA_CHECK,4 XMIT BUFS, 8 RCV_BUFFS)
- * peak: FTP -> 8384421 bytes received in 7.5 seconds
- * (RCV_VIA_SKB,XMT_VIA_SKB,no PARANOIA_CHECK,1(!) XMIT BUF, 16 RCV BUFFS)
- */
-
-/*
- * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
- * 96.Sept.29: virt_to_bus stuff added for new memory modell
- * 96.April.29: Added Harald Koenig's Patches (MH)
- * 96.April.13: enhanced error handling .. more tests (MH)
- * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
- * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH)
- * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
- * hopefully no more 16MB limit
- *
- * 95.Nov.18: multicast tweaked (AC).
- *
- * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH)
- *
- * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include "ni65.h"
-
-/*
- * the current setting allows an acceptable performance
- * for 'RCV_PARANOIA_CHECK' read the 'known problems' part in
- * the header of this file
- * 'invert' the defines for max. performance. This may cause DMA problems
- * on some boards (e.g on my ASUS SP3G)
- */
-#undef XMT_VIA_SKB
-#undef RCV_VIA_SKB
-#define RCV_PARANOIA_CHECK
-
-#define MID_PERFORMANCE
-
-#if defined( LOW_PERFORMANCE )
- static int isa0=7,isa1=7,csr80=0x0c10;
-#elif defined( MID_PERFORMANCE )
- static int isa0=5,isa1=5,csr80=0x2810;
-#else /* high performance */
- static int isa0=4,isa1=4,csr80=0x0017;
-#endif
-
-/*
- * a few card/vendor specific defines
- */
-#define NI65_ID0 0x00
-#define NI65_ID1 0x55
-#define NI65_EB_ID0 0x52
-#define NI65_EB_ID1 0x44
-#define NE2100_ID0 0x57
-#define NE2100_ID1 0x57
-
-#define PORT p->cmdr_addr
-
-/*
- * buffer configuration
- */
-#if 1
-#define RMDNUM 16
-#define RMDNUMMASK 0x80000000
-#else
-#define RMDNUM 8
-#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
-#endif
-
-#if 0
-#define TMDNUM 1
-#define TMDNUMMASK 0x00000000
-#else
-#define TMDNUM 4
-#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
-#endif
-
-/* slightly oversized */
-#define R_BUF_SIZE 1544
-#define T_BUF_SIZE 1544
-
-/*
- * lance register defines
- */
-#define L_DATAREG 0x00
-#define L_ADDRREG 0x02
-#define L_RESET 0x04
-#define L_CONFIG 0x05
-#define L_BUSIF 0x06
-
-/*
- * to access the lance/am7990-regs, you have to write
- * reg-number into L_ADDRREG, then you can access it using L_DATAREG
- */
-#define CSR0 0x00
-#define CSR1 0x01
-#define CSR2 0x02
-#define CSR3 0x03
-
-#define INIT_RING_BEFORE_START 0x1
-#define FULL_RESET_ON_ERROR 0x2
-
-#if 0
-#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
- outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
-#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
- inw(PORT+L_DATAREG))
-#if 0
-#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
-#else
-#define writedatareg(val) { writereg(val,CSR0); }
-#endif
-#else
-#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
-#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
-#define writedatareg(val) { writereg(val,CSR0); }
-#endif
-
-static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };
-
-static struct card {
- unsigned char id0,id1;
- short id_offset;
- short total_size;
- short cmd_offset;
- short addr_offset;
- unsigned char *vendor_id;
- char *cardname;
- unsigned long config;
-} cards[] = {
- {
- .id0 = NI65_ID0,
- .id1 = NI65_ID1,
- .id_offset = 0x0e,
- .total_size = 0x10,
- .cmd_offset = 0x0,
- .addr_offset = 0x8,
- .vendor_id = ni_vendor,
- .cardname = "ni6510",
- .config = 0x1,
- },
- {
- .id0 = NI65_EB_ID0,
- .id1 = NI65_EB_ID1,
- .id_offset = 0x0e,
- .total_size = 0x18,
- .cmd_offset = 0x10,
- .addr_offset = 0x0,
- .vendor_id = ni_vendor,
- .cardname = "ni6510 EtherBlaster",
- .config = 0x2,
- },
- {
- .id0 = NE2100_ID0,
- .id1 = NE2100_ID1,
- .id_offset = 0x0e,
- .total_size = 0x18,
- .cmd_offset = 0x10,
- .addr_offset = 0x0,
- .vendor_id = NULL,
- .cardname = "generic NE2100",
- .config = 0x0,
- },
-};
-#define NUM_CARDS 3
-
-struct priv
-{
- struct rmd rmdhead[RMDNUM];
- struct tmd tmdhead[TMDNUM];
- struct init_block ib;
- int rmdnum;
- int tmdnum,tmdlast;
-#ifdef RCV_VIA_SKB
- struct sk_buff *recv_skb[RMDNUM];
-#else
- void *recvbounce[RMDNUM];
-#endif
-#ifdef XMT_VIA_SKB
- struct sk_buff *tmd_skb[TMDNUM];
-#endif
- void *tmdbounce[TMDNUM];
- int tmdbouncenum;
- int lock,xmit_queued;
-
- void *self;
- int cmdr_addr;
- int cardno;
- int features;
- spinlock_t ring_lock;
-};
-
-static int ni65_probe1(struct net_device *dev,int);
-static irqreturn_t ni65_interrupt(int irq, void * dev_id);
-static void ni65_recv_intr(struct net_device *dev,int);
-static void ni65_xmit_intr(struct net_device *dev,int);
-static int ni65_open(struct net_device *dev);
-static int ni65_lance_reinit(struct net_device *dev);
-static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
-static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
- struct net_device *dev);
-static void ni65_timeout(struct net_device *dev, unsigned int txqueue);
-static int ni65_close(struct net_device *dev);
-static int ni65_alloc_buffer(struct net_device *dev);
-static void ni65_free_buffer(struct priv *p);
-static void set_multicast_list(struct net_device *dev);
-
-static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
-static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */
-
-static int debuglevel = 1;
-
-/*
- * set 'performance' registers .. we must STOP lance for that
- */
-static void ni65_set_performance(struct priv *p)
-{
- writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */
-
- if( !(cards[p->cardno].config & 0x02) )
- return;
-
- outw(80,PORT+L_ADDRREG);
- if(inw(PORT+L_ADDRREG) != 80)
- return;
-
- writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
- outw(0,PORT+L_ADDRREG);
- outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
- outw(1,PORT+L_ADDRREG);
- outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */
-
- outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
-}
-
-/*
- * open interface (up)
- */
-static int ni65_open(struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
- int irqval = request_irq(dev->irq, ni65_interrupt,0,
- cards[p->cardno].cardname,dev);
- if (irqval) {
- printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
- dev->name,dev->irq, irqval);
- return -EAGAIN;
- }
-
- if(ni65_lance_reinit(dev))
- {
- netif_start_queue(dev);
- return 0;
- }
- else
- {
- free_irq(dev->irq,dev);
- return -EAGAIN;
- }
-}
-
-/*
- * close interface (down)
- */
-static int ni65_close(struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
-
- netif_stop_queue(dev);
-
- outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */
-
-#ifdef XMT_VIA_SKB
- {
- int i;
- for(i=0;i<TMDNUM;i++)
- {
- if(p->tmd_skb[i]) {
- dev_kfree_skb(p->tmd_skb[i]);
- p->tmd_skb[i] = NULL;
- }
- }
- }
-#endif
- free_irq(dev->irq,dev);
- return 0;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
- disable_dma(dev->dma);
- free_dma(dev->dma);
- release_region(dev->base_addr, cards[p->cardno].total_size);
- ni65_free_buffer(p);
-}
-
-/* set: io,irq,dma or set it when calling insmod */
-static int irq;
-static int io;
-static int dma;
-
-/*
- * Probe The Card (not the lance-chip)
- */
-struct net_device * __init ni65_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(0);
- static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 };
- const int *port;
- int err = 0;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- irq = dev->irq;
- dma = dev->dma;
- } else {
- dev->base_addr = io;
- }
-
- if (dev->base_addr > 0x1ff) { /* Check a single specified location. */
- err = ni65_probe1(dev, dev->base_addr);
- } else if (dev->base_addr > 0) { /* Don't probe at all. */
- err = -ENXIO;
- } else {
- for (port = ports; *port && ni65_probe1(dev, *port); port++)
- ;
- if (!*port)
- err = -ENODEV;
- }
- if (err)
- goto out;
-
- err = register_netdev(dev);
- if (err)
- goto out1;
- return dev;
-out1:
- cleanup_card(dev);
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-
-static const struct net_device_ops ni65_netdev_ops = {
- .ndo_open = ni65_open,
- .ndo_stop = ni65_close,
- .ndo_start_xmit = ni65_send_packet,
- .ndo_tx_timeout = ni65_timeout,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-/*
- * this is the real card probe ..
- */
-static int __init ni65_probe1(struct net_device *dev,int ioaddr)
-{
- int i,j;
- struct priv *p;
- unsigned long flags;
-
- dev->irq = irq;
- dev->dma = dma;
-
- for(i=0;i<NUM_CARDS;i++) {
- if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
- continue;
- if(cards[i].id_offset >= 0) {
- if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
- inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
- release_region(ioaddr, cards[i].total_size);
- continue;
- }
- }
- if(cards[i].vendor_id) {
- for(j=0;j<3;j++)
- if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j])
- release_region(ioaddr, cards[i].total_size);
- }
- break;
- }
- if(i == NUM_CARDS)
- return -ENODEV;
-
- for(j=0;j<6;j++)
- dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);
-
- if( (j=ni65_alloc_buffer(dev)) < 0) {
- release_region(ioaddr, cards[i].total_size);
- return j;
- }
- p = dev->ml_priv;
- p->cmdr_addr = ioaddr + cards[i].cmd_offset;
- p->cardno = i;
- spin_lock_init(&p->ring_lock);
-
- printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);
-
- outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
- if( (j=readreg(CSR0)) != 0x4) {
- printk("failed.\n");
- printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
- ni65_free_buffer(p);
- release_region(ioaddr, cards[p->cardno].total_size);
- return -EAGAIN;
- }
-
- outw(88,PORT+L_ADDRREG);
- if(inw(PORT+L_ADDRREG) == 88) {
- unsigned long v;
- v = inw(PORT+L_DATAREG);
- v <<= 16;
- outw(89,PORT+L_ADDRREG);
- v |= inw(PORT+L_DATAREG);
- printk("Version %#08lx, ",v);
- p->features = INIT_RING_BEFORE_START;
- }
- else {
- printk("ancient LANCE, ");
- p->features = 0x0;
- }
-
- if(test_bit(0,&cards[i].config)) {
- dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
- dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
- printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
- }
- else {
- if(dev->dma == 0) {
- /* 'stuck test' from lance.c */
- unsigned long dma_channels =
- ((inb(DMA1_STAT_REG) >> 4) & 0x0f)
- | (inb(DMA2_STAT_REG) & 0xf0);
- for(i=1;i<5;i++) {
- int dma = dmatab[i];
- if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
- continue;
-
- flags=claim_dma_lock();
- disable_dma(dma);
- set_dma_mode(dma,DMA_MODE_CASCADE);
- enable_dma(dma);
- release_dma_lock(flags);
-
- ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */
-
- flags=claim_dma_lock();
- disable_dma(dma);
- free_dma(dma);
- release_dma_lock(flags);
-
- if(readreg(CSR0) & CSR0_IDON)
- break;
- }
- if(i == 5) {
- printk("failed.\n");
- printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
- ni65_free_buffer(p);
- release_region(ioaddr, cards[p->cardno].total_size);
- return -EAGAIN;
- }
- dev->dma = dmatab[i];
- printk("DMA %d (autodetected), ",dev->dma);
- }
- else
- printk("DMA %d (assigned), ",dev->dma);
-
- if(dev->irq < 2)
- {
- unsigned long irq_mask;
-
- ni65_init_lance(p,dev->dev_addr,0,0);
- irq_mask = probe_irq_on();
- writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
- msleep(20);
- dev->irq = probe_irq_off(irq_mask);
- if(!dev->irq)
- {
- printk("Failed to detect IRQ line!\n");
- ni65_free_buffer(p);
- release_region(ioaddr, cards[p->cardno].total_size);
- return -EAGAIN;
- }
- printk("IRQ %d (autodetected).\n",dev->irq);
- }
- else
- printk("IRQ %d (assigned).\n",dev->irq);
- }
-
- if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
- {
- printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
- ni65_free_buffer(p);
- release_region(ioaddr, cards[p->cardno].total_size);
- return -EAGAIN;
- }
-
- dev->base_addr = ioaddr;
- dev->netdev_ops = &ni65_netdev_ops;
- dev->watchdog_timeo = HZ/2;
-
- return 0; /* everything is OK */
-}
-
-/*
- * set lance register and trigger init
- */
-static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
-{
- int i;
- u32 pib;
-
- writereg(CSR0_CLRALL|CSR0_STOP,CSR0);
-
- for(i=0;i<6;i++)
- p->ib.eaddr[i] = daddr[i];
-
- for(i=0;i<8;i++)
- p->ib.filter[i] = filter;
- p->ib.mode = mode;
-
- p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
- p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
- writereg(0,CSR3); /* busmaster/no word-swap */
- pib = (u32) isa_virt_to_bus(&p->ib);
- writereg(pib & 0xffff,CSR1);
- writereg(pib >> 16,CSR2);
-
- writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */
-
- for(i=0;i<32;i++)
- {
- mdelay(4);
- if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
- break; /* init ok ? */
- }
-}
-
-/*
- * allocate memory area and check the 16MB border
- */
-static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
-{
- struct sk_buff *skb=NULL;
- unsigned char *ptr;
- void *ret;
-
- if(type) {
- ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
- if(!skb) {
- printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
- return NULL;
- }
- skb_reserve(skb,2+16);
- skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
- ptr = skb->data;
- }
- else {
- ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
- if(!ret)
- return NULL;
- }
- if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
- printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
- if(type)
- kfree_skb(skb);
- else
- kfree(ptr);
- return NULL;
- }
- return ret;
-}
-
-/*
- * allocate all memory structures .. send/recv buffers etc ...
- */
-static int ni65_alloc_buffer(struct net_device *dev)
-{
- unsigned char *ptr;
- struct priv *p;
- int i;
-
- /*
- * we need 8-aligned memory ..
- */
- ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
- if(!ptr)
- return -ENOMEM;
-
- p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
- memset((char *)p, 0, sizeof(struct priv));
- p->self = ptr;
-
- for(i=0;i<TMDNUM;i++)
- {
-#ifdef XMT_VIA_SKB
- p->tmd_skb[i] = NULL;
-#endif
- p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
- if(!p->tmdbounce[i]) {
- ni65_free_buffer(p);
- return -ENOMEM;
- }
- }
-
- for(i=0;i<RMDNUM;i++)
- {
-#ifdef RCV_VIA_SKB
- p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
- if(!p->recv_skb[i]) {
- ni65_free_buffer(p);
- return -ENOMEM;
- }
-#else
- p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
- if(!p->recvbounce[i]) {
- ni65_free_buffer(p);
- return -ENOMEM;
- }
-#endif
- }
-
- return 0; /* everything is OK */
-}
-
-/*
- * free buffers and private struct
- */
-static void ni65_free_buffer(struct priv *p)
-{
- int i;
-
- if(!p)
- return;
-
- for(i=0;i<TMDNUM;i++) {
- kfree(p->tmdbounce[i]);
-#ifdef XMT_VIA_SKB
- dev_kfree_skb(p->tmd_skb[i]);
-#endif
- }
-
- for(i=0;i<RMDNUM;i++)
- {
-#ifdef RCV_VIA_SKB
- dev_kfree_skb(p->recv_skb[i]);
-#else
- kfree(p->recvbounce[i]);
-#endif
- }
- kfree(p->self);
-}
-
-
-/*
- * stop and (re)start lance .. e.g after an error
- */
-static void ni65_stop_start(struct net_device *dev,struct priv *p)
-{
- int csr0 = CSR0_INEA;
-
- writedatareg(CSR0_STOP);
-
- if(debuglevel > 1)
- printk(KERN_DEBUG "ni65_stop_start\n");
-
- if(p->features & INIT_RING_BEFORE_START) {
- int i;
-#ifdef XMT_VIA_SKB
- struct sk_buff *skb_save[TMDNUM];
-#endif
- unsigned long buffer[TMDNUM];
- short blen[TMDNUM];
-
- if(p->xmit_queued) {
- while(1) {
- if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
- break;
- p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
- if(p->tmdlast == p->tmdnum)
- break;
- }
- }
-
- for(i=0;i<TMDNUM;i++) {
- struct tmd *tmdp = p->tmdhead + i;
-#ifdef XMT_VIA_SKB
- skb_save[i] = p->tmd_skb[i];
-#endif
- buffer[i] = (unsigned long)isa_bus_to_virt(tmdp->u.buffer);
- blen[i] = tmdp->blen;
- tmdp->u.s.status = 0x0;
- }
-
- for(i=0;i<RMDNUM;i++) {
- struct rmd *rmdp = p->rmdhead + i;
- rmdp->u.s.status = RCV_OWN;
- }
- p->tmdnum = p->xmit_queued = 0;
- writedatareg(CSR0_STRT | csr0);
-
- for(i=0;i<TMDNUM;i++) {
- int num = (i + p->tmdlast) & (TMDNUM-1);
- p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
- p->tmdhead[i].blen = blen[num];
- if(p->tmdhead[i].u.s.status & XMIT_OWN) {
- p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
- p->xmit_queued = 1;
- writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
- }
-#ifdef XMT_VIA_SKB
- p->tmd_skb[i] = skb_save[num];
-#endif
- }
- p->rmdnum = p->tmdlast = 0;
- if(!p->lock)
- if (p->tmdnum || !p->xmit_queued)
- netif_wake_queue(dev);
- netif_trans_update(dev); /* prevent tx timeout */
- }
- else
- writedatareg(CSR0_STRT | csr0);
-}
-
-/*
- * init lance (write init-values .. init-buffers) (open-helper)
- */
-static int ni65_lance_reinit(struct net_device *dev)
-{
- int i;
- struct priv *p = dev->ml_priv;
- unsigned long flags;
-
- p->lock = 0;
- p->xmit_queued = 0;
-
- flags=claim_dma_lock();
- disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
- set_dma_mode(dev->dma,DMA_MODE_CASCADE);
- enable_dma(dev->dma);
- release_dma_lock(flags);
-
- outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
- if( (i=readreg(CSR0) ) != 0x4)
- {
- printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
- cards[p->cardno].cardname,(int) i);
- flags=claim_dma_lock();
- disable_dma(dev->dma);
- release_dma_lock(flags);
- return 0;
- }
-
- p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
- for(i=0;i<TMDNUM;i++)
- {
- struct tmd *tmdp = p->tmdhead + i;
-#ifdef XMT_VIA_SKB
- if(p->tmd_skb[i]) {
- dev_kfree_skb(p->tmd_skb[i]);
- p->tmd_skb[i] = NULL;
- }
-#endif
- tmdp->u.buffer = 0x0;
- tmdp->u.s.status = XMIT_START | XMIT_END;
- tmdp->blen = tmdp->status2 = 0;
- }
-
- for(i=0;i<RMDNUM;i++)
- {
- struct rmd *rmdp = p->rmdhead + i;
-#ifdef RCV_VIA_SKB
- rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
-#else
- rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
-#endif
- rmdp->blen = -(R_BUF_SIZE-8);
- rmdp->mlen = 0;
- rmdp->u.s.status = RCV_OWN;
- }
-
- if(dev->flags & IFF_PROMISC)
- ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
- else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
- ni65_init_lance(p,dev->dev_addr,0xff,0x0);
- else
- ni65_init_lance(p,dev->dev_addr,0x00,0x00);
-
- /*
- * ni65_set_lance_mem() sets L_ADDRREG to CSR0
- * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
- */
-
- if(inw(PORT+L_DATAREG) & CSR0_IDON) {
- ni65_set_performance(p);
- /* init OK: start lance , enable interrupts */
- writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
- return 1; /* ->OK */
- }
- printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
- flags=claim_dma_lock();
- disable_dma(dev->dma);
- release_dma_lock(flags);
- return 0; /* ->Error */
-}
-
-/*
- * interrupt handler
- */
-static irqreturn_t ni65_interrupt(int irq, void * dev_id)
-{
- int csr0 = 0;
- struct net_device *dev = dev_id;
- struct priv *p;
- int bcnt = 32;
-
- p = dev->ml_priv;
-
- spin_lock(&p->ring_lock);
-
- while(--bcnt) {
- csr0 = inw(PORT+L_DATAREG);
-
-#if 0
- writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
-#else
- writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
-#endif
-
- if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
- break;
-
- if(csr0 & CSR0_RINT) /* RECV-int? */
- ni65_recv_intr(dev,csr0);
- if(csr0 & CSR0_TINT) /* XMIT-int? */
- ni65_xmit_intr(dev,csr0);
-
- if(csr0 & CSR0_ERR)
- {
- if(debuglevel > 1)
- printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
- if(csr0 & CSR0_BABL)
- dev->stats.tx_errors++;
- if(csr0 & CSR0_MISS) {
- int i;
- for(i=0;i<RMDNUM;i++)
- printk("%02x ",p->rmdhead[i].u.s.status);
- printk("\n");
- dev->stats.rx_errors++;
- }
- if(csr0 & CSR0_MERR) {
- if(debuglevel > 1)
- printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
- ni65_stop_start(dev,p);
- }
- }
- }
-
-#ifdef RCV_PARANOIA_CHECK
-{
- int j;
- for(j=0;j<RMDNUM;j++)
- {
- int i, num2;
- for(i=RMDNUM-1;i>0;i--) {
- num2 = (p->rmdnum + i) & (RMDNUM-1);
- if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
- break;
- }
-
- if(i) {
- int k, num1;
- for(k=0;k<RMDNUM;k++) {
- num1 = (p->rmdnum + k) & (RMDNUM-1);
- if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
- break;
- }
- if(!k)
- break;
-
- if(debuglevel > 0)
- {
- char buf[256],*buf1;
- buf1 = buf;
- for(k=0;k<RMDNUM;k++) {
- sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
- buf1 += 3;
- }
- *buf1 = 0;
- printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
- }
-
- p->rmdnum = num1;
- ni65_recv_intr(dev,csr0);
- if((p->rmdhead[num2].u.s.status & RCV_OWN))
- break; /* ok, we are 'in sync' again */
- }
- else
- break;
- }
-}
-#endif
-
- if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
- printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
- ni65_stop_start(dev,p);
- }
- else
- writedatareg(CSR0_INEA);
-
- spin_unlock(&p->ring_lock);
- return IRQ_HANDLED;
-}
-
-/*
- * We have received an Xmit-Interrupt ..
- * send a new packet if necessary
- */
-static void ni65_xmit_intr(struct net_device *dev,int csr0)
-{
- struct priv *p = dev->ml_priv;
-
- while(p->xmit_queued)
- {
- struct tmd *tmdp = p->tmdhead + p->tmdlast;
- int tmdstat = tmdp->u.s.status;
-
- if(tmdstat & XMIT_OWN)
- break;
-
- if(tmdstat & XMIT_ERR)
- {
-#if 0
- if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
- printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
-#endif
- /* checking some errors */
- if(tmdp->status2 & XMIT_RTRY)
- dev->stats.tx_aborted_errors++;
- if(tmdp->status2 & XMIT_LCAR)
- dev->stats.tx_carrier_errors++;
- if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
- /* this stops the xmitter */
- dev->stats.tx_fifo_errors++;
- if(debuglevel > 0)
- printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
- if(p->features & INIT_RING_BEFORE_START) {
- tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */
- ni65_stop_start(dev,p);
- break; /* no more Xmit processing .. */
- }
- else
- ni65_stop_start(dev,p);
- }
- if(debuglevel > 2)
- printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
- if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
- dev->stats.tx_errors++;
- tmdp->status2 = 0;
- }
- else {
- dev->stats.tx_bytes -= (short)(tmdp->blen);
- dev->stats.tx_packets++;
- }
-
-#ifdef XMT_VIA_SKB
- if(p->tmd_skb[p->tmdlast]) {
- dev_consume_skb_irq(p->tmd_skb[p->tmdlast]);
- p->tmd_skb[p->tmdlast] = NULL;
- }
-#endif
-
- p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
- if(p->tmdlast == p->tmdnum)
- p->xmit_queued = 0;
- }
- netif_wake_queue(dev);
-}
-
-/*
- * We have received a packet
- */
-static void ni65_recv_intr(struct net_device *dev,int csr0)
-{
- struct rmd *rmdp;
- int rmdstat,len;
- int cnt=0;
- struct priv *p = dev->ml_priv;
-
- rmdp = p->rmdhead + p->rmdnum;
- while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
- {
- cnt++;
- if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
- {
- if(!(rmdstat & RCV_ERR)) {
- if(rmdstat & RCV_START)
- {
- dev->stats.rx_length_errors++;
- printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
- }
- }
- else {
- if(debuglevel > 2)
- printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
- dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
- if(rmdstat & RCV_FRAM)
- dev->stats.rx_frame_errors++;
- if(rmdstat & RCV_OFLO)
- dev->stats.rx_over_errors++;
- if(rmdstat & RCV_CRC)
- dev->stats.rx_crc_errors++;
- if(rmdstat & RCV_BUF_ERR)
- dev->stats.rx_fifo_errors++;
- }
- if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
- dev->stats.rx_errors++;
- }
- else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
- {
-#ifdef RCV_VIA_SKB
- struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
- if (skb)
- skb_reserve(skb,16);
-#else
- struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
-#endif
- if(skb)
- {
- skb_reserve(skb,2);
-#ifdef RCV_VIA_SKB
- if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
- skb_put(skb,len);
- skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
- }
- else {
- struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
- skb_put(skb,R_BUF_SIZE);
- p->recv_skb[p->rmdnum] = skb;
- rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
- skb = skb1;
- skb_trim(skb,len);
- }
-#else
- skb_put(skb,len);
- skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
-#endif
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
- skb->protocol=eth_type_trans(skb,dev);
- netif_rx(skb);
- }
- else
- {
- printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
- dev->stats.rx_dropped++;
- }
- }
- else {
- printk(KERN_INFO "%s: received runt packet\n",dev->name);
- dev->stats.rx_errors++;
- }
- rmdp->blen = -(R_BUF_SIZE-8);
- rmdp->mlen = 0;
- rmdp->u.s.status = RCV_OWN; /* change owner */
- p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
- rmdp = p->rmdhead + p->rmdnum;
- }
-}
-
-/*
- * kick xmitter ..
- */
-
-static void ni65_timeout(struct net_device *dev, unsigned int txqueue)
-{
- int i;
- struct priv *p = dev->ml_priv;
-
- printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
- for(i=0;i<TMDNUM;i++)
- printk("%02x ",p->tmdhead[i].u.s.status);
- printk("\n");
- ni65_lance_reinit(dev);
- netif_trans_update(dev); /* prevent tx timeout */
- netif_wake_queue(dev);
-}
-
-/*
- * Send a packet
- */
-
-static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
- struct net_device *dev)
-{
- struct priv *p = dev->ml_priv;
-
- netif_stop_queue(dev);
-
- if (test_and_set_bit(0, (void*)&p->lock)) {
- printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
- return NETDEV_TX_BUSY;
- }
-
- {
- short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- struct tmd *tmdp;
- unsigned long flags;
-
-#ifdef XMT_VIA_SKB
- if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
-#endif
-
- skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
- skb->len > T_BUF_SIZE ? T_BUF_SIZE :
- skb->len);
- if (len > skb->len)
- memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
- dev_kfree_skb (skb);
-
- spin_lock_irqsave(&p->ring_lock, flags);
- tmdp = p->tmdhead + p->tmdnum;
- tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
- p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);
-
-#ifdef XMT_VIA_SKB
- }
- else {
- spin_lock_irqsave(&p->ring_lock, flags);
-
- tmdp = p->tmdhead + p->tmdnum;
- tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
- p->tmd_skb[p->tmdnum] = skb;
- }
-#endif
- tmdp->blen = -len;
-
- tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
- writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */
-
- p->xmit_queued = 1;
- p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
-
- if(p->tmdnum != p->tmdlast)
- netif_wake_queue(dev);
-
- p->lock = 0;
-
- spin_unlock_irqrestore(&p->ring_lock, flags);
- }
-
- return NETDEV_TX_OK;
-}
-
-static void set_multicast_list(struct net_device *dev)
-{
- if(!ni65_lance_reinit(dev))
- printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
- netif_wake_queue(dev);
-}
-
-#ifdef MODULE
-static struct net_device *dev_ni65;
-
-module_param_hw(irq, int, irq, 0);
-module_param_hw(io, int, ioport, 0);
-module_param_hw(dma, int, dma, 0);
-MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
-MODULE_PARM_DESC(io, "ni6510 I/O base address");
-MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
-
-static int __init ni65_init_module(void)
-{
- dev_ni65 = ni65_probe(-1);
- return PTR_ERR_OR_ZERO(dev_ni65);
-}
-module_init(ni65_init_module);
-
-static void __exit ni65_cleanup_module(void)
-{
- unregister_netdev(dev_ni65);
- cleanup_card(dev_ni65);
- free_netdev(dev_ni65);
-}
-module_exit(ni65_cleanup_module);
-#endif /* MODULE */
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/ni65.h b/drivers/net/ethernet/amd/ni65.h
deleted file mode 100644
index e6217e35edf0..000000000000
--- a/drivers/net/ethernet/amd/ni65.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/* am7990 (lance) definitions
- *
- * This is an extension to the Linux operating system, and is covered by
- * same GNU General Public License that covers that work.
- *
- * Michael Hipp
- * email: mhipp@student.uni-tuebingen.de
- *
- * sources: (mail me or ask archie if you need them)
- * crynwr-packet-driver
- */
-
-/*
- * Control and Status Register 0 (CSR0) bit definitions
- * (R=Readable) (W=Writeable) (S=Set on write) (C=Clear on write)
- *
- */
-
-#define CSR0_ERR 0x8000 /* Error summary (R) */
-#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
-#define CSR0_CERR 0x2000 /* Collision Error (RC) */
-#define CSR0_MISS 0x1000 /* Missed packet (RC) */
-#define CSR0_MERR 0x0800 /* Memory Error (RC) */
-#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
-#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
-#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
-#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
-#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
-#define CSR0_RXON 0x0020 /* Receiver on (R) */
-#define CSR0_TXON 0x0010 /* Transmitter on (R) */
-#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
-#define CSR0_STOP 0x0004 /* Stop (RS) */
-#define CSR0_STRT 0x0002 /* Start (RS) */
-#define CSR0_INIT 0x0001 /* Initialize (RS) */
-
-#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
-/*
- * Initialization Block Mode operation Bit Definitions.
- */
-
-#define M_PROM 0x8000 /* Promiscuous Mode */
-#define M_INTL 0x0040 /* Internal Loopback */
-#define M_DRTY 0x0020 /* Disable Retry */
-#define M_COLL 0x0010 /* Force Collision */
-#define M_DTCR 0x0008 /* Disable Transmit CRC */
-#define M_LOOP 0x0004 /* Loopback */
-#define M_DTX 0x0002 /* Disable the Transmitter */
-#define M_DRX 0x0001 /* Disable the Receiver */
-
-
-/*
- * Receive message descriptor bit definitions.
- */
-
-#define RCV_OWN 0x80 /* owner bit 0 = host, 1 = lance */
-#define RCV_ERR 0x40 /* Error Summary */
-#define RCV_FRAM 0x20 /* Framing Error */
-#define RCV_OFLO 0x10 /* Overflow Error */
-#define RCV_CRC 0x08 /* CRC Error */
-#define RCV_BUF_ERR 0x04 /* Buffer Error */
-#define RCV_START 0x02 /* Start of Packet */
-#define RCV_END 0x01 /* End of Packet */
-
-
-/*
- * Transmit message descriptor bit definitions.
- */
-
-#define XMIT_OWN 0x80 /* owner bit 0 = host, 1 = lance */
-#define XMIT_ERR 0x40 /* Error Summary */
-#define XMIT_RETRY 0x10 /* more than 1 retry needed to Xmit */
-#define XMIT_1_RETRY 0x08 /* one retry needed to Xmit */
-#define XMIT_DEF 0x04 /* Deferred */
-#define XMIT_START 0x02 /* Start of Packet */
-#define XMIT_END 0x01 /* End of Packet */
-
-/*
- * transmit status (2) (valid if XMIT_ERR == 1)
- */
-
-#define XMIT_TDRMASK 0x03ff /* time-domain-reflectometer-value */
-#define XMIT_RTRY 0x0400 /* Failed after 16 retransmissions */
-#define XMIT_LCAR 0x0800 /* Loss of Carrier */
-#define XMIT_LCOL 0x1000 /* Late collision */
-#define XMIT_RESERV 0x2000 /* Reserved */
-#define XMIT_UFLO 0x4000 /* Underflow (late memory) */
-#define XMIT_BUFF 0x8000 /* Buffering error (no ENP) */
-
-struct init_block {
- unsigned short mode;
- unsigned char eaddr[6];
- unsigned char filter[8];
- /* bit 29-31: number of rmd's (power of 2) */
- u32 rrp; /* receive ring pointer (align 8) */
- /* bit 29-31: number of tmd's (power of 2) */
- u32 trp; /* transmit ring pointer (align 8) */
-};
-
-struct rmd { /* Receive Message Descriptor */
- union {
- volatile u32 buffer;
- struct {
- volatile unsigned char dummy[3];
- volatile unsigned char status;
- } s;
- } u;
- volatile short blen;
- volatile unsigned short mlen;
-};
-
-struct tmd {
- union {
- volatile u32 buffer;
- struct {
- volatile unsigned char dummy[3];
- volatile unsigned char status;
- } s;
- } u;
- volatile unsigned short blen;
- volatile unsigned short status2;
-};
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 30ee5329bd7c..0dd391c84c13 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -485,10 +485,10 @@ static int mace_read(mace_private *lp, unsigned int ioaddr, int reg)
unsigned long flags;
switch (reg >> 4) {
- case 0: /* register 0-15 */
+ case 0: /* register 0-15 */
data = inb(ioaddr + AM2150_MACE_BASE + reg);
break;
- case 1: /* register 16-31 */
+ case 1: /* register 16-31 */
spin_lock_irqsave(&lp->bank_lock, flags);
MACEBANK(1);
data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
@@ -512,10 +512,10 @@ static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
unsigned long flags;
switch (reg >> 4) {
- case 0: /* register 0-15 */
+ case 0: /* register 0-15 */
outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg);
break;
- case 1: /* register 16-31 */
+ case 1: /* register 16-31 */
spin_lock_irqsave(&lp->bank_lock, flags);
MACEBANK(1);
outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
@@ -567,13 +567,13 @@ static int mace_init(mace_private *lp, unsigned int ioaddr,
* Or just set ASEL in PHYCC below!
*/
switch (if_port) {
- case 1:
+ case 1:
mace_write(lp, ioaddr, MACE_PLSCC, 0x02);
break;
- case 2:
+ case 2:
mace_write(lp, ioaddr, MACE_PLSCC, 0x00);
break;
- default:
+ default:
mace_write(lp, ioaddr, MACE_PHYCC, /* ASEL */ 4);
/* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden,
and the MACE device will automatically select the operating media
@@ -651,7 +651,7 @@ static int nmclan_config(struct pcmcia_device *link)
} else {
pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
sig[0], sig[1]);
- return -ENODEV;
+ goto failed;
}
}
@@ -815,7 +815,7 @@ static int mace_close(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
@@ -918,7 +918,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
int status;
int IntrCnt = MACE_MAX_IR_ITERATIONS;
- if (dev == NULL) {
+ if (!dev) {
pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n",
irq);
return IRQ_NONE;
@@ -1102,7 +1102,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb != NULL) {
+ if (skb) {
skb_reserve(skb, 2);
insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
if (pkt_len & 1)
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index f5c50ff377ff..72db9f9e7bee 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -488,7 +488,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * entries,
&new_ring_dma_addr, GFP_ATOMIC);
- if (new_tx_ring == NULL)
+ if (!new_tx_ring)
return;
new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
@@ -547,7 +547,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * entries,
&new_ring_dma_addr, GFP_ATOMIC);
- if (new_rx_ring == NULL)
+ if (!new_rx_ring)
return;
new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
@@ -797,9 +797,9 @@ static void pcnet32_get_drvinfo(struct net_device *dev,
{
struct pcnet32_private *lp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
if (lp->pci_dev)
- strlcpy(info->bus_info, pci_name(lp->pci_dev),
+ strscpy(info->bus_info, pci_name(lp->pci_dev),
sizeof(info->bus_info));
else
snprintf(info->bus_info, sizeof(info->bus_info),
@@ -860,7 +860,9 @@ static int pcnet32_nway_reset(struct net_device *dev)
}
static void pcnet32_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct pcnet32_private *lp = netdev_priv(dev);
@@ -871,7 +873,9 @@ static void pcnet32_get_ringparam(struct net_device *dev,
}
static int pcnet32_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct pcnet32_private *lp = netdev_priv(dev);
unsigned long flags;
@@ -1245,7 +1249,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
} else
skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
return;
}
@@ -1877,7 +1881,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
/* napi.weight is used in both the napi and non-napi cases */
lp->napi.weight = lp->rx_ring_size / 2;
- netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);
+ netif_napi_add_weight(dev, &lp->napi, pcnet32_poll,
+ lp->rx_ring_size / 2);
if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
@@ -2013,7 +2018,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
&lp->tx_ring_dma_addr, GFP_KERNEL);
- if (lp->tx_ring == NULL) {
+ if (!lp->tx_ring) {
netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
return -ENOMEM;
}
@@ -2021,7 +2026,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
&lp->rx_ring_dma_addr, GFP_KERNEL);
- if (lp->rx_ring == NULL) {
+ if (!lp->rx_ring) {
netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
return -ENOMEM;
}
@@ -2360,7 +2365,7 @@ static int pcnet32_init_ring(struct net_device *dev)
for (i = 0; i < lp->rx_ring_size; i++) {
struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
- if (rx_skbuff == NULL) {
+ if (!rx_skbuff) {
lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
rx_skbuff = lp->rx_skbuff[i];
if (!rx_skbuff) {
diff --git a/drivers/net/ethernet/amd/pds_core/Makefile b/drivers/net/ethernet/amd/pds_core/Makefile
new file mode 100644
index 000000000000..8239742e681f
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2023 Advanced Micro Devices, Inc.
+
+obj-$(CONFIG_PDS_CORE) := pds_core.o
+
+pds_core-y := main.o \
+ devlink.o \
+ auxbus.o \
+ dev.o \
+ adminq.o \
+ core.o \
+ debugfs.o \
+ fw.o
diff --git a/drivers/net/ethernet/amd/pds_core/adminq.c b/drivers/net/ethernet/amd/pds_core/adminq.c
new file mode 100644
index 000000000000..045fe133f6ee
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/adminq.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/dynamic_debug.h>
+
+#include "core.h"
+
+struct pdsc_wait_context {
+ struct pdsc_qcq *qcq;
+ struct completion wait_completion;
+};
+
+static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
+{
+ union pds_core_notifyq_comp *comp;
+ struct pdsc *pdsc = qcq->pdsc;
+ struct pdsc_cq *cq = &qcq->cq;
+ struct pdsc_cq_info *cq_info;
+ int nq_work = 0;
+ u64 eid;
+
+ cq_info = &cq->info[cq->tail_idx];
+ comp = cq_info->comp;
+ eid = le64_to_cpu(comp->event.eid);
+ while (eid > pdsc->last_eid) {
+ u16 ecode = le16_to_cpu(comp->event.ecode);
+
+ switch (ecode) {
+ case PDS_EVENT_LINK_CHANGE:
+ dev_info(pdsc->dev, "NotifyQ LINK_CHANGE ecode %d eid %lld\n",
+ ecode, eid);
+ pdsc_notify(PDS_EVENT_LINK_CHANGE, comp);
+ break;
+
+ case PDS_EVENT_RESET:
+ dev_info(pdsc->dev, "NotifyQ RESET ecode %d eid %lld\n",
+ ecode, eid);
+ pdsc_notify(PDS_EVENT_RESET, comp);
+ break;
+
+ case PDS_EVENT_XCVR:
+ dev_info(pdsc->dev, "NotifyQ XCVR ecode %d eid %lld\n",
+ ecode, eid);
+ break;
+
+ default:
+ dev_info(pdsc->dev, "NotifyQ ecode %d eid %lld\n",
+ ecode, eid);
+ break;
+ }
+
+ pdsc->last_eid = eid;
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+ cq_info = &cq->info[cq->tail_idx];
+ comp = cq_info->comp;
+ eid = le64_to_cpu(comp->event.eid);
+
+ nq_work++;
+ }
+
+ qcq->accum_work += nq_work;
+
+ return nq_work;
+}
+
+void pdsc_process_adminq(struct pdsc_qcq *qcq)
+{
+ union pds_core_adminq_comp *comp;
+ struct pdsc_queue *q = &qcq->q;
+ struct pdsc *pdsc = qcq->pdsc;
+ struct pdsc_cq *cq = &qcq->cq;
+ struct pdsc_q_info *q_info;
+ unsigned long irqflags;
+ int nq_work = 0;
+ int aq_work = 0;
+ int credits;
+
+ /* Don't process AdminQ when shutting down */
+ if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
+ dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
+ __func__);
+ return;
+ }
+
+ /* Check for NotifyQ event */
+ nq_work = pdsc_process_notifyq(&pdsc->notifyqcq);
+
+ /* Check for empty queue, which can happen if the interrupt was
+ * for a NotifyQ event and there are no new AdminQ completions.
+ */
+ if (q->tail_idx == q->head_idx)
+ goto credits;
+
+ /* Find the first completion to clean,
+ * run the callback in the related q_info,
+ * and continue while we still match done color
+ */
+ spin_lock_irqsave(&pdsc->adminq_lock, irqflags);
+ comp = cq->info[cq->tail_idx].comp;
+ while (pdsc_color_match(comp->color, cq->done_color)) {
+ q_info = &q->info[q->tail_idx];
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+
+ /* Copy out the completion data */
+ memcpy(q_info->dest, comp, sizeof(*comp));
+
+ complete_all(&q_info->wc->wait_completion);
+
+ if (cq->tail_idx == cq->num_descs - 1)
+ cq->done_color = !cq->done_color;
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+ comp = cq->info[cq->tail_idx].comp;
+
+ aq_work++;
+ }
+ spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);
+
+ qcq->accum_work += aq_work;
+
+credits:
+ /* Return the interrupt credits, one for each completion */
+ credits = nq_work + aq_work;
+ if (credits)
+ pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
+ credits,
+ PDS_CORE_INTR_CRED_REARM);
+}
+
+void pdsc_work_thread(struct work_struct *work)
+{
+ struct pdsc_qcq *qcq = container_of(work, struct pdsc_qcq, work);
+
+ pdsc_process_adminq(qcq);
+}
+
+irqreturn_t pdsc_adminq_isr(int irq, void *data)
+{
+ struct pdsc_qcq *qcq = data;
+ struct pdsc *pdsc = qcq->pdsc;
+
+ /* Don't process AdminQ when shutting down */
+ if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
+ dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
+ __func__);
+ return IRQ_HANDLED;
+ }
+
+ queue_work(pdsc->wq, &qcq->work);
+ pds_core_intr_mask(&pdsc->intr_ctrl[irq], PDS_CORE_INTR_MASK_CLEAR);
+
+ return IRQ_HANDLED;
+}
+
+static int __pdsc_adminq_post(struct pdsc *pdsc,
+ struct pdsc_qcq *qcq,
+ union pds_core_adminq_cmd *cmd,
+ union pds_core_adminq_comp *comp,
+ struct pdsc_wait_context *wc)
+{
+ struct pdsc_queue *q = &qcq->q;
+ struct pdsc_q_info *q_info;
+ unsigned long irqflags;
+ unsigned int avail;
+ int index;
+ int ret;
+
+ spin_lock_irqsave(&pdsc->adminq_lock, irqflags);
+
+ /* Check for space in the queue */
+ avail = q->tail_idx;
+ if (q->head_idx >= avail)
+ avail += q->num_descs - q->head_idx - 1;
+ else
+ avail -= q->head_idx + 1;
+ if (!avail) {
+ ret = -ENOSPC;
+ goto err_out_unlock;
+ }
+
+ /* Check that the FW is running */
+ if (!pdsc_is_fw_running(pdsc)) {
+ u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
+
+ dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
+ __func__, fw_status);
+ ret = -ENXIO;
+
+ goto err_out_unlock;
+ }
+
+ /* Post the request */
+ index = q->head_idx;
+ q_info = &q->info[index];
+ q_info->wc = wc;
+ q_info->dest = comp;
+ memcpy(q_info->desc, cmd, sizeof(*cmd));
+
+ dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
+ q->head_idx, q->tail_idx);
+ dev_dbg(pdsc->dev, "post admin queue command:\n");
+ dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
+ cmd, sizeof(*cmd), true);
+
+ q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
+
+ pds_core_dbell_ring(pdsc->kern_dbpage,
+ q->hw_type, q->dbval | q->head_idx);
+ ret = index;
+
+err_out_unlock:
+ spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);
+ return ret;
+}
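/*
 * Editorial sketch, not part of the patch: the free-slot computation from
 * __pdsc_adminq_post() above, pulled out on its own (the helper name is
 * made up for illustration).  One descriptor is always left unused so a
 * full ring can be told apart from an empty one, hence the "- 1" in both
 * branches.
 */
static unsigned int pdsc_ring_space(unsigned int head_idx,
				    unsigned int tail_idx,
				    unsigned int num_descs)
{
	unsigned int avail = tail_idx;

	if (head_idx >= avail)
		avail += num_descs - head_idx - 1;
	else
		avail -= head_idx + 1;

	return avail;	/* 0 means the queue is full; post returns -ENOSPC */
}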
+
+int pdsc_adminq_post(struct pdsc *pdsc,
+ union pds_core_adminq_cmd *cmd,
+ union pds_core_adminq_comp *comp,
+ bool fast_poll)
+{
+ struct pdsc_wait_context wc = {
+ .wait_completion =
+ COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
+ };
+ unsigned long poll_interval = 1;
+ unsigned long poll_jiffies;
+ unsigned long time_limit;
+ unsigned long time_start;
+ unsigned long time_done;
+ unsigned long remaining;
+ int err = 0;
+ int index;
+
+ wc.qcq = &pdsc->adminqcq;
+ index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
+ if (index < 0) {
+ err = index;
+ goto err_out;
+ }
+
+ time_start = jiffies;
+ time_limit = time_start + HZ * pdsc->devcmd_timeout;
+ do {
+ /* Timeslice the actual wait to catch IO errors etc early */
+ poll_jiffies = msecs_to_jiffies(poll_interval);
+ remaining = wait_for_completion_timeout(&wc.wait_completion,
+ poll_jiffies);
+ if (remaining)
+ break;
+
+ if (!pdsc_is_fw_running(pdsc)) {
+ u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
+
+ dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
+ __func__, fw_status);
+ err = -ENXIO;
+ break;
+ }
+
+ /* When fast_poll is not requested, prevent aggressive polling
+ * on failures due to timeouts by doing exponential back off.
+ */
+ if (!fast_poll && poll_interval < PDSC_ADMINQ_MAX_POLL_INTERVAL)
+ poll_interval <<= 1;
+ } while (time_before(jiffies, time_limit));
+ time_done = jiffies;
+ dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
+ __func__, jiffies_to_msecs(time_done - time_start));
+
+ /* Check the results */
+ if (time_after_eq(time_done, time_limit))
+ err = -ETIMEDOUT;
+
+ dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
+ dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
+ comp, sizeof(*comp), true);
+
+ if (remaining && comp->status)
+ err = pdsc_err_to_errno(comp->status);
+
+err_out:
+ if (err) {
+ dev_dbg(pdsc->dev, "%s: opcode %d status %d err %pe\n",
+ __func__, cmd->opcode, comp->status, ERR_PTR(err));
+ if (err == -ENXIO || err == -ETIMEDOUT)
+ queue_work(pdsc->wq, &pdsc->health_work);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pdsc_adminq_post);
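/*
 * Illustration, not part of the patch, of the slow-path polling in
 * pdsc_adminq_post() above: with fast_poll == false, every timed-out
 * wait_for_completion_timeout() doubles poll_interval, so successive waits
 * run 1, 2, 4, 8, ... ms; doubling stops once PDSC_ADMINQ_MAX_POLL_INTERVAL
 * is reached, while the overall deadline stays at devcmd_timeout seconds.
 * With fast_poll == true the interval stays at 1 ms for low-latency callers.
 */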
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
new file mode 100644
index 000000000000..561af8e5b3ea
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/pci.h>
+
+#include "core.h"
+#include <linux/pds/pds_auxbus.h>
+
+/**
+ * pds_client_register - Link the client to the firmware
+ * @pf_pdev: ptr to the PF driver struct
+ * @devname: name that includes service info, e.g. pds_core.vDPA
+ *
+ * Return: positive client_id on success, or
+ * negative for error
+ */
+int pds_client_register(struct pci_dev *pf_pdev, char *devname)
+{
+ union pds_core_adminq_comp comp = {};
+ union pds_core_adminq_cmd cmd = {};
+ struct pdsc *pf;
+ int err;
+ u16 ci;
+
+ pf = pci_get_drvdata(pf_pdev);
+ if (pf->state)
+ return -ENXIO;
+
+ cmd.client_reg.opcode = PDS_AQ_CMD_CLIENT_REG;
+ strscpy(cmd.client_reg.devname, devname,
+ sizeof(cmd.client_reg.devname));
+
+ err = pdsc_adminq_post(pf, &cmd, &comp, false);
+ if (err) {
+ dev_info(pf->dev, "register dev_name %s with DSC failed, status %d: %pe\n",
+ devname, comp.status, ERR_PTR(err));
+ return err;
+ }
+
+ ci = le16_to_cpu(comp.client_reg.client_id);
+ if (!ci) {
+ dev_err(pf->dev, "%s: device returned null client_id\n",
+ __func__);
+ return -EIO;
+ }
+
+ dev_dbg(pf->dev, "%s: device returned client_id %d for %s\n",
+ __func__, ci, devname);
+
+ return ci;
+}
+EXPORT_SYMBOL_GPL(pds_client_register);
+
+/**
+ * pds_client_unregister - Unlink the client from the firmware
+ * @pf_pdev: ptr to the PF driver struct
+ * @client_id: id returned from pds_client_register()
+ *
+ * Return: 0 on success, or
+ * negative for error
+ */
+int pds_client_unregister(struct pci_dev *pf_pdev, u16 client_id)
+{
+ union pds_core_adminq_comp comp = {};
+ union pds_core_adminq_cmd cmd = {};
+ struct pdsc *pf;
+ int err;
+
+ pf = pci_get_drvdata(pf_pdev);
+ if (pf->state)
+ return -ENXIO;
+
+ cmd.client_unreg.opcode = PDS_AQ_CMD_CLIENT_UNREG;
+ cmd.client_unreg.client_id = cpu_to_le16(client_id);
+
+ err = pdsc_adminq_post(pf, &cmd, &comp, false);
+ if (err)
+ dev_info(pf->dev, "unregister client_id %d failed, status %d: %pe\n",
+ client_id, comp.status, ERR_PTR(err));
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pds_client_unregister);
+
+/**
+ * pds_client_adminq_cmd - Process an adminq request for the client
+ * @padev: ptr to the client device
+ * @req: ptr to buffer with request
+ * @req_len: length of actual struct used for request
+ * @resp: ptr to buffer where answer is to be copied
+ * @flags: optional flags from pds_core_adminq_flags
+ *
+ * Return: 0 on success, or
+ * negative for error
+ *
+ * Client sends pointers to request and response buffers
+ * Core copies request data into pds_core_client_request_cmd
+ * Core sets other fields as needed
+ * Core posts to AdminQ
+ * Core copies completion data into response buffer
+ */
+int pds_client_adminq_cmd(struct pds_auxiliary_dev *padev,
+ union pds_core_adminq_cmd *req,
+ size_t req_len,
+ union pds_core_adminq_comp *resp,
+ u64 flags)
+{
+ union pds_core_adminq_cmd cmd = {};
+ struct pci_dev *pf_pdev;
+ struct pdsc *pf;
+ size_t cp_len;
+ int err;
+
+ pf_pdev = pci_physfn(padev->vf_pdev);
+ pf = pci_get_drvdata(pf_pdev);
+
+ dev_dbg(pf->dev, "%s: %s opcode %d\n",
+ __func__, dev_name(&padev->aux_dev.dev), req->opcode);
+
+ if (pf->state)
+ return -ENXIO;
+
+ /* Wrap the client's request */
+ cmd.client_request.opcode = PDS_AQ_CMD_CLIENT_CMD;
+ cmd.client_request.client_id = cpu_to_le16(padev->client_id);
+ cp_len = min_t(size_t, req_len, sizeof(cmd.client_request.client_cmd));
+ memcpy(cmd.client_request.client_cmd, req, cp_len);
+
+ err = pdsc_adminq_post(pf, &cmd, resp,
+ !!(flags & PDS_AQ_FLAG_FASTPOLL));
+ if (err && err != -EAGAIN)
+ dev_info(pf->dev, "client admin cmd failed: %pe\n",
+ ERR_PTR(err));
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(pds_client_adminq_cmd);
+
+static void pdsc_auxbus_dev_release(struct device *dev)
+{
+ struct pds_auxiliary_dev *padev =
+ container_of(dev, struct pds_auxiliary_dev, aux_dev.dev);
+
+ kfree(padev);
+}
+
+static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
+ struct pdsc *pf,
+ u16 client_id,
+ char *name)
+{
+ struct auxiliary_device *aux_dev;
+ struct pds_auxiliary_dev *padev;
+ int err;
+
+ padev = kzalloc(sizeof(*padev), GFP_KERNEL);
+ if (!padev)
+ return ERR_PTR(-ENOMEM);
+
+ padev->vf_pdev = cf->pdev;
+ padev->client_id = client_id;
+
+ aux_dev = &padev->aux_dev;
+ aux_dev->name = name;
+ aux_dev->id = cf->uid;
+ aux_dev->dev.parent = cf->dev;
+ aux_dev->dev.release = pdsc_auxbus_dev_release;
+
+ err = auxiliary_device_init(aux_dev);
+ if (err < 0) {
+ dev_warn(cf->dev, "auxiliary_device_init of %s failed: %pe\n",
+ name, ERR_PTR(err));
+ goto err_out;
+ }
+
+ err = auxiliary_device_add(aux_dev);
+ if (err) {
+ dev_warn(cf->dev, "auxiliary_device_add of %s failed: %pe\n",
+ name, ERR_PTR(err));
+ goto err_out_uninit;
+ }
+
+ return padev;
+
+err_out_uninit:
+ auxiliary_device_uninit(aux_dev);
+err_out:
+ kfree(padev);
+ return ERR_PTR(err);
+}
+
+int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
+{
+ struct pds_auxiliary_dev *padev;
+ int err = 0;
+
+ mutex_lock(&pf->config_lock);
+
+ padev = pf->vfs[cf->vf_id].padev;
+ if (padev) {
+ pds_client_unregister(pf->pdev, padev->client_id);
+ auxiliary_device_delete(&padev->aux_dev);
+ auxiliary_device_uninit(&padev->aux_dev);
+ padev->client_id = 0;
+ }
+ pf->vfs[cf->vf_id].padev = NULL;
+
+ mutex_unlock(&pf->config_lock);
+ return err;
+}
+
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
+{
+ struct pds_auxiliary_dev *padev;
+ enum pds_core_vif_types vt;
+ char devname[PDS_DEVNAME_LEN];
+ u16 vt_support;
+ int client_id;
+ int err = 0;
+
+ mutex_lock(&pf->config_lock);
+
+ /* We only support vDPA so far, so it is the only type we
+ * verify is available in the Core device and enabled via the
+ * devlink param. In the future this might become a loop over
+ * several VIF types.
+ */
+
+ /* Verify that the type is supported and enabled. It is not
+ * an error if there is no auxbus device support for this
+ * VF, it just means something else needs to happen with it.
+ */
+ vt = PDS_DEV_TYPE_VDPA;
+ vt_support = !!le16_to_cpu(pf->dev_ident.vif_types[vt]);
+ if (!(vt_support &&
+ pf->viftype_status[vt].supported &&
+ pf->viftype_status[vt].enabled))
+ goto out_unlock;
+
+ /* Need to register with FW and get the client_id before
+ * creating the aux device so that the aux client can run
+ * adminq commands as part of its probe
+ */
+ snprintf(devname, sizeof(devname), "%s.%s.%d",
+ PDS_CORE_DRV_NAME, pf->viftype_status[vt].name, cf->uid);
+ client_id = pds_client_register(pf->pdev, devname);
+ if (client_id < 0) {
+ err = client_id;
+ goto out_unlock;
+ }
+
+ padev = pdsc_auxbus_dev_register(cf, pf, client_id,
+ pf->viftype_status[vt].name);
+ if (IS_ERR(padev)) {
+ pds_client_unregister(pf->pdev, client_id);
+ err = PTR_ERR(padev);
+ goto out_unlock;
+ }
+ pf->vfs[cf->vf_id].padev = padev;
+
+out_unlock:
+ mutex_unlock(&pf->config_lock);
+ return err;
+}
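/*
 * Usage sketch, not part of the patch (the uid value 1 is assumed): the
 * devname handed to pds_client_register() in pdsc_auxbus_dev_add() above is
 * "<driver>.<vif name>.<vf uid>", e.g. "pds_core.vDPA.1".  The returned
 * client_id is stored in the auxiliary device and carried in every request
 * later wrapped by pds_client_adminq_cmd().
 */
char devname[PDS_DEVNAME_LEN];
int client_id;

snprintf(devname, sizeof(devname), "%s.%s.%d",
	 PDS_CORE_DRV_NAME, pf->viftype_status[PDS_DEV_TYPE_VDPA].name, 1);
client_id = pds_client_register(pf->pdev, devname);	/* < 0 on failure */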
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
new file mode 100644
index 000000000000..483a070d96fa
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
+#include "core.h"
+
+static BLOCKING_NOTIFIER_HEAD(pds_notify_chain);
+
+int pdsc_register_notify(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&pds_notify_chain, nb);
+}
+EXPORT_SYMBOL_GPL(pdsc_register_notify);
+
+void pdsc_unregister_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&pds_notify_chain, nb);
+}
+EXPORT_SYMBOL_GPL(pdsc_unregister_notify);
+
+void pdsc_notify(unsigned long event, void *data)
+{
+ blocking_notifier_call_chain(&pds_notify_chain, event, data);
+}
+
+void pdsc_intr_free(struct pdsc *pdsc, int index)
+{
+ struct pdsc_intr_info *intr_info;
+
+ if (index >= pdsc->nintrs || index < 0) {
+ WARN(true, "bad intr index %d\n", index);
+ return;
+ }
+
+ intr_info = &pdsc->intr_info[index];
+ if (!intr_info->vector)
+ return;
+ dev_dbg(pdsc->dev, "%s: idx %d vec %d name %s\n",
+ __func__, index, intr_info->vector, intr_info->name);
+
+ pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);
+ pds_core_intr_clean(&pdsc->intr_ctrl[index]);
+
+ free_irq(intr_info->vector, intr_info->data);
+
+ memset(intr_info, 0, sizeof(*intr_info));
+}
+
+int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
+ irq_handler_t handler, void *data)
+{
+ struct pdsc_intr_info *intr_info;
+ unsigned int index;
+ int err;
+
+ /* Find the first available interrupt */
+ for (index = 0; index < pdsc->nintrs; index++)
+ if (!pdsc->intr_info[index].vector)
+ break;
+ if (index >= pdsc->nintrs) {
+ dev_warn(pdsc->dev, "%s: no intr, index=%d nintrs=%d\n",
+ __func__, index, pdsc->nintrs);
+ return -ENOSPC;
+ }
+
+ pds_core_intr_clean_flags(&pdsc->intr_ctrl[index],
+ PDS_CORE_INTR_CRED_RESET_COALESCE);
+
+ intr_info = &pdsc->intr_info[index];
+
+ intr_info->index = index;
+ intr_info->data = data;
+ strscpy(intr_info->name, name, sizeof(intr_info->name));
+
+ /* Get the OS vector number for the interrupt */
+ err = pci_irq_vector(pdsc->pdev, index);
+ if (err < 0) {
+ dev_err(pdsc->dev, "failed to get intr vector index %d: %pe\n",
+ index, ERR_PTR(err));
+ goto err_out_free_intr;
+ }
+ intr_info->vector = err;
+
+ /* Init the device's intr mask */
+ pds_core_intr_clean(&pdsc->intr_ctrl[index]);
+ pds_core_intr_mask_assert(&pdsc->intr_ctrl[index], 1);
+ pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);
+
+ /* Register the isr with a name */
+ err = request_irq(intr_info->vector, handler, 0, intr_info->name, data);
+ if (err) {
+ dev_err(pdsc->dev, "failed to get intr irq vector %d: %pe\n",
+ intr_info->vector, ERR_PTR(err));
+ goto err_out_free_intr;
+ }
+
+ return index;
+
+err_out_free_intr:
+ pdsc_intr_free(pdsc, index);
+ return err;
+}
+
+static void pdsc_qcq_intr_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
+{
+ if (!(qcq->flags & PDS_CORE_QCQ_F_INTR) ||
+ qcq->intx == PDS_CORE_INTR_INDEX_NOT_ASSIGNED)
+ return;
+
+ pdsc_intr_free(pdsc, qcq->intx);
+ qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
+}
+
+static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
+{
+ char name[PDSC_INTR_NAME_MAX_SZ];
+ int index;
+
+ if (!(qcq->flags & PDS_CORE_QCQ_F_INTR)) {
+ qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
+ return 0;
+ }
+
+ snprintf(name, sizeof(name), "%s-%d-%s",
+ PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name);
+ index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, qcq);
+ if (index < 0)
+ return index;
+ qcq->intx = index;
+
+ return 0;
+}
+
+void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
+{
+ struct device *dev = pdsc->dev;
+
+ if (!(qcq && qcq->pdsc))
+ return;
+
+ pdsc_debugfs_del_qcq(qcq);
+
+ pdsc_qcq_intr_free(pdsc, qcq);
+
+ if (qcq->q_base)
+ dma_free_coherent(dev, qcq->q_size,
+ qcq->q_base, qcq->q_base_pa);
+
+ if (qcq->cq_base)
+ dma_free_coherent(dev, qcq->cq_size,
+ qcq->cq_base, qcq->cq_base_pa);
+
+ if (qcq->cq.info)
+ vfree(qcq->cq.info);
+
+ if (qcq->q.info)
+ vfree(qcq->q.info);
+
+ memset(qcq, 0, sizeof(*qcq));
+}
+
+static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
+{
+ struct pdsc_q_info *cur;
+ unsigned int i;
+
+ q->base = base;
+ q->base_pa = base_pa;
+
+ for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
+ cur->desc = base + (i * q->desc_size);
+}
+
+static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
+{
+ struct pdsc_cq_info *cur;
+ unsigned int i;
+
+ cq->base = base;
+ cq->base_pa = base_pa;
+
+ for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
+ cur->comp = base + (i * cq->desc_size);
+}
+
+int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
+ const char *name, unsigned int flags, unsigned int num_descs,
+ unsigned int desc_size, unsigned int cq_desc_size,
+ unsigned int pid, struct pdsc_qcq *qcq)
+{
+ struct device *dev = pdsc->dev;
+ void *q_base, *cq_base;
+ dma_addr_t cq_base_pa;
+ dma_addr_t q_base_pa;
+ int err;
+
+ qcq->q.info = vzalloc(num_descs * sizeof(*qcq->q.info));
+ if (!qcq->q.info) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ qcq->pdsc = pdsc;
+ qcq->flags = flags;
+ INIT_WORK(&qcq->work, pdsc_work_thread);
+
+ qcq->q.type = type;
+ qcq->q.index = index;
+ qcq->q.num_descs = num_descs;
+ qcq->q.desc_size = desc_size;
+ qcq->q.tail_idx = 0;
+ qcq->q.head_idx = 0;
+ qcq->q.pid = pid;
+ snprintf(qcq->q.name, sizeof(qcq->q.name), "%s%u", name, index);
+
+ err = pdsc_qcq_intr_alloc(pdsc, qcq);
+ if (err)
+ goto err_out_free_q_info;
+
+ qcq->cq.info = vzalloc(num_descs * sizeof(*qcq->cq.info));
+ if (!qcq->cq.info) {
+ err = -ENOMEM;
+ goto err_out_free_irq;
+ }
+
+ qcq->cq.bound_intr = &pdsc->intr_info[qcq->intx];
+ qcq->cq.num_descs = num_descs;
+ qcq->cq.desc_size = cq_desc_size;
+ qcq->cq.tail_idx = 0;
+ qcq->cq.done_color = 1;
+
+ if (flags & PDS_CORE_QCQ_F_NOTIFYQ) {
+ /* q & cq need to be contiguous in case of notifyq */
+ qcq->q_size = PDS_PAGE_SIZE +
+ ALIGN(num_descs * desc_size, PDS_PAGE_SIZE) +
+ ALIGN(num_descs * cq_desc_size, PDS_PAGE_SIZE);
+ qcq->q_base = dma_alloc_coherent(dev,
+ qcq->q_size + qcq->cq_size,
+ &qcq->q_base_pa,
+ GFP_KERNEL);
+ if (!qcq->q_base) {
+ err = -ENOMEM;
+ goto err_out_free_cq_info;
+ }
+ q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
+ q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
+ pdsc_q_map(&qcq->q, q_base, q_base_pa);
+
+ cq_base = PTR_ALIGN(q_base +
+ ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
+ PDS_PAGE_SIZE);
+ cq_base_pa = ALIGN(qcq->q_base_pa +
+ ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
+ PDS_PAGE_SIZE);
+
+ } else {
+ /* q DMA descriptors */
+ qcq->q_size = PDS_PAGE_SIZE + (num_descs * desc_size);
+ qcq->q_base = dma_alloc_coherent(dev, qcq->q_size,
+ &qcq->q_base_pa,
+ GFP_KERNEL);
+ if (!qcq->q_base) {
+ err = -ENOMEM;
+ goto err_out_free_cq_info;
+ }
+ q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
+ q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
+ pdsc_q_map(&qcq->q, q_base, q_base_pa);
+
+ /* cq DMA descriptors */
+ qcq->cq_size = PDS_PAGE_SIZE + (num_descs * cq_desc_size);
+ qcq->cq_base = dma_alloc_coherent(dev, qcq->cq_size,
+ &qcq->cq_base_pa,
+ GFP_KERNEL);
+ if (!qcq->cq_base) {
+ err = -ENOMEM;
+ goto err_out_free_q;
+ }
+ cq_base = PTR_ALIGN(qcq->cq_base, PDS_PAGE_SIZE);
+ cq_base_pa = ALIGN(qcq->cq_base_pa, PDS_PAGE_SIZE);
+ }
+
+ pdsc_cq_map(&qcq->cq, cq_base, cq_base_pa);
+ qcq->cq.bound_q = &qcq->q;
+
+ pdsc_debugfs_add_qcq(pdsc, qcq);
+
+ return 0;
+
+err_out_free_q:
+ dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
+err_out_free_cq_info:
+ vfree(qcq->cq.info);
+err_out_free_irq:
+ pdsc_qcq_intr_free(pdsc, qcq);
+err_out_free_q_info:
+ vfree(qcq->q.info);
+ memset(qcq, 0, sizeof(*qcq));
+err_out:
+ dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
+ return err;
+}
+
+static int pdsc_core_init(struct pdsc *pdsc)
+{
+ union pds_core_dev_comp comp = {};
+ union pds_core_dev_cmd cmd = {
+ .init.opcode = PDS_CORE_CMD_INIT,
+ };
+ struct pds_core_dev_init_data_out cido;
+ struct pds_core_dev_init_data_in cidi;
+ u32 dbid_count;
+ u32 dbpage_num;
+ size_t sz;
+ int err;
+
+ cidi.adminq_q_base = cpu_to_le64(pdsc->adminqcq.q_base_pa);
+ cidi.adminq_cq_base = cpu_to_le64(pdsc->adminqcq.cq_base_pa);
+ cidi.notifyq_cq_base = cpu_to_le64(pdsc->notifyqcq.cq.base_pa);
+ cidi.flags = cpu_to_le32(PDS_CORE_QINIT_F_IRQ | PDS_CORE_QINIT_F_ENA);
+ cidi.intr_index = cpu_to_le16(pdsc->adminqcq.intx);
+ cidi.adminq_ring_size = ilog2(pdsc->adminqcq.q.num_descs);
+ cidi.notifyq_ring_size = ilog2(pdsc->notifyqcq.q.num_descs);
+
+ mutex_lock(&pdsc->devcmd_lock);
+
+ sz = min_t(size_t, sizeof(cidi), sizeof(pdsc->cmd_regs->data));
+ memcpy_toio(&pdsc->cmd_regs->data, &cidi, sz);
+
+ err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+ if (!err) {
+ sz = min_t(size_t, sizeof(cido), sizeof(pdsc->cmd_regs->data));
+ memcpy_fromio(&cido, &pdsc->cmd_regs->data, sz);
+ }
+
+ mutex_unlock(&pdsc->devcmd_lock);
+ if (err) {
+ dev_err(pdsc->dev, "Device init command failed: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ pdsc->hw_index = le32_to_cpu(cido.core_hw_index);
+
+ dbid_count = le32_to_cpu(pdsc->dev_ident.ndbpgs_per_lif);
+ dbpage_num = pdsc->hw_index * dbid_count;
+ pdsc->kern_dbpage = pdsc_map_dbpage(pdsc, dbpage_num);
+ if (!pdsc->kern_dbpage) {
+ dev_err(pdsc->dev, "Cannot map dbpage, aborting\n");
+ return -ENOMEM;
+ }
+
+ pdsc->adminqcq.q.hw_type = cido.adminq_hw_type;
+ pdsc->adminqcq.q.hw_index = le32_to_cpu(cido.adminq_hw_index);
+ pdsc->adminqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->adminqcq.q.hw_index);
+
+ pdsc->notifyqcq.q.hw_type = cido.notifyq_hw_type;
+ pdsc->notifyqcq.q.hw_index = le32_to_cpu(cido.notifyq_hw_index);
+ pdsc->notifyqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->notifyqcq.q.hw_index);
+
+ pdsc->last_eid = 0;
+
+ return err;
+}
+
+static struct pdsc_viftype pdsc_viftype_defaults[] = {
+ [PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
+ .vif_id = PDS_DEV_TYPE_VDPA,
+ .dl_id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET },
+ [PDS_DEV_TYPE_MAX] = {}
+};
+
+static int pdsc_viftypes_init(struct pdsc *pdsc)
+{
+ enum pds_core_vif_types vt;
+
+ pdsc->viftype_status = kzalloc(sizeof(pdsc_viftype_defaults),
+ GFP_KERNEL);
+ if (!pdsc->viftype_status)
+ return -ENOMEM;
+
+ for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
+ bool vt_support;
+
+ if (!pdsc_viftype_defaults[vt].name)
+ continue;
+
+ /* Grab the defaults */
+ pdsc->viftype_status[vt] = pdsc_viftype_defaults[vt];
+
+ /* See what the Core device has for support */
+ vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);
+ dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
+ pdsc->viftype_status[vt].name,
+ vt_support ? "" : "not ");
+
+ pdsc->viftype_status[vt].supported = vt_support;
+ }
+
+ return 0;
+}
+
+int pdsc_setup(struct pdsc *pdsc, bool init)
+{
+ int numdescs;
+ int err;
+
+ if (init)
+ err = pdsc_dev_init(pdsc);
+ else
+ err = pdsc_dev_reinit(pdsc);
+ if (err)
+ return err;
+
+ /* Scale the descriptor ring length based on number of CPUs and VFs */
+ numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
+ numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
+ numdescs = roundup_pow_of_two(numdescs);
+ err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
+ PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
+ numdescs,
+ sizeof(union pds_core_adminq_cmd),
+ sizeof(union pds_core_adminq_comp),
+ 0, &pdsc->adminqcq);
+ if (err)
+ goto err_out_teardown;
+
+ err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
+ PDS_CORE_QCQ_F_NOTIFYQ,
+ PDSC_NOTIFYQ_LENGTH,
+ sizeof(struct pds_core_notifyq_cmd),
+ sizeof(union pds_core_notifyq_comp),
+ 0, &pdsc->notifyqcq);
+ if (err)
+ goto err_out_teardown;
+
+ /* NotifyQ rides on the AdminQ interrupt */
+ pdsc->notifyqcq.intx = pdsc->adminqcq.intx;
+
+ /* Set up the Core with the AdminQ and NotifyQ info */
+ err = pdsc_core_init(pdsc);
+ if (err)
+ goto err_out_teardown;
+
+ /* Set up the VIFs */
+ err = pdsc_viftypes_init(pdsc);
+ if (err)
+ goto err_out_teardown;
+
+ if (init)
+ pdsc_debugfs_add_viftype(pdsc);
+
+ clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
+ return 0;
+
+err_out_teardown:
+ pdsc_teardown(pdsc, init);
+ return err;
+}
+
+void pdsc_teardown(struct pdsc *pdsc, bool removing)
+{
+ int i;
+
+ pdsc_devcmd_reset(pdsc);
+ pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
+ pdsc_qcq_free(pdsc, &pdsc->adminqcq);
+
+ kfree(pdsc->viftype_status);
+ pdsc->viftype_status = NULL;
+
+ if (pdsc->intr_info) {
+ for (i = 0; i < pdsc->nintrs; i++)
+ pdsc_intr_free(pdsc, i);
+
+ if (removing) {
+ kfree(pdsc->intr_info);
+ pdsc->intr_info = NULL;
+ }
+ }
+
+ if (pdsc->kern_dbpage) {
+ iounmap(pdsc->kern_dbpage);
+ pdsc->kern_dbpage = NULL;
+ }
+
+ set_bit(PDSC_S_FW_DEAD, &pdsc->state);
+}
+
+int pdsc_start(struct pdsc *pdsc)
+{
+ pds_core_intr_mask(&pdsc->intr_ctrl[pdsc->adminqcq.intx],
+ PDS_CORE_INTR_MASK_CLEAR);
+
+ return 0;
+}
+
+void pdsc_stop(struct pdsc *pdsc)
+{
+ int i;
+
+ if (!pdsc->intr_info)
+ return;
+
+ /* Mask interrupts that are in use */
+ for (i = 0; i < pdsc->nintrs; i++)
+ if (pdsc->intr_info[i].vector)
+ pds_core_intr_mask(&pdsc->intr_ctrl[i],
+ PDS_CORE_INTR_MASK_SET);
+}
+
+static void pdsc_fw_down(struct pdsc *pdsc)
+{
+ union pds_core_notifyq_comp reset_event = {
+ .reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
+ .reset.state = 0,
+ };
+
+ if (test_and_set_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
+ dev_err(pdsc->dev, "%s: already happening\n", __func__);
+ return;
+ }
+
+ /* Notify clients of fw_down */
+ devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
+ pdsc_notify(PDS_EVENT_RESET, &reset_event);
+
+ pdsc_stop(pdsc);
+ pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
+}
+
+static void pdsc_fw_up(struct pdsc *pdsc)
+{
+ union pds_core_notifyq_comp reset_event = {
+ .reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
+ .reset.state = 1,
+ };
+ int err;
+
+ if (!test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
+ dev_err(pdsc->dev, "%s: fw not dead\n", __func__);
+ return;
+ }
+
+ err = pdsc_setup(pdsc, PDSC_SETUP_RECOVERY);
+ if (err)
+ goto err_out;
+
+ err = pdsc_start(pdsc);
+ if (err)
+ goto err_out;
+
+ /* Notify clients of fw_up */
+ pdsc->fw_recoveries++;
+ devlink_health_reporter_state_update(pdsc->fw_reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+ pdsc_notify(PDS_EVENT_RESET, &reset_event);
+
+ return;
+
+err_out:
+ pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
+}
+
+void pdsc_health_thread(struct work_struct *work)
+{
+ struct pdsc *pdsc = container_of(work, struct pdsc, health_work);
+ unsigned long mask;
+ bool healthy;
+
+ mutex_lock(&pdsc->config_lock);
+
+ /* Don't do a check when in a transition state */
+ mask = BIT_ULL(PDSC_S_INITING_DRIVER) |
+ BIT_ULL(PDSC_S_STOPPING_DRIVER);
+ if (pdsc->state & mask)
+ goto out_unlock;
+
+ healthy = pdsc_is_fw_good(pdsc);
+ dev_dbg(pdsc->dev, "%s: health %d fw_status %#02x fw_heartbeat %d\n",
+ __func__, healthy, pdsc->fw_status, pdsc->last_hb);
+
+ if (test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
+ if (healthy)
+ pdsc_fw_up(pdsc);
+ } else {
+ if (!healthy)
+ pdsc_fw_down(pdsc);
+ }
+
+ pdsc->fw_generation = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
+
+out_unlock:
+ mutex_unlock(&pdsc->config_lock);
+}
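/*
 * Worked example, not part of the patch (CPU and VF counts assumed), of the
 * AdminQ sizing done in pdsc_setup() above, with PDSC_ADMINQ_MIN_LENGTH == 16:
 *   16 online CPUs, 4 VFs:
 *     numdescs  = max(16, 16)            = 16
 *     numdescs += 2 * 4                  -> 24
 *     numdescs  = roundup_pow_of_two(24) = 32 descriptors
 */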
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
new file mode 100644
index 000000000000..e545fafc4819
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#ifndef _PDSC_H_
+#define _PDSC_H_
+
+#include <linux/debugfs.h>
+#include <net/devlink.h>
+
+#include <linux/pds/pds_common.h>
+#include <linux/pds/pds_core_if.h>
+#include <linux/pds/pds_adminq.h>
+#include <linux/pds/pds_intr.h>
+
+#define PDSC_DRV_DESCRIPTION "AMD/Pensando Core Driver"
+
+#define PDSC_WATCHDOG_SECS 5
+#define PDSC_QUEUE_NAME_MAX_SZ 32
+#define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */
+#define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
+#define PDSC_TEARDOWN_RECOVERY false
+#define PDSC_TEARDOWN_REMOVING true
+#define PDSC_SETUP_RECOVERY false
+#define PDSC_SETUP_INIT true
+
+struct pdsc_dev_bar {
+ void __iomem *vaddr;
+ phys_addr_t bus_addr;
+ unsigned long len;
+ int res_index;
+};
+
+struct pdsc;
+
+struct pdsc_vf {
+ struct pds_auxiliary_dev *padev;
+ struct pdsc *vf;
+ u16 index;
+ __le16 vif_types[PDS_DEV_TYPE_MAX];
+};
+
+struct pdsc_devinfo {
+ u8 asic_type;
+ u8 asic_rev;
+ char fw_version[PDS_CORE_DEVINFO_FWVERS_BUFLEN + 1];
+ char serial_num[PDS_CORE_DEVINFO_SERIAL_BUFLEN + 1];
+};
+
+struct pdsc_queue {
+ struct pdsc_q_info *info;
+ u64 dbval;
+ u16 head_idx;
+ u16 tail_idx;
+ u8 hw_type;
+ unsigned int index;
+ unsigned int num_descs;
+ u64 dbell_count;
+ u64 features;
+ unsigned int type;
+ unsigned int hw_index;
+ union {
+ void *base;
+ struct pds_core_admin_cmd *adminq;
+ };
+ dma_addr_t base_pa; /* must be page aligned */
+ unsigned int desc_size;
+ unsigned int pid;
+ char name[PDSC_QUEUE_NAME_MAX_SZ];
+};
+
+#define PDSC_INTR_NAME_MAX_SZ 32
+
+struct pdsc_intr_info {
+ char name[PDSC_INTR_NAME_MAX_SZ];
+ unsigned int index;
+ unsigned int vector;
+ void *data;
+};
+
+struct pdsc_cq_info {
+ void *comp;
+};
+
+struct pdsc_buf_info {
+ struct page *page;
+ dma_addr_t dma_addr;
+ u32 page_offset;
+ u32 len;
+};
+
+struct pdsc_q_info {
+ union {
+ void *desc;
+ struct pdsc_admin_cmd *adminq_desc;
+ };
+ unsigned int bytes;
+ unsigned int nbufs;
+ struct pdsc_buf_info bufs[PDS_CORE_MAX_FRAGS];
+ struct pdsc_wait_context *wc;
+ void *dest;
+};
+
+struct pdsc_cq {
+ struct pdsc_cq_info *info;
+ struct pdsc_queue *bound_q;
+ struct pdsc_intr_info *bound_intr;
+ u16 tail_idx;
+ bool done_color;
+ unsigned int num_descs;
+ unsigned int desc_size;
+ void *base;
+ dma_addr_t base_pa; /* must be page aligned */
+} ____cacheline_aligned_in_smp;
+
+struct pdsc_qcq {
+ struct pdsc *pdsc;
+ void *q_base;
+ dma_addr_t q_base_pa; /* might not be page aligned */
+ void *cq_base;
+ dma_addr_t cq_base_pa; /* might not be page aligned */
+ u32 q_size;
+ u32 cq_size;
+ bool armed;
+ unsigned int flags;
+
+ struct work_struct work;
+ struct pdsc_queue q;
+ struct pdsc_cq cq;
+ int intx;
+
+ u32 accum_work;
+ struct dentry *dentry;
+};
+
+struct pdsc_viftype {
+ char *name;
+ bool supported;
+ bool enabled;
+ int dl_id;
+ int vif_id;
+ struct pds_auxiliary_dev *padev;
+};
+
+/* No state flags set means we are in a steady running state */
+enum pdsc_state_flags {
+ PDSC_S_FW_DEAD, /* stopped, wait on startup or recovery */
+ PDSC_S_INITING_DRIVER, /* initial startup from probe */
+ PDSC_S_STOPPING_DRIVER, /* driver remove */
+
+ /* leave this as last */
+ PDSC_S_STATE_SIZE
+};
+
+struct pdsc {
+ struct pci_dev *pdev;
+ struct dentry *dentry;
+ struct device *dev;
+ struct pdsc_dev_bar bars[PDS_CORE_BARS_MAX];
+ struct pdsc_vf *vfs;
+ int num_vfs;
+ int vf_id;
+ int hw_index;
+ int uid;
+
+ unsigned long state;
+ u8 fw_status;
+ u8 fw_generation;
+ unsigned long last_fw_time;
+ u32 last_hb;
+ struct timer_list wdtimer;
+ unsigned int wdtimer_period;
+ struct work_struct health_work;
+ struct devlink_health_reporter *fw_reporter;
+ u32 fw_recoveries;
+
+ struct pdsc_devinfo dev_info;
+ struct pds_core_dev_identity dev_ident;
+ unsigned int nintrs;
+ struct pdsc_intr_info *intr_info; /* array of nintrs elements */
+
+ struct workqueue_struct *wq;
+
+ unsigned int devcmd_timeout;
+ struct mutex devcmd_lock; /* lock for dev_cmd operations */
+ struct mutex config_lock; /* lock for configuration operations */
+ spinlock_t adminq_lock; /* lock for adminq operations */
+ struct pds_core_dev_info_regs __iomem *info_regs;
+ struct pds_core_dev_cmd_regs __iomem *cmd_regs;
+ struct pds_core_intr __iomem *intr_ctrl;
+ u64 __iomem *intr_status;
+ u64 __iomem *db_pages;
+ dma_addr_t phy_db_pages;
+ u64 __iomem *kern_dbpage;
+
+ struct pdsc_qcq adminqcq;
+ struct pdsc_qcq notifyqcq;
+ u64 last_eid;
+ struct pdsc_viftype *viftype_status;
+};
+
+/** enum pds_core_dbell_bits - bitwise composition of dbell values.
+ *
+ * @PDS_CORE_DBELL_QID_MASK: unshifted mask of valid queue id bits.
+ * @PDS_CORE_DBELL_QID_SHIFT: queue id shift amount in dbell value.
+ * @PDS_CORE_DBELL_QID: macro to build QID component of dbell value.
+ *
+ * @PDS_CORE_DBELL_RING_MASK: unshifted mask of valid ring bits.
+ * @PDS_CORE_DBELL_RING_SHIFT: ring shift amount in dbell value.
+ * @PDS_CORE_DBELL_RING: macro to build ring component of dbell value.
+ *
+ * @PDS_CORE_DBELL_RING_0: ring zero dbell component value.
+ * @PDS_CORE_DBELL_RING_1: ring one dbell component value.
+ * @PDS_CORE_DBELL_RING_2: ring two dbell component value.
+ * @PDS_CORE_DBELL_RING_3: ring three dbell component value.
+ *
+ * @PDS_CORE_DBELL_INDEX_MASK: bit mask of valid index bits, no shift needed.
+ */
+enum pds_core_dbell_bits {
+ PDS_CORE_DBELL_QID_MASK = 0xffffff,
+ PDS_CORE_DBELL_QID_SHIFT = 24,
+
+#define PDS_CORE_DBELL_QID(n) \
+ (((u64)(n) & PDS_CORE_DBELL_QID_MASK) << PDS_CORE_DBELL_QID_SHIFT)
+
+ PDS_CORE_DBELL_RING_MASK = 0x7,
+ PDS_CORE_DBELL_RING_SHIFT = 16,
+
+#define PDS_CORE_DBELL_RING(n) \
+ (((u64)(n) & PDS_CORE_DBELL_RING_MASK) << PDS_CORE_DBELL_RING_SHIFT)
+
+ PDS_CORE_DBELL_RING_0 = 0,
+ PDS_CORE_DBELL_RING_1 = PDS_CORE_DBELL_RING(1),
+ PDS_CORE_DBELL_RING_2 = PDS_CORE_DBELL_RING(2),
+ PDS_CORE_DBELL_RING_3 = PDS_CORE_DBELL_RING(3),
+
+ PDS_CORE_DBELL_INDEX_MASK = 0xffff,
+};
+
+static inline void pds_core_dbell_ring(u64 __iomem *db_page,
+ enum pds_core_logical_qtype qtype,
+ u64 val)
+{
+ writeq(val, &db_page[qtype]);
+}
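
A doorbell value is built from the QID, ring and index fields described in the enum above and then written to the slot for the queue's logical type in a doorbell page. A rough illustrative sketch, assuming qid, head_idx and qtype come from the caller's queue state:

	u64 val = PDS_CORE_DBELL_QID(qid) |
		  PDS_CORE_DBELL_RING_0 |
		  (head_idx & PDS_CORE_DBELL_INDEX_MASK);

	pds_core_dbell_ring(pdsc->kern_dbpage, qtype, val);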
+
+int pdsc_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack);
+int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+int pdsc_dl_flash_update(struct devlink *dl,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack);
+int pdsc_dl_enable_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int pdsc_dl_enable_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int pdsc_dl_enable_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack);
+
+void __iomem *pdsc_map_dbpage(struct pdsc *pdsc, int page_num);
+
+void pdsc_debugfs_create(void);
+void pdsc_debugfs_destroy(void);
+void pdsc_debugfs_add_dev(struct pdsc *pdsc);
+void pdsc_debugfs_del_dev(struct pdsc *pdsc);
+void pdsc_debugfs_add_ident(struct pdsc *pdsc);
+void pdsc_debugfs_add_viftype(struct pdsc *pdsc);
+void pdsc_debugfs_add_irqs(struct pdsc *pdsc);
+void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq);
+void pdsc_debugfs_del_qcq(struct pdsc_qcq *qcq);
+
+int pdsc_err_to_errno(enum pds_core_status_code code);
+bool pdsc_is_fw_running(struct pdsc *pdsc);
+bool pdsc_is_fw_good(struct pdsc *pdsc);
+int pdsc_devcmd(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
+ union pds_core_dev_comp *comp, int max_seconds);
+int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
+ union pds_core_dev_comp *comp, int max_seconds);
+int pdsc_devcmd_init(struct pdsc *pdsc);
+int pdsc_devcmd_reset(struct pdsc *pdsc);
+int pdsc_dev_reinit(struct pdsc *pdsc);
+int pdsc_dev_init(struct pdsc *pdsc);
+
+int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
+ irq_handler_t handler, void *data);
+void pdsc_intr_free(struct pdsc *pdsc, int index);
+void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq);
+int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
+ const char *name, unsigned int flags, unsigned int num_descs,
+ unsigned int desc_size, unsigned int cq_desc_size,
+ unsigned int pid, struct pdsc_qcq *qcq);
+int pdsc_setup(struct pdsc *pdsc, bool init);
+void pdsc_teardown(struct pdsc *pdsc, bool removing);
+int pdsc_start(struct pdsc *pdsc);
+void pdsc_stop(struct pdsc *pdsc);
+void pdsc_health_thread(struct work_struct *work);
+
+int pdsc_register_notify(struct notifier_block *nb);
+void pdsc_unregister_notify(struct notifier_block *nb);
+void pdsc_notify(unsigned long event, void *data);
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf);
+int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf);
+
+void pdsc_process_adminq(struct pdsc_qcq *qcq);
+void pdsc_work_thread(struct work_struct *work);
+irqreturn_t pdsc_adminq_isr(int irq, void *data);
+
+int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
+ struct netlink_ext_ack *extack);
+#endif /* _PDSC_H_ */
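
The setup/teardown and start/stop entry points above take bare bool arguments; the PDSC_SETUP_* and PDSC_TEARDOWN_* macros near the top of the header keep the call sites readable. A sketch of the intended pairing, matching how pdsc_init_pf() and pdsc_remove() use them further down in this patch:

	/* probe path */
	err = pdsc_setup(pdsc, PDSC_SETUP_INIT);
	if (!err)
		err = pdsc_start(pdsc);

	/* remove path */
	pdsc_stop(pdsc);
	pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING);

A firmware recovery cycle would instead pass PDSC_SETUP_RECOVERY and PDSC_TEARDOWN_RECOVERY.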
diff --git a/drivers/net/ethernet/amd/pds_core/debugfs.c b/drivers/net/ethernet/amd/pds_core/debugfs.c
new file mode 100644
index 000000000000..8ec392299b7d
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/debugfs.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/pci.h>
+
+#include "core.h"
+
+static struct dentry *pdsc_dir;
+
+void pdsc_debugfs_create(void)
+{
+ pdsc_dir = debugfs_create_dir(PDS_CORE_DRV_NAME, NULL);
+}
+
+void pdsc_debugfs_destroy(void)
+{
+ debugfs_remove_recursive(pdsc_dir);
+}
+
+void pdsc_debugfs_add_dev(struct pdsc *pdsc)
+{
+ pdsc->dentry = debugfs_create_dir(pci_name(pdsc->pdev), pdsc_dir);
+
+ debugfs_create_ulong("state", 0400, pdsc->dentry, &pdsc->state);
+}
+
+void pdsc_debugfs_del_dev(struct pdsc *pdsc)
+{
+ debugfs_remove_recursive(pdsc->dentry);
+ pdsc->dentry = NULL;
+}
+
+static int identity_show(struct seq_file *seq, void *v)
+{
+ struct pdsc *pdsc = seq->private;
+ struct pds_core_dev_identity *ident;
+ int vt;
+
+ ident = &pdsc->dev_ident;
+
+ seq_printf(seq, "fw_heartbeat: 0x%x\n",
+ ioread32(&pdsc->info_regs->fw_heartbeat));
+
+ seq_printf(seq, "nlifs: %d\n",
+ le32_to_cpu(ident->nlifs));
+ seq_printf(seq, "nintrs: %d\n",
+ le32_to_cpu(ident->nintrs));
+ seq_printf(seq, "ndbpgs_per_lif: %d\n",
+ le32_to_cpu(ident->ndbpgs_per_lif));
+ seq_printf(seq, "intr_coal_mult: %d\n",
+ le32_to_cpu(ident->intr_coal_mult));
+ seq_printf(seq, "intr_coal_div: %d\n",
+ le32_to_cpu(ident->intr_coal_div));
+
+ seq_puts(seq, "vif_types: ");
+ for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++)
+ seq_printf(seq, "%d ",
+ le16_to_cpu(pdsc->dev_ident.vif_types[vt]));
+ seq_puts(seq, "\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(identity);
+
+void pdsc_debugfs_add_ident(struct pdsc *pdsc)
+{
+ debugfs_create_file("identity", 0400, pdsc->dentry,
+ pdsc, &identity_fops);
+}
+
+static int viftype_show(struct seq_file *seq, void *v)
+{
+ struct pdsc *pdsc = seq->private;
+ int vt;
+
+ for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
+ if (!pdsc->viftype_status[vt].name)
+ continue;
+
+ seq_printf(seq, "%s\t%d supported %d enabled\n",
+ pdsc->viftype_status[vt].name,
+ pdsc->viftype_status[vt].supported,
+ pdsc->viftype_status[vt].enabled);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(viftype);
+
+void pdsc_debugfs_add_viftype(struct pdsc *pdsc)
+{
+ debugfs_create_file("viftypes", 0400, pdsc->dentry,
+ pdsc, &viftype_fops);
+}
+
+static const struct debugfs_reg32 intr_ctrl_regs[] = {
+ { .name = "coal_init", .offset = 0, },
+ { .name = "mask", .offset = 4, },
+ { .name = "credits", .offset = 8, },
+ { .name = "mask_on_assert", .offset = 12, },
+ { .name = "coal_timer", .offset = 16, },
+};
+
+void pdsc_debugfs_add_qcq(struct pdsc *pdsc, struct pdsc_qcq *qcq)
+{
+ struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
+ struct dentry *intr_dentry;
+ struct debugfs_regset32 *intr_ctrl_regset;
+ struct pdsc_intr_info *intr = &pdsc->intr_info[qcq->intx];
+ struct pdsc_queue *q = &qcq->q;
+ struct pdsc_cq *cq = &qcq->cq;
+
+ qcq_dentry = debugfs_create_dir(q->name, pdsc->dentry);
+ if (IS_ERR_OR_NULL(qcq_dentry))
+ return;
+ qcq->dentry = qcq_dentry;
+
+ debugfs_create_x64("q_base_pa", 0400, qcq_dentry, &qcq->q_base_pa);
+ debugfs_create_x32("q_size", 0400, qcq_dentry, &qcq->q_size);
+ debugfs_create_x64("cq_base_pa", 0400, qcq_dentry, &qcq->cq_base_pa);
+ debugfs_create_x32("cq_size", 0400, qcq_dentry, &qcq->cq_size);
+ debugfs_create_x32("accum_work", 0400, qcq_dentry, &qcq->accum_work);
+
+ q_dentry = debugfs_create_dir("q", qcq->dentry);
+ if (IS_ERR_OR_NULL(q_dentry))
+ return;
+
+ debugfs_create_u32("index", 0400, q_dentry, &q->index);
+ debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs);
+ debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size);
+ debugfs_create_u32("pid", 0400, q_dentry, &q->pid);
+
+ debugfs_create_u16("tail", 0400, q_dentry, &q->tail_idx);
+ debugfs_create_u16("head", 0400, q_dentry, &q->head_idx);
+
+ cq_dentry = debugfs_create_dir("cq", qcq->dentry);
+ if (IS_ERR_OR_NULL(cq_dentry))
+ return;
+
+ debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
+ debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs);
+ debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size);
+ debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color);
+ debugfs_create_u16("tail", 0400, cq_dentry, &cq->tail_idx);
+
+ if (qcq->flags & PDS_CORE_QCQ_F_INTR) {
+ intr_dentry = debugfs_create_dir("intr", qcq->dentry);
+ if (IS_ERR_OR_NULL(intr_dentry))
+ return;
+
+ debugfs_create_u32("index", 0400, intr_dentry, &intr->index);
+ debugfs_create_u32("vector", 0400, intr_dentry, &intr->vector);
+
+ intr_ctrl_regset = kzalloc(sizeof(*intr_ctrl_regset),
+ GFP_KERNEL);
+ if (!intr_ctrl_regset)
+ return;
+ intr_ctrl_regset->regs = intr_ctrl_regs;
+ intr_ctrl_regset->nregs = ARRAY_SIZE(intr_ctrl_regs);
+ intr_ctrl_regset->base = &pdsc->intr_ctrl[intr->index];
+
+ debugfs_create_regset32("intr_ctrl", 0400, intr_dentry,
+ intr_ctrl_regset);
+ }
+}
+
+void pdsc_debugfs_del_qcq(struct pdsc_qcq *qcq)
+{
+ debugfs_remove_recursive(qcq->dentry);
+ qcq->dentry = NULL;
+}
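
Taken together these helpers build a small per-device tree under debugfs. Assuming PDS_CORE_DRV_NAME is "pds_core" and a PF at 0000:2b:00.0, the resulting layout is roughly:

	/sys/kernel/debug/pds_core/0000:2b:00.0/
		state
		identity
		viftypes
		<queue name>/				(one dir per qcq)
			q_base_pa q_size cq_base_pa cq_size accum_work
			q/	index num_descs desc_size pid head tail
			cq/	base_pa num_descs desc_size done_color tail
			intr/	index vector intr_ctrl	(only with PDS_CORE_QCQ_F_INTR)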
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
new file mode 100644
index 000000000000..f7c597ea5daf
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/utsname.h>
+
+#include "core.h"
+
+int pdsc_err_to_errno(enum pds_core_status_code code)
+{
+ switch (code) {
+ case PDS_RC_SUCCESS:
+ return 0;
+ case PDS_RC_EVERSION:
+ case PDS_RC_EQTYPE:
+ case PDS_RC_EQID:
+ case PDS_RC_EINVAL:
+ case PDS_RC_ENOSUPP:
+ return -EINVAL;
+ case PDS_RC_EPERM:
+ return -EPERM;
+ case PDS_RC_ENOENT:
+ return -ENOENT;
+ case PDS_RC_EAGAIN:
+ return -EAGAIN;
+ case PDS_RC_ENOMEM:
+ return -ENOMEM;
+ case PDS_RC_EFAULT:
+ return -EFAULT;
+ case PDS_RC_EBUSY:
+ return -EBUSY;
+ case PDS_RC_EEXIST:
+ return -EEXIST;
+ case PDS_RC_EVFID:
+ return -ENODEV;
+ case PDS_RC_ECLIENT:
+ return -ECHILD;
+ case PDS_RC_ENOSPC:
+ return -ENOSPC;
+ case PDS_RC_ERANGE:
+ return -ERANGE;
+ case PDS_RC_BAD_ADDR:
+ return -EFAULT;
+ case PDS_RC_EOPCODE:
+ case PDS_RC_EINTR:
+ case PDS_RC_DEV_CMD:
+ case PDS_RC_ERROR:
+ case PDS_RC_ERDMA:
+ case PDS_RC_EIO:
+ default:
+ return -EIO;
+ }
+}
+
+bool pdsc_is_fw_running(struct pdsc *pdsc)
+{
+ pdsc->fw_status = ioread8(&pdsc->info_regs->fw_status);
+ pdsc->last_fw_time = jiffies;
+ pdsc->last_hb = ioread32(&pdsc->info_regs->fw_heartbeat);
+
+ /* Firmware is useful only if the running bit is set and
+ * fw_status != 0xff (bad PCI read)
+ */
+ return (pdsc->fw_status != 0xff) &&
+ (pdsc->fw_status & PDS_CORE_FW_STS_F_RUNNING);
+}
+
+bool pdsc_is_fw_good(struct pdsc *pdsc)
+{
+ u8 gen = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
+
+ return pdsc_is_fw_running(pdsc) && gen == pdsc->fw_generation;
+}
+
+static u8 pdsc_devcmd_status(struct pdsc *pdsc)
+{
+ return ioread8(&pdsc->cmd_regs->comp.status);
+}
+
+static bool pdsc_devcmd_done(struct pdsc *pdsc)
+{
+ return ioread32(&pdsc->cmd_regs->done) & PDS_CORE_DEV_CMD_DONE;
+}
+
+static void pdsc_devcmd_dbell(struct pdsc *pdsc)
+{
+ iowrite32(0, &pdsc->cmd_regs->done);
+ iowrite32(1, &pdsc->cmd_regs->doorbell);
+}
+
+static void pdsc_devcmd_clean(struct pdsc *pdsc)
+{
+ iowrite32(0, &pdsc->cmd_regs->doorbell);
+ memset_io(&pdsc->cmd_regs->cmd, 0, sizeof(pdsc->cmd_regs->cmd));
+}
+
+static const char *pdsc_devcmd_str(int opcode)
+{
+ switch (opcode) {
+ case PDS_CORE_CMD_NOP:
+ return "PDS_CORE_CMD_NOP";
+ case PDS_CORE_CMD_IDENTIFY:
+ return "PDS_CORE_CMD_IDENTIFY";
+ case PDS_CORE_CMD_RESET:
+ return "PDS_CORE_CMD_RESET";
+ case PDS_CORE_CMD_INIT:
+ return "PDS_CORE_CMD_INIT";
+ case PDS_CORE_CMD_FW_DOWNLOAD:
+ return "PDS_CORE_CMD_FW_DOWNLOAD";
+ case PDS_CORE_CMD_FW_CONTROL:
+ return "PDS_CORE_CMD_FW_CONTROL";
+ default:
+ return "PDS_CORE_CMD_UNKNOWN";
+ }
+}
+
+static int pdsc_devcmd_wait(struct pdsc *pdsc, int max_seconds)
+{
+ struct device *dev = pdsc->dev;
+ unsigned long start_time;
+ unsigned long max_wait;
+ unsigned long duration;
+ int timeout = 0;
+ int done = 0;
+ int err = 0;
+ int status;
+ int opcode;
+
+ opcode = ioread8(&pdsc->cmd_regs->cmd.opcode);
+
+ start_time = jiffies;
+ max_wait = start_time + (max_seconds * HZ);
+
+ while (!done && !timeout) {
+ done = pdsc_devcmd_done(pdsc);
+ if (done)
+ break;
+
+ timeout = time_after(jiffies, max_wait);
+ if (timeout)
+ break;
+
+ usleep_range(100, 200);
+ }
+ duration = jiffies - start_time;
+
+ if (done && duration > HZ)
+ dev_dbg(dev, "DEVCMD %d %s after %ld secs\n",
+ opcode, pdsc_devcmd_str(opcode), duration / HZ);
+
+ if (!done || timeout) {
+ dev_err(dev, "DEVCMD %d %s timeout, done %d timeout %d max_seconds=%d\n",
+ opcode, pdsc_devcmd_str(opcode), done, timeout,
+ max_seconds);
+ err = -ETIMEDOUT;
+ pdsc_devcmd_clean(pdsc);
+ }
+
+ status = pdsc_devcmd_status(pdsc);
+ err = pdsc_err_to_errno(status);
+ if (err && err != -EAGAIN)
+ dev_err(dev, "DEVCMD %d %s failed, status=%d err %d %pe\n",
+ opcode, pdsc_devcmd_str(opcode), status, err,
+ ERR_PTR(err));
+
+ return err;
+}
+
+int pdsc_devcmd_locked(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
+ union pds_core_dev_comp *comp, int max_seconds)
+{
+ int err;
+
+ memcpy_toio(&pdsc->cmd_regs->cmd, cmd, sizeof(*cmd));
+ pdsc_devcmd_dbell(pdsc);
+ err = pdsc_devcmd_wait(pdsc, max_seconds);
+ memcpy_fromio(comp, &pdsc->cmd_regs->comp, sizeof(*comp));
+
+ if (err == -ENXIO || err == -ETIMEDOUT)
+ queue_work(pdsc->wq, &pdsc->health_work);
+
+ return err;
+}
+
+int pdsc_devcmd(struct pdsc *pdsc, union pds_core_dev_cmd *cmd,
+ union pds_core_dev_comp *comp, int max_seconds)
+{
+ int err;
+
+ mutex_lock(&pdsc->devcmd_lock);
+ err = pdsc_devcmd_locked(pdsc, cmd, comp, max_seconds);
+ mutex_unlock(&pdsc->devcmd_lock);
+
+ return err;
+}
+
+int pdsc_devcmd_init(struct pdsc *pdsc)
+{
+ union pds_core_dev_comp comp = {};
+ union pds_core_dev_cmd cmd = {
+ .opcode = PDS_CORE_CMD_INIT,
+ };
+
+ return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+}
+
+int pdsc_devcmd_reset(struct pdsc *pdsc)
+{
+ union pds_core_dev_comp comp = {};
+ union pds_core_dev_cmd cmd = {
+ .reset.opcode = PDS_CORE_CMD_RESET,
+ };
+
+ return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+}
+
+static int pdsc_devcmd_identify_locked(struct pdsc *pdsc)
+{
+ union pds_core_dev_comp comp = {};
+ union pds_core_dev_cmd cmd = {
+ .identify.opcode = PDS_CORE_CMD_IDENTIFY,
+ .identify.ver = PDS_CORE_IDENTITY_VERSION_1,
+ };
+
+ return pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+}
+
+static void pdsc_init_devinfo(struct pdsc *pdsc)
+{
+ pdsc->dev_info.asic_type = ioread8(&pdsc->info_regs->asic_type);
+ pdsc->dev_info.asic_rev = ioread8(&pdsc->info_regs->asic_rev);
+ pdsc->fw_generation = PDS_CORE_FW_STS_F_GENERATION &
+ ioread8(&pdsc->info_regs->fw_status);
+
+ memcpy_fromio(pdsc->dev_info.fw_version,
+ pdsc->info_regs->fw_version,
+ PDS_CORE_DEVINFO_FWVERS_BUFLEN);
+ pdsc->dev_info.fw_version[PDS_CORE_DEVINFO_FWVERS_BUFLEN] = 0;
+
+ memcpy_fromio(pdsc->dev_info.serial_num,
+ pdsc->info_regs->serial_num,
+ PDS_CORE_DEVINFO_SERIAL_BUFLEN);
+ pdsc->dev_info.serial_num[PDS_CORE_DEVINFO_SERIAL_BUFLEN] = 0;
+
+ dev_dbg(pdsc->dev, "fw_version %s\n", pdsc->dev_info.fw_version);
+}
+
+static int pdsc_identify(struct pdsc *pdsc)
+{
+ struct pds_core_drv_identity drv = {};
+ size_t sz;
+ int err;
+
+ drv.drv_type = cpu_to_le32(PDS_DRIVER_LINUX);
+ snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
+ "%s %s", PDS_CORE_DRV_NAME, utsname()->release);
+
+ /* Next let's get some info about the device
+ * We use the devcmd_lock at this level in order to
+ * get safe access to the cmd_regs->data before anyone
+ * else can mess it up
+ */
+ mutex_lock(&pdsc->devcmd_lock);
+
+ sz = min_t(size_t, sizeof(drv), sizeof(pdsc->cmd_regs->data));
+ memcpy_toio(&pdsc->cmd_regs->data, &drv, sz);
+
+ err = pdsc_devcmd_identify_locked(pdsc);
+ if (!err) {
+ sz = min_t(size_t, sizeof(pdsc->dev_ident),
+ sizeof(pdsc->cmd_regs->data));
+ memcpy_fromio(&pdsc->dev_ident, &pdsc->cmd_regs->data, sz);
+ }
+ mutex_unlock(&pdsc->devcmd_lock);
+
+ if (err) {
+ dev_err(pdsc->dev, "Cannot identify device: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ if (isprint(pdsc->dev_info.fw_version[0]) &&
+ isascii(pdsc->dev_info.fw_version[0]))
+ dev_info(pdsc->dev, "FW: %.*s\n",
+ (int)(sizeof(pdsc->dev_info.fw_version) - 1),
+ pdsc->dev_info.fw_version);
+ else
+ dev_info(pdsc->dev, "FW: (invalid string) 0x%02x 0x%02x 0x%02x 0x%02x ...\n",
+ (u8)pdsc->dev_info.fw_version[0],
+ (u8)pdsc->dev_info.fw_version[1],
+ (u8)pdsc->dev_info.fw_version[2],
+ (u8)pdsc->dev_info.fw_version[3]);
+
+ return 0;
+}
+
+int pdsc_dev_reinit(struct pdsc *pdsc)
+{
+ pdsc_init_devinfo(pdsc);
+
+ return pdsc_identify(pdsc);
+}
+
+int pdsc_dev_init(struct pdsc *pdsc)
+{
+ unsigned int nintrs;
+ int err;
+
+ /* Initial init and reset of device */
+ pdsc_init_devinfo(pdsc);
+ pdsc->devcmd_timeout = PDS_CORE_DEVCMD_TIMEOUT;
+
+ err = pdsc_devcmd_reset(pdsc);
+ if (err)
+ return err;
+
+ err = pdsc_identify(pdsc);
+ if (err)
+ return err;
+
+ pdsc_debugfs_add_ident(pdsc);
+
+ /* Now we can reserve interrupts */
+ nintrs = le32_to_cpu(pdsc->dev_ident.nintrs);
+ nintrs = min_t(unsigned int, num_online_cpus(), nintrs);
+
+ /* Get intr_info struct array for tracking */
+ pdsc->intr_info = kcalloc(nintrs, sizeof(*pdsc->intr_info), GFP_KERNEL);
+ if (!pdsc->intr_info) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = pci_alloc_irq_vectors(pdsc->pdev, nintrs, nintrs, PCI_IRQ_MSIX);
+ if (err != nintrs) {
+ dev_err(pdsc->dev, "Can't get %d intrs from OS: %pe\n",
+ nintrs, ERR_PTR(err));
+ err = -ENOSPC;
+ goto err_out;
+ }
+ pdsc->nintrs = nintrs;
+
+ return 0;
+
+err_out:
+ kfree(pdsc->intr_info);
+ pdsc->intr_info = NULL;
+
+ return err;
+}
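
pdsc_dev_init() is thus the initial bring-up sequence: read the device info registers, issue a RESET devcmd, exchange IDENTIFY data, publish the identity in debugfs, and finally allocate MSI-X vectors, with the interrupt count capped at the smaller of the device's advertised nintrs and num_online_cpus().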
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
new file mode 100644
index 000000000000..9c6b3653c1c7
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include "core.h"
+#include <linux/pds/pds_auxbus.h>
+
+static struct
+pdsc_viftype *pdsc_dl_find_viftype_by_id(struct pdsc *pdsc,
+ enum devlink_param_type dl_id)
+{
+ int vt;
+
+ for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
+ if (pdsc->viftype_status[vt].dl_id == dl_id)
+ return &pdsc->viftype_status[vt];
+ }
+
+ return NULL;
+}
+
+int pdsc_dl_enable_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct pdsc *pdsc = devlink_priv(dl);
+ struct pdsc_viftype *vt_entry;
+
+ vt_entry = pdsc_dl_find_viftype_by_id(pdsc, id);
+ if (!vt_entry)
+ return -ENOENT;
+
+ ctx->val.vbool = vt_entry->enabled;
+
+ return 0;
+}
+
+int pdsc_dl_enable_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct pdsc *pdsc = devlink_priv(dl);
+ struct pdsc_viftype *vt_entry;
+ int err = 0;
+ int vf_id;
+
+ vt_entry = pdsc_dl_find_viftype_by_id(pdsc, id);
+ if (!vt_entry || !vt_entry->supported)
+ return -EOPNOTSUPP;
+
+ if (vt_entry->enabled == ctx->val.vbool)
+ return 0;
+
+ vt_entry->enabled = ctx->val.vbool;
+ for (vf_id = 0; vf_id < pdsc->num_vfs; vf_id++) {
+ struct pdsc *vf = pdsc->vfs[vf_id].vf;
+
+ err = ctx->val.vbool ? pdsc_auxbus_dev_add(vf, pdsc) :
+ pdsc_auxbus_dev_del(vf, pdsc);
+ }
+
+ return err;
+}
+
+int pdsc_dl_enable_validate(struct devlink *dl, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct pdsc *pdsc = devlink_priv(dl);
+ struct pdsc_viftype *vt_entry;
+
+ vt_entry = pdsc_dl_find_viftype_by_id(pdsc, id);
+ if (!vt_entry || !vt_entry->supported)
+ return -EOPNOTSUPP;
+
+ if (!pdsc->viftype_status[vt_entry->vif_id].supported)
+ return -ENODEV;
+
+ return 0;
+}
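
These get/set/validate hooks back the generic ENABLE_VNET runtime parameter registered in main.c below; toggling it from userspace with something like "devlink dev param set pci/0000:2b:00.0 name enable_vnet value true cmode runtime" adds or removes the corresponding auxiliary device for each VF.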
+
+int pdsc_dl_flash_update(struct devlink *dl,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct pdsc *pdsc = devlink_priv(dl);
+
+ return pdsc_firmware_update(pdsc, params->fw, extack);
+}
+
+static char *fw_slotnames[] = {
+ "fw.goldfw",
+ "fw.mainfwa",
+ "fw.mainfwb",
+};
+
+int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ union pds_core_dev_cmd cmd = {
+ .fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
+ .fw_control.oper = PDS_CORE_FW_GET_LIST,
+ };
+ struct pds_core_fw_list_info fw_list;
+ struct pdsc *pdsc = devlink_priv(dl);
+ union pds_core_dev_comp comp;
+ char buf[16];
+ int listlen;
+ int err;
+ int i;
+
+ mutex_lock(&pdsc->devcmd_lock);
+ err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout * 2);
+ memcpy_fromio(&fw_list, pdsc->cmd_regs->data, sizeof(fw_list));
+ mutex_unlock(&pdsc->devcmd_lock);
+ if (err && err != -EIO)
+ return err;
+
+ listlen = fw_list.num_fw_slots;
+ for (i = 0; i < listlen; i++) {
+ if (i < ARRAY_SIZE(fw_slotnames))
+ strscpy(buf, fw_slotnames[i], sizeof(buf));
+ else
+ snprintf(buf, sizeof(buf), "fw.slot_%d", i);
+ err = devlink_info_version_stored_put(req, buf,
+ fw_list.fw_names[i].fw_version);
+ }
+
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ pdsc->dev_info.fw_version);
+ if (err)
+ return err;
+
+ snprintf(buf, sizeof(buf), "0x%x", pdsc->dev_info.asic_type);
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ buf);
+ if (err)
+ return err;
+
+ snprintf(buf, sizeof(buf), "0x%x", pdsc->dev_info.asic_rev);
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV,
+ buf);
+ if (err)
+ return err;
+
+ return devlink_info_serial_number_put(req, pdsc->dev_info.serial_num);
+}
+
+int pdsc_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct pdsc *pdsc = devlink_health_reporter_priv(reporter);
+ int err;
+
+ mutex_lock(&pdsc->config_lock);
+
+ if (test_bit(PDSC_S_FW_DEAD, &pdsc->state))
+ err = devlink_fmsg_string_pair_put(fmsg, "Status", "dead");
+ else if (!pdsc_is_fw_good(pdsc))
+ err = devlink_fmsg_string_pair_put(fmsg, "Status", "unhealthy");
+ else
+ err = devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
+
+ mutex_unlock(&pdsc->config_lock);
+
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "State",
+ pdsc->fw_status &
+ ~PDS_CORE_FW_STS_F_GENERATION);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "Generation",
+ pdsc->fw_generation >> 4);
+ if (err)
+ return err;
+
+ return devlink_fmsg_u32_pair_put(fmsg, "Recoveries",
+ pdsc->fw_recoveries);
+}
diff --git a/drivers/net/ethernet/amd/pds_core/fw.c b/drivers/net/ethernet/amd/pds_core/fw.c
new file mode 100644
index 000000000000..90a811f3878a
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/fw.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#include "core.h"
+
+/* The worst case wait for the install activity is about 25 minutes when
+ * installing a new CPLD, which happens very rarely. A normal update takes
+ * about 30-35 seconds. Since the driver can't tell whether a CPLD update
+ * will happen, we set the timeout for the ugly case.
+ */
+#define PDSC_FW_INSTALL_TIMEOUT (25 * 60)
+#define PDSC_FW_SELECT_TIMEOUT 30
+
+/* Number of periodic log updates during fw file download */
+#define PDSC_FW_INTERVAL_FRACTION 32
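
This fraction sets the granularity of the progress reports in pdsc_firmware_update() below: a devlink status notification is sent roughly every fw->size / 32 bytes, so a 64 MB image would notify about every 2 MB, i.e. around 32 updates over the course of the download.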
+
+static int pdsc_devcmd_fw_download_locked(struct pdsc *pdsc, u64 addr,
+ u32 offset, u32 length)
+{
+ union pds_core_dev_cmd cmd = {
+ .fw_download.opcode = PDS_CORE_CMD_FW_DOWNLOAD,
+ .fw_download.offset = cpu_to_le32(offset),
+ .fw_download.addr = cpu_to_le64(addr),
+ .fw_download.length = cpu_to_le32(length),
+ };
+ union pds_core_dev_comp comp;
+
+ return pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+}
+
+static int pdsc_devcmd_fw_install(struct pdsc *pdsc)
+{
+ union pds_core_dev_cmd cmd = {
+ .fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
+ .fw_control.oper = PDS_CORE_FW_INSTALL_ASYNC
+ };
+ union pds_core_dev_comp comp;
+ int err;
+
+ err = pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+ if (err < 0)
+ return err;
+
+ return comp.fw_control.slot;
+}
+
+static int pdsc_devcmd_fw_activate(struct pdsc *pdsc,
+ enum pds_core_fw_slot slot)
+{
+ union pds_core_dev_cmd cmd = {
+ .fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
+ .fw_control.oper = PDS_CORE_FW_ACTIVATE_ASYNC,
+ .fw_control.slot = slot
+ };
+ union pds_core_dev_comp comp;
+
+ return pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+}
+
+static int pdsc_fw_status_long_wait(struct pdsc *pdsc,
+ const char *label,
+ unsigned long timeout,
+ u8 fw_cmd,
+ struct netlink_ext_ack *extack)
+{
+ union pds_core_dev_cmd cmd = {
+ .fw_control.opcode = PDS_CORE_CMD_FW_CONTROL,
+ .fw_control.oper = fw_cmd,
+ };
+ union pds_core_dev_comp comp;
+ unsigned long start_time;
+ unsigned long end_time;
+ int err;
+
+	/* Poll the status of the long-running async install
+	 * command. We get -EAGAIN while the command is still
+	 * running, otherwise we get the final command status.
+	 */
+ start_time = jiffies;
+ end_time = start_time + (timeout * HZ);
+ do {
+ err = pdsc_devcmd(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
+ msleep(20);
+ } while (time_before(jiffies, end_time) &&
+ (err == -EAGAIN || err == -ETIMEDOUT));
+
+ if (err == -EAGAIN || err == -ETIMEDOUT) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware wait timed out");
+ dev_err(pdsc->dev, "DEV_CMD firmware wait %s timed out\n",
+ label);
+ } else if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware wait failed");
+ }
+
+ return err;
+}
+
+int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
+ struct netlink_ext_ack *extack)
+{
+ u32 buf_sz, copy_sz, offset;
+ struct devlink *dl;
+ int next_interval;
+ u64 data_addr;
+ int err = 0;
+ int fw_slot;
+
+ dev_info(pdsc->dev, "Installing firmware\n");
+
+ dl = priv_to_devlink(pdsc);
+ devlink_flash_update_status_notify(dl, "Preparing to flash",
+ NULL, 0, 0);
+
+ buf_sz = sizeof(pdsc->cmd_regs->data);
+
+ dev_dbg(pdsc->dev,
+ "downloading firmware - size %d part_sz %d nparts %lu\n",
+ (int)fw->size, buf_sz, DIV_ROUND_UP(fw->size, buf_sz));
+
+ offset = 0;
+ next_interval = 0;
+ data_addr = offsetof(struct pds_core_dev_cmd_regs, data);
+ while (offset < fw->size) {
+ if (offset >= next_interval) {
+ devlink_flash_update_status_notify(dl, "Downloading",
+ NULL, offset,
+ fw->size);
+ next_interval = offset +
+ (fw->size / PDSC_FW_INTERVAL_FRACTION);
+ }
+
+ copy_sz = min_t(unsigned int, buf_sz, fw->size - offset);
+ mutex_lock(&pdsc->devcmd_lock);
+ memcpy_toio(&pdsc->cmd_regs->data, fw->data + offset, copy_sz);
+ err = pdsc_devcmd_fw_download_locked(pdsc, data_addr,
+ offset, copy_sz);
+ mutex_unlock(&pdsc->devcmd_lock);
+ if (err) {
+ dev_err(pdsc->dev,
+ "download failed offset 0x%x addr 0x%llx len 0x%x: %pe\n",
+ offset, data_addr, copy_sz, ERR_PTR(err));
+ NL_SET_ERR_MSG_MOD(extack, "Segment download failed");
+ goto err_out;
+ }
+ offset += copy_sz;
+ }
+ devlink_flash_update_status_notify(dl, "Downloading", NULL,
+ fw->size, fw->size);
+
+ devlink_flash_update_timeout_notify(dl, "Installing", NULL,
+ PDSC_FW_INSTALL_TIMEOUT);
+
+ fw_slot = pdsc_devcmd_fw_install(pdsc);
+ if (fw_slot < 0) {
+ err = fw_slot;
+ dev_err(pdsc->dev, "install failed: %pe\n", ERR_PTR(err));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to start firmware install");
+ goto err_out;
+ }
+
+ err = pdsc_fw_status_long_wait(pdsc, "Installing",
+ PDSC_FW_INSTALL_TIMEOUT,
+ PDS_CORE_FW_INSTALL_STATUS,
+ extack);
+ if (err)
+ goto err_out;
+
+ devlink_flash_update_timeout_notify(dl, "Selecting", NULL,
+ PDSC_FW_SELECT_TIMEOUT);
+
+ err = pdsc_devcmd_fw_activate(pdsc, fw_slot);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to start firmware select");
+ goto err_out;
+ }
+
+ err = pdsc_fw_status_long_wait(pdsc, "Selecting",
+ PDSC_FW_SELECT_TIMEOUT,
+ PDS_CORE_FW_ACTIVATE_STATUS,
+ extack);
+ if (err)
+ goto err_out;
+
+ dev_info(pdsc->dev, "Firmware update completed, slot %d\n", fw_slot);
+
+err_out:
+ if (err)
+ devlink_flash_update_status_notify(dl, "Flash failed",
+ NULL, 0, 0);
+ else
+ devlink_flash_update_status_notify(dl, "Flash done",
+ NULL, 0, 0);
+ return err;
+}
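
In short, the update path is: copy the image into the device command data region in buf_sz chunks with PDS_CORE_CMD_FW_DOWNLOAD, kick off PDS_CORE_FW_INSTALL_ASYNC and poll PDS_CORE_FW_INSTALL_STATUS for up to 25 minutes, then activate the returned slot with PDS_CORE_FW_ACTIVATE_ASYNC and poll PDS_CORE_FW_ACTIVATE_STATUS for up to 30 seconds, sending devlink flash status notifications along the way.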
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
new file mode 100644
index 000000000000..672757932246
--- /dev/null
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Advanced Micro Devices, Inc */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pci.h>
+
+#include <linux/pds/pds_common.h>
+
+#include "core.h"
+
+MODULE_DESCRIPTION(PDSC_DRV_DESCRIPTION);
+MODULE_AUTHOR("Advanced Micro Devices, Inc");
+MODULE_LICENSE("GPL");
+
+/* Supported devices */
+static const struct pci_device_id pdsc_id_table[] = {
+ { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_CORE_PF) },
+ { PCI_VDEVICE(PENSANDO, PCI_DEVICE_ID_PENSANDO_VDPA_VF) },
+ { 0, } /* end of table */
+};
+MODULE_DEVICE_TABLE(pci, pdsc_id_table);
+
+static void pdsc_wdtimer_cb(struct timer_list *t)
+{
+ struct pdsc *pdsc = from_timer(pdsc, t, wdtimer);
+
+ dev_dbg(pdsc->dev, "%s: jiffies %ld\n", __func__, jiffies);
+ mod_timer(&pdsc->wdtimer,
+ round_jiffies(jiffies + pdsc->wdtimer_period));
+
+ queue_work(pdsc->wq, &pdsc->health_work);
+}
+
+static void pdsc_unmap_bars(struct pdsc *pdsc)
+{
+ struct pdsc_dev_bar *bars = pdsc->bars;
+ unsigned int i;
+
+ for (i = 0; i < PDS_CORE_BARS_MAX; i++) {
+ if (bars[i].vaddr)
+ pci_iounmap(pdsc->pdev, bars[i].vaddr);
+ }
+}
+
+static int pdsc_map_bars(struct pdsc *pdsc)
+{
+ struct pdsc_dev_bar *bar = pdsc->bars;
+ struct pci_dev *pdev = pdsc->pdev;
+ struct device *dev = pdsc->dev;
+ struct pdsc_dev_bar *bars;
+ unsigned int i, j;
+ int num_bars = 0;
+ int err;
+ u32 sig;
+
+ bars = pdsc->bars;
+
+ /* Since the PCI interface in the hardware is configurable,
+ * we need to poke into all the bars to find the set we're
+ * expecting.
+ */
+ for (i = 0, j = 0; i < PDS_CORE_BARS_MAX; i++) {
+ if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+ continue;
+
+ bars[j].len = pci_resource_len(pdev, i);
+ bars[j].bus_addr = pci_resource_start(pdev, i);
+ bars[j].res_index = i;
+
+ /* only map the whole bar 0 */
+ if (j > 0) {
+ bars[j].vaddr = NULL;
+ } else {
+ bars[j].vaddr = pci_iomap(pdev, i, bars[j].len);
+ if (!bars[j].vaddr) {
+ dev_err(dev, "Cannot map BAR %d, aborting\n", i);
+ return -ENODEV;
+ }
+ }
+
+ j++;
+ }
+ num_bars = j;
+
+ /* BAR0: dev_cmd and interrupts */
+ if (num_bars < 1) {
+ dev_err(dev, "No bars found\n");
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ if (bar->len < PDS_CORE_BAR0_SIZE) {
+ dev_err(dev, "Resource bar size %lu too small\n", bar->len);
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ pdsc->info_regs = bar->vaddr + PDS_CORE_BAR0_DEV_INFO_REGS_OFFSET;
+ pdsc->cmd_regs = bar->vaddr + PDS_CORE_BAR0_DEV_CMD_REGS_OFFSET;
+ pdsc->intr_status = bar->vaddr + PDS_CORE_BAR0_INTR_STATUS_OFFSET;
+ pdsc->intr_ctrl = bar->vaddr + PDS_CORE_BAR0_INTR_CTRL_OFFSET;
+
+ sig = ioread32(&pdsc->info_regs->signature);
+ if (sig != PDS_CORE_DEV_INFO_SIGNATURE) {
+ dev_err(dev, "Incompatible firmware signature %x", sig);
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ /* BAR1: doorbells */
+ bar++;
+ if (num_bars < 2) {
+ dev_err(dev, "Doorbell bar missing\n");
+ err = -EFAULT;
+ goto err_out;
+ }
+
+ pdsc->db_pages = bar->vaddr;
+ pdsc->phy_db_pages = bar->bus_addr;
+
+ return 0;
+
+err_out:
+ pdsc_unmap_bars(pdsc);
+ return err;
+}
+
+void __iomem *pdsc_map_dbpage(struct pdsc *pdsc, int page_num)
+{
+ return pci_iomap_range(pdsc->pdev,
+ pdsc->bars[PDS_CORE_PCI_BAR_DBELL].res_index,
+ (u64)page_num << PAGE_SHIFT, PAGE_SIZE);
+}
+
+static int pdsc_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+ struct device *dev = pdsc->dev;
+ int ret = 0;
+
+ if (num_vfs > 0) {
+ pdsc->vfs = kcalloc(num_vfs, sizeof(struct pdsc_vf),
+ GFP_KERNEL);
+ if (!pdsc->vfs)
+ return -ENOMEM;
+ pdsc->num_vfs = num_vfs;
+
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret) {
+ dev_err(dev, "Cannot enable SRIOV: %pe\n",
+ ERR_PTR(ret));
+ goto no_vfs;
+ }
+
+ return num_vfs;
+ }
+
+no_vfs:
+ pci_disable_sriov(pdev);
+
+ kfree(pdsc->vfs);
+ pdsc->vfs = NULL;
+ pdsc->num_vfs = 0;
+
+ return ret;
+}
+
+static int pdsc_init_vf(struct pdsc *vf)
+{
+ struct devlink *dl;
+ struct pdsc *pf;
+ int err;
+
+ pf = pdsc_get_pf_struct(vf->pdev);
+ if (IS_ERR_OR_NULL(pf))
+ return PTR_ERR(pf) ?: -1;
+
+ vf->vf_id = pci_iov_vf_id(vf->pdev);
+
+ dl = priv_to_devlink(vf);
+ devl_lock(dl);
+ devl_register(dl);
+ devl_unlock(dl);
+
+ pf->vfs[vf->vf_id].vf = vf;
+ err = pdsc_auxbus_dev_add(vf, pf);
+ if (err) {
+ devl_lock(dl);
+ devl_unregister(dl);
+ devl_unlock(dl);
+ }
+
+ return err;
+}
+
+static const struct devlink_health_reporter_ops pdsc_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = pdsc_fw_reporter_diagnose,
+};
+
+static const struct devlink_param pdsc_dl_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_VNET,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ pdsc_dl_enable_get,
+ pdsc_dl_enable_set,
+ pdsc_dl_enable_validate),
+};
+
+#define PDSC_WQ_NAME_LEN 24
+
+static int pdsc_init_pf(struct pdsc *pdsc)
+{
+ struct devlink_health_reporter *hr;
+ char wq_name[PDSC_WQ_NAME_LEN];
+ struct devlink *dl;
+ int err;
+
+ pcie_print_link_status(pdsc->pdev);
+
+ err = pci_request_regions(pdsc->pdev, PDS_CORE_DRV_NAME);
+ if (err) {
+ dev_err(pdsc->dev, "Cannot request PCI regions: %pe\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ err = pdsc_map_bars(pdsc);
+ if (err)
+ goto err_out_release_regions;
+
+ /* General workqueue and timer, but don't start timer yet */
+ snprintf(wq_name, sizeof(wq_name), "%s.%d", PDS_CORE_DRV_NAME, pdsc->uid);
+ pdsc->wq = create_singlethread_workqueue(wq_name);
+ INIT_WORK(&pdsc->health_work, pdsc_health_thread);
+ timer_setup(&pdsc->wdtimer, pdsc_wdtimer_cb, 0);
+ pdsc->wdtimer_period = PDSC_WATCHDOG_SECS * HZ;
+
+ mutex_init(&pdsc->devcmd_lock);
+ mutex_init(&pdsc->config_lock);
+ spin_lock_init(&pdsc->adminq_lock);
+
+ mutex_lock(&pdsc->config_lock);
+ set_bit(PDSC_S_FW_DEAD, &pdsc->state);
+
+ err = pdsc_setup(pdsc, PDSC_SETUP_INIT);
+ if (err) {
+ mutex_unlock(&pdsc->config_lock);
+ goto err_out_unmap_bars;
+ }
+
+ err = pdsc_start(pdsc);
+ if (err) {
+ mutex_unlock(&pdsc->config_lock);
+ goto err_out_teardown;
+ }
+
+ mutex_unlock(&pdsc->config_lock);
+
+ dl = priv_to_devlink(pdsc);
+ devl_lock(dl);
+ err = devl_params_register(dl, pdsc_dl_params,
+ ARRAY_SIZE(pdsc_dl_params));
+ if (err) {
+ devl_unlock(dl);
+ dev_warn(pdsc->dev, "Failed to register devlink params: %pe\n",
+ ERR_PTR(err));
+ goto err_out_stop;
+ }
+
+ hr = devl_health_reporter_create(dl, &pdsc_fw_reporter_ops, 0, pdsc);
+ if (IS_ERR(hr)) {
+ devl_unlock(dl);
+ dev_warn(pdsc->dev, "Failed to create fw reporter: %pe\n", hr);
+ err = PTR_ERR(hr);
+ goto err_out_unreg_params;
+ }
+ pdsc->fw_reporter = hr;
+
+ devl_register(dl);
+ devl_unlock(dl);
+
+ /* Lastly, start the health check timer */
+ mod_timer(&pdsc->wdtimer, round_jiffies(jiffies + pdsc->wdtimer_period));
+
+ return 0;
+
+err_out_unreg_params:
+ devlink_params_unregister(dl, pdsc_dl_params,
+ ARRAY_SIZE(pdsc_dl_params));
+err_out_stop:
+ pdsc_stop(pdsc);
+err_out_teardown:
+ pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING);
+err_out_unmap_bars:
+ del_timer_sync(&pdsc->wdtimer);
+ if (pdsc->wq)
+ destroy_workqueue(pdsc->wq);
+ mutex_destroy(&pdsc->config_lock);
+ mutex_destroy(&pdsc->devcmd_lock);
+ pci_free_irq_vectors(pdsc->pdev);
+ pdsc_unmap_bars(pdsc);
+err_out_release_regions:
+ pci_release_regions(pdsc->pdev);
+
+ return err;
+}
+
+static const struct devlink_ops pdsc_dl_ops = {
+ .info_get = pdsc_dl_info_get,
+ .flash_update = pdsc_dl_flash_update,
+};
+
+static const struct devlink_ops pdsc_dl_vf_ops = {
+};
+
+static DEFINE_IDA(pdsc_ida);
+
+static int pdsc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ const struct devlink_ops *ops;
+ struct devlink *dl;
+ struct pdsc *pdsc;
+ bool is_pf;
+ int err;
+
+ is_pf = !pdev->is_virtfn;
+ ops = is_pf ? &pdsc_dl_ops : &pdsc_dl_vf_ops;
+ dl = devlink_alloc(ops, sizeof(struct pdsc), dev);
+ if (!dl)
+ return -ENOMEM;
+ pdsc = devlink_priv(dl);
+
+ pdsc->pdev = pdev;
+ pdsc->dev = &pdev->dev;
+ set_bit(PDSC_S_INITING_DRIVER, &pdsc->state);
+ pci_set_drvdata(pdev, pdsc);
+ pdsc_debugfs_add_dev(pdsc);
+
+ err = ida_alloc(&pdsc_ida, GFP_KERNEL);
+ if (err < 0) {
+ dev_err(pdsc->dev, "%s: id alloc failed: %pe\n",
+ __func__, ERR_PTR(err));
+ goto err_out_free_devlink;
+ }
+ pdsc->uid = err;
+
+ /* Query system for DMA addressing limitation for the device. */
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(PDS_CORE_ADDR_LEN));
+ if (err) {
+ dev_err(dev, "Unable to obtain 64-bit DMA for consistent allocations, aborting: %pe\n",
+ ERR_PTR(err));
+ goto err_out_free_ida;
+ }
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Cannot enable PCI device: %pe\n", ERR_PTR(err));
+ goto err_out_free_ida;
+ }
+ pci_set_master(pdev);
+
+ if (is_pf)
+ err = pdsc_init_pf(pdsc);
+ else
+ err = pdsc_init_vf(pdsc);
+ if (err) {
+ dev_err(dev, "Cannot init device: %pe\n", ERR_PTR(err));
+ goto err_out_clear_master;
+ }
+
+ clear_bit(PDSC_S_INITING_DRIVER, &pdsc->state);
+ return 0;
+
+err_out_clear_master:
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+err_out_free_ida:
+ ida_free(&pdsc_ida, pdsc->uid);
+err_out_free_devlink:
+ pdsc_debugfs_del_dev(pdsc);
+ devlink_free(dl);
+
+ return err;
+}
+
+static void pdsc_remove(struct pci_dev *pdev)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+ struct devlink *dl;
+
+ /* Unhook the registrations first to be sure there
+ * are no requests while we're stopping.
+ */
+ dl = priv_to_devlink(pdsc);
+ devl_lock(dl);
+ devl_unregister(dl);
+ if (!pdev->is_virtfn) {
+ if (pdsc->fw_reporter) {
+ devl_health_reporter_destroy(pdsc->fw_reporter);
+ pdsc->fw_reporter = NULL;
+ }
+ devl_params_unregister(dl, pdsc_dl_params,
+ ARRAY_SIZE(pdsc_dl_params));
+ }
+ devl_unlock(dl);
+
+ if (pdev->is_virtfn) {
+ struct pdsc *pf;
+
+ pf = pdsc_get_pf_struct(pdsc->pdev);
+ if (!IS_ERR(pf)) {
+ pdsc_auxbus_dev_del(pdsc, pf);
+ pf->vfs[pdsc->vf_id].vf = NULL;
+ }
+ } else {
+ /* Remove the VFs and their aux_bus connections before other
+ * cleanup so that the clients can use the AdminQ to cleanly
+ * shut themselves down.
+ */
+ pdsc_sriov_configure(pdev, 0);
+
+ del_timer_sync(&pdsc->wdtimer);
+ if (pdsc->wq)
+ destroy_workqueue(pdsc->wq);
+
+ mutex_lock(&pdsc->config_lock);
+ set_bit(PDSC_S_STOPPING_DRIVER, &pdsc->state);
+
+ pdsc_stop(pdsc);
+ pdsc_teardown(pdsc, PDSC_TEARDOWN_REMOVING);
+ mutex_unlock(&pdsc->config_lock);
+ mutex_destroy(&pdsc->config_lock);
+ mutex_destroy(&pdsc->devcmd_lock);
+
+ pci_free_irq_vectors(pdev);
+ pdsc_unmap_bars(pdsc);
+ pci_release_regions(pdev);
+ }
+
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+
+ ida_free(&pdsc_ida, pdsc->uid);
+ pdsc_debugfs_del_dev(pdsc);
+ devlink_free(dl);
+}
+
+static struct pci_driver pdsc_driver = {
+ .name = PDS_CORE_DRV_NAME,
+ .id_table = pdsc_id_table,
+ .probe = pdsc_probe,
+ .remove = pdsc_remove,
+ .sriov_configure = pdsc_sriov_configure,
+};
+
+void *pdsc_get_pf_struct(struct pci_dev *vf_pdev)
+{
+ return pci_iov_get_pf_drvdata(vf_pdev, &pdsc_driver);
+}
+EXPORT_SYMBOL_GPL(pdsc_get_pf_struct);
+
+static int __init pdsc_init_module(void)
+{
+ if (strcmp(KBUILD_MODNAME, PDS_CORE_DRV_NAME))
+ return -EINVAL;
+
+ pdsc_debugfs_create();
+ return pci_register_driver(&pdsc_driver);
+}
+
+static void __exit pdsc_cleanup_module(void)
+{
+ pci_unregister_driver(&pdsc_driver);
+ pdsc_debugfs_destroy();
+}
+
+module_init(pdsc_init_module);
+module_exit(pdsc_cleanup_module);
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 007bd7787291..246f34c43765 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -341,7 +341,7 @@ static int __init lance_probe( struct net_device *dev)
/* XXX - leak? */
MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000);
- if (MEM == NULL) {
+ if (!MEM) {
#ifdef CONFIG_SUN3
iounmap((void __iomem *)ioaddr);
#endif
@@ -796,7 +796,7 @@ static int lance_rx( struct net_device *dev )
}
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
head->msg_length = 0;
head->flag |= RMD1_OWN_CHIP;
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 22d609563af8..68ca1225eedc 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -530,7 +530,7 @@ static void lance_rx_dvma(struct net_device *dev)
len = (rd->mblength & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
@@ -700,7 +700,7 @@ static void lance_rx_pio(struct net_device *dev)
len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
sbus_writew(0, &rd->mblength);
sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
@@ -1276,7 +1276,7 @@ static void lance_free_hwresources(struct lance_private *lp)
/* Ethtool support... */
static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "sunlance", sizeof(info->driver));
+ strscpy(info->driver, "sunlance", sizeof(info->driver));
}
static const struct ethtool_ops sparc_lance_ethtool_ops = {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 533b8519ec35..3b70f6737633 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -898,6 +898,8 @@
#define PCS_V2_WINDOW_SELECT 0x9064
#define PCS_V2_RV_WINDOW_DEF 0x1060
#define PCS_V2_RV_WINDOW_SELECT 0x1064
+#define PCS_V2_YC_WINDOW_DEF 0x18060
+#define PCS_V2_YC_WINDOW_SELECT 0x18064
/* PCS register entry bit positions and sizes */
#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
@@ -1030,8 +1032,8 @@
#define XP_PROP_0_PORT_ID_WIDTH 8
#define XP_PROP_0_PORT_MODE_INDEX 8
#define XP_PROP_0_PORT_MODE_WIDTH 4
-#define XP_PROP_0_PORT_SPEEDS_INDEX 23
-#define XP_PROP_0_PORT_SPEEDS_WIDTH 4
+#define XP_PROP_0_PORT_SPEEDS_INDEX 22
+#define XP_PROP_0_PORT_SPEEDS_WIDTH 5
#define XP_PROP_1_MAX_RX_DMA_INDEX 24
#define XP_PROP_1_MAX_RX_DMA_WIDTH 5
#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8
@@ -1283,6 +1285,22 @@
#define MDIO_PMA_RX_CTRL1 0x8051
#endif
+#ifndef MDIO_PMA_RX_LSTS
+#define MDIO_PMA_RX_LSTS 0x018020
+#endif
+
+#ifndef MDIO_PMA_RX_EQ_CTRL4
+#define MDIO_PMA_RX_EQ_CTRL4 0x0001805C
+#endif
+
+#ifndef MDIO_PMA_MP_MISC_STS
+#define MDIO_PMA_MP_MISC_STS 0x0078
+#endif
+
+#ifndef MDIO_PMA_PHY_RX_EQ_CEU
+#define MDIO_PMA_PHY_RX_EQ_CEU 0x1800E
+#endif
+
#ifndef MDIO_PCS_DIG_CTRL
#define MDIO_PCS_DIG_CTRL 0x8000
#endif
@@ -1393,6 +1411,28 @@
#define XGBE_PMA_RX_RST_0_RESET_ON 0x10
#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00
+#define XGBE_PMA_RX_SIG_DET_0_MASK BIT(4)
+#define XGBE_PMA_RX_SIG_DET_0_ENABLE BIT(4)
+#define XGBE_PMA_RX_SIG_DET_0_DISABLE 0x0000
+
+#define XGBE_PMA_RX_VALID_0_MASK BIT(12)
+#define XGBE_PMA_RX_VALID_0_ENABLE BIT(12)
+#define XGBE_PMA_RX_VALID_0_DISABLE 0x0000
+
+#define XGBE_PMA_RX_AD_REQ_MASK BIT(12)
+#define XGBE_PMA_RX_AD_REQ_ENABLE BIT(12)
+#define XGBE_PMA_RX_AD_REQ_DISABLE 0x0000
+
+#define XGBE_PMA_RX_ADPT_ACK_MASK BIT(12)
+#define XGBE_PMA_RX_ADPT_ACK BIT(12)
+
+#define XGBE_PMA_CFF_UPDTM1_VLD BIT(8)
+#define XGBE_PMA_CFF_UPDT0_VLD BIT(9)
+#define XGBE_PMA_CFF_UPDT1_VLD BIT(10)
+#define XGBE_PMA_CFF_UPDT_MASK (XGBE_PMA_CFF_UPDTM1_VLD |\
+ XGBE_PMA_CFF_UPDT0_VLD | \
+ XGBE_PMA_CFF_UPDT1_VLD)
+
#define XGBE_PMA_PLL_CTRL_MASK BIT(15)
#define XGBE_PMA_PLL_CTRL_ENABLE BIT(15)
#define XGBE_PMA_PLL_CTRL_DISABLE 0x0000
@@ -1697,20 +1737,21 @@ do { \
} while (0)
/* Macros for building, reading or writing register values or bits
- * using MDIO. Different from above because of the use of standardized
- * Linux include values. No shifting is performed with the bit
- * operations, everything works on mask values.
+ * using MDIO.
*/
+
+#define XGBE_ADDR_C45 BIT(30)
+
#define XMDIO_READ(_pdata, _mmd, _reg) \
((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
- MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
+ XGBE_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
(XMDIO_READ((_pdata), _mmd, _reg) & _mask)
#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
- MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
+ XGBE_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
do { \
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
index 895d35639129..c68ace804e37 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -230,7 +230,7 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
netif_dbg(pdata, drv, netdev,
- "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
+ "cap=%d, en=%#x, mbc=%d, delay=%d\n",
pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
/* Check PFC for supported number of traffic classes */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 3936543a74d8..f393228d41c7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -524,19 +524,28 @@ static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
}
+static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
+{
+ unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+
+ /* From MAC ver 30H the TFCR is per priority, instead of per queue */
+ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
+ return max_q_count;
+ else
+ return min_t(unsigned int, pdata->tx_q_count, max_q_count);
+}
+
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
- unsigned int max_q_count, q_count;
unsigned int reg, reg_val;
- unsigned int i;
+ unsigned int i, q_count;
/* Clear MTL flow control */
for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
/* Clear MAC flow control */
- max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
- q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ q_count = xgbe_get_fc_queue_count(pdata);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
@@ -553,9 +562,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
struct ieee_pfc *pfc = pdata->pfc;
struct ieee_ets *ets = pdata->ets;
- unsigned int max_q_count, q_count;
unsigned int reg, reg_val;
- unsigned int i;
+ unsigned int i, q_count;
/* Set MTL flow control */
for (i = 0; i < pdata->rx_q_count; i++) {
@@ -579,8 +587,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
}
/* Set MAC flow control */
- max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
- q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ q_count = xgbe_get_fc_queue_count(pdata);
reg = MAC_Q0TFCR;
for (i = 0; i < q_count; i++) {
reg_val = XGMAC_IOREAD(pdata, reg);
@@ -807,6 +814,9 @@ static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
unsigned int ss;
switch (speed) {
+ case SPEED_10:
+ ss = 0x07;
+ break;
case SPEED_1000:
ss = 0x03;
break;
@@ -1147,8 +1157,8 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address, index, offset;
int mmd_data;
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
+ if (mmd_reg & XGBE_ADDR_C45)
+ mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
@@ -1179,8 +1189,8 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
unsigned long flags;
unsigned int mmd_address, index, offset;
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
+ if (mmd_reg & XGBE_ADDR_C45)
+ mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
@@ -1210,8 +1220,8 @@ static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
int mmd_data;
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
+ if (mmd_reg & XGBE_ADDR_C45)
+ mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
@@ -1238,8 +1248,8 @@ static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
unsigned long flags;
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
+ if (mmd_reg & XGBE_ADDR_C45)
+ mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
@@ -1284,11 +1294,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
}
}
-static unsigned int xgbe_create_mdio_sca(int port, int reg)
+static unsigned int xgbe_create_mdio_sca_c22(int port, int reg)
{
- unsigned int mdio_sca, da;
+ unsigned int mdio_sca;
+
+ mdio_sca = 0;
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
- da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
+ return mdio_sca;
+}
+
+static unsigned int xgbe_create_mdio_sca_c45(int port, unsigned int da, int reg)
+{
+ unsigned int mdio_sca;
mdio_sca = 0;
XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
@@ -1298,14 +1317,13 @@ static unsigned int xgbe_create_mdio_sca(int port, int reg)
return mdio_sca;
}
-static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
- int reg, u16 val)
+static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata,
+ unsigned int mdio_sca, u16 val)
{
- unsigned int mdio_sca, mdio_sccd;
+ unsigned int mdio_sccd;
reinit_completion(&pdata->mdio_complete);
- mdio_sca = xgbe_create_mdio_sca(addr, reg);
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
mdio_sccd = 0;
@@ -1322,14 +1340,33 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
return 0;
}
-static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
- int reg)
+static int xgbe_write_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg, u16 val)
+{
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c22(addr, reg);
+
+ return xgbe_write_ext_mii_regs(pdata, mdio_sca, val);
+}
+
+static int xgbe_write_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg, u16 val)
{
- unsigned int mdio_sca, mdio_sccd;
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg);
+
+ return xgbe_write_ext_mii_regs(pdata, mdio_sca, val);
+}
+
+static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata,
+ unsigned int mdio_sca)
+{
+ unsigned int mdio_sccd;
reinit_completion(&pdata->mdio_complete);
- mdio_sca = xgbe_create_mdio_sca(addr, reg);
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
mdio_sccd = 0;
@@ -1345,6 +1382,26 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}
+static int xgbe_read_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg)
+{
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c22(addr, reg);
+
+ return xgbe_read_ext_mii_regs(pdata, mdio_sca);
+}
+
+static int xgbe_read_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg)
+{
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg);
+
+ return xgbe_read_ext_mii_regs(pdata, mdio_sca);
+}
+
static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
enum xgbe_mdio_mode mode)
{
@@ -3558,8 +3615,10 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->set_speed = xgbe_set_speed;
hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
- hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
- hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;
+ hw_if->read_ext_mii_regs_c22 = xgbe_read_ext_mii_regs_c22;
+ hw_if->write_ext_mii_regs_c22 = xgbe_write_ext_mii_regs_c22;
+ hw_if->read_ext_mii_regs_c45 = xgbe_read_ext_mii_regs_c45;
+ hw_if->write_ext_mii_regs_c45 = xgbe_write_ext_mii_regs_c45;
hw_if->set_gpio = xgbe_set_gpio;
hw_if->clr_gpio = xgbe_clr_gpio;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 30d24d19f40d..614c0278419b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -721,7 +721,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
if (!channel->tx_ring)
break;
+ /* Deactivate the Tx timer */
del_timer_sync(&channel->tx_timer);
+ channel->tx_timer_active = 0;
}
}
@@ -950,14 +952,14 @@ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
channel = pdata->channel[i];
if (add)
netif_napi_add(pdata->netdev, &channel->napi,
- xgbe_one_poll, NAPI_POLL_WEIGHT);
+ xgbe_one_poll);
napi_enable(&channel->napi);
}
} else {
if (add)
netif_napi_add(pdata->netdev, &pdata->napi,
- xgbe_all_poll, NAPI_POLL_WEIGHT);
+ xgbe_all_poll);
napi_enable(&pdata->napi);
}
@@ -1062,6 +1064,9 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+ tasklet_kill(&pdata->tasklet_dev);
+ tasklet_kill(&pdata->tasklet_ecc);
+
if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
@@ -1508,9 +1513,6 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags)
- return -EINVAL;
-
mac_tscr = 0;
switch (config.tx_type) {
@@ -1674,12 +1676,10 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
return ret;
if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
- packet->header_len = skb_inner_transport_offset(skb) +
- inner_tcp_hdrlen(skb);
+ packet->header_len = skb_inner_tcp_all_headers(skb);
packet->tcp_header_len = inner_tcp_hdrlen(skb);
} else {
- packet->header_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ packet->header_len = skb_tcp_all_headers(skb);
packet->tcp_header_len = tcp_hdrlen(skb);
}
packet->tcp_payload_len = skb->len - packet->header_len;
@@ -2553,6 +2553,14 @@ read_again:
buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
len += buf2_len;
+ if (buf2_len > rdata->rx.buf.dma_len) {
+ /* Hardware inconsistency within the descriptors
+ * that has resulted in a length underflow.
+ */
+ error = 1;
+ goto skip_data;
+ }
+
if (!skb) {
skb = xgbe_create_skb(pdata, napi, rdata,
buf1_len);
@@ -2582,8 +2590,10 @@ skip_data:
if (!last || context_next)
goto read_again;
- if (!skb)
+ if (!skb || error) {
+ dev_kfree_skb(skb);
goto next_packet;
+ }
/* Be sure we don't exceed the configured MTU */
max_len = netdev->mtu + ETH_HLEN;
@@ -2775,7 +2785,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
- netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
+ netdev_dbg(netdev, "Protocol: %#06x\n", ntohs(eth->h_proto));
for (i = 0; i < skb->len; i += 32) {
unsigned int len = min(skb->len - i, 32U);
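/* Editorial note (not part of the patch): the TSO prep hunk above replaces the
 * open-coded offset + header-length sums with the skb_tcp_all_headers()
 * helpers; the two forms are equivalent.  A minimal sketch of the identities
 * being relied on; the wrapper name is hypothetical.
 */
static unsigned int xgbe_example_tso_header_len(const struct sk_buff *skb,
						bool vxlan)
{
	if (vxlan)
		/* == skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb) */
		return skb_inner_tcp_all_headers(skb);

	/* == skb_transport_offset(skb) + tcp_hdrlen(skb) */
	return skb_tcp_all_headers(skb);
}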
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 94879cf8b420..6e83ff59172a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -402,8 +402,8 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
- strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ strscpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(pdata->dev),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
@@ -619,8 +619,11 @@ static int xgbe_get_module_eeprom(struct net_device *netdev,
return pdata->phy_if.module_eeprom(pdata, eeprom, data);
}
-static void xgbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+static void
+xgbe_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
@@ -631,7 +634,9 @@ static void xgbe_get_ringparam(struct net_device *netdev,
}
static int xgbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
unsigned int rx, tx;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 22d4fc547a0a..a9ccc4258ee5 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -447,8 +447,10 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
xgbe_i2c_disable(pdata);
xgbe_i2c_clear_all_interrupts(pdata);
- if (pdata->dev_irq != pdata->i2c_irq)
+ if (pdata->dev_irq != pdata->i2c_irq) {
devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
+ tasklet_kill(&pdata->tasklet_i2c);
+ }
}
static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
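/* Editorial note (not part of the patch): the tasklet_kill() additions in this
 * series (device/ECC, I2C and AN interrupts) all follow the same teardown
 * order - free the IRQ first so the handler can no longer schedule the
 * tasklet, then kill any run that is still pending.  A generic sketch of that
 * ordering; the function name is hypothetical.
 */
static void xgbe_example_irq_teardown(struct device *dev, unsigned int irq,
				      void *dev_id, struct tasklet_struct *t)
{
	devm_free_irq(dev, irq, dev_id);	/* no new tasklet_schedule() after this */
	tasklet_kill(t);			/* wait out / cancel a pending run */
}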
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4e97b4869522..33a9574e9e04 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -274,6 +274,15 @@ static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_1000);
}
+static void xgbe_sgmii_10_mode(struct xgbe_prv_data *pdata)
+{
+ /* Set MAC to 10M speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_10);
+}
+
static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata)
{
/* Set MAC to 1G speed */
@@ -306,6 +315,9 @@ static void xgbe_change_mode(struct xgbe_prv_data *pdata,
case XGBE_MODE_KR:
xgbe_kr_mode(pdata);
break;
+ case XGBE_MODE_SGMII_10:
+ xgbe_sgmii_10_mode(pdata);
+ break;
case XGBE_MODE_SGMII_100:
xgbe_sgmii_100_mode(pdata);
break;
@@ -496,6 +508,7 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
reg |= XGBE_KR_TRAINING_ENABLE;
reg |= XGBE_KR_TRAINING_START;
XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+ pdata->kr_start_time = jiffies;
netif_dbg(pdata, link, pdata->netdev,
"KR training initiated\n");
@@ -632,6 +645,8 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
xgbe_switch_mode(pdata);
+ pdata->an_result = XGBE_AN_READY;
+
xgbe_an_restart(pdata);
return XGBE_AN_INCOMPAT_LINK;
@@ -1074,6 +1089,8 @@ static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
static const char *xgbe_phy_speed_string(int speed)
{
switch (speed) {
+ case SPEED_10:
+ return "10Mbps";
case SPEED_100:
return "100Mbps";
case SPEED_1000:
@@ -1161,6 +1178,7 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
case XGBE_MODE_KX_1000:
case XGBE_MODE_KX_2500:
case XGBE_MODE_KR:
+ case XGBE_MODE_SGMII_10:
case XGBE_MODE_SGMII_100:
case XGBE_MODE_SGMII_1000:
case XGBE_MODE_X:
@@ -1222,6 +1240,8 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode)
xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000);
} else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
xgbe_set_mode(pdata, XGBE_MODE_SGMII_100);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_10)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SGMII_10);
} else {
enable_irq(pdata->an_irq);
ret = -EINVAL;
@@ -1275,9 +1295,30 @@ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
{
unsigned long link_timeout;
+ unsigned long kr_time;
+ int wait;
link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
if (time_after(jiffies, link_timeout)) {
+ if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) &&
+ pdata->phy.autoneg == AUTONEG_ENABLE) {
+ /* AN restart must not happen while KR training is in progress.
+ * The loop below waits up to 500ms for KR training to finish and
+ * triggers an AN restart only if training has not completed in
+ * that time.
+ */
+ wait = XGBE_KR_TRAINING_WAIT_ITER;
+ while (wait--) {
+ kr_time = pdata->kr_start_time +
+ msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+ if (time_after(jiffies, kr_time))
+ break;
+ /* AN restart is not required if the AN result is COMPLETE */
+ if (pdata->an_result == XGBE_AN_COMPLETE)
+ return;
+ usleep_range(10000, 11000);
+ }
+ }
netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
xgbe_phy_config_aneg(pdata);
}
@@ -1301,6 +1342,9 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
mode = xgbe_phy_status_aneg(pdata);
switch (mode) {
+ case XGBE_MODE_SGMII_10:
+ pdata->phy.speed = SPEED_10;
+ break;
case XGBE_MODE_SGMII_100:
pdata->phy.speed = SPEED_100;
break;
@@ -1390,8 +1434,10 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
/* Disable auto-negotiation */
xgbe_an_disable_all(pdata);
- if (pdata->dev_irq != pdata->an_irq)
+ if (pdata->dev_irq != pdata->an_irq) {
devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+ tasklet_kill(&pdata->tasklet_an);
+ }
pdata->phy_if.phy_impl.stop(pdata);
@@ -1441,6 +1487,8 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
xgbe_sgmii_1000_mode(pdata);
} else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
xgbe_sgmii_100_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_10)) {
+ xgbe_sgmii_10_mode(pdata);
} else {
ret = -EINVAL;
goto err_irq;
@@ -1538,6 +1586,8 @@ static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata)
return SPEED_1000;
else if (XGBE_ADV(lks, 100baseT_Full))
return SPEED_100;
+ else if (XGBE_ADV(lks, 10baseT_Full))
+ return SPEED_10;
return SPEED_UNKNOWN;
}
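/* Editorial sketch (not part of the patch): the AN-timeout guard added above
 * polls in steps of roughly 10 ms for up to XGBE_KR_TRAINING_WAIT_ITER (50)
 * iterations, i.e. about 500 ms, which matches XGBE_AN_MS_TIMEOUT.  The
 * window test it relies on can be written as below; the helper name is
 * hypothetical.
 */
static bool xgbe_example_kr_window_open(unsigned long kr_start_time)
{
	/* true while we are still inside the ~500 ms KR training window */
	return time_before(jiffies,
			   kr_start_time + msecs_to_jiffies(XGBE_AN_MS_TIMEOUT));
}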
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 90cb55eb5466..f409d7bd1f1e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -278,6 +278,16 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
(rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ } else if (rdev && (rdev->vendor == PCI_VENDOR_ID_AMD) &&
+ (rdev->device == 0x14b5)) {
+ pdata->xpcs_window_def_reg = PCS_V2_YC_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_YC_WINDOW_SELECT;
+
+ /* Yellow Carp devices do not need cdr workaround */
+ pdata->vdata->an_cdr_workaround = 0;
+
+ /* Yellow Carp devices do not need rrc */
+ pdata->vdata->enable_rrc = 0;
} else {
pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
@@ -418,6 +428,9 @@ static void xgbe_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdata->pcidev);
+ /* Disable all interrupts in the hardware */
+ XP_IOWRITE(pdata, XP_INT_EN, 0x0);
+
xgbe_free_pdata(pdata);
}
@@ -460,7 +473,7 @@ static int __maybe_unused xgbe_pci_resume(struct device *dev)
return ret;
}
-static const struct xgbe_version_data xgbe_v2a = {
+static struct xgbe_version_data xgbe_v2a = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
.mmc_64bit = 1,
@@ -473,9 +486,10 @@ static const struct xgbe_version_data xgbe_v2a = {
.tx_desc_prefetch = 5,
.rx_desc_prefetch = 5,
.an_cdr_workaround = 1,
+ .enable_rrc = 1,
};
-static const struct xgbe_version_data xgbe_v2b = {
+static struct xgbe_version_data xgbe_v2b = {
.init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
.xpcs_access = XGBE_XPCS_ACCESS_V2,
.mmc_64bit = 1,
@@ -488,6 +502,7 @@ static const struct xgbe_version_data xgbe_v2b = {
.tx_desc_prefetch = 5,
.rx_desc_prefetch = 5,
.an_cdr_workaround = 1,
+ .enable_rrc = 1,
};
static const struct pci_device_id xgbe_pci_table[] = {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 213769054391..16e7fb2c0dae 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -124,10 +124,11 @@
#include "xgbe.h"
#include "xgbe-common.h"
-#define XGBE_PHY_PORT_SPEED_100 BIT(0)
-#define XGBE_PHY_PORT_SPEED_1000 BIT(1)
-#define XGBE_PHY_PORT_SPEED_2500 BIT(2)
-#define XGBE_PHY_PORT_SPEED_10000 BIT(3)
+#define XGBE_PHY_PORT_SPEED_10 BIT(0)
+#define XGBE_PHY_PORT_SPEED_100 BIT(1)
+#define XGBE_PHY_PORT_SPEED_1000 BIT(2)
+#define XGBE_PHY_PORT_SPEED_2500 BIT(3)
+#define XGBE_PHY_PORT_SPEED_10000 BIT(4)
#define XGBE_MUTEX_RELEASE 0x80000000
@@ -189,6 +190,7 @@ enum xgbe_sfp_cable {
XGBE_SFP_CABLE_UNKNOWN = 0,
XGBE_SFP_CABLE_ACTIVE,
XGBE_SFP_CABLE_PASSIVE,
+ XGBE_SFP_CABLE_FIBER,
};
enum xgbe_sfp_base {
@@ -236,9 +238,7 @@ enum xgbe_sfp_speed {
#define XGBE_SFP_BASE_BR 12
#define XGBE_SFP_BASE_BR_1GBE_MIN 0x0a
-#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d
#define XGBE_SFP_BASE_BR_10GBE_MIN 0x64
-#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68
#define XGBE_SFP_BASE_CU_CABLE_LEN 18
@@ -284,6 +284,8 @@ struct xgbe_sfp_eeprom {
#define XGBE_BEL_FUSE_VENDOR "BEL-FUSE "
#define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 "
+#define XGBE_MOLEX_VENDOR "Molex Inc. "
+
struct xgbe_sfp_ascii {
union {
char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
@@ -386,6 +388,10 @@ struct xgbe_phy_data {
static DEFINE_MUTEX(xgbe_phy_comm_lock);
static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
+static void xgbe_phy_rrc(struct xgbe_prv_data *pdata);
+static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ enum xgbe_mb_cmd cmd,
+ enum xgbe_mb_subcmd sub_cmd);
static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
struct xgbe_i2c_op *i2c_op)
@@ -598,20 +604,27 @@ static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
return -ETIMEDOUT;
}
-static int xgbe_phy_mdio_mii_write(struct xgbe_prv_data *pdata, int addr,
- int reg, u16 val)
+static int xgbe_phy_mdio_mii_write_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg, u16 val)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- if (reg & MII_ADDR_C45) {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
- return -ENOTSUPP;
- } else {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
- return -ENOTSUPP;
- }
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
+ return -EOPNOTSUPP;
- return pdata->hw_if.write_ext_mii_regs(pdata, addr, reg, val);
+ return pdata->hw_if.write_ext_mii_regs_c22(pdata, addr, reg, val);
+}
+
+static int xgbe_phy_mdio_mii_write_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg, u16 val)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
+ return -EOPNOTSUPP;
+
+ return pdata->hw_if.write_ext_mii_regs_c45(pdata, addr, devad,
+ reg, val);
}
static int xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, u16 val)
@@ -636,7 +649,8 @@ static int xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, u16 val)
return ret;
}
-static int xgbe_phy_mii_write(struct mii_bus *mii, int addr, int reg, u16 val)
+static int xgbe_phy_mii_write_c22(struct mii_bus *mii, int addr, int reg,
+ u16 val)
{
struct xgbe_prv_data *pdata = mii->priv;
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -649,29 +663,58 @@ static int xgbe_phy_mii_write(struct mii_bus *mii, int addr, int reg, u16 val)
if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
ret = xgbe_phy_i2c_mii_write(pdata, reg, val);
else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
- ret = xgbe_phy_mdio_mii_write(pdata, addr, reg, val);
+ ret = xgbe_phy_mdio_mii_write_c22(pdata, addr, reg, val);
else
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
xgbe_phy_put_comm_ownership(pdata);
return ret;
}
-static int xgbe_phy_mdio_mii_read(struct xgbe_prv_data *pdata, int addr,
- int reg)
+static int xgbe_phy_mii_write_c45(struct mii_bus *mii, int addr, int devad,
+ int reg, u16 val)
{
+ struct xgbe_prv_data *pdata = mii->priv;
struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
- if (reg & MII_ADDR_C45) {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
- return -ENOTSUPP;
- } else {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
- return -ENOTSUPP;
- }
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return ret;
- return pdata->hw_if.read_ext_mii_regs(pdata, addr, reg);
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ ret = -EOPNOTSUPP;
+ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
+ ret = xgbe_phy_mdio_mii_write_c45(pdata, addr, devad, reg, val);
+ else
+ ret = -EOPNOTSUPP;
+
+ xgbe_phy_put_comm_ownership(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_mdio_mii_read_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
+ return -EOPNOTSUPP;
+
+ return pdata->hw_if.read_ext_mii_regs_c22(pdata, addr, reg);
+}
+
+static int xgbe_phy_mdio_mii_read_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
+ return -EOPNOTSUPP;
+
+ return pdata->hw_if.read_ext_mii_regs_c45(pdata, addr, devad, reg);
}
static int xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg)
@@ -696,7 +739,7 @@ static int xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg)
return ret;
}
-static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg)
+static int xgbe_phy_mii_read_c22(struct mii_bus *mii, int addr, int reg)
{
struct xgbe_prv_data *pdata = mii->priv;
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -709,7 +752,30 @@ static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg)
if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
ret = xgbe_phy_i2c_mii_read(pdata, reg);
else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
- ret = xgbe_phy_mdio_mii_read(pdata, addr, reg);
+ ret = xgbe_phy_mdio_mii_read_c22(pdata, addr, reg);
+ else
+ ret = -EOPNOTSUPP;
+
+ xgbe_phy_put_comm_ownership(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_mii_read_c45(struct mii_bus *mii, int addr, int devad,
+ int reg)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return ret;
+
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ ret = -EOPNOTSUPP;
+ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
+ ret = xgbe_phy_mdio_mii_read_c45(pdata, addr, devad, reg);
else
ret = -ENOTSUPP;
@@ -758,6 +824,8 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) {
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10)
+ XGBE_SET_SUP(lks, 10baseT_Full);
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
XGBE_SET_SUP(lks, 100baseT_Full);
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
@@ -823,25 +891,22 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
enum xgbe_sfp_speed sfp_speed)
{
- u8 *sfp_base, min, max;
+ u8 *sfp_base, min;
sfp_base = sfp_eeprom->base;
switch (sfp_speed) {
case XGBE_SFP_SPEED_1000:
min = XGBE_SFP_BASE_BR_1GBE_MIN;
- max = XGBE_SFP_BASE_BR_1GBE_MAX;
break;
case XGBE_SFP_SPEED_10000:
min = XGBE_SFP_BASE_BR_10GBE_MIN;
- max = XGBE_SFP_BASE_BR_10GBE_MAX;
break;
default:
return false;
}
- return ((sfp_base[XGBE_SFP_BASE_BR] >= min) &&
- (sfp_base[XGBE_SFP_BASE_BR] <= max));
+ return sfp_base[XGBE_SFP_BASE_BR] >= min;
}
static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
@@ -1142,16 +1207,21 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
- /* Assume ACTIVE cable unless told it is PASSIVE */
+ /* Assume FIBER cable unless told otherwise */
if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN];
- } else {
+ } else if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_ACTIVE) {
phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
+ } else {
+ phy_data->sfp_cable = XGBE_SFP_CABLE_FIBER;
}
/* Determine the type of SFP */
- if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
+ if (phy_data->sfp_cable != XGBE_SFP_CABLE_FIBER &&
+ xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
+ phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
+ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
phy_data->sfp_base = XGBE_SFP_BASE_10000_SR;
else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR)
phy_data->sfp_base = XGBE_SFP_BASE_10000_LR;
@@ -1167,9 +1237,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
phy_data->sfp_base = XGBE_SFP_BASE_1000_CX;
else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T)
phy_data->sfp_base = XGBE_SFP_BASE_1000_T;
- else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
- xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
- phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
switch (phy_data->sfp_base) {
case XGBE_SFP_BASE_1000_T:
@@ -1542,6 +1609,16 @@ static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata)
xgbe_phy_phydev_flowctrl(pdata);
switch (pdata->an_status & XGBE_SGMII_AN_LINK_SPEED) {
+ case XGBE_SGMII_AN_LINK_SPEED_10:
+ if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
+ XGBE_SET_LP_ADV(lks, 10baseT_Full);
+ mode = XGBE_MODE_SGMII_10;
+ } else {
+ /* Half-duplex not supported */
+ XGBE_SET_LP_ADV(lks, 10baseT_Half);
+ mode = XGBE_MODE_UNKNOWN;
+ }
+ break;
case XGBE_SGMII_AN_LINK_SPEED_100:
if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
XGBE_SET_LP_ADV(lks, 100baseT_Full);
@@ -1658,7 +1735,10 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
switch (phy_data->sfp_base) {
case XGBE_SFP_BASE_1000_T:
if (phy_data->phydev &&
- (phy_data->phydev->speed == SPEED_100))
+ (phy_data->phydev->speed == SPEED_10))
+ mode = XGBE_MODE_SGMII_10;
+ else if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_100))
mode = XGBE_MODE_SGMII_100;
else
mode = XGBE_MODE_SGMII_1000;
@@ -1673,7 +1753,10 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
break;
default:
if (phy_data->phydev &&
- (phy_data->phydev->speed == SPEED_100))
+ (phy_data->phydev->speed == SPEED_10))
+ mode = XGBE_MODE_SGMII_10;
+ else if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_100))
mode = XGBE_MODE_SGMII_100;
else
mode = XGBE_MODE_SGMII_1000;
@@ -1803,6 +1886,9 @@ static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata,
if (phy_data->phydev &&
(phy_data->phydev->speed == SPEED_10000))
XGBE_SET_ADV(dlks, 10000baseKR_Full);
+ else if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_2500))
+ XGBE_SET_ADV(dlks, 2500baseX_Full);
else
XGBE_SET_ADV(dlks, 1000baseKX_Full);
break;
@@ -1910,8 +1996,8 @@ static int xgbe_phy_set_redrv_mode_mdio(struct xgbe_prv_data *pdata,
redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
redrv_val = (u16)mode;
- return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr,
- redrv_reg, redrv_val);
+ return pdata->hw_if.write_ext_mii_regs_c22(pdata, phy_data->redrv_addr,
+ redrv_reg, redrv_val);
}
static int xgbe_phy_set_redrv_mode_i2c(struct xgbe_prv_data *pdata,
@@ -1956,6 +2042,93 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
xgbe_phy_put_comm_ownership(pdata);
}
+#define MAX_RX_ADAPT_RETRIES 1
+#define XGBE_PMA_RX_VAL_SIG_MASK (XGBE_PMA_RX_SIG_DET_0_MASK | \
+ XGBE_PMA_RX_VALID_0_MASK)
+
+static void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
+ pdata->rx_adapt_retries = 0;
+ return;
+ }
+
+ xgbe_phy_perform_ratechange(pdata,
+ mode == XGBE_MODE_KR ?
+ XGBE_MB_CMD_SET_10G_KR :
+ XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_RX_ADAP);
+}
+
+static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ /* step 2: force PCS to send RX_ADAPT Req to PHY */
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
+ XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_ENABLE);
+
+ /* Step 3: Wait for RX_ADAPT ACK from the PHY */
+ msleep(200);
+
+ /* Software polls for coefficient update command (given by local PHY) */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_PHY_RX_EQ_CEU);
+
+ /* Clear the RX_AD_REQ bit */
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
+ XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_DISABLE);
+
+ /* Check if coefficient update command is set */
+ if ((reg & XGBE_PMA_CFF_UPDT_MASK) != XGBE_PMA_CFF_UPDT_MASK)
+ goto set_mode;
+
+ /* Step 4: Check for Block lock */
+
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg & MDIO_STAT1_LSTATUS) {
+ /* If the block lock is found, update the helpers
+ * and declare the link up
+ */
+ netif_dbg(pdata, link, pdata->netdev, "Block_lock done");
+ pdata->rx_adapt_done = true;
+ pdata->mode_set = false;
+ return;
+ }
+
+set_mode:
+ xgbe_set_rx_adap_mode(pdata, phy_data->cur_mode);
+}
+
+static void xgbe_phy_rx_adaptation(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+rx_adapt_reinit:
+ reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_LSTS,
+ XGBE_PMA_RX_VAL_SIG_MASK);
+
+ /* step 1: Check for RX_VALID && LF_SIGDET */
+ if ((reg & XGBE_PMA_RX_VAL_SIG_MASK) != XGBE_PMA_RX_VAL_SIG_MASK) {
+ netif_dbg(pdata, link, pdata->netdev,
+ "RX_VALID or LF_SIGDET is unset, issue rrc");
+ xgbe_phy_rrc(pdata);
+ if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
+ pdata->rx_adapt_retries = 0;
+ return;
+ }
+ goto rx_adapt_reinit;
+ }
+
+ /* perform rx adaptation */
+ xgbe_rx_adaptation(pdata);
+}
+
static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
{
int reg;
@@ -1979,6 +2152,10 @@ static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
static void xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable)
{
+ /* PLL_CTRL feature needs to be enabled for fixed PHY modes (Non-Autoneg) only */
+ if (pdata->phy.autoneg != AUTONEG_DISABLE)
+ return;
+
XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_MISC_CTRL0,
XGBE_PMA_PLL_CTRL_MASK,
enable ? XGBE_PMA_PLL_CTRL_ENABLE
@@ -1989,7 +2166,7 @@ static void xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable)
}
static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
- unsigned int cmd, unsigned int sub_cmd)
+ enum xgbe_mb_cmd cmd, enum xgbe_mb_subcmd sub_cmd)
{
unsigned int s0 = 0;
unsigned int wait;
@@ -2017,7 +2194,7 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
wait = XGBE_RATECHANGE_COUNT;
while (wait--) {
if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
- goto reenable_pll;
+ goto do_rx_adaptation;
usleep_range(1000, 2000);
}
@@ -2027,16 +2204,32 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
/* Reset on error */
xgbe_phy_rx_reset(pdata);
+ goto reenable_pll;
+
+do_rx_adaptation:
+ if (pdata->en_rx_adap && sub_cmd == XGBE_MB_SUBCMD_RX_ADAP &&
+ (cmd == XGBE_MB_CMD_SET_10G_KR || cmd == XGBE_MB_CMD_SET_10G_SFI)) {
+ netif_dbg(pdata, link, pdata->netdev,
+ "Enabling RX adaptation\n");
+ pdata->mode_set = true;
+ xgbe_phy_rx_adaptation(pdata);
+ /* return from here to avoid enabling PLL ctrl
+ * during adaptation phase
+ */
+ return;
+ }
reenable_pll:
- /* Enable PLL re-initialization */
- xgbe_phy_pll_ctrl(pdata, true);
+ /* Enable PLL re-initialization, not needed for PHY Power Off and RRC cmds */
+ if (cmd != XGBE_MB_CMD_POWER_OFF &&
+ cmd != XGBE_MB_CMD_RRC)
+ xgbe_phy_pll_ctrl(pdata, true);
}
static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
{
/* Receiver Reset Cycle */
- xgbe_phy_perform_ratechange(pdata, 5, 0);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_RRC, XGBE_MB_SUBCMD_NONE);
netif_dbg(pdata, link, pdata->netdev, "receiver reset complete\n");
}
@@ -2046,13 +2239,38 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
struct xgbe_phy_data *phy_data = pdata->phy_data;
/* Power off */
- xgbe_phy_perform_ratechange(pdata, 0, 0);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_POWER_OFF, XGBE_MB_SUBCMD_NONE);
phy_data->cur_mode = XGBE_MODE_UNKNOWN;
netif_dbg(pdata, link, pdata->netdev, "phy powered off\n");
}
+static bool enable_rx_adap(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
+ /* Rx-Adaptation is not supported on older platforms (ver < 0x30) */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ if (ver < 0x30)
+ return false;
+
+ /* Re-driver models 4223 and 4227 do not support Rx-Adaptation */
+ if (phy_data->redrv &&
+ (phy_data->redrv_model == XGBE_PHY_REDRV_MODEL_4223 ||
+ phy_data->redrv_model == XGBE_PHY_REDRV_MODEL_4227))
+ return false;
+
+ /* 10G KR mode with AN does not support Rx-Adaptation */
+ if (mode == XGBE_MODE_KR &&
+ phy_data->port_mode != XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG)
+ return false;
+
+ pdata->en_rx_adap = 1;
+ return true;
+}
+
static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2061,14 +2279,22 @@ static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
/* 10G/SFI */
if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) {
- xgbe_phy_perform_ratechange(pdata, 3, 0);
+ pdata->en_rx_adap = 0;
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, XGBE_MB_SUBCMD_ACTIVE);
+ } else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
+ (enable_rx_adap(pdata, XGBE_MODE_SFI))) {
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_RX_ADAP);
} else {
if (phy_data->sfp_cable_len <= 1)
- xgbe_phy_perform_ratechange(pdata, 3, 1);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_PASSIVE_1M);
else if (phy_data->sfp_cable_len <= 3)
- xgbe_phy_perform_ratechange(pdata, 3, 2);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_PASSIVE_3M);
else
- xgbe_phy_perform_ratechange(pdata, 3, 3);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_PASSIVE_OTHER);
}
phy_data->cur_mode = XGBE_MODE_SFI;
@@ -2083,7 +2309,7 @@ static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 1G/X */
- xgbe_phy_perform_ratechange(pdata, 1, 3);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_KX);
phy_data->cur_mode = XGBE_MODE_X;
@@ -2097,7 +2323,7 @@ static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 1G/SGMII */
- xgbe_phy_perform_ratechange(pdata, 1, 2);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_SGMII);
phy_data->cur_mode = XGBE_MODE_SGMII_1000;
@@ -2111,13 +2337,27 @@ static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 100M/SGMII */
- xgbe_phy_perform_ratechange(pdata, 1, 1);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_100MBITS);
phy_data->cur_mode = XGBE_MODE_SGMII_100;
netif_dbg(pdata, link, pdata->netdev, "100MbE SGMII mode set\n");
}
+static void xgbe_phy_sgmii_10_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 10M/SGMII */
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_10MBITS);
+
+ phy_data->cur_mode = XGBE_MODE_SGMII_10;
+
+ netif_dbg(pdata, link, pdata->netdev, "10MbE SGMII mode set\n");
+}
+
static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2125,7 +2365,12 @@ static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 10G/KR */
- xgbe_phy_perform_ratechange(pdata, 4, 0);
+ if (enable_rx_adap(pdata, XGBE_MODE_KR))
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR,
+ XGBE_MB_SUBCMD_RX_ADAP);
+ else
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR,
+ XGBE_MB_SUBCMD_NONE);
phy_data->cur_mode = XGBE_MODE_KR;
@@ -2139,7 +2384,7 @@ static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 2.5G/KX */
- xgbe_phy_perform_ratechange(pdata, 2, 0);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_2_5G, XGBE_MB_SUBCMD_NONE);
phy_data->cur_mode = XGBE_MODE_KX_2500;
@@ -2153,7 +2398,7 @@ static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 1G/KX */
- xgbe_phy_perform_ratechange(pdata, 1, 3);
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_KX);
phy_data->cur_mode = XGBE_MODE_KX_1000;
@@ -2176,12 +2421,15 @@ static enum xgbe_mode xgbe_phy_switch_baset_mode(struct xgbe_prv_data *pdata)
return xgbe_phy_cur_mode(pdata);
switch (xgbe_phy_cur_mode(pdata)) {
+ case XGBE_MODE_SGMII_10:
case XGBE_MODE_SGMII_100:
case XGBE_MODE_SGMII_1000:
return XGBE_MODE_KR;
+ case XGBE_MODE_KX_2500:
+ return XGBE_MODE_SGMII_1000;
case XGBE_MODE_KR:
default:
- return XGBE_MODE_SGMII_1000;
+ return XGBE_MODE_KX_2500;
}
}
@@ -2243,6 +2491,8 @@ static enum xgbe_mode xgbe_phy_get_baset_mode(struct xgbe_phy_data *phy_data,
int speed)
{
switch (speed) {
+ case SPEED_10:
+ return XGBE_MODE_SGMII_10;
case SPEED_100:
return XGBE_MODE_SGMII_100;
case SPEED_1000:
@@ -2260,6 +2510,8 @@ static enum xgbe_mode xgbe_phy_get_sfp_mode(struct xgbe_phy_data *phy_data,
int speed)
{
switch (speed) {
+ case SPEED_10:
+ return XGBE_MODE_SGMII_10;
case SPEED_100:
return XGBE_MODE_SGMII_100;
case SPEED_1000:
@@ -2334,6 +2586,9 @@ static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
case XGBE_MODE_KR:
xgbe_phy_kr_mode(pdata);
break;
+ case XGBE_MODE_SGMII_10:
+ xgbe_phy_sgmii_10_mode(pdata);
+ break;
case XGBE_MODE_SGMII_100:
xgbe_phy_sgmii_100_mode(pdata);
break;
@@ -2390,6 +2645,9 @@ static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata,
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
switch (mode) {
+ case XGBE_MODE_SGMII_10:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 10baseT_Full));
case XGBE_MODE_SGMII_100:
return xgbe_phy_check_mode(pdata, mode,
XGBE_ADV(lks, 100baseT_Full));
@@ -2419,6 +2677,11 @@ static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata,
return false;
return xgbe_phy_check_mode(pdata, mode,
XGBE_ADV(lks, 1000baseX_Full));
+ case XGBE_MODE_SGMII_10:
+ if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
+ return false;
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 10baseT_Full));
case XGBE_MODE_SGMII_100:
if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
return false;
@@ -2511,15 +2774,23 @@ static bool xgbe_phy_valid_speed_basex_mode(struct xgbe_phy_data *phy_data,
}
}
-static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data,
+static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_prv_data *pdata,
int speed)
{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
switch (speed) {
+ case SPEED_10:
+ /* Supported only on ver >= 0x30 hardware */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ return (ver >= 0x30) ? true : false;
case SPEED_100:
case SPEED_1000:
return true;
case SPEED_2500:
- return (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T);
+ return ((phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T) ||
+ (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T));
case SPEED_10000:
return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T);
default:
@@ -2527,10 +2798,17 @@ static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data,
}
}
-static bool xgbe_phy_valid_speed_sfp_mode(struct xgbe_phy_data *phy_data,
+static bool xgbe_phy_valid_speed_sfp_mode(struct xgbe_prv_data *pdata,
int speed)
{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
switch (speed) {
+ case SPEED_10:
+ /* Supported only on ver >= 0x30 hardware */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ return (ver >= 0x30) && (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000);
case SPEED_100:
return (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000);
case SPEED_1000:
@@ -2577,12 +2855,12 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
case XGBE_PORT_MODE_1000BASE_T:
case XGBE_PORT_MODE_NBASE_T:
case XGBE_PORT_MODE_10GBASE_T:
- return xgbe_phy_valid_speed_baset_mode(phy_data, speed);
+ return xgbe_phy_valid_speed_baset_mode(pdata, speed);
case XGBE_PORT_MODE_1000BASE_X:
case XGBE_PORT_MODE_10GBASE_R:
return xgbe_phy_valid_speed_basex_mode(phy_data, speed);
case XGBE_PORT_MODE_SFP:
- return xgbe_phy_valid_speed_sfp_mode(phy_data, speed);
+ return xgbe_phy_valid_speed_sfp_mode(pdata, speed);
default:
return false;
}
@@ -2605,8 +2883,11 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
return 0;
}
- if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los)
+ if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los) {
+ if (pdata->en_rx_adap)
+ pdata->rx_adapt_done = false;
return 0;
+ }
}
if (phy_data->phydev) {
@@ -2628,7 +2909,29 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
*/
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
- if (reg & MDIO_STAT1_LSTATUS)
+
+ if (pdata->en_rx_adap) {
+ /* if the link is available and adaptation is done,
+ * declare link up
+ */
+ if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+ return 1;
+ /* If either link is not available or adaptation is not done,
+ * retrigger the adaptation logic. (if the mode is not set,
+ * then issue mailbox command first)
+ */
+ if (pdata->mode_set) {
+ xgbe_phy_rx_adaptation(pdata);
+ } else {
+ pdata->rx_adapt_done = false;
+ xgbe_phy_set_mode(pdata, phy_data->cur_mode);
+ }
+
+ /* check again for the link and adaptation status */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+ return 1;
+ } else if (reg & MDIO_STAT1_LSTATUS)
return 1;
if (pdata->phy.autoneg == AUTONEG_ENABLE &&
@@ -2640,7 +2943,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
}
/* No link, attempt a receiver reset cycle */
- if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
+ if (pdata->vdata->enable_rrc && phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
phy_data->rrc_count = 0;
xgbe_phy_rrc(pdata);
}
@@ -2853,6 +3156,12 @@ static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata)
static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
+ /* 10 Mbps speed is not supported on ver < 0x30 hardware */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ if (ver < 0x30 && (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10))
+ return true;
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
@@ -2866,7 +3175,8 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
return false;
break;
case XGBE_PORT_MODE_1000BASE_T:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000))
return false;
break;
@@ -2875,14 +3185,17 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
return false;
break;
case XGBE_PORT_MODE_NBASE_T:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500))
return false;
break;
case XGBE_PORT_MODE_10GBASE_T:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
return false;
break;
@@ -2891,7 +3204,8 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
return false;
break;
case XGBE_PORT_MODE_SFP:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
return false;
@@ -3260,6 +3574,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) {
+ XGBE_SET_SUP(lks, 10baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
XGBE_SET_SUP(lks, 100baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_100;
@@ -3290,6 +3608,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) {
+ XGBE_SET_SUP(lks, 10baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
XGBE_SET_SUP(lks, 100baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_100;
@@ -3312,6 +3634,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) {
+ XGBE_SET_SUP(lks, 10baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
XGBE_SET_SUP(lks, 100baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_100;
@@ -3320,6 +3646,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, 1000baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_1000;
}
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) {
+ XGBE_SET_SUP(lks, 2500baseT_Full);
+ phy_data->start_mode = XGBE_MODE_KX_2500;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
XGBE_SET_SUP(lks, 10000baseT_Full);
phy_data->start_mode = XGBE_MODE_KR;
@@ -3352,6 +3682,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
XGBE_SET_SUP(lks, FIBRE);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10)
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
phy_data->start_mode = XGBE_MODE_SGMII_100;
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
@@ -3406,8 +3738,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
mii->priv = pdata;
mii->name = "amd-xgbe-mii";
- mii->read = xgbe_phy_mii_read;
- mii->write = xgbe_phy_mii_write;
+ mii->read = xgbe_phy_mii_read_c22;
+ mii->write = xgbe_phy_mii_write_c22;
+ mii->read_c45 = xgbe_phy_mii_read_c45;
+ mii->write_c45 = xgbe_phy_mii_write_c45;
mii->parent = pdata->dev;
mii->phy_mask = ~0;
snprintf(mii->id, sizeof(mii->id), "%s", dev_name(pdata->dev));
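/* Editorial sketch (not part of the patch): both the RX-adaptation path and
 * the link-status path above depend on MDIO_STAT1 being latched low - the
 * first read clears a historic link-down indication, the second read returns
 * the current state.  A standalone illustration of that idiom; the helper
 * name is hypothetical.
 */
static bool xgbe_example_pcs_link_up(struct xgbe_prv_data *pdata)
{
	unsigned int reg;

	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);	/* clear latch */
	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);	/* current state */

	return !!(reg & MDIO_STAT1_LSTATUS);
}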
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 4ebd2410185a..4d790a89fe77 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -338,7 +338,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
* the PHY resources listed last
*/
phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
- phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
+ phy_irqnum = platform_irq_count(pdev) - 1;
dma_irqnum = 1;
dma_irqend = phy_irqnum;
} else {
@@ -348,7 +348,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
phy_memnum = 0;
phy_irqnum = 0;
dma_irqnum = 1;
- dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ);
+ dma_irqend = platform_irq_count(pdev);
}
/* Obtain the mmio areas for the device */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
index d06d260cf1e2..7051bd7cf6dc 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -134,27 +134,15 @@ static u64 xgbe_cc_read(const struct cyclecounter *cc)
return nsec;
}
-static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
+static int xgbe_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct xgbe_prv_data *pdata = container_of(info,
struct xgbe_prv_data,
ptp_clock_info);
unsigned long flags;
- u64 adjust;
- u32 addend, diff;
- unsigned int neg_adjust = 0;
+ u64 addend;
- if (delta < 0) {
- neg_adjust = 1;
- delta = -delta;
- }
-
- adjust = pdata->tstamp_addend;
- adjust *= delta;
- diff = div_u64(adjust, 1000000000UL);
-
- addend = (neg_adjust) ? pdata->tstamp_addend - diff :
- pdata->tstamp_addend + diff;
+ addend = adjust_by_scaled_ppm(pdata->tstamp_addend, scaled_ppm);
spin_lock_irqsave(&pdata->tstamp_lock, flags);
@@ -235,7 +223,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
netdev_name(pdata->netdev));
info->owner = THIS_MODULE;
info->max_adj = pdata->ptpclk_rate;
- info->adjfreq = xgbe_adjfreq;
+ info->adjfine = xgbe_adjfine;
info->adjtime = xgbe_adjtime;
info->gettime64 = xgbe_gettime;
info->settime64 = xgbe_settime;
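/* Editorial sketch (not part of the patch): ->adjfine receives scaled_ppm,
 * parts-per-million with a 16-bit binary fraction, so adjust_by_scaled_ppm()
 * computes roughly addend +/- addend * |scaled_ppm| / (1000000 << 16) - the
 * same correction the removed open-coded ppb math produced.  A worked example
 * for a +1 ppm request (scaled_ppm == 1 << 16); the wrapper name is
 * hypothetical.
 */
static u64 xgbe_example_adjfine_1ppm(u64 addend)
{
	return adjust_by_scaled_ppm(addend, 1 << 16);	/* ~ addend * (1 + 1e-6) */
}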
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 607a2c90513b..ad136ed493ed 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -151,7 +151,8 @@
#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
/* Descriptors required for maximum contiguous TSO/GSO packet */
-#define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
+#define XGBE_TX_MAX_SPLIT \
+ ((GSO_LEGACY_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
/* Maximum possible descriptors needed for an SKB:
* - Maximum number of SKB frags
@@ -289,9 +290,11 @@
/* Auto-negotiation */
#define XGBE_AN_MS_TIMEOUT 500
#define XGBE_LINK_TIMEOUT 5
+#define XGBE_KR_TRAINING_WAIT_ITER 50
#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
+#define XGBE_SGMII_AN_LINK_SPEED_10 0x00
#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
@@ -416,7 +419,7 @@ struct xgbe_rx_ring_data {
/* Structure used to hold information related to the descriptor
* and the packet associated with the descriptor (always use
- * use the XGBE_GET_DESC_DATA macro to access this data from the ring)
+ * the XGBE_GET_DESC_DATA macro to access this data from the ring)
*/
struct xgbe_ring_data {
struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
@@ -593,6 +596,7 @@ enum xgbe_mode {
XGBE_MODE_KX_2500,
XGBE_MODE_KR,
XGBE_MODE_X,
+ XGBE_MODE_SGMII_10,
XGBE_MODE_SGMII_100,
XGBE_MODE_SGMII_1000,
XGBE_MODE_SFI,
@@ -610,6 +614,32 @@ enum xgbe_mdio_mode {
XGBE_MDIO_MODE_CL45,
};
+enum xgbe_mb_cmd {
+ XGBE_MB_CMD_POWER_OFF = 0,
+ XGBE_MB_CMD_SET_1G,
+ XGBE_MB_CMD_SET_2_5G,
+ XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_CMD_SET_10G_KR,
+ XGBE_MB_CMD_RRC
+};
+
+enum xgbe_mb_subcmd {
+ XGBE_MB_SUBCMD_NONE = 0,
+ XGBE_MB_SUBCMD_RX_ADAP,
+
+ /* 10GbE SFP subcommands */
+ XGBE_MB_SUBCMD_ACTIVE = 0,
+ XGBE_MB_SUBCMD_PASSIVE_1M,
+ XGBE_MB_SUBCMD_PASSIVE_3M,
+ XGBE_MB_SUBCMD_PASSIVE_OTHER,
+
+ /* 1GbE Mode subcommands */
+ XGBE_MB_SUBCMD_10MBITS = 0,
+ XGBE_MB_SUBCMD_100MBITS,
+ XGBE_MB_SUBCMD_1G_SGMII,
+ XGBE_MB_SUBCMD_1G_KX
+};
+
struct xgbe_phy {
struct ethtool_link_ksettings lks;
@@ -747,8 +777,11 @@ struct xgbe_hw_if {
int (*set_ext_mii_mode)(struct xgbe_prv_data *, unsigned int,
enum xgbe_mdio_mode);
- int (*read_ext_mii_regs)(struct xgbe_prv_data *, int, int);
- int (*write_ext_mii_regs)(struct xgbe_prv_data *, int, int, u16);
+ int (*read_ext_mii_regs_c22)(struct xgbe_prv_data *, int, int);
+ int (*write_ext_mii_regs_c22)(struct xgbe_prv_data *, int, int, u16);
+ int (*read_ext_mii_regs_c45)(struct xgbe_prv_data *, int, int, int);
+ int (*write_ext_mii_regs_c45)(struct xgbe_prv_data *, int, int, int,
+ u16);
int (*set_gpio)(struct xgbe_prv_data *, unsigned int);
int (*clr_gpio)(struct xgbe_prv_data *, unsigned int);
@@ -1012,6 +1045,7 @@ struct xgbe_version_data {
unsigned int tx_desc_prefetch;
unsigned int rx_desc_prefetch;
unsigned int an_cdr_workaround;
+ unsigned int enable_rrc;
};
struct xgbe_prv_data {
@@ -1253,6 +1287,7 @@ struct xgbe_prv_data {
unsigned int parallel_detect;
unsigned int fec_ability;
unsigned long an_start;
+ unsigned long kr_start_time;
enum xgbe_an_mode an_mode;
/* I2C support */
@@ -1282,6 +1317,10 @@ struct xgbe_prv_data {
bool debugfs_an_cdr_workaround;
bool debugfs_an_cdr_track_early;
+ bool en_rx_adap;
+ int rx_adapt_retries;
+ bool rx_adapt_done;
+ bool mode_set;
};
/* Function prototypes*/
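/* Editorial sketch (not part of the patch): with XGBE_SGMII_AN_LINK_SPEED_10
 * added above, the SGMII auto-negotiation status word carries a link-status
 * bit, a two-bit speed field and a duplex bit.  Decoding the speed field then
 * mirrors the switch in xgbe_phy_an37_sgmii_outcome(); the helper name below
 * is hypothetical.
 */
static int xgbe_example_sgmii_an_speed(unsigned int an_status)
{
	switch (an_status & XGBE_SGMII_AN_LINK_SPEED) {
	case XGBE_SGMII_AN_LINK_SPEED_10:
		return SPEED_10;
	case XGBE_SGMII_AN_LINK_SPEED_100:
		return SPEED_100;
	case XGBE_SGMII_AN_LINK_SPEED_1000:
		return SPEED_1000;
	default:
		return SPEED_UNKNOWN;
	}
}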
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index d022b6db9e06..379d19d18dbe 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -672,7 +672,7 @@ static int xge_probe(struct platform_device *pdev)
if (ret)
goto err;
- netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &pdata->napi, xge_napi);
ret = register_netdev(ndev);
if (ret) {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 220dc42af31a..390671640388 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -696,6 +696,12 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
buf_pool->rx_skb[skb_index] = NULL;
datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
+
+ /* strip off CRC as HW isn't doing this */
+ nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
+ if (!nv)
+ datalen -= 4;
+
skb_put(skb, datalen);
prefetch(skb->data - NET_IP_ALIGN);
skb->protocol = eth_type_trans(skb, ndev);
@@ -717,12 +723,8 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
}
}
- nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));
- if (!nv) {
- /* strip off CRC as HW isn't doing this */
- datalen -= 4;
+ if (!nv)
goto skip_jumbo;
- }
slots = page_pool->slots - 1;
head = page_pool->head;
@@ -869,7 +871,7 @@ static void xgene_enet_timeout(struct net_device *ndev, unsigned int txqueue)
for (i = 0; i < pdata->txq_cnt; i++) {
txq = netdev_get_tx_queue(ndev, i);
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
netif_tx_start_queue(txq);
}
}
@@ -1002,8 +1004,10 @@ static int xgene_enet_open(struct net_device *ndev)
xgene_enet_napi_enable(pdata);
ret = xgene_enet_register_irq(ndev);
- if (ret)
+ if (ret) {
+ xgene_enet_napi_disable(pdata);
return ret;
+ }
if (ndev->phydev) {
phy_start(ndev->phydev);
@@ -1975,14 +1979,12 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
for (i = 0; i < pdata->rxq_cnt; i++) {
napi = &pdata->rx_ring[i]->napi;
- netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(pdata->ndev, napi, xgene_enet_napi);
}
for (i = 0; i < pdata->cq_cnt; i++) {
napi = &pdata->tx_ring[i]->cp_ring->napi;
- netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(pdata->ndev, napi, xgene_enet_napi);
}
}
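/* Editorial note (not part of the patch): the netif_napi_add() conversions in
 * this series drop the weight argument; the three-argument form uses the
 * default NAPI weight (64) internally.  A driver that really needs a custom
 * budget would use the _weight variant instead, as sketched below with a
 * hypothetical poll callback.
 */
static void example_register_napi(struct net_device *ndev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int))
{
	netif_napi_add(ndev, napi, poll);
	/* or: netif_napi_add_weight(ndev, napi, poll, 16); for an explicit weight */
}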
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 9a650d1c1bdd..9e653e2925f7 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -25,7 +25,6 @@
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/pgtable.h>
-#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/page.h>
@@ -1237,6 +1236,7 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
struct bmac_data *bp;
const unsigned char *prop_addr;
unsigned char addr[6];
+ u8 macaddr[6];
struct net_device *dev;
int is_bmac_plus = ((int)match->data) != 0;
@@ -1284,7 +1284,9 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
rev = addr[0] == 0 && addr[1] == 0xA0;
for (j = 0; j < 6; ++j)
- dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
+ macaddr[j] = rev ? bitrev8(addr[j]): addr[j];
+
+ eth_hw_addr_set(dev, macaddr);
/* Enable chip without interrupts for now */
bmac_enable_and_reset_chip(dev);
@@ -1508,7 +1510,7 @@ static void bmac_tx_timeout(struct timer_list *t)
i = bp->tx_empty;
++dev->stats.tx_errors;
if (i != bp->tx_fill) {
- dev_kfree_skb(bp->tx_bufs[i]);
+ dev_kfree_skb_irq(bp->tx_bufs[i]);
bp->tx_bufs[i] = NULL;
if (++i >= N_TX_RING) i = 0;
bp->tx_empty = i;
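/* Editorial note (not part of the patch): the bmac, mace and macmace hunks all
 * follow the same pattern because netdev->dev_addr is no longer writable in
 * place - the driver assembles the address in a local buffer and installs it
 * with eth_hw_addr_set().  A condensed sketch of that pattern; the function
 * name is hypothetical.
 */
static void example_set_mac_from_rom(struct net_device *dev, const u8 *rom,
				     bool reversed)
{
	u8 macaddr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		macaddr[i] = reversed ? bitrev8(rom[i]) : rom[i];

	eth_hw_addr_set(dev, macaddr);	/* replaces writes to dev->dev_addr[] */
}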
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 4b80e3a52a19..fd1b008b7208 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -20,7 +20,6 @@
#include <linux/bitrev.h>
#include <linux/slab.h>
#include <linux/pgtable.h>
-#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/macio.h>
@@ -90,7 +89,7 @@ static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(struct timer_list *t);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
-static void __mace_set_address(struct net_device *dev, void *addr);
+static void __mace_set_address(struct net_device *dev, const void *addr);
/*
* If we can't get a skbuff when we need it, we use this area for DMA.
@@ -112,6 +111,7 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
struct net_device *dev;
struct mace_data *mp;
const unsigned char *addr;
+ u8 macaddr[ETH_ALEN];
int j, rev, rc = -EBUSY;
if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
@@ -167,8 +167,9 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
rev = addr[0] == 0 && addr[1] == 0xA0;
for (j = 0; j < 6; ++j) {
- dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
+ macaddr[j] = rev ? bitrev8(addr[j]): addr[j];
}
+ eth_hw_addr_set(dev, macaddr);
mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
in_8(&mp->mace->chipid_lo);
@@ -369,11 +370,12 @@ static void mace_reset(struct net_device *dev)
out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}
-static void __mace_set_address(struct net_device *dev, void *addr)
+static void __mace_set_address(struct net_device *dev, const void *addr)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace __iomem *mb = mp->mace;
- unsigned char *p = addr;
+ const unsigned char *p = addr;
+ u8 macaddr[ETH_ALEN];
int i;
/* load up the hardware address */
@@ -385,7 +387,10 @@ static void __mace_set_address(struct net_device *dev, void *addr)
;
}
for (i = 0; i < 6; ++i)
- out_8(&mb->padr, dev->dev_addr[i] = p[i]);
+ out_8(&mb->padr, macaddr[i] = p[i]);
+
+ eth_hw_addr_set(dev, macaddr);
+
if (mp->chipid != BROKEN_ADDRCHG_REV)
out_8(&mb->iac, 0);
}
@@ -841,7 +846,7 @@ static void mace_tx_timeout(struct timer_list *t)
if (mp->tx_bad_runt) {
mp->tx_bad_runt = 0;
} else if (i != mp->tx_fill) {
- dev_kfree_skb(mp->tx_bufs[i]);
+ dev_kfree_skb_irq(mp->tx_bufs[i]);
if (++i >= N_TX_RING)
i = 0;
mp->tx_empty = i;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 95d3061c61be..8fcaf1639920 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -92,7 +92,7 @@ static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev, unsigned int txqueue);
-static void __mace_set_address(struct net_device *dev, void *addr);
+static void __mace_set_address(struct net_device *dev, const void *addr);
/*
* Load a receive DMA channel with a base address and ring length
@@ -197,6 +197,7 @@ static int mace_probe(struct platform_device *pdev)
unsigned char *addr;
struct net_device *dev;
unsigned char checksum = 0;
+ u8 macaddr[ETH_ALEN];
int err;
dev = alloc_etherdev(PRIV_BYTES);
@@ -229,8 +230,9 @@ static int mace_probe(struct platform_device *pdev)
for (j = 0; j < 6; ++j) {
u8 v = bitrev8(addr[j<<4]);
checksum ^= v;
- dev->dev_addr[j] = v;
+ macaddr[j] = v;
}
+ eth_hw_addr_set(dev, macaddr);
for (; j < 8; ++j) {
checksum ^= bitrev8(addr[j<<4]);
}
@@ -315,11 +317,12 @@ static void mace_reset(struct net_device *dev)
* Load the address on a mace controller.
*/
-static void __mace_set_address(struct net_device *dev, void *addr)
+static void __mace_set_address(struct net_device *dev, const void *addr)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace *mb = mp->mace;
- unsigned char *p = addr;
+ const unsigned char *p = addr;
+ u8 macaddr[ETH_ALEN];
int i;
/* load up the hardware address */
@@ -331,7 +334,8 @@ static void __mace_set_address(struct net_device *dev, void *addr)
;
}
for (i = 0; i < 6; ++i)
- mb->padr = dev->dev_addr[i] = p[i];
+ mb->padr = macaddr[i] = p[i];
+ eth_hw_addr_set(dev, macaddr);
if (mp->chipid != BROKEN_ADDRCHG_REV)
mb->iac = 0;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 52b9833fda99..7e9c74b141ef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -40,6 +40,7 @@
#define AQ_CFG_RX_HDR_SIZE 256U
#define AQ_CFG_RX_PAGEORDER 0U
+#define AQ_CFG_XDP_PAGEORDER 2U
/* LRO */
#define AQ_CFG_IS_LRO_DEF 1U
@@ -64,8 +65,6 @@
*/
#define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2)
-#define AQ_CFG_NAPI_WEIGHT 64U
-
/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
#define AQ_CFG_FC_MODE AQ_NIC_FC_FULL
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
index d3526cd38f3d..414b2e448d59 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
@@ -124,7 +124,7 @@ static const struct hwmon_channel_info aq_hwmon_temp = {
.config = aq_hwmon_temp_config,
};
-static const struct hwmon_channel_info *aq_hwmon_info[] = {
+static const struct hwmon_channel_info * const aq_hwmon_info[] = {
&aq_hwmon_temp,
NULL,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a9ef0544e30f..ac4ea93bd8dd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -13,6 +13,7 @@
#include "aq_ptp.h"
#include "aq_filters.h"
#include "aq_macsec.h"
+#include "aq_main.h"
#include <linux/ptp_clock_kernel.h>
@@ -97,6 +98,15 @@ static const char * const aq_ethtool_queue_rx_stat_names[] = {
"%sQueue[%d] AllocFails",
"%sQueue[%d] SkbAllocFails",
"%sQueue[%d] Polls",
+ "%sQueue[%d] PageFlips",
+ "%sQueue[%d] PageReuses",
+ "%sQueue[%d] PageFrees",
+ "%sQueue[%d] XdpAbort",
+ "%sQueue[%d] XdpDrop",
+ "%sQueue[%d] XdpPass",
+ "%sQueue[%d] XdpTx",
+ "%sQueue[%d] XdpInvalid",
+ "%sQueue[%d] XdpRedirect",
};
static const char * const aq_ethtool_queue_tx_stat_names[] = {
@@ -229,7 +239,7 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
"%u.%u.%u", firmware_version >> 24,
(firmware_version >> 16) & 0xFFU, firmware_version & 0xFFFFU);
- strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
+ strscpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
sizeof(drvinfo->bus_info));
drvinfo->n_stats = aq_ethtool_n_stats(ndev);
drvinfo->testinfo_len = 0;
@@ -812,7 +822,9 @@ static int aq_ethtool_set_pauseparam(struct net_device *ndev,
}
static void aq_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg;
@@ -827,7 +839,9 @@ static void aq_get_ringparam(struct net_device *ndev,
}
static int aq_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
const struct aq_hw_caps_s *hw_caps;
@@ -845,7 +859,7 @@ static int aq_set_ringparam(struct net_device *ndev,
if (netif_running(ndev)) {
ndev_running = true;
- dev_close(ndev);
+ aq_ndev_close(ndev);
}
cfg->rxds = max(ring->rx_pending, hw_caps->rxds_min);
@@ -861,7 +875,7 @@ static int aq_set_ringparam(struct net_device *ndev,
goto err_exit;
if (ndev_running)
- err = dev_open(ndev, NULL);
+ err = aq_ndev_open(ndev);
err_exit:
return err;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 1bc4d33a0ce5..30a573db02bb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -826,7 +826,6 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int hweight = 0;
int err = 0;
- int i;
if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
return -EOPNOTSUPP;
@@ -837,8 +836,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
- for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
- hweight += hweight_long(aq_nic->active_vlans[i]);
+ hweight = bitmap_weight(aq_nic->active_vlans, VLAN_N_VID);
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
if (err)
@@ -871,7 +869,7 @@ int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int err = 0;
- memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
+ bitmap_zero(aq_nic->active_vlans, VLAN_N_VID);
aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
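The two aq_filters.c hunks above replace an open-coded population count and a memset() of the VLAN bitmap with the generic bitmap helpers; bitmap_zero(map, VLAN_N_VID) clears the same storage the memset() did. A minimal standalone sketch of the equivalence, using only helpers that appear in the hunks (not driver code):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

/* Old style: walk the backing longs by hand and add up their weights. */
static int vlan_count_open_coded(const unsigned long *active_vlans)
{
	int hweight = 0;
	int i;

	for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
		hweight += hweight_long(active_vlans[i]);

	return hweight;
}

/* New style: one helper computes the same total over VLAN_N_VID bits. */
static int vlan_count_helper(const unsigned long *active_vlans)
{
	return bitmap_weight(active_vlans, VLAN_N_VID);
}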
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index 02058fe79f52..7eb5851eb95d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -292,9 +292,6 @@ static int aq_mdo_dev_open(struct macsec_context *ctx)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int ret = 0;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev))
ret = aq_apply_secy_cfg(nic, ctx->secy);
@@ -306,9 +303,6 @@ static int aq_mdo_dev_stop(struct macsec_context *ctx)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int i;
- if (ctx->prepare)
- return 0;
-
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (nic->macsec_cfg->txsc_idx_busy & BIT(i))
aq_clear_secy(nic, nic->macsec_cfg->aq_txsc[i].sw_secy,
@@ -466,9 +460,6 @@ static int aq_mdo_add_secy(struct macsec_context *ctx)
if (txsc_idx == AQ_MACSEC_MAX_SC)
return -ENOSPC;
- if (ctx->prepare)
- return 0;
-
cfg->sc_sa = sc_sa;
cfg->aq_txsc[txsc_idx].hw_sc_idx = aq_to_hw_sc_idx(txsc_idx, sc_sa);
cfg->aq_txsc[txsc_idx].sw_secy = secy;
@@ -492,9 +483,6 @@ static int aq_mdo_upd_secy(struct macsec_context *ctx)
if (txsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_set_txsc(nic, txsc_idx);
@@ -543,9 +531,6 @@ static int aq_mdo_del_secy(struct macsec_context *ctx)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int ret = 0;
- if (ctx->prepare)
- return 0;
-
if (!nic->macsec_cfg)
return 0;
@@ -585,6 +570,7 @@ static int aq_update_txsa(struct aq_nic_s *nic, const unsigned int sc_idx,
ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx);
+ memzero_explicit(&key_rec, sizeof(key_rec));
return ret;
}
@@ -601,9 +587,6 @@ static int aq_mdo_add_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
set_bit(ctx->sa.assoc_num, &aq_txsc->tx_sa_idx_busy);
@@ -631,9 +614,6 @@ static int aq_mdo_upd_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
@@ -681,9 +661,6 @@ static int aq_mdo_del_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
ret = aq_clear_txsa(nic, &cfg->aq_txsc[txsc_idx], ctx->sa.assoc_num,
AQ_CLEAR_ALL);
@@ -780,9 +757,6 @@ static int aq_mdo_add_rxsc(struct macsec_context *ctx)
if (rxsc_idx >= rxsc_idx_max)
return -ENOSPC;
- if (ctx->prepare)
- return 0;
-
cfg->aq_rxsc[rxsc_idx].hw_sc_idx = aq_to_hw_sc_idx(rxsc_idx,
cfg->sc_sa);
cfg->aq_rxsc[rxsc_idx].sw_secy = ctx->secy;
@@ -809,9 +783,6 @@ static int aq_mdo_upd_rxsc(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
ret = aq_set_rxsc(nic, rxsc_idx);
@@ -876,9 +847,6 @@ static int aq_mdo_del_rxsc(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev))
clear_type = AQ_CLEAR_ALL;
@@ -932,6 +900,7 @@ static int aq_update_rxsa(struct aq_nic_s *nic, const unsigned int sc_idx,
ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx);
+ memzero_explicit(&sa_key_record, sizeof(sa_key_record));
return ret;
}
@@ -948,9 +917,6 @@ static int aq_mdo_add_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
set_bit(ctx->sa.assoc_num, &aq_rxsc->rx_sa_idx_busy);
@@ -978,9 +944,6 @@ static int aq_mdo_upd_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_update_rxsa(nic, cfg->aq_rxsc[rxsc_idx].hw_sc_idx,
secy, ctx->sa.rx_sa, NULL,
@@ -1029,9 +992,6 @@ static int aq_mdo_del_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
ret = aq_clear_rxsa(nic, &cfg->aq_rxsc[rxsc_idx], ctx->sa.assoc_num,
AQ_CLEAR_ALL);
@@ -1044,9 +1004,6 @@ static int aq_mdo_get_dev_stats(struct macsec_context *ctx)
struct aq_macsec_common_stats *stats = &nic->macsec_cfg->stats;
struct aq_hw_s *hw = nic->aq_hw;
- if (ctx->prepare)
- return 0;
-
aq_get_macsec_common_stats(hw, stats);
ctx->stats.dev_stats->OutPktsUntagged = stats->out.untagged_pkts;
@@ -1073,9 +1030,6 @@ static int aq_mdo_get_tx_sc_stats(struct macsec_context *ctx)
if (txsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
stats = &aq_txsc->stats;
aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx, stats);
@@ -1106,9 +1060,6 @@ static int aq_mdo_get_tx_sa_stats(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
sa_idx = aq_txsc->hw_sc_idx | ctx->sa.assoc_num;
stats = &aq_txsc->tx_sa_stats[ctx->sa.assoc_num];
@@ -1147,9 +1098,6 @@ static int aq_mdo_get_rx_sc_stats(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
for (i = 0; i < MACSEC_NUM_AN; i++) {
if (!test_bit(i, &aq_rxsc->rx_sa_idx_busy))
@@ -1196,9 +1144,6 @@ static int aq_mdo_get_rx_sa_stats(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
stats = &aq_rxsc->rx_sa_stats[ctx->sa.assoc_num];
sa_idx = aq_rxsc->hw_sc_idx | ctx->sa.assoc_num;
@@ -1451,26 +1396,57 @@ static void aq_check_txsa_expiration(struct aq_nic_s *nic)
egress_sa_threshold_expired);
}
+#define AQ_LOCKED_MDO_DEF(mdo) \
+static int aq_locked_mdo_##mdo(struct macsec_context *ctx) \
+{ \
+ struct aq_nic_s *nic = netdev_priv(ctx->netdev); \
+ int ret; \
+ mutex_lock(&nic->macsec_mutex); \
+ ret = aq_mdo_##mdo(ctx); \
+ mutex_unlock(&nic->macsec_mutex); \
+ return ret; \
+}
+
+AQ_LOCKED_MDO_DEF(dev_open)
+AQ_LOCKED_MDO_DEF(dev_stop)
+AQ_LOCKED_MDO_DEF(add_secy)
+AQ_LOCKED_MDO_DEF(upd_secy)
+AQ_LOCKED_MDO_DEF(del_secy)
+AQ_LOCKED_MDO_DEF(add_rxsc)
+AQ_LOCKED_MDO_DEF(upd_rxsc)
+AQ_LOCKED_MDO_DEF(del_rxsc)
+AQ_LOCKED_MDO_DEF(add_rxsa)
+AQ_LOCKED_MDO_DEF(upd_rxsa)
+AQ_LOCKED_MDO_DEF(del_rxsa)
+AQ_LOCKED_MDO_DEF(add_txsa)
+AQ_LOCKED_MDO_DEF(upd_txsa)
+AQ_LOCKED_MDO_DEF(del_txsa)
+AQ_LOCKED_MDO_DEF(get_dev_stats)
+AQ_LOCKED_MDO_DEF(get_tx_sc_stats)
+AQ_LOCKED_MDO_DEF(get_tx_sa_stats)
+AQ_LOCKED_MDO_DEF(get_rx_sc_stats)
+AQ_LOCKED_MDO_DEF(get_rx_sa_stats)
+
const struct macsec_ops aq_macsec_ops = {
- .mdo_dev_open = aq_mdo_dev_open,
- .mdo_dev_stop = aq_mdo_dev_stop,
- .mdo_add_secy = aq_mdo_add_secy,
- .mdo_upd_secy = aq_mdo_upd_secy,
- .mdo_del_secy = aq_mdo_del_secy,
- .mdo_add_rxsc = aq_mdo_add_rxsc,
- .mdo_upd_rxsc = aq_mdo_upd_rxsc,
- .mdo_del_rxsc = aq_mdo_del_rxsc,
- .mdo_add_rxsa = aq_mdo_add_rxsa,
- .mdo_upd_rxsa = aq_mdo_upd_rxsa,
- .mdo_del_rxsa = aq_mdo_del_rxsa,
- .mdo_add_txsa = aq_mdo_add_txsa,
- .mdo_upd_txsa = aq_mdo_upd_txsa,
- .mdo_del_txsa = aq_mdo_del_txsa,
- .mdo_get_dev_stats = aq_mdo_get_dev_stats,
- .mdo_get_tx_sc_stats = aq_mdo_get_tx_sc_stats,
- .mdo_get_tx_sa_stats = aq_mdo_get_tx_sa_stats,
- .mdo_get_rx_sc_stats = aq_mdo_get_rx_sc_stats,
- .mdo_get_rx_sa_stats = aq_mdo_get_rx_sa_stats,
+ .mdo_dev_open = aq_locked_mdo_dev_open,
+ .mdo_dev_stop = aq_locked_mdo_dev_stop,
+ .mdo_add_secy = aq_locked_mdo_add_secy,
+ .mdo_upd_secy = aq_locked_mdo_upd_secy,
+ .mdo_del_secy = aq_locked_mdo_del_secy,
+ .mdo_add_rxsc = aq_locked_mdo_add_rxsc,
+ .mdo_upd_rxsc = aq_locked_mdo_upd_rxsc,
+ .mdo_del_rxsc = aq_locked_mdo_del_rxsc,
+ .mdo_add_rxsa = aq_locked_mdo_add_rxsa,
+ .mdo_upd_rxsa = aq_locked_mdo_upd_rxsa,
+ .mdo_del_rxsa = aq_locked_mdo_del_rxsa,
+ .mdo_add_txsa = aq_locked_mdo_add_txsa,
+ .mdo_upd_txsa = aq_locked_mdo_upd_txsa,
+ .mdo_del_txsa = aq_locked_mdo_del_txsa,
+ .mdo_get_dev_stats = aq_locked_mdo_get_dev_stats,
+ .mdo_get_tx_sc_stats = aq_locked_mdo_get_tx_sc_stats,
+ .mdo_get_tx_sa_stats = aq_locked_mdo_get_tx_sa_stats,
+ .mdo_get_rx_sc_stats = aq_locked_mdo_get_rx_sc_stats,
+ .mdo_get_rx_sa_stats = aq_locked_mdo_get_rx_sa_stats,
};
int aq_macsec_init(struct aq_nic_s *nic)
@@ -1492,6 +1468,7 @@ int aq_macsec_init(struct aq_nic_s *nic)
nic->ndev->features |= NETIF_F_HW_MACSEC;
nic->ndev->macsec_ops = &aq_macsec_ops;
+ mutex_init(&nic->macsec_mutex);
return 0;
}
@@ -1515,7 +1492,7 @@ int aq_macsec_enable(struct aq_nic_s *nic)
if (!nic->macsec_cfg)
return 0;
- rtnl_lock();
+ mutex_lock(&nic->macsec_mutex);
if (nic->aq_fw_ops->send_macsec_req) {
struct macsec_cfg_request cfg = { 0 };
@@ -1564,7 +1541,7 @@ int aq_macsec_enable(struct aq_nic_s *nic)
ret = aq_apply_macsec_cfg(nic);
unlock:
- rtnl_unlock();
+ mutex_unlock(&nic->macsec_mutex);
return ret;
}
@@ -1576,9 +1553,9 @@ void aq_macsec_work(struct aq_nic_s *nic)
if (!netif_carrier_ok(nic->ndev))
return;
- rtnl_lock();
+ mutex_lock(&nic->macsec_mutex);
aq_check_txsa_expiration(nic);
- rtnl_unlock();
+ mutex_unlock(&nic->macsec_mutex);
}
int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
@@ -1589,21 +1566,30 @@ int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic)
if (!cfg)
return 0;
+ mutex_lock(&nic->macsec_mutex);
+
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (!test_bit(i, &cfg->rxsc_idx_busy))
continue;
cnt += hweight_long(cfg->aq_rxsc[i].rx_sa_idx_busy);
}
+ mutex_unlock(&nic->macsec_mutex);
return cnt;
}
int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic)
{
+ int cnt;
+
if (!nic->macsec_cfg)
return 0;
- return hweight_long(nic->macsec_cfg->txsc_idx_busy);
+ mutex_lock(&nic->macsec_mutex);
+ cnt = hweight_long(nic->macsec_cfg->txsc_idx_busy);
+ mutex_unlock(&nic->macsec_mutex);
+
+ return cnt;
}
int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
@@ -1614,12 +1600,15 @@ int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic)
if (!cfg)
return 0;
+ mutex_lock(&nic->macsec_mutex);
+
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (!test_bit(i, &cfg->txsc_idx_busy))
continue;
cnt += hweight_long(cfg->aq_txsc[i].tx_sa_idx_busy);
}
+ mutex_unlock(&nic->macsec_mutex);
return cnt;
}
@@ -1691,6 +1680,8 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
if (!cfg)
return data;
+ mutex_lock(&nic->macsec_mutex);
+
aq_macsec_update_stats(nic);
common_stats = &cfg->stats;
@@ -1773,5 +1764,7 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data)
data += i;
+ mutex_unlock(&nic->macsec_mutex);
+
return data;
}
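AQ_LOCKED_MDO_DEF above stamps out one wrapper per MACsec offload callback so that every entry point serializes on nic->macsec_mutex, the same mutex that replaces rtnl_lock()/rtnl_unlock() in aq_macsec_enable() and aq_macsec_work(). For a single callback the preprocessor output looks roughly like this (illustrative expansion of the macro, not literal tree content):

static int aq_locked_mdo_dev_open(struct macsec_context *ctx)
{
	struct aq_nic_s *nic = netdev_priv(ctx->netdev);
	int ret;

	mutex_lock(&nic->macsec_mutex);
	ret = aq_mdo_dev_open(ctx);	/* the unlocked implementation above */
	mutex_unlock(&nic->macsec_mutex);

	return ret;
}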
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index e22935ce9573..0b2a52199914 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -14,17 +14,23 @@
#include "aq_ptp.h"
#include "aq_filters.h"
#include "aq_hw_utils.h"
+#include "aq_vec.h"
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+#include <linux/filter.h>
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
+DEFINE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+EXPORT_SYMBOL(aq_xdp_locking_key);
+
static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME;
static const struct net_device_ops aq_ndev_ops;
@@ -53,7 +59,7 @@ struct net_device *aq_ndev_alloc(void)
return ndev;
}
-static int aq_ndev_open(struct net_device *ndev)
+int aq_ndev_open(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
int err = 0;
@@ -83,17 +89,14 @@ err_exit:
return err;
}
-static int aq_ndev_close(struct net_device *ndev)
+int aq_ndev_close(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
int err = 0;
err = aq_nic_stop(aq_nic);
- if (err < 0)
- goto err_exit;
aq_nic_deinit(aq_nic, true);
-err_exit:
return err;
}
@@ -126,9 +129,19 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd
static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
{
+ int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *prog;
int err;
+ prog = READ_ONCE(aq_nic->xdp_prog);
+ if (prog && !prog->aux->xdp_has_frags &&
+ new_frame_size > AQ_CFG_RX_FRAME_MAX) {
+ netdev_err(ndev, "Illegal MTU %d for XDP prog without frags\n",
+ ndev->mtu);
+ return -EOPNOTSUPP;
+ }
+
err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
if (err < 0)
@@ -204,6 +217,25 @@ err_exit:
return err;
}
+static netdev_features_t aq_ndev_fix_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *prog;
+
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ prog = READ_ONCE(aq_nic->xdp_prog);
+ if (prog && !prog->aux->xdp_has_frags &&
+ aq_nic->xdp_prog && features & NETIF_F_LRO) {
+ netdev_err(ndev, "LRO is not supported with single buffer XDP, disabling\n");
+ features &= ~NETIF_F_LRO;
+ }
+
+ return features;
+}
+
static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
@@ -231,9 +263,6 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
struct hwtstamp_config *config)
{
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
@@ -413,6 +442,56 @@ static int aq_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
mqprio->qopt.prio_tc_map);
}
+static int aq_xdp_setup(struct net_device *ndev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ bool need_update, running = netif_running(ndev);
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct bpf_prog *old_prog;
+
+ if (prog && !prog->aux->xdp_has_frags) {
+ if (ndev->mtu > AQ_CFG_RX_FRAME_MAX) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "prog does not support XDP frags");
+ return -EOPNOTSUPP;
+ }
+
+ if (prog && ndev->features & NETIF_F_LRO) {
+ netdev_err(ndev,
+ "LRO is not supported with single buffer XDP, disabling\n");
+ ndev->features &= ~NETIF_F_LRO;
+ }
+ }
+
+ need_update = !!aq_nic->xdp_prog != !!prog;
+ if (running && need_update)
+ aq_ndev_close(ndev);
+
+ old_prog = xchg(&aq_nic->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (!old_prog && prog)
+ static_branch_inc(&aq_xdp_locking_key);
+ else if (old_prog && !prog)
+ static_branch_dec(&aq_xdp_locking_key);
+
+ if (running && need_update)
+ return aq_ndev_open(ndev);
+
+ return 0;
+}
+
+static int aq_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return aq_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
@@ -421,10 +500,13 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
.ndo_set_features = aq_ndev_set_features,
+ .ndo_fix_features = aq_ndev_fix_features,
.ndo_eth_ioctl = aq_ndev_ioctl,
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
.ndo_setup_tc = aq_ndo_setup_tc,
+ .ndo_bpf = aq_xdp,
+ .ndo_xdp_xmit = aq_xdp_xmit,
};
static int __init aq_ndev_init_module(void)
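aq_xdp_setup() above installs the new program with xchg() and only flips aq_xdp_locking_key on an attach or detach transition, so the RX clean path (see aq_ring_rx_clean() later in this diff) pays for the XDP branch only while a program is attached. Stripped of driver specifics, the pattern looks like this (sketch; every name except the bpf and static-key APIs is illustrative):

#include <linux/bpf.h>
#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(example_xdp_key);

static struct bpf_prog *example_xdp_prog;	/* illustrative stand-in */

static void example_xdp_swap(struct bpf_prog *prog)
{
	struct bpf_prog *old_prog = xchg(&example_xdp_prog, prog);

	if (old_prog)
		bpf_prog_put(old_prog);

	/* Count attach/detach transitions, not every replacement. */
	if (!old_prog && prog)
		static_branch_inc(&example_xdp_key);
	else if (old_prog && !prog)
		static_branch_dec(&example_xdp_key);
}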
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index a5a624b9ce73..a78c1a168d8e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -12,7 +12,11 @@
#include "aq_common.h"
#include "aq_nic.h"
+DECLARE_STATIC_KEY_FALSE(aq_xdp_locking_key);
+
void aq_ndev_schedule_work(struct work_struct *work);
struct net_device *aq_ndev_alloc(void);
+int aq_ndev_open(struct net_device *ndev);
+int aq_ndev_close(struct net_device *ndev);
#endif /* AQ_MAIN_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 33f1a1377588..d6d6d5d37ff3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -265,12 +265,10 @@ static void aq_nic_service_timer_cb(struct timer_list *t)
static void aq_nic_polling_timer_cb(struct timer_list *t)
{
struct aq_nic_s *self = from_timer(self, t, polling_timer);
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_isr(i, (void *)aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_isr(i, (void *)self->aq_vec[i]);
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
@@ -386,6 +384,11 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
+ self->ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
}
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
@@ -486,8 +489,8 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
err = aq_vec_start(aq_vec);
if (err < 0)
goto err_exit;
@@ -517,8 +520,8 @@ int aq_nic_start(struct aq_nic_s *self)
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
} else {
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ aq_vec = self->aq_vec[i];
err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
aq_vec_isr, aq_vec,
aq_vec_get_affinity_mask(aq_vec));
@@ -569,6 +572,103 @@ err_exit:
return err;
}
+static unsigned int aq_nic_map_xdp(struct aq_nic_s *self,
+ struct xdp_frame *xdpf,
+ struct aq_ring_s *ring)
+{
+ struct device *dev = aq_nic_get_dev(self);
+ struct aq_ring_buff_s *first = NULL;
+ unsigned int dx = ring->sw_tail;
+ struct aq_ring_buff_s *dx_buff;
+ struct skb_shared_info *sinfo;
+ unsigned int frag_count = 0U;
+ unsigned int nr_frags = 0U;
+ unsigned int ret = 0U;
+ u16 total_len;
+
+ dx_buff = &ring->buff_ring[dx];
+ dx_buff->flags = 0U;
+
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ total_len = xdpf->len;
+ dx_buff->len = total_len;
+ if (xdp_frame_has_frags(xdpf)) {
+ nr_frags = sinfo->nr_frags;
+ total_len += sinfo->xdp_frags_size;
+ }
+ dx_buff->pa = dma_map_single(dev, xdpf->data, dx_buff->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, dx_buff->pa)))
+ goto exit;
+
+ first = dx_buff;
+ dx_buff->len_pkt = total_len;
+ dx_buff->is_sop = 1U;
+ dx_buff->is_mapped = 1U;
+ ++ret;
+
+ for (; nr_frags--; ++frag_count) {
+ skb_frag_t *frag = &sinfo->frags[frag_count];
+ unsigned int frag_len = skb_frag_size(frag);
+ unsigned int buff_offset = 0U;
+ unsigned int buff_size = 0U;
+ dma_addr_t frag_pa;
+
+ while (frag_len) {
+ if (frag_len > AQ_CFG_TX_FRAME_MAX)
+ buff_size = AQ_CFG_TX_FRAME_MAX;
+ else
+ buff_size = frag_len;
+
+ frag_pa = skb_frag_dma_map(dev, frag, buff_offset,
+ buff_size, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, frag_pa)))
+ goto mapping_error;
+
+ dx = aq_ring_next_dx(ring, dx);
+ dx_buff = &ring->buff_ring[dx];
+
+ dx_buff->flags = 0U;
+ dx_buff->len = buff_size;
+ dx_buff->pa = frag_pa;
+ dx_buff->is_mapped = 1U;
+ dx_buff->eop_index = 0xffffU;
+
+ frag_len -= buff_size;
+ buff_offset += buff_size;
+
+ ++ret;
+ }
+ }
+
+ first->eop_index = dx;
+ dx_buff->is_eop = 1U;
+ dx_buff->skb = NULL;
+ dx_buff->xdpf = xdpf;
+ goto exit;
+
+mapping_error:
+ for (dx = ring->sw_tail;
+ ret > 0;
+ --ret, dx = aq_ring_next_dx(ring, dx)) {
+ dx_buff = &ring->buff_ring[dx];
+
+ if (!dx_buff->pa)
+ continue;
+ if (unlikely(dx_buff->is_sop))
+ dma_unmap_single(dev, dx_buff->pa, dx_buff->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, dx_buff->pa, dx_buff->len,
+ DMA_TO_DEVICE);
+ }
+
+exit:
+ return ret;
+}
+
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring)
{
@@ -697,6 +797,7 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
first->eop_index = dx;
dx_buff->is_eop = 1U;
dx_buff->skb = skb;
+ dx_buff->xdpf = NULL;
goto exit;
mapping_error:
@@ -725,6 +826,44 @@ exit:
return ret;
}
+int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
+ struct xdp_frame *xdpf)
+{
+ u16 queue_index = AQ_NIC_RING2QMAP(aq_nic, tx_ring->idx);
+ struct net_device *ndev = aq_nic_get_ndev(aq_nic);
+ struct skb_shared_info *sinfo;
+ int cpu = smp_processor_id();
+ int err = NETDEV_TX_BUSY;
+ struct netdev_queue *nq;
+ unsigned int frags = 1;
+
+ if (xdp_frame_has_frags(xdpf)) {
+ sinfo = xdp_get_shared_info_from_frame(xdpf);
+ frags += sinfo->nr_frags;
+ }
+
+ if (frags > AQ_CFG_SKB_FRAGS_MAX)
+ return err;
+
+ nq = netdev_get_tx_queue(ndev, tx_ring->idx);
+ __netif_tx_lock(nq, cpu);
+
+ aq_ring_update_queue_state(tx_ring);
+
+ /* Above status update may stop the queue. Check this. */
+ if (__netif_subqueue_stopped(aq_nic_get_ndev(aq_nic), queue_index))
+ goto out;
+
+ frags = aq_nic_map_xdp(aq_nic, xdpf, tx_ring);
+ if (likely(frags))
+ err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw, tx_ring,
+ frags);
+out:
+ __netif_tx_unlock(nq);
+
+ return err;
+}
+
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(self);
@@ -878,7 +1017,6 @@ int aq_nic_get_regs_count(struct aq_nic_s *self)
u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
- struct aq_vec_s *aq_vec = NULL;
struct aq_stats_s *stats;
unsigned int count = 0U;
unsigned int i = 0U;
@@ -928,11 +1066,11 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
data += i;
for (tc = 0U; tc < self->aq_nic_cfg.tcs; tc++) {
- for (i = 0U, aq_vec = self->aq_vec[0];
- aq_vec && self->aq_vecs > i;
- ++i, aq_vec = self->aq_vec[i]) {
+ for (i = 0U; self->aq_vecs > i; ++i) {
+ if (!self->aq_vec[i])
+ break;
data += count;
- count = aq_vec_get_sw_stats(aq_vec, tc, data);
+ count = aq_vec_get_sw_stats(self->aq_vec[i], tc, data);
}
}
@@ -1246,7 +1384,6 @@ int aq_nic_set_loopback(struct aq_nic_s *self)
int aq_nic_stop(struct aq_nic_s *self)
{
- struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
netif_tx_disable(self->ndev);
@@ -1264,9 +1401,8 @@ int aq_nic_stop(struct aq_nic_s *self)
aq_ptp_irq_free(self);
- for (i = 0U, aq_vec = self->aq_vec[0];
- self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
- aq_vec_stop(aq_vec);
+ for (i = 0U; self->aq_vecs > i; ++i)
+ aq_vec_stop(self->aq_vec[i]);
aq_ptp_ring_stop(self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 1a7148041e3d..ad33f8586532 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -11,6 +11,8 @@
#define AQ_NIC_H
#include <linux/ethtool.h>
+#include <net/xdp.h>
+#include <linux/bpf.h>
#include "aq_common.h"
#include "aq_rss.h"
@@ -128,6 +130,7 @@ struct aq_nic_s {
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
struct aq_ring_s *aq_ring_tx[AQ_HW_QUEUES_MAX];
struct aq_hw_s *aq_hw;
+ struct bpf_prog *xdp_prog;
struct net_device *ndev;
unsigned int aq_vecs;
unsigned int packet_filter;
@@ -154,6 +157,8 @@ struct aq_nic_s {
struct mutex fwreq_mutex;
#if IS_ENABLED(CONFIG_MACSEC)
struct aq_macsec_cfg *macsec_cfg;
+ /* mutex to protect data in macsec_cfg */
+ struct mutex macsec_mutex;
#endif
/* PTP support */
struct aq_ptp_s *aq_ptp;
@@ -177,6 +182,8 @@ void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
struct aq_ring_s *ring);
+int aq_nic_xmit_xdpf(struct aq_nic_s *aq_nic, struct aq_ring_s *tx_ring,
+ struct xdp_frame *xdpf);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 797a95142d1f..baa5f8cc31f2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -379,7 +379,8 @@ static void aq_pci_shutdown(struct pci_dev *pdev)
}
}
-static int aq_suspend_common(struct device *dev, bool deep)
+#ifdef CONFIG_PM
+static int aq_suspend_common(struct device *dev)
{
struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));
@@ -392,17 +393,15 @@ static int aq_suspend_common(struct device *dev, bool deep)
if (netif_running(nic->ndev))
aq_nic_stop(nic);
- if (deep) {
- aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
- aq_nic_set_power(nic);
- }
+ aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+ aq_nic_set_power(nic);
rtnl_unlock();
return 0;
}
-static int atl_resume_common(struct device *dev, bool deep)
+static int atl_resume_common(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct aq_nic_s *nic;
@@ -415,11 +414,6 @@ static int atl_resume_common(struct device *dev, bool deep)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- if (deep) {
- /* Reinitialize Nic/Vecs objects */
- aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
- }
-
if (netif_running(nic->ndev)) {
ret = aq_nic_init(nic);
if (ret)
@@ -444,22 +438,22 @@ err_exit:
static int aq_pm_freeze(struct device *dev)
{
- return aq_suspend_common(dev, false);
+ return aq_suspend_common(dev);
}
static int aq_pm_suspend_poweroff(struct device *dev)
{
- return aq_suspend_common(dev, true);
+ return aq_suspend_common(dev);
}
static int aq_pm_thaw(struct device *dev)
{
- return atl_resume_common(dev, false);
+ return atl_resume_common(dev);
}
static int aq_pm_resume_restore(struct device *dev)
{
- return atl_resume_common(dev, true);
+ return atl_resume_common(dev);
}
static const struct dev_pm_ops aq_pm_ops = {
@@ -470,6 +464,7 @@ static const struct dev_pm_ops aq_pm_ops = {
.restore = aq_pm_resume_restore,
.thaw = aq_pm_thaw,
};
+#endif
static struct pci_driver aq_pci_ops = {
.name = AQ_CFG_DRV_NAME,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index 06de19f63287..80b44043e6c5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -1217,8 +1217,7 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
atomic_set(&aq_ptp->offset_egress, 0);
atomic_set(&aq_ptp->offset_ingress, 0);
- netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
- aq_ptp_poll, AQ_CFG_NAPI_WEIGHT);
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi, aq_ptp_poll);
aq_ptp->idx_vector = idx_vec;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 81b3756417ec..7f933175cbda 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -7,15 +7,37 @@
/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
-#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"
+#include "aq_vec.h"
+#include "aq_main.h"
+#include <net/xdp.h>
+#include <linux/filter.h>
+#include <linux/bpf_trace.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+static void aq_get_rxpages_xdp(struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo;
+ int i;
+
+ if (xdp_buff_has_frags(xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ for (i = 0; i < sinfo->nr_frags; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+
+ page_ref_inc(skb_frag_page(frag));
+ }
+ }
+ page_ref_inc(buff->rxdata.page);
+}
+
static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
unsigned int len = PAGE_SIZE << rxpage->order;
@@ -27,9 +49,10 @@ static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
rxpage->page = NULL;
}
-static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
- struct device *dev)
+static int aq_alloc_rxpages(struct aq_rxpage *rxpage, struct aq_ring_s *rx_ring)
{
+ struct device *dev = aq_nic_get_dev(rx_ring->aq_nic);
+ unsigned int order = rx_ring->page_order;
struct page *page;
int ret = -ENOMEM;
dma_addr_t daddr;
@@ -47,7 +70,7 @@ static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
rxpage->page = page;
rxpage->daddr = daddr;
rxpage->order = order;
- rxpage->pg_off = 0;
+ rxpage->pg_off = rx_ring->page_offset;
return 0;
@@ -58,21 +81,26 @@ err_exit:
return ret;
}
-static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
- int order)
+static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf)
{
+ unsigned int order = self->page_order;
+ u16 page_offset = self->page_offset;
+ u16 frame_max = self->frame_max;
+ u16 tail_size = self->tail_size;
int ret;
if (rxbuf->rxdata.page) {
/* One means ring is the only user and can reuse */
if (page_ref_count(rxbuf->rxdata.page) > 1) {
/* Try reuse buffer */
- rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
- if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
- (PAGE_SIZE << order)) {
+ rxbuf->rxdata.pg_off += frame_max + page_offset +
+ tail_size;
+ if (rxbuf->rxdata.pg_off + frame_max + tail_size <=
+ (PAGE_SIZE << order)) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_flips++;
u64_stats_update_end(&self->stats.rx.syncp);
+
} else {
/* Buffer exhausted. We have other users and
* should release this page and realloc
@@ -84,7 +112,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
u64_stats_update_end(&self->stats.rx.syncp);
}
} else {
- rxbuf->rxdata.pg_off = 0;
+ rxbuf->rxdata.pg_off = page_offset;
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.pg_reuses++;
u64_stats_update_end(&self->stats.rx.syncp);
@@ -92,8 +120,7 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
}
if (!rxbuf->rxdata.page) {
- ret = aq_get_rxpage(&rxbuf->rxdata, order,
- aq_nic_get_dev(self->aq_nic));
+ ret = aq_alloc_rxpages(&rxbuf->rxdata, self);
if (ret) {
u64_stats_update_begin(&self->stats.rx.syncp);
self->stats.rx.alloc_fails++;
@@ -117,6 +144,7 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
err = -ENOMEM;
goto err_exit;
}
+
self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
self->size * self->dx_size,
&self->dx_ring_pa, GFP_KERNEL);
@@ -172,11 +200,22 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
self->idx = idx;
self->size = aq_nic_cfg->rxds;
self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
- self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
- (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
-
- if (aq_nic_cfg->rxpageorder > self->page_order)
- self->page_order = aq_nic_cfg->rxpageorder;
+ self->xdp_prog = aq_nic->xdp_prog;
+ self->frame_max = AQ_CFG_RX_FRAME_MAX;
+
+ /* Only order-2 is allowed if XDP is enabled */
+ if (READ_ONCE(self->xdp_prog)) {
+ self->page_offset = AQ_XDP_HEADROOM;
+ self->page_order = AQ_CFG_XDP_PAGEORDER;
+ self->tail_size = AQ_XDP_TAILROOM;
+ } else {
+ self->page_offset = 0;
+ self->page_order = fls(self->frame_max / PAGE_SIZE +
+ (self->frame_max % PAGE_SIZE ? 1 : 0)) - 1;
+ if (aq_nic_cfg->rxpageorder > self->page_order)
+ self->page_order = aq_nic_cfg->rxpageorder;
+ self->tail_size = 0;
+ }
self = aq_ring_alloc(self, aq_nic);
if (!self) {
@@ -298,15 +337,26 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
}
}
- if (unlikely(buff->is_eop && buff->skb)) {
+ if (likely(!buff->is_eop))
+ goto out;
+
+ if (buff->skb) {
u64_stats_update_begin(&self->stats.tx.syncp);
++self->stats.tx.packets;
self->stats.tx.bytes += buff->skb->len;
u64_stats_update_end(&self->stats.tx.syncp);
-
dev_kfree_skb_any(buff->skb);
- buff->skb = NULL;
+ } else if (buff->xdpf) {
+ u64_stats_update_begin(&self->stats.tx.syncp);
+ ++self->stats.tx.packets;
+ self->stats.tx.bytes += xdp_get_frame_len(buff->xdpf);
+ u64_stats_update_end(&self->stats.tx.syncp);
+ xdp_return_frame_rx_napi(buff->xdpf);
}
+
+out:
+ buff->skb = NULL;
+ buff->xdpf = NULL;
buff->pa = 0U;
buff->eop_index = 0xffffU;
self->sw_head = aq_ring_next_dx(self, self->sw_head);
@@ -339,14 +389,175 @@ static void aq_rx_checksum(struct aq_ring_s *self,
__skb_incr_checksum_unnecessary(skb);
}
-#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
-int aq_ring_rx_clean(struct aq_ring_s *self,
- struct napi_struct *napi,
- int *work_done,
- int budget)
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(dev);
+ unsigned int vec, i, drop = 0;
+ int cpu = smp_processor_id();
+ struct aq_nic_cfg_s *aq_cfg;
+ struct aq_ring_s *ring;
+
+ aq_cfg = aq_nic_get_cfg(aq_nic);
+ vec = cpu % aq_cfg->vecs;
+ ring = aq_nic->aq_ring_tx[AQ_NIC_CFG_TCVEC2RING(aq_cfg, 0, vec)];
+
+ for (i = 0; i < num_frames; i++) {
+ struct xdp_frame *xdpf = frames[i];
+
+ if (aq_nic_xmit_xdpf(aq_nic, ring, xdpf) == NETDEV_TX_BUSY)
+ drop++;
+ }
+
+ return num_frames - drop;
+}
+
+static struct sk_buff *aq_xdp_build_skb(struct xdp_buff *xdp,
+ struct net_device *dev,
+ struct aq_ring_buff_s *buff)
+{
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ return NULL;
+
+ skb = xdp_build_skb_from_frame(xdpf, dev);
+ if (!skb)
+ return NULL;
+
+ aq_get_rxpages_xdp(buff, xdp);
+ return skb;
+}
+
+static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic,
+ struct xdp_buff *xdp,
+ struct aq_ring_s *rx_ring,
+ struct aq_ring_buff_s *buff)
+{
+ int result = NETDEV_TX_BUSY;
+ struct aq_ring_s *tx_ring;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ u32 act = XDP_ABORTED;
+ struct sk_buff *skb;
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(xdp);
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+
+ prog = READ_ONCE(rx_ring->xdp_prog);
+ if (!prog)
+ return aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ /* single buffer XDP program, but packet is multi buffer, aborted */
+ if (xdp_buff_has_frags(xdp) && !prog->aux->xdp_has_frags)
+ goto out_aborted;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff);
+ if (!skb)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_pass;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ return skb;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ goto out_aborted;
+ tx_ring = aq_nic->aq_ring_tx[rx_ring->idx];
+ result = aq_nic_xmit_xdpf(aq_nic, tx_ring, xdpf);
+ if (result == NETDEV_TX_BUSY)
+ goto out_aborted;
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_tx;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(aq_nic->ndev, xdp, prog) < 0)
+ goto out_aborted;
+ xdp_do_flush();
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_redirect;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ aq_get_rxpages_xdp(buff, xdp);
+ break;
+ default:
+ fallthrough;
+ case XDP_ABORTED:
+out_aborted:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ trace_xdp_exception(aq_nic->ndev, prog, act);
+ bpf_warn_invalid_xdp_action(aq_nic->ndev, prog, act);
+ break;
+ case XDP_DROP:
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.xdp_drop;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ break;
+ }
+
+ return ERR_PTR(-result);
+}
+
+static bool aq_add_rx_fragment(struct device *dev,
+ struct aq_ring_s *ring,
+ struct aq_ring_buff_s *buff,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ struct aq_ring_buff_s *buff_ = buff;
+
+ memset(sinfo, 0, sizeof(*sinfo));
+ do {
+ skb_frag_t *frag;
+
+ if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS))
+ return true;
+
+ frag = &sinfo->frags[sinfo->nr_frags++];
+ buff_ = &ring->buff_ring[buff_->next];
+ dma_sync_single_range_for_cpu(dev,
+ buff_->rxdata.daddr,
+ buff_->rxdata.pg_off,
+ buff_->len,
+ DMA_FROM_DEVICE);
+ skb_frag_off_set(frag, buff_->rxdata.pg_off);
+ skb_frag_size_set(frag, buff_->len);
+ sinfo->xdp_frags_size += buff_->len;
+ __skb_frag_set_page(frag, buff_->rxdata.page);
+
+ buff_->is_cleaned = 1;
+
+ buff->is_ip_cso &= buff_->is_ip_cso;
+ buff->is_udp_cso &= buff_->is_udp_cso;
+ buff->is_tcp_cso &= buff_->is_tcp_cso;
+ buff->is_cso_err |= buff_->is_cso_err;
+
+ if (page_is_pfmemalloc(buff_->rxdata.page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
+ } while (!buff_->is_eop);
+
+ xdp_buff_set_frags_flag(xdp);
+
+ return false;
+}
+
+static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
+ int *work_done, int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
- bool is_rsc_completed = true;
int err = 0;
for (; (self->sw_head != self->hw_head) && budget;
@@ -364,8 +575,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
continue;
if (!buff->is_eop) {
+ unsigned int frag_cnt = 0U;
buff_ = buff;
do {
+ bool is_rsc_completed = true;
+
+ if (buff_->next >= self->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+
+ frag_cnt++;
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
is_rsc_completed =
@@ -373,22 +593,25 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
next_,
self->hw_head);
- if (unlikely(!is_rsc_completed))
- break;
+ if (unlikely(!is_rsc_completed) ||
+ frag_cnt > MAX_SKB_FRAGS) {
+ err = 0;
+ goto err_exit;
+ }
buff->is_error |= buff_->is_error;
buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
- if (!is_rsc_completed) {
- err = 0;
- goto err_exit;
- }
if (buff->is_error ||
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
do {
+ if (buff_->next >= self->size) {
+ err = -EIO;
+ goto err_exit;
+ }
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
@@ -438,16 +661,15 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
ALIGN(hdr_len, sizeof(long)));
if (buff->len - hdr_len > 0) {
- skb_add_rx_frag(skb, 0, buff->rxdata.page,
+ skb_add_rx_frag(skb, i++, buff->rxdata.page,
buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len,
- AQ_CFG_RX_FRAME_MAX);
+ self->frame_max);
page_ref_inc(buff->rxdata.page);
}
if (!buff->is_eop) {
buff_ = buff;
- i = 1U;
do {
next_ = buff_->next;
buff_ = &self->buff_ring[next_];
@@ -461,7 +683,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff_->rxdata.page,
buff_->rxdata.pg_off,
buff_->len,
- AQ_CFG_RX_FRAME_MAX);
+ self->frame_max);
page_ref_inc(buff_->rxdata.page);
buff_->is_cleaned = 1;
@@ -502,6 +724,149 @@ err_exit:
return err;
}
+static int __aq_ring_xdp_clean(struct aq_ring_s *rx_ring,
+ struct napi_struct *napi, int *work_done,
+ int budget)
+{
+ int frame_sz = rx_ring->page_offset + rx_ring->frame_max +
+ rx_ring->tail_size;
+ struct aq_nic_s *aq_nic = rx_ring->aq_nic;
+ bool is_rsc_completed = true;
+ struct device *dev;
+ int err = 0;
+
+ dev = aq_nic_get_dev(aq_nic);
+ for (; (rx_ring->sw_head != rx_ring->hw_head) && budget;
+ rx_ring->sw_head = aq_ring_next_dx(rx_ring, rx_ring->sw_head),
+ --budget, ++(*work_done)) {
+ struct aq_ring_buff_s *buff = &rx_ring->buff_ring[rx_ring->sw_head];
+ bool is_ptp_ring = aq_ptp_ring(rx_ring->aq_nic, rx_ring);
+ struct aq_ring_buff_s *buff_ = NULL;
+ struct sk_buff *skb = NULL;
+ unsigned int next_ = 0U;
+ struct xdp_buff xdp;
+ void *hard_start;
+
+ if (buff->is_cleaned)
+ continue;
+
+ if (!buff->is_eop) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+ is_rsc_completed =
+ aq_ring_dx_in_range(rx_ring->sw_head,
+ next_,
+ rx_ring->hw_head);
+
+ if (unlikely(!is_rsc_completed))
+ break;
+
+ buff->is_error |= buff_->is_error;
+ buff->is_cso_err |= buff_->is_cso_err;
+ } while (!buff_->is_eop);
+
+ if (!is_rsc_completed) {
+ err = 0;
+ goto err_exit;
+ }
+ if (buff->is_error ||
+ (buff->is_lro && buff->is_cso_err)) {
+ buff_ = buff;
+ do {
+ if (buff_->next >= rx_ring->size) {
+ err = -EIO;
+ goto err_exit;
+ }
+ next_ = buff_->next;
+ buff_ = &rx_ring->buff_ring[next_];
+
+ buff_->is_cleaned = true;
+ } while (!buff_->is_eop);
+
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ if (buff->is_error) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.errors;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+
+ dma_sync_single_range_for_cpu(dev,
+ buff->rxdata.daddr,
+ buff->rxdata.pg_off,
+ buff->len, DMA_FROM_DEVICE);
+ hard_start = page_address(buff->rxdata.page) +
+ buff->rxdata.pg_off - rx_ring->page_offset;
+
+ if (is_ptp_ring)
+ buff->len -=
+ aq_ptp_extract_ts(rx_ring->aq_nic, skb,
+ aq_buf_vaddr(&buff->rxdata),
+ buff->len);
+
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prepare_buff(&xdp, hard_start, rx_ring->page_offset,
+ buff->len, false);
+ if (!buff->is_eop) {
+ if (aq_add_rx_fragment(dev, rx_ring, buff, &xdp)) {
+ u64_stats_update_begin(&rx_ring->stats.rx.syncp);
+ ++rx_ring->stats.rx.packets;
+ rx_ring->stats.rx.bytes += xdp_get_buff_len(&xdp);
+ ++rx_ring->stats.rx.xdp_aborted;
+ u64_stats_update_end(&rx_ring->stats.rx.syncp);
+ continue;
+ }
+ }
+
+ skb = aq_xdp_run_prog(aq_nic, &xdp, rx_ring, buff);
+ if (IS_ERR(skb) || !skb)
+ continue;
+
+ if (buff->is_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ buff->vlan_rx_tag);
+
+ aq_rx_checksum(rx_ring, buff, skb);
+
+ skb_set_hash(skb, buff->rss_hash,
+ buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_NONE);
+ /* Send all PTP traffic to 0 queue */
+ skb_record_rx_queue(skb,
+ is_ptp_ring ? 0
+ : AQ_NIC_RING2QMAP(rx_ring->aq_nic,
+ rx_ring->idx));
+
+ napi_gro_receive(napi, skb);
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_ring_rx_clean(struct aq_ring_s *self,
+ struct napi_struct *napi,
+ int *work_done,
+ int budget)
+{
+ if (static_branch_unlikely(&aq_xdp_locking_key))
+ return __aq_ring_xdp_clean(self, napi, work_done, budget);
+ else
+ return __aq_ring_rx_clean(self, napi, work_done, budget);
+}
+
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
@@ -521,7 +886,6 @@ void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
int aq_ring_rx_fill(struct aq_ring_s *self)
{
- unsigned int page_order = self->page_order;
struct aq_ring_buff_s *buff = NULL;
int err = 0;
int i = 0;
@@ -535,9 +899,9 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
buff = &self->buff_ring[self->sw_tail];
buff->flags = 0U;
- buff->len = AQ_CFG_RX_FRAME_MAX;
+ buff->len = self->frame_max;
- err = aq_get_rxpages(self, buff, page_order);
+ err = aq_get_rxpages(self, buff);
if (err)
goto err_exit;
@@ -584,7 +948,7 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
/* This data should mimic aq_ethtool_queue_rx_stat_names structure */
do {
count = 0;
- start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp);
+ start = u64_stats_fetch_begin(&self->stats.rx.syncp);
data[count] = self->stats.rx.packets;
data[++count] = self->stats.rx.jumbo_packets;
data[++count] = self->stats.rx.lro_packets;
@@ -592,15 +956,24 @@ unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
data[++count] = self->stats.rx.alloc_fails;
data[++count] = self->stats.rx.skb_alloc_fails;
data[++count] = self->stats.rx.polls;
- } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
+ data[++count] = self->stats.rx.pg_flips;
+ data[++count] = self->stats.rx.pg_reuses;
+ data[++count] = self->stats.rx.pg_losts;
+ data[++count] = self->stats.rx.xdp_aborted;
+ data[++count] = self->stats.rx.xdp_drop;
+ data[++count] = self->stats.rx.xdp_pass;
+ data[++count] = self->stats.rx.xdp_tx;
+ data[++count] = self->stats.rx.xdp_invalid;
+ data[++count] = self->stats.rx.xdp_redirect;
+ } while (u64_stats_fetch_retry(&self->stats.rx.syncp, start));
} else {
/* This data should mimic aq_ethtool_queue_tx_stat_names structure */
do {
count = 0;
- start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp);
+ start = u64_stats_fetch_begin(&self->stats.tx.syncp);
data[count] = self->stats.tx.packets;
data[++count] = self->stats.tx.queue_restarts;
- } while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start));
+ } while (u64_stats_fetch_retry(&self->stats.tx.syncp, start));
}
return ++count;
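The stats hunks above switch the per-queue readers from the _irq variants to plain u64_stats_fetch_begin()/u64_stats_fetch_retry() and extend the RX block with the new XDP counters. The reader keeps the usual retry loop; a minimal sketch of reading one counter consistently (struct and function names are illustrative, the field mirrors the aq_ring_stats_rx_s additions in the aq_ring.h hunk that follows):

#include <linux/u64_stats_sync.h>

struct example_rx_stats {
	struct u64_stats_sync syncp;
	u64 xdp_pass;
};

static u64 example_read_xdp_pass(struct example_rx_stats *st)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&st->syncp);
		val = st->xdp_pass;
	} while (u64_stats_fetch_retry(&st->syncp, start));

	return val;
}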
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 93659e58f1ce..0a6c34438c1d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -11,6 +11,10 @@
#define AQ_RING_H
#include "aq_common.h"
+#include "aq_vec.h"
+
+#define AQ_XDP_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
+#define AQ_XDP_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
struct page;
struct aq_nic_cfg_s;
@@ -51,6 +55,7 @@ struct __packed aq_ring_buff_s {
struct {
dma_addr_t pa_eop;
struct sk_buff *skb;
+ struct xdp_frame *xdpf;
};
/* TxC */
struct {
@@ -101,6 +106,12 @@ struct aq_ring_stats_rx_s {
u64 pg_losts;
u64 pg_flips;
u64 pg_reuses;
+ u64 xdp_aborted;
+ u64 xdp_drop;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_invalid;
+ u64 xdp_redirect;
};
struct aq_ring_stats_tx_s {
@@ -132,10 +143,15 @@ struct aq_ring_s {
unsigned int size; /* descriptors number */
unsigned int dx_size; /* TX or RX descriptor size, */
/* stored here for faster math */
- unsigned int page_order;
+ u16 page_order;
+ u16 page_offset;
+ u16 frame_max;
+ u16 tail_size;
union aq_ring_stats_s stats;
dma_addr_t dx_ring_pa;
+ struct bpf_prog *xdp_prog;
enum atl_ring_type ring_type;
+ struct xdp_rxq_info xdp_rxq;
};
struct aq_ring_param_s {
@@ -175,6 +191,7 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic,
unsigned int idx,
struct aq_nic_cfg_s *aq_nic_cfg);
+
int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
void aq_ring_rx_deinit(struct aq_ring_s *self);
void aq_ring_free(struct aq_ring_s *self);
@@ -182,6 +199,8 @@ void aq_ring_update_queue_state(struct aq_ring_s *ring);
void aq_ring_queue_wake(struct aq_ring_s *ring);
void aq_ring_queue_stop(struct aq_ring_s *ring);
bool aq_ring_tx_clean(struct aq_ring_s *self);
+int aq_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags);
int aq_ring_rx_clean(struct aq_ring_s *self,
struct napi_struct *napi,
int *work_done,
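The new page_offset, frame_max and tail_size ring fields describe how an XDP RX buffer is carved out of its page; __aq_ring_xdp_clean() computes frame_sz from the same three values, and aq_ring_rx_alloc() sets them from AQ_XDP_HEADROOM/AQ_XDP_TAILROOM when a program is attached. Schematically (layout sketch only; the concrete sizes depend on the build):

/*
 *	one RX buffer inside a page of order AQ_CFG_XDP_PAGEORDER
 *
 *	+------------------+------------------------+-------------------+
 *	| AQ_XDP_HEADROOM  | packet data            | AQ_XDP_TAILROOM   |
 *	| (page_offset)    | (up to frame_max)      | (skb_shared_info) |
 *	+------------------+------------------------+-------------------+
 *
 *	frame_sz = page_offset + frame_max + tail_size
 */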
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f4774cf051c9..f5db1c44e9b9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -10,11 +10,6 @@
*/
#include "aq_vec.h"
-#include "aq_nic.h"
-#include "aq_ring.h"
-#include "aq_hw.h"
-
-#include <linux/netdevice.h>
struct aq_vec_s {
const struct aq_hw_ops *aq_hw_ops;
@@ -43,8 +38,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
if (!self) {
err = -EINVAL;
} else {
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
ring[AQ_VEC_RX_ID].stats.rx.polls++;
u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
@@ -124,8 +119,7 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
self->tx_rings = 0;
self->rx_rings = 0;
- netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
- aq_vec_poll, AQ_CFG_NAPI_WEIGHT);
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, aq_vec_poll);
err_exit:
return self;
@@ -153,9 +147,23 @@ int aq_vec_ring_alloc(struct aq_vec_s *self, struct aq_nic_s *aq_nic,
aq_nic_set_tx_ring(aq_nic, idx_ring, ring);
+ if (xdp_rxq_info_reg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+ aq_nic->ndev, idx,
+ self->napi.napi_id) < 0) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ if (xdp_rxq_info_reg_mem_model(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL) < 0) {
+ xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
idx_ring, aq_nic_cfg);
if (!ring) {
+ xdp_rxq_info_unreg(&self->ring[i][AQ_VEC_RX_ID].xdp_rxq);
err = -ENOMEM;
goto err_exit;
}
@@ -182,8 +190,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
self->aq_hw_ops = aq_hw_ops;
self->aq_hw = aq_hw;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
if (err < 0)
goto err_exit;
@@ -224,8 +232,8 @@ int aq_vec_start(struct aq_vec_s *self)
unsigned int i = 0U;
int err = 0;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
if (err < 0)
@@ -248,8 +256,8 @@ void aq_vec_stop(struct aq_vec_s *self)
struct aq_ring_s *ring = NULL;
unsigned int i = 0U;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
&ring[AQ_VEC_TX_ID]);
@@ -268,8 +276,8 @@ void aq_vec_deinit(struct aq_vec_s *self)
if (!self)
goto err_exit;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
}
@@ -297,11 +305,13 @@ void aq_vec_ring_free(struct aq_vec_s *self)
if (!self)
goto err_exit;
- for (i = 0U, ring = self->ring[0];
- self->tx_rings > i; ++i, ring = self->ring[i]) {
+ for (i = 0U; self->tx_rings > i; ++i) {
+ ring = self->ring[i];
aq_ring_free(&ring[AQ_VEC_TX_ID]);
- if (i < self->rx_rings)
+ if (i < self->rx_rings) {
+ xdp_rxq_info_unreg(&ring[AQ_VEC_RX_ID].xdp_rxq);
aq_ring_free(&ring[AQ_VEC_RX_ID]);
+ }
}
self->tx_rings = 0;
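aq_vec_ring_alloc() above registers one xdp_rxq_info per RX ring, sets its memory model, and unregisters it on every failure path; aq_vec_ring_free() does the matching xdp_rxq_info_unreg() at teardown. A condensed sketch of that register-then-undo sequence (the function name is illustrative; the xdp_rxq_info calls and MEM_TYPE_PAGE_SHARED are the ones used in the hunk):

#include <net/xdp.h>

static int example_reg_rxq(struct xdp_rxq_info *xdp_rxq,
			   struct net_device *ndev,
			   u32 queue_index, unsigned int napi_id)
{
	int err;

	err = xdp_rxq_info_reg(xdp_rxq, ndev, queue_index, napi_id);
	if (err < 0)
		return err;

	err = xdp_rxq_info_reg_mem_model(xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL);
	if (err < 0) {
		xdp_rxq_info_unreg(xdp_rxq);	/* undo the registration */
		return err;
	}

	return 0;
}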
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
index 567f3d4b79a2..78fac609b71d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -13,7 +13,13 @@
#define AQ_VEC_H
#include "aq_common.h"
+#include "aq_nic.h"
+#include "aq_ring.h"
+#include "aq_hw.h"
+
#include <linux/irqreturn.h>
+#include <linux/filter.h>
+#include <linux/netdevice.h>
struct aq_hw_s;
struct aq_hw_ops;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 4625ccb79499..9dfd68f0fda9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -531,7 +531,7 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->frame_max / 1024U,
aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
@@ -706,9 +706,9 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
buff->len = rxd_wb->pkt_len %
- AQ_CFG_RX_FRAME_MAX;
+ ring->frame_max;
buff->len = buff->len ?
- buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->len : ring->frame_max;
buff->next = 0U;
buff->is_eop = 1U;
} else {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index d875ce3ec759..54e70f07b573 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -766,7 +766,7 @@ int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
hw_atl_rdm_rx_desc_data_buff_size_set(self,
- AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->frame_max / 1024U,
aq_ring->idx);
hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
@@ -889,6 +889,13 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
err = -ENXIO;
goto err_exit;
}
+
+ /* Validate that the new hw_head_ is reasonable. */
+ if (hw_head_ >= ring->size) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+
ring->hw_head = hw_head_;
err = aq_hw_err_from_flags(self);
@@ -969,15 +976,15 @@ int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self, struct aq_ring_s *ring)
rxd_wb->status);
if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
buff->len = rxd_wb->pkt_len %
- AQ_CFG_RX_FRAME_MAX;
+ ring->frame_max;
buff->len = buff->len ?
- buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->len : ring->frame_max;
buff->next = 0U;
buff->is_eop = 1U;
} else {
buff->len =
- rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
- AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
+ rxd_wb->pkt_len > ring->frame_max ?
+ ring->frame_max : rxd_wb->pkt_len;
if (buff->is_lro) {
/* LRO */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
index 58d426dda3ed..674683b54304 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -336,7 +336,7 @@ static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
static void aq_a2_fill_a0_stats(struct aq_hw_s *self,
struct statistics_s *stats)
{
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct hw_atl2_priv *priv = self->priv;
struct aq_stats_s *cs = &self->curr_stats;
struct aq_stats_s curr_stats = *cs;
bool corrupted_stats = false;
@@ -378,7 +378,7 @@ do { \
static void aq_a2_fill_b0_stats(struct aq_hw_s *self,
struct statistics_s *stats)
{
- struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct hw_atl2_priv *priv = self->priv;
struct aq_stats_s *cs = &self->curr_stats;
struct aq_stats_s curr_stats = *cs;
bool corrupted_stats = false;
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
index 36c7cf05630a..431924959520 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
@@ -757,6 +757,7 @@ set_ingress_sakey_record(struct aq_hw_s *hw,
u16 table_index)
{
u16 packed_record[18];
+ int ret;
if (table_index >= NUMROWS_INGRESSSAKEYRECORD)
return -EINVAL;
@@ -789,9 +790,12 @@ set_ingress_sakey_record(struct aq_hw_s *hw,
packed_record[16] = rec->key_len & 0x3;
- return set_raw_ingress_record(hw, packed_record, 18, 2,
- ROWOFFSET_INGRESSSAKEYRECORD +
- table_index);
+ ret = set_raw_ingress_record(hw, packed_record, 18, 2,
+ ROWOFFSET_INGRESSSAKEYRECORD +
+ table_index);
+
+ memzero_explicit(packed_record, sizeof(packed_record));
+ return ret;
}
int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw,
@@ -1739,14 +1743,14 @@ static int set_egress_sakey_record(struct aq_hw_s *hw,
ret = set_raw_egress_record(hw, packed_record, 8, 2,
ROWOFFSET_EGRESSSAKEYRECORD + table_index);
if (unlikely(ret))
- return ret;
+ goto clear_key;
ret = set_raw_egress_record(hw, packed_record + 8, 8, 2,
ROWOFFSET_EGRESSSAKEYRECORD + table_index -
32);
- if (unlikely(ret))
- return ret;
- return 0;
+clear_key:
+ memzero_explicit(packed_record, sizeof(packed_record));
+ return ret;
}
int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw,
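
Editor's note: both SA-key helpers now wipe the on-stack packed record before returning, on success and on error alike. A minimal sketch of the pattern follows, with a hypothetical write_key_record() standing in for the raw-record writers:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical hardware writer; stands in for set_raw_*_record(). */
int write_key_record(const u16 *rec, int len, int index);

static int program_key(const u8 *key, int key_len, int index)
{
	u16 packed[18] = {};
	int i, ret;

	for (i = 0; i < key_len / 2 && i < 16; i++)
		packed[i] = key[2 * i] | (key[2 * i + 1] << 8);
	packed[16] = key_len & 0x3;

	ret = write_key_record(packed, ARRAY_SIZE(packed), index);

	/*
	 * memzero_explicit() cannot be optimised away, so the plaintext
	 * key does not linger in the dead stack frame after return.
	 */
	memzero_explicit(packed, sizeof(packed));
	return ret;
}
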
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
index b6119dcc3bb9..c2fda80fe1cc 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_struct.h
@@ -158,7 +158,7 @@ struct aq_mss_egress_class_record {
* 1: compare the SNAP header.
* If this bit is set to 1, the extracted filed will assume the
* SNAP header exist as encapsulated in 802.3 (RFC 1042). I.E. the
- * next 5 bytes after the the LLC header is SNAP header.
+ * next 5 bytes after the LLC header is SNAP header.
*/
u32 snap_mask;
/*! 0: don't care and no LLC header exist.
@@ -422,7 +422,7 @@ struct aq_mss_ingress_preclass_record {
* 1: compare the SNAP header.
* If this bit is set to 1, the extracted filed will assume the
* SNAP header exist as encapsulated in 802.3 (RFC 1042). I.E. the
- * next 5 bytes after the the LLC header is SNAP header.
+ * next 5 bytes after the LLC header is SNAP header.
*/
u32 snap_mask;
/*! Mask is per-byte.
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index c642c3d3e600..ba0646b3b122 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -91,7 +91,7 @@ static void arc_emac_get_drvinfo(struct net_device *ndev,
{
struct arc_emac_priv *priv = netdev_priv(ndev);
- strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
+ strscpy(info->driver, priv->drv_name, sizeof(info->driver));
}
static const struct ethtool_ops arc_emac_ethtool_ops = {
@@ -981,7 +981,8 @@ int arc_emac_probe(struct net_device *ndev, int interface)
dev_info(dev, "connected to %s phy with id 0x%x\n",
phydev->drv->name, phydev->phy_id);
- netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &priv->napi, arc_emac_poll,
+ ARC_EMAC_NAPI_WEIGHT);
err = register_netdev(ndev);
if (err) {
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 9acf589b1178..87f40c2ba904 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -132,6 +132,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
{
struct arc_emac_mdio_bus_data *data = &priv->bus_data;
struct device_node *np = priv->dev->of_node;
+ const char *name = "Synopsys MII Bus";
struct mii_bus *bus;
int error;
@@ -142,7 +143,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
priv->bus = bus;
bus->priv = priv;
bus->parent = priv->dev;
- bus->name = "Synopsys MII Bus";
+ bus->name = name;
bus->read = &arc_mdio_read;
bus->write = &arc_mdio_write;
bus->reset = &arc_mdio_reset;
@@ -167,7 +168,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
if (error) {
mdiobus_free(bus);
return dev_err_probe(priv->dev, error,
- "cannot register MDIO bus %s\n", bus->name);
+ "cannot register MDIO bus %s\n", name);
}
return 0;
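
Editor's note: the probe error path above prints the bus name after mdiobus_free() has run, so the string now comes from a local variable rather than from the freed mii_bus. A simplified sketch of that pattern (ops, bus->id and priv wiring omitted, names hypothetical):

#include <linux/device.h>
#include <linux/phy.h>

static int mdio_register_example(struct device *dev)
{
	const char *name = "Example MII Bus";
	struct mii_bus *bus;
	int err;

	bus = mdiobus_alloc();
	if (!bus)
		return -ENOMEM;

	bus->name = name;
	/* ...fill in bus->id and the read/write/reset ops here... */

	err = mdiobus_register(bus);
	if (err) {
		mdiobus_free(bus);
		/* Use the local copy: 'bus' must not be touched any more. */
		return dev_err_probe(dev, err,
				     "cannot register MDIO bus %s\n", name);
	}
	return 0;
}
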
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index e230d8d0ff73..e551ffaed20d 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -144,12 +144,13 @@ static void ax88796c_set_mac_addr(struct net_device *ndev)
static void ax88796c_load_mac_addr(struct net_device *ndev)
{
struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u8 addr[ETH_ALEN];
u16 temp;
lockdep_assert_held(&ax_local->spi_lock);
/* Try the device tree first */
- if (!eth_platform_get_mac_address(&ax_local->spi->dev, ndev->dev_addr) &&
+ if (!platform_get_ethdev_address(&ax_local->spi->dev, ndev) &&
is_valid_ether_addr(ndev->dev_addr)) {
if (netif_msg_probe(ax_local))
dev_info(&ax_local->spi->dev,
@@ -159,18 +160,19 @@ static void ax88796c_load_mac_addr(struct net_device *ndev)
/* Read the MAC address from AX88796C */
temp = AX_READ(&ax_local->ax_spi, P3_MACASR0);
- ndev->dev_addr[5] = (u8)temp;
- ndev->dev_addr[4] = (u8)(temp >> 8);
+ addr[5] = (u8)temp;
+ addr[4] = (u8)(temp >> 8);
temp = AX_READ(&ax_local->ax_spi, P3_MACASR1);
- ndev->dev_addr[3] = (u8)temp;
- ndev->dev_addr[2] = (u8)(temp >> 8);
+ addr[3] = (u8)temp;
+ addr[2] = (u8)(temp >> 8);
temp = AX_READ(&ax_local->ax_spi, P3_MACASR2);
- ndev->dev_addr[1] = (u8)temp;
- ndev->dev_addr[0] = (u8)(temp >> 8);
+ addr[1] = (u8)temp;
+ addr[0] = (u8)(temp >> 8);
- if (is_valid_ether_addr(ndev->dev_addr)) {
+ if (is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(ndev, addr);
if (netif_msg_probe(ax_local))
dev_info(&ax_local->spi->dev,
"MAC address read from ASIX chip\n");
@@ -291,7 +293,7 @@ ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
skb_put(skb, padlen);
/* EOP header */
- memcpy(skb_put(skb, TX_EOP_SIZE), &info.eop, TX_EOP_SIZE);
+ skb_put_data(skb, &info.eop, TX_EOP_SIZE);
skb_unlink(skb, q);
@@ -379,7 +381,7 @@ static int ax88796c_hard_xmit(struct ax88796c_device *ax_local)
return 1;
}
-static int
+static netdev_tx_t
ax88796c_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
@@ -431,7 +433,7 @@ ax88796c_skb_return(struct ax88796c_device *ax_local,
netif_info(ax_local, rx_status, ndev, "< rx, len %zu, type 0x%x\n",
skb->len + sizeof(struct ethhdr), skb->protocol);
- status = netif_rx_ni(skb);
+ status = netif_rx(skb);
if (status != NET_RX_SUCCESS && net_ratelimit())
netif_info(ax_local, rx_err, ndev,
"netif_rx status %d\n", status);
@@ -660,12 +662,12 @@ static void ax88796c_get_stats64(struct net_device *ndev,
s = per_cpu_ptr(ax_local->stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&s->syncp);
+ start = u64_stats_fetch_begin(&s->syncp);
rx_packets = u64_stats_read(&s->rx_packets);
rx_bytes = u64_stats_read(&s->rx_bytes);
tx_packets = u64_stats_read(&s->tx_packets);
tx_bytes = u64_stats_read(&s->tx_bytes);
- } while (u64_stats_fetch_retry_irq(&s->syncp, start));
+ } while (u64_stats_fetch_retry(&s->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -1004,7 +1006,7 @@ static int ax88796c_probe(struct spi_device *spi)
ax_local->mdiobus->parent = &spi->dev;
snprintf(ax_local->mdiobus->id, MII_BUS_ID_SIZE,
- "ax88796c-%s.%u", dev_name(&spi->dev), spi->chip_select);
+ "ax88796c-%s.%u", dev_name(&spi->dev), spi_get_chipselect(spi, 0));
ret = devm_mdiobus_register(&spi->dev, ax_local->mdiobus);
if (ret < 0) {
@@ -1100,7 +1102,7 @@ err:
return ret;
}
-static int ax88796c_remove(struct spi_device *spi)
+static void ax88796c_remove(struct spi_device *spi)
{
struct ax88796c_device *ax_local = dev_get_drvdata(&spi->dev);
struct net_device *ndev = ax_local->ndev;
@@ -1110,8 +1112,6 @@ static int ax88796c_remove(struct spi_device *spi)
netif_info(ax_local, probe, ndev, "removing network device %s %s\n",
dev_driver_string(&spi->dev),
dev_name(&spi->dev));
-
- return 0;
}
#ifdef CONFIG_OF
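
Editor's note: ndev->dev_addr is no longer written directly; the MAC read back from the chip is assembled in a local buffer and committed with eth_hw_addr_set() only once it validates. A minimal sketch, assuming a hypothetical read_mac_word() register accessor:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical accessor returning two MAC bytes per register. */
u16 read_mac_word(int reg);

static void load_mac_example(struct net_device *ndev)
{
	u8 addr[ETH_ALEN];
	u16 w;

	w = read_mac_word(0);
	addr[5] = w;
	addr[4] = w >> 8;
	w = read_mac_word(1);
	addr[3] = w;
	addr[2] = w >> 8;
	w = read_mac_word(2);
	addr[1] = w;
	addr[0] = w >> 8;

	/* dev_addr is const; the only legal writers are eth_hw_addr_*(). */
	if (is_valid_ether_addr(addr))
		eth_hw_addr_set(ndev, addr);
	else
		eth_hw_addr_random(ndev);
}
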
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 88d2ab748399..ff1a5edf8df1 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -451,8 +451,8 @@ static void ag71xx_get_drvinfo(struct net_device *ndev,
{
struct ag71xx *ag = netdev_priv(ndev);
- strlcpy(info->driver, "ag71xx", sizeof(info->driver));
- strlcpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
+ strscpy(info->driver, "ag71xx", sizeof(info->driver));
+ strscpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
sizeof(info->bus_info));
}
@@ -766,7 +766,7 @@ static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
unsigned long timestamp;
u32 rx_sm, tx_sm, rx_fd;
- timestamp = netdev_get_tx_queue(ag->ndev, 0)->trans_start;
+ timestamp = READ_ONCE(netdev_get_tx_queue(ag->ndev, 0)->trans_start);
if (likely(time_before(jiffies, timestamp + HZ / 10)))
return false;
@@ -786,7 +786,7 @@ static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
return false;
}
-static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
+static int ag71xx_tx_packets(struct ag71xx *ag, bool flush, int budget)
{
struct ag71xx_ring *ring = &ag->tx_ring;
int sent = 0, bytes_compl = 0, n = 0;
@@ -825,7 +825,7 @@ static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
if (!skb)
continue;
- dev_kfree_skb_any(skb);
+ napi_consume_skb(skb, budget);
ring->buf[i].tx.skb = NULL;
bytes_compl += ring->buf[i].tx.len;
@@ -946,7 +946,7 @@ static unsigned int ag71xx_max_frame_len(unsigned int mtu)
return ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
-static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac)
+static void ag71xx_hw_set_macaddr(struct ag71xx *ag, const unsigned char *mac)
{
u32 t;
@@ -970,7 +970,7 @@ static void ag71xx_fast_reset(struct ag71xx *ag)
mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
- ag71xx_tx_packets(ag, true);
+ ag71xx_tx_packets(ag, true, 0);
reset_control_assert(ag->mac_reset);
usleep_range(10, 20);
@@ -1024,83 +1024,6 @@ static void ag71xx_mac_config(struct phylink_config *config, unsigned int mode,
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]);
}
-static void ag71xx_mac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- break;
- case PHY_INTERFACE_MODE_MII:
- if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
- ag71xx_is(ag, AR9340) ||
- ag71xx_is(ag, QCA9530) ||
- (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_GMII:
- if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
- (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
- (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_SGMII:
- if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_RMII:
- if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
- break;
- goto unsupported;
- case PHY_INTERFACE_MODE_RGMII:
- if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
- (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
- break;
- goto unsupported;
- default:
- goto unsupported;
- }
-
- phylink_set(mask, MII);
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, Autoneg);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_SGMII ||
- state->interface == PHY_INTERFACE_MODE_RGMII ||
- state->interface == PHY_INTERFACE_MODE_GMII) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-unsupported:
- linkmode_zero(supported);
-}
-
-static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
-{
- state->link = 0;
-}
-
-static void ag71xx_mac_an_restart(struct phylink_config *config)
-{
- /* Not Supported */
-}
-
static void ag71xx_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
@@ -1163,9 +1086,6 @@ static void ag71xx_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops ag71xx_phylink_mac_ops = {
- .validate = ag71xx_mac_validate,
- .mac_pcs_get_state = ag71xx_mac_pcs_get_state,
- .mac_an_restart = ag71xx_mac_an_restart,
.mac_config = ag71xx_mac_config,
.mac_link_down = ag71xx_mac_link_down,
.mac_link_up = ag71xx_mac_link_up,
@@ -1177,6 +1097,34 @@ static int ag71xx_phylink_setup(struct ag71xx *ag)
ag->phylink_config.dev = &ag->ndev->dev;
ag->phylink_config.type = PHYLINK_NETDEV;
+ ag->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000FD;
+
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 0) ||
+ ag71xx_is(ag, AR9340) ||
+ ag71xx_is(ag, QCA9530) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ ag->phylink_config.supported_interfaces);
+
+ if ((ag71xx_is(ag, AR9330) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, AR9340) && ag->mac_idx == 1) ||
+ (ag71xx_is(ag, QCA9530) && ag->mac_idx == 1))
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ ag->phylink_config.supported_interfaces);
+
+ if (ag71xx_is(ag, QCA9550) && ag->mac_idx == 0)
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ ag->phylink_config.supported_interfaces);
+
+ if (ag71xx_is(ag, AR9340) && ag->mac_idx == 0)
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ ag->phylink_config.supported_interfaces);
+
+ if ((ag71xx_is(ag, AR9340) && ag->mac_idx == 0) ||
+ (ag71xx_is(ag, QCA9550) && ag->mac_idx == 1))
+ __set_bit(PHY_INTERFACE_MODE_RGMII,
+ ag->phylink_config.supported_interfaces);
phylink = phylink_create(&ag->phylink_config, ag->pdev->dev.fwnode,
ag->phy_if_mode, &ag71xx_phylink_mac_ops);
@@ -1478,7 +1426,7 @@ static int ag71xx_open(struct net_device *ndev)
if (ret) {
netif_err(ag, link, ndev, "phylink_of_phy_connect filed with err: %i\n",
ret);
- goto err;
+ return ret;
}
max_frame_len = ag71xx_max_frame_len(ndev->mtu);
@@ -1499,6 +1447,7 @@ static int ag71xx_open(struct net_device *ndev)
err:
ag71xx_rings_cleanup(ag);
+ phylink_disconnect_phy(ag->phylink);
return ret;
}
@@ -1708,7 +1657,7 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pktlen;
- skb = build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
+ skb = napi_build_skb(ring->buf[i].rx.rx_buf, ag71xx_buffer_size(ag));
if (!skb) {
skb_free_frag(ring->buf[i].rx.rx_buf);
goto next;
@@ -1754,7 +1703,7 @@ static int ag71xx_poll(struct napi_struct *napi, int limit)
int tx_done, rx_done;
u32 status;
- tx_done = ag71xx_tx_packets(ag, false);
+ tx_done = ag71xx_tx_packets(ag, false, limit);
netif_dbg(ag, rx_status, ndev, "processing RX ring\n");
rx_done = ag71xx_rx_packets(ag, limit);
@@ -1913,15 +1862,12 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac");
if (IS_ERR(ag->mac_reset)) {
netif_err(ag, probe, ndev, "missing mac reset\n");
- err = PTR_ERR(ag->mac_reset);
- goto err_free;
+ return PTR_ERR(ag->mac_reset);
}
ag->mac_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!ag->mac_base) {
- err = -ENOMEM;
- goto err_free;
- }
+ if (!ag->mac_base)
+ return -ENOMEM;
ndev->irq = platform_get_irq(pdev, 0);
err = devm_request_irq(&pdev->dev, ndev->irq, ag71xx_interrupt,
@@ -1929,7 +1875,7 @@ static int ag71xx_probe(struct platform_device *pdev)
if (err) {
netif_err(ag, probe, ndev, "unable to request IRQ %d\n",
ndev->irq);
- goto err_free;
+ return err;
}
ndev->netdev_ops = &ag71xx_netdev_ops;
@@ -1957,10 +1903,8 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc = dmam_alloc_coherent(&pdev->dev,
sizeof(struct ag71xx_desc),
&ag->stop_desc_dma, GFP_KERNEL);
- if (!ag->stop_desc) {
- err = -ENOMEM;
- goto err_free;
- }
+ if (!ag->stop_desc)
+ return -ENOMEM;
ag->stop_desc->data = 0;
ag->stop_desc->ctrl = 0;
@@ -1975,15 +1919,16 @@ static int ag71xx_probe(struct platform_device *pdev)
err = of_get_phy_mode(np, &ag->phy_if_mode);
if (err) {
netif_err(ag, probe, ndev, "missing phy-mode property in DT\n");
- goto err_free;
+ return err;
}
- netif_napi_add(ndev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);
+ netif_napi_add_weight(ndev, &ag->napi, ag71xx_poll,
+ AG71XX_NAPI_WEIGHT);
err = clk_prepare_enable(ag->clk_eth);
if (err) {
netif_err(ag, probe, ndev, "Failed to enable eth clk.\n");
- goto err_free;
+ return err;
}
ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0);
@@ -2019,8 +1964,6 @@ err_mdio_remove:
ag71xx_mdio_remove(ag);
err_put_clk:
clk_disable_unprepare(ag->clk_eth);
-err_free:
- free_netdev(ndev);
return err;
}
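
Editor's note: ag71xx drops its .validate/.mac_pcs_get_state/.mac_an_restart callbacks and instead declares mac_capabilities plus a supported_interfaces bitmap at setup time, letting phylink derive the link-mode mask itself. A minimal sketch of the new-style setup, with hypothetical chip-capability flags:

#include <linux/phylink.h>

/* Sketch: 'cfg' would be embedded in the driver's private struct. */
static void phylink_setup_example(struct phylink_config *cfg,
				  bool has_sgmii, bool has_rgmii)
{
	cfg->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
				MAC_10 | MAC_100 | MAC_1000FD;

	/* Declare only what this MAC instance can actually drive. */
	__set_bit(PHY_INTERFACE_MODE_MII, cfg->supported_interfaces);
	if (has_sgmii)
		__set_bit(PHY_INTERFACE_MODE_SGMII, cfg->supported_interfaces);
	if (has_rgmii)
		__set_bit(PHY_INTERFACE_MODE_RGMII, cfg->supported_interfaces);
}
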
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 4ad3fc72e74e..49bb9a8f00e6 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -39,7 +39,6 @@
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
-#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -752,7 +751,7 @@ static int alx_alloc_napis(struct alx_priv *alx)
goto err_out;
np->alx = alx;
- netif_napi_add(alx->dev, &np->napi, alx_poll, 64);
+ netif_napi_add(alx->dev, &np->napi, alx_poll);
alx->qnapi[i] = np;
}
@@ -1181,8 +1180,11 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
netdev_update_features(netdev);
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ mutex_lock(&alx->mtx);
alx_reinit(alx);
+ mutex_unlock(&alx->mtx);
+ }
return 0;
}
@@ -1742,7 +1744,6 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_pci_disable;
}
- pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
if (!pdev->pm_cap) {
@@ -1876,7 +1877,6 @@ out_free_netdev:
free_netdev(netdev);
out_pci_release:
pci_release_mem_regions(pdev);
- pci_disable_pcie_error_reporting(pdev);
out_pci_disable:
pci_disable_device(pdev);
return err;
@@ -1894,7 +1894,6 @@ static void alx_remove(struct pci_dev *pdev)
iounmap(hw->hw_addr);
pci_release_mem_regions(pdev);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
mutex_destroy(&alx->mtx);
@@ -1902,18 +1901,20 @@ static void alx_remove(struct pci_dev *pdev)
free_netdev(alx->dev);
}
-#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
struct alx_priv *alx = dev_get_drvdata(dev);
if (!netif_running(alx->dev))
return 0;
+
+ rtnl_lock();
netif_device_detach(alx->dev);
mutex_lock(&alx->mtx);
__alx_stop(alx);
mutex_unlock(&alx->mtx);
+ rtnl_unlock();
return 0;
}
@@ -1924,6 +1925,7 @@ static int alx_resume(struct device *dev)
struct alx_hw *hw = &alx->hw;
int err;
+ rtnl_lock();
mutex_lock(&alx->mtx);
alx_reset_phy(hw);
@@ -1940,15 +1942,11 @@ static int alx_resume(struct device *dev)
unlock:
mutex_unlock(&alx->mtx);
+ rtnl_unlock();
return err;
}
-static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
-#define ALX_PM_OPS (&alx_pm_ops)
-#else
-#define ALX_PM_OPS NULL
-#endif
-
+static DEFINE_SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
@@ -2047,7 +2045,7 @@ static struct pci_driver alx_driver = {
.probe = alx_probe,
.remove = alx_remove,
.err_handler = &alx_err_handlers,
- .driver.pm = ALX_PM_OPS,
+ .driver.pm = pm_sleep_ptr(&alx_pm_ops),
};
module_pci_driver(alx_driver);
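
Editor's note: the #ifdef CONFIG_PM_SLEEP scaffolding is replaced by DEFINE_SIMPLE_DEV_PM_OPS() and pm_sleep_ptr(), which keep the handlers visible to the compiler and let it drop them when sleep support is off. A sketch with hypothetical suspend/resume bodies and driver name:

#include <linux/pci.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	/* ...quiesce the device... */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* ...bring the device back up... */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops,
				example_suspend, example_resume);

static struct pci_driver example_driver = {
	.name = "example",
	/* Evaluates to NULL when CONFIG_PM_SLEEP=n, no #ifdef needed. */
	.driver.pm = pm_sleep_ptr(&example_pm_ops),
};
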
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index e2eb7b8c63a0..0bce122c68f1 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -220,8 +220,8 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index da595242bc13..4a288799633f 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -207,16 +207,6 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-/**
- * atl1c_irq_reset - reset interrupt confiure on the NIC
- * @adapter: board private structure
- */
-static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
-{
- atomic_set(&adapter->irq_sem, 1);
- atl1c_irq_enable(adapter);
-}
-
/*
* atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
* of the idle status register until the device is actually idle
@@ -900,7 +890,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
atl1c_clean_buffer(pdev, buffer_info);
}
- netdev_reset_queue(adapter->netdev);
+ netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
/* Zero out Tx-buffers */
memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
@@ -1051,7 +1041,7 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
* each ring/block may need up to 8 bytes for alignment, hence the
* additional bytes tacked onto the end.
*/
- ring_header->size = size =
+ ring_header->size =
sizeof(struct atl1c_tpd_desc) * tpd_ring->count * tqc +
sizeof(struct atl1c_rx_free_desc) * rfd_ring->count * rqc +
sizeof(struct atl1c_recv_ret_status) * rfd_ring->count * rqc +
@@ -2072,7 +2062,7 @@ static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
tpd_req = skb_shinfo(skb)->nr_frags + 1;
if (skb_is_gso(skb)) {
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (proto_hdr_len < skb_headlen(skb))
tpd_req++;
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
@@ -2107,7 +2097,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
if (netif_msg_tx_queued(adapter))
@@ -2132,7 +2122,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
*tpd = atl1c_get_tpd(adapter, queue);
ipv6_hdr(skb)->payload_len = 0;
/* check payload == 0 byte ? */
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
if (netif_msg_tx_queued(adapter))
@@ -2219,7 +2209,8 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
if (tso) {
/* TSO */
- map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
+ map_len = hdr_len;
use_tpd = tpd;
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
@@ -2731,10 +2722,10 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_set_threaded(netdev, true);
for (i = 0; i < adapter->rx_queue_count; ++i)
netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
- atl1c_clean_rx, 64);
+ atl1c_clean_rx);
for (i = 0; i < adapter->tx_queue_count; ++i)
- netif_napi_add(netdev, &adapter->tpd_ring[i].napi,
- atl1c_clean_tx, 64);
+ netif_napi_add_tx(netdev, &adapter->tpd_ring[i].napi,
+ atl1c_clean_tx);
timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
/* setup the private structure */
err = atl1c_sw_init(adapter);
@@ -2849,7 +2840,7 @@ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
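
Editor's note: every open-coded skb_transport_offset() + tcp_hdrlen() sum in the TSO paths above becomes skb_tcp_all_headers(), which returns the length of all headers up to and including TCP. A minimal sketch, assuming a hypothetical descriptor-budget calculation:

#include <linux/skbuff.h>
#include <net/tcp.h>

/*
 * Rough descriptor estimate for a GSO skb: one per fragment, one for
 * the linear area, plus one more if payload follows the headers there.
 */
static u16 tx_desc_estimate(const struct sk_buff *skb)
{
	u16 need = skb_shinfo(skb)->nr_frags + 1;

	if (skb_is_gso(skb)) {
		int hdr_len = skb_tcp_all_headers(skb);

		if (hdr_len < skb_headlen(skb))
			need++;
	}
	return need;
}
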
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 0cbde352d1ba..68f1832a198d 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -306,9 +306,9 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 56e5f440e666..5db0f3495a32 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1609,8 +1609,7 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
if (skb_is_gso(skb)) {
if (skb->protocol == htons(ETH_P_IP) ||
(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
- proto_hdr_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (proto_hdr_len < skb_headlen(skb)) {
tpd_req += ((skb_headlen(skb) - proto_hdr_len +
MAX_TX_BUF_LEN - 1) >>
@@ -1645,7 +1644,7 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
netdev_warn(adapter->netdev,
@@ -1713,7 +1712,8 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
if (segment) {
/* TSO */
- map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
+ map_len = hdr_len;
use_tpd = tpd;
tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
@@ -2354,7 +2354,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
- netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, atl1e_clean);
timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0);
@@ -2395,7 +2395,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
- netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
+ netif_set_tso_max_size(netdev, MAX_TSO_SEG_SIZE);
err = register_netdev(netdev);
if (err) {
netdev_err(netdev, "register netdevice failed\n");
@@ -2482,7 +2482,7 @@ atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index b4c9e805e981..c8444bcdf527 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2115,7 +2115,7 @@ static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
ntohs(iph->tot_len));
if (real_len < skb->len)
pskb_trim(skb, real_len);
- hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ hdr_len = skb_tcp_all_headers(skb);
if (skb->len == hdr_len) {
iph->check = 0;
tcp_hdr(skb)->check =
@@ -2206,7 +2206,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
if (retval) {
/* TSO */
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
buffer_info->length = hdr_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
@@ -2367,8 +2367,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
mss = skb_shinfo(skb)->gso_size;
if (mss) {
if (skb->protocol == htons(ETH_P_IP)) {
- proto_hdr_len = (skb_transport_offset(skb) +
- tcp_hdrlen(skb));
+ proto_hdr_len = skb_tcp_all_headers(skb);
if (unlikely(proto_hdr_len > len)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2978,7 +2977,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &atl1_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, atl1_rings_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, atl1_rings_clean);
netdev->ethtool_ops = &atl1_ethtool_ops;
adapter->bd_number = cards_found;
@@ -3341,8 +3340,8 @@ static void atl1_get_drvinfo(struct net_device *netdev,
{
struct atl1_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
@@ -3438,7 +3437,9 @@ static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
}
static void atl1_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
@@ -3451,7 +3452,9 @@ static void atl1_get_ringparam(struct net_device *netdev,
}
static int atl1_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
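
Editor's note: the ethtool get/set_ringparam callbacks gained two extra arguments, struct kernel_ethtool_ringparam and struct netlink_ext_ack, so drivers can report extended parameters and attach error messages; atl1 only has to grow its prototypes. A sketch of the new signature with hypothetical limits and current sizes:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

#define EX_RX_RING_MAX 4096
#define EX_TX_RING_MAX 4096

static void example_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring,
				  struct kernel_ethtool_ringparam *kernel_ring,
				  struct netlink_ext_ack *extack)
{
	ring->rx_max_pending = EX_RX_RING_MAX;
	ring->tx_max_pending = EX_TX_RING_MAX;
	ring->rx_pending = 512;		/* hypothetical current sizes */
	ring->tx_pending = 256;
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_ringparam = example_get_ringparam,
};
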
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bbc4d7b08a49..1b487c071cb6 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1980,9 +1980,9 @@ static void atl2_get_drvinfo(struct net_device *netdev,
{
struct atl2_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 56e0fb07aec7..948586bf1b5b 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -53,8 +53,8 @@ config B44_PCI
config BCM4908_ENET
tristate "Broadcom BCM4908 internal mac support"
- depends on ARCH_BCM4908 || COMPILE_TEST
- default y if ARCH_BCM4908
+ depends on ARCH_BCMBCA || COMPILE_TEST
+ default y if ARCH_BCMBCA
help
This driver supports Ethernet controller integrated into Broadcom
BCM4908 family SoCs.
@@ -71,6 +71,7 @@ config BCM63XX_ENET
config BCMGENET
tristate "Broadcom GENET internal MAC support"
depends on HAS_IOMEM
+ depends on PTP_1588_CLOCK_OPTIONAL || !ARCH_BCM2835
select MII
select PHYLIB
select FIXED_PHY
@@ -212,6 +213,7 @@ config BNXT
select NET_DEVLINK
select PAGE_POOL
select DIMLIB
+ select AUXILIARY_BUS
help
This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
Ethernet cards. To compile this driver as a module, choose M here:
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 969591bbc066..392ec09a1d8a 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -196,28 +196,6 @@ static int b44_wait_bit(struct b44 *bp, unsigned long reg,
return 0;
}
-static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
-{
- u32 val;
-
- bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
- (index << CAM_CTRL_INDEX_SHIFT)));
-
- b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
-
- val = br32(bp, B44_CAM_DATA_LO);
-
- data[2] = (val >> 24) & 0xFF;
- data[3] = (val >> 16) & 0xFF;
- data[4] = (val >> 8) & 0xFF;
- data[5] = (val >> 0) & 0xFF;
-
- val = br32(bp, B44_CAM_DATA_HI);
-
- data[0] = (val >> 8) & 0xFF;
- data[1] = (val >> 0) & 0xFF;
-}
-
static inline void __b44_cam_write(struct b44 *bp,
const unsigned char *data, int index)
{
@@ -1680,7 +1658,7 @@ static void b44_get_stats64(struct net_device *dev,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&hwstat->syncp);
+ start = u64_stats_fetch_begin(&hwstat->syncp);
/* Convert HW stats into rtnl_link_stats64 stats. */
nstat->rx_packets = hwstat->rx_pkts;
@@ -1714,7 +1692,7 @@ static void b44_get_stats64(struct net_device *dev,
/* Carrier lost counter seems to be broken for some devices */
nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
- } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
+ } while (u64_stats_fetch_retry(&hwstat->syncp, start));
}
@@ -1790,13 +1768,13 @@ static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *inf
struct b44 *bp = netdev_priv(dev);
struct ssb_bus *bus = bp->sdev->bus;
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
switch (bus->bustype) {
case SSB_BUSTYPE_PCI:
- strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
break;
case SSB_BUSTYPE_SSB:
- strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
+ strscpy(info->bus_info, "SSB", sizeof(info->bus_info));
break;
case SSB_BUSTYPE_PCMCIA:
case SSB_BUSTYPE_SDIO:
@@ -1961,7 +1939,9 @@ static int b44_set_link_ksettings(struct net_device *dev,
}
static void b44_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct b44 *bp = netdev_priv(dev);
@@ -1972,7 +1952,9 @@ static void b44_get_ringparam(struct net_device *dev,
}
static int b44_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct b44 *bp = netdev_priv(dev);
@@ -2078,12 +2060,12 @@ static void b44_get_ethtool_stats(struct net_device *dev,
do {
data_src = &hwstat->tx_good_octets;
data_dst = data;
- start = u64_stats_fetch_begin_irq(&hwstat->syncp);
+ start = u64_stats_fetch_begin(&hwstat->syncp);
for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
*data_dst++ = *data_src++;
- } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
+ } while (u64_stats_fetch_retry(&hwstat->syncp, start));
}
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -2371,7 +2353,7 @@ static int b44_init_one(struct ssb_device *sdev,
bp->tx_pending = B44_DEF_TX_RING_PENDING;
dev->netdev_ops = &b44_netdev_ops;
- netif_napi_add(dev, &bp->napi, b44_poll, 64);
+ netif_napi_add(dev, &bp->napi, b44_poll);
dev->watchdog_timeo = B44_TX_TIMEOUT;
dev->min_mtu = B44_MIN_MTU;
dev->max_mtu = B44_MAX_MTU;
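
Editor's note: the _irq variants of the u64_stats fetch helpers are retired throughout this series; readers use u64_stats_fetch_begin()/u64_stats_fetch_retry() and loop until the writer-side sequence is stable. A minimal reader/writer sketch with a hypothetical per-CPU counter struct (syncp must be initialised with u64_stats_init() before use):

#include <linux/u64_stats_sync.h>

struct example_stats {
	u64_stats_t rx_packets;
	struct u64_stats_sync syncp;
};

static void example_stats_inc(struct example_stats *s)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->rx_packets);
	u64_stats_update_end(&s->syncp);
}

static u64 example_stats_read(struct example_stats *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		packets = u64_stats_read(&s->rx_packets);
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return packets;
}
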
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index b07cb9bc5f2d..33d86683af50 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -36,13 +36,24 @@
#define ENET_MAX_ETH_OVERHEAD (ETH_HLEN + BRCM_MAX_TAG_LEN + VLAN_HLEN + \
ETH_FCS_LEN + 4) /* 32 */
+#define ENET_RX_SKB_BUF_SIZE (NET_SKB_PAD + NET_IP_ALIGN + \
+ ETH_HLEN + BRCM_MAX_TAG_LEN + VLAN_HLEN + \
+ ENET_MTU_MAX + ETH_FCS_LEN + 4)
+#define ENET_RX_SKB_BUF_ALLOC_SIZE (SKB_DATA_ALIGN(ENET_RX_SKB_BUF_SIZE) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define ENET_RX_BUF_DMA_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
+#define ENET_RX_BUF_DMA_SIZE (ENET_RX_SKB_BUF_SIZE - ENET_RX_BUF_DMA_OFFSET)
+
struct bcm4908_enet_dma_ring_bd {
__le32 ctl;
__le32 addr;
} __packed;
struct bcm4908_enet_dma_ring_slot {
- struct sk_buff *skb;
+ union {
+ void *buf; /* RX */
+ struct sk_buff *skb; /* TX */
+ };
unsigned int len;
dma_addr_t dma_addr;
};
@@ -260,22 +271,21 @@ static int bcm4908_enet_dma_alloc_rx_buf(struct bcm4908_enet *enet, unsigned int
u32 tmp;
int err;
- slot->len = ENET_MTU_MAX + ENET_MAX_ETH_OVERHEAD;
-
- slot->skb = netdev_alloc_skb(enet->netdev, slot->len);
- if (!slot->skb)
+ slot->buf = napi_alloc_frag(ENET_RX_SKB_BUF_ALLOC_SIZE);
+ if (!slot->buf)
return -ENOMEM;
- slot->dma_addr = dma_map_single(dev, slot->skb->data, slot->len, DMA_FROM_DEVICE);
+ slot->dma_addr = dma_map_single(dev, slot->buf + ENET_RX_BUF_DMA_OFFSET,
+ ENET_RX_BUF_DMA_SIZE, DMA_FROM_DEVICE);
err = dma_mapping_error(dev, slot->dma_addr);
if (err) {
dev_err(dev, "Failed to map DMA buffer: %d\n", err);
- kfree_skb(slot->skb);
- slot->skb = NULL;
+ skb_free_frag(slot->buf);
+ slot->buf = NULL;
return err;
}
- tmp = slot->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+ tmp = ENET_RX_BUF_DMA_SIZE << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
tmp |= DMA_CTL_STATUS_OWN;
if (idx == enet->rx_ring.length - 1)
tmp |= DMA_CTL_STATUS_WRAP;
@@ -315,11 +325,11 @@ static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
for (i = rx_ring->length - 1; i >= 0; i--) {
slot = &rx_ring->slots[i];
- if (!slot->skb)
+ if (!slot->buf)
continue;
dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_FROM_DEVICE);
- kfree_skb(slot->skb);
- slot->skb = NULL;
+ skb_free_frag(slot->buf);
+ slot->buf = NULL;
}
}
@@ -495,6 +505,7 @@ static int bcm4908_enet_stop(struct net_device *netdev)
netif_carrier_off(netdev);
napi_disable(&rx_ring->napi);
napi_disable(&tx_ring->napi);
+ netdev_reset_queue(netdev);
bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring);
bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring);
@@ -507,7 +518,7 @@ static int bcm4908_enet_stop(struct net_device *netdev)
return 0;
}
-static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct bcm4908_enet *enet = netdev_priv(netdev);
struct bcm4908_enet_dma_ring *ring = &enet->tx_ring;
@@ -554,6 +565,8 @@ static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netde
if (ring->write_idx + 1 == ring->length - 1)
tmp |= DMA_CTL_STATUS_WRAP;
+ netdev_sent_queue(enet->netdev, skb->len);
+
buf_desc->addr = cpu_to_le32((uint32_t)slot->dma_addr);
buf_desc->ctl = cpu_to_le32(tmp);
@@ -561,8 +574,6 @@ static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netde
if (++ring->write_idx == ring->length - 1)
ring->write_idx = 0;
- enet->netdev->stats.tx_bytes += skb->len;
- enet->netdev->stats.tx_packets++;
return NETDEV_TX_OK;
}
@@ -577,6 +588,7 @@ static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight)
while (handled < weight) {
struct bcm4908_enet_dma_ring_bd *buf_desc;
struct bcm4908_enet_dma_ring_slot slot;
+ struct sk_buff *skb;
u32 ctl;
int len;
int err;
@@ -600,16 +612,24 @@ static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight)
if (len < ETH_ZLEN ||
(ctl & (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) != (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) {
- kfree_skb(slot.skb);
+ skb_free_frag(slot.buf);
enet->netdev->stats.rx_dropped++;
break;
}
- dma_unmap_single(dev, slot.dma_addr, slot.len, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, slot.dma_addr, ENET_RX_BUF_DMA_SIZE, DMA_FROM_DEVICE);
+
+ skb = build_skb(slot.buf, ENET_RX_SKB_BUF_ALLOC_SIZE);
+ if (unlikely(!skb)) {
+ skb_free_frag(slot.buf);
+ enet->netdev->stats.rx_dropped++;
+ break;
+ }
+ skb_reserve(skb, ENET_RX_BUF_DMA_OFFSET);
+ skb_put(skb, len - ETH_FCS_LEN);
+ skb->protocol = eth_type_trans(skb, enet->netdev);
- skb_put(slot.skb, len - ETH_FCS_LEN);
- slot.skb->protocol = eth_type_trans(slot.skb, enet->netdev);
- netif_receive_skb(slot.skb);
+ netif_receive_skb(skb);
enet->netdev->stats.rx_packets++;
enet->netdev->stats.rx_bytes += len;
@@ -646,13 +666,18 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
dev_kfree_skb(slot->skb);
+
+ handled++;
bytes += slot->len;
+
if (++tx_ring->read_idx == tx_ring->length)
tx_ring->read_idx = 0;
-
- handled++;
}
+ netdev_completed_queue(enet->netdev, handled, bytes);
+ enet->netdev->stats.tx_packets += handled;
+ enet->netdev->stats.tx_bytes += bytes;
+
if (handled < weight) {
napi_complete_done(napi, handled);
bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
@@ -718,24 +743,29 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
err = of_get_ethdev_address(dev->of_node, netdev);
+ if (err == -EPROBE_DEFER)
+ goto err_dma_free;
if (err)
eth_hw_addr_random(netdev);
netdev->netdev_ops = &bcm4908_enet_netdev_ops;
netdev->min_mtu = ETH_ZLEN;
netdev->mtu = ETH_DATA_LEN;
netdev->max_mtu = ENET_MTU_MAX;
- netif_tx_napi_add(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx, NAPI_POLL_WEIGHT);
- netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx);
+ netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx);
err = register_netdev(netdev);
- if (err) {
- bcm4908_enet_dma_free(enet);
- return err;
- }
+ if (err)
+ goto err_dma_free;
platform_set_drvdata(pdev, enet);
return 0;
+
+err_dma_free:
+ bcm4908_enet_dma_free(enet);
+
+ return err;
}
static int bcm4908_enet_remove(struct platform_device *pdev)
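
Editor's note: bcm4908_enet moves its TX accounting to byte queue limits: bytes are declared with netdev_sent_queue() when a frame is queued, drained with netdev_completed_queue() from the TX-complete poll, and the state is reset in ndo_stop(). A simplified sketch with hypothetical helper names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Queue side: report the bytes handed to hardware. */
static void example_xmit_account(struct net_device *dev, struct sk_buff *skb)
{
	netdev_sent_queue(dev, skb->len);
	/* ...write the descriptor and ring the doorbell... */
}

/* Completion side: one call per poll, not one per packet. */
static void example_tx_complete(struct net_device *dev,
				unsigned int pkts, unsigned int bytes)
{
	netdev_completed_queue(dev, pkts, bytes);
	dev->stats.tx_packets += pkts;
	dev->stats.tx_bytes += bytes;
}

/* Teardown: forget in-flight accounting before the rings are freed. */
static void example_stop_account(struct net_device *dev)
{
	netdev_reset_queue(dev);
}
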
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a568994a03a6..2cf96892e565 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -388,7 +388,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
priv->rx_buf_size, DMA_FROM_DEVICE);
priv->rx_buf[desc_idx] = NULL;
- skb = build_skb(buf, priv->rx_frag_size);
+ skb = napi_build_skb(buf, priv->rx_frag_size);
if (unlikely(!skb)) {
skb_free_frag(buf);
dev->stats.rx_dropped++;
@@ -423,7 +423,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
/*
* try to or force reclaim of transmitted buffers
*/
-static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
+static int bcm_enet_tx_reclaim(struct net_device *dev, int force, int budget)
{
struct bcm_enet_priv *priv;
unsigned int bytes;
@@ -468,7 +468,7 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
dev->stats.tx_errors++;
bytes += skb->len;
- dev_kfree_skb(skb);
+ napi_consume_skb(skb, budget);
released++;
}
@@ -499,7 +499,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
ENETDMAC_IR, priv->tx_chan);
/* reclaim sent skb */
- bcm_enet_tx_reclaim(dev, 0);
+ bcm_enet_tx_reclaim(dev, 0, budget);
spin_lock(&priv->rx_lock);
rx_work_done = bcm_enet_receive_queue(dev, budget);
@@ -1211,7 +1211,7 @@ static int bcm_enet_stop(struct net_device *dev)
bcm_enet_disable_mac(priv);
/* force reclaim of all tx buffers */
- bcm_enet_tx_reclaim(dev, 1);
+ bcm_enet_tx_reclaim(dev, 1, 0);
/* free the rx buffer ring */
bcm_enet_free_rx_buf_ring(kdev, priv);
@@ -1321,8 +1321,8 @@ static const u32 unused_mib_regs[] = {
static void bcm_enet_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
static int bcm_enet_get_sset_count(struct net_device *netdev,
@@ -1497,8 +1497,11 @@ static int bcm_enet_set_link_ksettings(struct net_device *dev,
}
}
-static void bcm_enet_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static void
+bcm_enet_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
@@ -1512,7 +1515,9 @@ static void bcm_enet_get_ringparam(struct net_device *dev,
}
static int bcm_enet_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
int was_running;
@@ -1711,17 +1716,17 @@ static int bcm_enet_probe(struct platform_device *pdev)
struct bcm_enet_priv *priv;
struct net_device *dev;
struct bcm63xx_enet_platform_data *pd;
- struct resource *res_irq, *res_irq_rx, *res_irq_tx;
+ int irq, irq_rx, irq_tx;
struct mii_bus *bus;
int i, ret;
if (!bcm_enet_shared_base[0])
return -EPROBE_DEFER;
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
- if (!res_irq || !res_irq_rx || !res_irq_tx)
+ irq = platform_get_irq(pdev, 0);
+ irq_rx = platform_get_irq(pdev, 1);
+ irq_tx = platform_get_irq(pdev, 2);
+ if (irq < 0 || irq_rx < 0 || irq_tx < 0)
return -ENODEV;
dev = alloc_etherdev(sizeof(*priv));
@@ -1743,9 +1748,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
goto out;
}
- dev->irq = priv->irq = res_irq->start;
- priv->irq_rx = res_irq_rx->start;
- priv->irq_tx = res_irq_tx->start;
+ dev->irq = priv->irq = irq;
+ priv->irq_rx = irq_rx;
+ priv->irq_tx = irq_tx;
priv->mac_clk = devm_clk_get(&pdev->dev, "enet");
if (IS_ERR(priv->mac_clk)) {
@@ -1854,7 +1859,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
/* register netdevice */
dev->netdev_ops = &bcm_enet_ops;
- netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
+ netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
dev->ethtool_ops = &bcm_enet_ethtool_ops;
/* MTU range: 46 - 2028 */
@@ -1930,7 +1935,7 @@ static int bcm_enet_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver bcm63xx_enet_driver = {
+static struct platform_driver bcm63xx_enet_driver = {
.probe = bcm_enet_probe,
.remove = bcm_enet_remove,
.driver = {
@@ -2357,7 +2362,7 @@ static int bcm_enetsw_stop(struct net_device *dev)
bcm_enet_disable_dma(priv, priv->rx_chan);
/* force reclaim of all tx buffers */
- bcm_enet_tx_reclaim(dev, 1);
+ bcm_enet_tx_reclaim(dev, 1, 0);
/* free the rx buffer ring */
bcm_enet_free_rx_buf_ring(kdev, priv);
@@ -2579,8 +2584,11 @@ static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
}
}
-static void bcm_enetsw_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static void
+bcm_enetsw_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
@@ -2595,8 +2603,11 @@ static void bcm_enetsw_get_ringparam(struct net_device *dev,
ering->tx_pending = priv->tx_ring_size;
}
-static int bcm_enetsw_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+static int
+bcm_enetsw_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bcm_enet_priv *priv;
int was_running;
@@ -2703,7 +2714,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
/* register netdevice */
dev->netdev_ops = &bcm_enetsw_ops;
- netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
+ netif_napi_add_weight(dev, &priv->napi, bcm_enet_poll, 16);
dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -2745,7 +2756,7 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver bcm63xx_enetsw_driver = {
+static struct platform_driver bcm63xx_enetsw_driver = {
.probe = bcm_enetsw_probe,
.remove = bcm_enetsw_remove,
.driver = {
@@ -2773,17 +2784,11 @@ static int bcm_enet_shared_probe(struct platform_device *pdev)
return 0;
}
-static int bcm_enet_shared_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
/* this "shared" driver is needed because both macs share a single
* address space
*/
struct platform_driver bcm63xx_enet_shared_driver = {
.probe = bcm_enet_shared_probe,
- .remove = bcm_enet_shared_remove,
.driver = {
.name = "bcm63xx_enet_shared",
.owner = THIS_MODULE,
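
Editor's note: bcm63xx_enet stops pulling IRQ numbers out of IORESOURCE_IRQ resources and asks platform_get_irq() directly, which also works when the interrupts come from the device tree. A minimal sketch of the lookup, assuming three hypothetical interrupt lines:

#include <linux/errno.h>
#include <linux/platform_device.h>

static int example_get_irqs(struct platform_device *pdev,
			    int *irq, int *irq_rx, int *irq_tx)
{
	/* platform_get_irq() returns a negative errno on failure. */
	*irq = platform_get_irq(pdev, 0);
	*irq_rx = platform_get_irq(pdev, 1);
	*irq_tx = platform_get_irq(pdev, 2);

	if (*irq < 0 || *irq_rx < 0 || *irq_tx < 0)
		return -ENODEV;

	return 0;
}
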
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 60dde29974bf..38d0cdaf22a5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -295,6 +295,8 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
/* RBUF misc statistics */
STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
+ /* RDMA misc statistics */
+ STAT_RDMA("rdma_ovflow_cnt", mib.rdma_ovflow_cnt, RDMA_OVFL_DISC_CNTR),
STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
@@ -308,8 +310,8 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
static void bcm_sysport_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, "platform", sizeof(info->bus_info));
}
static u32 bcm_sysport_get_msglvl(struct net_device *dev)
@@ -333,6 +335,7 @@ static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
case BCM_SYSPORT_STAT_NETDEV64:
case BCM_SYSPORT_STAT_RXCHK:
case BCM_SYSPORT_STAT_RBUF:
+ case BCM_SYSPORT_STAT_RDMA:
case BCM_SYSPORT_STAT_SOFT:
return true;
default:
@@ -436,6 +439,14 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
if (val == ~0)
rbuf_writel(priv, 0, s->reg_offset);
break;
+ case BCM_SYSPORT_STAT_RDMA:
+ if (!priv->is_lite)
+ continue;
+
+ val = rdma_readl(priv, s->reg_offset);
+ if (val == ~0)
+ rdma_writel(priv, 0, s->reg_offset);
+ break;
}
j += s->stat_sizeof;
@@ -457,10 +468,10 @@ static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
for (q = 0; q < priv->netdev->num_tx_queues; q++) {
ring = &priv->tx_rings[q];
do {
- start = u64_stats_fetch_begin_irq(&priv->syncp);
+ start = u64_stats_fetch_begin(&priv->syncp);
bytes = ring->bytes;
packets = ring->packets;
- } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
*tx_bytes += bytes;
*tx_packets += packets;
@@ -504,9 +515,9 @@ static void bcm_sysport_get_stats(struct net_device *dev,
if (s->stat_sizeof == sizeof(u64) &&
s->type == BCM_SYSPORT_STAT_NETDEV64) {
do {
- start = u64_stats_fetch_begin_irq(syncp);
+ start = u64_stats_fetch_begin(syncp);
data[i] = *(u64 *)p;
- } while (u64_stats_fetch_retry_irq(syncp, start));
+ } while (u64_stats_fetch_retry(syncp, start));
} else
data[i] = *(u32 *)p;
j++;
@@ -1517,7 +1528,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
/* Initialize SW view of the ring */
spin_lock_init(&ring->lock);
ring->priv = priv;
- netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
+ netif_napi_add_tx(priv->netdev, &ring->napi, bcm_sysport_tx_poll);
ring->index = index;
ring->size = size;
ring->clean_index = 0;
@@ -1878,10 +1889,10 @@ static void bcm_sysport_get_stats64(struct net_device *dev,
&stats->tx_packets);
do {
- start = u64_stats_fetch_begin_irq(&priv->syncp);
+ start = u64_stats_fetch_begin(&priv->syncp);
stats->rx_packets = stats64->rx_packets;
stats->rx_bytes = stats64->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
}
static void bcm_sysport_netif_start(struct net_device *dev)
@@ -1991,6 +2002,9 @@ static int bcm_sysport_open(struct net_device *dev)
goto out_clk_disable;
}
+ /* Indicate that the MAC is responsible for PHY PM */
+ phydev->mac_managed_pm = true;
+
/* Reset house keeping link status */
priv->old_duplex = -1;
priv->old_link = -1;
@@ -2180,13 +2194,9 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
return -EOPNOTSUPP;
- /* All filters are already in use, we cannot match more rules */
- if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
- RXCHK_BRCM_TAG_MAX)
- return -ENOSPC;
-
index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
if (index >= RXCHK_BRCM_TAG_MAX)
+ /* All filters are already in use, we cannot match more rules */
return -ENOSPC;
/* Location is the classification ID, and index is the position
@@ -2568,7 +2578,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, dev);
dev->ethtool_ops = &bcm_sysport_ethtool_ops;
dev->netdev_ops = &bcm_sysport_netdev_ops;
- netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
+ netif_napi_add(dev, &priv->napi, bcm_sysport_poll);
dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -2585,8 +2595,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
device_set_wakeup_capable(&pdev->dev, 1);
priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
- if (IS_ERR(priv->wol_clk))
- return PTR_ERR(priv->wol_clk);
+ if (IS_ERR(priv->wol_clk)) {
+ ret = PTR_ERR(priv->wol_clk);
+ goto err_deregister_fixed_link;
+ }
/* Set the needed headroom once and for all */
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
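
Editor's note: the rule-insert path above drops the separate bitmap_weight() "is the table full?" test; find_first_zero_bit() already returns the bitmap size when no free slot exists, so one lookup covers both cases. A tiny sketch with a hypothetical 32-entry filter table:

#include <linux/bitmap.h>
#include <linux/errno.h>

#define EX_FILTER_MAX 32

static int example_alloc_filter(unsigned long *filters)
{
	unsigned int index;

	index = find_first_zero_bit(filters, EX_FILTER_MAX);
	if (index >= EX_FILTER_MAX)
		return -ENOSPC;	/* table full: no second scan needed */

	set_bit(index, filters);
	return index;
}
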
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 16b73bb9acc7..335cf6631db5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -290,6 +290,7 @@ struct bcm_rsb {
#define RDMA_WRITE_PTR_HI 0x1010
#define RDMA_WRITE_PTR_LO 0x1014
+#define RDMA_OVFL_DISC_CNTR 0x1018
#define RDMA_PROD_INDEX 0x1018
#define RDMA_PROD_INDEX_MASK 0xffff
@@ -484,7 +485,7 @@ struct bcm_rsb {
/* Number of Receive hardware descriptor words */
#define SP_NUM_HW_RX_DESC_WORDS 1024
-#define SP_LT_NUM_HW_RX_DESC_WORDS 256
+#define SP_LT_NUM_HW_RX_DESC_WORDS 512
/* Internal linked-list RAM size */
#define SP_NUM_TX_DESC 1536
@@ -565,6 +566,7 @@ struct bcm_sysport_mib {
u32 rxchk_other_pkt_disc;
u32 rbuf_ovflow_cnt;
u32 rbuf_err_cnt;
+ u32 rdma_ovflow_cnt;
u32 alloc_rx_buff_failed;
u32 rx_dma_failed;
u32 tx_dma_failed;
@@ -581,6 +583,7 @@ enum bcm_sysport_stat_type {
BCM_SYSPORT_STAT_RUNT,
BCM_SYSPORT_STAT_RXCHK,
BCM_SYSPORT_STAT_RBUF,
+ BCM_SYSPORT_STAT_RDMA,
BCM_SYSPORT_STAT_SOFT,
};
@@ -627,6 +630,14 @@ enum bcm_sysport_stat_type {
.reg_offset = ofs, \
}
+#define STAT_RDMA(str, m, ofs) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = BCM_SYSPORT_STAT_RDMA, \
+ .reg_offset = ofs, \
+}
+
/* TX bytes and packets */
#define NUM_SYSPORT_TXQ_STAT 2
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 086739e4f40a..9b83d5361699 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -234,6 +234,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
np = of_get_child_by_name(core->dev.of_node, "mdio");
err = of_mdiobus_register(mii_bus, np);
+ of_node_put(np);
if (err) {
dev_err(&core->dev, "Registration of mii bus failed\n");
goto err_free_bus;
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index e6f48786949c..6e4f36aaf5db 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -240,12 +240,12 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
- ci->pkg == BCMA_PKG_ID_BCM47186) {
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
- if (ci->pkg == BCMA_PKG_ID_BCM5358)
+ if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358)
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
break;
case BCMA_CHIP_ID_BCM53573:
@@ -332,7 +332,6 @@ static void bgmac_remove(struct bcma_device *core)
bcma_mdio_mii_unregister(bgmac->mii_bus);
bgmac_enet_remove(bgmac);
bcma_set_drvdata(core, NULL);
- kfree(bgmac);
}
static struct bcma_driver bgmac_bcma_driver = {
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index c6412c523637..b4381cd41979 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct bgmac *bgmac;
+ struct resource *regs;
int ret;
bgmac = bgmac_alloc(&pdev->dev);
@@ -208,15 +209,23 @@ static int bgmac_probe(struct platform_device *pdev)
if (IS_ERR(bgmac->plat.base))
return PTR_ERR(bgmac->plat.base);
- bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base");
- if (IS_ERR(bgmac->plat.idm_base))
- return PTR_ERR(bgmac->plat.idm_base);
- else
+ /* The idm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+ if (regs) {
+ bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(bgmac->plat.idm_base))
+ return PTR_ERR(bgmac->plat.idm_base);
bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+ }
- bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
- if (IS_ERR(bgmac->plat.nicpm_base))
- return PTR_ERR(bgmac->plat.nicpm_base);
+ /* The nicpm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
+ if (regs) {
+ bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
+ regs);
+ if (IS_ERR(bgmac->plat.nicpm_base))
+ return PTR_ERR(bgmac->plat.nicpm_base);
+ }
bgmac->read = platform_bgmac_read;
bgmac->write = platform_bgmac_write;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 7b525c65bacb..1761df8fb7f9 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -189,8 +189,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
}
slot->skb = skb;
- ring->end += nr_frags + 1;
netdev_sent_queue(net_dev, skb->len);
+ ring->end += nr_frags + 1;
wmb();
@@ -890,13 +890,13 @@ static void bgmac_chip_reset_idm_config(struct bgmac *bgmac)
if (iost & BGMAC_BCMA_IOST_ATTACHED) {
flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
- if (!bgmac->has_robosw)
+ if (bgmac->in_init || !bgmac->has_robosw)
flags |= BGMAC_BCMA_IOCTL_SW_RESET;
}
bgmac_clk_enable(bgmac, flags);
}
- if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
+ if (iost & BGMAC_BCMA_IOST_ATTACHED && (bgmac->in_init || !bgmac->has_robosw))
bgmac_idm_write(bgmac, BCMA_IOCTL,
bgmac_idm_read(bgmac, BCMA_IOCTL) &
~BGMAC_BCMA_IOCTL_SW_RESET);
@@ -1367,7 +1367,7 @@ static void bgmac_get_strings(struct net_device *dev, u32 stringset,
return;
for (i = 0; i < BGMAC_STATS_LEN; i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}
@@ -1395,8 +1395,8 @@ static void bgmac_get_ethtool_stats(struct net_device *dev,
static void bgmac_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, "AXI", sizeof(info->bus_info));
}
static const struct ethtool_ops bgmac_ethtool_ops = {
@@ -1490,6 +1490,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
struct net_device *net_dev = bgmac->net_dev;
int err;
+ bgmac->in_init = true;
+
bgmac_chip_intrs_off(bgmac);
net_dev->irq = bgmac->irq;
@@ -1527,7 +1529,7 @@ int bgmac_enet_probe(struct bgmac *bgmac)
if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
- netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
+ netif_napi_add(net_dev, &bgmac->napi, bgmac_poll);
err = bgmac_phy_connect(bgmac);
if (err) {
@@ -1542,6 +1544,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
/* Omit FCS from max MTU size */
net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN;
+ bgmac->in_init = false;
+
err = register_netdev(bgmac->net_dev);
if (err) {
dev_err(bgmac->dev, "Cannot register net device\n");
@@ -1568,7 +1572,6 @@ void bgmac_enet_remove(struct bgmac *bgmac)
phy_disconnect(bgmac->net_dev->phydev);
netif_napi_del(&bgmac->napi);
bgmac_dma_free(bgmac);
- free_netdev(bgmac->net_dev);
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 110088e662ea..d73ef262991d 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -364,8 +364,6 @@
#define BGMAC_CHIPCTL_7_IF_TYPE_MII 0x00000040
#define BGMAC_CHIPCTL_7_IF_TYPE_RGMII 0x00000080
-#define BGMAC_WEIGHT 64
-
#define ETHER_MAX_LEN (ETH_FRAME_LEN + ETH_FCS_LEN)
/* Feature Flags */
@@ -474,6 +472,8 @@ struct bgmac {
int irq;
u32 int_mask;
+ bool in_init;
+
/* Current MAC state */
int mac_speed;
int mac_duplex;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index babc955ba64e..466e1d62bcf6 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -48,7 +48,6 @@
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
-#include <linux/aer.h>
#include <linux/crash_dump.h>
#if IS_ENABLED(CONFIG_CNIC)
@@ -176,12 +175,12 @@ static const struct flash_spec flash_table[] =
{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
- "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
+ "Entry 0101: ST M45PE10 (128kB non-buffered)"},
/* Entry 0110: ST M45PE20 (non-buffered flash)*/
{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
- "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
+ "Entry 0110: ST M45PE20 (256kB non-buffered)"},
/* Saifun SA25F005 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
@@ -3045,7 +3044,7 @@ error:
dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
DMA_FROM_DEVICE);
- skb = build_skb(data, 0);
+ skb = slab_build_skb(data);
if (!skb) {
kfree(data);
goto error;
@@ -3829,7 +3828,7 @@ load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
return 0;
}
-static int
+static void
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
const struct bnx2_mips_fw_file_entry *fw_entry)
{
@@ -3897,48 +3896,34 @@ load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
val &= ~cpu_reg->mode_value_halt;
bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
-
- return 0;
}
-static int
+static void
bnx2_init_cpus(struct bnx2 *bp)
{
const struct bnx2_mips_fw_file *mips_fw =
(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
const struct bnx2_rv2p_fw_file *rv2p_fw =
(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
- int rc;
/* Initialize the RV2P processor. */
load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
/* Initialize the RX Processor. */
- rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
- if (rc)
- goto init_cpu_err;
+ load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
/* Initialize the TX Processor. */
- rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
- if (rc)
- goto init_cpu_err;
+ load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
/* Initialize the TX Patch-up Processor. */
- rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
- if (rc)
- goto init_cpu_err;
+ load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
/* Initialize the Completion Processor. */
- rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
- if (rc)
- goto init_cpu_err;
+ load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
/* Initialize the Command Processor. */
- rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
-
-init_cpu_err:
- return rc;
+ load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
}
static void
@@ -4951,8 +4936,7 @@ bnx2_init_chip(struct bnx2 *bp)
} else
bnx2_init_context(bp);
- if ((rc = bnx2_init_cpus(bp)) != 0)
- return rc;
+ bnx2_init_cpus(bp);
bnx2_init_nvram(bp);
@@ -5415,8 +5399,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
bp->rx_buf_use_size = rx_size;
/* hw alignment + build_skb() overhead*/
- bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
- NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ bp->rx_buf_size = kmalloc_size_roundup(
+ SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+ NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
bp->rx_ring_size = size;
bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
@@ -7042,9 +7027,9 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct bnx2 *bp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
- strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
#define BNX2_REGDUMP_LEN (32 * 1024)
@@ -7318,7 +7303,9 @@ static int bnx2_set_coalesce(struct net_device *dev,
}
static void
-bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2 *bp = netdev_priv(dev);
@@ -7389,7 +7376,9 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
}
static int
-bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2 *bp = netdev_priv(dev);
int rc;
@@ -8088,7 +8077,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
int rc, i, j;
u32 reg;
u64 dma_mask, persist_dma_mask;
- int err;
SET_NETDEV_DEV(dev, &pdev->dev);
bp = netdev_priv(dev);
@@ -8171,12 +8159,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->flags |= BNX2_FLAG_PCIE;
if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
-
- /* AER (Advanced Error Reporting) hooks */
- err = pci_enable_pcie_error_reporting(pdev);
- if (!err)
- bp->flags |= BNX2_FLAG_AER_ENABLED;
-
} else {
bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
if (bp->pcix_cap == 0) {
@@ -8212,7 +8194,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask);
if (rc) {
dev_err(&pdev->dev,
- "pci_set_consistent_dma_mask failed, aborting\n");
+ "dma_set_coherent_mask failed, aborting\n");
goto err_out_unmap;
}
} else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
@@ -8455,11 +8437,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
return 0;
err_out_unmap:
- if (bp->flags & BNX2_FLAG_AER_ENABLED) {
- pci_disable_pcie_error_reporting(pdev);
- bp->flags &= ~BNX2_FLAG_AER_ENABLED;
- }
-
pci_iounmap(pdev, bp->regview);
bp->regview = NULL;
@@ -8518,7 +8495,7 @@ bnx2_init_napi(struct bnx2 *bp)
else
poll = bnx2_poll_msix;
- netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
+ netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll);
bnapi->bp = bp;
}
}
@@ -8633,11 +8610,6 @@ bnx2_remove_one(struct pci_dev *pdev)
bnx2_free_stats_blk(dev);
kfree(bp->temp_stats_blk);
- if (bp->flags & BNX2_FLAG_AER_ENABLED) {
- pci_disable_pcie_error_reporting(pdev);
- bp->flags &= ~BNX2_FLAG_AER_ENABLED;
- }
-
bnx2_release_firmware(bp);
free_netdev(dev);
@@ -8761,9 +8733,6 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
}
rtnl_unlock();
- if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
- return result;
-
return result;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index a09ec47461c9..315b08c64edd 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6808,7 +6808,6 @@ struct bnx2 {
#define BNX2_FLAG_JUMBO_BROKEN 0x00000800
#define BNX2_FLAG_CAN_KEEP_VLAN 0x00001000
#define BNX2_FLAG_BROKEN_STATS 0x00002000
-#define BNX2_FLAG_AER_ENABLED 0x00004000
struct bnx2_napi bnx2_napi[BNX2_MAX_MSIX_VEC];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 2b06d78baa08..8bcde0a6e011 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1271,7 +1271,7 @@ struct bnx2x_fw_stats_data {
struct per_port_stats port;
struct per_pf_stats pf;
struct fcoe_statistics_params fcoe;
- struct per_queue_stats queue_stats[1];
+ struct per_queue_stats queue_stats[];
};
/* Public slow path states */
@@ -1486,7 +1486,6 @@ struct bnx2x {
#define IS_VF_FLAG (1 << 22)
#define BC_SUPPORTS_RMMOD_CMD (1 << 23)
#define HAS_PHYS_PORT_ID (1 << 24)
-#define AER_ENABLED (1 << 25)
#define PTP_SUPPORTED (1 << 26)
#define TX_TIMESTAMPING_EN (1 << 27)
@@ -1850,6 +1849,14 @@ struct bnx2x {
/* Vxlan/Geneve related information */
u16 udp_tunnel_ports[BNX2X_UDP_PORT_MAX];
+
+#define FW_CAP_INVALIDATE_VF_FP_HSI BIT(0)
+ u32 fw_cap;
+
+ u32 fw_major;
+ u32 fw_minor;
+ u32 fw_rev;
+ u32 fw_eng;
};
/* Tx queues may be less or equal to Rx queues */
@@ -2525,5 +2532,4 @@ void bnx2x_register_phc(struct bnx2x *bp);
* Meant for implicit re-load flows.
*/
int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
-
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e8e8c2d593c5..6ea5521074d3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -25,6 +25,7 @@
#include <linux/ip.h>
#include <linux/crash_dump.h>
#include <net/tcp.h>
+#include <net/gro.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
@@ -43,8 +44,7 @@ static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
/* Add NAPI objects */
for_each_rx_queue_cnic(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
}
}
@@ -54,8 +54,7 @@ static void bnx2x_add_all_napi(struct bnx2x *bp)
/* Add NAPI objects */
for_each_eth_queue(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
}
}
@@ -149,7 +148,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
phy_fw_ver[0] = '\0';
bnx2x_get_ext_phy_fw_version(&bp->link_params,
phy_fw_ver, PHY_FW_VER_LEN);
- strlcpy(buf, bp->fw_ver, buf_len);
+ strscpy(buf, bp->fw_ver, buf_len);
snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
"bc %d.%d.%d%s%s",
(bp->common.bc_ver & 0xff0000) >> 16,
@@ -673,6 +672,18 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return 0;
}
+static struct sk_buff *
+bnx2x_build_skb(const struct bnx2x_fastpath *fp, void *data)
+{
+ struct sk_buff *skb;
+
+ if (fp->rx_frag_size)
+ skb = build_skb(data, fp->rx_frag_size);
+ else
+ skb = slab_build_skb(data);
+ return skb;
+}
+
static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
if (fp->rx_frag_size)
@@ -780,7 +791,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
if (likely(new_data))
- skb = build_skb(data, fp->rx_frag_size);
+ skb = bnx2x_build_skb(fp, data);
if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
@@ -788,6 +799,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
pad, len, fp->rx_buf_size);
bnx2x_panic();
+ bnx2x_frag_free(fp, new_data);
return;
}
#endif
@@ -1046,7 +1058,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size,
DMA_FROM_DEVICE);
- skb = build_skb(data, fp->rx_frag_size);
+ skb = bnx2x_build_skb(fp, data);
if (unlikely(!skb)) {
bnx2x_frag_free(fp, data);
bnx2x_fp_qstats(bp, fp)->
@@ -1923,8 +1935,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
/* Skip VLAN tag if present */
if (ether_type == ETH_P_8021Q) {
- struct vlan_ethhdr *vhdr =
- (struct vlan_ethhdr *)skb->data;
+ struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
}
@@ -2363,26 +2374,30 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
/* is another pf loaded on this engine? */
if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
- /* build my FW version dword */
- u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
- (BCM_5710_FW_MINOR_VERSION << 8) +
- (BCM_5710_FW_REVISION_VERSION << 16) +
- (BCM_5710_FW_ENGINEERING_VERSION << 24);
+ u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng;
+ u32 loaded_fw;
/* read loaded FW from chip */
- u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+ loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
- DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
- loaded_fw, my_fw);
+ loaded_fw_major = loaded_fw & 0xff;
+ loaded_fw_minor = (loaded_fw >> 8) & 0xff;
+ loaded_fw_rev = (loaded_fw >> 16) & 0xff;
+ loaded_fw_eng = (loaded_fw >> 24) & 0xff;
+
+ DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n",
+ loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng);
/* abort nic load if version mismatch */
- if (my_fw != loaded_fw) {
+ if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION ||
+ loaded_fw_minor != BCM_5710_FW_MINOR_VERSION ||
+ loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION ||
+ loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) {
if (print_err)
- BNX2X_ERR("bnx2x with FW %x was already loaded which mismatches my %x FW. Aborting\n",
- loaded_fw, my_fw);
+ BNX2X_ERR("loaded FW incompatible. Aborting\n");
else
- BNX2X_DEV_INFO("bnx2x with FW %x was already loaded which mismatches my %x FW, possibly due to MF UNDI\n",
- loaded_fw, my_fw);
+ BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n");
+
return -EBUSY;
}
}
@@ -3416,12 +3431,9 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
/* Headers length */
if (xmit_type & XMIT_GSO_ENC)
- hlen = (int)(skb_inner_transport_header(skb) -
- skb->data) +
- inner_tcp_hdrlen(skb);
+ hlen = skb_inner_tcp_all_headers(skb);
else
- hlen = (int)(skb_transport_header(skb) -
- skb->data) + tcp_hdrlen(skb);
+ hlen = skb_tcp_all_headers(skb);
/* Amount of data (w/o headers) on linear part of SKB*/
first_bd_sz = skb_headlen(skb) - hlen;
@@ -3529,15 +3541,13 @@ static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- return skb_inner_transport_header(skb) +
- inner_tcp_hdrlen(skb) - skb->data;
+ return skb_inner_tcp_all_headers(skb);
}
/* We support checksum offload for TCP and UDP only.
* No need to pass the UDP header length - it's a constant.
*/
- return skb_inner_transport_header(skb) +
- sizeof(struct udphdr) - skb->data;
+ return skb_inner_transport_offset(skb) + sizeof(struct udphdr);
}
/**
@@ -3563,12 +3573,12 @@ static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+ return skb_tcp_all_headers(skb);
}
/* We support checksum offload for TCP and UDP only.
* No need to pass the UDP header length - it's a constant.
*/
- return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
+ return skb_transport_offset(skb) + sizeof(struct udphdr);
}
/* set FW indication according to inner or outer protocols if tunneled */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 472a3a478038..bda3ccc28eca 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1112,7 +1112,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
int ext_dev_info_offset;
u32 mbi;
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
if (SHMEM2_HAS(bp, extended_dev_info_shared_addr)) {
ext_dev_info_offset = SHMEM2_RD(bp,
@@ -1126,7 +1126,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
(mbi & 0xff000000) >> 24,
(mbi & 0x00ff0000) >> 16,
(mbi & 0x0000ff00) >> 8);
- strlcpy(info->fw_version, version,
+ strscpy(info->fw_version, version,
sizeof(info->fw_version));
}
}
@@ -1135,7 +1135,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
strlcat(info->fw_version, version, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
}
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -1914,7 +1914,9 @@ static int bnx2x_set_coalesce(struct net_device *dev,
}
static void bnx2x_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1938,7 +1940,9 @@ static void bnx2x_get_ringparam(struct net_device *dev,
}
static int bnx2x_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 3f8435208bf4..a84d015da5df 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -241,6 +241,8 @@
IRO[221].m2))
#define XSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[48].base + ((funcId) * IRO[48].m1))
+#define XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(fid) \
+ (IRO[386].base + ((fid) * IRO[386].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
/* eth hsi version */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 622fadc50316..611efee75834 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -3024,7 +3024,8 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 13
-#define BCM_5710_FW_REVISION_VERSION 15
+#define BCM_5710_FW_REVISION_VERSION 21
+#define BCM_5710_FW_REVISION_VERSION_V15 15
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 4e85e7dbc2be..02808513ffe4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6178,7 +6178,8 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
return -EINVAL;
}
- ret = scnprintf(str, *len, "%hx.%hx", num >> 16, num);
+ ret = scnprintf(str, *len, "%x.%x", (num >> 16) & 0xFFFF,
+ num & 0xFFFF);
*len -= ret;
return 0;
}
@@ -6193,7 +6194,8 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
return -EINVAL;
}
- ret = scnprintf(str, *len, "%hhx.%hhx.%hhx", num >> 16, num >> 8, num);
+ ret = scnprintf(str, *len, "%x.%x.%x", (num >> 16) & 0xFF,
+ (num >> 8) & 0xFF, num & 0xFF);
*len -= ret;
return 0;
}
@@ -13842,7 +13844,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
* Since some switches tend to reinit the AN process and clear the
- * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+ * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
* and recovered many times
*/
if (vars->check_kr2_recovery_cnt > 0) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index aec666e97683..637d162bbcfa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -74,9 +73,19 @@
__stringify(BCM_5710_FW_MINOR_VERSION) "." \
__stringify(BCM_5710_FW_REVISION_VERSION) "." \
__stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
+#define FW_FILE_VERSION_V15 \
+ __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
+ __stringify(BCM_5710_FW_MINOR_VERSION) "." \
+ __stringify(BCM_5710_FW_REVISION_VERSION_V15) "." \
+ __stringify(BCM_5710_FW_ENGINEERING_VERSION)
+
#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1_V15 "bnx2x/bnx2x-e1-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E1H_V15 "bnx2x/bnx2x-e1h-" FW_FILE_VERSION_V15 ".fw"
+#define FW_FILE_NAME_E2_V15 "bnx2x/bnx2x-e2-" FW_FILE_VERSION_V15 ".fw"
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -90,6 +99,9 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
+MODULE_FIRMWARE(FW_FILE_NAME_E1_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15);
+MODULE_FIRMWARE(FW_FILE_NAME_E2_V15);
int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, 0444);
@@ -285,7 +297,7 @@ const u32 dmae_reg_go_c[] = {
/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
-static DEFINE_SEMAPHORE(bnx2x_prev_sem);
+static DEFINE_SEMAPHORE(bnx2x_prev_sem, 1);
static LIST_HEAD(bnx2x_prev_list);
/* Forward declaration */
@@ -747,9 +759,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
CHIP_IS_E1(bp) ? "everest1" :
CHIP_IS_E1H(bp) ? "everest1h" :
CHIP_IS_E2(bp) ? "everest2" : "everest3",
- BCM_5710_FW_MAJOR_VERSION,
- BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION);
+ bp->fw_major, bp->fw_minor, bp->fw_rev);
return rc;
}
@@ -3374,7 +3384,7 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
&bp->sp_objs->mac_obj;
int i;
- strlcpy(ether_stat->version, DRV_MODULE_VERSION,
+ strscpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN);
/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
@@ -13026,27 +13036,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_features_check = bnx2x_features_check,
};
-static int bnx2x_set_coherency_mask(struct bnx2x *bp)
-{
- struct device *dev = &bp->pdev->dev;
-
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
- dev_err(dev, "System does not support DMA, aborting\n");
- return -EIO;
- }
-
- return 0;
-}
-
-static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
-{
- if (bp->flags & AER_ENABLED) {
- pci_disable_pcie_error_reporting(bp->pdev);
- bp->flags &= ~AER_ENABLED;
- }
-}
-
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
struct net_device *dev, unsigned long board_type)
{
@@ -13116,9 +13105,11 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
goto err_out_release;
}
- rc = bnx2x_set_coherency_mask(bp);
- if (rc)
+ rc = dma_set_mask_and_coherent(&bp->pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&bp->pdev->dev, "System does not support DMA, aborting\n");
goto err_out_release;
+ }
dev->mem_start = pci_resource_start(pdev, 0);
dev->base_addr = dev->mem_start;
@@ -13157,13 +13148,6 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
/* Set PCIe reset type to fundamental for EEH recovery */
pdev->needs_freset = 1;
- /* AER (Advanced Error reporting) configuration */
- rc = pci_enable_pcie_error_reporting(pdev);
- if (!rc)
- bp->flags |= AER_ENABLED;
- else
- BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);
-
/*
* Clean the following indirect addresses for all functions since it
* is not used by the driver.
@@ -13317,16 +13301,11 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
/* Check FW version */
offset = be32_to_cpu(fw_hdr->fw_version.offset);
fw_ver = firmware->data + offset;
- if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
- (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
- (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
- (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
+ if (fw_ver[0] != bp->fw_major || fw_ver[1] != bp->fw_minor ||
+ fw_ver[2] != bp->fw_rev || fw_ver[3] != bp->fw_eng) {
BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
- fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
- BCM_5710_FW_MAJOR_VERSION,
- BCM_5710_FW_MINOR_VERSION,
- BCM_5710_FW_REVISION_VERSION,
- BCM_5710_FW_ENGINEERING_VERSION);
+ fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
+ bp->fw_major, bp->fw_minor, bp->fw_rev, bp->fw_eng);
return -EINVAL;
}
@@ -13406,32 +13385,49 @@ do { \
static int bnx2x_init_firmware(struct bnx2x *bp)
{
- const char *fw_file_name;
+ const char *fw_file_name, *fw_file_name_v15;
struct bnx2x_fw_file_hdr *fw_hdr;
int rc;
if (bp->firmware)
return 0;
- if (CHIP_IS_E1(bp))
+ if (CHIP_IS_E1(bp)) {
fw_file_name = FW_FILE_NAME_E1;
- else if (CHIP_IS_E1H(bp))
+ fw_file_name_v15 = FW_FILE_NAME_E1_V15;
+ } else if (CHIP_IS_E1H(bp)) {
fw_file_name = FW_FILE_NAME_E1H;
- else if (!CHIP_IS_E1x(bp))
+ fw_file_name_v15 = FW_FILE_NAME_E1H_V15;
+ } else if (!CHIP_IS_E1x(bp)) {
fw_file_name = FW_FILE_NAME_E2;
- else {
+ fw_file_name_v15 = FW_FILE_NAME_E2_V15;
+ } else {
BNX2X_ERR("Unsupported chip revision\n");
return -EINVAL;
}
+
BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
if (rc) {
- BNX2X_ERR("Can't load firmware file %s\n",
- fw_file_name);
- goto request_firmware_exit;
+ BNX2X_DEV_INFO("Trying to load older fw %s\n", fw_file_name_v15);
+
+ /* try to load prev version */
+ rc = request_firmware(&bp->firmware, fw_file_name_v15, &bp->pdev->dev);
+
+ if (rc)
+ goto request_firmware_exit;
+
+ bp->fw_rev = BCM_5710_FW_REVISION_VERSION_V15;
+ } else {
+ bp->fw_cap |= FW_CAP_INVALIDATE_VF_FP_HSI;
+ bp->fw_rev = BCM_5710_FW_REVISION_VERSION;
}
+ bp->fw_major = BCM_5710_FW_MAJOR_VERSION;
+ bp->fw_minor = BCM_5710_FW_MINOR_VERSION;
+ bp->fw_eng = BCM_5710_FW_ENGINEERING_VERSION;
+
rc = bnx2x_check_firmware(bp);
if (rc) {
BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
@@ -13659,19 +13655,20 @@ static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
return bnx2x_func_state_change(bp, &func_params);
}
-static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int bnx2x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
int rc;
int drift_dir = 1;
int val, period, period1, period2, dif, dif1, dif2;
int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
- DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
+ DP(BNX2X_MSG_PTP, "PTP adjfine called, ppb = %d\n", ppb);
if (!netif_running(bp->dev)) {
DP(BNX2X_MSG_PTP,
- "PTP adjfreq called while the interface is down\n");
+ "PTP adjfine called while the interface is down\n");
return -ENETDOWN;
}
@@ -13806,7 +13803,7 @@ void bnx2x_register_phc(struct bnx2x *bp)
bp->ptp_clock_info.n_ext_ts = 0;
bp->ptp_clock_info.n_per_out = 0;
bp->ptp_clock_info.pps = 0;
- bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
+ bp->ptp_clock_info.adjfine = bnx2x_ptp_adjfine;
bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
@@ -14007,8 +14004,6 @@ init_one_freemem:
bnx2x_free_mem_bp(bp);
init_one_exit:
- bnx2x_disable_pcie_error_reporting(bp);
-
if (bp->regview)
iounmap(bp->regview);
@@ -14089,7 +14084,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
pci_set_power_state(pdev, PCI_D3hot);
}
- bnx2x_disable_pcie_error_reporting(bp);
if (remove_netdev) {
if (bp->regview)
iounmap(bp->regview);
@@ -14141,10 +14135,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
/* Stop Tx */
bnx2x_tx_disable(bp);
- /* Delete all NAPI objects */
- bnx2x_del_all_napi(bp);
- if (CNIC_LOADED(bp))
- bnx2x_del_all_napi_cnic(bp);
netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer);
@@ -14249,6 +14239,11 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
bnx2x_drain_tx_queues(bp);
bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
bnx2x_netif_stop(bp, 1);
+ bnx2x_del_all_napi(bp);
+
+ if (CNIC_LOADED(bp))
+ bnx2x_del_all_napi_cnic(bp);
+
bnx2x_free_irq(bp);
/* Report UNLOAD_DONE to MCP */
@@ -15356,11 +15351,6 @@ static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
config.tx_type, config.rx_filter);
- if (config.flags) {
- BNX2X_ERR("config.flags is reserved for future use\n");
- return -EINVAL;
- }
-
bp->hwtstamp_ioctl_called = true;
bp->tx_type = config.tx_type;
bp->rx_filter = config.rx_filter;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 5caa75b41b73..4e9215bce4ad 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -2212,7 +2212,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2381,7 +2381,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2493,7 +2493,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -2529,7 +2529,7 @@
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
@@ -6218,7 +6218,7 @@
#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 (0x1<<2)
#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (0x1<<12)
#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (0x1<<28)
-#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (0x1U<<31)
#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (0x1<<29)
#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (0x1<<30)
#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (0x1<<15)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 74a8931ce1d1..77d4cb4ad782 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -758,9 +758,18 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
+ u16 abs_fid;
+
+ abs_fid = FW_VF_HANDLE(abs_vfid);
+
/* set the VF-PF association in the FW */
- storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
- storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);
+ storm_memset_vf_to_pf(bp, abs_fid, BP_FUNC(bp));
+ storm_memset_func_en(bp, abs_fid, 1);
+
+ /* Invalidate fp_hsi version for vfs */
+ if (bp->fw_cap & FW_CAP_INVALIDATE_VF_FP_HSI)
+ REG_WR8(bp, BAR_XSTRORM_INTMEM +
+ XSTORM_ETH_FUNCTION_INFO_FP_HSI_VALID_E2_OFFSET(abs_fid), 0);
/* clear vf errors*/
bnx2x_vf_semi_clear_err(bp, abs_vfid);
@@ -786,16 +795,20 @@ static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
- struct pci_dev *dev;
struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
+ struct pci_dev *dev;
+ bool pending;
if (!vf)
return false;
dev = pci_get_domain_bus_and_slot(vf->domain, vf->bus, vf->devfn);
- if (dev)
- return bnx2x_is_pcie_pending(dev);
- return false;
+ if (!dev)
+ return false;
+ pending = bnx2x_is_pcie_pending(dev);
+ pci_dev_put(dev);
+
+ return pending;
}
int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 8c2cf5519787..02a4e557e176 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -518,7 +518,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
size_t buf_len)
{
- strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
+ strscpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
}
static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
@@ -586,7 +586,7 @@ static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
-static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
+static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr,
u8 vf_qid, bool set) {return 0; }
static inline int bnx2x_vfpf_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *params) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 0b193edb73b8..2bb133ae61c3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -849,7 +849,8 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
memcpy(old, new, sizeof(struct nig_stats));
- memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
+ BUILD_BUG_ON(sizeof(estats->shared) != sizeof(pstats->mac_stx[1]));
+ memcpy(&(estats->shared), &(pstats->mac_stx[1]),
sizeof(struct mac_stx));
estats->brb_drop_hi = pstats->brb_drop_hi;
estats->brb_drop_lo = pstats->brb_drop_lo;
@@ -1634,9 +1635,9 @@ void bnx2x_stats_init(struct bnx2x *bp)
REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
if (!CHIP_IS_E3(bp)) {
REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
- &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
+ &(bp->port.old_nig_stats.egress_mac_pkt0), 2);
REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
- &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+ &(bp->port.old_nig_stats.egress_mac_pkt1), 2);
}
/* Prepare statistics ramrod data */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index d55e63692cf3..ae93c078707b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -36,10 +36,14 @@ struct nig_stats {
u32 pbf_octets;
u32 pbf_packet;
u32 safc_inp;
- u32 egress_mac_pkt0_lo;
- u32 egress_mac_pkt0_hi;
- u32 egress_mac_pkt1_lo;
- u32 egress_mac_pkt1_hi;
+ struct_group(egress_mac_pkt0,
+ u32 egress_mac_pkt0_lo;
+ u32 egress_mac_pkt0_hi;
+ );
+ struct_group(egress_mac_pkt1,
+ u32 egress_mac_pkt1_lo;
+ u32 egress_mac_pkt1_hi;
+ );
};
enum bnx2x_stats_event {
@@ -83,6 +87,7 @@ struct bnx2x_eth_stats {
u32 no_buff_discard_hi;
u32 no_buff_discard_lo;
+ struct_group(shared,
u32 rx_stat_ifhcinbadoctets_hi;
u32 rx_stat_ifhcinbadoctets_lo;
u32 tx_stat_ifhcoutbadoctets_hi;
@@ -159,6 +164,7 @@ struct bnx2x_eth_stats {
u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
u32 tx_stat_bmac_ufl_hi;
u32 tx_stat_bmac_ufl_lo;
+ );
u32 pause_frames_received_hi;
u32 pause_frames_received_lo;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index c9129b9ba446..0657a0f5170f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -380,7 +380,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
- strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
+ strscpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
sizeof(bp->fw_ver));
if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c04ea83188e2..dcd9367f05af 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -37,6 +37,7 @@
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
+#include <net/gro.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
@@ -47,7 +48,6 @@
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
-#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
@@ -55,6 +55,8 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>
+#include <linux/align.h>
+#include <net/netdev_queues.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -173,12 +175,12 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
- { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
- { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
- { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
- { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
@@ -232,6 +234,7 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
+ ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -329,26 +332,6 @@ static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
txr->kick_pending = 0;
}
-static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
- struct bnxt_tx_ring_info *txr,
- struct netdev_queue *txq)
-{
- netif_tx_stop_queue(txq);
-
- /* netif_tx_stop_queue() must be done before checking
- * tx index in bnxt_tx_avail() below, because in
- * bnxt_tx_int(), we update tx index before checking for
- * netif_tx_queue_stopped().
- */
- smp_mb();
- if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
- netif_tx_wake_queue(txq);
- return false;
- }
-
- return true;
-}
-
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
@@ -368,7 +351,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
dev_kfree_skb_any(skb);
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
@@ -382,10 +365,14 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (net_ratelimit() && txr->kick_pending)
netif_warn(bp, tx_err, dev,
"bnxt: ring busy w/ flush pending!\n");
- if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
+ if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
+ bp->tx_wake_thresh))
return NETDEV_TX_BUSY;
}
+ if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto tx_free;
+
length = skb->len;
len = skb_headlen(skb);
last_frag = skb_shinfo(skb)->nr_frags;
@@ -485,7 +472,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
prod = NEXT_TX(prod);
tx_push->doorbell =
cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
- txr->tx_prod = prod;
+ WRITE_ONCE(txr->tx_prod, prod);
tx_buf->is_push = 1;
netdev_tx_sent_queue(txq, skb->len);
@@ -532,12 +519,9 @@ normal_tx:
u32 hdr_len;
if (skb->encapsulation)
- hdr_len = skb_inner_network_offset(skb) +
- skb_inner_network_header_len(skb) +
- inner_tcp_hdrlen(skb);
+ hdr_len = skb_inner_tcp_all_headers(skb);
else
- hdr_len = skb_transport_offset(skb) +
- tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
TX_BD_FLAGS_T_IPID |
@@ -599,7 +583,7 @@ normal_tx:
wmb();
prod = NEXT_TX(prod);
- txr->tx_prod = prod;
+ WRITE_ONCE(txr->tx_prod, prod);
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
bnxt_txr_db_kick(bp, txr, prod);
@@ -612,7 +596,8 @@ tx_done:
if (netdev_xmit_more() && !tx_buf->is_push)
bnxt_txr_db_kick(bp, txr, prod);
- bnxt_txr_netif_try_stop_queue(bp, txr, txq);
+ netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
+ bp->tx_wake_thresh);
}
return NETDEV_TX_OK;
@@ -644,7 +629,7 @@ tx_kick_pending:
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
txr->tx_buf_ring[txr->tx_prod].skb = NULL;
- atomic_long_inc(&dev->tx_dropped);
+ dev_core_stats_tx_dropped_inc(dev);
return NETDEV_TX_OK;
}
@@ -659,7 +644,6 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
for (i = 0; i < nr_pkts; i++) {
struct bnxt_sw_tx_bd *tx_buf;
- bool compl_deferred = false;
struct sk_buff *skb;
int j, last;
@@ -668,6 +652,8 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
skb = tx_buf->skb;
tx_buf->skb = NULL;
+ tx_bytes += skb->len;
+
if (tx_buf->is_push) {
tx_buf->is_push = 0;
goto next_tx_int;
@@ -688,8 +674,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ /* PTP worker takes ownership of the skb */
if (!bnxt_get_tx_ts_p5(bp, skb))
- compl_deferred = true;
+ skb = NULL;
else
atomic_inc(&bp->ptp_cfg->tx_avail);
}
@@ -698,25 +685,14 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
next_tx_int:
cons = NEXT_TX(cons);
- tx_bytes += skb->len;
- if (!compl_deferred)
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb);
}
- netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
- txr->tx_cons = cons;
-
- /* Need to make the tx_cons update visible to bnxt_start_xmit()
- * before checking for netif_tx_queue_stopped(). Without the
- * memory barrier, there is a small possibility that bnxt_start_xmit()
- * will miss it and cause the queue to be stopped forever.
- */
- smp_mb();
+ WRITE_ONCE(txr->tx_cons, cons);
- if (unlikely(netif_tx_queue_stopped(txq)) &&
- bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
- READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
- netif_tx_wake_queue(txq);
+ __netif_txq_completed_wake(txq, nr_pkts, tx_bytes,
+ bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
+ READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING);
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -736,17 +712,19 @@ static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
}
- *mapping += bp->rx_dma_offset;
return page;
}
-static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
+static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
gfp_t gfp)
{
u8 *data;
struct pci_dev *pdev = bp->pdev;
- data = kmalloc(bp->rx_buf_size, gfp);
+ if (gfp == GFP_ATOMIC)
+ data = napi_alloc_frag(bp->rx_buf_size);
+ else
+ data = netdev_alloc_frag(bp->rx_buf_size);
if (!data)
return NULL;
@@ -755,7 +733,7 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
DMA_ATTR_WEAK_ORDERING);
if (dma_mapping_error(&pdev->dev, *mapping)) {
- kfree(data);
+ skb_free_frag(data);
data = NULL;
}
return data;
@@ -775,10 +753,11 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
if (!page)
return -ENOMEM;
+ mapping += bp->rx_dma_offset;
rx_buf->data = page;
rx_buf->data_ptr = page_address(page) + bp->rx_offset;
} else {
- u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+ u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
if (!data)
return -ENOMEM;
@@ -835,33 +814,41 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
u16 sw_prod = rxr->rx_sw_agg_prod;
unsigned int offset = 0;
- if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
- page = rxr->rx_page;
- if (!page) {
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+
+ if (!page)
+ return -ENOMEM;
+
+ } else {
+ if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+ page = rxr->rx_page;
+ if (!page) {
+ page = alloc_page(gfp);
+ if (!page)
+ return -ENOMEM;
+ rxr->rx_page = page;
+ rxr->rx_page_offset = 0;
+ }
+ offset = rxr->rx_page_offset;
+ rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+ if (rxr->rx_page_offset == PAGE_SIZE)
+ rxr->rx_page = NULL;
+ else
+ get_page(page);
+ } else {
page = alloc_page(gfp);
if (!page)
return -ENOMEM;
- rxr->rx_page = page;
- rxr->rx_page_offset = 0;
}
- offset = rxr->rx_page_offset;
- rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
- if (rxr->rx_page_offset == PAGE_SIZE)
- rxr->rx_page = NULL;
- else
- get_page(page);
- } else {
- page = alloc_page(gfp);
- if (!page)
- return -ENOMEM;
- }
- mapping = dma_map_page_attrs(&pdev->dev, page, offset,
- BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
- DMA_ATTR_WEAK_ORDERING);
- if (dma_mapping_error(&pdev->dev, mapping)) {
- __free_page(page);
- return -EIO;
+ mapping = dma_map_page_attrs(&pdev->dev, page, offset,
+ BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+ DMA_ATTR_WEAK_ORDERING);
+ if (dma_mapping_error(&pdev->dev, mapping)) {
+ __free_page(page);
+ return -EIO;
+ }
}
if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
@@ -957,6 +944,38 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
rxr->rx_sw_agg_prod = sw_prod;
}
+static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 cons, void *data, u8 *data_ptr,
+ dma_addr_t dma_addr,
+ unsigned int offset_and_len)
+{
+ unsigned int len = offset_and_len & 0xffff;
+ struct page *page = data;
+ u16 prod = rxr->rx_prod;
+ struct sk_buff *skb;
+ int err;
+
+ err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+ dma_addr -= bp->rx_dma_offset;
+ dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+ skb = build_skb(page_address(page), PAGE_SIZE);
+ if (!skb) {
+ page_pool_recycle_direct(rxr->page_pool, page);
+ return NULL;
+ }
+ skb_mark_for_recycle(skb);
+ skb_reserve(skb, bp->rx_dma_offset);
+ __skb_put(skb, len);
+
+ return skb;
+}
+
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr,
u16 cons, void *data, u8 *data_ptr,
@@ -979,17 +998,17 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
dma_addr -= bp->rx_dma_offset;
dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
- page_pool_release_page(rxr->page_pool, page);
if (unlikely(!payload))
payload = eth_get_headlen(bp->dev, data_ptr, len);
skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
if (!skb) {
- __free_page(page);
+ page_pool_recycle_direct(rxr->page_pool, page);
return NULL;
}
+ skb_mark_for_recycle(skb);
off = (void *)data_ptr - page_address(page);
skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
@@ -1020,11 +1039,11 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return NULL;
}
- skb = build_skb(data, 0);
+ skb = build_skb(data, bp->rx_buf_size);
dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
if (!skb) {
- kfree(data);
+ skb_free_frag(data);
return NULL;
}
@@ -1033,22 +1052,24 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
return skb;
}
-static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
- struct bnxt_cp_ring_info *cpr,
- struct sk_buff *skb, u16 idx,
- u32 agg_bufs, bool tpa)
+static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct skb_shared_info *shinfo,
+ u16 idx, u32 agg_bufs, bool tpa,
+ struct xdp_buff *xdp)
{
struct bnxt_napi *bnapi = cpr->bnapi;
struct pci_dev *pdev = bp->pdev;
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
u16 prod = rxr->rx_agg_prod;
+ u32 i, total_frag_len = 0;
bool p5_tpa = false;
- u32 i;
if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
p5_tpa = true;
for (i = 0; i < agg_bufs; i++) {
+ skb_frag_t *frag = &shinfo->frags[i];
u16 cons, frag_len;
struct rx_agg_cmp *agg;
struct bnxt_sw_rx_agg_bd *cons_rx_buf;
@@ -1064,8 +1085,10 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
cons_rx_buf = &rxr->rx_agg_ring[cons];
- skb_fill_page_desc(skb, i, cons_rx_buf->page,
- cons_rx_buf->offset, frag_len);
+ skb_frag_off_set(frag, cons_rx_buf->offset);
+ skb_frag_size_set(frag, frag_len);
+ __skb_frag_set_page(frag, cons_rx_buf->page);
+ shinfo->nr_frags = i + 1;
__clear_bit(cons, rxr->rx_agg_bmap);
/* It is possible for bnxt_alloc_rx_page() to allocate
@@ -1076,16 +1099,14 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
page = cons_rx_buf->page;
cons_rx_buf->page = NULL;
+ if (xdp && page_is_pfmemalloc(page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
- struct skb_shared_info *shinfo;
unsigned int nr_frags;
- shinfo = skb_shinfo(skb);
nr_frags = --shinfo->nr_frags;
__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
-
- dev_kfree_skb(skb);
-
cons_rx_buf->page = page;
/* Update prod since possibly some pages have been
@@ -1093,23 +1114,62 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
*/
rxr->rx_agg_prod = prod;
bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
- return NULL;
+ return 0;
}
dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
- DMA_FROM_DEVICE,
+ bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
- skb->data_len += frag_len;
- skb->len += frag_len;
- skb->truesize += PAGE_SIZE;
-
+ total_frag_len += frag_len;
prod = NEXT_RX_AGG(prod);
}
rxr->rx_agg_prod = prod;
+ return total_frag_len;
+}
+
+static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct sk_buff *skb, u16 idx,
+ u32 agg_bufs, bool tpa)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ u32 total_frag_len = 0;
+
+ total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
+ agg_bufs, tpa, NULL);
+ if (!total_frag_len) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ skb->data_len += total_frag_len;
+ skb->len += total_frag_len;
+ skb->truesize += PAGE_SIZE * agg_bufs;
return skb;
}
+static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
+ struct bnxt_cp_ring_info *cpr,
+ struct xdp_buff *xdp, u16 idx,
+ u32 agg_bufs, bool tpa)
+{
+ struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 total_frag_len = 0;
+
+ if (!xdp_buff_has_frags(xdp))
+ shinfo->nr_frags = 0;
+
+ total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
+ idx, agg_bufs, tpa, xdp);
+ if (total_frag_len) {
+ xdp_buff_set_frags_flag(xdp);
+ shinfo->nr_frags = agg_bufs;
+ shinfo->xdp_frags_size = total_frag_len;
+ }
+ return total_frag_len;
+}
+
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u8 agg_bufs, u32 *raw_cons)
{
@@ -1612,7 +1672,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
u8 *new_data;
dma_addr_t new_mapping;
- new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
+ new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
cpr->sw_stats.rx.rx_oom_discards += 1;
@@ -1623,13 +1683,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
tpa_info->data_ptr = new_data + bp->rx_offset;
tpa_info->mapping = new_mapping;
- skb = build_skb(data, 0);
+ skb = build_skb(data, bp->rx_buf_size);
dma_unmap_single_attrs(&bp->pdev->dev, mapping,
bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
if (!skb) {
- kfree(data);
+ skb_free_frag(data);
bnxt_abort_tpa(cpr, idx, agg_bufs);
cpr->sw_stats.rx.rx_oom_discards += 1;
return NULL;
@@ -1639,7 +1699,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
+ skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
cpr->sw_stats.rx.rx_oom_discards += 1;
@@ -1724,8 +1784,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
+ bool xdp_active = false;
dma_addr_t dma_addr;
struct sk_buff *skb;
+ struct xdp_buff xdp;
u32 flags, misc;
void *data;
int rc = 0;
@@ -1834,18 +1896,39 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
len = flags >> RX_CMP_LEN_SHIFT;
dma_addr = rx_buf->mapping;
- if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
- rc = 1;
- goto next_rx;
+ if (bnxt_xdp_attached(bp, rxr)) {
+ bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
+ if (agg_bufs) {
+ u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
+ cp_cons, agg_bufs,
+ false);
+ if (!frag_len) {
+ cpr->sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ }
+ xdp_active = true;
+ }
+
+ if (xdp_active) {
+ if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
+ rc = 1;
+ goto next_rx;
+ }
}
if (len <= bp->rx_copy_thresh) {
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
- if (agg_bufs)
- bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
- agg_bufs, false);
+ if (agg_bufs) {
+ if (!xdp_active)
+ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
+ agg_bufs, false);
+ else
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+ }
cpr->sw_stats.rx.rx_oom_discards += 1;
rc = -ENOMEM;
goto next_rx;
@@ -1867,11 +1950,22 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (agg_bufs) {
- skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ if (!xdp_active) {
+ skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
+ if (!skb) {
+ cpr->sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
+ }
+ } else {
+ skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
+ if (!skb) {
+ /* we should be able to free the old skb here */
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
+ cpr->sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
+ }
}
}
@@ -1918,7 +2012,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
- RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
+ RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
if (bp->flags & BNXT_FLAG_CHIP_P5) {
u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
u64 ns, ts;
@@ -2043,13 +2137,22 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
{
- switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) {
+ u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
+
+ switch (err_type) {
case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
+ netdev_warn(bp->dev, "Pause Storm detected!\n");
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
+ netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
+ break;
default:
- netdev_err(bp->dev, "FW reported unknown error type\n");
+ netdev_err(bp->dev, "FW reported unknown error type %u\n",
+ err_type);
break;
}
}
@@ -2066,6 +2169,16 @@ static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
(BNXT_EVENT_RING_TYPE(data2) == \
ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
+#define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
+ ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
+
+#define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
+ (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
+ ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
+
+#define BNXT_PHC_BITS 48
+
static int bnxt_async_event_process(struct bnxt *bp,
struct hwrm_async_event_cmpl *cmpl)
{
@@ -2073,6 +2186,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
u32 data1 = le32_to_cpu(cmpl->event_data1);
u32 data2 = le32_to_cpu(cmpl->event_data2);
+ netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
+ event_id, data1, data2);
+
/* TODO CHIMP_FW: Define event id's for link change, error etc */
switch (event_id) {
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
@@ -2242,6 +2358,24 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_event_error_report(bp, data1, data2);
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
+ switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
+ case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
+ if (BNXT_PTP_USE_RTC(bp)) {
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u64 ns;
+
+ spin_lock_bh(&ptp->ptp_lock);
+ bnxt_ptp_update_current_time(bp);
+ ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
+ BNXT_PHC_BITS) | ptp->current_time);
+ bnxt_ptp_rtc_timecounter_init(ptp, ns);
+ spin_unlock_bh(&ptp->ptp_lock);
+ }
+ break;
+ }
+ goto async_event_process_exit;
+ }
case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
@@ -2253,7 +2387,6 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
bnxt_queue_sp_work(bp);
async_event_process_exit:
- bnxt_ulp_async_events(bp, cmpl);
return 0;
}
@@ -2419,7 +2552,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (event & BNXT_REDIRECT_EVENT)
- xdp_do_flush_map();
+ xdp_do_flush();
if (event & BNXT_TX_EVENT) {
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
@@ -2447,10 +2580,13 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
- if (bnapi->events & BNXT_AGG_EVENT)
- bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
}
+ if (bnapi->events & BNXT_AGG_EVENT) {
+ struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ }
bnapi->events = 0;
}
@@ -2622,6 +2758,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
{
struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_cp_ring_info *cpr_rx;
u32 raw_cons = cpr->cp_raw_cons;
struct bnxt *bp = bnapi->bp;
struct nqe_cn *nqcmp;
@@ -2649,7 +2786,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
if (napi_complete_done(napi, work_done))
BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
cpr->cp_raw_cons);
- return work_done;
+ goto poll_done;
}
/* The valid test of the entry must be done first before
@@ -2661,6 +2798,10 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
struct bnxt_cp_ring_info *cpr2;
+ /* No more budget for RX work */
+ if (budget && work_done >= budget && idx == BNXT_RX_HDL)
+ break;
+
cpr2 = cpr->cp_ring_arr[idx];
work_done += __bnxt_poll_work(bp, cpr2,
budget - work_done);
@@ -2675,6 +2816,17 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
cpr->cp_raw_cons = raw_cons;
BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
}
+poll_done:
+ cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
+ if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
+ struct dim_sample dim_sample = {};
+
+ dim_update_sample(cpr->event_ctr,
+ cpr_rx->rx_packets,
+ cpr_rx->rx_bytes,
+ &dim_sample);
+ net_dim(&cpr->dim, dim_sample);
+ }
return work_done;
}
@@ -2774,7 +2926,7 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
tpa_info->data = NULL;
- kfree(data);
+ skb_free_frag(data);
}
skip_rx_tpa_free:
@@ -2800,7 +2952,7 @@ skip_rx_tpa_free:
dma_unmap_single_attrs(&pdev->dev, mapping,
bp->rx_buf_use_size, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
- kfree(data);
+ skb_free_frag(data);
}
}
@@ -2815,14 +2967,23 @@ skip_rx_buf_free:
if (!page)
continue;
- dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
- BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
- DMA_ATTR_WEAK_ORDERING);
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+ BNXT_RX_PAGE_SIZE, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+ rx_agg_buf->page = NULL;
+ __clear_bit(i, rxr->rx_agg_bmap);
- rx_agg_buf->page = NULL;
- __clear_bit(i, rxr->rx_agg_bmap);
+ page_pool_recycle_direct(rxr->page_pool, page);
+ } else {
+ dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+ BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
+ DMA_ATTR_WEAK_ORDERING);
+ rx_agg_buf->page = NULL;
+ __clear_bit(i, rxr->rx_agg_bmap);
- __free_page(page);
+ __free_page(page);
+ }
}
skip_rx_agg_free:
@@ -2957,7 +3118,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
static void bnxt_free_tpa_info(struct bnxt *bp)
{
- int i;
+ int i, j;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
@@ -2965,8 +3126,10 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
kfree(rxr->rx_tpa_idx_map);
rxr->rx_tpa_idx_map = NULL;
if (rxr->rx_tpa) {
- kfree(rxr->rx_tpa[0].agg_arr);
- rxr->rx_tpa[0].agg_arr = NULL;
+ for (j = 0; j < bp->max_tpa; j++) {
+ kfree(rxr->rx_tpa[j].agg_arr);
+ rxr->rx_tpa[j].agg_arr = NULL;
+ }
}
kfree(rxr->rx_tpa);
rxr->rx_tpa = NULL;
@@ -2975,14 +3138,13 @@ static void bnxt_free_tpa_info(struct bnxt *bp)
static int bnxt_alloc_tpa_info(struct bnxt *bp)
{
- int i, j, total_aggs = 0;
+ int i, j;
bp->max_tpa = MAX_TPA;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
if (!bp->max_tpa_v2)
return 0;
bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
- total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
}
for (i = 0; i < bp->rx_nr_rings; i++) {
@@ -2996,12 +3158,12 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
if (!(bp->flags & BNXT_FLAG_CHIP_P5))
continue;
- agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
- rxr->rx_tpa[0].agg_arr = agg;
- if (!agg)
- return -ENOMEM;
- for (j = 1; j < bp->max_tpa; j++)
- rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
+ for (j = 0; j < bp->max_tpa; j++) {
+ agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+ if (!agg)
+ return -ENOMEM;
+ rxr->rx_tpa[j].agg_arr = agg;
+ }
rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
GFP_KERNEL);
if (!rxr->rx_tpa_idx_map)
@@ -3049,6 +3211,7 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
pp.pool_size = bp->rx_ring_size;
pp.nid = dev_to_node(&bp->pdev->dev);
+ pp.napi = &rxr->bnapi->napi;
pp.dev = &bp->pdev->dev;
pp.dma_dir = DMA_BIDIRECTIONAL;
@@ -3196,6 +3359,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
}
qidx = bp->tc_to_qidx[j];
ring->queue_id = bp->q_info[qidx].queue_id;
+ spin_lock_init(&txr->xdp_tx_lock);
if (i < bp->tx_nr_rings_xdp)
continue;
if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -3504,7 +3668,7 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
u8 *data;
for (i = 0; i < bp->max_tpa; i++) {
- data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
+ data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -3686,7 +3850,7 @@ static void bnxt_init_vnics(struct bnxt *bp)
if (bp->vnic_info[i].rss_hash_key) {
if (i == 0)
- prandom_bytes(vnic->rss_hash_key,
+ get_random_bytes(vnic->rss_hash_key,
HW_HASH_KEY_SIZE);
else
memcpy(vnic->rss_hash_key,
@@ -3735,7 +3899,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
/* 8 for CRC and VLAN */
rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
- rx_space = rx_size + NET_SKB_PAD +
+ rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
@@ -3776,9 +3940,17 @@ void bnxt_set_ring_params(struct bnxt *bp)
}
bp->rx_agg_ring_size = agg_ring_size;
bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
- rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
- rx_space = rx_size + NET_SKB_PAD +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ rx_space = PAGE_SIZE;
+ rx_size = PAGE_SIZE -
+ ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ } else {
+ rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
+ rx_space = rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ }
}
bp->rx_buf_use_size = rx_size;
@@ -3819,14 +3991,21 @@ void bnxt_set_ring_params(struct bnxt *bp)
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
if (page_mode) {
- if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
- return -EOPNOTSUPP;
- bp->dev->max_mtu =
- min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
bp->flags &= ~BNXT_FLAG_AGG_RINGS;
- bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+ bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
+
+ if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+ bp->flags |= BNXT_FLAG_JUMBO;
+ bp->rx_skb_func = bnxt_rx_multi_page_skb;
+ bp->dev->max_mtu =
+ min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
+ } else {
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+ bp->rx_skb_func = bnxt_rx_page_skb;
+ bp->dev->max_mtu =
+ min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
+ }
bp->rx_dir = DMA_BIDIRECTIONAL;
- bp->rx_skb_func = bnxt_rx_page_skb;
/* Disable LRO or GRO_HW */
netdev_update_features(bp->dev);
} else {
@@ -4276,7 +4455,7 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
}
}
if (irq_reinit) {
- kfree(bp->ntp_fltr_bmap);
+ bitmap_free(bp->ntp_fltr_bmap);
bp->ntp_fltr_bmap = NULL;
}
bp->ntp_fltr_count = 0;
@@ -4295,9 +4474,7 @@ static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
- sizeof(long),
- GFP_KERNEL);
+ bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
rc = -ENOMEM;
@@ -4719,8 +4896,10 @@ static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
return rc;
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
- req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
- req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
+ req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
+ req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
+ }
req->mask = cpu_to_le32(vnic->rx_mask);
return hwrm_req_send_silent(bp, req);
}
@@ -5049,7 +5228,7 @@ int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
return 1;
}
-static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
u16 i, j;
@@ -5062,8 +5241,8 @@ static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
}
}
-static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
- struct bnxt_vnic_info *vnic)
+static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
{
__le16 *ring_tbl = vnic->rss_table;
struct bnxt_rx_ring_info *rxr;
@@ -5084,12 +5263,27 @@ static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
}
}
-static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+static void
+__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
+ struct bnxt_vnic_info *vnic)
{
if (bp->flags & BNXT_FLAG_CHIP_P5)
- __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
+ bnxt_fill_hw_rss_tbl_p5(bp, vnic);
else
- __bnxt_fill_hw_rss_tbl(bp, vnic);
+ bnxt_fill_hw_rss_tbl(bp, vnic);
+
+ if (bp->rss_hash_delta) {
+ req->hash_type = cpu_to_le32(bp->rss_hash_delta);
+ if (bp->rss_hash_cfg & bp->rss_hash_delta)
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
+ else
+ req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
+ } else {
+ req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
+ }
+ req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
+ req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
+ req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
}
static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
@@ -5106,14 +5300,8 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
if (rc)
return rc;
- if (set_rss) {
- bnxt_fill_hw_rss_tbl(bp, vnic);
- req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
- req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
- req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
- req->hash_key_tbl_addr =
- cpu_to_le64(vnic->rss_hash_key_dma_addr);
- }
+ if (set_rss)
+ __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
return hwrm_req_send(bp, req);
}
@@ -5134,10 +5322,7 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
if (!set_rss)
return hwrm_req_send(bp, req);
- bnxt_fill_hw_rss_tbl(bp, vnic);
- req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
- req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
- req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
+ __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
ring_tbl_map = vnic->rss_table_dma_addr;
nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
@@ -5156,6 +5341,25 @@ exit:
return rc;
}
+static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
+{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct hwrm_vnic_rss_qcfg_output *resp;
+ struct hwrm_vnic_rss_qcfg_input *req;
+
+ if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
+ return;
+
+ /* all contexts are configured to the same hash_type, zero always exists */
+ req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+ resp = hwrm_req_hold(bp, req);
+ if (!hwrm_req_send(bp, req)) {
+ bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
+ bp->rss_hash_delta = 0;
+ }
+ hwrm_req_drop(bp, req);
+}
+
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -5166,15 +5370,19 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
if (rc)
return rc;
- req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
- VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
- VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
- req->enables =
- cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
- VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
- /* thresholds not implemented in firmware yet */
- req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
- req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
+ req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
+
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
+ } else {
+ req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
+ VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
+ req->enables |=
+ cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+ req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
+ req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
+ }
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
return hwrm_req_send(bp, req);
}
@@ -5304,7 +5512,7 @@ vnic_mru:
#endif
if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
- if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+ if (!vnic_id && bnxt_ulp_registered(bp->edev))
req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
return hwrm_req_send(bp, req);
@@ -5410,6 +5618,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
(BNXT_CHIP_P5_THOR(bp) &&
!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
+ bp->fw_cap |= BNXT_FW_CAP_RSS_HASH_TYPE_DELTA;
bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
if (bp->max_tpa_v2) {
if (BNXT_CHIP_P5_THOR(bp))
@@ -6474,8 +6684,8 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
+ u16 val, tmr, max, flags = hw_coal->flags;
u32 cmpl_params = coal_cap->cmpl_params;
- u16 val, tmr, max, flags = 0;
max = hw_coal->bufs_per_record * 128;
if (hw_coal->budget)
@@ -6518,8 +6728,6 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
}
- if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
- flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
@@ -6758,6 +6966,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
}
if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
bp->flags |= BNXT_FLAG_MULTI_HOST;
+
if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
@@ -7388,10 +7597,11 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
struct hwrm_port_mac_ptp_qcfg_output *resp;
struct hwrm_port_mac_ptp_qcfg_input *req;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ bool phc_cfg;
u8 flags;
int rc;
- if (bp->hwrm_spec_code < 0x10801) {
+ if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_THOR(bp)) {
rc = -ENODEV;
goto no_ptp;
}
@@ -7430,7 +7640,8 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
rc = -ENODEV;
goto exit;
}
- rc = bnxt_ptp_init(bp);
+ phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
+ rc = bnxt_ptp_init(bp, phc_cfg);
if (rc)
netdev_warn(bp->dev, "PTP initialization failed.\n");
exit:
@@ -7450,7 +7661,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
struct hwrm_func_qcaps_output *resp;
struct hwrm_func_qcaps_input *req;
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- u32 flags, flags_ext;
+ u32 flags, flags_ext, flags_ext2;
int rc;
rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
@@ -7488,11 +7699,17 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
+ if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ flags_ext2 = le32_to_cpu(resp->flags_ext2);
+ if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
+
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
BNXT_FW_MAJ(bp) > 217)
@@ -7527,7 +7744,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
bp->flags |= BNXT_FLAG_WOL_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
- __bnxt_hwrm_ptp_qcfg(bp);
+ bp->fw_cap |= BNXT_FW_CAP_PTP;
} else {
bnxt_ptp_clear(bp);
kfree(bp->ptp_cfg);
@@ -7575,7 +7792,7 @@ hwrm_dbg_qcaps_exit:
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
-static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc;
@@ -7668,19 +7885,6 @@ static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
BNXT_FW_HEALTH_WIN_MAP_OFF);
}
-bool bnxt_is_fw_healthy(struct bnxt *bp)
-{
- if (bp->fw_health && bp->fw_health->status_reliable) {
- u32 fw_status;
-
- fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
- if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
- return false;
- }
-
- return true;
-}
-
static void bnxt_inv_fw_health_reg(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -7774,6 +7978,19 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
return 0;
}
+static void bnxt_remap_fw_health_regs(struct bnxt *bp)
+{
+ if (!bp->fw_health)
+ return;
+
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
+ bp->fw_health->status_reliable = true;
+ bp->fw_health->resets_reliable = true;
+ } else {
+ bnxt_try_map_fw_health_reg(bp);
+ }
+}
+
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -8008,6 +8225,12 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+ bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
+ if (!bp->hwrm_cmd_max_timeout)
+ bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
+ else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
+ netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
+ bp->hwrm_cmd_max_timeout / 1000);
if (resp->hwrm_intf_maj_8b >= 1) {
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
@@ -8592,6 +8815,8 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rc = bnxt_setup_vnic(bp, 0);
if (rc)
goto err_out;
+ if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
+ bnxt_hwrm_update_rss_hash_cfg(bp);
if (bp->flags & BNXT_FLAG_RFS) {
rc = bnxt_alloc_rfs_vnics(bp);
@@ -8611,12 +8836,18 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
/* Filter for default vnic 0 */
rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
if (rc) {
- netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+ if (BNXT_VF(bp) && rc == -ENODEV)
+ netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
+ else
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
goto err_out;
}
vnic->uc_filter_count = 1;
vnic->rx_mask = 0;
+ if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
+ goto skip_rx_mask;
+
if (bp->dev->flags & IFF_BROADCAST)
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
@@ -8626,7 +8857,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (bp->dev->flags & IFF_ALLMULTI) {
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
- } else {
+ } else if (bp->dev->flags & IFF_MULTICAST) {
u32 mask = 0;
bnxt_mc_list_updated(bp, &mask);
@@ -8637,6 +8868,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (rc)
goto err_out;
+skip_rx_mask:
rc = bnxt_hwrm_set_coal(bp);
if (rc)
netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
@@ -9014,10 +9246,14 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
return rc;
}
- if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+ if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
+ bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ if (bp->tx_nr_rings_xdp)
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
+ else
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
return -ENOMEM;
}
return 0;
@@ -9143,16 +9379,16 @@ static void bnxt_init_napi(struct bnxt *bp)
cp_nr_rings--;
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
- netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
+ netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
netif_napi_add(bp->dev, &bnapi->napi,
- bnxt_poll_nitroa0, 64);
+ bnxt_poll_nitroa0);
}
} else {
bnapi = bp->bnapi[0];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
+ netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
}
}
@@ -9226,7 +9462,7 @@ void bnxt_tx_enable(struct bnxt *bp)
/* Make sure napi polls see @dev_state change */
synchronize_net();
netif_tx_wake_all_queues(bp->dev);
- if (bp->link_info.link_up)
+ if (BNXT_LINK_IS_UP(bp))
netif_carrier_on(bp->dev);
}
@@ -9256,7 +9492,7 @@ static char *bnxt_report_fec(struct bnxt_link_info *link_info)
void bnxt_report_link(struct bnxt *bp)
{
- if (bp->link_info.link_up) {
+ if (BNXT_LINK_IS_UP(bp)) {
const char *signal = "";
const char *flow_ctrl;
const char *duplex;
@@ -9342,7 +9578,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_phy_qcaps_exit;
- bp->phy_flags = resp->flags;
+ bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
struct ethtool_eee *eee = &bp->eee;
u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
@@ -9392,7 +9628,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
struct bnxt_link_info *link_info = &bp->link_info;
struct hwrm_port_phy_qcfg_output *resp;
struct hwrm_port_phy_qcfg_input *req;
- u8 link_up = link_info->link_up;
+ u8 link_state = link_info->link_state;
bool support_changed = false;
int rc;
@@ -9404,6 +9640,10 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
rc = hwrm_req_send(bp, req);
if (rc) {
hwrm_req_drop(bp, req);
+ if (BNXT_VF(bp) && rc == -ENODEV) {
+ netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
+ rc = 0;
+ }
return rc;
}
@@ -9489,14 +9729,14 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
/* TODO: need to add more logic to report VF link */
if (chng_link_state) {
if (link_info->phy_link_status == BNXT_LINK_LINK)
- link_info->link_up = 1;
+ link_info->link_state = BNXT_LINK_STATE_UP;
else
- link_info->link_up = 0;
- if (link_up != link_info->link_up)
+ link_info->link_state = BNXT_LINK_STATE_DOWN;
+ if (link_state != link_info->link_state)
bnxt_report_link(bp);
} else {
- /* alwasy link down if not require to update link state */
- link_info->link_up = 0;
+ /* always link down if not required to update link state */
+ link_info->link_state = BNXT_LINK_STATE_DOWN;
}
hwrm_req_drop(bp, req);
@@ -9696,7 +9936,18 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return rc;
req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
- return hwrm_req_send(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (!rc) {
+ mutex_lock(&bp->link_lock);
+ /* Device is not obliged to link down in certain scenarios, even
+ * when forced. Setting the state unknown is consistent with
+ * driver startup and will force link state to be reported
+ * during subsequent open based on PORT_PHY_QCFG.
+ */
+ bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
+ mutex_unlock(&bp->link_lock);
+ }
+ return rc;
}
static int bnxt_fw_reset_via_optee(struct bnxt *bp)
@@ -9745,17 +9996,12 @@ static int bnxt_try_recover_fw(struct bnxt *bp)
return -ENODEV;
}
-int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
+static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int rc;
if (!BNXT_NEW_RM(bp))
- return 0; /* no resource reservations required */
-
- rc = bnxt_hwrm_func_resc_qcaps(bp, true);
- if (rc)
- netdev_err(bp->dev, "resc_qcaps failed\n");
+ return; /* no resource reservations required */
hw_resc->resv_cp_rings = 0;
hw_resc->resv_stat_ctxs = 0;
@@ -9768,6 +10014,20 @@ int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
bp->tx_nr_rings = 0;
bp->rx_nr_rings = 0;
}
+}
+
+int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
+{
+ int rc;
+
+ if (!BNXT_NEW_RM(bp))
+ return 0; /* no resource reservations required */
+
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ if (rc)
+ netdev_err(bp->dev, "resc_qcaps failed\n");
+
+ bnxt_clear_reservations(bp, fw_reset);
return rc;
}
@@ -9822,10 +10082,11 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
resc_reinit = true;
- if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
+ if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
+ test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
fw_reset = true;
- else if (bp->fw_health && !bp->fw_health->status_reliable)
- bnxt_try_map_fw_health_reg(bp);
+ else
+ bnxt_remap_fw_health_regs(bp);
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
@@ -10127,7 +10388,7 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
/* The last close may have shutdown the link, so need to call
* PHY_CFG to bring it back up.
*/
- if (!bp->link_info.link_up)
+ if (!BNXT_LINK_IS_UP(bp))
update_link = true;
if (!bnxt_eee_config_ok(bp))
@@ -10249,6 +10510,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (irq_re_init)
udp_tunnel_nic_reset_ntf(bp->dev);
+ if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
+ if (!static_key_enabled(&bnxt_xdp_locking_key))
+ static_branch_enable(&bnxt_xdp_locking_key);
+ } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
+ static_branch_disable(&bnxt_xdp_locking_key);
+ }
set_bit(BNXT_STATE_OPEN, &bp->state);
bnxt_enable_int(bp);
/* Enable TX queues */
@@ -10262,6 +10529,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
+ bnxt_ptp_init_rtc(bp, true);
+ bnxt_ptp_cfg_tstamp_filters(bp);
return 0;
open_err_irq:
@@ -10304,13 +10573,15 @@ int bnxt_half_open_nic(struct bnxt *bp)
goto half_open_err;
}
- rc = bnxt_alloc_mem(bp, false);
+ rc = bnxt_alloc_mem(bp, true);
if (rc) {
netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
goto half_open_err;
}
- rc = bnxt_init_nic(bp, false);
+ set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+ rc = bnxt_init_nic(bp, true);
if (rc) {
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
goto half_open_err;
}
@@ -10318,7 +10589,7 @@ int bnxt_half_open_nic(struct bnxt *bp)
half_open_err:
bnxt_free_skbs(bp);
- bnxt_free_mem(bp, false);
+ bnxt_free_mem(bp, true);
dev_close(bp->dev);
return rc;
}
@@ -10328,9 +10599,10 @@ half_open_err:
*/
void bnxt_half_close_nic(struct bnxt *bp)
{
- bnxt_hwrm_resource_free(bp, false, false);
+ bnxt_hwrm_resource_free(bp, false, true);
bnxt_free_skbs(bp);
- bnxt_free_mem(bp, false);
+ bnxt_free_mem(bp, true);
+ clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
}
void bnxt_reenable_sriov(struct bnxt *bp)
@@ -10404,7 +10676,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
while (bnxt_drv_busy(bp))
msleep(20);
- /* Flush rings and and disable interrupts */
+ /* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
@@ -10746,7 +11018,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
- } else {
+ } else if (dev->flags & IFF_MULTICAST) {
mc_update = bnxt_mc_list_updated(bp, &mask);
}
@@ -10802,21 +11074,31 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
if (rc) {
- netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
- rc);
+ if (BNXT_VF(bp) && rc == -ENODEV) {
+ if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
+ netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
+ else
+ netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
+ rc = 0;
+ } else {
+ netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
+ }
vnic->uc_filter_count = i;
return rc;
}
}
+ if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
+ netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
skip_uc:
if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
!bnxt_promisc_ok(bp))
vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
- if (rc && vnic->mc_list_count) {
+ if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
rc);
+ vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
@@ -10873,7 +11155,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_CHIP_P5)
return bnxt_rfs_supported(bp);
- if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
+ if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
return false;
vnics = 1 + bp->rx_nr_rings;
@@ -10918,7 +11200,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
- if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+ if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
if (!(features & NETIF_F_GRO))
@@ -11014,6 +11296,7 @@ static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
u8 **nextp)
{
struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
+ struct hop_jumbo_hdr *jhdr;
int hdr_count = 0;
u8 *nexthdr;
int start;
@@ -11041,9 +11324,27 @@ static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
if (hdrlen > 64)
return false;
+
+ /* The ext header may be a hop-by-hop header inserted for
+ * big TCP purposes. This will be removed before sending
+ * from the NIC, so do not count it.
+ */
+ if (*nexthdr == NEXTHDR_HOP) {
+ if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
+ goto increment_hdr;
+
+ jhdr = (struct hop_jumbo_hdr *)hp;
+ if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
+ jhdr->nexthdr != IPPROTO_TCP)
+ goto increment_hdr;
+
+ goto next_hdr;
+ }
+increment_hdr:
+ hdr_count++;
+next_hdr:
nexthdr = &hp->nexthdr;
start += hdrlen;
- hdr_count++;
}
if (nextp) {
/* Caller will check inner protocol */
@@ -11345,7 +11646,7 @@ static void bnxt_timer(struct timer_list *t)
if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
bnxt_fw_health_check(bp);
- if (bp->link_info.link_up && bp->stats_coal_ticks) {
+ if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
@@ -11372,6 +11673,11 @@ static void bnxt_timer(struct timer_list *t)
}
}
+ if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) {
+ set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
+ bnxt_queue_sp_work(bp);
+ }
+
if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
netif_carrier_ok(dev)) {
set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
@@ -11875,7 +12181,13 @@ static void bnxt_cleanup_pci(struct bnxt *bp)
static void bnxt_init_dflt_coal(struct bnxt *bp)
{
+ struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
struct bnxt_coal *coal;
+ u16 flags = 0;
+
+ if (coal_cap->cmpl_params &
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
+ flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
/* Tick values in micro seconds.
* 1 coal_buf x bufs_per_record = 1 completion record.
@@ -11888,6 +12200,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
coal->idle_thresh = 50;
coal->bufs_per_record = 2;
coal->budget = 64; /* NAPI budget */
+ coal->flags = flags;
coal = &bp->tx_coal;
coal->coal_ticks = 28;
@@ -11895,6 +12208,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
coal->coal_ticks_irq = 2;
coal->coal_bufs_irq = 2;
coal->bufs_per_record = 1;
+ coal->flags = flags;
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
@@ -11959,6 +12273,8 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
bnxt_hwrm_vnic_qcaps(bp);
bnxt_hwrm_port_led_qcaps(bp);
bnxt_ethtool_init(bp);
+ if (bp->fw_cap & BNXT_FW_CAP_PTP)
+ __bnxt_hwrm_ptp_qcfg(bp);
bnxt_dcb_init(bp);
return 0;
}
@@ -11970,6 +12286,8 @@ static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
+ if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
+ bp->rss_hash_delta = bp->rss_hash_cfg;
if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
@@ -12033,11 +12351,6 @@ int bnxt_fw_init_one(struct bnxt *bp)
if (rc)
return rc;
- /* In case fw capabilities have changed, destroy the unneeded
- * reporters and create newly capable ones.
- */
- bnxt_dl_fw_reporters_destroy(bp, false);
- bnxt_dl_fw_reporters_create(bp);
bnxt_fw_init_one_p3(bp);
return 0;
}
@@ -12368,8 +12681,6 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
goto init_err_release;
}
- pci_enable_pcie_error_reporting(pdev);
-
INIT_WORK(&bp->sp_task, bnxt_sp_task);
INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
@@ -12381,8 +12692,6 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
- bnxt_init_dflt_coal(bp);
-
timer_setup(&bp->timer, bnxt_timer, 0);
bp->current_interval = BNXT_TIMER_INTERVAL;
@@ -12628,8 +12937,8 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
rcu_read_lock();
hlist_for_each_entry_rcu(fltr, head, hash) {
if (bnxt_fltr_match(fltr, new_fltr)) {
+ rc = fltr->sw_id;
rcu_read_unlock();
- rc = 0;
goto err_free;
}
}
@@ -12807,13 +13116,6 @@ int bnxt_get_port_parent_id(struct net_device *dev,
return 0;
}
-static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
-{
- struct bnxt *bp = netdev_priv(dev);
-
- return &bp->dl_port;
-}
-
static const struct net_device_ops bnxt_netdev_ops = {
.ndo_open = bnxt_open,
.ndo_start_xmit = bnxt_start_xmit,
@@ -12845,7 +13147,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
- .ndo_get_devlink_port = bnxt_get_devlink_port,
};
static void bnxt_remove_one(struct pci_dev *pdev)
@@ -12856,11 +13157,9 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
- if (BNXT_PF(bp))
- devlink_port_type_clear(&bp->dl_port);
+ bnxt_rdma_aux_device_uninit(bp);
bnxt_ptp_clear(bp);
- pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
/* Flush any pending tasks */
@@ -12868,7 +13167,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
cancel_delayed_work_sync(&bp->fw_reset_task);
bp->sp_event = 0;
- bnxt_dl_fw_reporters_destroy(bp, true);
+ bnxt_dl_fw_reporters_destroy(bp);
bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
@@ -12877,8 +13176,6 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_free_hwrm_resources(bp);
bnxt_ethtool_free(bp);
bnxt_dcb_free(bp);
- kfree(bp->edev);
- bp->edev = NULL;
kfree(bp->ptp_cfg);
bp->ptp_cfg = NULL;
kfree(bp->fw_health);
@@ -13072,7 +13369,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
rc = __bnxt_reserve_rings(bp);
- if (rc)
+ if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
if (sh)
@@ -13081,7 +13378,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
/* Rings may have been trimmed, re-reserve the trimmed rings. */
if (bnxt_need_reserve_rings(bp)) {
rc = __bnxt_reserve_rings(bp);
- if (rc)
+ if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "2nd rings reservation failed.\n");
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
}
@@ -13107,7 +13404,10 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
bnxt_clear_int_mode(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
- netdev_err(bp->dev, "Not enough rings available.\n");
+ if (BNXT_VF(bp) && rc == -ENODEV)
+ netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
+ else
+ netdev_err(bp->dev, "Not enough rings available.\n");
goto init_dflt_ring_err;
}
rc = bnxt_init_int_mode(bp);
@@ -13115,10 +13415,9 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
goto init_dflt_ring_err;
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
- bp->flags |= BNXT_FLAG_RFS;
- bp->dev->features |= NETIF_F_NTUPLE;
- }
+
+ bnxt_set_dflt_rfs(bp);
+
init_dflt_ring_err:
bnxt_ulp_irq_restart(bp, rc);
return rc;
@@ -13276,6 +13575,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (bnxt_vf_pciid(bp->board_idx))
bp->flags |= BNXT_FLAG_VF;
+ /* No devlink port registration in case of a VF */
+ if (BNXT_PF(bp))
+ SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
+
if (pdev->msix_cap)
bp->flags |= BNXT_FLAG_MSIX_CAP;
@@ -13356,9 +13659,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features &= ~NETIF_F_LRO;
dev->priv_flags |= IFF_UNICAST_FLT;
+ netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG;
+
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
- mutex_init(&bp->sriov_lock);
#endif
if (BNXT_SUPPORTS_TPA(bp)) {
bp->gro_func = bnxt_gro_func_5730x;
@@ -13395,13 +13702,19 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_ring_params(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
- netdev_err(bp->dev, "Not enough rings available.\n");
- rc = -ENOMEM;
+ if (BNXT_VF(bp) && rc == -ENODEV) {
+ netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
+ } else {
+ netdev_err(bp->dev, "Not enough rings available.\n");
+ rc = -ENOMEM;
+ }
goto init_err_pci_clean;
}
bnxt_fw_init_one_p3(bp);
+ bnxt_init_dflt_coal(bp);
+
if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
bp->flags |= BNXT_FLAG_STRIP_VLAN;
@@ -13439,15 +13752,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_cleanup;
- if (BNXT_PF(bp))
- devlink_port_type_eth_set(&bp->dl_port, bp->dev);
bnxt_dl_fw_reporters_create(bp);
+ bnxt_rdma_aux_device_init(bp);
+
bnxt_print_device_info(bp);
pci_save_state(pdev);
- return 0;
+ return 0;
init_err_cleanup:
bnxt_dl_unregister(bp);
init_err_dl:
@@ -13491,7 +13804,6 @@ static void bnxt_shutdown(struct pci_dev *pdev)
if (netif_running(dev))
dev_close(dev);
- bnxt_ulp_shutdown(bp);
bnxt_clear_int_mode(bp);
pci_disable_device(pdev);
@@ -13640,7 +13952,9 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
- int err = 0, off;
+ int retry = 0;
+ int err = 0;
+ int off;
netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -13668,11 +13982,36 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
pci_save_state(pdev);
+ bnxt_inv_fw_health_reg(bp);
+ bnxt_try_map_fw_health_reg(bp);
+
+ /* In some PCIe AER scenarios, firmware may take up to
+ * 10 seconds to become ready in the worst case.
+ */
+ do {
+ err = bnxt_try_recover_fw(bp);
+ if (!err)
+ break;
+ retry++;
+ } while (retry < BNXT_FW_SLOT_RESET_RETRY);
+
+ if (err) {
+ dev_err(&pdev->dev, "Firmware not ready\n");
+ goto reset_exit;
+ }
+
err = bnxt_hwrm_func_reset(bp);
if (!err)
result = PCI_ERS_RESULT_RECOVERED;
+
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+ err = bnxt_init_int_mode(bp);
+ bnxt_ulp_irq_restart(bp, err);
}
+reset_exit:
+ bnxt_clear_reservations(bp, true);
rtnl_unlock();
return result;
@@ -13728,8 +14067,16 @@ static struct pci_driver bnxt_pci_driver = {
static int __init bnxt_init(void)
{
+ int err;
+
bnxt_debug_init();
- return pci_register_driver(&bnxt_pci_driver);
+ err = pci_register_driver(&bnxt_pci_driver);
+ if (err) {
+ bnxt_debug_exit();
+ return err;
+ }
+
+ return 0;
}
static void __exit bnxt_exit(void)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 4c9507d82fd0..080e73496066 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
#include <linux/crash_dump.h>
+#include <linux/auxiliary_bus.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>
#include <net/xdp.h>
@@ -591,9 +592,20 @@ struct nqe_cn {
#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
#define BNXT_MAX_MTU 9500
-#define BNXT_MAX_PAGE_MODE_MTU \
+
+/* First RX buffer page in XDP multi-buf mode
+ *
+ * +-------------------------------------------------------------------------+
+ * | XDP_PACKET_HEADROOM | bp->rx_buf_use_size | skb_shared_info|
+ * | (bp->rx_dma_offset) | | |
+ * +-------------------------------------------------------------------------+
+ */
+#define BNXT_MAX_PAGE_MODE_MTU_SBUF \
((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN - \
XDP_PACKET_HEADROOM)
+#define BNXT_MAX_PAGE_MODE_MTU \
+ (BNXT_MAX_PAGE_MODE_MTU_SBUF - \
+ SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
#define BNXT_MIN_PKT_SIZE 52
@@ -698,13 +710,12 @@ struct bnxt_sw_tx_bd {
};
DEFINE_DMA_UNMAP_ADDR(mapping);
DEFINE_DMA_UNMAP_LEN(len);
+ struct page *page;
u8 is_gso;
u8 is_push;
u8 action;
- union {
- unsigned short nr_frags;
- u16 rx_prod;
- };
+ unsigned short nr_frags;
+ u16 rx_prod;
};
struct bnxt_sw_rx_bd {
@@ -800,6 +811,8 @@ struct bnxt_tx_ring_info {
u32 dev_state;
struct bnxt_ring_struct tx_ring_struct;
+ /* Synchronize simultaneous xdp_xmit on same ring */
+ spinlock_t xdp_tx_lock;
};
#define BNXT_LEGACY_COAL_CMPL_PARAMS \
@@ -847,6 +860,7 @@ struct bnxt_coal {
u16 idle_thresh;
u8 bufs_per_record;
u8 budget;
+ u16 flags;
};
struct bnxt_tpa_info {
@@ -1174,7 +1188,11 @@ struct bnxt_link_info {
#define BNXT_PHY_STATE_ENABLED 0
#define BNXT_PHY_STATE_DISABLED 1
- u8 link_up;
+ u8 link_state;
+#define BNXT_LINK_STATE_UNKNOWN 0
+#define BNXT_LINK_STATE_DOWN 1
+#define BNXT_LINK_STATE_UP 2
+#define BNXT_LINK_IS_UP(bp) ((bp)->link_info.link_state == BNXT_LINK_STATE_UP)
u8 duplex;
#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
@@ -1208,6 +1226,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
#define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
+#define BNXT_LINK_SPEED_200GB PORT_PHY_QCFG_RESP_LINK_SPEED_200GB
u16 support_speeds;
u16 support_pam4_speeds;
u16 auto_link_speeds; /* fw adv setting */
@@ -1612,6 +1631,13 @@ struct bnxt_fw_health {
#define BNXT_FW_RETRY 5
#define BNXT_FW_IF_RETRY 10
+#define BNXT_FW_SLOT_RESET_RETRY 4
+
+struct bnxt_aux_priv {
+ struct auxiliary_device aux_dev;
+ struct bnxt_en_dev *edev;
+ int id;
+};
enum board_idx {
BCM57301,
@@ -1809,6 +1835,7 @@ struct bnxt {
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
(!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
(bp)->max_tpa_v2) && !is_kdump_kernel())
+#define BNXT_RX_JUMBO_MODE(bp) ((bp)->flags & BNXT_FLAG_JUMBO)
#define BNXT_CHIP_SR2(bp) \
((bp)->chip_num == CHIP_NUM_58818)
@@ -1833,6 +1860,7 @@ struct bnxt {
#define BNXT_CHIP_P4_PLUS(bp) \
(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+ struct bnxt_aux_priv *aux_priv;
struct bnxt_en_dev *edev;
struct bnxt_napi **bnapi;
@@ -1890,6 +1918,7 @@ struct bnxt {
u16 *rss_indir_tbl;
u16 rss_indir_tbl_entries;
u32 rss_hash_cfg;
+ u32 rss_hash_delta;
u16 max_mtu;
u8 max_tc;
@@ -1915,10 +1944,12 @@ struct bnxt {
#define BNXT_STATE_DRV_REGISTERED 7
#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
#define BNXT_STATE_NAPI_DISABLED 9
+#define BNXT_STATE_L2_FILTER_RETRY 10
#define BNXT_STATE_FW_ACTIVATE 11
#define BNXT_STATE_RECOVER 12
#define BNXT_STATE_FW_NON_FATAL_COND 13
#define BNXT_STATE_FW_ACTIVATE_RESET 14
+#define BNXT_STATE_HALF_OPEN 15 /* For offline ethtool tests */
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
@@ -1938,35 +1969,41 @@ struct bnxt {
u32 msg_enable;
- u32 fw_cap;
- #define BNXT_FW_CAP_SHORT_CMD 0x00000001
- #define BNXT_FW_CAP_LLDP_AGENT 0x00000002
- #define BNXT_FW_CAP_DCBX_AGENT 0x00000004
- #define BNXT_FW_CAP_NEW_RM 0x00000008
- #define BNXT_FW_CAP_IF_CHANGE 0x00000010
- #define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
- #define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
- #define BNXT_FW_CAP_TRUSTED_VF 0x00000800
- #define BNXT_FW_CAP_ERROR_RECOVERY 0x00002000
- #define BNXT_FW_CAP_PKG_VER 0x00004000
- #define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000
- #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 0x00010000
- #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000
- #define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
- #define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000
- #define BNXT_FW_CAP_HOT_RESET 0x00200000
- #define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
- #define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
- #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
- #define BNXT_FW_CAP_LIVEPATCH 0x08000000
- #define BNXT_FW_CAP_PTP_PPS 0x10000000
- #define BNXT_FW_CAP_HOT_RESET_IF 0x20000000
- #define BNXT_FW_CAP_RING_MONITOR 0x40000000
- #define BNXT_FW_CAP_DBG_QCAPS 0x80000000
+ u64 fw_cap;
+ #define BNXT_FW_CAP_SHORT_CMD BIT_ULL(0)
+ #define BNXT_FW_CAP_LLDP_AGENT BIT_ULL(1)
+ #define BNXT_FW_CAP_DCBX_AGENT BIT_ULL(2)
+ #define BNXT_FW_CAP_NEW_RM BIT_ULL(3)
+ #define BNXT_FW_CAP_IF_CHANGE BIT_ULL(4)
+ #define BNXT_FW_CAP_KONG_MB_CHNL BIT_ULL(7)
+ #define BNXT_FW_CAP_OVS_64BIT_HANDLE BIT_ULL(10)
+ #define BNXT_FW_CAP_TRUSTED_VF BIT_ULL(11)
+ #define BNXT_FW_CAP_ERROR_RECOVERY BIT_ULL(13)
+ #define BNXT_FW_CAP_PKG_VER BIT_ULL(14)
+ #define BNXT_FW_CAP_CFA_ADV_FLOW BIT_ULL(15)
+ #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 BIT_ULL(16)
+ #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED BIT_ULL(17)
+ #define BNXT_FW_CAP_EXT_STATS_SUPPORTED BIT_ULL(18)
+ #define BNXT_FW_CAP_RSS_HASH_TYPE_DELTA BIT_ULL(19)
+ #define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT_ULL(20)
+ #define BNXT_FW_CAP_HOT_RESET BIT_ULL(21)
+ #define BNXT_FW_CAP_PTP_RTC BIT_ULL(22)
+ #define BNXT_FW_CAP_RX_ALL_PKT_TS BIT_ULL(23)
+ #define BNXT_FW_CAP_VLAN_RX_STRIP BIT_ULL(24)
+ #define BNXT_FW_CAP_VLAN_TX_INSERT BIT_ULL(25)
+ #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED BIT_ULL(26)
+ #define BNXT_FW_CAP_LIVEPATCH BIT_ULL(27)
+ #define BNXT_FW_CAP_PTP_PPS BIT_ULL(28)
+ #define BNXT_FW_CAP_HOT_RESET_IF BIT_ULL(29)
+ #define BNXT_FW_CAP_RING_MONITOR BIT_ULL(30)
+ #define BNXT_FW_CAP_DBG_QCAPS BIT_ULL(31)
+ #define BNXT_FW_CAP_PTP BIT_ULL(32)
u32 fw_dbg_cap;
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
+#define BNXT_PTP_USE_RTC(bp) (!BNXT_MH(bp) && \
+ ((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC))
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u16 hwrm_cmd_kong_seq;
@@ -1985,7 +2022,8 @@ struct bnxt {
u16 hwrm_max_req_len;
u16 hwrm_max_ext_req_len;
- int hwrm_cmd_timeout;
+ unsigned int hwrm_cmd_timeout;
+ unsigned int hwrm_cmd_max_timeout;
struct mutex hwrm_cmd_lock; /* serialize hwrm messages */
struct hwrm_ver_get_output ver_resp;
#define FW_VER_STR_LEN 32
@@ -2063,12 +2101,6 @@ struct bnxt {
wait_queue_head_t sriov_cfg_wait;
bool sriov_cfg;
#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000)
-
- /* lock to protect VF-rep creation/cleanup via
- * multiple paths such as ->sriov_configure() and
- * devlink ->eswitch_mode_set()
- */
- struct mutex sriov_lock;
#endif
#if BITS_PER_LONG == 32
@@ -2095,8 +2127,8 @@ struct bnxt {
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
- /* copied from flags in hwrm_port_phy_qcaps_output */
- u8 phy_flags;
+ /* copied from flags and flags2 in hwrm_port_phy_qcaps_output */
+ u32 phy_flags;
#define BNXT_PHY_FL_EEE_CAP PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED
#define BNXT_PHY_FL_EXT_LPBK PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED
#define BNXT_PHY_FL_AN_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED
@@ -2105,6 +2137,9 @@ struct bnxt {
#define BNXT_PHY_FL_NO_PHY_LPBK PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
#define BNXT_PHY_FL_FW_MANAGED_LKDN PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN
#define BNXT_PHY_FL_NO_FCS PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS
+#define BNXT_PHY_FL_NO_PAUSE (PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED << 8)
+#define BNXT_PHY_FL_NO_PFC (PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED << 8)
+#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8)
u8 num_tests;
struct bnxt_test_info *test_info;
@@ -2121,6 +2156,7 @@ struct bnxt {
struct bpf_prog *xdp_prog;
struct bnxt_ptp_cfg *ptp_cfg;
+ u8 ptp_all_rx_tstamp;
/* devlink interface and vf-rep structs */
struct devlink *dl;
@@ -2195,13 +2231,12 @@ struct bnxt {
#define SFF_MODULE_ID_QSFP28 0x11
#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
-static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+static inline u32 bnxt_tx_avail(struct bnxt *bp,
+ const struct bnxt_tx_ring_info *txr)
{
- /* Tell compiler to fetch tx indices from memory. */
- barrier();
+ u32 used = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
- return bp->tx_ring_size -
- ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
+ return bp->tx_ring_size - (used & bp->tx_ring_mask);
}
static inline void bnxt_writeq(struct bnxt *bp, u64 val,
@@ -2302,7 +2337,7 @@ int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
-bool bnxt_is_fw_healthy(struct bnxt *bp);
+int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
index d3cb2f21946d..c06789882036 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -32,7 +32,7 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
return -ENOMEM;
}
- hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT);
+ hwrm_req_timeout(bp, msg, bp->hwrm_cmd_max_timeout);
cmn_resp = hwrm_req_hold(bp, msg);
resp = cmn_resp;
@@ -125,7 +125,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
if (rc)
return rc;
- hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT);
+ hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
req->component_id = cpu_to_le16(component_id);
req->segment_id = cpu_to_le16(segment_id);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 217ff597cdf2..caab3d626a2a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -627,7 +627,8 @@ static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
int rc;
if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
- !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
+ !(bp->dcbx_cap & DCB_CAP_DCBX_HOST) ||
+ (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
return -EINVAL;
if (!my_pfc) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 951c4c569a9b..8b3e7697390f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
#include <net/devlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -19,6 +20,7 @@
#include "bnxt_ulp.h"
#include "bnxt_ptp.h"
#include "bnxt_coredump.h"
+#include "bnxt_nvm_defs.h"
static void __bnxt_fw_recover(struct bnxt *bp)
{
@@ -44,7 +46,7 @@ bnxt_dl_flash_update(struct devlink *dl,
}
devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
- rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0);
+ rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0, extack);
if (!rc)
devlink_flash_update_status_notify(dl, "Flashing done", NULL, 0, 0);
else
@@ -240,37 +242,37 @@ static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
.recover = bnxt_fw_recover,
};
-void bnxt_dl_fw_reporters_create(struct bnxt *bp)
+static struct devlink_health_reporter *
+__bnxt_dl_reporter_create(struct bnxt *bp,
+ const struct devlink_health_reporter_ops *ops)
{
- struct bnxt_fw_health *health = bp->fw_health;
-
- if (!health || health->fw_reporter)
- return;
+ struct devlink_health_reporter *reporter;
- health->fw_reporter =
- devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
- 0, bp);
- if (IS_ERR(health->fw_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
- PTR_ERR(health->fw_reporter));
- health->fw_reporter = NULL;
- bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ reporter = devlink_health_reporter_create(bp->dl, ops, 0, bp);
+ if (IS_ERR(reporter)) {
+ netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n",
+ ops->name, PTR_ERR(reporter));
+ return NULL;
}
+
+ return reporter;
}
-void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
+void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
- struct bnxt_fw_health *health = bp->fw_health;
+ struct bnxt_fw_health *fw_health = bp->fw_health;
- if (!health)
- return;
+ if (fw_health && !fw_health->fw_reporter)
+ fw_health->fw_reporter = __bnxt_dl_reporter_create(bp, &bnxt_dl_fw_reporter_ops);
+}
- if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
- return;
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
- if (health->fw_reporter) {
- devlink_health_reporter_destroy(health->fw_reporter);
- health->fw_reporter = NULL;
+ if (fw_health && fw_health->fw_reporter) {
+ devlink_health_reporter_destroy(fw_health->fw_reporter);
+ fw_health->fw_reporter = NULL;
}
}
@@ -366,6 +368,16 @@ bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
}
}
+/* Live patch status in NVM */
+#define BNXT_LIVEPATCH_NOT_INSTALLED 0
+#define BNXT_LIVEPATCH_INSTALLED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL
+#define BNXT_LIVEPATCH_REMOVED FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE
+#define BNXT_LIVEPATCH_MASK (FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \
+ FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE)
+#define BNXT_LIVEPATCH_ACTIVATED BNXT_LIVEPATCH_MASK
+
+#define BNXT_LIVEPATCH_STATE(flags) ((flags) & BNXT_LIVEPATCH_MASK)
+
static int
bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
{
@@ -373,8 +385,9 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
struct hwrm_fw_livepatch_query_input *query_req;
struct hwrm_fw_livepatch_output *patch_resp;
struct hwrm_fw_livepatch_input *patch_req;
+ u16 flags, live_patch_state;
+ bool activated = false;
u32 installed = 0;
- u16 flags;
u8 target;
int rc;
@@ -393,7 +406,6 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
hwrm_req_drop(bp, query_req);
return rc;
}
- patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
patch_resp = hwrm_req_hold(bp, patch_req);
@@ -406,12 +418,20 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
}
flags = le16_to_cpu(query_resp->status_flags);
- if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL)
+ live_patch_state = BNXT_LIVEPATCH_STATE(flags);
+
+ if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED)
continue;
- if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) &&
- !strncmp(query_resp->active_ver, query_resp->install_ver,
- sizeof(query_resp->active_ver)))
+
+ if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) {
+ activated = true;
continue;
+ }
+
+ if (live_patch_state == BNXT_LIVEPATCH_INSTALLED)
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
+ else if (live_patch_state == BNXT_LIVEPATCH_REMOVED)
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE;
patch_req->fw_target = target;
rc = hwrm_req_send(bp, patch_req);
@@ -423,8 +443,13 @@ bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
}
if (!rc && !installed) {
- NL_SET_ERR_MSG_MOD(extack, "No live patches found");
- rc = -ENOENT;
+ if (activated) {
+ NL_SET_ERR_MSG_MOD(extack, "Live patch already activated");
+ rc = -EEXIST;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "No live patches found");
+ rc = -ENOENT;
+ }
}
hwrm_req_drop(bp, query_req);
hwrm_req_drop(bp, patch_req);
@@ -586,6 +611,64 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
return rc;
}
+static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
+{
+ bool rc = false;
+ u32 datalen;
+ u16 index;
+ u8 *buf;
+
+ if (bnxt_find_nvram_item(bp->dev, BNX_DIR_TYPE_VPD,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, NULL, &datalen) || !datalen) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd entry error");
+ return false;
+ }
+
+ buf = kzalloc(datalen, GFP_KERNEL);
+ if (!buf) {
+ NL_SET_ERR_MSG_MOD(extack, "insufficient memory for nvm test");
+ return false;
+ }
+
+ if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error");
+ goto done;
+ }
+
+ if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST,
+ BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) {
+ NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error");
+ goto done;
+ }
+
+ rc = true;
+
+done:
+ kfree(buf);
+ return rc;
+}
+
+static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id,
+ struct netlink_ext_ack *extack)
+{
+ return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
+}
+
+static enum devlink_selftest_status bnxt_dl_selftest_run(struct devlink *dl,
+ unsigned int id,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ if (id == DEVLINK_ATTR_SELFTEST_ID_FLASH)
+ return bnxt_nvm_test(bp, extack) ?
+ DEVLINK_SELFTEST_STATUS_PASS :
+ DEVLINK_SELFTEST_STATUS_FAIL;
+
+ return DEVLINK_SELFTEST_STATUS_SKIP;
+}
+
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
@@ -598,6 +681,8 @@ static const struct devlink_ops bnxt_dl_ops = {
.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
.reload_down = bnxt_dl_reload_down,
.reload_up = bnxt_dl_reload_up,
+ .selftest_check = bnxt_dl_selftest_check,
+ .selftest_run = bnxt_dl_selftest_run,
};
static const struct devlink_ops bnxt_vf_dl_ops;
@@ -807,10 +892,6 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
u32 ver = 0;
int rc;
- rc = devlink_info_driver_name_put(req, DRV_MODULE_NAME);
- if (rc)
- return rc;
-
if (BNXT_PF(bp) && (bp->flags & BNXT_FLAG_DSN_VALID)) {
sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4],
@@ -955,9 +1036,11 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (rc)
return rc;
- rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
- if (rc)
- return rc;
+ if (BNXT_CHIP_P5(bp)) {
+ rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
+ if (rc)
+ return rc;
+ }
return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index a715458abc30..b8105065367b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -75,7 +75,7 @@ void bnxt_devlink_health_fw_report(struct bnxt *bp);
void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy);
void bnxt_dl_health_fw_recovery_done(struct bnxt *bp);
void bnxt_dl_fw_reporters_create(struct bnxt *bp);
-void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
+void bnxt_dl_fw_reporters_destroy(struct bnxt *bp);
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8188d55722e4..2dd8ee4a6f75 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -11,6 +11,7 @@
#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
@@ -22,18 +23,24 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/timecounter.h>
+#include <net/netlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
+#include "bnxt_ulp.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"
-#define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100)
-#define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
-#define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200)
+
+#define BNXT_NVM_ERR_MSG(dev, extack, msg) \
+ do { \
+ if (extack) \
+ NL_SET_ERR_MSG_MOD(extack, msg); \
+ netdev_err(dev, "%s\n", msg); \
+ } while (0)
static u32 bnxt_get_msglevel(struct net_device *dev)
{
@@ -68,6 +75,9 @@ static int bnxt_get_coalesce(struct net_device *dev,
coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
+ if (hw_coal->flags &
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
+ kernel_coal->use_cqe_mode_rx = true;
hw_coal = &bp->tx_coal;
mult = hw_coal->bufs_per_record;
@@ -75,6 +85,9 @@ static int bnxt_get_coalesce(struct net_device *dev,
coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;
+ if (hw_coal->flags &
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET)
+ kernel_coal->use_cqe_mode_tx = true;
coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
@@ -101,12 +114,22 @@ static int bnxt_set_coalesce(struct net_device *dev,
}
}
+ if ((kernel_coal->use_cqe_mode_rx || kernel_coal->use_cqe_mode_tx) &&
+ !(bp->coal_cap.cmpl_params &
+ RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET))
+ return -EOPNOTSUPP;
+
hw_coal = &bp->rx_coal;
mult = hw_coal->bufs_per_record;
hw_coal->coal_ticks = coal->rx_coalesce_usecs;
hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;
+ hw_coal->flags &=
+ ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ if (kernel_coal->use_cqe_mode_rx)
+ hw_coal->flags |=
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
hw_coal = &bp->tx_coal;
mult = hw_coal->bufs_per_record;
@@ -114,6 +137,11 @@ static int bnxt_set_coalesce(struct net_device *dev,
hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;
+ hw_coal->flags &=
+ ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
+ if (kernel_coal->use_cqe_mode_tx)
+ hw_coal->flags |=
+ RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
u32 stats_ticks = coal->stats_block_coalesce_usecs;
@@ -134,7 +162,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
}
reset_coalesce:
- if (netif_running(dev)) {
+ if (test_bit(BNXT_STATE_OPEN, &bp->state)) {
if (update_stats) {
rc = bnxt_close_nic(bp, true, false);
if (!rc)
@@ -775,16 +803,20 @@ skip_tpa_stats:
}
static void bnxt_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
+ kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
} else {
ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
ering->rx_jumbo_max_pending = 0;
+ kernel_ering->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;
@@ -794,7 +826,9 @@ static void bnxt_get_ringparam(struct net_device *dev,
}
static int bnxt_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
@@ -1200,6 +1234,8 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (bp->rss_hash_cfg == rss_hash_cfg)
return 0;
+ if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
+ bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg;
bp->rss_hash_cfg = rss_hash_cfg;
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
@@ -1337,9 +1373,9 @@ static void bnxt_get_drvinfo(struct net_device *dev,
{
struct bnxt *bp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = bnxt_get_num_stats(bp);
info->testinfo_len = bp->num_tests;
/* TODO CHIMP_FW: eeprom dump details */
@@ -1636,15 +1672,19 @@ static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
struct ethtool_link_ksettings *lk_ksettings)
{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
u16 fw_speeds = link_info->support_speeds;
BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
fw_speeds = link_info->support_pam4_speeds;
BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
- Asym_Pause);
+ if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Pause);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Asym_Pause);
+ }
if (link_info->support_auto_speeds ||
link_info->support_pam4_auto_speeds)
@@ -1674,6 +1714,8 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
return SPEED_50000;
case BNXT_LINK_SPEED_100GB:
return SPEED_100000;
+ case BNXT_LINK_SPEED_200GB:
+ return SPEED_200000;
default:
return SPEED_UNKNOWN;
}
@@ -1875,7 +1917,8 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
/* any change to autoneg will cause link change, therefore the
* driver should put back the original pause setting in autoneg
*/
- set_pause = true;
+ if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
+ set_pause = true;
} else {
u8 phy_type = link_info->phy_type;
@@ -1947,6 +1990,9 @@ static int bnxt_get_fecparam(struct net_device *dev,
case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
fec->active_fec |= ETHTOOL_FEC_LLRS;
break;
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_OFF;
+ break;
}
return 0;
}
@@ -1963,6 +2009,14 @@ static void bnxt_get_fec_stats(struct net_device *dev,
rx = bp->rx_port_stats_ext.sw_stats;
fec_stats->corrected_bits.total =
*(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits));
+
+ if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY)
+ return;
+
+ fec_stats->corrected_blocks.total =
+ *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks));
+ fec_stats->uncorrectable_blocks.total =
+ *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks));
}
static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
@@ -2064,7 +2118,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- if (!BNXT_PHY_CFG_ABLE(bp))
+ if (!BNXT_PHY_CFG_ABLE(bp) || (bp->phy_flags & BNXT_PHY_FL_NO_PAUSE))
return -EOPNOTSUPP;
mutex_lock(&bp->link_lock);
@@ -2075,9 +2129,7 @@ static int bnxt_set_pauseparam(struct net_device *dev,
}
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
- if (bp->hwrm_spec_code >= 0x10201)
- link_info->req_flow_ctrl =
- PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
+ link_info->req_flow_ctrl = 0;
} else {
/* when transition from auto pause to force pause,
* force a link change
@@ -2106,7 +2158,7 @@ static u32 bnxt_get_link(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
/* TODO: handle MF, VF, driver close case */
- return bp->link_info.link_up;
+ return BNXT_LINK_IS_UP(bp);
}
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
@@ -2136,14 +2188,14 @@ static void bnxt_print_admin_err(struct bnxt *bp)
netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
- u16 ext, u16 *index, u32 *item_length,
- u32 *data_length);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
-static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
- u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
- u32 dir_item_len, const u8 *data,
- size_t data_len)
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+ u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+ u32 dir_item_len, const u8 *data,
+ size_t data_len)
{
struct bnxt *bp = netdev_priv(dev);
struct hwrm_nvm_write_input *req;
@@ -2169,7 +2221,7 @@ static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
req->host_src_addr = cpu_to_le64(dma_handle);
}
- hwrm_req_timeout(bp, req, FLASH_NVRAM_TIMEOUT);
+ hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout);
req->dir_type = cpu_to_le16(dir_type);
req->dir_ordinal = cpu_to_le16(dir_ordinal);
req->dir_ext = cpu_to_le16(dir_ext);
@@ -2467,12 +2519,92 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
return rc;
}
+#define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
+#define MSG_INVALID_PKG "PKG install error : Invalid package"
+#define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
+#define MSG_INVALID_DEV "PKG install error : Invalid device"
+#define MSG_INTERNAL_ERR "PKG install error : Internal error"
+#define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
+#define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
+#define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
+#define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
+#define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
+
+static int nvm_update_err_to_stderr(struct net_device *dev, u8 result,
+ struct netlink_ext_ack *extack)
+{
+ switch (result) {
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INTEGRITY_ERR);
+ return -EINVAL;
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_PKG);
+ return -ENOPKG;
+ case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_AUTHENTICATION_ERR);
+ return -EPERM;
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID:
+ case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INVALID_DEV);
+ return -EOPNOTSUPP;
+ default:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_INTERNAL_ERR);
+ return -EIO;
+ }
+}
+
#define BNXT_PKG_DMA_SIZE 0x40000
#define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
#define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
+static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size,
+ struct netlink_ext_ack *extack)
+{
+ u32 item_len;
+ int rc;
+
+ rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL,
+ &item_len, NULL);
+ if (rc) {
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
+ return rc;
+ }
+
+ if (fw_size > item_len) {
+ rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE,
+ BNX_DIR_ORDINAL_FIRST, 0, 1,
+ round_up(fw_size, 4096), NULL, 0);
+ if (rc) {
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR);
+ return rc;
+ }
+ }
+ return 0;
+}
+
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
- u32 install_type)
+ u32 install_type, struct netlink_ext_ack *extack)
{
struct hwrm_nvm_install_update_input *install;
struct hwrm_nvm_install_update_output *resp;
@@ -2483,9 +2615,15 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
u8 *kmem = NULL;
u32 modify_len;
u32 item_len;
+ u8 cmd_err;
u16 index;
int rc;
+ /* resize before flashing larger image than available space */
+ rc = bnxt_resize_update_entry(dev, fw->size, extack);
+ if (rc)
+ return rc;
+
bnxt_hwrm_fw_set_time(bp);
rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY);
@@ -2515,8 +2653,8 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
return rc;
}
- hwrm_req_timeout(bp, modify, FLASH_PACKAGE_TIMEOUT);
- hwrm_req_timeout(bp, install, INSTALL_PACKAGE_TIMEOUT);
+ hwrm_req_timeout(bp, modify, bp->hwrm_cmd_max_timeout);
+ hwrm_req_timeout(bp, install, bp->hwrm_cmd_max_timeout);
hwrm_req_hold(bp, modify);
modify->host_src_addr = cpu_to_le64(dma_handle);
@@ -2534,12 +2672,11 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
BNX_DIR_EXT_NONE,
&index, &item_len, NULL);
if (rc) {
- netdev_err(dev, "PKG update area not created in nvram\n");
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR);
break;
}
if (fw->size > item_len) {
- netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
- (unsigned long)fw->size);
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_SPACE_ERR);
rc = -EFBIG;
break;
}
@@ -2566,6 +2703,8 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
}
rc = hwrm_req_send_silent(bp, install);
+ if (!rc)
+ break;
if (defrag_attempted) {
/* We have tried to defragment already in the previous
@@ -2574,15 +2713,24 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
break;
}
- if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
- NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+ cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
+
+ switch (cmd_err) {
+ case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_ANTI_ROLLBACK_ERR);
+ rc = -EALREADY;
+ break;
+ case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR:
install->flags =
cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
rc = hwrm_req_send_silent(bp, install);
+ if (!rc)
+ break;
+
+ cmd_err = ((struct hwrm_err_output *)resp)->cmd_err;
- if (rc && ((struct hwrm_err_output *)resp)->cmd_err ==
- NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
+ if (cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
/* FW has cleared NVM area, driver will create
* UPDATE directory and try the flash again
*/
@@ -2592,11 +2740,12 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
BNX_DIR_TYPE_UPDATE,
BNX_DIR_ORDINAL_FIRST,
0, 0, item_len, NULL, 0);
- } else if (rc) {
- netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
+ if (!rc)
+ break;
}
- } else if (rc) {
- netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc);
+ fallthrough;
+ default:
+ BNXT_NVM_ERR_MSG(dev, extack, MSG_GENERIC_FAILURE_ERR);
}
} while (defrag_attempted && !rc);
@@ -2607,7 +2756,7 @@ pkg_abort:
if (resp->result) {
netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
(s8)resp->result, (int)resp->problem_item);
- rc = -ENOPKG;
+ rc = nvm_update_err_to_stderr(dev, resp->result, extack);
}
if (rc == -EACCES)
bnxt_print_admin_err(bp);
@@ -2615,7 +2764,7 @@ pkg_abort:
}
static int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
- u32 install_type)
+ u32 install_type, struct netlink_ext_ack *extack)
{
const struct firmware *fw;
int rc;
@@ -2627,7 +2776,7 @@ static int bnxt_flash_package_from_file(struct net_device *dev, const char *file
return rc;
}
- rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type);
+ rc = bnxt_flash_package_from_fw_obj(dev, fw, install_type, extack);
release_firmware(fw);
@@ -2645,7 +2794,7 @@ static int bnxt_flash_device(struct net_device *dev,
if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
flash->region > 0xffff)
return bnxt_flash_package_from_file(dev, flash->data,
- flash->region);
+ flash->region, NULL);
return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}
@@ -2715,7 +2864,7 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
if (rc)
return rc;
- buflen = dir_entries * entry_length;
+ buflen = mul_u32_u32(dir_entries, entry_length);
buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);
if (!buf) {
hwrm_req_drop(bp, req);
@@ -2731,8 +2880,8 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
return rc;
}
-static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
- u32 length, u8 *data)
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data)
{
struct bnxt *bp = netdev_priv(dev);
int rc;
@@ -2766,9 +2915,9 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
return rc;
}
-static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
- u16 ext, u16 *index, u32 *item_length,
- u32 *data_length)
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length)
{
struct hwrm_nvm_find_dir_entry_output *output;
struct hwrm_nvm_find_dir_entry_input *req;
@@ -3041,8 +3190,9 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
}
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
- u16 page_number, u16 start_addr,
- u16 data_length, u8 *buf)
+ u16 page_number, u8 bank,
+ u16 start_addr, u16 data_length,
+ u8 *buf)
{
struct hwrm_port_phy_i2c_read_output *output;
struct hwrm_port_phy_i2c_read_input *req;
@@ -3063,8 +3213,13 @@ static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
data_length -= xfer_size;
req->page_offset = cpu_to_le16(start_addr + byte_offset);
req->data_length = xfer_size;
- req->enables = cpu_to_le32(start_addr + byte_offset ?
- PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
+ req->enables =
+ cpu_to_le32((start_addr + byte_offset ?
+ PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET :
+ 0) |
+ (bank ?
+ PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER :
+ 0));
rc = hwrm_req_send(bp, req);
if (!rc)
memcpy(buf + byte_offset, output->data, xfer_size);
@@ -3094,7 +3249,7 @@ static int bnxt_get_module_info(struct net_device *dev,
if (bp->hwrm_spec_code < 0x10202)
return -EOPNOTSUPP;
- rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0,
SFF_DIAG_SUPPORT_OFFSET + 1,
data);
if (!rc) {
@@ -3139,7 +3294,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
if (start < ETH_MODULE_SFF_8436_LEN) {
if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
length = ETH_MODULE_SFF_8436_LEN - start;
- rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
start, length, data);
if (rc)
return rc;
@@ -3151,12 +3306,68 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
/* Read A2 portion of the EEPROM */
if (length) {
start -= ETH_MODULE_SFF_8436_LEN;
- rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0,
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0,
start, length, data);
}
return rc;
}
+static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack)
+{
+ if (bp->link_info.module_status <=
+ PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
+ return 0;
+
+ switch (bp->link_info.module_status) {
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
+ NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down");
+ break;
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED:
+ NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted");
+ break;
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT:
+ NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault");
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown error");
+ break;
+ }
+ return -EINVAL;
+}
+
+static int bnxt_get_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page_data,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ rc = bnxt_get_module_status(bp, extack);
+ if (rc)
+ return rc;
+
+ if (bp->hwrm_spec_code < 0x10202) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware version too old");
+ return -EINVAL;
+ }
+
+ if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection");
+ return -EINVAL;
+ }
+
+ rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1,
+ page_data->page, page_data->bank,
+ page_data->offset,
+ page_data->length,
+ page_data->data);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom read failed");
+ return rc;
+ }
+ return page_data->length;
+}
+
static int bnxt_nway_reset(struct net_device *dev)
{
int rc = 0;
@@ -3298,7 +3509,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
return rc;
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
- if (bp->link_info.link_up)
+ if (BNXT_LINK_IS_UP(bp))
fw_speed = bp->link_info.link_speed;
else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
@@ -3432,7 +3643,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
if (!skb)
return -ENOMEM;
data = skb_put(skb, pkt_size);
- eth_broadcast_addr(data);
+ ether_addr_copy(&data[i], bp->dev->dev_addr);
i += ETH_ALEN;
ether_addr_copy(&data[i], bp->dev->dev_addr);
i += ETH_ALEN;
@@ -3445,7 +3656,7 @@ static int bnxt_run_loopback(struct bnxt *bp)
dev_kfree_skb(skb);
return -EIO;
}
- bnxt_xmit_bd(bp, txr, map, pkt_size);
+ bnxt_xmit_bd(bp, txr, map, pkt_size, NULL);
/* Sync BD data before updating doorbell */
wmb();
@@ -3526,9 +3737,13 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!offline) {
bnxt_run_fw_tests(bp, test_mask, &test_results);
} else {
- rc = bnxt_close_nic(bp, false, false);
- if (rc)
+ bnxt_ulp_stop(bp);
+ rc = bnxt_close_nic(bp, true, false);
+ if (rc) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ bnxt_ulp_start(bp, rc);
return;
+ }
bnxt_run_fw_tests(bp, test_mask, &test_results);
buf[BNXT_MACLPBK_TEST_IDX] = 1;
@@ -3538,6 +3753,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (rc) {
bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
+ bnxt_ulp_start(bp, rc);
return;
}
if (bnxt_run_loopback(bp))
@@ -3563,7 +3779,8 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
}
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
- rc = bnxt_open_nic(bp, false, true);
+ rc = bnxt_open_nic(bp, true, true);
+ bnxt_ulp_start(bp, rc);
}
if (rc || bnxt_test_irq(bp)) {
buf[BNXT_IRQ_TEST_IDX] = 1;
@@ -3708,6 +3925,9 @@ static int bnxt_get_ts_info(struct net_device *dev,
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+
+ if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
+ info->rx_filters |= (1 << HWTSTAMP_FILTER_ALL);
return 0;
}
@@ -3752,7 +3972,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
test_info->timeout = HWRM_CMD_TIMEOUT;
for (i = 0; i < bp->num_tests; i++) {
char *str = test_info->string[i];
- char *fw_str = resp->test0_name + i * 32;
+ char *fw_str = resp->test_name[i];
if (i == BNXT_MACLPBK_TEST_IDX) {
strcpy(str, "Mac loopback test (offline)");
@@ -3763,14 +3983,9 @@ void bnxt_ethtool_init(struct bnxt *bp)
} else if (i == BNXT_IRQ_TEST_IDX) {
strcpy(str, "Interrupt_test (offline)");
} else {
- strlcpy(str, fw_str, ETH_GSTRING_LEN);
- strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
- if (test_info->offline_mask & (1 << i))
- strncat(str, " (offline)",
- ETH_GSTRING_LEN - strlen(str));
- else
- strncat(str, " (online)",
- ETH_GSTRING_LEN - strlen(str));
+ snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
+ fw_str, test_info->offline_mask & (1 << i) ?
+ "offline" : "online");
}
}
@@ -3905,6 +4120,20 @@ static void bnxt_get_rmon_stats(struct net_device *dev,
*ranges = bnxt_rmon_ranges;
}
+static void bnxt_get_link_ext_stats(struct net_device *dev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
+ return;
+
+ rx = bp->rx_port_stats_ext.sw_stats;
+ stats->link_down_events =
+ *(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events));
+}
+
void bnxt_ethtool_free(struct bnxt *bp)
{
kfree(bp->test_info);
@@ -3917,7 +4146,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
ETHTOOL_COALESCE_USECS_IRQ |
ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_STATS_BLOCK_USECS |
- ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
+ ETHTOOL_COALESCE_USE_CQE,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
.get_fec_stats = bnxt_get_fec_stats,
@@ -3953,10 +4183,12 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_eeprom = bnxt_get_eeprom,
.set_eeprom = bnxt_set_eeprom,
.get_link = bnxt_get_link,
+ .get_link_ext_stats = bnxt_get_link_ext_stats,
.get_eee = bnxt_get_eee,
.set_eee = bnxt_set_eee,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
+ .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page,
.nway_reset = bnxt_nway_reset,
.set_phys_id = bnxt_set_phys_id,
.self_test = bnxt_self_test,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 6aa44840f13a..a8ecef8ab82c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -54,9 +54,21 @@ int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
u8 self_reset, u8 flags);
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
- u32 install_type);
+ u32 install_type, struct netlink_ext_ack *extack);
int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size);
void bnxt_ethtool_init(struct bnxt *bp);
void bnxt_ethtool_free(struct bnxt *bp);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
+int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
+ u16 ext, u16 *index, u32 *item_length,
+ u32 *data_length);
+int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
+ u16 dir_ordinal, u16 dir_ext, u16 dir_attr,
+ u32 dir_item_len, const u8 *data,
+ size_t data_len);
+int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
+ u32 length, u8 *data);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index ea86c54247c7..b31de4cf6534 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2021 Broadcom Inc.
+ * Copyright (c) 2018-2022 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -254,6 +254,8 @@ struct cmd_nums {
#define HWRM_PORT_DSC_DUMP 0xd9UL
#define HWRM_PORT_EP_TX_QCFG 0xdaUL
#define HWRM_PORT_EP_TX_CFG 0xdbUL
+ #define HWRM_PORT_CFG 0xdcUL
+ #define HWRM_PORT_QCFG 0xddUL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_REG_POWER_QUERY 0xe1UL
#define HWRM_CORE_FREQUENCY_QUERY 0xe2UL
@@ -311,6 +313,8 @@ struct cmd_nums {
#define HWRM_CFA_TFLIB 0x125UL
#define HWRM_CFA_LAG_GROUP_MEMBER_RGTR 0x126UL
#define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
+ #define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL
+ #define HWRM_CFA_TLS_FILTER_FREE 0x129UL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
#define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
@@ -369,6 +373,16 @@ struct cmd_nums {
#define HWRM_FUNC_PTP_EXT_CFG 0x1a0UL
#define HWRM_FUNC_PTP_EXT_QCFG 0x1a1UL
#define HWRM_FUNC_KEY_CTX_ALLOC 0x1a2UL
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2 0x1a3UL
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2 0x1a4UL
+ #define HWRM_FUNC_DBR_PACING_CFG 0x1a5UL
+ #define HWRM_FUNC_DBR_PACING_QCFG 0x1a6UL
+ #define HWRM_FUNC_DBR_PACING_BROADCAST_EVENT 0x1a7UL
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL
+ #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL
+ #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
+ #define HWRM_FUNC_SYNCE_CFG 0x1abUL
+ #define HWRM_FUNC_SYNCE_QCFG 0x1acUL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -390,6 +404,10 @@ struct cmd_nums {
#define HWRM_MFG_PRVSN_IMPORT_CERT 0x212UL
#define HWRM_MFG_PRVSN_GET_STATE 0x213UL
#define HWRM_MFG_GET_NVM_MEASUREMENT 0x214UL
+ #define HWRM_MFG_PSOC_QSTATUS 0x215UL
+ #define HWRM_MFG_SELFTEST_QLIST 0x216UL
+ #define HWRM_MFG_SELFTEST_EXEC 0x217UL
+ #define HWRM_STAT_GENERIC_QSTATS 0x218UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -403,6 +421,8 @@ struct cmd_nums {
#define HWRM_TF_SESSION_RESC_FREE 0x2ceUL
#define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
#define HWRM_TF_SESSION_RESC_INFO 0x2d0UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_SET 0x2d1UL
+ #define HWRM_TF_SESSION_HOTUP_STATE_GET 0x2d2UL
#define HWRM_TF_TBL_TYPE_GET 0x2daUL
#define HWRM_TF_TBL_TYPE_SET 0x2dbUL
#define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
@@ -426,6 +446,25 @@ struct cmd_nums {
#define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
#define HWRM_TF_IF_TBL_SET 0x2feUL
#define HWRM_TF_IF_TBL_GET 0x2ffUL
+ #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL
+ #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL
+ #define HWRM_TFC_TBL_SCOPE_DECONFIG 0x383UL
+ #define HWRM_TFC_TBL_SCOPE_FID_ADD 0x384UL
+ #define HWRM_TFC_TBL_SCOPE_FID_REM 0x385UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC 0x386UL
+ #define HWRM_TFC_TBL_SCOPE_POOL_FREE 0x387UL
+ #define HWRM_TFC_SESSION_ID_ALLOC 0x388UL
+ #define HWRM_TFC_SESSION_FID_ADD 0x389UL
+ #define HWRM_TFC_SESSION_FID_REM 0x38aUL
+ #define HWRM_TFC_IDENT_ALLOC 0x38bUL
+ #define HWRM_TFC_IDENT_FREE 0x38cUL
+ #define HWRM_TFC_IDX_TBL_ALLOC 0x38dUL
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET 0x38eUL
+ #define HWRM_TFC_IDX_TBL_SET 0x38fUL
+ #define HWRM_TFC_IDX_TBL_GET 0x390UL
+ #define HWRM_TFC_IDX_TBL_FREE 0x391UL
+ #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL
#define HWRM_SV 0x400UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
@@ -532,8 +571,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 63
-#define HWRM_VERSION_STR "1.10.2.63"
+#define HWRM_VERSION_RSVD 118
+#define HWRM_VERSION_STR "1.10.2.118"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -757,10 +796,13 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
#define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_MASTER 0x43UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
#define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x49UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1112,34 +1154,37 @@ struct hwrm_async_event_cmpl_echo_request {
__le32 event_data1;
};
-/* hwrm_async_event_cmpl_phc_master (size:128b/16B) */
-struct hwrm_async_event_cmpl_phc_master {
+/* hwrm_async_event_cmpl_phc_update (size:128b/16B) */
+struct hwrm_async_event_cmpl_phc_update {
__le16 type;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_LAST ASYNC_EVENT_CMPL_PHC_MASTER_TYPE_HWRM_ASYNC_EVENT
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_TYPE_HWRM_ASYNC_EVENT
__le16 event_id;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_PHC_MASTER 0x43UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_ID_PHC_MASTER
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_ID_PHC_UPDATE
__le32 event_data2;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_MASTER_FID_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA2_PHC_SEC_FID_SFT 16
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_MASTER_FID_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA2_PHC_SEC_FID_SFT 16
u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_V 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_OPAQUE_SFT 1
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_OPAQUE_SFT 1
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_MASK 0xfUL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_SFT 0
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
- #define ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_MASTER_EVENT_DATA1_FLAGS_PHC_FAILOVER
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK 0xfUL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT 0
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_MASTER 0x1UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_SECONDARY 0x2UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_FAILOVER 0x3UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE 0x4UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_LAST ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK 0xffff0UL
+ #define ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT 4
};
/* hwrm_async_event_cmpl_pps_timestamp (size:128b/16B) */
@@ -1246,7 +1291,8 @@ struct hwrm_async_event_cmpl_error_report_base {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
#define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD
};
/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
@@ -1330,6 +1376,32 @@ struct hwrm_async_event_cmpl_error_report_nvm {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE
};
+/* hwrm_async_event_cmpl_error_report_doorbell_drop_threshold (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK 0xffffff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1563,32 +1635,38 @@ struct hwrm_func_qcaps_output {
__le16 max_sp_tx_rings;
__le16 max_msix_vfs;
__le32 flags_ext;
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
- #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED 0x10000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PTM_SUPPORTED 0x20000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED 0x40000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED 0x80000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PARTITION_BW_SUPPORTED 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_REQUIRED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED 0x10000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DBR_PACING_SUPPORTED 0x20000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_HW_DBR_DROP_RECOV_SUPPORTED 0x40000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_DISABLE_CQ_OVERFLOW_DETECTION_SUPPORTED 0x80000000UL
u8 max_schqs;
u8 mpc_chnls_cap;
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
@@ -1597,7 +1675,27 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
__le16 max_key_ctxs_alloc;
- u8 unused_1[7];
+ __le32 flags_ext2;
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED 0x1UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_QUIC_SUPPORTED 0x2UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KDNET_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
+ __le16 tunnel_disable_flag;
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NVGRE 0x4UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_L2GRE 0x8UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_GRE 0x10UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
+ #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
+ u8 unused_1;
u8 valid;
};
@@ -1735,7 +1833,20 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
#define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
#define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
- u8 unused_2[3];
+ u8 db_page_size;
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB
+ u8 unused_2[2];
__le32 partition_min_bw;
#define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
@@ -1761,11 +1872,17 @@ struct hwrm_func_qcfg_output {
__le16 host_mtu;
__le16 alloc_tx_key_ctxs;
__le16 alloc_rx_key_ctxs;
- u8 unused_3[5];
+ u8 port_kdnet_mode;
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
+ u8 kdnet_pcie_function;
+ __le16 port_kdnet_fid;
+ u8 unused_3;
u8 valid;
};
-/* hwrm_func_cfg_input (size:896b/112B) */
+/* hwrm_func_cfg_input (size:960b/120B) */
struct hwrm_func_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1801,6 +1918,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
#define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
#define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
+ #define FUNC_CFG_REQ_FLAGS_KEY_CTX_ASSETS_TEST 0x80000000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1945,7 +2063,27 @@ struct hwrm_func_cfg_input {
__le16 host_mtu;
__le16 num_tx_key_ctxs;
__le16 num_rx_key_ctxs;
- u8 unused_0[4];
+ __le32 enables2;
+ #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
+ #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
+ u8 port_kdnet_mode;
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL
+ #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED
+ u8 db_page_size;
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4KB 0x0UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_8KB 0x1UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_16KB 0x2UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_32KB 0x3UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_64KB 0x4UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_128KB 0x5UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_256KB 0x6UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_512KB 0x7UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_1MB 0x8UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL
+ #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB
+ u8 unused_0[6];
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -1979,10 +2117,9 @@ struct hwrm_func_qstats_input {
__le64 resp_addr;
__le16 fid;
u8 flags;
- #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL
- #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
- #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK
+ #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL
+ #define FUNC_QSTATS_REQ_FLAGS_L2_ONLY 0x4UL
u8 unused_0[5];
};
@@ -2012,7 +2149,8 @@ struct hwrm_func_qstats_output {
__le64 rx_agg_bytes;
__le64 rx_agg_events;
__le64 rx_agg_aborts;
- u8 unused_0[7];
+ u8 clear_seq;
+ u8 unused_0[6];
u8 valid;
};
@@ -2025,10 +2163,8 @@ struct hwrm_func_qstats_ext_input {
__le64 resp_addr;
__le16 fid;
u8 flags;
- #define FUNC_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
- #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
- #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
- #define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL
+ #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL
u8 unused_0[1];
__le32 enables;
#define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL
@@ -2129,6 +2265,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
#define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
#define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -2455,7 +2592,7 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 rkc_entry_size;
__le32 tkc_max_entries;
__le32 rkc_max_entries;
- u8 rsvd[7];
+ u8 rsvd1[7];
u8 valid;
};
@@ -3074,19 +3211,23 @@ struct hwrm_func_ptp_pin_qcfg_output {
#define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL
#define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT
u8 pin2_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
u8 pin3_usage;
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
u8 unused_0;
u8 valid;
};
@@ -3134,23 +3275,27 @@ struct hwrm_func_ptp_pin_cfg_input {
#define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL
#define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED
u8 pin2_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT
u8 pin3_state;
#define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL
#define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL
#define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED
u8 pin3_usage;
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
- #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL
+ #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT
u8 unused_0[4];
};
@@ -3164,7 +3309,7 @@ struct hwrm_func_ptp_pin_cfg_output {
u8 valid;
};
-/* hwrm_func_ptp_cfg_input (size:320b/40B) */
+/* hwrm_func_ptp_cfg_input (size:384b/48B) */
struct hwrm_func_ptp_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3178,6 +3323,7 @@ struct hwrm_func_ptp_cfg_input {
#define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD 0x8UL
#define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP 0x10UL
#define FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE 0x20UL
+ #define FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME 0x40UL
u8 ptp_pps_event;
#define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_INTERNAL 0x1UL
#define FUNC_PTP_CFG_REQ_PTP_PPS_EVENT_EXTERNAL 0x2UL
@@ -3204,6 +3350,7 @@ struct hwrm_func_ptp_cfg_input {
__le32 ptp_freq_adj_ext_up;
__le32 ptp_freq_adj_ext_phase_lower;
__le32 ptp_freq_adj_ext_phase_upper;
+ __le64 ptp_set_time;
};
/* hwrm_func_ptp_cfg_output (size:128b/16B) */
@@ -3236,13 +3383,344 @@ struct hwrm_func_ptp_ts_query_output {
__le16 seq_id;
__le16 resp_len;
__le64 pps_event_ts;
- __le64 ptm_res_local_ts;
- __le64 ptm_pmstr_ts;
- __le32 ptm_mstr_prop_dly;
+ __le64 ptm_local_ts;
+ __le64 ptm_system_ts;
+ __le32 ptm_link_delay;
u8 unused_0[3];
u8 valid;
};
+/* hwrm_func_ptp_ext_cfg_input (size:256b/32B) */
+struct hwrm_func_ptp_ext_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 enables;
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_MASTER_FID 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_FID 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_PHC_SEC_MODE 0x4UL
+ #define FUNC_PTP_EXT_CFG_REQ_ENABLES_FAILOVER_TIMER 0x8UL
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ u8 phc_sec_mode;
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_SWITCH 0x0UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_ALL 0x1UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY 0x2UL
+ #define FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_LAST FUNC_PTP_EXT_CFG_REQ_PHC_SEC_MODE_PF_ONLY
+ u8 unused_0;
+ __le32 failover_timer;
+ u8 unused_1[4];
+};
+
+/* hwrm_func_ptp_ext_cfg_output (size:128b/16B) */
+struct hwrm_func_ptp_ext_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_ptp_ext_qcfg_input (size:192b/24B) */
+struct hwrm_func_ptp_ext_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 unused_0[8];
+};
+
+/* hwrm_func_ptp_ext_qcfg_output (size:256b/32B) */
+struct hwrm_func_ptp_ext_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 phc_master_fid;
+ __le16 phc_sec_fid;
+ __le16 phc_active_fid0;
+ __le16 phc_active_fid1;
+ __le32 last_failover_event;
+ __le16 from_fid;
+ __le16 to_fid;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_cfg_v2_input (size:448b/56B) */
+struct hwrm_func_backing_store_cfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_EXTEND 0x4UL
+ __le64 page_dir;
+ __le32 num_entries;
+ __le16 entry_size;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+};
+
+/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
+struct hwrm_func_backing_store_cfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 rsvd0[7];
+ u8 valid;
+};
+
+/* hwrm_func_backing_store_qcfg_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcfg_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
+ __le16 instance;
+ u8 rsvd[4];
+};
+
+/* hwrm_func_backing_store_qcfg_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcfg_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ __le16 instance;
+ __le32 flags;
+ __le64 page_dir;
+ __le32 num_entries;
+ u8 page_size_pbl_level;
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_SFT 0
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PBL_LEVEL_LVL_2
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_PAGE_SIZE_PG_1G
+ u8 subtype_valid_cnt;
+ u8 rsvd[2];
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd2[7];
+ u8 valid;
+};
+
+/* qpc_split_entries (size:128b/16B) */
+struct qpc_split_entries {
+ __le32 qp_num_l2_entries;
+ __le32 qp_num_qp1_entries;
+ __le32 rsvd[2];
+};
+
+/* srq_split_entries (size:128b/16B) */
+struct srq_split_entries {
+ __le32 srq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* cq_split_entries (size:128b/16B) */
+struct cq_split_entries {
+ __le32 cq_num_l2_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* vnic_split_entries (size:128b/16B) */
+struct vnic_split_entries {
+ __le32 vnic_num_vnic_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* mrav_split_entries (size:128b/16B) */
+struct mrav_split_entries {
+ __le32 mrav_num_av_entries;
+ __le32 rsvd;
+ __le32 rsvd2[2];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_input (size:192b/24B) */
+struct hwrm_func_backing_store_qcaps_v2_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ u8 rsvd[6];
+};
+
+/* hwrm_func_backing_store_qcaps_v2_output (size:448b/56B) */
+struct hwrm_func_backing_store_qcaps_v2_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 type;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ __le16 entry_size;
+ __le32 flags;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
+ __le32 instance_bit_map;
+ u8 ctx_init_value;
+ u8 ctx_init_offset;
+ u8 entry_multiple;
+ u8 rsvd;
+ __le32 max_num_entries;
+ __le32 min_num_entries;
+ __le16 next_valid_type;
+ u8 subtype_valid_cnt;
+ u8 rsvd2;
+ __le32 split_entry_0;
+ __le32 split_entry_1;
+ __le32 split_entry_2;
+ __le32 split_entry_3;
+ u8 rsvd3[3];
+ u8 valid;
+};
+
/* hwrm_func_drv_if_change_input (size:192b/24B) */
struct hwrm_func_drv_if_change_input {
__le16 req_type;
@@ -3441,7 +3919,7 @@ struct hwrm_port_phy_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcfg_output (size:768b/96B) */
+/* hwrm_port_phy_qcfg_output (size:832b/104B) */
struct hwrm_port_phy_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -3738,10 +4216,13 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
+ u8 link_down_reason;
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
+ u8 unused_0[7];
u8 valid;
};
-/* hwrm_port_mac_cfg_input (size:384b/48B) */
+/* hwrm_port_mac_cfg_input (size:448b/56B) */
struct hwrm_port_mac_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -3763,6 +4244,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
#define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE 0x4000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE 0x8000UL
__le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
@@ -3807,7 +4290,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
u8 unused_0[3];
__le32 ptp_freq_adj_ppb;
- __le32 ptp_adj_phase;
+ u8 unused_1[4];
+ __le64 ptp_adj_phase;
};
/* hwrm_port_mac_cfg_output (size:128b/16B) */
@@ -3850,6 +4334,7 @@ struct hwrm_port_mac_ptp_qcfg_output {
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x8UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK 0x10UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED 0x20UL
u8 unused_0[3];
__le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper;
@@ -4006,9 +4491,7 @@ struct hwrm_port_qstats_input {
__le64 resp_addr;
__le16 port_id;
u8 flags;
- #define PORT_QSTATS_REQ_FLAGS_UNUSED 0x0UL
- #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- #define PORT_QSTATS_REQ_FLAGS_LAST PORT_QSTATS_REQ_FLAGS_COUNTER_MASK
+ #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
u8 unused_0[5];
__le64 tx_stat_host_addr;
__le64 rx_stat_host_addr;
@@ -4136,9 +4619,7 @@ struct hwrm_port_qstats_ext_input {
__le16 tx_stat_size;
__le16 rx_stat_size;
u8 flags;
- #define PORT_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL
- #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
- #define PORT_QSTATS_EXT_REQ_FLAGS_LAST PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK
+ #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL
u8 unused_0;
__le64 tx_stat_host_addr;
__le64 rx_stat_host_addr;
@@ -4197,9 +4678,7 @@ struct hwrm_port_ecn_qstats_input {
__le16 port_id;
__le16 ecn_stat_buf_size;
u8 flags;
- #define PORT_ECN_QSTATS_REQ_FLAGS_UNUSED 0x0UL
- #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
- #define PORT_ECN_QSTATS_REQ_FLAGS_LAST PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK
+ #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
u8 unused_0[3];
__le64 ecn_stat_host_addr;
};
@@ -4339,7 +4818,8 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
- #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_4
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_12 0xcUL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_12
__le16 supported_speeds_force_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
@@ -4397,9 +4877,10 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
__le16 flags2;
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
- u8 unused_0[1];
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ u8 internal_port_cnt;
u8 valid;
};
@@ -4413,9 +4894,10 @@ struct hwrm_port_phy_i2c_read_input {
__le32 flags;
__le32 enables;
#define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER 0x2UL
__le16 port_id;
u8 i2c_slave_addr;
- u8 unused_0;
+ u8 bank_number;
__le16 page_number;
__le16 page_offset;
u8 data_length;
@@ -6042,6 +6524,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL
#define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL
#define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL
+ #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL
__le16 vnic_id;
__le16 dflt_ring_grp;
__le16 rss_rule;
@@ -6056,7 +6539,12 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL
#define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL
#define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX
- u8 unused0[5];
+ u8 l2_cqe_mode;
+ #define VNIC_CFG_REQ_L2_CQE_MODE_DEFAULT 0x0UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL
+ #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED
+ u8 unused0[4];
};
/* hwrm_vnic_cfg_output (size:128b/16B) */
@@ -6089,25 +6577,32 @@ struct hwrm_vnic_qcaps_output {
__le16 mru;
u8 unused_0[2];
__le32 flags;
- #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
- #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
- #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
- #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
- #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
- #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
- #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
- #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
- #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
- #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_TOEPLITZ_CAP 0x8000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_XOR_CAP 0x10000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_CHKSM_CAP 0x20000UL
- #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
+ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
+ #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
+ #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
+ #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP 0x8000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_XOR_CAP 0x10000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RING_SELECT_MODE_TOEPLITZ_CHKSM_CAP 0x20000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V3_CAP 0x80000UL
+ #define VNIC_QCAPS_RESP_FLAGS_L2_CQE_MODE_CAP 0x100000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV4_CAP 0x200000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -6221,12 +6716,17 @@ struct hwrm_vnic_rss_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 hash_type;
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6 0x400UL
__le16 vnic_id;
u8 ring_table_pair_index;
u8 hash_mode_flags;
@@ -6241,11 +6741,11 @@ struct hwrm_vnic_rss_cfg_input {
u8 flags;
#define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
#define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
- u8 rss_hash_function;
- #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_TOEPLITZ 0x0UL
- #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_XOR 0x1UL
- #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM 0x2UL
- #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_LAST VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM
+ u8 ring_select_mode;
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_LAST VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
u8 unused_1[4];
};
@@ -6268,6 +6768,53 @@ struct hwrm_vnic_rss_cfg_cmd_err {
u8 unused_0[7];
};
+/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */
+struct hwrm_vnic_rss_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 rss_ctx_idx;
+ __le16 vnic_id;
+ u8 unused_0[4];
+};
+
+/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */
+struct hwrm_vnic_rss_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 hash_type;
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV4 0x80UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV4 0x100UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV6 0x200UL
+ #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV6 0x400UL
+ u8 unused_0[4];
+ __le32 hash_key[10];
+ u8 hash_mode_flags;
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL
+ #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL
+ u8 ring_select_mode;
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ 0x0UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_XOR 0x1UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL
+ #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_LAST VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM
+ u8 unused_1[5];
+ u8 valid;
+};
+
/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
struct hwrm_vnic_plcmodes_cfg_input {
__le16 req_type;
@@ -6390,7 +6937,10 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
#define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
__le16 flags;
- #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
+ #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
+ #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL
+ #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL
+ #define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL
__le64 page_tbl_addr;
__le32 fbo;
u8 page_size;
@@ -7190,7 +7740,10 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMP 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMPV6 0x3aUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD 0xffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD
__le16 dst_id;
__le16 mirror_vnic_id;
u8 tunnel_type;
@@ -7574,12 +8127,17 @@ struct hwrm_cfa_flow_info_input {
__le16 target_id;
__le64 resp_addr;
__le16 flow_handle;
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_SFT 0
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
- #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_TX 0x3000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT_RX 0x9000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT_RX 0xa000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_NIC_RX 0xb000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX 0xc000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_LAST CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT_RX
u8 unused_0[6];
__le64 ext_flow_handle;
};
@@ -7668,7 +8226,8 @@ struct hwrm_cfa_flow_stats_output {
__le64 byte_7;
__le64 byte_8;
__le64 byte_9;
- u8 unused_0[7];
+ __le16 flow_hits;
+ u8 unused_0[5];
u8 valid;
};
@@ -7894,10 +8453,13 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED 0x100000UL
u8 unused_0[3];
u8 valid;
};
+/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -7911,7 +8473,9 @@ struct hwrm_tunnel_dst_port_query_input {
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI
u8 unused_0[7];
};
@@ -7923,7 +8487,16 @@ struct hwrm_tunnel_dst_port_query_output {
__le16 resp_len;
__le16 tunnel_dst_port_id;
__be16 tunnel_dst_port_val;
- u8 unused_0[3];
+ u8 upar_in_use;
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR0 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR1 0x2UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR2 0x4UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR3 0x8UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR4 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR5 0x20UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR6 0x40UL
+ #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR7 0x80UL
+ u8 unused_0[2];
u8 valid;
};
@@ -7941,7 +8514,9 @@ struct hwrm_tunnel_dst_port_alloc_input {
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI
u8 unused_0;
__be16 tunnel_dst_port_val;
u8 unused_1[4];
@@ -7954,7 +8529,21 @@ struct hwrm_tunnel_dst_port_alloc_output {
__le16 seq_id;
__le16 resp_len;
__le16 tunnel_dst_port_id;
- u8 unused_0[5];
+ u8 error_info;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE
+ u8 upar_in_use;
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR2 0x4UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR3 0x8UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR4 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR5 0x20UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR6 0x40UL
+ #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR7 0x80UL
+ u8 unused_0[3];
u8 valid;
};
@@ -7972,7 +8561,9 @@ struct hwrm_tunnel_dst_port_free_input {
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI
u8 unused_0;
__le16 tunnel_dst_port_id;
u8 unused_1[4];
@@ -7984,7 +8575,12 @@ struct hwrm_tunnel_dst_port_free_output {
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- u8 unused_1[7];
+ u8 error_info;
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_SUCCESS 0x0UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_OWNER 0x1UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED 0x2UL
+ #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED
+ u8 unused_1[6];
u8 valid;
};
@@ -8233,6 +8829,54 @@ struct pcie_ctx_hw_stats {
__le64 pcie_recovery_histogram;
};
+/* hwrm_stat_generic_qstats_input (size:256b/32B) */
+struct hwrm_stat_generic_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 generic_stat_size;
+ u8 flags;
+ #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 generic_stat_host_addr;
+};
+
+/* hwrm_stat_generic_qstats_output (size:128b/16B) */
+struct hwrm_stat_generic_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 generic_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* generic_sw_hw_stats (size:1216b/152B) */
+struct generic_sw_hw_stats {
+ __le64 pcie_statistics_tx_tlp;
+ __le64 pcie_statistics_rx_tlp;
+ __le64 pcie_credit_fc_hdr_posted;
+ __le64 pcie_credit_fc_hdr_nonposted;
+ __le64 pcie_credit_fc_hdr_cmpl;
+ __le64 pcie_credit_fc_data_posted;
+ __le64 pcie_credit_fc_data_nonposted;
+ __le64 pcie_credit_fc_data_cmpl;
+ __le64 pcie_credit_fc_tgt_nonposted;
+ __le64 pcie_credit_fc_tgt_data_posted;
+ __le64 pcie_credit_fc_tgt_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_hdr_posted;
+ __le64 pcie_credit_fc_cmpl_data_posted;
+ __le64 pcie_cmpl_longest;
+ __le64 pcie_cmpl_shortest;
+ __le64 cache_miss_count_cfcq;
+ __le64 cache_miss_count_cfcs;
+ __le64 cache_miss_count_cfcc;
+ __le64 cache_miss_count_cfcm;
+};
+
/* hwrm_fw_reset_input (size:192b/24B) */
struct hwrm_fw_reset_input {
__le16 req_type;
@@ -8909,6 +9553,50 @@ struct hwrm_dbg_qcfg_output {
u8 valid;
};
+/* hwrm_dbg_crashdump_medium_cfg_input (size:320b/40B) */
+struct hwrm_dbg_crashdump_medium_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 output_dest_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR 0x1UL
+ __le16 pg_size_lvl;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_MASK 0x3UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_SFT 0
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_0 0x0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_1 0x1UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2 0x2UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_LVL_LVL_2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_MASK 0x1cUL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_SFT 2
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_4K (0x0UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8K (0x1UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_64K (0x2UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_2M (0x3UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_8M (0x4UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G (0x5UL << 2)
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_LAST DBG_CRASHDUMP_MEDIUM_CFG_REQ_PG_SIZE_PG_1G
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_MASK 0xffe0UL
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_UNUSED11_SFT 5
+ __le32 size;
+ __le32 coredump_component_disable_flags;
+ #define DBG_CRASHDUMP_MEDIUM_CFG_REQ_NVRAM 0x1UL
+ __le32 unused_0;
+ __le64 pbl;
+};
+
+/* hwrm_dbg_crashdump_medium_cfg_output (size:128b/16B) */
+struct hwrm_dbg_crashdump_medium_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_1[7];
+ u8 valid;
+};
+
/* coredump_segment_record (size:128b/16B) */
struct coredump_segment_record {
__le16 component_id;
@@ -9372,8 +10060,35 @@ struct hwrm_nvm_install_update_output {
__le16 resp_len;
__le64 installed_items;
u8 result;
- #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
- #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_SUCCESS 0x0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_FAILURE 0xffUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_MALLOC_FAILURE 0xfdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER 0xfbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER 0xf3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE 0xf2UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER 0xecUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE 0xebUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM 0xeaUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH 0xe9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST 0xe8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER 0xe7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM 0xe6UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM 0xe5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH 0xe4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE 0xe1UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV 0xceUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID 0xcdUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR 0xccUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID 0xcbUL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM 0xc5UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM 0xc4UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM 0xc3UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR 0xb9UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR 0xb8UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR 0xb7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND 0xb0UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED 0xa7UL
+ #define NVM_INSTALL_UPDATE_RESP_RESULT_LAST NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED
u8 problem_item;
#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_NONE 0x0UL
#define NVM_INSTALL_UPDATE_RESP_PROBLEM_ITEM_PACKAGE 0xffUL
@@ -9390,11 +10105,12 @@ struct hwrm_nvm_install_update_output {
/* hwrm_nvm_install_update_cmd_err (size:64b/8B) */
struct hwrm_nvm_install_update_cmd_err {
u8 code;
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
- #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR 0x1UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE 0x2UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK 0x3UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT 0x4UL
+ #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_LAST NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_VOLTREG_SUPPORT
u8 unused_0[7];
};
@@ -9533,14 +10249,7 @@ struct hwrm_selftest_qlist_output {
u8 unused_0;
__le16 test_timeout;
u8 unused_1[2];
- char test0_name[32];
- char test1_name[32];
- char test2_name[32];
- char test3_name[32];
- char test4_name[32];
- char test5_name[32];
- char test6_name[32];
- char test7_name[32];
+ char test_name[8][32];
u8 eyescope_target_BER_support;
#define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL
#define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL
@@ -9612,6 +10321,87 @@ struct hwrm_selftest_irq_output {
u8 valid;
};
+/* dbc_dbc (size:64b/8B) */
+struct dbc_dbc {
+ u32 index;
+ #define DBC_DBC_INDEX_MASK 0xffffffUL
+ #define DBC_DBC_INDEX_SFT 0
+ #define DBC_DBC_EPOCH 0x1000000UL
+ #define DBC_DBC_TOGGLE_MASK 0x6000000UL
+ #define DBC_DBC_TOGGLE_SFT 25
+ u32 type_path_xid;
+ #define DBC_DBC_XID_MASK 0xfffffUL
+ #define DBC_DBC_XID_SFT 0
+ #define DBC_DBC_PATH_MASK 0x3000000UL
+ #define DBC_DBC_PATH_SFT 24
+ #define DBC_DBC_PATH_ROCE (0x0UL << 24)
+ #define DBC_DBC_PATH_L2 (0x1UL << 24)
+ #define DBC_DBC_PATH_ENGINE (0x2UL << 24)
+ #define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE
+ #define DBC_DBC_VALID 0x4000000UL
+ #define DBC_DBC_DEBUG_TRACE 0x8000000UL
+ #define DBC_DBC_TYPE_MASK 0xf0000000UL
+ #define DBC_DBC_TYPE_SFT 28
+ #define DBC_DBC_TYPE_SQ (0x0UL << 28)
+ #define DBC_DBC_TYPE_RQ (0x1UL << 28)
+ #define DBC_DBC_TYPE_SRQ (0x2UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARM (0x3UL << 28)
+ #define DBC_DBC_TYPE_CQ (0x4UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMSE (0x5UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMALL (0x6UL << 28)
+ #define DBC_DBC_TYPE_CQ_ARMENA (0x7UL << 28)
+ #define DBC_DBC_TYPE_SRQ_ARMENA (0x8UL << 28)
+ #define DBC_DBC_TYPE_CQ_CUTOFF_ACK (0x9UL << 28)
+ #define DBC_DBC_TYPE_NQ (0xaUL << 28)
+ #define DBC_DBC_TYPE_NQ_ARM (0xbUL << 28)
+ #define DBC_DBC_TYPE_NQ_MASK (0xeUL << 28)
+ #define DBC_DBC_TYPE_NULL (0xfUL << 28)
+ #define DBC_DBC_TYPE_LAST DBC_DBC_TYPE_NULL
+};
+
+/* db_push_start (size:64b/8B) */
+struct db_push_start {
+ u64 db;
+ #define DB_PUSH_START_DB_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_START_DB_INDEX_SFT 0
+ #define DB_PUSH_START_DB_PI_LO_MASK 0xff000000UL
+ #define DB_PUSH_START_DB_PI_LO_SFT 24
+ #define DB_PUSH_START_DB_XID_MASK 0xfffff00000000ULL
+ #define DB_PUSH_START_DB_XID_SFT 32
+ #define DB_PUSH_START_DB_PI_HI_MASK 0xf0000000000000ULL
+ #define DB_PUSH_START_DB_PI_HI_SFT 52
+ #define DB_PUSH_START_DB_TYPE_MASK 0xf000000000000000ULL
+ #define DB_PUSH_START_DB_TYPE_SFT 60
+ #define DB_PUSH_START_DB_TYPE_PUSH_START (0xcULL << 60)
+ #define DB_PUSH_START_DB_TYPE_PUSH_END (0xdULL << 60)
+ #define DB_PUSH_START_DB_TYPE_LAST DB_PUSH_START_DB_TYPE_PUSH_END
+};
+
+/* db_push_end (size:64b/8B) */
+struct db_push_end {
+ u64 db;
+ #define DB_PUSH_END_DB_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_END_DB_INDEX_SFT 0
+ #define DB_PUSH_END_DB_PI_LO_MASK 0xff000000UL
+ #define DB_PUSH_END_DB_PI_LO_SFT 24
+ #define DB_PUSH_END_DB_XID_MASK 0xfffff00000000ULL
+ #define DB_PUSH_END_DB_XID_SFT 32
+ #define DB_PUSH_END_DB_PI_HI_MASK 0xf0000000000000ULL
+ #define DB_PUSH_END_DB_PI_HI_SFT 52
+ #define DB_PUSH_END_DB_PATH_MASK 0x300000000000000ULL
+ #define DB_PUSH_END_DB_PATH_SFT 56
+ #define DB_PUSH_END_DB_PATH_ROCE (0x0ULL << 56)
+ #define DB_PUSH_END_DB_PATH_L2 (0x1ULL << 56)
+ #define DB_PUSH_END_DB_PATH_ENGINE (0x2ULL << 56)
+ #define DB_PUSH_END_DB_PATH_LAST DB_PUSH_END_DB_PATH_ENGINE
+ #define DB_PUSH_END_DB_DEBUG_TRACE 0x800000000000000ULL
+ #define DB_PUSH_END_DB_TYPE_MASK 0xf000000000000000ULL
+ #define DB_PUSH_END_DB_TYPE_SFT 60
+ #define DB_PUSH_END_DB_TYPE_PUSH_START (0xcULL << 60)
+ #define DB_PUSH_END_DB_TYPE_PUSH_END (0xdULL << 60)
+ #define DB_PUSH_END_DB_TYPE_LAST DB_PUSH_END_DB_TYPE_PUSH_END
+};
+
/* db_push_info (size:64b/8B) */
struct db_push_info {
u32 push_size_push_index;
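+/*
+ * Illustrative sketch, not part of this patch: composing the 64-bit L2
+ * completion-ring doorbell described by struct dbc_dbc above (index word
+ * low, type/path/xid word high on little-endian).  The function name and
+ * the writeq() destination are assumptions for illustration only.
+ *
+ *	#include <linux/io.h>
+ *
+ *	static void bnxt_example_cq_armall(void __iomem *db, u32 xid, u32 raw_cons)
+ *	{
+ *		u32 lo = (raw_cons << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK;
+ *		u32 hi = ((xid << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) |
+ *			 DBC_DBC_PATH_L2 | DBC_DBC_TYPE_CQ_ARMALL;
+ *
+ *		writeq(((u64)hi << 32) | lo, db);
+ *	}
+ */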
@@ -9636,6 +10426,7 @@ struct fw_status_reg {
#define FW_STATUS_REG_SHUTDOWN 0x100000UL
#define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
#define FW_STATUS_REG_RECOVERING 0x400000UL
+ #define FW_STATUS_REG_MANU_DEBUG_STATUS 0x800000UL
};
/* hcomm_status (size:64b/8B) */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index bb7327b82d0b..132442f16fe6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -359,6 +359,8 @@ static int __hwrm_to_stderr(u32 hwrm_err)
return -EAGAIN;
case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
return -EOPNOTSUPP;
+ case HWRM_ERR_CODE_PF_UNAVAILABLE:
+ return -ENODEV;
default:
return -EIO;
}
@@ -416,6 +418,44 @@ hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state)
netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
}
+static void hwrm_req_dbg(struct bnxt *bp, struct input *req)
+{
+ u32 ring = le16_to_cpu(req->cmpl_ring);
+ u32 type = le16_to_cpu(req->req_type);
+ u32 tgt = le16_to_cpu(req->target_id);
+ u32 seq = le16_to_cpu(req->seq_id);
+ char opt[32] = "\n";
+
+ if (unlikely(ring != (u16)BNXT_HWRM_NO_CMPL_RING))
+ snprintf(opt, 16, " ring %d\n", ring);
+
+ if (unlikely(tgt != BNXT_HWRM_TARGET))
+ snprintf(opt + strlen(opt) - 1, 16, " tgt 0x%x\n", tgt);
+
+ netdev_dbg(bp->dev, "sent hwrm req_type 0x%x seq id 0x%x%s",
+ type, seq, opt);
+}
+
+#define hwrm_err(bp, ctx, fmt, ...) \
+ do { \
+ if ((ctx)->flags & BNXT_HWRM_CTX_SILENT) \
+ netdev_dbg((bp)->dev, fmt, __VA_ARGS__); \
+ else \
+ netdev_err((bp)->dev, fmt, __VA_ARGS__); \
+ } while (0)
+
+static bool hwrm_wait_must_abort(struct bnxt *bp, u32 req_type, u32 *fw_status)
+{
+ if (req_type == HWRM_VER_GET)
+ return false;
+
+ if (!bp->fw_health || !bp->fw_health->status_reliable)
+ return false;
+
+ *fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ return *fw_status && !BNXT_FW_IS_HEALTHY(*fw_status);
+}
+
static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
{
u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
@@ -427,8 +467,8 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
unsigned int i, timeout, tmo_count;
u32 *data = (u32 *)ctx->req;
u32 msg_len = ctx->req_len;
+ u32 req_type, sts;
int rc = -EBUSY;
- u32 req_type;
u16 len = 0;
u8 *valid;
@@ -436,8 +476,12 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
memset(ctx->resp, 0, PAGE_SIZE);
req_type = le16_to_cpu(ctx->req->req_type);
- if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET)
+ if (BNXT_NO_FW_ACCESS(bp) &&
+ (req_type != HWRM_FUNC_RESET && req_type != HWRM_VER_GET)) {
+ netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n",
+ req_type);
goto exit;
+ }
if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
msg_len > bp->hwrm_max_ext_req_len) {
@@ -490,13 +534,15 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
/* Ring channel doorbell */
writel(1, bp->bar0 + doorbell_offset);
+ hwrm_req_dbg(bp, ctx->req);
+
if (!pci_is_enabled(bp->pdev)) {
rc = -ENODEV;
goto exit;
}
/* Limit timeout to an upper limit */
- timeout = min_t(uint, ctx->timeout, HWRM_CMD_MAX_TIMEOUT);
+ timeout = min(ctx->timeout, bp->hwrm_cmd_max_timeout ?: HWRM_CMD_MAX_TIMEOUT);
/* convert timeout to usec */
timeout *= 1000;
@@ -523,17 +569,19 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
} else {
- if (HWRM_WAIT_MUST_ABORT(bp, ctx))
- break;
+ if (hwrm_wait_must_abort(bp, req_type, &sts)) {
+ hwrm_err(bp, ctx, "Resp cmpl intr abandoning msg: 0x%x due to firmware status: 0x%x\n",
+ req_type, sts);
+ goto exit;
+ }
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
}
}
if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) {
- if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
- netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
- le16_to_cpu(ctx->req->req_type));
+ hwrm_err(bp, ctx, "Resp cmpl intr err msg: 0x%x\n",
+ req_type);
goto exit;
}
len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len));
@@ -565,7 +613,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
if (resp_seq != seen_out_of_seq) {
netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n",
le16_to_cpu(resp_seq),
- le16_to_cpu(ctx->req->req_type),
+ req_type,
le16_to_cpu(ctx->req->seq_id));
seen_out_of_seq = resp_seq;
}
@@ -576,40 +624,45 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
} else {
- if (HWRM_WAIT_MUST_ABORT(bp, ctx))
- goto timeout_abort;
+ if (hwrm_wait_must_abort(bp, req_type, &sts)) {
+ hwrm_err(bp, ctx, "Abandoning msg {0x%x 0x%x} len: %d due to firmware status: 0x%x\n",
+ req_type,
+ le16_to_cpu(ctx->req->seq_id),
+ len, sts);
+ goto exit;
+ }
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
}
}
if (i >= tmo_count) {
-timeout_abort:
- if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
- netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
- hwrm_total_timeout(i),
- le16_to_cpu(ctx->req->req_type),
- le16_to_cpu(ctx->req->seq_id), len);
+ hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n",
+ hwrm_total_timeout(i), req_type,
+ le16_to_cpu(ctx->req->seq_id), len);
goto exit;
}
/* Last byte of resp contains valid bit */
valid = ((u8 *)ctx->resp) + len - 1;
- for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
+ for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; ) {
/* make sure we read from updated DMA memory */
dma_rmb();
if (*valid)
break;
- usleep_range(1, 5);
+ if (j < 10) {
+ udelay(1);
+ j++;
+ } else {
+ usleep_range(20, 30);
+ j += 20;
+ }
}
if (j >= HWRM_VALID_BIT_DELAY_USEC) {
- if (!(ctx->flags & BNXT_HWRM_CTX_SILENT))
- netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
- hwrm_total_timeout(i),
- le16_to_cpu(ctx->req->req_type),
- le16_to_cpu(ctx->req->seq_id), len,
- *valid);
+ hwrm_err(bp, ctx, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n",
+ hwrm_total_timeout(i) + j, req_type,
+ le16_to_cpu(ctx->req->seq_id), len, *valid);
goto exit;
}
}
@@ -620,11 +673,12 @@ timeout_abort:
*/
*valid = 0;
rc = le16_to_cpu(ctx->resp->error_code);
- if (rc && !(ctx->flags & BNXT_HWRM_CTX_SILENT)) {
- netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
- le16_to_cpu(ctx->resp->req_type),
- le16_to_cpu(ctx->resp->seq_id), rc);
- }
+ if (rc == HWRM_ERR_CODE_BUSY && !(ctx->flags & BNXT_HWRM_CTX_SILENT))
+ netdev_warn(bp->dev, "FW returned busy, hwrm req_type 0x%x\n",
+ req_type);
+ else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
+ hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+ req_type, token->seq_id, rc);
rc = __hwrm_to_stderr(rc);
exit:
if (token)
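
The valid-bit wait above replaces the old fixed usleep_range() loop with a two-stage poll: busy-wait in 1 us steps for roughly the first 10 us so fast firmware responses complete with minimal added latency, then back off to 20-30 us sleeps for the rest of the (now 50 ms) budget. A minimal standalone sketch of that pattern, with poll_valid_byte() and budget_usec as illustrative names rather than driver symbols:

static bool poll_valid_byte(const volatile u8 *valid, unsigned int budget_usec)
{
	unsigned int elapsed = 0;

	while (elapsed < budget_usec) {
		/* make sure we read from updated DMA memory */
		dma_rmb();
		if (*valid)
			return true;
		if (elapsed < 10) {
			udelay(1);		/* short busy-wait window */
			elapsed += 1;
		} else {
			usleep_range(20, 30);	/* then yield the CPU */
			elapsed += 20;
		}
	}
	return false;
}
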
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index 4d17f0d5363b..c98032e38188 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -58,11 +58,10 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
-#define HWRM_CMD_MAX_TIMEOUT 40000
+#define HWRM_CMD_MAX_TIMEOUT 40000U
#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
-#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12)
#define BNXT_HWRM_TARGET 0xffff
#define BNXT_HWRM_NO_CMPL_RING -1
#define BNXT_HWRM_REQ_MAX_SIZE 128
@@ -83,10 +82,6 @@ void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s);
#define HWRM_MIN_TIMEOUT 25
#define HWRM_MAX_TIMEOUT 40
-#define HWRM_WAIT_MUST_ABORT(bp, ctx) \
- (le16_to_cpu((ctx)->req->req_type) != HWRM_VER_GET && \
- !bnxt_is_fw_healthy(bp))
-
static inline unsigned int hwrm_total_timeout(unsigned int n)
{
return n <= HWRM_SHORT_TIMEOUT_COUNTER ? n * HWRM_SHORT_MIN_TIMEOUT :
@@ -95,7 +90,7 @@ static inline unsigned int hwrm_total_timeout(unsigned int n)
}
-#define HWRM_VALID_BIT_DELAY_USEC 150
+#define HWRM_VALID_BIT_DELAY_USEC 50000
static inline bool bnxt_cfa_hwrm_message(u16 req_type)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 8388be119f9a..e46689128e32 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -14,11 +14,26 @@
#include <linux/net_tstamp.h>
#include <linux/timekeeping.h>
#include <linux/ptp_classify.h>
+#include <linux/clocksource.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ptp.h"
+static int bnxt_ptp_cfg_settime(struct bnxt *bp, u64 time)
+{
+ struct hwrm_func_ptp_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_SET_TIME);
+ req->ptp_set_time = cpu_to_le64(time);
+ return hwrm_req_send(bp, req);
+}
+
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off)
{
unsigned int ptp_class;
@@ -48,6 +63,9 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info,
ptp_info);
u64 ns = timespec64_to_ns(ts);
+ if (BNXT_PTP_USE_RTC(ptp->bp))
+ return bnxt_ptp_cfg_settime(ptp->bp, ns);
+
spin_lock_bh(&ptp->ptp_lock);
timecounter_init(&ptp->tc, &ptp->cc, ns);
spin_unlock_bh(&ptp->ptp_lock);
@@ -59,14 +77,23 @@ static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,
u64 *ns)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u32 high_before, high_now, low;
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return -EIO;
+ high_before = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
ptp_read_system_prets(sts);
- *ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
ptp_read_system_postts(sts);
- *ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32;
+ high_now = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);
+ if (high_now != high_before) {
+ ptp_read_system_prets(sts);
+ low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);
+ ptp_read_system_postts(sts);
+ }
+ *ns = ((u64)high_now << 32) | low;
+
return 0;
}
@@ -131,23 +158,57 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info,
return 0;
}
+/* Caller holds ptp_lock */
+void bnxt_ptp_update_current_time(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ bnxt_refclk_read(ptp->bp, NULL, &ptp->current_time);
+ WRITE_ONCE(ptp->old_time, ptp->current_time);
+}
+
+static int bnxt_ptp_adjphc(struct bnxt_ptp_cfg *ptp, s64 delta)
+{
+ struct hwrm_port_mac_cfg_input *req;
+ int rc;
+
+ rc = hwrm_req_init(ptp->bp, req, HWRM_PORT_MAC_CFG);
+ if (rc)
+ return rc;
+
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_ADJ_PHASE);
+ req->ptp_adj_phase = cpu_to_le64(delta);
+
+ rc = hwrm_req_send(ptp->bp, req);
+ if (rc) {
+ netdev_err(ptp->bp->dev, "ptp adjphc failed. rc = %x\n", rc);
+ } else {
+ spin_lock_bh(&ptp->ptp_lock);
+ bnxt_ptp_update_current_time(ptp->bp);
+ spin_unlock_bh(&ptp->ptp_lock);
+ }
+
+ return rc;
+}
+
static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
+ if (BNXT_PTP_USE_RTC(ptp->bp))
+ return bnxt_ptp_adjphc(ptp, delta);
+
spin_lock_bh(&ptp->ptp_lock);
timecounter_adjtime(&ptp->tc, delta);
spin_unlock_bh(&ptp->ptp_lock);
return 0;
}
-static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb)
+static int bnxt_ptp_adjfine_rtc(struct bnxt *bp, long scaled_ppm)
{
- struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
- ptp_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
struct hwrm_port_mac_cfg_input *req;
- struct bnxt *bp = ptp->bp;
int rc;
rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
@@ -156,13 +217,29 @@ static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb)
req->ptp_freq_adj_ppb = cpu_to_le32(ppb);
req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
- rc = hwrm_req_send(ptp->bp, req);
+ rc = hwrm_req_send(bp, req);
if (rc)
- netdev_err(ptp->bp->dev,
- "ptp adjfreq failed. rc = %d\n", rc);
+ netdev_err(bp->dev,
+ "ptp adjfine failed. rc = %d\n", rc);
return rc;
}
+static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+ struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
+ ptp_info);
+ struct bnxt *bp = ptp->bp;
+
+ if (!BNXT_MH(bp))
+ return bnxt_ptp_adjfine_rtc(bp, scaled_ppm);
+
+ spin_lock_bh(&ptp->ptp_lock);
+ timecounter_read(&ptp->tc);
+ ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm);
+ spin_unlock_bh(&ptp->ptp_lock);
+ return 0;
+}
+
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
@@ -242,6 +319,40 @@ static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event)
return hwrm_req_send(bp, req);
}
+void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct hwrm_port_mac_cfg_input *req;
+
+ if (!ptp || !ptp->tstamp_filters)
+ return;
+
+ if (hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG))
+ goto out;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) && (ptp->tstamp_filters &
+ (PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE))) {
+ ptp->tstamp_filters &= ~(PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE |
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE);
+ netdev_warn(bp->dev, "Unsupported FW for all RX pkts timestamp filter\n");
+ }
+
+ req->flags = cpu_to_le32(ptp->tstamp_filters);
+ req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+ req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
+
+ if (!hwrm_req_send(bp, req)) {
+ bp->ptp_all_rx_tstamp = !!(ptp->tstamp_filters &
+ PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE);
+ return;
+ }
+ ptp->tstamp_filters = 0;
+out:
+ bp->ptp_all_rx_tstamp = 0;
+ netdev_warn(bp->dev, "Failed to configure HW packet timestamp filters\n");
+}
+
void bnxt_ptp_reapply_pps(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
@@ -329,7 +440,7 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg,
ptp_info);
struct bnxt *bp = ptp->bp;
- u8 pin_id;
+ int pin_id;
int rc;
switch (rq->type) {
@@ -337,6 +448,8 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
/* Configure an External PPS IN */
pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS,
rq->extts.index);
+ if (!TSIO_PIN_VALID(pin_id))
+ return -EOPNOTSUPP;
if (!on)
break;
rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_IN);
@@ -350,6 +463,8 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
/* Configure a Periodic PPS OUT */
pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT,
rq->perout.index);
+ if (!TSIO_PIN_VALID(pin_id))
+ return -EOPNOTSUPP;
if (!on)
break;
@@ -378,27 +493,45 @@ static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info,
static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
- struct hwrm_port_mac_cfg_input *req;
u32 flags = 0;
- int rc;
+ int rc = 0;
- rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
- if (rc)
- return rc;
+ switch (ptp->rx_filter) {
+ case HWTSTAMP_FILTER_ALL:
+ flags = PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_ENABLE;
+ break;
+ case HWTSTAMP_FILTER_NONE:
+ flags = PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
+ if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS)
+ flags |= PORT_MAC_CFG_REQ_FLAGS_ALL_RX_TS_CAPTURE_DISABLE;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ flags = PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+ break;
+ }
- if (ptp->rx_filter)
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
- else
- flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
if (ptp->tx_tstamp_en)
flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
else
flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
- req->flags = cpu_to_le32(flags);
- req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
- req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl);
- return hwrm_req_send(bp, req);
+ ptp->tstamp_filters = flags;
+
+ if (netif_running(bp->dev)) {
+ if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) {
+ rc = bnxt_close_nic(bp, false, false);
+ if (!rc)
+ rc = bnxt_open_nic(bp, false, false);
+ } else {
+ bnxt_ptp_cfg_tstamp_filters(bp);
+ }
+ if (!rc && !ptp->tstamp_filters)
+ rc = -EIO;
+ }
+
+ return rc;
}
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -417,9 +550,6 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
return -EFAULT;
- if (stmpconf.flags)
- return -EINVAL;
-
if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
stmpconf.tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
@@ -432,6 +562,12 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
ptp->rxctl = 0;
ptp->rx_filter = HWTSTAMP_FILTER_NONE;
break;
+ case HWTSTAMP_FILTER_ALL:
+ if (bp->fw_cap & BNXT_FW_CAP_RX_ALL_PKT_TS) {
+ ptp->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+ return -EOPNOTSUPP;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -628,7 +764,7 @@ static const struct ptp_clock_info bnxt_ptp_caps = {
.n_per_out = 0,
.n_pins = 0,
.pps = 0,
- .adjfreq = bnxt_ptp_adjfreq,
+ .adjfine = bnxt_ptp_adjfine,
.adjtime = bnxt_ptp_adjtime,
.do_aux_work = bnxt_ptp_ts_aux_work,
.gettimex64 = bnxt_ptp_gettimex,
@@ -717,7 +853,77 @@ static bool bnxt_pps_config_ok(struct bnxt *bp)
return !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) == !ptp->ptp_info.pin_config;
}
-int bnxt_ptp_init(struct bnxt *bp)
+static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp->ptp_clock) {
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
+ ptp->cc.read = bnxt_cc_read;
+ ptp->cc.mask = CYCLECOUNTER_MASK(48);
+ if (BNXT_MH(bp)) {
+ /* Use timecounter based non-real time mode */
+ ptp->cc.shift = BNXT_CYCLES_SHIFT;
+ ptp->cc.mult = clocksource_khz2mult(BNXT_DEVCLK_FREQ, ptp->cc.shift);
+ ptp->cmult = ptp->cc.mult;
+ } else {
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
+ }
+ ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
+ }
+ if (init_tc)
+ timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+}
+
+/* Caller holds ptp_lock */
+void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns)
+{
+ timecounter_init(&ptp->tc, &ptp->cc, ns);
+ /* For RTC, cycle_last must be in sync with the timecounter value. */
+ ptp->tc.cycle_last = ns & ptp->cc.mask;
+}
+
+int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
+{
+ struct timespec64 tsp;
+ u64 ns;
+ int rc;
+
+ if (!bp->ptp_cfg || !BNXT_PTP_USE_RTC(bp))
+ return -ENODEV;
+
+ if (!phc_cfg) {
+ ktime_get_real_ts64(&tsp);
+ ns = timespec64_to_ns(&tsp);
+ rc = bnxt_ptp_cfg_settime(bp, ns);
+ if (rc)
+ return rc;
+ } else {
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, &ns);
+ if (rc)
+ return rc;
+ }
+ spin_lock_bh(&bp->ptp_cfg->ptp_lock);
+ bnxt_ptp_rtc_timecounter_init(bp->ptp_cfg, ns);
+ spin_unlock_bh(&bp->ptp_cfg->ptp_lock);
+
+ return 0;
+}
+
+static void bnxt_ptp_free(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (ptp->ptp_clock) {
+ ptp_clock_unregister(ptp->ptp_clock);
+ ptp->ptp_clock = NULL;
+ kfree(ptp->ptp_info.pin_config);
+ ptp->ptp_info.pin_config = NULL;
+ }
+}
+
+int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
int rc;
@@ -732,23 +938,20 @@ int bnxt_ptp_init(struct bnxt *bp)
if (ptp->ptp_clock && bnxt_pps_config_ok(bp))
return 0;
- if (ptp->ptp_clock) {
- ptp_clock_unregister(ptp->ptp_clock);
- ptp->ptp_clock = NULL;
- kfree(ptp->ptp_info.pin_config);
- ptp->ptp_info.pin_config = NULL;
- }
+ bnxt_ptp_free(bp);
+
atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
spin_lock_init(&ptp->ptp_lock);
- memset(&ptp->cc, 0, sizeof(ptp->cc));
- ptp->cc.read = bnxt_cc_read;
- ptp->cc.mask = CYCLECOUNTER_MASK(48);
- ptp->cc.shift = 0;
- ptp->cc.mult = 1;
-
- ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD;
- timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+ if (BNXT_PTP_USE_RTC(bp)) {
+ bnxt_ptp_timecounter_init(bp, false);
+ rc = bnxt_ptp_init_rtc(bp, phc_cfg);
+ if (rc)
+ goto out;
+ } else {
+ bnxt_ptp_timecounter_init(bp, true);
+ bnxt_ptp_adjfine_rtc(bp, 0);
+ }
ptp->ptp_info = bnxt_ptp_caps;
if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
@@ -760,8 +963,8 @@ int bnxt_ptp_init(struct bnxt *bp)
int err = PTR_ERR(ptp->ptp_clock);
ptp->ptp_clock = NULL;
- bnxt_unmap_ptp_regs(bp);
- return err;
+ rc = err;
+ goto out;
}
if (bp->flags & BNXT_FLAG_CHIP_P5) {
spin_lock_bh(&ptp->ptp_lock);
@@ -771,6 +974,11 @@ int bnxt_ptp_init(struct bnxt *bp)
ptp_schedule_worker(ptp->ptp_clock, 0);
}
return 0;
+
+out:
+ bnxt_ptp_free(bp);
+ bnxt_unmap_ptp_regs(bp);
+ return rc;
}
void bnxt_ptp_clear(struct bnxt *bp)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 7c528e1f8713..34162e07a119 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -17,6 +17,8 @@
#define BNXT_PTP_GRC_WIN_BASE 0x6000
#define BNXT_MAX_PHC_DRIFT 31000000
+#define BNXT_CYCLES_SHIFT 23
+#define BNXT_DEVCLK_FREQ 1000000
#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
#define BNXT_HI_TIMER_MASK 0xffff00000000UL
@@ -31,7 +33,7 @@ struct pps_pin {
u8 state;
};
-#define TSIO_PIN_VALID(pin) ((pin) < (BNXT_MAX_TSIO_PINS))
+#define TSIO_PIN_VALID(pin) ((pin) >= 0 && (pin) < (BNXT_MAX_TSIO_PINS))
#define EVENT_DATA2_PPS_EVENT_TYPE(data2) \
((data2) & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE)
@@ -88,8 +90,9 @@ struct bnxt_ptp_cfg {
u64 old_time;
unsigned long next_period;
unsigned long next_overflow_check;
- /* 48-bit PHC overflows in 78 hours. Check overflow every 19 hours. */
- #define BNXT_PHC_OVERFLOW_PERIOD (19 * 3600 * HZ)
+ u32 cmult;
+ /* a 23b shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. */
+ #define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ)
u16 tx_seqid;
u16 tx_hdr_off;
@@ -113,6 +116,7 @@ struct bnxt_ptp_cfg {
BNXT_PTP_MSG_PDELAY_RESP)
u8 tx_tstamp_en:1;
int rx_filter;
+ u32 tstamp_filters;
u32 refclk_regs[2];
u32 refclk_mapped_regs[2];
@@ -131,12 +135,16 @@ do { \
#endif
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
+void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
+void bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
-int bnxt_ptp_init(struct bnxt *bp);
+void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
+int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
+int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg);
void bnxt_ptp_clear(struct bnxt *bp);
#endif
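
bnxt_refclk_read() above now reads the split 48-bit PHC as high word, low word, high word, and retries the low word when the high word moved in between, so a low-word rollover between the two MMIO reads cannot produce a time that is off by 2^32 cycles. A minimal sketch of the same rollover guard, with read_hi()/read_lo() as hypothetical register accessors:

static u64 read_split_counter(void)
{
	u32 hi_before, hi_now, lo;

	hi_before = read_hi();
	lo = read_lo();
	hi_now = read_hi();
	if (hi_now != hi_before)	/* low word wrapped mid-read */
		lo = read_lo();
	return ((u64)hi_now << 32) | lo;
}
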
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 1d177fed44a6..dde327f2c57e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -307,7 +307,7 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
return -EINVAL;
}
- if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
+ if (min_tx_rate > pf_link_speed) {
netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
min_tx_rate, vf_id);
return -EINVAL;
@@ -623,7 +623,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
if (bp->flags & BNXT_FLAG_CHIP_P5)
- hw_resc->max_irqs -= vf_msix * n;
+ hw_resc->max_nqs -= vf_msix;
rc = pf->active_vfs;
}
@@ -749,7 +749,6 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
*num_vfs = rc;
}
- bnxt_ulp_sriov_cfg(bp, *num_vfs);
return 0;
}
@@ -826,12 +825,31 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
if (rc)
goto err_out2;
+ if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return 0;
+
+ /* Create representors for VFs in switchdev mode */
+ devl_lock(bp->dl);
+ rc = bnxt_vf_reps_create(bp);
+ devl_unlock(bp->dl);
+ if (rc) {
+ netdev_info(bp->dev, "Cannot enable VFS as representors cannot be created\n");
+ goto err_out3;
+ }
+
return 0;
+err_out3:
+ /* Disable SR-IOV */
+ pci_disable_sriov(bp->pdev);
+
err_out2:
/* Free the resources reserved for various VF's */
bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
+ /* Restore the max resources */
+ bnxt_hwrm_func_qcaps(bp);
+
err_out1:
bnxt_free_vf_resources(bp);
@@ -846,7 +864,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
return;
/* synchronize VF and VF-rep create and destroy */
- mutex_lock(&bp->sriov_lock);
+ devl_lock(bp->dl);
bnxt_vf_reps_destroy(bp);
if (pci_vfs_assigned(bp->pdev)) {
@@ -859,7 +877,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
/* Free the HW resources reserved for various VF's */
bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
}
- mutex_unlock(&bp->sriov_lock);
+ devl_unlock(bp->dl);
bnxt_free_vf_resources(bp);
@@ -867,8 +885,6 @@ void bnxt_sriov_disable(struct bnxt *bp)
rtnl_lock();
bnxt_restore_pf_fw_resources(bp);
rtnl_unlock();
-
- bnxt_ulp_sriov_cfg(bp, 0);
}
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 1471b6130a2b..d8afcf8d6b30 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -1962,7 +1962,7 @@ static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, v
void *data,
void (*cleanup)(struct flow_block_cb *block_cb))
{
- if (!bnxt_is_netdev_indr_offload(netdev))
+ if (!netdev || !bnxt_is_netdev_indr_offload(netdev))
return -EOPNOTSUPP;
switch (type) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index fde0c3e8ac57..852eb449ccae 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -19,89 +19,26 @@
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>
+#include <linux/auxiliary_bus.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
-static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
- struct bnxt_ulp_ops *ulp_ops, void *handle)
-{
- struct net_device *dev = edev->net;
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_ulp *ulp;
-
- ASSERT_RTNL();
- if (ulp_id >= BNXT_MAX_ULP)
- return -EINVAL;
-
- ulp = &edev->ulp_tbl[ulp_id];
- if (rcu_access_pointer(ulp->ulp_ops)) {
- netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
- return -EBUSY;
- }
- if (ulp_id == BNXT_ROCE_ULP) {
- unsigned int max_stat_ctxs;
-
- max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
- if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
- bp->cp_nr_rings == max_stat_ctxs)
- return -ENOMEM;
- }
-
- atomic_set(&ulp->ref_count, 0);
- ulp->handle = handle;
- rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
-
- if (ulp_id == BNXT_ROCE_ULP) {
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_hwrm_vnic_cfg(bp, 0);
- }
-
- return 0;
-}
-
-static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
-{
- struct net_device *dev = edev->net;
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_ulp *ulp;
- int i = 0;
-
- ASSERT_RTNL();
- if (ulp_id >= BNXT_MAX_ULP)
- return -EINVAL;
-
- ulp = &edev->ulp_tbl[ulp_id];
- if (!rcu_access_pointer(ulp->ulp_ops)) {
- netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
- return -EINVAL;
- }
- if (ulp_id == BNXT_ROCE_ULP && ulp->msix_requested)
- edev->en_ops->bnxt_free_msix(edev, ulp_id);
-
- if (ulp->max_async_event_id)
- bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
-
- RCU_INIT_POINTER(ulp->ulp_ops, NULL);
- synchronize_rcu();
- ulp->max_async_event_id = 0;
- ulp->async_events_bmap = NULL;
- while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
- msleep(100);
- i++;
- }
- return 0;
-}
+static DEFINE_IDA(bnxt_aux_dev_ids);
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
struct bnxt_en_dev *edev = bp->edev;
int num_msix, idx, i;
- num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
- idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
+ if (!edev->ulp_tbl->msix_requested) {
+ netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
+ return;
+ }
+ num_msix = edev->ulp_tbl->msix_requested;
+ idx = edev->ulp_tbl->msix_base;
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
@@ -115,125 +52,95 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
}
}
-static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
- struct bnxt_msix_entry *ent, int num_msix)
+int bnxt_register_dev(struct bnxt_en_dev *edev,
+ struct bnxt_ulp_ops *ulp_ops,
+ void *handle)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
- struct bnxt_hw_resc *hw_resc;
- int max_idx, max_cp_rings;
- int avail_msix, idx;
- int total_vecs;
- int rc = 0;
-
- ASSERT_RTNL();
- if (ulp_id != BNXT_ROCE_ULP)
- return -EINVAL;
-
- if (!(bp->flags & BNXT_FLAG_USING_MSIX))
- return -ENODEV;
+ unsigned int max_stat_ctxs;
+ struct bnxt_ulp *ulp;
- if (edev->ulp_tbl[ulp_id].msix_requested)
- return -EAGAIN;
+ max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
+ if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
+ bp->cp_nr_rings == max_stat_ctxs)
+ return -ENOMEM;
- max_cp_rings = bnxt_get_max_func_cp_rings(bp);
- avail_msix = bnxt_get_avail_msix(bp, num_msix);
- if (!avail_msix)
+ ulp = edev->ulp_tbl;
+ if (!ulp)
return -ENOMEM;
- if (avail_msix > num_msix)
- avail_msix = num_msix;
-
- if (BNXT_NEW_RM(bp)) {
- idx = bp->cp_nr_rings;
- } else {
- max_idx = min_t(int, bp->total_irqs, max_cp_rings);
- idx = max_idx - avail_msix;
- }
- edev->ulp_tbl[ulp_id].msix_base = idx;
- edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
- hw_resc = &bp->hw_resc;
- total_vecs = idx + avail_msix;
- if (bp->total_irqs < total_vecs ||
- (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
- if (netif_running(dev)) {
- bnxt_close_nic(bp, true, false);
- rc = bnxt_open_nic(bp, true, false);
- } else {
- rc = bnxt_reserve_rings(bp, true);
- }
- }
- if (rc) {
- edev->ulp_tbl[ulp_id].msix_requested = 0;
- return -EAGAIN;
- }
- if (BNXT_NEW_RM(bp)) {
- int resv_msix;
+ ulp->handle = handle;
+ rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
- resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
- avail_msix = min_t(int, resv_msix, avail_msix);
- edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
- }
- bnxt_fill_msix_vecs(bp, ent);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_hwrm_vnic_cfg(bp, 0);
+
+ bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
- return avail_msix;
+ return 0;
}
+EXPORT_SYMBOL(bnxt_register_dev);
-static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
+void bnxt_unregister_dev(struct bnxt_en_dev *edev)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ulp *ulp;
+ int i = 0;
- ASSERT_RTNL();
- if (ulp_id != BNXT_ROCE_ULP)
- return -EINVAL;
+ ulp = edev->ulp_tbl;
+ if (ulp->msix_requested)
+ edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
- if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
- return 0;
+ if (ulp->max_async_event_id)
+ bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
- edev->ulp_tbl[ulp_id].msix_requested = 0;
- edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
- if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) {
- bnxt_close_nic(bp, true, false);
- bnxt_open_nic(bp, true, false);
+ RCU_INIT_POINTER(ulp->ulp_ops, NULL);
+ synchronize_rcu();
+ ulp->max_async_event_id = 0;
+ ulp->async_events_bmap = NULL;
+ while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
+ msleep(100);
+ i++;
}
- return 0;
+ return;
}
+EXPORT_SYMBOL(bnxt_unregister_dev);
int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_en_dev *edev = bp->edev;
+ u32 roce_msix = BNXT_VF(bp) ?
+ BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
- return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
- }
- return 0;
+ return ((bp->flags & BNXT_FLAG_ROCE_CAP) ?
+ min_t(u32, roce_msix, num_online_cpus()) : 0);
}
int bnxt_get_ulp_msix_base(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ if (bnxt_ulp_registered(bp->edev)) {
struct bnxt_en_dev *edev = bp->edev;
- if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
- return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
+ if (edev->ulp_tbl->msix_requested)
+ return edev->ulp_tbl->msix_base;
}
return 0;
}
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ if (bnxt_ulp_registered(bp->edev)) {
struct bnxt_en_dev *edev = bp->edev;
- if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+ if (edev->ulp_tbl->msix_requested)
return BNXT_MIN_ROCE_STAT_CTXS;
}
return 0;
}
-static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
+int bnxt_send_msg(struct bnxt_en_dev *edev,
struct bnxt_fw_msg *fw_msg)
{
struct net_device *dev = edev->net;
@@ -243,7 +150,7 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
u32 resp_len;
int rc;
- if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state)
+ if (bp->fw_reset_state)
return -EBUSY;
rc = hwrm_req_init(bp, req, 0 /* don't care */);
@@ -267,42 +174,36 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
hwrm_req_drop(bp, req);
return rc;
}
-
-static void bnxt_ulp_get(struct bnxt_ulp *ulp)
-{
- atomic_inc(&ulp->ref_count);
-}
-
-static void bnxt_ulp_put(struct bnxt_ulp *ulp)
-{
- atomic_dec(&ulp->ref_count);
-}
+EXPORT_SYMBOL(bnxt_send_msg);
void bnxt_ulp_stop(struct bnxt *bp)
{
+ struct bnxt_aux_priv *aux_priv = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
if (!edev)
return;
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+ if (aux_priv) {
+ struct auxiliary_device *adev;
- ops = rtnl_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_stop)
- continue;
- ops->ulp_stop(ulp->handle);
+ adev = &aux_priv->aux_dev;
+ if (adev->dev.driver) {
+ struct auxiliary_driver *adrv;
+ pm_message_t pm = {};
+
+ adrv = to_auxiliary_drv(adev->dev.driver);
+ edev->en_state = bp->state;
+ adrv->suspend(adev, pm);
+ }
}
}
void bnxt_ulp_start(struct bnxt *bp, int err)
{
+ struct bnxt_aux_priv *aux_priv = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
if (!edev)
return;
@@ -312,58 +213,19 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
if (err)
return;
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
-
- ops = rtnl_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_start)
- continue;
- ops->ulp_start(ulp->handle);
- }
-}
-
-void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
-{
- struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
-
- if (!edev)
- return;
+ if (aux_priv) {
+ struct auxiliary_device *adev;
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+ adev = &aux_priv->aux_dev;
+ if (adev->dev.driver) {
+ struct auxiliary_driver *adrv;
- rcu_read_lock();
- ops = rcu_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_sriov_config) {
- rcu_read_unlock();
- continue;
+ adrv = to_auxiliary_drv(adev->dev.driver);
+ edev->en_state = bp->state;
+ adrv->resume(adev);
}
- bnxt_ulp_get(ulp);
- rcu_read_unlock();
- ops->ulp_sriov_config(ulp->handle, num_vfs);
- bnxt_ulp_put(ulp);
}
-}
-void bnxt_ulp_shutdown(struct bnxt *bp)
-{
- struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
-
- if (!edev)
- return;
-
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
-
- ops = rtnl_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_shutdown)
- continue;
- ops->ulp_shutdown(ulp->handle);
- }
}
void bnxt_ulp_irq_stop(struct bnxt *bp)
@@ -374,8 +236,8 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
+ if (bnxt_ulp_registered(bp->edev)) {
+ struct bnxt_ulp *ulp = edev->ulp_tbl;
if (!ulp->msix_requested)
return;
@@ -395,8 +257,8 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
+ if (bnxt_ulp_registered(bp->edev)) {
+ struct bnxt_ulp *ulp = edev->ulp_tbl;
struct bnxt_msix_entry *ent = NULL;
if (!ulp->msix_requested)
@@ -418,46 +280,15 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
}
}
-void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
-{
- u16 event_id = le16_to_cpu(cmpl->event_id);
- struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
-
- if (!edev)
- return;
-
- rcu_read_lock();
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
-
- ops = rcu_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_async_notifier)
- continue;
- if (!ulp->async_events_bmap ||
- event_id > ulp->max_async_event_id)
- continue;
-
- /* Read max_async_event_id first before testing the bitmap. */
- smp_rmb();
- if (test_bit(event_id, ulp->async_events_bmap))
- ops->ulp_async_notifier(ulp->handle, cmpl);
- }
- rcu_read_unlock();
-}
-
-static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
- unsigned long *events_bmap, u16 max_id)
+int bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap,
+ u16 max_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ulp *ulp;
- if (ulp_id >= BNXT_MAX_ULP)
- return -EINVAL;
-
- ulp = &edev->ulp_tbl[ulp_id];
+ ulp = edev->ulp_tbl;
ulp->async_events_bmap = events_bmap;
/* Make sure bnxt_ulp_async_events() sees this order */
smp_wmb();
@@ -465,38 +296,124 @@ static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
return 0;
}
+EXPORT_SYMBOL(bnxt_register_async_events);
+
+void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
+{
+ struct bnxt_aux_priv *aux_priv;
+ struct auxiliary_device *adev;
+
+ /* Skip if no auxiliary device init was done. */
+ if (!bp->aux_priv)
+ return;
-static const struct bnxt_en_ops bnxt_en_ops_tbl = {
- .bnxt_register_device = bnxt_register_dev,
- .bnxt_unregister_device = bnxt_unregister_dev,
- .bnxt_request_msix = bnxt_req_msix_vecs,
- .bnxt_free_msix = bnxt_free_msix_vecs,
- .bnxt_send_fw_msg = bnxt_send_msg,
- .bnxt_register_fw_async_events = bnxt_register_async_events,
-};
+ aux_priv = bp->aux_priv;
+ adev = &aux_priv->aux_dev;
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
-struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
+static void bnxt_aux_dev_release(struct device *dev)
{
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_en_dev *edev;
+ struct bnxt_aux_priv *aux_priv =
+ container_of(dev, struct bnxt_aux_priv, aux_dev.dev);
+ struct bnxt *bp = netdev_priv(aux_priv->edev->net);
+
+ ida_free(&bnxt_aux_dev_ids, aux_priv->id);
+ kfree(aux_priv->edev->ulp_tbl);
+ bp->edev = NULL;
+ kfree(aux_priv->edev);
+ kfree(aux_priv);
+ bp->aux_priv = NULL;
+}
+
+static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
+{
+ edev->net = bp->dev;
+ edev->pdev = bp->pdev;
+ edev->l2_db_size = bp->db_size;
+ edev->l2_db_size_nc = bp->db_size;
- edev = bp->edev;
- if (!edev) {
- edev = kzalloc(sizeof(*edev), GFP_KERNEL);
- if (!edev)
- return ERR_PTR(-ENOMEM);
- edev->en_ops = &bnxt_en_ops_tbl;
- edev->net = dev;
- edev->pdev = bp->pdev;
- edev->l2_db_size = bp->db_size;
- edev->l2_db_size_nc = bp->db_size;
- bp->edev = edev;
- }
- edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP;
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
- return bp->edev;
+ if (bp->flags & BNXT_FLAG_VF)
+ edev->flags |= BNXT_EN_FLAG_VF;
+
+ edev->chip_num = bp->chip_num;
+ edev->hw_ring_stats_size = bp->hw_ring_stats_size;
+ edev->pf_port_id = bp->pf.port_id;
+ edev->en_state = bp->state;
+
+ edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
+}
+
+void bnxt_rdma_aux_device_init(struct bnxt *bp)
+{
+ struct auxiliary_device *aux_dev;
+ struct bnxt_aux_priv *aux_priv;
+ struct bnxt_en_dev *edev;
+ struct bnxt_ulp *ulp;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
+ return;
+
+ aux_priv = kzalloc(sizeof(*bp->aux_priv), GFP_KERNEL);
+ if (!aux_priv)
+ goto exit;
+
+ aux_priv->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
+ if (aux_priv->id < 0) {
+ netdev_warn(bp->dev,
+ "ida alloc failed for ROCE auxiliary device\n");
+ kfree(aux_priv);
+ goto exit;
+ }
+
+ aux_dev = &aux_priv->aux_dev;
+ aux_dev->id = aux_priv->id;
+ aux_dev->name = "rdma";
+ aux_dev->dev.parent = &bp->pdev->dev;
+ aux_dev->dev.release = bnxt_aux_dev_release;
+
+ rc = auxiliary_device_init(aux_dev);
+ if (rc) {
+ ida_free(&bnxt_aux_dev_ids, aux_priv->id);
+ kfree(aux_priv);
+ goto exit;
+ }
+ bp->aux_priv = aux_priv;
+
+ /* From this point, all cleanup will happen via the .release callback &
+ * any error unwinding will need to include a call to
+ * auxiliary_device_uninit.
+ */
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ goto aux_dev_uninit;
+
+ ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
+ if (!ulp)
+ goto aux_dev_uninit;
+
+ edev->ulp_tbl = ulp;
+ aux_priv->edev = edev;
+ bp->edev = edev;
+ bnxt_set_edev_info(edev, bp);
+
+ rc = auxiliary_device_add(aux_dev);
+ if (rc) {
+ netdev_warn(bp->dev,
+ "Failed to add auxiliary device for ROCE\n");
+ goto aux_dev_uninit;
+ }
+
+ return;
+
+aux_dev_uninit:
+ auxiliary_device_uninit(aux_dev);
+exit:
+ bp->flags &= ~BNXT_FLAG_ROCE_CAP;
}
-EXPORT_SYMBOL(bnxt_ulp_probe);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 54d59f681b86..80cbc4b6130a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -15,6 +15,8 @@
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
+#define BNXT_MAX_ROCE_MSIX 9
+#define BNXT_MAX_VF_ROCE_MSIX 2
struct hwrm_async_event_cmpl;
struct bnxt;
@@ -26,12 +28,6 @@ struct bnxt_msix_entry {
};
struct bnxt_ulp_ops {
- /* async_notifier() cannot sleep (in BH context) */
- void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
- void (*ulp_stop)(void *);
- void (*ulp_start)(void *);
- void (*ulp_sriov_config)(void *, int);
- void (*ulp_shutdown)(void *);
void (*ulp_irq_stop)(void *);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
@@ -57,6 +53,7 @@ struct bnxt_ulp {
struct bnxt_en_dev {
struct net_device *net;
struct pci_dev *pdev;
+ struct bnxt_msix_entry msix_entries[BNXT_MAX_ROCE_MSIX];
u32 flags;
#define BNXT_EN_FLAG_ROCEV1_CAP 0x1
#define BNXT_EN_FLAG_ROCEV2_CAP 0x2
@@ -64,8 +61,10 @@ struct bnxt_en_dev {
BNXT_EN_FLAG_ROCEV2_CAP)
#define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
- const struct bnxt_en_ops *en_ops;
- struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
+ #define BNXT_EN_FLAG_VF 0x10
+#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
+
+ struct bnxt_ulp *ulp_tbl;
int l2_db_size; /* Doorbell BAR size in
* bytes mapped by L2
* driver.
@@ -74,24 +73,19 @@ struct bnxt_en_dev {
* bytes mapped as non-
* cacheable.
*/
+ u16 chip_num;
+ u16 hw_ring_stats_size;
+ u16 pf_port_id;
+ unsigned long en_state; /* Could be checked in
+ * RoCE driver suspend
+ * mode only. Will be
+ * updated in resume.
+ */
};
-struct bnxt_en_ops {
- int (*bnxt_register_device)(struct bnxt_en_dev *, int,
- struct bnxt_ulp_ops *, void *);
- int (*bnxt_unregister_device)(struct bnxt_en_dev *, int);
- int (*bnxt_request_msix)(struct bnxt_en_dev *, int,
- struct bnxt_msix_entry *, int);
- int (*bnxt_free_msix)(struct bnxt_en_dev *, int);
- int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, int,
- struct bnxt_fw_msg *);
- int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, int,
- unsigned long *, u16);
-};
-
-static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
+static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
{
- if (edev && rcu_access_pointer(edev->ulp_tbl[ulp_id].ulp_ops))
+ if (edev && edev->ulp_tbl)
return true;
return false;
}
@@ -102,10 +96,15 @@ int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
-void bnxt_ulp_shutdown(struct bnxt *bp);
void bnxt_ulp_irq_stop(struct bnxt *bp);
void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
-struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
-
+void bnxt_rdma_aux_device_uninit(struct bnxt *bp);
+void bnxt_rdma_aux_device_init(struct bnxt *bp);
+int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
+ void *handle);
+void bnxt_unregister_dev(struct bnxt_en_dev *edev);
+int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
+int bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id);
#endif
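
The ULP rework above moves the RoCE interface onto the auxiliary bus: bnxt_rdma_aux_device_init() allocates an id, initializes and adds an auxiliary device named "rdma", and once auxiliary_device_init() has succeeded it relies on the .release callback for the final cleanup. A minimal sketch of that lifecycle, assuming only a parent device, an IDA for ids, and a release callback that frees the private data:

static int create_rdma_aux_dev(struct auxiliary_device *adev,
			       struct device *parent, struct ida *ids,
			       void (*release)(struct device *dev))
{
	int id, rc;

	id = ida_alloc(ids, GFP_KERNEL);
	if (id < 0)
		return id;

	adev->id = id;
	adev->name = "rdma";
	adev->dev.parent = parent;
	adev->dev.release = release;

	rc = auxiliary_device_init(adev);
	if (rc) {
		ida_free(ids, id);	/* release() has not taken over yet */
		return rc;
	}

	rc = auxiliary_device_add(adev);
	if (rc)
		auxiliary_device_uninit(adev);	/* drops the ref; release() cleans up */
	return rc;
}

The matching teardown is auxiliary_device_delete() followed by auxiliary_device_uninit(), as bnxt_rdma_aux_device_uninit() does above.
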
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 8eb28e088582..2f1a1f2d2157 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -222,7 +222,7 @@ static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf,
static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev,
@@ -356,10 +356,15 @@ void bnxt_vf_reps_destroy(struct bnxt *bp)
/* un-publish cfa_code_map so that RX path can't see it anymore */
kfree(bp->cfa_code_map);
bp->cfa_code_map = NULL;
- bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
- if (closed)
+ if (closed) {
+ /* Temporarily set legacy mode to avoid re-opening
+ * representors and restore switchdev mode after that.
+ */
+ bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
bnxt_open_nic(bp, false, false);
+ bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+ }
rtnl_unlock();
/* Need to call vf_reps_destroy() outside of rntl_lock
@@ -482,7 +487,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
dev->min_mtu = ETH_ZLEN;
}
-static int bnxt_vf_reps_create(struct bnxt *bp)
+int bnxt_vf_reps_create(struct bnxt *bp)
{
u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
struct bnxt_vf_rep *vf_rep;
@@ -535,7 +540,6 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
/* publish cfa_code_map only after all VF-reps have been initialized */
bp->cfa_code_map = cfa_code_map;
- bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
netif_keep_dst(bp->dev);
return 0;
@@ -559,15 +563,13 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
- int rc = 0;
+ int ret = 0;
- mutex_lock(&bp->sriov_lock);
if (bp->eswitch_mode == mode) {
netdev_info(bp->dev, "already in %s eswitch mode\n",
mode == DEVLINK_ESWITCH_MODE_LEGACY ?
"legacy" : "switchdev");
- rc = -EINVAL;
- goto done;
+ return -EINVAL;
}
switch (mode) {
@@ -578,25 +580,22 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
if (bp->hwrm_spec_code < 0x10803) {
netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n");
- rc = -ENOTSUPP;
- goto done;
+ return -ENOTSUPP;
}
- if (pci_num_vf(bp->pdev) == 0) {
- netdev_info(bp->dev, "Enable VFs before setting switchdev mode\n");
- rc = -EPERM;
- goto done;
- }
- rc = bnxt_vf_reps_create(bp);
+ /* Create representors for existing VFs */
+ if (pci_num_vf(bp->pdev) > 0)
+ ret = bnxt_vf_reps_create(bp);
break;
default:
- rc = -EINVAL;
- goto done;
+ return -EINVAL;
}
-done:
- mutex_unlock(&bp->sriov_lock);
- return rc;
+
+ if (!ret)
+ bp->eswitch_mode = mode;
+
+ return ret;
}
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
index 5637a84884d7..33a965631d0b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -14,6 +14,7 @@
#define MAX_CFA_CODE 65536
+int bnxt_vf_reps_create(struct bnxt *bp);
void bnxt_vf_reps_destroy(struct bnxt *bp);
void bnxt_vf_reps_close(struct bnxt *bp);
void bnxt_vf_reps_open(struct bnxt *bp);
@@ -37,6 +38,11 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
#else
+static inline int bnxt_vf_reps_create(struct bnxt *bp)
+{
+ return 0;
+}
+
static inline void bnxt_vf_reps_close(struct bnxt *bp)
{
}
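
The XDP changes that follow add multi-buffer support: an XDP_TX frame now needs one TX BD for the head buffer plus one per fragment, and the ring-space check is made against that total before any descriptors are written. A minimal sketch of that sizing check, assuming tx_avail comes from the ring accounting:

static bool xdp_tx_frame_fits(struct xdp_buff *xdp, u32 tx_avail)
{
	u32 needed = 1;	/* head buffer */

	if (xdp_buff_has_frags(xdp))
		needed += xdp_get_shared_info_from_buff(xdp)->nr_frags;

	return tx_avail >= needed;
}
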
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index c8083df5e0ab..4efa5fe6972b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -20,38 +20,95 @@
#include "bnxt.h"
#include "bnxt_xdp.h"
+DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
+
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len)
+ dma_addr_t mapping, u32 len,
+ struct xdp_buff *xdp)
{
+ struct skb_shared_info *sinfo;
struct bnxt_sw_tx_bd *tx_buf;
struct tx_bd *txbd;
+ int num_frags = 0;
u32 flags;
u16 prod;
+ int i;
+
+ if (xdp && xdp_buff_has_frags(xdp)) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ num_frags = sinfo->nr_frags;
+ }
+ /* fill up the first buffer */
prod = txr->tx_prod;
tx_buf = &txr->tx_buf_ring[prod];
+ tx_buf->nr_frags = num_frags;
+ if (xdp)
+ tx_buf->page = virt_to_head_page(xdp->data);
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
- flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
- TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
+ flags = (len << TX_BD_LEN_SHIFT) |
+ ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
+ bnxt_lhint_arr[len >> 9];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd->tx_bd_opaque = prod;
txbd->tx_bd_haddr = cpu_to_le64(mapping);
+ /* now let us fill up the frags into the next buffers */
+ for (i = 0; i < num_frags ; i++) {
+ skb_frag_t *frag = &sinfo->frags[i];
+ struct bnxt_sw_tx_bd *frag_tx_buf;
+ struct pci_dev *pdev = bp->pdev;
+ dma_addr_t frag_mapping;
+ int frag_len;
+
+ prod = NEXT_TX(prod);
+ WRITE_ONCE(txr->tx_prod, prod);
+
+ /* first fill up the first buffer */
+ frag_tx_buf = &txr->tx_buf_ring[prod];
+ frag_tx_buf->page = skb_frag_page(frag);
+
+ txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+ frag_len = skb_frag_size(frag);
+ frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
+ frag_len, DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
+ return NULL;
+
+ dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);
+
+ flags = frag_len << TX_BD_LEN_SHIFT;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+ txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
+
+ len = frag_len;
+ }
+
+ flags &= ~TX_BD_LEN;
+ txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
+ TX_BD_FLAGS_PACKET_END);
+ /* Sync TX BD */
+ wmb();
prod = NEXT_TX(prod);
- txr->tx_prod = prod;
+ WRITE_ONCE(txr->tx_prod, prod);
+
return tx_buf;
}
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len, u16 rx_prod)
+ dma_addr_t mapping, u32 len, u16 rx_prod,
+ struct xdp_buff *xdp)
{
struct bnxt_sw_tx_bd *tx_buf;
- tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+ tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp);
tx_buf->rx_prod = rx_prod;
tx_buf->action = XDP_TX;
+
}
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
@@ -61,7 +118,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
{
struct bnxt_sw_tx_bd *tx_buf;
- tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+ tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL);
tx_buf->action = XDP_REDIRECT;
tx_buf->xdpf = xdpf;
dma_unmap_addr_set(tx_buf, mapping, mapping);
@@ -76,7 +133,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
struct bnxt_sw_tx_bd *tx_buf;
u16 tx_cons = txr->tx_cons;
u16 last_tx_cons = tx_cons;
- int i;
+ int i, j, frags;
for (i = 0; i < nr_pkts; i++) {
tx_buf = &txr->tx_buf_ring[tx_cons];
@@ -94,29 +151,83 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
} else if (tx_buf->action == XDP_TX) {
rx_doorbell_needed = true;
last_tx_cons = tx_cons;
+
+ frags = tx_buf->nr_frags;
+ for (j = 0; j < frags; j++) {
+ tx_cons = NEXT_TX(tx_cons);
+ tx_buf = &txr->tx_buf_ring[tx_cons];
+ page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
+ }
}
tx_cons = NEXT_TX(tx_cons);
}
- txr->tx_cons = tx_cons;
+ WRITE_ONCE(txr->tx_cons, tx_cons);
if (rx_doorbell_needed) {
tx_buf = &txr->tx_buf_ring[last_tx_cons];
bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
+
}
}
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
+
+ return !!xdp_prog;
+}
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 cons, u8 *data_ptr, unsigned int len,
+ struct xdp_buff *xdp)
+{
+ struct bnxt_sw_rx_bd *rx_buf;
+ u32 buflen = PAGE_SIZE;
+ struct pci_dev *pdev;
+ dma_addr_t mapping;
+ u32 offset;
+
+ pdev = bp->pdev;
+ rx_buf = &rxr->rx_buf_ring[cons];
+ offset = bp->rx_offset;
+
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+ dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);
+
+ xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
+ xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
+}
+
+void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+ struct xdp_buff *xdp)
+{
+ struct skb_shared_info *shinfo;
+ int i;
+
+ if (!xdp || !xdp_buff_has_frags(xdp))
+ return;
+ shinfo = xdp_get_shared_info_from_buff(xdp);
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ struct page *page = skb_frag_page(&shinfo->frags[i]);
+
+ page_pool_recycle_direct(rxr->page_pool, page);
+ }
+ shinfo->nr_frags = 0;
+}
+
/* returns the following:
* true - packet consumed by XDP and new buffer is allocated.
* false - packet should be passed to the stack.
*/
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
+ struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+ unsigned int *len, u8 *event)
{
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_rx_bd *rx_buf;
struct pci_dev *pdev;
- struct xdp_buff xdp;
dma_addr_t mapping;
+ u32 tx_needed = 1;
void *orig_data;
u32 tx_avail;
u32 offset;
@@ -126,16 +237,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
return false;
pdev = bp->pdev;
- rx_buf = &rxr->rx_buf_ring[cons];
offset = bp->rx_offset;
- mapping = rx_buf->mapping - bp->rx_dma_offset;
- dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
-
txr = rxr->bnapi->tx_ring;
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
- xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
- xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -152,22 +257,36 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
offset = xdp.data - xdp.data_hard_start;
*data_ptr = xdp.data_hard_start + offset;
}
+
switch (act) {
case XDP_PASS:
return false;
case XDP_TX:
- if (tx_avail < 1) {
+ rx_buf = &rxr->rx_buf_ring[cons];
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
+ *event = 0;
+
+ if (unlikely(xdp_buff_has_frags(&xdp))) {
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
+
+ tx_needed += sinfo->nr_frags;
+ *event = BNXT_AGG_EVENT;
+ }
+
+ if (tx_avail < tx_needed) {
trace_xdp_exception(bp->dev, xdp_prog, act);
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
- *event = BNXT_TX_EVENT;
dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
bp->rx_dir);
+
+ *event |= BNXT_TX_EVENT;
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
- NEXT_RX(rxr->rx_prod));
+ NEXT_RX(rxr->rx_prod), &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
case XDP_REDIRECT:
@@ -175,6 +294,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
* redirect is coming from a frame received by the
* bnxt_en driver.
*/
+ rx_buf = &rxr->rx_buf_ring[cons];
+ mapping = rx_buf->mapping - bp->rx_dma_offset;
dma_unmap_page_attrs(&pdev->dev, mapping,
PAGE_SIZE, bp->rx_dir,
DMA_ATTR_WEAK_ORDERING);
@@ -182,6 +303,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
/* if we are unable to allocate a new buffer, abort and reuse */
if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
@@ -195,12 +317,13 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event |= BNXT_REDIRECT_EVENT;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(bp->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
+ bnxt_xdp_buff_frags_free(rxr, &xdp);
bnxt_reuse_rx_data(rxr, cons, page);
break;
}
@@ -227,11 +350,16 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
ring = smp_processor_id() % bp->tx_nr_rings_xdp;
txr = &bp->tx_ring[ring];
+ if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
+ return -EINVAL;
+
+ if (static_branch_unlikely(&bnxt_xdp_locking_key))
+ spin_lock(&txr->xdp_tx_lock);
+
for (i = 0; i < num_frames; i++) {
struct xdp_frame *xdp = frames[i];
- if (!txr || !bnxt_tx_avail(bp, txr) ||
- !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP))
+ if (!bnxt_tx_avail(bp, txr))
break;
mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
@@ -250,6 +378,9 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
}
+ if (static_branch_unlikely(&bnxt_xdp_locking_key))
+ spin_unlock(&txr->xdp_tx_lock);
+
return nxmit;
}
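
The pair of static_branch_unlikely() checks added above means the per-ring xdp_tx_lock is only taken once the bnxt_xdp_locking_key static key has been enabled, presumably when the number of XDP TX rings is smaller than the number of CPUs (the smp_processor_id() % bp->tx_nr_rings_xdp ring selection lets several CPUs land on the same ring); until then the branch is patched out. A minimal sketch of the same pattern with illustrative names (my_xdp_locking_key, my_ring), not the driver's own code:

#include <linux/jump_label.h>
#include <linux/spinlock.h>

/* Illustrative only; just the locking pattern mirrors the hunk above. */
DEFINE_STATIC_KEY_FALSE(my_xdp_locking_key);

struct my_ring {
	spinlock_t xdp_tx_lock;
};

static void my_xdp_xmit(struct my_ring *txr)
{
	/* Compiled to a NOP until static_branch_enable(&my_xdp_locking_key)
	 * is called, e.g. when several CPUs can post to the same ring.
	 */
	if (static_branch_unlikely(&my_xdp_locking_key))
		spin_lock(&txr->xdp_tx_lock);

	/* ... post TX descriptors, ring the doorbell ... */

	if (static_branch_unlikely(&my_xdp_locking_key))
		spin_unlock(&txr->xdp_tx_lock);
}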
@@ -260,8 +391,9 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
int tx_xdp = 0, rc, tc;
struct bpf_prog *old;
- if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
- netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
+ if (prog && !prog->aux->xdp_has_frags &&
+ bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+ netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n",
bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
return -EOPNOTSUPP;
}
@@ -290,9 +422,11 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog) {
bnxt_set_rx_skb_mode(bp, true);
+ xdp_features_set_redirect_target(dev, true);
} else {
int rx, tx;
+ xdp_features_clear_redirect_target(dev);
bnxt_set_rx_skb_mode(bp, false);
bnxt_get_max_rings(bp, &rx, &tx, true);
if (rx > 1) {
@@ -327,3 +461,26 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
return rc;
}
+
+struct sk_buff *
+bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
+ struct page_pool *pool, struct xdp_buff *xdp,
+ struct rx_cmp_ext *rxcmp1)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ if (!skb)
+ return NULL;
+ skb_checksum_none_assert(skb);
+ if (RX_CMP_L4_CS_OK(rxcmp1)) {
+ if (bp->dev->features & NETIF_F_RXCSUM) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = RX_CMP_ENCAP(rxcmp1);
+ }
+ }
+ xdp_update_skb_shared_info(skb, num_frags,
+ sinfo->xdp_frags_size,
+ PAGE_SIZE * sinfo->nr_frags,
+ xdp_buff_is_frag_pfmemalloc(xdp));
+ return skb;
+}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 0df40c3beb05..ea430d6961df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -10,15 +10,29 @@
#ifndef BNXT_XDP_H
#define BNXT_XDP_H
+DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
+
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct bnxt_tx_ring_info *txr,
- dma_addr_t mapping, u32 len);
+ dma_addr_t mapping, u32 len,
+ struct xdp_buff *xdp);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct page *page, u8 **data_ptr, unsigned int *len,
- u8 *event);
+ struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+ unsigned int *len, u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
struct xdp_frame **frames, u32 flags);
+bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
+
+void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+ u16 cons, u8 *data_ptr, unsigned int len,
+ struct xdp_buff *xdp);
+void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
+ struct xdp_buff *xdp);
+struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb,
+ u8 num_frags, struct page_pool *pool,
+ struct xdp_buff *xdp,
+ struct rx_cmp_ext *rxcmp1);
#endif
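
The header now exposes the helpers split out of bnxt_rx_xdp(): bnxt_xdp_attached() checks for an attached program, bnxt_xdp_buff_init() does the DMA sync and xdp_buff setup that used to live inside bnxt_rx_xdp(), and bnxt_xdp_build_skb() copies multi-buffer frag metadata into the skb. A rough, hypothetical caller for orientation only; the real RX-path caller is in bnxt.c and is not part of this diff, and only the prototypes come from the header above:

static bool rx_path_sketch(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   u16 cons, struct page *page, u8 *data_ptr,
			   unsigned int len, u8 *event)
{
	struct xdp_buff xdp;

	if (!bnxt_xdp_attached(bp, rxr))
		return false;			/* no program: plain skb path */

	bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
	if (bnxt_rx_xdp(bp, rxr, cons, xdp, page, &data_ptr, &len, event))
		return true;			/* consumed: TX, redirect or drop */

	/* XDP_PASS: the caller builds an skb and, for multi-buffer frames,
	 * transfers the frags with bnxt_xdp_build_skb().
	 */
	return false;
}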
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index f7f10cfb3476..7926aaef8f0c 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
id_tbl->max = size;
id_tbl->next = next;
spin_lock_init(&id_tbl->lock);
- id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
+ id_tbl->table = bitmap_zalloc(size, GFP_KERNEL);
if (!id_tbl->table)
return -ENOMEM;
@@ -669,7 +669,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
- kfree(id_tbl->table);
+ bitmap_free(id_tbl->table);
id_tbl->table = NULL;
}
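
bitmap_zalloc()/bitmap_free() are the dedicated helpers for exactly this kind of bit array, so the two hunks above keep the allocation size and zero-initialisation identical to the open-coded kcalloc()/kfree() pair. A minimal sketch of the equivalence (helper names are illustrative):

#include <linux/bitmap.h>
#include <linux/slab.h>

static unsigned long *id_bitmap_alloc(u32 nbits)
{
	/* Same storage as kcalloc(BITS_TO_LONGS(nbits), sizeof(long), GFP_KERNEL) */
	return bitmap_zalloc(nbits, GFP_KERNEL);
}

static void id_bitmap_free(unsigned long *map)
{
	bitmap_free(map);	/* was kfree(map) */
}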
@@ -1027,16 +1027,14 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
- &udev->l2_ring_map,
- GFP_KERNEL | __GFP_COMP);
+ &udev->l2_ring_map, GFP_KERNEL);
if (!udev->l2_ring)
return -ENOMEM;
udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
- &udev->l2_buf_map,
- GFP_KERNEL | __GFP_COMP);
+ &udev->l2_buf_map, GFP_KERNEL);
if (!udev->l2_buf) {
__cnic_free_uio_rings(udev);
return -ENOMEM;
@@ -4105,8 +4103,7 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
atomic_set(&cp->csk_tbl[i].ref_count, 0);
- port_id = prandom_u32();
- port_id %= CNIC_LOCAL_PORT_RANGE;
+ port_id = get_random_u32_below(CNIC_LOCAL_PORT_RANGE);
if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
CNIC_LOCAL_PORT_MIN, port_id)) {
cnic_cm_free_mem(dev);
@@ -4165,7 +4162,7 @@ static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
{
u32 seed;
- seed = prandom_u32();
+ seed = get_random_u32();
cnic_ctx_wr(dev, 45, 0, seed);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 226f4403cfed..f28ffc31df22 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -117,24 +117,6 @@ static inline void dmadesc_set(struct bcmgenet_priv *priv,
dmadesc_set_length_status(priv, d, val);
}
-static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
- void __iomem *d)
-{
- dma_addr_t addr;
-
- addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO);
-
- /* Register writes to GISB bus can take couple hundred nanoseconds
- * and are done for each packet, save these expensive writes unless
- * the platform is explicitly configured for 64-bits/LPAE.
- */
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (priv->hw_params->flags & GENET_HAS_40BITS)
- addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32;
-#endif
- return addr;
-}
-
#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x"
#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
@@ -1146,7 +1128,7 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
static void bcmgenet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
+ strscpy(info->driver, "bcmgenet", sizeof(info->driver));
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -1368,7 +1350,7 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
if (!p->eee_enabled) {
bcmgenet_eee_enable_set(dev, false);
} else {
- ret = phy_init_eee(dev->phydev, 0);
+ ret = phy_init_eee(dev->phydev, false);
if (ret) {
netif_err(priv, hw, dev, "EEE initialization failed\n");
return ret;
@@ -1387,7 +1369,8 @@ static int bcmgenet_validate_flow(struct net_device *dev,
struct ethtool_usrip4_spec *l4_mask;
struct ethhdr *eth_mask;
- if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) {
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES &&
+ cmd->fs.location != RX_CLS_LOC_ANY) {
netdev_err(dev, "rxnfc: Invalid location (%d)\n",
cmd->fs.location);
return -EINVAL;
@@ -1452,7 +1435,7 @@ static int bcmgenet_insert_flow(struct net_device *dev,
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *loc_rule;
- int err;
+ int err, i;
if (priv->hw_params->hfb_filter_size < 128) {
netdev_err(dev, "rxnfc: Not supported by this device\n");
@@ -1470,7 +1453,29 @@ static int bcmgenet_insert_flow(struct net_device *dev,
if (err)
return err;
- loc_rule = &priv->rxnfc_rules[cmd->fs.location];
+ if (cmd->fs.location == RX_CLS_LOC_ANY) {
+ list_for_each_entry(loc_rule, &priv->rxnfc_list, list) {
+ cmd->fs.location = loc_rule->fs.location;
+ err = memcmp(&loc_rule->fs, &cmd->fs,
+ sizeof(struct ethtool_rx_flow_spec));
+ if (!err)
+ /* rule exists so return current location */
+ return 0;
+ }
+ for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
+ loc_rule = &priv->rxnfc_rules[i];
+ if (loc_rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
+ cmd->fs.location = i;
+ break;
+ }
+ }
+ if (i == MAX_NUM_OF_FS_RULES) {
+ cmd->fs.location = RX_CLS_LOC_ANY;
+ return -ENOSPC;
+ }
+ } else {
+ loc_rule = &priv->rxnfc_rules[cmd->fs.location];
+ }
if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
@@ -1583,7 +1588,7 @@ static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = bcmgenet_get_num_flows(priv);
- cmd->data = MAX_NUM_OF_FS_RULES;
+ cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL;
break;
case ETHTOOL_GRXCLSRULE:
err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
@@ -2035,6 +2040,11 @@ static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
return skb;
}
+static void bcmgenet_hide_tsb(struct sk_buff *skb)
+{
+ __skb_pull(skb, sizeof(struct status_64));
+}
+
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -2141,6 +2151,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
}
GENET_CB(skb)->last_cb = tx_cb_ptr;
+
+ bcmgenet_hide_tsb(skb);
skb_tx_timestamp(skb);
/* Decrement total BD count and advance our write pointer */
@@ -2287,8 +2299,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
dma_length_status = status->length_status;
if (dev->features & NETIF_F_RXCSUM) {
rx_csum = (__force __be16)(status->rx_csum & 0xffff);
- skb->csum = (__force __wsum)ntohs(rx_csum);
- skb->ip_summed = CHECKSUM_COMPLETE;
+ if (rx_csum) {
+ skb->csum = (__force __wsum)ntohs(rx_csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
}
/* DMA flags and length are still valid no matter how
@@ -2302,6 +2316,14 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
__func__, p_index, ring->c_index,
ring->read_ptr, dma_length_status);
+ if (unlikely(len > RX_BUF_LENGTH)) {
+ netif_err(priv, rx_status, dev, "oversized packet\n");
+ dev->stats.rx_length_errors++;
+ dev->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
@@ -2662,8 +2684,7 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
DMA_END_ADDR);
/* Initialize Tx NAPI */
- netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll);
}
/* Initialize a RDMA ring */
@@ -2699,8 +2720,7 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
bcmgenet_init_rx_coalesce(ring);
/* Initialize Rx NAPI */
- netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
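
Both hunks reflect the newer NAPI registration API: netif_napi_add() no longer takes a weight and uses the default NAPI_POLL_WEIGHT, netif_napi_add_tx() registers a NAPI intended for TX completions, and a driver that really wants a non-default weight (the sb1250 hunk further down keeps 16) uses netif_napi_add_weight(). A short sketch of the three variants, with placeholder poll callbacks (real ones would do work and call napi_complete_done()):

#include <linux/netdevice.h>

static int my_rx_poll(struct napi_struct *napi, int budget) { return 0; }
static int my_tx_poll(struct napi_struct *napi, int budget) { return 0; }

static void my_napi_setup(struct net_device *dev, struct napi_struct *rx_napi,
			  struct napi_struct *tx_napi)
{
	netif_napi_add(dev, rx_napi, my_rx_poll);	/* default weight */
	netif_napi_add_tx(dev, tx_napi, my_tx_poll);	/* TX completion NAPI */
	/* netif_napi_add_weight(dev, rx_napi, my_rx_poll, 16) would keep a
	 * custom weight, as the sb1250 hunk below does.
	 */
}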
@@ -3445,7 +3465,6 @@ static void bcmgenet_netif_stop(struct net_device *dev)
/* Disable MAC transmit. TX DMA disabled must be done before this */
umac_enable_set(priv, CMD_TX_EN, false);
- phy_stop(dev->phydev);
bcmgenet_disable_rx_napi(priv);
bcmgenet_intr_disable(priv);
@@ -3990,6 +4009,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
priv->wol_irq = platform_get_irq_optional(pdev, 2);
+ if (priv->wol_irq == -EPROBE_DEFER) {
+ err = priv->wol_irq;
+ goto err;
+ }
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
@@ -4020,10 +4043,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = true;
- err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
- dev->name, priv);
- if (!err)
- device_set_wakeup_capable(&pdev->dev, 1);
+ if (priv->wol_irq > 0) {
+ err = devm_request_irq(&pdev->dev, priv->wol_irq,
+ bcmgenet_wol_isr, 0, dev->name, priv);
+ if (!err)
+ device_set_wakeup_capable(&pdev->dev, 1);
+ }
/* Set the needed headroom to account for any possible
* features enabling/disabling at runtime
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index e31a5a397f11..3a4b6cb7b7b9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -40,6 +40,13 @@
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+
+ if (!device_can_wakeup(kdev)) {
+ wol->supported = 0;
+ wol->wolopts = 0;
+ return;
+ }
wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
@@ -70,14 +77,18 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts) {
device_set_wakeup_enable(kdev, 1);
/* Avoid unbalanced enable_irq_wake calls */
- if (priv->wol_irq_disabled)
+ if (priv->wol_irq_disabled) {
enable_irq_wake(priv->wol_irq);
+ enable_irq_wake(priv->irq0);
+ }
priv->wol_irq_disabled = false;
} else {
device_set_wakeup_enable(kdev, 0);
/* Avoid unbalanced disable_irq_wake calls */
- if (!priv->wol_irq_disabled)
+ if (!priv->wol_irq_disabled) {
disable_irq_wake(priv->wol_irq);
+ disable_irq_wake(priv->irq0);
+ }
priv->wol_irq_disabled = true;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index c888ddee1fc4..be042905ada2 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -72,7 +72,6 @@ static void bcmgenet_mac_config(struct net_device *dev)
* Receive clock is provided by the PHY.
*/
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
- reg &= ~OOB_DISABLE;
reg |= RGMII_LINK;
bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
@@ -95,10 +94,18 @@ static void bcmgenet_mac_config(struct net_device *dev)
*/
void bcmgenet_mii_setup(struct net_device *dev)
{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
+ u32 reg;
- if (phydev->link)
+ if (phydev->link) {
bcmgenet_mac_config(dev);
+ } else {
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~RGMII_LINK;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ }
+
phy_print_status(phydev);
}
@@ -169,15 +176,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
- u32 reg;
-
- if (!GENET_IS_V5(priv)) {
- /* Speed settings are set in bcmgenet_mii_setup() */
- reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
- reg |= LED_ACT_SOURCE_MAC;
- bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
- }
-
if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
fixed_phy_set_link_update(priv->dev->phydev,
bcmgenet_fixed_phy_link_update);
@@ -210,6 +208,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
if (!phy_name) {
phy_name = "MoCA";
+ if (!GENET_IS_V5(priv))
+ port_ctrl |= LED_ACT_SOURCE_MAC;
bcmgenet_moca_phy_setup(priv);
}
break;
@@ -266,18 +266,20 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
(priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
/* This is an external PHY (xMII), so we need to enable the RGMII
- * block for the interface to work
+	 * block for the interface to work; unconditionally clear the
+ * Out-of-band disable since we do not need it.
*/
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~OOB_DISABLE;
if (priv->ext_phy) {
- reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
reg &= ~ID_MODE_DIS;
reg |= id_mode_dis;
if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
reg |= RGMII_MODE_EN_V123;
else
reg |= RGMII_MODE_EN;
- bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
}
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
if (init)
dev_info(kdev, "configuring instance for %s\n", phy_name);
@@ -393,6 +395,9 @@ int bcmgenet_mii_probe(struct net_device *dev)
if (priv->internal_phy && !GENET_IS_V5(priv))
dev->phydev->irq = PHY_MAC_INTERRUPT;
+ /* Indicate that the MAC is responsible for PHY PM */
+ dev->phydev->mac_managed_pm = true;
+
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index f38f40eb966e..3a6763c5e8b3 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
#include <asm/sibyte/board.h>
#include <asm/sibyte/sb1250.h>
-#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#if defined(CONFIG_SIBYTE_BCM1x80)
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
#define R_MAC_DMA_OODPKTLOST_RX R_MAC_DMA_OODPKTLOST
@@ -87,7 +87,7 @@ MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
#include <asm/sibyte/sb1250_mac.h>
#include <asm/sibyte/sb1250_dma.h>
-#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#if defined(CONFIG_SIBYTE_BCM1x80)
#define UNIT_INT(n) (K_BCM1480_INT_MAC_0 + ((n) * 2))
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#define UNIT_INT(n) (K_INT_MAC_0 + (n))
@@ -1527,7 +1527,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
* Turn on the rest of the bits in the enable register
*/
-#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#if defined(CONFIG_SIBYTE_BCM1x80)
__raw_writeq(M_MAC_RXDMA_EN0 |
M_MAC_TXDMA_EN0, s->sbm_macenable);
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
@@ -2183,9 +2183,7 @@ static int sbmac_init(struct platform_device *pldev, long long base)
ea_reg >>= 8;
}
- for (i = 0; i < 6; i++) {
- dev->dev_addr[i] = eaddr[i];
- }
+ eth_hw_addr_set(dev, eaddr);
/*
* Initialize context (get pointers to registers and stuff), then
@@ -2205,7 +2203,7 @@ static int sbmac_init(struct platform_device *pldev, long long base)
dev->min_mtu = 0;
dev->max_mtu = ENET_PACKET_SIZE;
- netif_napi_add(dev, &sc->napi, sbmac_poll, 16);
+ netif_napi_add_weight(dev, &sc->napi, sbmac_poll, 16);
dev->irq = UNIT_INT(idx);
@@ -2536,7 +2534,12 @@ static int sbmac_probe(struct platform_device *pldev)
int err;
res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
- BUG_ON(!res);
+ if (!res) {
+ printk(KERN_ERR "%s: failed to get resource\n",
+ dev_name(&pldev->dev));
+ err = -EINVAL;
+ goto out_out;
+ }
sbm_base = ioremap(res->start, resource_size(res));
if (!sbm_base) {
printk(KERN_ERR "%s: unable to map device registers\n",
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 85ca3909859d..58747292521d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6179,34 +6179,26 @@ static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
return 0;
}
-static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
- bool neg_adj = false;
- u32 correction = 0;
-
- if (ppb < 0) {
- neg_adj = true;
- ppb = -ppb;
- }
+ u64 correction;
+ bool neg_adj;
/* Frequency adjustment is performed using hardware with a 24 bit
* accumulator and a programmable correction value. On each clk, the
* correction value gets added to the accumulator and when it
* overflows, the time counter is incremented/decremented.
- *
- * So conversion from ppb to correction value is
- * ppb * (1 << 24) / 1000000000
*/
- correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
- TG3_EAV_REF_CLK_CORRECT_MASK;
+ neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
tg3_full_lock(tp, 0);
if (correction)
tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
TG3_EAV_REF_CLK_CORRECT_EN |
- (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
+ (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
+ ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
else
tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
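
With the .adjfine callback the PTP core passes scaled_ppm, i.e. parts per million scaled by 2^16, instead of ppb, so the conversion in the removed comment (ppb * (1 << 24) / 10^9) becomes base * scaled_ppm / (10^6 * 2^16) with base = 1 << 24, since ppb = scaled_ppm * 1000 / 2^16. The hunk delegates this to the in-tree diff_by_scaled_ppm() helper; the standalone sketch below only illustrates the arithmetic, and its function name is made up:

#include <linux/math64.h>

static u64 correction_from_scaled_ppm(long scaled_ppm, bool *neg_adj)
{
	u64 abs_sppm = scaled_ppm < 0 ? -scaled_ppm : scaled_ppm;

	*neg_adj = scaled_ppm < 0;
	/* (1 << 24) * scaled_ppm / (10^6 * 2^16) == scaled_ppm * 2^8 / 10^6 */
	return div_u64(abs_sppm << 8, 1000000);
}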
@@ -6330,7 +6322,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
.n_per_out = 1,
.n_pins = 0,
.pps = 0,
- .adjfreq = tg3_ptp_adjfreq,
+ .adjfine = tg3_ptp_adjfine,
.adjtime = tg3_ptp_adjtime,
.gettimex64 = tg3_ptp_gettimex,
.settime64 = tg3_ptp_settime,
@@ -7380,9 +7372,9 @@ static void tg3_napi_init(struct tg3 *tp)
{
int i;
- netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
+ netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
for (i = 1; i < tp->irq_cnt; i++)
- netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
+ netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
}
static void tg3_napi_fini(struct tg3 *tp)
@@ -7944,7 +7936,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
+ hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
/* HW/FW can not correctly segment packets that have been
* vlan encapsulated.
@@ -11174,7 +11166,7 @@ static void tg3_reset_task(struct work_struct *work)
rtnl_lock();
tg3_full_lock(tp, 0);
- if (!netif_running(tp->dev)) {
+ if (tp->pcierr_recovery || !netif_running(tp->dev)) {
tg3_flag_clear(tp, RESET_TASK_PENDING);
tg3_full_unlock(tp);
rtnl_unlock();
@@ -12302,9 +12294,9 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tg3 *tp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -12390,7 +12382,10 @@ static int tg3_nway_reset(struct net_device *dev)
return r;
}
-static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static void tg3_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
@@ -12411,7 +12406,10 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
ering->tx_pending = tp->napi[0].tx_pending;
}
-static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+static int tg3_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct tg3 *tp = netdev_priv(dev);
int i, irq_sync = 0, err = 0;
@@ -13800,9 +13798,6 @@ static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
return -EFAULT;
- if (stmpconf.flags)
- return -EINVAL;
-
if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
stmpconf.tx_type != HWTSTAMP_TX_OFF)
return -ERANGE;
@@ -18073,16 +18068,20 @@ static void tg3_shutdown(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
+ tg3_reset_task_cancel(tp);
+
rtnl_lock();
+
netif_device_detach(dev);
if (netif_running(dev))
dev_close(dev);
- if (system_state == SYSTEM_POWER_OFF)
- tg3_power_down(tp);
+ tg3_power_down(tp);
rtnl_unlock();
+
+ pci_disable_device(pdev);
}
/**
@@ -18102,6 +18101,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
netdev_info(netdev, "PCI I/O error detected\n");
+ /* Want to make sure that the reset task doesn't run */
+ tg3_reset_task_cancel(tp);
+
rtnl_lock();
/* Could be second call or maybe we don't have netdev yet */
@@ -18118,9 +18120,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
tg3_timer_stop(tp);
- /* Want to make sure that the reset task doesn't run */
- tg3_reset_task_cancel(tp);
-
netif_device_detach(netdev);
/* Clean up software state, even if MMIO is blocked */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
index 8f0ac7b99973..858c92129451 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -18,15 +18,43 @@
/* BFA state machine interfaces */
-typedef void (*bfa_sm_t)(void *sm, int event);
-
/* For converting from state machine function to state encoding. */
-struct bfa_sm_table {
- bfa_sm_t sm; /*!< state machine function */
- int state; /*!< state machine encoding */
- char *name; /*!< state name for display */
-};
-#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
+#define BFA_SM_TABLE(n, s, e, t) \
+struct s; \
+enum e; \
+typedef void (*t)(struct s *, enum e); \
+ \
+struct n ## _sm_table_s { \
+ t sm; /* state machine function */ \
+ int state; /* state machine encoding */ \
+ char *name; /* state name for display */ \
+}; \
+ \
+static inline int \
+n ## _sm_to_state(struct n ## _sm_table_s *smt, t sm) \
+{ \
+ int i = 0; \
+ \
+ while (smt[i].sm && smt[i].sm != sm) \
+ i++; \
+ return smt[i].state; \
+}
+
+BFA_SM_TABLE(iocpf, bfa_iocpf, iocpf_event, bfa_fsm_iocpf_t)
+BFA_SM_TABLE(ioc, bfa_ioc, ioc_event, bfa_fsm_ioc_t)
+BFA_SM_TABLE(cmdq, bfa_msgq_cmdq, cmdq_event, bfa_fsm_msgq_cmdq_t)
+BFA_SM_TABLE(rspq, bfa_msgq_rspq, rspq_event, bfa_fsm_msgq_rspq_t)
+
+BFA_SM_TABLE(ioceth, bna_ioceth, bna_ioceth_event, bna_fsm_ioceth_t)
+BFA_SM_TABLE(enet, bna_enet, bna_enet_event, bna_fsm_enet_t)
+BFA_SM_TABLE(ethport, bna_ethport, bna_ethport_event, bna_fsm_ethport_t)
+BFA_SM_TABLE(tx, bna_tx, bna_tx_event, bna_fsm_tx_t)
+BFA_SM_TABLE(rxf, bna_rxf, bna_rxf_event, bna_fsm_rxf_t)
+BFA_SM_TABLE(rx, bna_rx, bna_rx_event, bna_fsm_rx_t)
+
+#undef BFA_SM_TABLE
+
+#define BFA_SM(_sm) (_sm)
/* State machine with entry actions. */
typedef void (*bfa_fsm_t)(void *fsm, int event);
@@ -41,24 +69,12 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
static void oc ## _sm_ ## st ## _entry(otype * fsm)
#define bfa_fsm_set_state(_fsm, _state) do { \
- (_fsm)->fsm = (bfa_fsm_t)(_state); \
+ (_fsm)->fsm = (_state); \
_state ## _entry(_fsm); \
} while (0)
#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
-#define bfa_fsm_cmp_state(_fsm, _state) \
- ((_fsm)->fsm == (bfa_fsm_t)(_state))
-
-static inline int
-bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
-{
- int i = 0;
-
- while (smt[i].sm && smt[i].sm != sm)
- i++;
- return smt[i].state;
-}
-
+#define bfa_fsm_cmp_state(_fsm, _state) ((_fsm)->fsm == (_state))
/* Generic wait counter. */
typedef void (*bfa_wc_resume_t) (void *cbarg);
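
Hand-expanding one BFA_SM_TABLE() invocation above (the ioc one) makes the generated pieces visible: a typed FSM function pointer, a typed table entry, and a typed lookup helper, which is what lets the callers drop the old casts through the generic bfa_sm_t/bfa_fsm_t pointers. The expansion below is derived mechanically from the macro body, for illustration only:

struct bfa_ioc;
enum ioc_event;
typedef void (*bfa_fsm_ioc_t)(struct bfa_ioc *, enum ioc_event);

struct ioc_sm_table_s {
	bfa_fsm_ioc_t sm;	/* state machine function */
	int state;		/* state machine encoding */
	char *name;		/* state name for display */
};

static inline int
ioc_sm_to_state(struct ioc_sm_table_s *smt, bfa_fsm_ioc_t sm)
{
	int i = 0;

	while (smt[i].sm && smt[i].sm != sm)
		i++;
	return smt[i].state;
}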
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index cd933817a0b8..b07522ac3e74 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -114,7 +114,7 @@ bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
-static struct bfa_sm_table ioc_sm_table[] = {
+static struct ioc_sm_table_s ioc_sm_table[] = {
{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
@@ -183,7 +183,7 @@ bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
-static struct bfa_sm_table iocpf_sm_table[] = {
+static struct iocpf_sm_table_s iocpf_sm_table[] = {
{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
@@ -2860,12 +2860,12 @@ static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
enum bfa_iocpf_state iocpf_st;
- enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+ enum bfa_ioc_state ioc_st = ioc_sm_to_state(ioc_sm_table, ioc->fsm);
if (ioc_st == BFA_IOC_ENABLING ||
ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
- iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+ iocpf_st = iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
switch (iocpf_st) {
case BFA_IOCPF_SEMWAIT:
@@ -2983,7 +2983,7 @@ bfa_nw_iocpf_timeout(struct bfa_ioc *ioc)
{
enum bfa_iocpf_state iocpf_st;
- iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
+ iocpf_st = iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
if (iocpf_st == BFA_IOCPF_HWINIT)
bfa_ioc_poll_fwinit(ioc);
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index edd0ed5b5332..f30d06ec4ffe 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -147,16 +147,20 @@ struct bfa_ioc_notify {
(__notify)->cbarg = (__cbarg); \
} while (0)
+enum iocpf_event;
+
struct bfa_iocpf {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bfa_iocpf *s, enum iocpf_event e);
struct bfa_ioc *ioc;
bool fw_mismatch_notified;
bool auto_recover;
u32 poll_time;
};
+enum ioc_event;
+
struct bfa_ioc {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bfa_ioc *s, enum ioc_event e);
struct bfa *bfa;
struct bfa_pcidev pcidev;
struct timer_list ioc_timer;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
index 47125f419530..fa40d5ec6f1c 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -202,7 +202,6 @@ static void
__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
{
size_t len = cmd->msg_size;
- int num_entries = 0;
size_t to_copy;
u8 *src, *dst;
@@ -219,7 +218,6 @@ __cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
dst = (u8 *)cmdq->addr.kva;
dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
- num_entries++;
}
}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.h b/drivers/net/ethernet/brocade/bna/bfa_msgq.h
index 75343b535798..170a4b4bed96 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.h
@@ -55,8 +55,10 @@ enum bfa_msgq_cmdq_flags {
BFA_MSGQ_CMDQ_F_DB_UPDATE = 1,
};
+enum cmdq_event;
+
struct bfa_msgq_cmdq {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bfa_msgq_cmdq *s, enum cmdq_event e);
enum bfa_msgq_cmdq_flags flags;
u16 producer_index;
@@ -81,8 +83,10 @@ enum bfa_msgq_rspq_flags {
typedef void (*bfa_msgq_mcfunc_t)(void *cbarg, struct bfi_msgq_mhdr *mhdr);
+enum rspq_event;
+
struct bfa_msgq_rspq {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bfa_msgq_rspq *s, enum rspq_event e);
enum bfa_msgq_rspq_flags flags;
u16 producer_index;
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index a2c983f56b00..883de0ac8de4 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1257,7 +1257,7 @@ bna_enet_mtu_get(struct bna_enet *enet)
void
bna_enet_enable(struct bna_enet *enet)
{
- if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
+ if (enet->fsm != bna_enet_sm_stopped)
return;
enet->flags |= BNA_ENET_F_ENABLED;
@@ -1751,12 +1751,12 @@ bna_ioceth_uninit(struct bna_ioceth *ioceth)
void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
- if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
+ if (ioceth->fsm == bna_ioceth_sm_ready) {
bnad_cb_ioceth_ready(ioceth->bna->bnad);
return;
}
- if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
+ if (ioceth->fsm == bna_ioceth_sm_stopped)
bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 2623a0da4682..c05dc7a1c4a1 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -1956,7 +1956,7 @@ static void
bna_rx_stop(struct bna_rx *rx)
{
rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
- if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
+ if (rx->fsm == bna_rx_sm_stopped)
bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
else {
rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
@@ -2535,7 +2535,7 @@ bna_rx_destroy(struct bna_rx *rx)
void
bna_rx_enable(struct bna_rx *rx)
{
- if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
+ if (rx->fsm != bna_rx_sm_stopped)
return;
rx->rx_flags |= BNA_RX_F_ENABLED;
@@ -3523,7 +3523,7 @@ bna_tx_destroy(struct bna_tx *tx)
void
bna_tx_enable(struct bna_tx *tx)
{
- if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
+ if (tx->fsm != bna_tx_sm_stopped)
return;
tx->flags |= BNA_TX_F_ENABLED;
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index 666b6922e24d..a5ebd7110e07 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -312,8 +312,10 @@ struct bna_attr {
/* IOCEth */
+enum bna_ioceth_event;
+
struct bna_ioceth {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_ioceth *s, enum bna_ioceth_event e);
struct bfa_ioc ioc;
struct bna_attr attr;
@@ -334,8 +336,10 @@ struct bna_pause_config {
enum bna_status rx_pause;
};
+enum bna_enet_event;
+
struct bna_enet {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_enet *s, enum bna_enet_event e);
enum bna_enet_flags flags;
enum bna_enet_type type;
@@ -360,8 +364,10 @@ struct bna_enet {
/* Ethport */
+enum bna_ethport_event;
+
struct bna_ethport {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_ethport *s, enum bna_ethport_event e);
enum bna_ethport_flags flags;
enum bna_link_status link_status;
@@ -454,13 +460,16 @@ struct bna_txq {
};
/* Tx object */
+
+enum bna_tx_event;
+
struct bna_tx {
/* This should be the first one */
struct list_head qe;
int rid;
int hw_id;
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_tx *s, enum bna_tx_event e);
enum bna_tx_flags flags;
enum bna_tx_type type;
@@ -698,8 +707,11 @@ struct bna_rxp {
};
/* RxF structure (hardware Rx Function) */
+
+enum bna_rxf_event;
+
struct bna_rxf {
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_rxf *s, enum bna_rxf_event e);
struct bfa_msgq_cmd_entry msgq_cmd;
union {
@@ -769,13 +781,16 @@ struct bna_rxf {
};
/* Rx object */
+
+enum bna_rx_event;
+
struct bna_rx {
/* This should be the first one */
struct list_head qe;
int rid;
int hw_id;
- bfa_fsm_t fsm;
+ void (*fsm)(struct bna_rx *s, enum bna_rx_event e);
enum bna_rx_type type;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index bbdc829c3524..d6d90f9722a7 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1881,7 +1881,6 @@ poll_exit:
return rcvd;
}
-#define BNAD_NAPI_POLL_QUOTA 64
static void
bnad_napi_add(struct bnad *bnad, u32 rx_id)
{
@@ -1892,7 +1891,7 @@ bnad_napi_add(struct bnad *bnad, u32 rx_id)
for (i = 0; i < bnad->num_rxp_per_rx; i++) {
rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
netif_napi_add(bnad->netdev, &rx_ctrl->napi,
- bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
+ bnad_napi_poll_rx);
}
}
@@ -2824,8 +2823,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
return -EINVAL;
}
- if (unlikely((gso_size + skb_transport_offset(skb) +
- tcp_hdrlen(skb)) >= skb->len)) {
+ if (unlikely((gso_size + skb_tcp_all_headers(skb)) >= skb->len)) {
txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
txqent->hdr.wi.lso_mss = 0;
BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
@@ -2873,8 +2871,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
if (unlikely(skb_headlen(skb) <
- skb_transport_offset(skb) +
- tcp_hdrlen(skb))) {
+ skb_tcp_all_headers(skb))) {
BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
return -EINVAL;
}
@@ -3421,7 +3418,7 @@ static const struct net_device_ops bnad_netdev_ops = {
};
static void
-bnad_netdev_init(struct bnad *bnad, bool using_dac)
+bnad_netdev_init(struct bnad *bnad)
{
struct net_device *netdev = bnad->netdev;
@@ -3434,10 +3431,8 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
-
- if (using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HIGHDMA;
netdev->mem_start = bnad->mmio_start;
netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
@@ -3544,8 +3539,7 @@ bnad_lock_uninit(struct bnad *bnad)
/* PCI Initialization */
static int
-bnad_pci_init(struct bnad *bnad,
- struct pci_dev *pdev, bool *using_dac)
+bnad_pci_init(struct bnad *bnad, struct pci_dev *pdev)
{
int err;
@@ -3555,14 +3549,9 @@ bnad_pci_init(struct bnad *bnad,
err = pci_request_regions(pdev, BNAD_NAME);
if (err)
goto disable_device;
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- *using_dac = true;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err)
- goto release_regions;
- *using_dac = false;
- }
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err)
+ goto release_regions;
pci_set_master(pdev);
return 0;
@@ -3585,7 +3574,6 @@ static int
bnad_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pcidev_id)
{
- bool using_dac;
int err;
struct bnad *bnad;
struct bna *bna;
@@ -3615,13 +3603,8 @@ bnad_pci_probe(struct pci_dev *pdev,
bnad->id = atomic_inc_return(&bna_id) - 1;
mutex_lock(&bnad->conf_mutex);
- /*
- * PCI initialization
- * Output : using_dac = 1 for 64 bit DMA
- * = 0 for 32 bit DMA
- */
- using_dac = false;
- err = bnad_pci_init(bnad, pdev, &using_dac);
+ /* PCI initialization */
+ err = bnad_pci_init(bnad, pdev);
if (err)
goto unlock_mutex;
@@ -3634,7 +3617,7 @@ bnad_pci_probe(struct pci_dev *pdev,
goto pci_uninit;
/* Initialize netdev structure, set up ethtool ops */
- bnad_netdev_init(bnad, using_dac);
+ bnad_netdev_init(bnad);
/* Set link to down state */
netif_carrier_off(netdev);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 391b85f25141..df10edff5603 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -114,7 +114,7 @@ static const char *bnad_net_stats_strings[] = {
"mac_tx_deferral",
"mac_tx_excessive_deferral",
"mac_tx_single_collision",
- "mac_tx_muliple_collision",
+ "mac_tx_multiple_collision",
"mac_tx_late_collision",
"mac_tx_excessive_collision",
"mac_tx_total_collision",
@@ -235,13 +235,18 @@ static int
bnad_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- u32 supported, advertising;
-
- supported = SUPPORTED_10000baseT_Full;
- advertising = ADVERTISED_10000baseT_Full;
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseLR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseCR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseLR_Full);
cmd->base.autoneg = AUTONEG_DISABLE;
- supported |= SUPPORTED_FIBRE;
- advertising |= ADVERTISED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
cmd->base.port = PORT_FIBRE;
cmd->base.phy_address = 0;
@@ -253,11 +258,6 @@ bnad_get_link_ksettings(struct net_device *netdev,
cmd->base.duplex = DUPLEX_UNKNOWN;
}
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
-
return 0;
}
@@ -283,7 +283,7 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct bfa_ioc_attr *ioc_attr;
unsigned long flags;
- strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
@@ -291,12 +291,12 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+ strscpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
sizeof(drvinfo->fw_version));
kfree(ioc_attr);
}
- strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
+ strscpy(drvinfo->bus_info, pci_name(bnad->pcidev),
sizeof(drvinfo->bus_info));
}
@@ -405,7 +405,9 @@ static int bnad_set_coalesce(struct net_device *netdev,
static void
bnad_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
struct bnad *bnad = netdev_priv(netdev);
@@ -418,7 +420,9 @@ bnad_get_ringparam(struct net_device *netdev,
static int
bnad_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ringparam)
+ struct ethtool_ringparam *ringparam,
+ struct kernel_ethtool_ringparam *kernel_ringparam,
+ struct netlink_ext_ack *extack)
{
int i, current_err, err = 0;
struct bnad *bnad = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 5620b97b3482..cfbdd0022764 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -12,6 +12,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
+#include <linux/phy/phy.h>
#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP)
#define MACB_EXT_DESC
@@ -94,6 +95,8 @@
#define GEM_SA4B 0x00A0 /* Specific4 Bottom */
#define GEM_SA4T 0x00A4 /* Specific4 Top */
#define GEM_WOL 0x00b8 /* Wake on LAN */
+#define GEM_RXPTPUNI 0x00D4 /* PTP RX Unicast address */
+#define GEM_TXPTPUNI 0x00D8 /* PTP TX Unicast address */
#define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
#define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
#define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
@@ -244,6 +247,8 @@
#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
#define MACB_TZQ_SIZE 1
#define MACB_SRTSM_OFFSET 15 /* Store Receive Timestamp to Memory */
+#define MACB_PTPUNI_OFFSET 20 /* PTP Unicast packet enable */
+#define MACB_PTPUNI_SIZE 1
#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */
#define MACB_OSSMODE_SIZE 1
#define MACB_MIIONRGMII_OFFSET 28 /* MII Usage on RGMII Interface */
@@ -691,6 +696,8 @@
#define GEM_CLK_DIV48 3
#define GEM_CLK_DIV64 4
#define GEM_CLK_DIV96 5
+#define GEM_CLK_DIV128 6
+#define GEM_CLK_DIV224 7
/* Constants for MAN register */
#define MACB_MAN_C22_SOF 1
@@ -716,14 +723,15 @@
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
#define MACB_CAPS_MIIONRGMII 0x00000200
+#define MACB_CAPS_NEED_TSUCLK 0x00000400
+#define MACB_CAPS_PCS 0x01000000
+#define MACB_CAPS_HIGH_SPEED 0x02000000
#define MACB_CAPS_CLK_HW_CHG 0x04000000
#define MACB_CAPS_MACB_IS_EMAC 0x08000000
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
-#define MACB_CAPS_PCS 0x01000000
-#define MACB_CAPS_HIGH_SPEED 0x02000000
/* LSO settings */
#define MACB_LSO_UFO_ENABLE 0x01
@@ -766,8 +774,6 @@
#define gem_readl_n(port, reg, idx) (port)->macb_reg_readl((port), GEM_##reg + idx * 4)
#define gem_writel_n(port, reg, idx, value) (port)->macb_reg_writel((port), GEM_##reg + idx * 4, (value))
-#define PTP_TS_BUFFER_SIZE 128 /* must be power of 2 */
-
/* Conditional GEM/MACB macros. These perform the operation to the correct
* register dependent on whether the device is a GEM or a MACB. For registers
* and bitfields that are common across both devices, use macb_{read,write}l
@@ -817,11 +823,6 @@ struct macb_dma_desc_ptp {
u32 ts_1;
u32 ts_2;
};
-
-struct gem_tx_ts {
- struct sk_buff *skb;
- struct macb_dma_desc_ptp desc_ptp;
-};
#endif
/* DMA descriptor bitfields */
@@ -1203,11 +1204,15 @@ struct macb_queue {
unsigned int RBQP;
unsigned int RBQPH;
+ /* Lock to protect tx_head and tx_tail */
+ spinlock_t tx_ptr_lock;
unsigned int tx_head, tx_tail;
struct macb_dma_desc *tx_ring;
struct macb_tx_skb *tx_skb;
dma_addr_t tx_ring_dma;
struct work_struct tx_error_task;
+ bool txubr_pending;
+ struct napi_struct napi_tx;
dma_addr_t rx_ring_dma;
dma_addr_t rx_buffers_dma;
@@ -1216,14 +1221,8 @@ struct macb_queue {
struct macb_dma_desc *rx_ring;
struct sk_buff **rx_skbuff;
void *rx_buffers;
- struct napi_struct napi;
+ struct napi_struct napi_rx;
struct queue_stats stats;
-
-#ifdef CONFIG_MACB_USE_HWSTAMP
- struct work_struct tx_ts_task;
- unsigned int tx_ts_head, tx_ts_tail;
- struct gem_tx_ts tx_timestamps[PTP_TS_BUFFER_SIZE];
-#endif
};
struct ethtool_rx_fs_item {
@@ -1271,7 +1270,8 @@ struct macb {
struct mii_bus *mii_bus;
struct phylink *phylink;
struct phylink_config phylink_config;
- struct phylink_pcs phylink_pcs;
+ struct phylink_pcs phylink_usx_pcs;
+ struct phylink_pcs phylink_sgmii_pcs;
u32 caps;
unsigned int dma_burst_length;
@@ -1290,6 +1290,9 @@ struct macb {
u32 wol;
struct macb_ptp_info *ptp_info; /* macb-ptp interface */
+
+ struct phy *sgmii_phy; /* for ZynqMP SGMII mode */
+
#ifdef MACB_EXT_DESC
uint8_t hw_dma_cap;
#endif
@@ -1330,14 +1333,14 @@ enum macb_bd_control {
void gem_ptp_init(struct net_device *ndev);
void gem_ptp_remove(struct net_device *ndev);
-int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *des);
+void gem_ptp_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc);
void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc);
-static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
+static inline void gem_ptp_do_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc)
{
- if (queue->bp->tstamp_config.tx_type == TSTAMP_DISABLED)
- return -ENOTSUPP;
+ if (bp->tstamp_config.tx_type == TSTAMP_DISABLED)
+ return;
- return gem_ptp_txstamp(queue, skb, desc);
+ gem_ptp_txstamp(bp, skb, desc);
}
static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc)
@@ -1353,11 +1356,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd);
static inline void gem_ptp_init(struct net_device *ndev) { }
static inline void gem_ptp_remove(struct net_device *ndev) { }
-static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
-{
- return -1;
-}
-
+static inline void gem_ptp_do_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { }
static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { }
#endif
@@ -1368,7 +1367,7 @@ static inline bool macb_is_gem(struct macb *bp)
static inline bool gem_has_ptp(struct macb *bp)
{
- return !!(bp->caps & MACB_CAPS_GEM_HAS_PTP);
+ return IS_ENABLED(CONFIG_MACB_USE_HWSTAMP) && (bp->caps & MACB_CAPS_GEM_HAS_PTP);
}
/**
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index ffce528aa00e..29a1199dad14 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -34,7 +34,11 @@
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
+#include <linux/ptp_classify.h>
+#include <linux/reset.h>
+#include <linux/firmware/xlnx-zynqmp.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -90,8 +94,7 @@ struct sifive_fu540_macb_mgmt {
/* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
*/
-#define MACB_HALT_TIMEOUT 1230
-
+#define MACB_HALT_TIMEOUT 14000
#define MACB_PM_TIMEOUT 100 /* ms */
#define MACB_MDIO_TIMEOUT 1000000 /* in usecs */
@@ -284,6 +287,11 @@ static void macb_set_hwaddr(struct macb *bp)
top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
macb_or_gem_writel(bp, SA1T, top);
+ if (gem_has_ptp(bp)) {
+ gem_writel(bp, RXPTPUNI, bottom);
+ gem_writel(bp, TXPTPUNI, bottom);
+ }
+
/* Clear unused address register sets */
macb_or_gem_writel(bp, SA2B, 0);
macb_or_gem_writel(bp, SA2T, 0);
@@ -330,7 +338,40 @@ static int macb_mdio_wait_for_idle(struct macb *bp)
1, MACB_MDIO_TIMEOUT);
}
-static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+static int macb_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct macb *bp = bus->priv;
+ int status;
+
+ status = pm_runtime_resume_and_get(&bp->pdev->dev);
+ if (status < 0)
+ goto mdio_pm_exit;
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+ | MACB_BF(RW, MACB_MAN_C22_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
+
+ status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
+
+mdio_read_exit:
+ pm_runtime_mark_last_busy(&bp->pdev->dev);
+ pm_runtime_put_autosuspend(&bp->pdev->dev);
+mdio_pm_exit:
+ return status;
+}
+
+static int macb_mdio_read_c45(struct mii_bus *bus, int mii_id, int devad,
+ int regnum)
{
struct macb *bp = bus->priv;
int status;
@@ -345,30 +386,22 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (status < 0)
goto mdio_read_exit;
- if (regnum & MII_ADDR_C45) {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_ADDR)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(DATA, regnum & 0xFFFF)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)));
-
- status = macb_mdio_wait_for_idle(bp);
- if (status < 0)
- goto mdio_read_exit;
-
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_READ)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)));
- } else {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
- | MACB_BF(RW, MACB_MAN_C22_READ)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, regnum)
- | MACB_BF(CODE, MACB_MAN_C22_CODE)));
- }
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(DATA, regnum & 0xFFFF)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
@@ -383,8 +416,41 @@ mdio_pm_exit:
return status;
}
-static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 value)
+static int macb_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct macb *bp = bus->priv;
+ int status;
+
+ status = pm_runtime_resume_and_get(&bp->pdev->dev);
+ if (status < 0)
+ goto mdio_pm_exit;
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+ | MACB_BF(RW, MACB_MAN_C22_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)
+ | MACB_BF(DATA, value)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
+
+mdio_write_exit:
+ pm_runtime_mark_last_busy(&bp->pdev->dev);
+ pm_runtime_put_autosuspend(&bp->pdev->dev);
+mdio_pm_exit:
+ return status;
+}
+
+static int macb_mdio_write_c45(struct mii_bus *bus, int mii_id,
+ int devad, int regnum,
+ u16 value)
{
struct macb *bp = bus->priv;
int status;
@@ -399,32 +465,23 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
if (status < 0)
goto mdio_write_exit;
- if (regnum & MII_ADDR_C45) {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_ADDR)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(DATA, regnum & 0xFFFF)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)));
-
- status = macb_mdio_wait_for_idle(bp);
- if (status < 0)
- goto mdio_write_exit;
-
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_WRITE)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)
- | MACB_BF(DATA, value)));
- } else {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
- | MACB_BF(RW, MACB_MAN_C22_WRITE)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, regnum)
- | MACB_BF(CODE, MACB_MAN_C22_CODE)
- | MACB_BF(DATA, value)));
- }
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(DATA, regnum & 0xFFFF)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)
+ | MACB_BF(DATA, value)));
status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
@@ -506,79 +563,11 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
netdev_err(bp->dev, "adjusting tx_clk failed.\n");
}
-static void macb_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct net_device *ndev = to_net_dev(config->dev);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- struct macb *bp = netdev_priv(ndev);
-
- /* We only support MII, RMII, GMII, RGMII & SGMII. */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_RMII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_10GBASER &&
- !phy_interface_mode_is_rgmii(state->interface)) {
- linkmode_zero(supported);
- return;
- }
-
- if (!macb_is_gem(bp) &&
- (state->interface == PHY_INTERFACE_MODE_GMII ||
- phy_interface_mode_is_rgmii(state->interface))) {
- linkmode_zero(supported);
- return;
- }
-
- if (state->interface == PHY_INTERFACE_MODE_10GBASER &&
- !(bp->caps & MACB_CAPS_HIGH_SPEED &&
- bp->caps & MACB_CAPS_PCS)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Asym_Pause);
-
- if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
- (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_10GBASER)) {
- phylink_set_10g_modes(mask);
- phylink_set(mask, 10000baseKR_Full);
- if (state->interface != PHY_INTERFACE_MODE_NA)
- goto out;
- }
-
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
-
- if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
- (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_GMII ||
- state->interface == PHY_INTERFACE_MODE_SGMII ||
- phy_interface_mode_is_rgmii(state->interface))) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
-
- if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
- phylink_set(mask, 1000baseT_Half);
- }
-out:
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-}
-
static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
phy_interface_t interface, int speed,
int duplex)
{
- struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
u32 config;
config = gem_readl(bp, USX_CONTROL);
@@ -592,7 +581,7 @@ static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
- struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
u32 val;
state->speed = SPEED_10000;
@@ -612,7 +601,7 @@ static int macb_usx_pcs_config(struct phylink_pcs *pcs,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
- struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
+ struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
GEM_BIT(SIGNAL_OK));
@@ -789,34 +778,32 @@ static void macb_mac_link_up(struct phylink_config *config,
spin_unlock_irqrestore(&bp->lock, flags);
- /* Enable Rx and Tx */
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
+ /* Enable Rx and Tx; Enable PTP unicast */
+ ctrl = macb_readl(bp, NCR);
+ if (gem_has_ptp(bp))
+ ctrl |= MACB_BIT(PTPUNI);
+
+ macb_writel(bp, NCR, ctrl | MACB_BIT(RE) | MACB_BIT(TE));
netif_tx_wake_all_queues(ndev);
}
-static int macb_mac_prepare(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface)
+static struct phylink_pcs *macb_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
struct net_device *ndev = to_net_dev(config->dev);
struct macb *bp = netdev_priv(ndev);
if (interface == PHY_INTERFACE_MODE_10GBASER)
- bp->phylink_pcs.ops = &macb_phylink_usx_pcs_ops;
+ return &bp->phylink_usx_pcs;
else if (interface == PHY_INTERFACE_MODE_SGMII)
- bp->phylink_pcs.ops = &macb_phylink_pcs_ops;
+ return &bp->phylink_sgmii_pcs;
else
- bp->phylink_pcs.ops = NULL;
-
- if (bp->phylink_pcs.ops)
- phylink_set_pcs(bp->phylink, &bp->phylink_pcs);
-
- return 0;
+ return NULL;
}
static const struct phylink_mac_ops macb_phylink_ops = {
- .validate = macb_validate,
- .mac_prepare = macb_mac_prepare,
+ .mac_select_pcs = macb_mac_select_pcs,
.mac_config = macb_mac_config,
.mac_link_down = macb_mac_link_down,
.mac_link_up = macb_mac_link_up,
@@ -874,14 +861,47 @@ static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
+ bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
+ bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
+
bp->phylink_config.dev = &dev->dev;
bp->phylink_config.type = PHYLINK_NETDEV;
+ bp->phylink_config.mac_managed_pm = true;
if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
bp->phylink_config.poll_fixed_state = true;
bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
}
+ bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ bp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ bp->phylink_config.supported_interfaces);
+
+ /* Determine what modes are supported */
+ if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) {
+ bp->phylink_config.mac_capabilities |= MAC_1000FD;
+ if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
+ bp->phylink_config.mac_capabilities |= MAC_1000HD;
+
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ bp->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(bp->phylink_config.supported_interfaces);
+
+ if (bp->caps & MACB_CAPS_PCS)
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ bp->phylink_config.supported_interfaces);
+
+ if (bp->caps & MACB_CAPS_HIGH_SPEED) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ bp->phylink_config.supported_interfaces);
+ bp->phylink_config.mac_capabilities |= MAC_10000FD;
+ }
+ }
+
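For comparison, a minimal declarative setup for a hypothetical MAC limited to RGMII at up to 1 Gb/s might look like the sketch below; phylink derives the supported link modes from mac_capabilities plus supported_interfaces, which is what lets the driver drop its .validate callback.

	/* Sketch only: hypothetical capabilities, not the macb ones above. */
	bp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
					      MAC_10 | MAC_100 | MAC_1000FD;
	phy_interface_set_rgmii(bp->phylink_config.supported_interfaces);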
bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
bp->phy_interface, &macb_phylink_ops);
if (IS_ERR(bp->phylink)) {
@@ -943,8 +963,10 @@ static int macb_mii_init(struct macb *bp)
}
bp->mii_bus->name = "MACB_mii_bus";
- bp->mii_bus->read = &macb_mdio_read;
- bp->mii_bus->write = &macb_mdio_write;
+ bp->mii_bus->read = &macb_mdio_read_c22;
+ bp->mii_bus->write = &macb_mdio_write_c22;
+ bp->mii_bus->read_c45 = &macb_mdio_read_c45;
+ bp->mii_bus->write_c45 = &macb_mdio_write_c45;
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
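With dedicated C45 hooks, the MMD device address is passed as its own argument instead of being packed into regnum with MII_ADDR_C45. A sketch of a consumer-side read through the new callback (phy_addr is a placeholder; the MDIO_* constants are from uapi/linux/mdio.h); in practice callers go through the mdiobus helpers, which also take the bus lock, so calling the hook directly is for illustration only.

	/* Sketch: read PMA/PMD control (MMD 1, register 0) via the C45 hook. */
	int val = bp->mii_bus->read_c45(bp->mii_bus, phy_addr,
					MDIO_MMD_PMAPMD, MDIO_CTRL1);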
@@ -1002,7 +1024,7 @@ static int macb_halt_tx(struct macb *bp)
return -ETIMEDOUT;
}
-static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
+static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
{
if (tx_skb->mapping) {
if (tx_skb->mapped_as_page)
@@ -1015,7 +1037,7 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
}
if (tx_skb->skb) {
- dev_kfree_skb_any(tx_skb->skb);
+ napi_consume_skb(tx_skb->skb, budget);
tx_skb->skb = NULL;
}
}
@@ -1050,6 +1072,10 @@ static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
}
#endif
addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+#ifdef CONFIG_MACB_USE_HWSTAMP
+ if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
+ addr &= ~GEM_BIT(DMA_RXVALID);
+#endif
return addr;
}
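	/* Note on the masking above: with the PTP descriptor layout enabled,
	 * GEM appears to reuse a low bit of the RX address word as a
	 * "timestamp valid" flag (GEM_BIT(DMA_RXVALID)), so it has to be
	 * cleared before the value is treated as a buffer DMA address.
	 */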
@@ -1057,6 +1083,7 @@ static void macb_tx_error_task(struct work_struct *work)
{
struct macb_queue *queue = container_of(work, struct macb_queue,
tx_error_task);
+ bool halt_timeout = false;
struct macb *bp = queue->bp;
struct macb_tx_skb *tx_skb;
struct macb_dma_desc *desc;
@@ -1068,12 +1095,13 @@ static void macb_tx_error_task(struct work_struct *work)
(unsigned int)(queue - bp->queues),
queue->tx_tail, queue->tx_head);
- /* Prevent the queue IRQ handlers from running: each of them may call
- * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
+ /* Prevent the queue NAPI TX poll from running, as it calls
+ * macb_tx_complete(), which in turn may call netif_wake_subqueue().
* As explained below, we have to halt the transmission before updating
* TBQP registers so we call netif_tx_stop_all_queues() to notify the
* network engine about the macb/gem being halted.
*/
+ napi_disable(&queue->napi_tx);
spin_lock_irqsave(&bp->lock, flags);
/* Make sure nobody is trying to queue up new packets */
@@ -1083,9 +1111,11 @@ static void macb_tx_error_task(struct work_struct *work)
* (in case we have just queued new packets)
* macb/gem must be halted to write TBQP register
*/
- if (macb_halt_tx(bp))
- /* Just complain for now, reinitializing TX path can be good */
+ if (macb_halt_tx(bp)) {
netdev_err(bp->dev, "BUG: halt tx timed out\n");
+ macb_writel(bp, NCR, macb_readl(bp, NCR) & (~MACB_BIT(TE)));
+ halt_timeout = true;
+ }
/* Treat frames in TX queue including the ones that caused the error.
* Free transmit buffers in upper layer.
@@ -1101,7 +1131,7 @@ static void macb_tx_error_task(struct work_struct *work)
if (ctrl & MACB_BIT(TX_USED)) {
/* skb is set for the last buffer of the frame */
while (!skb) {
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, 0);
tail++;
tx_skb = macb_tx_skb(queue, tail);
skb = tx_skb->skb;
@@ -1131,7 +1161,7 @@ static void macb_tx_error_task(struct work_struct *work)
desc->ctrl = ctrl | MACB_BIT(TX_USED);
}
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, 0);
}
/* Set end of TX queue */
@@ -1156,32 +1186,58 @@ static void macb_tx_error_task(struct work_struct *work)
macb_writel(bp, TSR, macb_readl(bp, TSR));
queue_writel(queue, IER, MACB_TX_INT_FLAGS);
+ if (halt_timeout)
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
+
/* Now we are ready to start transmission again */
netif_tx_start_all_queues(bp->dev);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
spin_unlock_irqrestore(&bp->lock, flags);
+ napi_enable(&queue->napi_tx);
}
-static void macb_tx_interrupt(struct macb_queue *queue)
+static bool ptp_one_step_sync(struct sk_buff *skb)
{
- unsigned int tail;
- unsigned int head;
- u32 status;
- struct macb *bp = queue->bp;
- u16 queue_index = queue - bp->queues;
+ struct ptp_header *hdr;
+ unsigned int ptp_class;
+ u8 msgtype;
+
+ /* No need to parse packet if PTP TS is not involved */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ goto not_oss;
- status = macb_readl(bp, TSR);
- macb_writel(bp, TSR, status);
+ /* Identify and return whether PTP one step sync is being processed */
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ goto not_oss;
- if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- queue_writel(queue, ISR, MACB_BIT(TCOMP));
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ goto not_oss;
+
+ if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP)
+ goto not_oss;
+
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ if (msgtype == PTP_MSGTYPE_SYNC)
+ return true;
+
+not_oss:
+ return false;
+}
- netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
- (unsigned long)status);
+static int macb_tx_complete(struct macb_queue *queue, int budget)
+{
+ struct macb *bp = queue->bp;
+ u16 queue_index = queue - bp->queues;
+ unsigned int tail;
+ unsigned int head;
+ int packets = 0;
+ spin_lock(&queue->tx_ptr_lock);
head = queue->tx_head;
- for (tail = queue->tx_tail; tail != head; tail++) {
+ for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
struct macb_tx_skb *tx_skb;
struct sk_buff *skb;
struct macb_dma_desc *desc;
@@ -1207,14 +1263,10 @@ static void macb_tx_interrupt(struct macb_queue *queue)
/* First, update TX stats if needed */
if (skb) {
- if (unlikely(skb_shinfo(skb)->tx_flags &
- SKBTX_HW_TSTAMP) &&
- gem_ptp_do_txstamp(queue, skb, desc) == 0) {
- /* skb now belongs to timestamp buffer
- * and will be removed later
- */
- tx_skb->skb = NULL;
- }
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ !ptp_one_step_sync(skb))
+ gem_ptp_do_txstamp(bp, skb, desc);
+
netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
macb_tx_ring_wrap(bp, tail),
skb->data);
@@ -1222,10 +1274,11 @@ static void macb_tx_interrupt(struct macb_queue *queue)
queue->stats.tx_packets++;
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
+ packets++;
}
/* Now we can safely release resources */
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, budget);
/* skb is set only for the last buffer of the frame.
* WARNING: at this point skb has been freed by
@@ -1241,6 +1294,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
CIRC_CNT(queue->tx_head, queue->tx_tail,
bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
netif_wake_subqueue(bp->dev, queue_index);
+ spin_unlock(&queue->tx_ptr_lock);
+
+ return packets;
}
static void gem_rx_refill(struct macb_queue *queue)
@@ -1258,7 +1314,6 @@ static void gem_rx_refill(struct macb_queue *queue)
/* Make hw descriptor updates visible to CPU */
rmb();
- queue->rx_prepared_head++;
desc = macb_rx_desc(queue, entry);
if (!queue->rx_skbuff[entry]) {
@@ -1297,6 +1352,7 @@ static void gem_rx_refill(struct macb_queue *queue)
dma_wmb();
desc->addr &= ~MACB_BIT(RX_USED);
}
+ queue->rx_prepared_head++;
}
/* Make descriptor updates visible to hardware */
@@ -1597,31 +1653,51 @@ static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
return received;
}
-static int macb_poll(struct napi_struct *napi, int budget)
+static bool macb_rx_pending(struct macb_queue *queue)
{
- struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
struct macb *bp = queue->bp;
- int work_done;
- u32 status;
+ unsigned int entry;
+ struct macb_dma_desc *desc;
- status = macb_readl(bp, RSR);
- macb_writel(bp, RSR, status);
+ entry = macb_rx_ring_wrap(bp, queue->rx_tail);
+ desc = macb_rx_desc(queue, entry);
- netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
- (unsigned long)status, budget);
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+ return (desc->addr & MACB_BIT(RX_USED)) != 0;
+}
+
+static int macb_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct macb_queue *queue = container_of(napi, struct macb_queue, napi_rx);
+ struct macb *bp = queue->bp;
+ int work_done;
work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- /* Packets received while interrupts were disabled */
- status = macb_readl(bp, RSR);
- if (status) {
+ netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n",
+ (unsigned int)(queue - bp->queues), work_done, budget);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ queue_writel(queue, IER, bp->rx_intr_mask);
+
+ /* Packet completions only seem to propagate to raise
+ * interrupts when interrupts are enabled at the time, so if
+ * packets were received while interrupts were disabled,
+ * they will not cause another interrupt to be generated when
+ * interrupts are re-enabled.
+ * Check for this case here to avoid losing a wakeup. This can
+ * potentially race with the interrupt handler doing the same
+ * actions if an interrupt is raised just after enabling them,
+ * but this should be harmless.
+ */
+ if (macb_rx_pending(queue)) {
+ queue_writel(queue, IDR, bp->rx_intr_mask);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
- napi_reschedule(napi);
- } else {
- queue_writel(queue, IER, bp->rx_intr_mask);
+ netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n");
+ napi_schedule(napi);
}
}
@@ -1630,6 +1706,90 @@ static int macb_poll(struct napi_struct *napi, int budget)
return work_done;
}
+static void macb_tx_restart(struct macb_queue *queue)
+{
+ struct macb *bp = queue->bp;
+ unsigned int head_idx, tbqp;
+
+ spin_lock(&queue->tx_ptr_lock);
+
+ if (queue->tx_head == queue->tx_tail)
+ goto out_tx_ptr_unlock;
+
+ tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
+ tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
+ head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head));
+
+ if (tbqp == head_idx)
+ goto out_tx_ptr_unlock;
+
+ spin_lock_irq(&bp->lock);
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+ spin_unlock_irq(&bp->lock);
+
+out_tx_ptr_unlock:
+ spin_unlock(&queue->tx_ptr_lock);
+}
+
+static bool macb_tx_complete_pending(struct macb_queue *queue)
+{
+ bool retval = false;
+
+ spin_lock(&queue->tx_ptr_lock);
+ if (queue->tx_head != queue->tx_tail) {
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+ if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
+ retval = true;
+ }
+ spin_unlock(&queue->tx_ptr_lock);
+ return retval;
+}
+
+static int macb_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct macb_queue *queue = container_of(napi, struct macb_queue, napi_tx);
+ struct macb *bp = queue->bp;
+ int work_done;
+
+ work_done = macb_tx_complete(queue, budget);
+
+ rmb(); // ensure txubr_pending is up to date
+ if (queue->txubr_pending) {
+ queue->txubr_pending = false;
+ netdev_vdbg(bp->dev, "poll: tx restart\n");
+ macb_tx_restart(queue);
+ }
+
+ netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n",
+ (unsigned int)(queue - bp->queues), work_done, budget);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ queue_writel(queue, IER, MACB_BIT(TCOMP));
+
+ /* Packet completions only seem to propagate to raise
+ * interrupts when interrupts are enabled at the time, so if
+ * packets were sent while interrupts were disabled,
+ * they will not cause another interrupt to be generated when
+ * interrupts are re-enabled.
+ * Check for this case here to avoid losing a wakeup. This can
+ * potentially race with the interrupt handler doing the same
+ * actions if an interrupt is raised just after enabling them,
+ * but this should be harmless.
+ */
+ if (macb_tx_complete_pending(queue)) {
+ queue_writel(queue, IDR, MACB_BIT(TCOMP));
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(TCOMP));
+ netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n");
+ napi_schedule(napi);
+ }
+ }
+
+ return work_done;
+}
+
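	/* Note on the txubr_pending handshake: the interrupt handler (later in
	 * this diff) sets queue->txubr_pending and issues wmb() before
	 * scheduling the TX NAPI, pairing with the rmb() in macb_tx_poll()
	 * above so the poll routine observes the flag before deciding whether
	 * to restart the transmitter.
	 */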
static void macb_hresp_error_task(struct tasklet_struct *t)
{
struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
@@ -1669,21 +1829,6 @@ static void macb_hresp_error_task(struct tasklet_struct *t)
netif_tx_start_all_queues(dev);
}
-static void macb_tx_restart(struct macb_queue *queue)
-{
- unsigned int head = queue->tx_head;
- unsigned int tail = queue->tx_tail;
- struct macb *bp = queue->bp;
-
- if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- queue_writel(queue, ISR, MACB_BIT(TXUBR));
-
- if (head == tail)
- return;
-
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
-}
-
static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
{
struct macb_queue *queue = dev_id;
@@ -1780,9 +1925,27 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
- if (napi_schedule_prep(&queue->napi)) {
+ if (napi_schedule_prep(&queue->napi_rx)) {
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
- __napi_schedule(&queue->napi);
+ __napi_schedule(&queue->napi_rx);
+ }
+ }
+
+ if (status & (MACB_BIT(TCOMP) |
+ MACB_BIT(TXUBR))) {
+ queue_writel(queue, IDR, MACB_BIT(TCOMP));
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(TCOMP) |
+ MACB_BIT(TXUBR));
+
+ if (status & MACB_BIT(TXUBR)) {
+ queue->txubr_pending = true;
+ wmb(); // ensure softirq can see update
+ }
+
+ if (napi_schedule_prep(&queue->napi_tx)) {
+ netdev_vdbg(bp->dev, "scheduling TX softirq\n");
+ __napi_schedule(&queue->napi_tx);
}
}
@@ -1796,12 +1959,6 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
break;
}
- if (status & MACB_BIT(TCOMP))
- macb_tx_interrupt(queue);
-
- if (status & MACB_BIT(TXUBR))
- macb_tx_restart(queue);
-
/* Link change detection isn't possible with RMII, so we'll
* add that if/when we get our hands on a full-blown MII PHY.
*/
@@ -2007,7 +2164,8 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BF(TX_LSO, lso_ctrl);
ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
if ((bp->dev->features & NETIF_F_HW_CSUM) &&
- skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
+ skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
+ !ptp_one_step_sync(skb))
ctrl |= MACB_BIT(TX_NOCRC);
} else
/* Only set MSS/MFS on payload descriptors
@@ -2034,7 +2192,7 @@ dma_error:
for (i = queue->tx_head; i != tx_head; i++) {
tx_skb = macb_tx_skb(queue, i);
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, 0);
}
return 0;
@@ -2098,23 +2256,19 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
skb_is_nonlinear(*skb);
int padlen = ETH_ZLEN - (*skb)->len;
- int headroom = skb_headroom(*skb);
int tailroom = skb_tailroom(*skb);
struct sk_buff *nskb;
u32 fcs;
if (!(ndev->features & NETIF_F_HW_CSUM) ||
!((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
- skb_shinfo(*skb)->gso_size) /* Not available for GSO */
+ skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb))
return 0;
if (padlen <= 0) {
		/* FCS could be appended to tailroom. */
if (tailroom >= ETH_FCS_LEN)
goto add_fcs;
- /* FCS could be appeded by moving data to headroom. */
- else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
- padlen = 0;
/* No room for FCS, need to reallocate skb. */
else
padlen = ETH_FCS_LEN;
@@ -2123,10 +2277,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
padlen += ETH_FCS_LEN;
}
- if (!cloned && headroom + tailroom >= padlen) {
- (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
- skb_set_tail_pointer(*skb, (*skb)->len);
- } else {
+ if (cloned || tailroom < padlen) {
nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
if (!nskb)
return -ENOMEM;
@@ -2156,7 +2307,6 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
u16 queue_index = skb_get_queue_mapping(skb);
struct macb *bp = netdev_priv(dev);
struct macb_queue *queue = &bp->queues[queue_index];
- unsigned long flags;
unsigned int desc_cnt, nr_frags, frag_size, f;
unsigned int hdrlen;
bool is_lso;
@@ -2172,6 +2322,12 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
+#ifdef CONFIG_MACB_USE_HWSTAMP
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (bp->hw_dma_cap & HW_DMA_CAP_PTP))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+#endif
+
is_lso = (skb_shinfo(skb)->gso_size != 0);
if (is_lso) {
@@ -2180,7 +2336,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* only queue eth + ip headers separately for UDP */
hdrlen = skb_transport_offset(skb);
else
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
if (skb_headlen(skb) < hdrlen) {
netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
/* if this is required, would need to copy to single buffer */
@@ -2213,16 +2369,16 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
}
- spin_lock_irqsave(&bp->lock, flags);
+ spin_lock_bh(&queue->tx_ptr_lock);
/* This is a hard error, log it. */
if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
bp->tx_ring_size) < desc_cnt) {
netif_stop_subqueue(dev, queue_index);
- spin_unlock_irqrestore(&bp->lock, flags);
netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
queue->tx_head, queue->tx_tail);
- return NETDEV_TX_BUSY;
+ ret = NETDEV_TX_BUSY;
+ goto unlock;
}
/* Map socket buffer for DMA transfer */
@@ -2235,13 +2391,15 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
skb_tx_timestamp(skb);
+ spin_lock_irq(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+ spin_unlock_irq(&bp->lock);
if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
netif_stop_subqueue(dev, queue_index);
unlock:
- spin_unlock_irqrestore(&bp->lock, flags);
+ spin_unlock_bh(&queue->tx_ptr_lock);
return ret;
}
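The resulting locking is two-level: the per-queue tx_ptr_lock (taken with BHs disabled) guards the ring head/tail, while the device-wide bp->lock is only held for the brief TSTART write. Roughly, as a sketch:

	spin_lock_bh(&queue->tx_ptr_lock);	/* ring pointers, per queue */
	/* ... map the skb into descriptors ... */
	spin_lock_irq(&bp->lock);		/* device registers */
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
	spin_unlock_irq(&bp->lock);
	spin_unlock_bh(&queue->tx_ptr_lock);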
@@ -2501,8 +2659,12 @@ static u32 gem_mdc_clk_div(struct macb *bp)
config = GEM_BF(CLK, GEM_CLK_DIV48);
else if (pclk_hz <= 160000000)
config = GEM_BF(CLK, GEM_CLK_DIV64);
- else
+ else if (pclk_hz <= 240000000)
config = GEM_BF(CLK, GEM_CLK_DIV96);
+ else if (pclk_hz <= 320000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV128);
+ else
+ config = GEM_BF(CLK, GEM_CLK_DIV224);
return config;
}
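	/* Worked example for the new branches above: with pclk = 300 MHz the
	 * old fallback picked GEM_CLK_DIV96, i.e. MDC ~= 3.125 MHz, above the
	 * 2.5 MHz commonly assumed as the MDIO limit; DIV128 gives ~2.34 MHz,
	 * and DIV224 keeps pclk up to roughly 560 MHz under that limit.
	 */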
@@ -2761,9 +2923,9 @@ static int macb_open(struct net_device *dev)
netdev_dbg(bp->dev, "open\n");
- err = pm_runtime_get_sync(&bp->pdev->dev);
+ err = pm_runtime_resume_and_get(&bp->pdev->dev);
if (err < 0)
- goto pm_exit;
+ return err;
/* RX buffers initialization */
macb_init_rx_buffer_size(bp, bufsz);
@@ -2775,15 +2937,21 @@ static int macb_open(struct net_device *dev)
goto pm_exit;
}
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
- napi_enable(&queue->napi);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ napi_enable(&queue->napi_rx);
+ napi_enable(&queue->napi_tx);
+ }
macb_init_hw(bp);
- err = macb_phylink_connect(bp);
+ err = phy_power_on(bp->sgmii_phy);
if (err)
goto reset_hw;
+ err = macb_phylink_connect(bp);
+ if (err)
+ goto phy_off;
+
netif_tx_start_all_queues(dev);
if (bp->ptp_info)
@@ -2791,10 +2959,15 @@ static int macb_open(struct net_device *dev)
return 0;
+phy_off:
+ phy_power_off(bp->sgmii_phy);
+
reset_hw:
macb_reset_hw(bp);
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
- napi_disable(&queue->napi);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ napi_disable(&queue->napi_rx);
+ napi_disable(&queue->napi_tx);
+ }
macb_free_consistent(bp);
pm_exit:
pm_runtime_put_sync(&bp->pdev->dev);
@@ -2810,12 +2983,16 @@ static int macb_close(struct net_device *dev)
netif_tx_stop_all_queues(dev);
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
- napi_disable(&queue->napi);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ napi_disable(&queue->napi_rx);
+ napi_disable(&queue->napi_tx);
+ }
phylink_stop(bp->phylink);
phylink_disconnect_phy(bp->phylink);
+ phy_power_off(bp->sgmii_phy);
+
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
netif_carrier_off(dev);
@@ -2841,6 +3018,18 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static int macb_set_mac_addr(struct net_device *dev, void *addr)
+{
+ int err;
+
+ err = eth_mac_addr(dev, addr);
+ if (err < 0)
+ return err;
+
+ macb_set_hwaddr(netdev_priv(dev));
+ return 0;
+}
+
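	/* Together with IFF_LIVE_ADDR_CHANGE and the .ndo_set_mac_address
	 * hookup further down, eth_mac_addr() validates and stores the new
	 * address and macb_set_hwaddr() pushes it into the specific-address
	 * registers, so e.g. "ip link set dev eth0 address 02:00:00:00:00:01"
	 * (an illustrative, locally administered address) takes effect
	 * without bringing the interface down.
	 */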
static void gem_update_stats(struct macb *bp)
{
struct macb_queue *queue;
@@ -3101,7 +3290,9 @@ static int macb_set_link_ksettings(struct net_device *netdev,
}
static void macb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct macb *bp = netdev_priv(netdev);
@@ -3113,7 +3304,9 @@ static void macb_get_ringparam(struct net_device *netdev,
}
static int macb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct macb *bp = netdev_priv(netdev);
u32 new_rx_size, new_tx_size;
@@ -3374,7 +3567,8 @@ static int gem_add_flow_filter(struct net_device *netdev,
fs->flow_type, (int)fs->ring_cookie, fs->location,
htonl(fs->h_u.tcp_ip4_spec.ip4src),
htonl(fs->h_u.tcp_ip4_spec.ip4dst),
- htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
spin_lock_irqsave(&bp->rx_fs_lock, flags);
@@ -3427,8 +3621,8 @@ static int gem_del_flow_filter(struct net_device *netdev,
fs->flow_type, (int)fs->ring_cookie, fs->location,
htonl(fs->h_u.tcp_ip4_spec.ip4src),
htonl(fs->h_u.tcp_ip4_spec.ip4dst),
- htons(fs->h_u.tcp_ip4_spec.psrc),
- htons(fs->h_u.tcp_ip4_spec.pdst));
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
gem_writel_n(bp, SCRT2, fs->location, 0);
@@ -3675,7 +3869,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_eth_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = macb_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = macb_set_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = macb_poll_controller,
#endif
@@ -3708,17 +3902,17 @@ static void macb_configure_caps(struct macb *bp,
dcfg = gem_readl(bp, DCFG2);
if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
bp->caps |= MACB_CAPS_FIFO_MODE;
-#ifdef CONFIG_MACB_USE_HWSTAMP
if (gem_has_ptp(bp)) {
if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
dev_err(&bp->pdev->dev,
"GEM doesn't support hardware ptp.\n");
else {
+#ifdef CONFIG_MACB_USE_HWSTAMP
bp->hw_dma_cap |= HW_DMA_CAP_PTP;
bp->ptp_info = &gem_ptp_info;
+#endif
}
}
-#endif
}
dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
@@ -3867,7 +4061,9 @@ static int macb_init(struct platform_device *pdev)
queue = &bp->queues[q];
queue->bp = bp;
- netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
+ spin_lock_init(&queue->tx_ptr_lock);
+ netif_napi_add(dev, &queue->napi_rx, macb_rx_poll);
+ netif_napi_add(dev, &queue->napi_tx, macb_tx_poll);
if (hw_q) {
queue->ISR = GEM_ISR(hw_q - 1);
queue->IER = GEM_IER(hw_q - 1);
@@ -3936,6 +4132,8 @@ static int macb_init(struct platform_device *pdev)
dev->ethtool_ops = &macb_ethtool_ops;
}
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
/* Set features */
dev->hw_features = NETIF_F_SG;
@@ -4137,11 +4335,9 @@ static int at91ether_open(struct net_device *dev)
u32 ctl;
int ret;
- ret = pm_runtime_get_sync(&lp->pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&lp->pdev->dev);
+ ret = pm_runtime_resume_and_get(&lp->pdev->dev);
+ if (ret < 0)
return ret;
- }
/* Clear internal statistics */
ctl = macb_readl(lp, NCR);
@@ -4492,6 +4688,62 @@ static int fu540_c000_init(struct platform_device *pdev)
return macb_init(pdev);
}
+static int init_reset_optional(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(dev);
+ int ret;
+
+ if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ /* Ensure PHY device used in SGMII mode is ready */
+ bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
+
+ if (IS_ERR(bp->sgmii_phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy),
+ "failed to get SGMII PHY\n");
+
+ ret = phy_init(bp->sgmii_phy);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to init SGMII PHY\n");
+
+ ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
+ if (!ret) {
+ u32 pm_info[2];
+
+ ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
+ pm_info, ARRAY_SIZE(pm_info));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read power management information\n");
+ goto err_out_phy_exit;
+ }
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
+ if (ret)
+ goto err_out_phy_exit;
+
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
+ if (ret)
+ goto err_out_phy_exit;
+ }
+
+ }
+
+ /* Fully reset controller at hardware level if mapped in device tree */
+ ret = device_reset_optional(&pdev->dev);
+ if (ret) {
+ phy_exit(bp->sgmii_phy);
+ return dev_err_probe(&pdev->dev, ret, "failed to reset controller");
+ }
+
+ ret = macb_init(pdev);
+
+err_out_phy_exit:
+ if (ret)
+ phy_exit(bp->sgmii_phy);
+
+ return ret;
+}
+
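	/* Both handles here are optional: devm_phy_optional_get() returns NULL
	 * when no SGMII PHY is described, phy_init(NULL) is a no-op, and
	 * device_reset_optional() succeeds when the device tree provides no
	 * reset line, so platforms that reuse this init path without those
	 * properties (e.g. mpfs and versal below) should probe unchanged.
	 */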
static const struct macb_usrio_config sama7g5_usrio = {
.mii = 0,
.rmii = 1,
@@ -4518,8 +4770,8 @@ static const struct macb_config at91sam9260_config = {
};
static const struct macb_config sama5d3macb_config = {
- .caps = MACB_CAPS_SG_DISABLED
- | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ .caps = MACB_CAPS_SG_DISABLED |
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.clk_init = macb_clk_init,
.init = macb_init,
.usrio = &macb_default_usrio,
@@ -4550,8 +4802,8 @@ static const struct macb_config sama5d29_config = {
};
static const struct macb_config sama5d3_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
- | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
+ .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4583,11 +4835,11 @@ static const struct macb_config np4_config = {
static const struct macb_config zynqmp_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
- .init = macb_init,
+ .init = init_reset_optional,
.jumbo_max_len = 10240,
.usrio = &macb_default_usrio,
};
@@ -4601,9 +4853,20 @@ static const struct macb_config zynq_config = {
.usrio = &macb_default_usrio,
};
+static const struct macb_config mpfs_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = init_reset_optional,
+ .usrio = &macb_default_usrio,
+ .jumbo_max_len = 10240,
+};
+
static const struct macb_config sama7g5_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
- MACB_CAPS_MIIONRGMII,
+ MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4612,15 +4875,25 @@ static const struct macb_config sama7g5_gem_config = {
static const struct macb_config sama7g5_emac_config = {
.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
- MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII,
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
.usrio = &sama7g5_usrio,
};
+static const struct macb_config versal_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = init_reset_optional,
+ .jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
+};
+
static const struct of_device_id macb_dt_ids[] = {
- { .compatible = "cdns,at32ap7000-macb" },
{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
{ .compatible = "cdns,macb" },
{ .compatible = "cdns,np4-macb", .data = &np4_config },
@@ -4634,11 +4907,15 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },
- { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
- { .compatible = "cdns,zynq-gem", .data = &zynq_config },
+ { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, /* deprecated */
+ { .compatible = "cdns,zynq-gem", .data = &zynq_config }, /* deprecated */
{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
+ { .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
{ .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
{ .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
+ { .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config},
+ { .compatible = "xlnx,zynq-gem", .data = &zynq_config },
+ { .compatible = "xlnx,versal-gem", .data = &versal_config},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
@@ -4646,8 +4923,8 @@ MODULE_DEVICE_TABLE(of, macb_dt_ids);
static const struct macb_config default_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP,
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4736,7 +5013,7 @@ static int macb_probe(struct platform_device *pdev)
bp->jumbo_max_len = macb_config->jumbo_max_len;
bp->wol = 0;
- if (of_get_property(np, "magic-packet", NULL))
+ if (of_property_read_bool(np, "magic-packet"))
bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
@@ -4749,7 +5026,7 @@ static int macb_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
bp->hw_dma_cap |= HW_DMA_CAP_64B;
}
#endif
@@ -4763,8 +5040,8 @@ static int macb_probe(struct platform_device *pdev)
/* MTU range: 68 - 1500 or 10240 */
dev->min_mtu = GEM_MTU_MIN_SIZE;
- if (bp->caps & MACB_CAPS_JUMBO)
- dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
+ if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
+ dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
else
dev->max_mtu = ETH_DATA_LEN;
@@ -4804,7 +5081,7 @@ static int macb_probe(struct platform_device *pdev)
err = macb_mii_init(bp);
if (err)
- goto err_out_free_netdev;
+ goto err_out_phy_exit;
netif_carrier_off(dev);
@@ -4829,6 +5106,9 @@ err_out_unregister_mdio:
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
+err_out_phy_exit:
+ phy_exit(bp->sgmii_phy);
+
err_out_free_netdev:
free_netdev(dev);
@@ -4850,6 +5130,7 @@ static int macb_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
+ phy_exit(bp->sgmii_phy);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
@@ -4930,12 +5211,15 @@ static int __maybe_unused macb_suspend(struct device *dev)
netif_device_detach(netdev);
for (q = 0, queue = bp->queues; q < bp->num_queues;
- ++q, ++queue)
- napi_disable(&queue->napi);
+ ++q, ++queue) {
+ napi_disable(&queue->napi_rx);
+ napi_disable(&queue->napi_tx);
+ }
if (!(bp->wol & MACB_WOL_ENABLED)) {
rtnl_lock();
phylink_stop(bp->phylink);
+ phy_exit(bp->sgmii_phy);
rtnl_unlock();
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
@@ -5009,8 +5293,10 @@ static int __maybe_unused macb_resume(struct device *dev)
}
for (q = 0, queue = bp->queues; q < bp->num_queues;
- ++q, ++queue)
- napi_enable(&queue->napi);
+ ++q, ++queue) {
+ napi_enable(&queue->napi_rx);
+ napi_enable(&queue->napi_tx);
+ }
if (netdev->hw_features & NETIF_F_NTUPLE)
gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
@@ -5023,6 +5309,9 @@ static int __maybe_unused macb_resume(struct device *dev)
macb_set_rx_mode(netdev);
macb_restore_features(bp);
rtnl_lock();
+ if (!device_may_wakeup(&bp->dev->dev))
+ phy_init(bp->sgmii_phy);
+
phylink_start(bp->phylink);
rtnl_unlock();
@@ -5040,7 +5329,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
if (!(device_may_wakeup(dev)))
macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
- else
+ else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK))
macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);
return 0;
@@ -5056,8 +5345,10 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
clk_prepare_enable(bp->hclk);
clk_prepare_enable(bp->tx_clk);
clk_prepare_enable(bp->rx_clk);
+ clk_prepare_enable(bp->tsu_clk);
+ } else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) {
+ clk_prepare_enable(bp->tsu_clk);
}
- clk_prepare_enable(bp->tsu_clk);
return 0;
}
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 095c5a2144a7..51d26fa190d7 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -258,6 +258,8 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
*/
gem_tsu_get_time(&bp->ptp_clock_info, &tsu, NULL);
+ ts->tv_sec |= ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);
+
/* If the top bit is set in the timestamp,
* but not in 1588 timer, it has rolled over,
* so subtract max size
@@ -266,8 +268,6 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
!(tsu.tv_sec & (GEM_DMA_SEC_TOP >> 1)))
ts->tv_sec -= GEM_DMA_SEC_TOP;
- ts->tv_sec += ((~GEM_DMA_SEC_MASK) & tsu.tv_sec);
-
return 0;
}
@@ -292,79 +292,39 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
}
}
-static void gem_tstamp_tx(struct macb *bp, struct sk_buff *skb,
- struct macb_dma_desc_ptp *desc_ptp)
+void gem_ptp_txstamp(struct macb *bp, struct sk_buff *skb,
+ struct macb_dma_desc *desc)
{
struct skb_shared_hwtstamps shhwtstamps;
- struct timespec64 ts;
-
- gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- skb_tstamp_tx(skb, &shhwtstamps);
-}
-
-int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
- struct macb_dma_desc *desc)
-{
- unsigned long tail = READ_ONCE(queue->tx_ts_tail);
- unsigned long head = queue->tx_ts_head;
struct macb_dma_desc_ptp *desc_ptp;
- struct gem_tx_ts *tx_timestamp;
-
- if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl))
- return -EINVAL;
+ struct timespec64 ts;
- if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
- return -ENOMEM;
+ if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl)) {
+ dev_warn_ratelimited(&bp->pdev->dev,
+ "Timestamp not set in TX BD as expected\n");
+ return;
+ }
- desc_ptp = macb_ptp_desc(queue->bp, desc);
+ desc_ptp = macb_ptp_desc(bp, desc);
/* Unlikely but check */
- if (!desc_ptp)
- return -EINVAL;
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- tx_timestamp = &queue->tx_timestamps[head];
- tx_timestamp->skb = skb;
+ if (!desc_ptp) {
+ dev_warn_ratelimited(&bp->pdev->dev,
+ "Timestamp not supported in BD\n");
+ return;
+ }
+
/* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
dma_rmb();
- tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
- tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
- /* move head */
- smp_store_release(&queue->tx_ts_head,
- (head + 1) & (PTP_TS_BUFFER_SIZE - 1));
-
- schedule_work(&queue->tx_ts_task);
- return 0;
-}
+ gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
-static void gem_tx_timestamp_flush(struct work_struct *work)
-{
- struct macb_queue *queue =
- container_of(work, struct macb_queue, tx_ts_task);
- unsigned long head, tail;
- struct gem_tx_ts *tx_ts;
-
- /* take current head */
- head = smp_load_acquire(&queue->tx_ts_head);
- tail = queue->tx_ts_tail;
-
- while (CIRC_CNT(head, tail, PTP_TS_BUFFER_SIZE)) {
- tx_ts = &queue->tx_timestamps[tail];
- gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp);
- /* cleanup */
- dev_kfree_skb_any(tx_ts->skb);
- /* remove old tail */
- smp_store_release(&queue->tx_ts_tail,
- (tail + 1) & (PTP_TS_BUFFER_SIZE - 1));
- tail = queue->tx_ts_tail;
- }
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb, &shhwtstamps);
}
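	/* With the per-queue timestamp ring and deferred work removed, the TX
	 * timestamp is now reported synchronously from the TX completion
	 * (NAPI) path via skb_tstamp_tx(); the skb itself remains owned and
	 * freed by the caller, which is why macb_tx_complete() in the main
	 * driver no longer clears tx_skb->skb for timestamped frames.
	 */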
void gem_ptp_init(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct macb_queue *queue;
- unsigned int q;
bp->ptp_clock_info = gem_ptp_caps_template;
@@ -384,11 +344,6 @@ void gem_ptp_init(struct net_device *dev)
}
spin_lock_init(&bp->tsu_clk_lock);
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue->tx_ts_head = 0;
- queue->tx_ts_tail = 0;
- INIT_WORK(&queue->tx_ts_task, gem_tx_timestamp_flush);
- }
gem_ptp_init_tsu(bp);
@@ -434,7 +389,7 @@ int gem_get_hwtst(struct net_device *dev, struct ifreq *rq)
return 0;
}
-static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
+static void gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
{
u32 reg_val;
@@ -444,8 +399,6 @@ static int gem_ptp_set_one_step_sync(struct macb *bp, u8 enable)
macb_writel(bp, NCR, reg_val | MACB_BIT(OSSMODE));
else
macb_writel(bp, NCR, reg_val & ~MACB_BIT(OSSMODE));
-
- return 0;
}
int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -464,18 +417,15 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd)
sizeof(*tstamp_config)))
return -EFAULT;
- /* reserved for future extensions */
- if (tstamp_config->flags)
- return -EINVAL;
-
switch (tstamp_config->tx_type) {
case HWTSTAMP_TX_OFF:
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- if (gem_ptp_set_one_step_sync(bp, 1) != 0)
- return -ERANGE;
- fallthrough;
+ gem_ptp_set_one_step_sync(bp, 1);
+ tx_bd_control = TSTAMP_ALL_FRAMES;
+ break;
case HWTSTAMP_TX_ON:
+ gem_ptp_set_one_step_sync(bp, 0);
tx_bd_control = TSTAMP_ALL_FRAMES;
break;
default:
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 457cb7121000..f4f87dfa9687 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1224,7 +1224,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
* @budget : maximum number of packets that the current CPU can receive from
* all interfaces.
* Description :
- * This function implements the the reception process.
+ * This function implements the reception process.
* Also it runs the TX completion thread
*/
static int xgmac_poll(struct napi_struct *napi, int budget)
@@ -1792,7 +1792,7 @@ static int xgmac_probe(struct platform_device *pdev)
netdev_warn(ndev, "MAC address %pM not valid",
ndev->dev_addr);
- netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
+ netif_napi_add(ndev, &priv->napi, xgmac_poll);
ret = register_netdev(ndev);
if (ret)
goto err_reg;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
index 3f1c189646f4..a0fd32476225 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
@@ -87,8 +87,8 @@
*/
#define CN23XX_SLI_PKT_IN_JABBER 0x29170
/* The input jabber is used to determine the TSO max size.
- * Due to H/W limitation, this need to be reduced to 60000
- * in order to to H/W TSO and avoid the WQE malfarmation
+ * Due to H/W limitation, this needs to be reduced to 60000
+ * in order to use H/W TSO and avoid the WQE malformation
* PKO_BUG_24989_WQE_LEN
*/
#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
index d33dd8f4226f..e956109415cd 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
@@ -36,8 +36,8 @@
#define CN23XX_CONFIG_PCIE_FLTMSK 0x720
/* The input jabber is used to determine the TSO max size.
- * Due to H/W limitation, this need to be reduced to 60000
- * in order to to H/W TSO and avoid the WQE malfarmation
+ * Due to H/W limitation, this needs to be reduced to 60000
+ * in order to use H/W TSO and avoid the WQE malformation
* PKO_BUG_24989_WQE_LEN
*/
#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 73cb03266549..882b2be06ea0 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -851,7 +851,7 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
napi = &droq->napi;
dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
(u64)netdev, (u64)octeon_dev);
- netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
+ netif_napi_add(netdev, napi, liquidio_napi_poll);
/* designate a CPU for this droq */
droq->cpu_id = cpu_id;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 2b9747867d4c..2c10ae3f7fc1 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -947,7 +947,9 @@ static int lio_set_phys_id(struct net_device *netdev,
static void
lio_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
@@ -1252,8 +1254,11 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
return 0;
}
-static int lio_ethtool_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static int
+lio_ethtool_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
u32 rx_count, tx_count, rx_count_old, tx_count_old;
struct lio *lio = GET_LIO(netdev);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 12eee2bc7f5c..9bd1d2d7027d 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -92,11 +92,6 @@ static int octeon_console_debug_enabled(u32 console)
/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
-struct lio_trusted_vf_ctx {
- struct completion complete;
- int status;
-};
-
struct oct_link_status_resp {
u64 rh;
struct oct_link_info link_info;
@@ -1134,7 +1129,6 @@ static void octeon_destroy_resources(struct octeon_device *oct)
fallthrough;
case OCT_DEV_PCI_ENABLE_DONE:
- pci_clear_master(oct->pci_dev);
/* Disable the device, releasing the PCI INT */
pci_disable_device(oct->pci_dev);
@@ -1517,14 +1511,17 @@ static void free_netsgbuf_with_resp(void *buf)
}
/**
- * liquidio_ptp_adjfreq - Adjust ptp frequency
+ * liquidio_ptp_adjfine - Adjust ptp frequency
* @ptp: PTP clock info
- * @ppb: how much to adjust by, in parts-per-billion
+ * @scaled_ppm: how much to adjust by, in scaled parts-per-million
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/
-static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int liquidio_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct lio *lio = container_of(ptp, struct lio, ptp_info);
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
u64 comp, delta;
unsigned long flags;
bool neg_adj = false;
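	/* scaled_ppm carries 16 fractional bits of ppm: a request of +1 ppm
	 * arrives as scaled_ppm = 1 << 16 = 65536, and
	 * scaled_ppm_to_ppb(65536) ~= 1000 ppb, so the body below keeps
	 * working in parts-per-billion as before.
	 */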
@@ -1648,7 +1645,7 @@ static void oct_ptp_open(struct net_device *netdev)
lio->ptp_info.n_ext_ts = 0;
lio->ptp_info.n_per_out = 0;
lio->ptp_info.pps = 0;
- lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
+ lio->ptp_info.adjfine = liquidio_ptp_adjfine;
lio->ptp_info.adjtime = liquidio_ptp_adjtime;
lio->ptp_info.gettime64 = liquidio_ptp_gettime;
lio->ptp_info.settime64 = liquidio_ptp_settime;
@@ -1799,13 +1796,10 @@ static int liquidio_open(struct net_device *netdev)
ifstate_set(lio, LIO_IFSTATE_RUNNING);
- if (OCTEON_CN23XX_PF(oct)) {
- if (!oct->msix_on)
- if (setup_tx_poll_fn(netdev))
- return -1;
- } else {
- if (setup_tx_poll_fn(netdev))
- return -1;
+ if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) {
+ ret = setup_tx_poll_fn(netdev);
+ if (ret)
+ goto err_poll;
}
netif_tx_start_all_queues(netdev);
@@ -1818,7 +1812,7 @@ static int liquidio_open(struct net_device *netdev)
/* tell Octeon to start forwarding packets to host */
ret = send_rx_ctrl_cmd(lio, 1);
if (ret)
- return ret;
+ goto err_rx_ctrl;
/* start periodical statistics fetch */
INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
@@ -1829,6 +1823,27 @@ static int liquidio_open(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
netdev->name);
+ return 0;
+
+err_rx_ctrl:
+ if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
+ cleanup_tx_poll_fn(netdev);
+err_poll:
+ if (lio->ptp_clock) {
+ ptp_clock_unregister(lio->ptp_clock);
+ lio->ptp_clock = NULL;
+ }
+
+ if (oct->props[lio->ifidx].napi_enabled == 1) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 0;
+ }
+
return ret;
}
@@ -2114,9 +2129,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
return -EFAULT;
- if (conf.flags)
- return -EINVAL;
-
switch (conf.tx_type) {
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF:
@@ -3566,7 +3578,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
| NETIF_F_TSO | NETIF_F_TSO6
| NETIF_F_LRO;
}
- netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
+ netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
/* Copy of transmit encapsulation capabilities:
* TSO, TSO6, Checksums for this device
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index c607756b731f..e2921aec3da0 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -577,7 +577,6 @@ static void octeon_destroy_resources(struct octeon_device *oct)
fallthrough;
case OCT_DEV_PCI_ENABLE_DONE:
- pci_clear_master(oct->pci_dev);
/* Disable the device, releasing the PCI INT */
pci_disable_device(oct->pci_dev);
@@ -1254,9 +1253,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
return -EFAULT;
- if (conf.flags)
- return -EINVAL;
-
switch (conf.tx_type) {
case HWTSTAMP_TX_ON:
case HWTSTAMP_TX_OFF:
@@ -2097,7 +2093,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
| NETIF_F_TSO | NETIF_F_TSO6
| NETIF_F_GRO
| NETIF_F_LRO;
- netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
+ netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
/* Copy of transmit encapsulation capabilities:
* TSO, TSO6, Checksums for this device
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 28feabec8fbb..67c3570f875f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -247,8 +247,7 @@ static const struct cvmx_bootmem_named_block_desc
struct cvmx_bootmem_named_block_desc,
size));
- strncpy(desc->name, name, sizeof(desc->name));
- desc->name[sizeof(desc->name) - 1] = 0;
+ strscpy(desc->name, name, sizeof(desc->name));
return &oct->bootmem_named_block_desc;
} else {
return NULL;
@@ -471,8 +470,8 @@ static void output_console_line(struct octeon_device *oct,
if (line != &console_buffer[bytes_read]) {
console_buffer[bytes_read] = '\0';
len = strlen(console->leftover);
- strncpy(&console->leftover[len], line,
- sizeof(console->leftover) - len);
+ strscpy(&console->leftover[len], line,
+ sizeof(console->leftover) - len + 1);
}
}
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 8e59c2825533..32f854c0cd79 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -40,15 +40,6 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);
static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
-static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
-{
- struct octeon_instr_queue *iq =
- (struct octeon_instr_queue *)oct->instr_queue[iq_no];
- return iq->iqcmd_64B;
-}
-
-#define IQ_INSTR_MODE_32B(oct, iq_no) (!IQ_INSTR_MODE_64B(oct, iq_no))
-
 /* Define this to return the request status compatible with old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 4e39d712e121..edde0b8fa49c 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -548,7 +548,7 @@ struct octeon_mgmt_cam_state {
};
static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
- unsigned char *addr)
+ const unsigned char *addr)
{
int i;
@@ -702,9 +702,6 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags) /* reserved for future extensions */
- return -EINVAL;
-
	/* Check the status of hardware for timestamps */
if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
/* Get the current state of the PTP clock */
@@ -1345,7 +1342,7 @@ static void octeon_mgmt_poll_controller(struct net_device *netdev)
static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static int octeon_mgmt_nway_reset(struct net_device *dev)
@@ -1399,8 +1396,8 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, netdev);
p = netdev_priv(netdev);
- netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
- OCTEON_MGMT_NAPI_WEIGHT);
+ netif_napi_add_weight(netdev, &p->napi, octeon_mgmt_napi_poll,
+ OCTEON_MGMT_NAPI_WEIGHT);
p->netdev = netdev;
p->dev = &pdev->dev;
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index f2f1ce81fd9c..0ec65ec634df 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -59,7 +59,7 @@ struct nicpf {
/* MSI-X */
u8 num_vec;
- bool irq_allocated[NIC_PF_MSIX_VECTORS];
+ unsigned int irq_allocated[NIC_PF_MSIX_VECTORS];
char irq_name[NIC_PF_MSIX_VECTORS][20];
};
@@ -1150,7 +1150,7 @@ static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
u64 intr;
u8 vf;
- if (irq == pci_irq_vector(nic->pdev, NIC_PF_INTR_ID_MBOX0))
+ if (irq == nic->irq_allocated[NIC_PF_INTR_ID_MBOX0])
mbx = 0;
else
mbx = 1;
@@ -1176,14 +1176,14 @@ static void nic_free_all_interrupts(struct nicpf *nic)
for (irq = 0; irq < nic->num_vec; irq++) {
if (nic->irq_allocated[irq])
- free_irq(pci_irq_vector(nic->pdev, irq), nic);
- nic->irq_allocated[irq] = false;
+ free_irq(nic->irq_allocated[irq], nic);
+ nic->irq_allocated[irq] = 0;
}
}
static int nic_register_interrupts(struct nicpf *nic)
{
- int i, ret;
+ int i, ret, irq;
nic->num_vec = pci_msix_vec_count(nic->pdev);
/* Enable MSI-X */
@@ -1201,13 +1201,13 @@ static int nic_register_interrupts(struct nicpf *nic)
sprintf(nic->irq_name[i],
"NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));
- ret = request_irq(pci_irq_vector(nic->pdev, i),
- nic_mbx_intr_handler, 0,
+ irq = pci_irq_vector(nic->pdev, i);
+ ret = request_irq(irq, nic_mbx_intr_handler, 0,
nic->irq_name[i], nic);
if (ret)
goto fail;
- nic->irq_allocated[i] = true;
+ nic->irq_allocated[i] = irq;
}
/* Enable mailbox interrupt */
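
Recording the Linux IRQ number returned by pci_irq_vector() at request time lets the teardown path and the handler's vector check reuse the cached value, with zero doubling as the "not requested" marker the old bool array provided. Roughly, with hypothetical names:

#include <linux/interrupt.h>
#include <linux/pci.h>

#define DEMO_NVEC 4

struct demo_nic {
	struct pci_dev *pdev;
	unsigned int irq_allocated[DEMO_NVEC];	/* 0 == not requested */
};

static irqreturn_t demo_intr(int irq, void *data) { return IRQ_HANDLED; }

static int demo_request_vec(struct demo_nic *nic, int i)
{
	int irq = pci_irq_vector(nic->pdev, i);
	int ret;

	if (irq < 0)
		return irq;

	ret = request_irq(irq, demo_intr, 0, "demo", nic);
	if (ret)
		return ret;

	nic->irq_allocated[i] = irq;	/* remember the cooked IRQ number */
	return 0;
}

static void demo_free_vecs(struct demo_nic *nic)
{
	int i;

	for (i = 0; i < DEMO_NVEC; i++) {
		if (nic->irq_allocated[i])
			free_irq(nic->irq_allocated[i], nic);
		nic->irq_allocated[i] = 0;
	}
}
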
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 7f2882109b16..d8d71bf97983 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -191,8 +191,8 @@ static void nicvf_get_drvinfo(struct net_device *netdev,
{
struct nicvf *nic = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
}
static u32 nicvf_get_msglevel(struct net_device *netdev)
@@ -467,7 +467,9 @@ static int nicvf_get_coalesce(struct net_device *netdev,
}
static void nicvf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
@@ -479,7 +481,9 @@ static void nicvf_get_ringparam(struct net_device *netdev,
}
static int nicvf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
@@ -731,12 +735,17 @@ static int nicvf_set_channels(struct net_device *dev,
if (channel->tx_count > nic->max_queues)
return -EINVAL;
- if (nic->xdp_prog &&
- ((channel->tx_count + channel->rx_count) > nic->max_queues)) {
- netdev_err(nic->netdev,
- "XDP mode, RXQs + TXQs > Max %d\n",
- nic->max_queues);
- return -EINVAL;
+ if (channel->tx_count + channel->rx_count > nic->max_queues) {
+ if (nic->xdp_prog) {
+ netdev_err(nic->netdev,
+ "XDP mode, RXQs + TXQs > Max %d\n",
+ nic->max_queues);
+ return -EINVAL;
+ }
+
+ xdp_clear_features_flag(nic->netdev);
+ } else if (!pass1_silicon(nic->pdev)) {
+ xdp_set_features_flag(dev, NETDEV_XDP_ACT_BASIC);
}
if (if_up)
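
The extra kernel_ethtool_ringparam and netlink_ext_ack parameters on the ring callbacks let drivers expose extended ring attributes and attach error strings to failed requests; drivers with nothing to add simply ignore them, as this one does. The current callback shape, sketched with illustrative limits:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

static void demo_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	ring->rx_max_pending = 4096;
	ring->tx_max_pending = 4096;
	ring->rx_pending = 1024;	/* hypothetical current sizes */
	ring->tx_pending = 1024;
}

static int demo_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	if (ring->rx_pending > 4096 || ring->tx_pending > 4096) {
		NL_SET_ERR_MSG_MOD(extack, "requested ring size too large");
		return -EINVAL;
	}
	/* ... reconfigure the queues ... */
	return 0;
}

static const struct ethtool_ops demo_ethtool_ops = {
	.get_ringparam	= demo_get_ringparam,
	.set_ringparam	= demo_set_ringparam,
};
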
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index bb45d5df2856..eff350e0bc2a 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -590,7 +590,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
return true;
default:
- bpf_warn_invalid_xdp_action(action);
+ bpf_warn_invalid_xdp_action(nic->netdev, prog, action);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(nic->netdev, prog, action);
@@ -1472,8 +1472,7 @@ int nicvf_open(struct net_device *netdev)
}
cq_poll->cq_idx = qidx;
cq_poll->nicvf = nic;
- netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &cq_poll->napi, nicvf_poll);
napi_enable(&cq_poll->napi);
nic->napi[qidx] = cq_poll;
}
@@ -1917,10 +1916,6 @@ static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
case HWTSTAMP_TX_ON:
@@ -2028,9 +2023,6 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
u8 mode;
struct xcast_addr_list *mc;
- if (!vf_work)
- return;
-
/* Save message data locally to prevent them from
* being overwritten by next ndo_set_rx_mode call().
*/
@@ -2226,6 +2218,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &nicvf_netdev_ops;
netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
+ if (!pass1_silicon(nic->pdev) &&
+ nic->rx_queues + nic->tx_queues <= nic->max_queues)
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
+
/* MTU range: 64 - 9200 */
netdev->min_mtu = NIC_HW_MIN_FRS;
netdev->max_mtu = NIC_HW_MAX_FRS;
@@ -2247,7 +2243,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_unregister_interrupts;
+ goto err_destroy_workqueue;
}
nic->msg_enable = debug;
@@ -2256,6 +2252,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+err_destroy_workqueue:
+ destroy_workqueue(nic->nicvf_rx_mode_wq);
err_unregister_interrupts:
nicvf_unregister_interrupts(nic);
err_free_netdev:
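
The new err_destroy_workqueue label keeps the unwind path a mirror of the setup order: whatever was created last is torn down first once register_netdev() fails. A generic sketch of that discipline, with stand-in helpers that are not part of this driver:

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct demo_nic {
	struct net_device *netdev;
	struct workqueue_struct *wq;
};

/* hypothetical helpers standing in for the driver's own routines */
static int demo_setup_interrupts(struct demo_nic *nic) { return 0; }
static void demo_teardown_interrupts(struct demo_nic *nic) { }

static int demo_probe_tail(struct demo_nic *nic)
{
	int err;

	err = demo_setup_interrupts(nic);		/* step 1 */
	if (err)
		return err;

	nic->wq = alloc_workqueue("demo_wq", 0, 0);	/* step 2 */
	if (!nic->wq) {
		err = -ENOMEM;
		goto err_teardown_interrupts;
	}

	err = register_netdev(nic->netdev);		/* step 3 */
	if (err)
		goto err_destroy_workqueue;

	return 0;

err_destroy_workqueue:		/* undo step 2 first ... */
	destroy_workqueue(nic->wq);
err_teardown_interrupts:	/* ... then step 1 */
	demo_teardown_interrupts(nic);
	return err;
}
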
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 50bbe79fb93d..06397cc8bb36 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -10,6 +10,7 @@
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/tso.h>
+#include <uapi/linux/bpf.h>
#include "nic_reg.h"
#include "nic.h"
@@ -1260,7 +1261,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
struct skb_shared_info *sh = skb_shinfo(skb);
- unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int sh_len = skb_tcp_all_headers(skb);
unsigned int data_len = skb->len - sh_len;
unsigned int p_len = sh->gso_size;
long f_id = -1; /* id of the current fragment */
@@ -1381,7 +1382,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
hdr->tso = 1;
- hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr->tso_start = skb_tcp_all_headers(skb);
hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
/* For non-tunneled pkts, point this to L2 ethertype */
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
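
skb_tcp_all_headers() folds the recurring skb_transport_offset(skb) + tcp_hdrlen(skb) expression into one helper returning the combined length of the link, network and TCP headers. For illustration:

#include <linux/skbuff.h>
#include <linux/tcp.h>

/* Payload length of a TCP skb, i.e. everything past the TCP header. */
static unsigned int demo_tcp_payload_len(const struct sk_buff *skb)
{
	/* Equivalent to skb_transport_offset(skb) + tcp_hdrlen(skb). */
	return skb->len - skb_tcp_all_headers(skb);
}
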
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 574a32f23f96..7eb2ddbe9bad 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1409,7 +1409,8 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
struct device *dev = &bgx->pdev->dev;
struct acpi_device *adev;
- if (acpi_bus_get_device(handle, &adev))
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev)
goto out;
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
@@ -1435,8 +1436,10 @@ static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
return AE_OK;
}
- if (strncmp(string.pointer, bgx_sel, 4))
+ if (strncmp(string.pointer, bgx_sel, 4)) {
+ kfree(string.pointer);
return AE_OK;
+ }
acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
bgx_acpi_register_phy, NULL, bgx, NULL);
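
acpi_fetch_acpi_dev() returns the struct acpi_device directly (NULL when none is attached), replacing the acpi_bus_get_device() status-plus-out-parameter idiom, and the added kfree() releases the name buffer that acpi_get_name() allocates on the early-return path. Both points, sketched with a hypothetical namespace-walk callback:

#include <linux/acpi.h>
#include <linux/slab.h>

static acpi_status demo_acpi_match(acpi_handle handle, u32 lvl,
				   void *ctx, void **retv)
{
	struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_device *adev;

	/* New style: fetch the device directly, NULL means "no device". */
	adev = acpi_fetch_acpi_dev(handle);
	if (!adev)
		return AE_OK;

	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string)))
		return AE_OK;

	/* ... compare string.pointer against the expected name ... */

	/* acpi_get_name() allocated string.pointer; free it on every path. */
	kfree(string.pointer);
	return AE_OK;
}
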
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 0321be77366c..e56eff701395 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: common.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cphy.h b/drivers/net/ethernet/chelsio/cxgb/cphy.h
index bf43da6c6a63..12639b688ddc 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cphy.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cphy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: cphy.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
index 5249686afe71..a30fb407115d 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
+++ b/drivers/net/ethernet/chelsio/cxgb/cpl5_cmd.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: cpl5_cmd.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -635,4 +626,3 @@ struct cpl_mss_change {
};
#endif /* _CXGB_CPL5_CMD_H_ */
-
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 609820e214a3..d2286adf09fe 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -429,8 +429,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = dev->ml_priv;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -710,7 +710,9 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct adapter *adapter = dev->ml_priv;
int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
@@ -724,7 +726,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->tx_pending = adapter->params.sge.cmdQ_size[0];
}
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct adapter *adapter = dev->ml_priv;
int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
@@ -940,11 +944,11 @@ static const struct net_device_ops cxgb_netdev_ops = {
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int i, err, pci_using_dac = 0;
unsigned long mmio_start, mmio_len;
const struct board_info *bi;
struct adapter *adapter = NULL;
struct port_info *pi;
+ int i, err;
err = pci_enable_device(pdev);
if (err)
@@ -957,17 +961,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_pdev;
}
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
-
- if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pr_err("%s: unable to obtain 64-bit DMA for coherent allocations\n",
- pci_name(pdev));
- err = -ENODEV;
- goto out_disable_pdev;
- }
-
- } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
goto out_disable_pdev;
}
@@ -1039,10 +1034,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_RXCSUM;
netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_RXCSUM | NETIF_F_LLTX;
+ NETIF_F_RXCSUM | NETIF_F_LLTX | NETIF_F_HIGHDMA;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
if (vlan_tso_capable(adapter)) {
netdev->features |=
NETIF_F_HW_VLAN_CTAG_TX |
@@ -1060,7 +1053,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
- netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
+ netif_napi_add(netdev, &adapter->napi, t1_poll);
netdev->ethtool_ops = &t1_ethtool_ops;
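
The 64-bit/32-bit DMA mask fallback collapses into one dma_set_mask_and_coherent() call because a 64-bit mask is only refused when the device cannot do DMA at all, which also lets NETIF_F_HIGHDMA be set unconditionally. A minimal sketch:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int demo_dma_init(struct pci_dev *pdev)
{
	int err;

	/* A 64-bit mask only fails when the device cannot do DMA at all,
	 * so the old 32-bit fallback path is dead code.
	 */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		dev_err(&pdev->dev, "no usable DMA configuration\n");

	return err;
}
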
diff --git a/drivers/net/ethernet/chelsio/cxgb/elmer0.h b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
index 81526ad36339..0427e894c277 100644
--- a/drivers/net/ethernet/chelsio/cxgb/elmer0.h
+++ b/drivers/net/ethernet/chelsio/cxgb/elmer0.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: elmer0.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -154,4 +145,3 @@ enum {
#define MI1_OP_INDIRECT_READ 3
#endif /* _CXGB_ELMER0_H_ */
-
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.c b/drivers/net/ethernet/chelsio/cxgb/espi.c
index 3e182eee799e..ef70569435be 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.c
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: espi.c *
@@ -7,16 +8,6 @@
* Ethernet SPI functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/espi.h b/drivers/net/ethernet/chelsio/cxgb/espi.h
index 162de5259df9..f588e9f3b37a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/espi.h
+++ b/drivers/net/ethernet/chelsio/cxgb/espi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: espi.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
index 5913eaf442b5..96077da1ed5e 100644
--- a/drivers/net/ethernet/chelsio/cxgb/gmac.h
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: gmac.h *
@@ -7,16 +8,6 @@
* Generic MAC functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
index 7ddb301bcba0..556c8ad68fa8 100644
--- a/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
+++ b/drivers/net/ethernet/chelsio/cxgb/mv88x201x.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: mv88x201x.c *
@@ -7,16 +8,6 @@
* Marvell PHY (mv88x201x) functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index 0bb37e4680c7..cbfa03d5663a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: pm3393.c *
@@ -7,16 +8,6 @@
* PMC/SIERRA (pm3393) MAC-PHY functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/regs.h b/drivers/net/ethernet/chelsio/cxgb/regs.h
index 964ce59ee169..f751e680cf7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb/regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: regs.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index cda01f22c71c..861edff5ed89 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: sge.c *
@@ -7,16 +8,6 @@
* DMA engine. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -1359,7 +1350,7 @@ static void restart_sched(struct tasklet_struct *t)
* @fl: the free list that contains the packet buffer
* @len: the packet length
*
- * Process an ingress ethernet pakcet and deliver it to the stack.
+ * Process an ingress ethernet packet and deliver it to the stack.
*/
static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
{
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index 716705b96f26..f7e6f64040ea 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: sge.h *
@@ -6,16 +7,6 @@
* Description: *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index 007c591b8bf5..367a9e4581d5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
* *
* File: subr.c *
@@ -7,16 +8,6 @@
* Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
diff --git a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
index 7f79cc7ceb75..4c883170683b 100644
--- a/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb/suni1x10gexp_regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*****************************************************************************
* *
* File: suni1x10gexp_regs.h *
@@ -7,16 +8,6 @@
* PMC/SIERRA (pm3393) MAC-PHY functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
- * This program is free software; you can redistribute it and/or modify *
- * it under the terms of the GNU General Public License, version 2, as *
- * published by the Free Software Foundation. *
- * *
- * You should have received a copy of the GNU General Public License along *
- * with this program; if not, see <http://www.gnu.org/licenses/>. *
- * *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
- * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
@@ -1639,4 +1630,3 @@
#define SUNI1x10GEXP_BITMSK_PL4IDU_DIP4I 0x0002
#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
-
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index bfffcaeee624..9b84c8d8d309 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -609,8 +609,7 @@ static void init_napi(struct adapter *adap)
struct sge_qset *qs = &adap->sge.qs[i];
if (qs->adap)
- netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
- 64);
+ netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll);
}
/*
@@ -1302,6 +1301,7 @@ static int cxgb_up(struct adapter *adap)
if (ret < 0) {
CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
t3_intr_disable(adap);
+ quiesce_rx(adap);
free_irq_resources(adap);
err = ret;
goto out;
@@ -1627,8 +1627,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
t3_get_tp_version(adapter, &tp_vers);
spin_unlock(&adapter->stats_lock);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
if (fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
@@ -1948,7 +1948,9 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -1964,7 +1966,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->tx_pending = q->txq_size[0];
}
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -3200,7 +3204,7 @@ static void cxgb3_init_iscsi_mac(struct net_device *dev)
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- int i, err, pci_using_dac = 0;
+ int i, err;
resource_size_t mmio_start, mmio_len;
const struct adapter_info *ai;
struct adapter *adapter = NULL;
@@ -3227,9 +3231,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_device;
}
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
goto out_release_regions;
}
@@ -3305,8 +3308,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features |= netdev->hw_features |
NETIF_F_HW_VLAN_CTAG_TX;
netdev->vlan_features |= netdev->features & VLAN_FEAT;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->netdev_ops = &cxgb_netdev_ops;
netdev->ethtool_ops = &cxgb_ethtool_ops;
@@ -3346,6 +3349,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (!adapter->registered_device_map) {
dev_err(&pdev->dev, "could not register any net devices\n");
+ err = -ENODEV;
goto out_free_dev;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 84604aff53ce..89256b866840 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -243,7 +243,7 @@ static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
/*
* on rx, the iscsi pdu has to be < rx page size and the
- * the max rx data length programmed in TP
+ * max rx data length programmed in TP
*/
val = min(adapter->params.tp.rx_pg_size,
((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index c3afec1041f8..efa7f401529e 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -126,8 +126,10 @@ struct rsp_desc { /* response queue descriptor */
struct rss_header rss_hdr;
__be32 flags;
__be32 len_cq;
- u8 imm_data[47];
- u8 intr_gen;
+ struct_group(immediate,
+ u8 imm_data[47];
+ u8 intr_gen;
+ );
};
/*
@@ -164,11 +166,6 @@ static u8 flit_desc_map[] = {
#endif
};
-static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
-{
- return container_of(q, struct sge_qset, fl[qidx]);
-}
-
static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
return container_of(q, struct sge_qset, rspq);
@@ -925,7 +922,8 @@ static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
if (skb) {
__skb_put(skb, IMMED_PKT_SIZE);
- skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
+ BUILD_BUG_ON(IMMED_PKT_SIZE != sizeof(resp->immediate));
+ skb_copy_to_linear_data(skb, &resp->immediate, IMMED_PKT_SIZE);
}
return skb;
}
@@ -1953,7 +1951,7 @@ static int ofld_poll(struct napi_struct *napi, int budget)
* @rx_gather: a gather list of packets if we are building a bundle
* @gather_idx: index of the next available slot in the bundle
*
- * Process an ingress offload pakcet and add it to the offload ingress
+ * Process an ingress offload packet and add it to the offload ingress
* queue. Returns the index of the next available slot in the bundle.
*/
static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
@@ -2079,7 +2077,7 @@ static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
* @pad: padding
* @lro: large receive offload
*
- * Process an ingress ethernet pakcet and deliver it to the stack.
+ * Process an ingress ethernet packet and deliver it to the stack.
* The padding is 2 if the packet was delivered in an Rx buffer and 0
* if it was immediate data in a response.
*/
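
struct_group() (from <linux/stddef.h>) overlays the listed members with a named struct of the same layout, so a copy that intentionally spans all of them can reference the group and pass FORTIFY_SOURCE bounds checking instead of looking like an overflow of the first member. A stand-alone sketch modelled on the rsp_desc change above:

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_rsp_desc {
	__be32 flags;
	struct_group(immediate,
		u8 imm_data[47];
		u8 intr_gen;
	);
};

static void demo_copy_immediate(void *dst, const struct demo_rsp_desc *resp)
{
	/* Copying sizeof(resp->immediate) == 48 bytes is an exact,
	 * bounds-checked copy of the grouped members.
	 */
	BUILD_BUG_ON(sizeof(resp->immediate) != 48);
	memcpy(dst, &resp->immediate, sizeof(resp->immediate));
}
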
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index da41eee2f25c..a06003bfa04b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -3613,6 +3613,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
adapter->params.pci.vpd_cap_addr =
pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
+ if (!adapter->params.pci.vpd_cap_addr)
+ return -ENODEV;
ret = get_vpd_params(adapter, &adapter->params.vpd);
if (ret < 0)
return ret;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index a7f291c89702..557c591a6ce3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -14,6 +14,7 @@
#include "cudbg_entity.h"
#include "cudbg_lib.h"
#include "cudbg_zlib.h"
+#include "cxgb4_tc_mqprio.h"
static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
{0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
@@ -3458,7 +3459,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < utxq->ntxq; i++)
QDESC_GET_TXQ(&utxq->uldtxq[i].q,
cudbg_uld_txq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
@@ -3475,7 +3476,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
cudbg_uld_rxq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD FLQ */
@@ -3487,7 +3488,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nrxq; i++)
QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
cudbg_uld_flq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
/* ULD CIQ */
@@ -3500,29 +3501,34 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
for (i = 0; i < urxq->nciq; i++)
QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
cudbg_uld_ciq_to_qtype(j),
- out_unlock);
+ out_unlock_uld);
}
}
+ mutex_unlock(&uld_mutex);
+
+ if (!padap->tc_mqprio)
+ goto out;
+ mutex_lock(&padap->tc_mqprio->mqprio_mutex);
/* ETHOFLD TXQ */
if (s->eohw_txq)
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_TXQ(&s->eohw_txq[i].q,
- CUDBG_QTYPE_ETHOFLD_TXQ, out);
+ CUDBG_QTYPE_ETHOFLD_TXQ, out_unlock_mqprio);
/* ETHOFLD RXQ and FLQ */
if (s->eohw_rxq) {
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
- CUDBG_QTYPE_ETHOFLD_RXQ, out);
+ CUDBG_QTYPE_ETHOFLD_RXQ, out_unlock_mqprio);
for (i = 0; i < s->eoqsets; i++)
QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
- CUDBG_QTYPE_ETHOFLD_FLQ, out);
+ CUDBG_QTYPE_ETHOFLD_FLQ, out_unlock_mqprio);
}
-out_unlock:
- mutex_unlock(&uld_mutex);
+out_unlock_mqprio:
+ mutex_unlock(&padap->tc_mqprio->mqprio_mutex);
out:
qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
@@ -3559,6 +3565,10 @@ out_free:
#undef QDESC_GET
return rc;
+
+out_unlock_uld:
+ mutex_unlock(&uld_mutex);
+ goto out;
}
int cudbg_collect_flash(struct cudbg_init *pdbg_init,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 5657ac8cfca0..fca9533bc011 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1079,8 +1079,6 @@ struct mbox_list {
#if IS_ENABLED(CONFIG_THERMAL)
struct ch_thermal {
struct thermal_zone_device *tzdev;
- int trip_temp;
- int trip_type;
};
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 4a872f328fea..7d5204834ee2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -85,7 +85,7 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
if (err) {
dev_err(adap->pdev_dev,
- "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, , err=%d\n",
+ "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, err=%d\n",
dcb_ver_array[dcb->dcb_version], app.selector,
app.protocol, -err);
break;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 7d49fd4edc9e..14e0d989c3ba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3429,18 +3429,18 @@ static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
unsigned long *t;
struct adapter *adap = filp->private_data;
- t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
+ t = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!t)
return -ENOMEM;
err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
if (err) {
- kfree(t);
+ bitmap_free(t);
return err;
}
bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
- kfree(t);
+ bitmap_free(t);
return count;
}
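
bitmap_zalloc(nbits, gfp) replaces the open-coded kcalloc(BITS_TO_LONGS(nbits), sizeof(long), gfp), and its result must go back through bitmap_free() rather than kfree() so allocation and release stay paired. Minimal sketch:

#include <linux/bitmap.h>
#include <linux/slab.h>

static unsigned long *demo_alloc_mask(unsigned int nbits)
{
	/* One bit per egress queue, all initially clear. */
	return bitmap_zalloc(nbits, GFP_KERNEL);
}

static void demo_free_mask(unsigned long *mask)
{
	bitmap_free(mask);	/* pairs with bitmap_zalloc() */
}
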
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 129352bbe114..8477a93cee6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -199,8 +199,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct adapter *adapter = netdev2adap(dev);
u32 exprom_vers;
- strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
info->regdump_len = get_regs_len(dev);
@@ -890,7 +890,9 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
const struct sge *s = &pi->adapter->sge;
@@ -906,7 +908,9 @@ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
-static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
+ struct kernel_ethtool_ringparam *kernel_e,
+ struct netlink_ext_ack *extack)
{
int i;
const struct port_info *pi = netdev_priv(dev);
@@ -1989,6 +1993,15 @@ static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
return 0;
}
+static bool cxgb4_fw_mod_type_info_available(unsigned int fw_mod_type)
+{
+ /* Read port module EEPROM as long as it is plugged-in and
+ * safe to read.
+ */
+ return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
+ fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
+}
+
static int cxgb4_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
@@ -1997,7 +2010,7 @@ static int cxgb4_get_module_info(struct net_device *dev,
struct adapter *adapter = pi->adapter;
int ret;
- if (!t4_is_inserted_mod_type(pi->mod_type))
+ if (!cxgb4_fw_mod_type_info_available(pi->mod_type))
return -EINVAL;
switch (pi->port_type) {
@@ -2214,7 +2227,7 @@ void cxgb4_cleanup_ethtool_filters(struct adapter *adap)
if (eth_filter_info) {
for (i = 0; i < adap->params.nports; i++) {
kvfree(eth_filter_info[i].loc_array);
- kfree(eth_filter_info[i].bmap);
+ bitmap_free(eth_filter_info[i].bmap);
}
kfree(eth_filter_info);
}
@@ -2257,9 +2270,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
goto free_eth_finfo;
}
- eth_filter->port[i].bmap = kcalloc(BITS_TO_LONGS(nentries),
- sizeof(unsigned long),
- GFP_KERNEL);
+ eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL);
if (!eth_filter->port[i].bmap) {
ret = -ENOMEM;
goto free_eth_finfo;
@@ -2271,7 +2282,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap)
free_eth_finfo:
while (i-- > 0) {
- kfree(eth_filter->port[i].bmap);
+ bitmap_free(eth_filter->port[i].bmap);
kvfree(eth_filter->port[i].loc_array);
}
kfree(eth_filter_info);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index dde1cf51d0ab..f0bc7396ce2b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -51,7 +51,6 @@
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
@@ -3903,8 +3902,8 @@ static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -5047,28 +5046,24 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
/* Allocate the memory for the various egress queue bitmaps
* ie starving_fl, txq_maperr and blocked_fl.
*/
- adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.starving_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.starving_fl) {
ret = -ENOMEM;
goto bye;
}
- adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.txq_maperr = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.txq_maperr) {
ret = -ENOMEM;
goto bye;
}
#ifdef CONFIG_DEBUG_FS
- adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
+ adap->sge.blocked_fl = bitmap_zalloc(adap->sge.egr_sz, GFP_KERNEL);
if (!adap->sge.blocked_fl) {
ret = -ENOMEM;
goto bye;
}
- bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz);
#endif
params[0] = FW_PARAM_PFVF(CLIP_START);
@@ -5417,10 +5412,10 @@ bye:
adap_free_hma_mem(adap);
kfree(adap->sge.egr_map);
kfree(adap->sge.ingr_map);
- kfree(adap->sge.starving_fl);
- kfree(adap->sge.txq_maperr);
+ bitmap_free(adap->sge.starving_fl);
+ bitmap_free(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
- kfree(adap->sge.blocked_fl);
+ bitmap_free(adap->sge.blocked_fl);
#endif
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
@@ -5854,8 +5849,7 @@ static int alloc_msix_info(struct adapter *adap, u32 num_vec)
if (!msix_info)
return -ENOMEM;
- adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
- sizeof(long), GFP_KERNEL);
+ adap->msix_bmap.msix_bmap = bitmap_zalloc(num_vec, GFP_KERNEL);
if (!adap->msix_bmap.msix_bmap) {
kfree(msix_info);
return -ENOMEM;
@@ -5870,7 +5864,7 @@ static int alloc_msix_info(struct adapter *adap, u32 num_vec)
static void free_msix_info(struct adapter *adap)
{
- kfree(adap->msix_bmap.msix_bmap);
+ bitmap_free(adap->msix_bmap.msix_bmap);
kfree(adap->msix_info);
}
@@ -6189,10 +6183,10 @@ static void free_some_resources(struct adapter *adapter)
cxgb4_cleanup_ethtool_filters(adapter);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
- kfree(adapter->sge.starving_fl);
- kfree(adapter->sge.txq_maperr);
+ bitmap_free(adapter->sge.starving_fl);
+ bitmap_free(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
- kfree(adapter->sge.blocked_fl);
+ bitmap_free(adapter->sge.blocked_fl);
#endif
disable_msi(adapter);
@@ -6495,21 +6489,21 @@ static const struct tlsdev_ops cxgb4_ktls_ops = {
#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
-static int cxgb4_xfrm_add_state(struct xfrm_state *x)
+static int cxgb4_xfrm_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
struct adapter *adap = netdev2adap(x->xso.dev);
int ret;
if (!mutex_trylock(&uld_mutex)) {
- dev_dbg(adap->pdev_dev,
- "crypto uld critical resource is under use\n");
+ NL_SET_ERR_MSG_MOD(extack, "crypto uld critical resource is under use");
return -EBUSY;
}
ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS);
if (ret)
goto out_unlock;
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x);
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x, extack);
out_unlock:
mutex_unlock(&uld_mutex);
@@ -6608,7 +6602,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static int adap_idx = 1;
int s_qpp, qpp, num_seg;
struct port_info *pi;
- bool highdma = false;
enum chip_type chip;
void __iomem *regs;
int func, chip_ver;
@@ -6687,17 +6680,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- highdma = true;
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto out_free_adapter;
- }
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto out_free_adapter;
}
- pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
adap_idx++;
@@ -6823,7 +6811,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_TC | NETIF_F_NTUPLE;
+ NETIF_F_HW_TC | NETIF_F_NTUPLE | NETIF_F_HIGHDMA;
if (chip_ver > CHELSIO_T5) {
netdev->hw_enc_features |= NETIF_F_IP_CSUM |
@@ -6841,8 +6829,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
}
- if (highdma)
- netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
netdev->vlan_features = netdev->features & VLAN_FEAT;
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
@@ -7104,7 +7090,6 @@ fw_attach_fail:
out_unmap_bar0:
iounmap(regs);
out_disable_device:
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
out_release_regions:
pci_release_regions(pdev);
@@ -7183,7 +7168,6 @@ static void remove_one(struct pci_dev *pdev)
}
#endif
iounmap(adapter->regs);
- pci_disable_pcie_error_reporting(pdev);
if ((adapter->flags & CXGB4_DEV_ENABLED)) {
pci_disable_device(pdev);
adapter->flags &= ~CXGB4_DEV_ENABLED;
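
Among the changes above, xdo_dev_state_add() now receives a struct netlink_ext_ack, so offload drivers can hand a readable failure reason back to the netlink caller instead of only emitting dev_dbg(). A hedged sketch of the callback shape; the transport-mode check is purely illustrative:

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <net/xfrm.h>

static int demo_xfrm_add_state(struct xfrm_state *x,
			       struct netlink_ext_ack *extack)
{
	if (x->props.mode != XFRM_MODE_TRANSPORT) {
		/* The message is surfaced to the 'ip xfrm state add' caller. */
		NL_SET_ERR_MSG_MOD(extack, "Only transport mode is offloaded");
		return -EOPNOTSUPP;
	}
	/* ... program the SA into hardware ... */
	return 0;
}

static const struct xfrmdev_ops demo_xfrmdev_ops = {
	.xdo_dev_state_add = demo_xfrm_add_state,
};
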
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
index 5bf117d2179f..cbd06d9b95d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -194,17 +194,20 @@ int cxgb4_ptp_redirect_rx_packet(struct adapter *adapter, struct port_info *pi)
}
/**
- * cxgb4_ptp_adjfreq - Adjust frequency of PHC cycle counter
+ * cxgb4_ptp_adjfine - Adjust frequency of PHC cycle counter
* @ptp: ptp clock structure
- * @ppb: Desired frequency change in parts per billion
+ * @scaled_ppm: Desired frequency in scaled parts per billion
*
- * Adjust the frequency of the PHC cycle counter by the indicated ppb from
+ * Adjust the frequency of the PHC cycle counter by the indicated amount from
* the base frequency.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/
-static int cxgb4_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int cxgb4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct adapter *adapter = (struct adapter *)container_of(ptp,
struct adapter, ptp_clock_info);
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
struct fw_ptp_cmd c;
int err;
@@ -404,7 +407,7 @@ static const struct ptp_clock_info cxgb4_ptp_clock_info = {
.n_ext_ts = 0,
.n_per_out = 0,
.pps = 0,
- .adjfreq = cxgb4_ptp_adjfreq,
+ .adjfine = cxgb4_ptp_adjfine,
.adjtime = cxgb4_ptp_adjtime,
.gettime64 = cxgb4_ptp_gettime,
.settime64 = cxgb4_ptp_settime,
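
.adjfreq (plain parts per billion) is being retired in favour of .adjfine, whose scaled_ppm argument is ppm with a 16-bit binary fraction; scaled_ppm_to_ppb() converts it back for hardware that is programmed in ppb, as this driver's firmware interface is. Sketch with a hypothetical register-write helper:

#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

/* hypothetical helper standing in for the device frequency-adjust write */
static int demo_write_freq_adjust(s64 ppb) { return 0; }

static int demo_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	/* scaled_ppm is ppm << 16; convert back for a ppb-based register. */
	s64 ppb = scaled_ppm_to_ppb(scaled_ppm);

	return demo_write_freq_adjust(ppb);
}

static const struct ptp_clock_info demo_ptp_info = {
	.owner	 = THIS_MODULE,
	.name	 = "demo_phc",
	.adjfine = demo_ptp_adjfine,
};
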
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index dd9be229819a..d3541159487d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -1135,7 +1135,7 @@ void cxgb4_cleanup_tc_flower(struct adapter *adap)
return;
if (adap->flower_stats_timer.function)
- del_timer_sync(&adap->flower_stats_timer);
+ timer_shutdown_sync(&adap->flower_stats_timer);
cancel_work_sync(&adap->flower_stats_work);
rhashtable_destroy(&adap->flower_tbl);
adap->tc_flower_initialized = false;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
index 28fd2de9e4cf..1672d3afe5be 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
@@ -8,6 +8,46 @@
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"
+static int cxgb4_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int cxgb4_matchall_egress_validate(struct net_device *dev,
struct tc_cls_matchall_offload *cls)
{
@@ -48,11 +88,10 @@ static int cxgb4_matchall_egress_validate(struct net_device *dev,
flow_action_for_each(i, entry, actions) {
switch (entry->id) {
case FLOW_ACTION_POLICE:
- if (entry->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+ ret = cxgb4_policer_validate(actions, entry, extack);
+ if (ret)
+ return ret;
+
/* Convert bytes per second to bits per second */
if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
NL_SET_ERR_MSG_MOD(extack,
@@ -150,11 +189,11 @@ static int cxgb4_matchall_alloc_tc(struct net_device *dev,
flow_action_for_each(i, entry, &cls->rule->action)
if (entry->id == FLOW_ACTION_POLICE)
break;
- if (entry->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack,
- "QoS offload not support packets per second");
- return -EOPNOTSUPP;
- }
+
+ ret = cxgb4_policer_validate(&cls->rule->action, entry, extack);
+ if (ret)
+ return ret;
+
/* Convert from bytes per second to Kbps */
p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
p.u.params.channel = pi->tx_chan;
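
Centralising the policer checks in cxgb4_policer_validate() means both matchall paths reject the same unsupported configurations (non-drop exceed action, peak/average rates, packet-per-second policing) with consistent extack messages. A rough sketch of how such a shared validator is driven from a flow_action walk; the 100 Gbit/s limit and the wrapper name are illustrative:

#include <linux/netlink.h>
#include <net/flow_offload.h>

/* assumed visible here: the validator added in the hunk above */
int cxgb4_policer_validate(const struct flow_action *action,
			   const struct flow_action_entry *act,
			   struct netlink_ext_ack *extack);

static int demo_validate_police(const struct flow_action *actions,
				struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *entry;
	int i, ret;

	flow_action_for_each(i, entry, actions) {
		if (entry->id != FLOW_ACTION_POLICE)
			continue;

		ret = cxgb4_policer_validate(actions, entry, extack);
		if (ret)
			return ret;

		/* rate_bytes_ps is bytes/s; compare as bits/s to the link */
		if (entry->police.rate_bytes_ps * 8 > 100000000000ULL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "policer rate above link speed");
			return -ERANGE;
		}
	}
	return 0;
}
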
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
index be96f1dc0372..d4a862a9fd7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
@@ -4,7 +4,7 @@
#ifndef __CXGB4_TC_MQPRIO_H__
#define __CXGB4_TC_MQPRIO_H__
-#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#define CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM 128
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
index 9a6d65243334..dea9d2907666 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
@@ -12,7 +12,7 @@
static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev,
int *temp)
{
- struct adapter *adap = tzdev->devdata;
+ struct adapter *adap = thermal_zone_device_priv(tzdev);
u32 param, val;
int ret;
@@ -29,36 +29,12 @@ static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev,
return 0;
}
-static int cxgb4_thermal_get_trip_type(struct thermal_zone_device *tzdev,
- int trip, enum thermal_trip_type *type)
-{
- struct adapter *adap = tzdev->devdata;
-
- if (!adap->ch_thermal.trip_temp)
- return -EINVAL;
-
- *type = adap->ch_thermal.trip_type;
- return 0;
-}
-
-static int cxgb4_thermal_get_trip_temp(struct thermal_zone_device *tzdev,
- int trip, int *temp)
-{
- struct adapter *adap = tzdev->devdata;
-
- if (!adap->ch_thermal.trip_temp)
- return -EINVAL;
-
- *temp = adap->ch_thermal.trip_temp;
- return 0;
-}
-
static struct thermal_zone_device_ops cxgb4_thermal_ops = {
.get_temp = cxgb4_thermal_get_temp,
- .get_trip_type = cxgb4_thermal_get_trip_type,
- .get_trip_temp = cxgb4_thermal_get_trip_temp,
};
+static struct thermal_trip trip = { .type = THERMAL_TRIP_CRITICAL } ;
+
int cxgb4_thermal_init(struct adapter *adap)
{
struct ch_thermal *ch_thermal = &adap->ch_thermal;
@@ -79,15 +55,14 @@ int cxgb4_thermal_init(struct adapter *adap)
if (ret < 0) {
num_trip = 0; /* could not get trip temperature */
} else {
- ch_thermal->trip_temp = val * 1000;
- ch_thermal->trip_type = THERMAL_TRIP_CRITICAL;
+ trip.temperature = val * 1000;
}
snprintf(ch_tz_name, sizeof(ch_tz_name), "cxgb4_%s", adap->name);
- ch_thermal->tzdev = thermal_zone_device_register(ch_tz_name, num_trip,
- 0, adap,
- &cxgb4_thermal_ops,
- NULL, 0, 0);
+ ch_thermal->tzdev = thermal_zone_device_register_with_trips(ch_tz_name, &trip, num_trip,
+ 0, adap,
+ &cxgb4_thermal_ops,
+ NULL, 0, 0);
if (IS_ERR(ch_thermal->tzdev)) {
ret = PTR_ERR(ch_thermal->tzdev);
dev_err(adap->pdev_dev, "Failed to register thermal zone\n");
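
thermal_zone_device_register_with_trips() takes the trip points as data up front, which is why the .get_trip_type/.get_trip_temp callbacks and the per-adapter trip fields disappear. A minimal sketch with one critical trip; the names, the 45 °C reading and the 90 °C threshold are illustrative:

#include <linux/err.h>
#include <linux/thermal.h>

static int demo_get_temp(struct thermal_zone_device *tzdev, int *temp)
{
	*temp = 45000;		/* millidegrees C, hypothetical reading */
	return 0;
}

static struct thermal_zone_device_ops demo_tz_ops = {
	.get_temp = demo_get_temp,
};

static struct thermal_trip demo_trip = {
	.temperature = 90000,			/* 90 C critical trip */
	.type	     = THERMAL_TRIP_CRITICAL,
};

static int demo_thermal_init(void *drvdata)
{
	struct thermal_zone_device *tz;

	tz = thermal_zone_device_register_with_trips("demo_tz", &demo_trip, 1,
						     0, drvdata, &demo_tz_ops,
						     NULL, 0, 0);
	return PTR_ERR_OR_ZERO(tz);
}
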
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index fa5b596ff23a..46809e2d94ee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1531,7 +1531,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
if (cxgb4_is_ktls_skb(skb) &&
- (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
+ (skb->len - skb_tcp_all_headers(skb)))
return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */
@@ -1842,8 +1842,10 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
* (including the VLAN tag) into the header so we reject anything
* smaller than that ...
*/
- fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
- sizeof(wr->ethtype) + sizeof(wr->vlantci);
+ BUILD_BUG_ON(sizeof(wr->firmware) !=
+ (sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) + sizeof(wr->vlantci)));
+ fw_hdr_copy_len = sizeof(wr->firmware);
ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
if (ret)
goto out_free;
@@ -1924,7 +1926,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
wr->equiq_to_len16 = cpu_to_be32(wr_mid);
wr->r3[0] = cpu_to_be32(0);
wr->r3[1] = cpu_to_be32(0);
- skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
end = (u64 *)wr + flits;
/* If this is a Large Send Offload packet we'll put in an LSO CPL
@@ -4465,7 +4467,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
if (ret)
goto err;
- netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
+ netif_napi_add(dev, &iq->napi, napi_rx_handler);
iq->cur_desc = iq->desc;
iq->cidx = 0;
iq->gen = 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index e7b4e3ed056c..8d719f82854a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2793,14 +2793,14 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
goto out;
na = ret;
- memcpy(p->id, vpd + id, min_t(int, id_len, ID_LEN));
+ memcpy(p->id, vpd + id, min_t(unsigned int, id_len, ID_LEN));
strim(p->id);
- memcpy(p->sn, vpd + sn, min_t(int, sn_len, SERNUM_LEN));
+ memcpy(p->sn, vpd + sn, min_t(unsigned int, sn_len, SERNUM_LEN));
strim(p->sn);
- memcpy(p->pn, vpd + pn, min_t(int, pn_len, PN_LEN));
+ memcpy(p->pn, vpd + pn, min_t(unsigned int, pn_len, PN_LEN));
strim(p->pn);
- memcpy(p->na, vpd + na, min_t(int, na_len, MACADDR_LEN));
- strim((char *)p->na);
+ memcpy(p->na, vpd + na, min_t(unsigned int, na_len, MACADDR_LEN));
+ strim(p->na);
out:
vfree(vpd);
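The min_t() type switch above matters because the lengths being clamped are unsigned: forcing them through a signed min lets a bogus, very large length wrap negative and win the comparison, which then becomes an enormous size_t in memcpy(). A hypothetical illustration (the 0x80000001 length is invented; ID_LEN stands in for any of the field limits):

static void vpd_clamp_example(void)
{
	unsigned int bad_len = 0x80000001;	/* corrupt length read from the VPD */

	/* Interpreted as int, bad_len is negative and "wins" the signed min. */
	int as_signed = min_t(int, bad_len, ID_LEN);

	/* The unsigned min clamps to ID_LEN as intended. */
	unsigned int as_unsigned = min_t(unsigned int, bad_len, ID_LEN);

	(void)as_signed;
	(void)as_unsigned;
}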
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 0a326c054707..2419459a0b85 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -794,10 +794,12 @@ struct fw_eth_tx_pkt_vm_wr {
__be32 op_immdlen;
__be32 equiq_to_len16;
__be32 r3[2];
- u8 ethmacdst[6];
- u8 ethmacsrc[6];
- __be16 ethtype;
- __be16 vlantci;
+ struct_group(firmware,
+ u8 ethmacdst[ETH_ALEN];
+ u8 ethmacsrc[ETH_ALEN];
+ __be16 ethtype;
+ __be16 vlantci;
+ );
};
#define FW_CMD_MAX_TIMEOUT 10000
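struct_group() wraps the four header fields in an anonymous union so they remain addressable individually while also being addressable as one named member. Roughly (simplified from the helper in include/linux/stddef.h), the declaration above behaves like:

union {
	struct {			/* members stay usable as wr->ethmacdst, ... */
		u8	ethmacdst[ETH_ALEN];
		u8	ethmacsrc[ETH_ALEN];
		__be16	ethtype;
		__be16	vlantci;
	};
	struct {			/* ... and collectively as wr->firmware */
		u8	ethmacdst[ETH_ALEN];
		u8	ethmacsrc[ETH_ALEN];
		__be16	ethtype;
		__be16	vlantci;
	} firmware;
};

That is what lets sge.c copy into &wr->firmware with a sizeof() that fortified memcpy() can bounds-check, and what the new BUILD_BUG_ON() asserts against the sum of the individual field sizes.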
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index ae9cca768d74..9ba0864592e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -858,7 +858,7 @@ static int cxgb4vf_open(struct net_device *dev)
*/
err = t4vf_update_port_info(pi);
if (err < 0)
- return err;
+ goto err_unwind;
/*
* Note that this interface is up and start everything up ...
@@ -1553,8 +1553,8 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
@@ -1591,7 +1591,9 @@ static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
* first Queue Set.
*/
static void cxgb4vf_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
const struct sge *s = &pi->adapter->sge;
@@ -1614,7 +1616,9 @@ static void cxgb4vf_get_ringparam(struct net_device *dev,
* device -- after vetting them of course!
*/
static int cxgb4vf_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
const struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -2855,7 +2859,7 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
* address stored on the adapter
* @adapter: The adapter
*
- * Find the the port mask for the VF based on the index of mac
+ * Find the port mask for the VF based on the index of mac
* address stored in the adapter. If no mac address is stored on
* the adapter for the VF, use the port mask received from the
* firmware.
@@ -2895,7 +2899,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct port_info *pi;
unsigned int pmask;
- int pci_using_dac;
int err, pidx;
/*
@@ -2916,19 +2919,12 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
}
/*
- * Set up our DMA mask: try for 64-bit address masking first and
- * fall back to 32-bit if we can't get 64 bits ...
+ * Set up our DMA mask
*/
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err == 0) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err != 0) {
- dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto err_release_regions;
- }
- pci_using_dac = 0;
+ if (err) {
+ dev_err(&pdev->dev, "no usable DMA configuration\n");
+ goto err_release_regions;
}
/*
@@ -3074,9 +3070,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
- netdev->features = netdev->hw_features;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -3264,7 +3258,6 @@ err_free_adapter:
err_release_regions:
pci_release_regions(pdev);
- pci_clear_master(pdev);
err_disable_device:
pci_disable_device(pdev);
@@ -3344,7 +3337,6 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
* Disable the device and release its PCI resources.
*/
pci_disable_device(pdev);
- pci_clear_master(pdev);
pci_release_regions(pdev);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0295b2406646..2d0cf76fb3c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1167,10 +1167,7 @@ netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpl_tx_pkt_core *cpl;
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
- const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
- sizeof(wr->ethmacsrc) +
- sizeof(wr->ethtype) +
- sizeof(wr->vlantci));
+ const size_t fw_hdr_copy_len = sizeof(wr->firmware);
/*
* The chip minimum packet length is 10 octets but the firmware
@@ -1267,7 +1264,7 @@ netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
wr->equiq_to_len16 = cpu_to_be32(wr_mid);
wr->r3[0] = cpu_to_be32(0);
wr->r3[1] = cpu_to_be32(0);
- skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
+ skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len);
end = (u64 *)wr + flits;
/*
@@ -2339,7 +2336,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
if (ret)
goto err;
- netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
+ netif_napi_add(dev, &rspq->napi, napi_rx_handler);
rspq->cur_desc = rspq->desc;
rspq->cidx = 0;
rspq->gen = 1;
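As in the PF driver, the NAPI registrations here simply drop the explicit weight of 64. Assuming the three-argument netif_napi_add() still forwards to the weighted variant with the kernel default, the two spellings are equivalent (sketch):

/* new form */
netif_napi_add(dev, &rspq->napi, napi_rx_handler);
/* roughly the old form, with the default weight spelled out */
netif_napi_add_weight(dev, &rspq->napi, napi_rx_handler, NAPI_POLL_WEIGHT);	/* 64 */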
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index d546993bda09..1c52592d3b65 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -877,7 +877,7 @@ int t4vf_get_sge_params(struct adapter *adapter)
/* T4 uses a single control field to specify both the PCIe Padding and
* Packing Boundary. T5 introduced the ability to specify these
- * separately with the Padding Boundary in SGE_CONTROL and and Packing
+ * separately with the Padding Boundary in SGE_CONTROL and Packing
* Boundary in SGE_CONTROL2. So for T5 and later we need to grab
* SGE_CONTROL in order to determine how ingress packet data will be
* laid out in Packed Buffer Mode. Unfortunately, older versions of
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index 585590520076..3731c93f8f95 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -80,7 +80,8 @@ static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x);
+static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack);
static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = ch_ipsec_xfrm_add_state,
@@ -226,61 +227,66 @@ out:
* returns 0 on success, negative error if failed to send message to FPGA
* positive error if FPGA returned a bad response
*/
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x)
+static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
struct ipsec_sa_entry *sa_entry;
int res = 0;
if (x->props.aalgo != SADB_AALG_NONE) {
- pr_debug("Cannot offload authenticated xfrm states\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
return -EINVAL;
}
if (x->props.calgo != SADB_X_CALG_NONE) {
- pr_debug("Cannot offload compressed xfrm states\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
return -EINVAL;
}
if (x->props.family != AF_INET &&
x->props.family != AF_INET6) {
- pr_debug("Only IPv4/6 xfrm state offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm state offloaded");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_TRANSPORT &&
x->props.mode != XFRM_MODE_TUNNEL) {
- pr_debug("Only transport and tunnel xfrm offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm offload");
return -EINVAL;
}
if (x->id.proto != IPPROTO_ESP) {
- pr_debug("Only ESP xfrm state offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state offloaded");
return -EINVAL;
}
if (x->encap) {
- pr_debug("Encapsulated xfrm state not offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulated xfrm state not offloaded");
return -EINVAL;
}
if (!x->aead) {
- pr_debug("Cannot offload xfrm states without aead\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
return -EINVAL;
}
if (x->aead->alg_icv_len != 128 &&
x->aead->alg_icv_len != 96) {
- pr_debug("Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
- return -EINVAL;
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 96b & 128b");
+ return -EINVAL;
}
if ((x->aead->alg_key_len != 128 + 32) &&
(x->aead->alg_key_len != 256 + 32)) {
- pr_debug("cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ NL_SET_ERR_MSG_MOD(extack, "cannot offload xfrm states with AEAD key length other than 128/256 bit");
return -EINVAL;
}
if (x->tfcpad) {
- pr_debug("Cannot offload xfrm states with tfc padding\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
return -EINVAL;
}
if (!x->geniv) {
- pr_debug("Cannot offload xfrm states without geniv\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
return -EINVAL;
}
if (strcmp(x->geniv, "seqiv")) {
- pr_debug("Cannot offload xfrm states with geniv other than seqiv\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
+ return -EINVAL;
+ }
+ if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload");
return -EINVAL;
}
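The xdo_dev_state_add() conversion above swaps pr_debug() for NL_SET_ERR_MSG_MOD() so the rejection reason travels back to userspace through the netlink extended ack. A minimal sketch of the pattern (example_xdo_dev_state_add() is a made-up name):

static int example_xdo_dev_state_add(struct xfrm_state *x,
				     struct netlink_ext_ack *extack)
{
	if (x->id.proto != IPPROTO_ESP) {
		/* surfaced by "ip xfrm state add ... offload" instead of a debug log */
		NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state offloaded");
		return -EINVAL;
	}
	return 0;
}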
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 59683f79959c..1a5fdd755e9e 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -483,7 +483,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
tx_info->ip_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
} else {
- if (!sk->sk_ipv6only &&
+ if (!ipv6_only_sock(sk) &&
ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
memcpy(daaddr, &sk->sk_daddr, 4);
tx_info->ip_family = AF_INET;
@@ -1012,7 +1012,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
/* packet length = eth hdr len + ip hdr len + tcp hdr len
* (including options).
*/
- pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ pktlen = skb_tcp_all_headers(skb);
ctrl = sizeof(*cpl) + pktlen;
len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
@@ -1839,9 +1839,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
*/
if (prior_data_len) {
int i = 0;
- u8 *data = NULL;
skb_frag_t *f;
- u8 *vaddr;
int frag_size = 0, frag_delta = 0;
while (remaining > 0) {
@@ -1853,24 +1851,24 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
i++;
}
f = &record->frags[i];
- vaddr = kmap_atomic(skb_frag_page(f));
-
- data = vaddr + skb_frag_off(f) + remaining;
frag_delta = skb_frag_size(f) - remaining;
if (frag_delta >= prior_data_len) {
- memcpy(prior_data, data, prior_data_len);
- kunmap_atomic(vaddr);
+ memcpy_from_page(prior_data, skb_frag_page(f),
+ skb_frag_off(f) + remaining,
+ prior_data_len);
} else {
- memcpy(prior_data, data, frag_delta);
- kunmap_atomic(vaddr);
+ memcpy_from_page(prior_data, skb_frag_page(f),
+ skb_frag_off(f) + remaining,
+ frag_delta);
+
/* get the next page */
f = &record->frags[i + 1];
- vaddr = kmap_atomic(skb_frag_page(f));
- data = vaddr + skb_frag_off(f);
- memcpy(prior_data + frag_delta,
- data, (prior_data_len - frag_delta));
- kunmap_atomic(vaddr);
+
+ memcpy_from_page(prior_data + frag_delta,
+ skb_frag_page(f),
+ skb_frag_off(f),
+ prior_data_len - frag_delta);
}
/* reset tcp_seq as per the prior_data_required len */
tcp_seq -= prior_data_len;
@@ -1907,7 +1905,7 @@ static int chcr_ktls_sw_fallback(struct sk_buff *skb,
return 0;
th = tcp_hdr(nskb);
- skb_offset = skb_transport_offset(nskb) + tcp_hdrlen(nskb);
+ skb_offset = skb_tcp_all_headers(nskb);
data_len = nskb->len - skb_offset;
skb_tx_timestamp(nskb);
@@ -1932,20 +1930,26 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
int data_len, qidx, ret = 0, mss;
struct tls_record_info *record;
struct chcr_ktls_info *tx_info;
+ struct net_device *tls_netdev;
struct tls_context *tls_ctx;
struct sge_eth_txq *q;
struct adapter *adap;
unsigned long flags;
tcp_seq = ntohl(th->seq);
- skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_offset = skb_tcp_all_headers(skb);
skb_data_len = skb->len - skb_offset;
data_len = skb_data_len;
mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
tls_ctx = tls_get_ctx(skb->sk);
- if (unlikely(tls_ctx->netdev != dev))
+ tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
+ /* Don't quit on NULL: if tls_device_down is running in parallel,
+ * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+ * true. Rather continue processing this packet.
+ */
+ if (unlikely(tls_netdev && tls_netdev != dev))
goto out;
tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
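The kmap_atomic()/memcpy()/kunmap_atomic() triplets above collapse into memcpy_from_page(). Assuming the helper behaves like its counterpart in highmem.h, it is roughly:

static inline void memcpy_from_page_sketch(char *to, struct page *page,
					   size_t offset, size_t len)
{
	char *from = kmap_local_page(page);	/* local mapping instead of kmap_atomic */

	memcpy(to, from + offset, len);
	kunmap_local(from);
}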
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 9e2378013642..41714203ace8 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -567,7 +567,7 @@ void chtls_shutdown(struct sock *sk, int how);
void chtls_destroy_sock(struct sock *sk);
int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int chtls_recvmsg(struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags, int *addr_len);
+ size_t len, int flags, int *addr_len);
int chtls_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
int send_tx_flowc_wr(struct sock *sk, int compl,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 4af5561cbfc5..c2e7037c7ba1 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -1063,14 +1063,13 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
opt2 |= WND_SCALE_EN_V(WSCALE_OK(tp));
rpl5->opt0 = cpu_to_be64(opt0);
rpl5->opt2 = cpu_to_be32(opt2);
- rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
+ rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1);
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
}
-static void inet_inherit_port(struct inet_hashinfo *hash_info,
- struct sock *lsk, struct sock *newsk)
+static void inet_inherit_port(struct sock *lsk, struct sock *newsk)
{
local_bh_disable();
__inet_inherit_port(lsk, newsk);
@@ -1236,11 +1235,11 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->sndbuf = newsk->sk_sndbuf;
csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
- sock_net(newsk)->
- ipv4.sysctl_tcp_window_scaling,
+ READ_ONCE(sock_net(newsk)->
+ ipv4.sysctl_tcp_window_scaling),
tp->window_clamp);
neigh_release(n);
- inet_inherit_port(&tcp_hashinfo, lsk, newsk);
+ inet_inherit_port(lsk, newsk);
csk_set_flag(csk, CSK_CONN_INLINE);
bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */
@@ -1384,7 +1383,7 @@ static void chtls_pass_accept_request(struct sock *sk,
#endif
}
if (req->tcpopt.wsf <= 14 &&
- sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
inet_rsk(oreq)->wscale_ok = 1;
inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
}
@@ -1392,7 +1391,7 @@ static void chtls_pass_accept_request(struct sock *sk,
th_ecn = tcph->ece && tcph->cwr;
if (th_ecn) {
ect = !INET_ECN_is_not_ect(ip_dsfield);
- ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
+ ecn_ok = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn);
if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
inet_rsk(oreq)->ecn_ok = 1;
}
@@ -1467,7 +1466,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
tp->write_seq = snd_isn;
tp->snd_nxt = snd_isn;
tp->snd_una = snd_isn;
- inet_sk(sk)->inet_id = prandom_u32();
+ inet_sk(sk)->inet_id = get_random_u16();
assign_rxopt(sk, opt);
if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
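Two independent cleanups run through chtls_cm.c: sysctl reads gain READ_ONCE() because the values can change under a concurrent writer, and the prandom_u32() calls move to the get_random_*() family sized to the consumer. A sketch of the random conversions (variable names invented):

static void example_randoms(struct sock *sk)
{
	/* initial send sequence, aligned as before */
	u32 iss = (get_random_u32() & ~7UL) - 1;

	/* inet_id is a 16-bit field, so the sized helper fits naturally */
	inet_sk(sk)->inet_id = get_random_u16();

	(void)iss;
}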
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index c320cc8ca68d..ae6b17b96bf1 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -919,8 +919,8 @@ static int csk_wait_memory(struct chtls_dev *cdev,
current_timeo = *timeo_p;
noblock = (*timeo_p ? false : true);
if (csk_mem_free(cdev, sk)) {
- current_timeo = (prandom_u32() % (HZ / 5)) + 2;
- vm_wait = (prandom_u32() % (HZ / 5)) + 2;
+ current_timeo = get_random_u32_below(HZ / 5) + 2;
+ vm_wait = get_random_u32_below(HZ / 5) + 2;
}
add_wait_queue(sk_sleep(sk), &wait);
@@ -1426,7 +1426,7 @@ static void chtls_cleanup_rbuf(struct sock *sk, int copied)
}
static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
struct chtls_hws *hws = &csk->tlshws;
@@ -1441,7 +1441,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
buffers_freed = 0;
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
@@ -1616,7 +1616,7 @@ skip_copy:
* Peek at data in a socket's receive buffer.
*/
static int peekmsg(struct sock *sk, struct msghdr *msg,
- size_t len, int nonblock, int flags)
+ size_t len, int flags)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 peek_seq, offset;
@@ -1626,7 +1626,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
long timeo;
lock_sock(sk);
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
peek_seq = tp->copied_seq;
do {
@@ -1737,7 +1737,7 @@ found_ok_skb:
}
int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
- int nonblock, int flags, int *addr_len)
+ int flags, int *addr_len)
{
struct tcp_sock *tp = tcp_sk(sk);
struct chtls_sock *csk;
@@ -1750,25 +1750,23 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
buffers_freed = 0;
if (unlikely(flags & MSG_OOB))
- return tcp_prot.recvmsg(sk, msg, len, nonblock, flags,
- addr_len);
+ return tcp_prot.recvmsg(sk, msg, len, flags, addr_len);
if (unlikely(flags & MSG_PEEK))
- return peekmsg(sk, msg, len, nonblock, flags);
+ return peekmsg(sk, msg, len, flags);
if (sk_can_busy_loop(sk) &&
skb_queue_empty_lockless(&sk->sk_receive_queue) &&
sk->sk_state == TCP_ESTABLISHED)
- sk_busy_loop(sk, nonblock);
+ sk_busy_loop(sk, flags & MSG_DONTWAIT);
lock_sock(sk);
csk = rcu_dereference_sk_user_data(sk);
if (is_tls_rx(csk))
- return chtls_pt_recvmsg(sk, msg, len, nonblock,
- flags, addr_len);
+ return chtls_pt_recvmsg(sk, msg, len, flags, addr_len);
- timeo = sock_rcvtimeo(sk, nonblock);
+ timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
index 9098b3eed4da..1e55b12fee51 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
@@ -193,7 +193,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
{
struct tls_toe_device *tlsdev = &cdev->tlsdev;
- strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
+ strscpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
TLS_TOE_DEVICE_NAME_MAX);
tlsdev->feature = chtls_inline_feature;
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
index d04a6c163445..da8d10475a08 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
@@ -32,6 +32,7 @@
#include <linux/tcp.h>
#include <linux/ipv6.h>
+#include <net/inet_ecn.h>
#include <net/route.h>
#include <net/ip6_route.h>
@@ -99,7 +100,7 @@ cxgb_find_route(struct cxgb4_lld_info *lldi,
rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
peer_port, local_port, IPPROTO_TCP,
- tos, 0);
+ tos & ~INET_ECN_MASK, 0);
if (IS_ERR(rt))
return NULL;
n = dst_neigh_lookup(&rt->dst, &peer_ip);
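The route lookup fix clears the ECN bits from the TOS key: the low two bits of the IPv4 TOS byte carry ECN, not DSCP, and leaving them set can steer the lookup to a different route when policy routing matches on TOS. Sketch of the masking, using the existing INET_ECN_MASK constant from net/inet_ecn.h:

u8 route_tos = tos & ~INET_ECN_MASK;	/* keep the DSCP part, drop ECT/CE bits */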
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 4a97aa8e1387..06a0c00af99c 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -985,7 +985,7 @@ release_irq:
if (result == DETECTED_NONE) {
pr_warn("%s: 10Base-5 (AUI) has no cable\n", dev->name);
if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
- result = DETECTED_AUI; /* Yes! I don't care if I see a carrrier */
+ result = DETECTED_AUI; /* Yes! I don't care if I see a carrier */
}
break;
case A_CNF_MEDIA_10B_2:
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 21ba6e893072..8627ab19d470 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -689,7 +689,7 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int ep93xx_get_link_ksettings(struct net_device *dev,
@@ -812,7 +812,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
ep = netdev_priv(dev);
ep->dev = dev;
SET_NETDEV_DEV(dev, &pdev->dev);
- netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
+ netif_napi_add(dev, &ep->napi, ep93xx_poll);
platform_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 84251b85fc93..21a70b1f0ac5 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -242,12 +242,15 @@ static int mac89x0_device_probe(struct platform_device *pdev)
pr_info("No EEPROM, giving up now.\n");
goto out1;
} else {
+ u8 addr[ETH_ALEN];
+
for (i = 0; i < ETH_ALEN; i += 2) {
/* Big-endian (why??!) */
unsigned short s = readreg(dev, PP_IA + i);
- dev->dev_addr[i] = s >> 8;
- dev->dev_addr[i+1] = s & 0xff;
+ addr[i] = s >> 8;
+ addr[i+1] = s & 0xff;
}
+ eth_hw_addr_set(dev, addr);
}
dev->irq = SLOT2IRQ(slot);
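Since netdev->dev_addr became read-only to drivers, the MAC address is assembled in a local buffer and installed with eth_hw_addr_set(), exactly as the hunk does. The shape of the pattern (sketch):

u8 addr[ETH_ALEN];

/* ... fill addr[] from EEPROM / device registers ... */
eth_hw_addr_set(dev, addr);	/* copies the buffer into dev->dev_addr for the driver */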
diff --git a/drivers/net/ethernet/cisco/enic/cq_desc.h b/drivers/net/ethernet/cisco/enic/cq_desc.h
index d6dd1b4edf6e..462c5435a206 100644
--- a/drivers/net/ethernet/cisco/enic/cq_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _CQ_DESC_H_
diff --git a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
index ac37cacc6136..d25426470a29 100644
--- a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _CQ_ENET_DESC_H_
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index c67a16a48d62..300ad05ee05b 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _ENIC_H_
@@ -239,21 +226,6 @@ static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
return enic->rq_count + wq;
}
-static inline unsigned int enic_legacy_io_intr(void)
-{
- return 0;
-}
-
-static inline unsigned int enic_legacy_err_intr(void)
-{
- return 1;
-}
-
-static inline unsigned int enic_legacy_notify_intr(void)
-{
- return 2;
-}
-
static inline unsigned int enic_msix_rq_intr(struct enic *enic,
unsigned int rq)
{
@@ -271,6 +243,10 @@ static inline unsigned int enic_msix_err_intr(struct enic *enic)
return enic->rq_count + enic->wq_count;
}
+#define ENIC_LEGACY_IO_INTR 0
+#define ENIC_LEGACY_ERR_INTR 1
+#define ENIC_LEGACY_NOTIFY_INTR 2
+
static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
return enic->rq_count + enic->wq_count + 1;
@@ -280,7 +256,7 @@ static inline bool enic_is_err_intr(struct enic *enic, int intr)
{
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
- return intr == enic_legacy_err_intr();
+ return intr == ENIC_LEGACY_ERR_INTR;
case VNIC_DEV_INTR_MODE_MSIX:
return intr == enic_msix_err_intr(enic);
case VNIC_DEV_INTR_MODE_MSI:
@@ -293,7 +269,7 @@ static inline bool enic_is_notify_intr(struct enic *enic, int intr)
{
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
- return intr == enic_legacy_notify_intr();
+ return intr == ENIC_LEGACY_NOTIFY_INTR;
case VNIC_DEV_INTR_MODE_MSIX:
return intr == enic_msix_notify_intr(enic);
case VNIC_DEV_INTR_MODE_MSI:
@@ -304,7 +280,7 @@ static inline bool enic_is_notify_intr(struct enic *enic, int intr)
static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
{
- if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
+ if (unlikely(dma_mapping_error(&enic->pdev->dev, dma_addr))) {
net_warn_ratelimited("%s: PCI dma mapping failed!\n",
enic->netdev->name);
enic->gen_stats.dma_map_error++;
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
index 3bdc74fba1e3..e3b700c28bc4 100644
--- a/drivers/net/ethernet/cisco/enic/enic_api.c
+++ b/drivers/net/ethernet/cisco/enic/enic_api.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2013 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2013 Cisco Systems, Inc. All rights reserved.
#include <linux/netdevice.h>
#include <linux/spinlock.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.h b/drivers/net/ethernet/cisco/enic/enic_api.h
index 6b9f9255af28..e01790fb0415 100644
--- a/drivers/net/ethernet/cisco/enic/enic_api.h
+++ b/drivers/net/ethernet/cisco/enic/enic_api.h
@@ -1,20 +1,5 @@
-/**
- * Copyright 2013 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2013 Cisco Systems, Inc. All rights reserved. */
#ifndef __ENIC_API_H__
#define __ENIC_API_H__
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index f8d2a6a34282..2cbae7c6cc3d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2011 Cisco Systems, Inc. All rights reserved.
#include <linux/pci.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index f5bb058b3f96..698d0cb02064 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2011 Cisco Systems, Inc. All rights reserved. */
#ifndef _ENIC_DEV_H_
#define _ENIC_DEV_H_
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 6ded4d9fa32a..08b7cc0a1809 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2013 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2013 Cisco Systems, Inc. All rights reserved.
#include <linux/netdevice.h>
#include <linux/ethtool.h>
@@ -146,10 +131,10 @@ static void enic_get_drvinfo(struct net_device *netdev,
if (err == -ENOMEM)
return;
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, fw_info->fw_version,
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, fw_info->fw_version,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
+ strscpy(drvinfo->bus_info, pci_name(enic->pdev),
sizeof(drvinfo->bus_info));
}
@@ -177,7 +162,9 @@ static void enic_get_strings(struct net_device *netdev, u32 stringset,
}
static void enic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
@@ -189,7 +176,9 @@ static void enic_get_ringparam(struct net_device *netdev,
}
static int enic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_enet_config *c = &enic->config;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index aacf141986d5..37bd38d772e8 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -150,10 +150,10 @@ static void enic_set_affinity_hint(struct enic *enic)
!cpumask_available(enic->msix[i].affinity_mask) ||
cpumask_empty(enic->msix[i].affinity_mask))
continue;
- err = irq_set_affinity_hint(enic->msix_entry[i].vector,
- enic->msix[i].affinity_mask);
+ err = irq_update_affinity_hint(enic->msix_entry[i].vector,
+ enic->msix[i].affinity_mask);
if (err)
- netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
+ netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n",
err);
}
@@ -173,7 +173,7 @@ static void enic_unset_affinity_hint(struct enic *enic)
int i;
for (i = 0; i < enic->intr_count; i++)
- irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
+ irq_update_affinity_hint(enic->msix_entry[i].vector, NULL);
}
static int enic_udp_tunnel_set_port(struct net_device *netdev,
@@ -448,9 +448,9 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
{
struct net_device *netdev = data;
struct enic *enic = netdev_priv(netdev);
- unsigned int io_intr = enic_legacy_io_intr();
- unsigned int err_intr = enic_legacy_err_intr();
- unsigned int notify_intr = enic_legacy_notify_intr();
+ unsigned int io_intr = ENIC_LEGACY_IO_INTR;
+ unsigned int err_intr = ENIC_LEGACY_ERR_INTR;
+ unsigned int notify_intr = ENIC_LEGACY_NOTIFY_INTR;
u32 pba;
vnic_intr_mask(&enic->intr[io_intr]);
@@ -680,11 +680,10 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
skb_frag_t *frag;
if (skb->encapsulation) {
- hdr_len = skb_inner_transport_header(skb) - skb->data;
- hdr_len += inner_tcp_hdrlen(skb);
+ hdr_len = skb_inner_tcp_all_headers(skb);
enic_preload_tcp_csum_encap(skb);
} else {
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
enic_preload_tcp_csum(skb);
}
@@ -1508,7 +1507,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
struct enic *enic = netdev_priv(netdev);
unsigned int cq_rq = enic_cq_rq(enic, 0);
unsigned int cq_wq = enic_cq_wq(enic, 0);
- unsigned int intr = enic_legacy_io_intr();
+ unsigned int intr = ENIC_LEGACY_IO_INTR;
unsigned int rq_work_to_do = budget;
unsigned int wq_work_to_do = ENIC_WQ_NAPI_BUDGET;
unsigned int work_done, rq_work_done = 0, wq_work_done;
@@ -1857,8 +1856,7 @@ static int enic_dev_notify_set(struct enic *enic)
spin_lock_bh(&enic->devcmd_lock);
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
- err = vnic_dev_notify_set(enic->vdev,
- enic_legacy_notify_intr());
+ err = vnic_dev_notify_set(enic->vdev, ENIC_LEGACY_NOTIFY_INTR);
break;
case VNIC_DEV_INTR_MODE_MSIX:
err = vnic_dev_notify_set(enic->vdev,
@@ -2634,16 +2632,17 @@ static int enic_dev_init(struct enic *enic)
switch (vnic_dev_get_intr_mode(enic->vdev)) {
default:
- netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
+ netif_napi_add(netdev, &enic->napi[0], enic_poll);
break;
case VNIC_DEV_INTR_MODE_MSIX:
for (i = 0; i < enic->rq_count; i++) {
netif_napi_add(netdev, &enic->napi[i],
- enic_poll_msix_rq, NAPI_POLL_WEIGHT);
+ enic_poll_msix_rq);
}
for (i = 0; i < enic->wq_count; i++)
- netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
- enic_poll_msix_wq, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev,
+ &enic->napi[enic_cq_wq(enic, i)],
+ enic_poll_msix_wq);
break;
}
@@ -2718,26 +2717,14 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* fail to 32-bit.
*/
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(47));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
if (err) {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(dev, "No usable DMA configuration, aborting\n");
goto err_out_release_regions;
}
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(dev, "Unable to obtain %u-bit DMA "
- "for consistent allocations, aborting\n", 32);
- goto err_out_release_regions;
- }
} else {
- err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(47));
- if (err) {
- dev_err(dev, "Unable to obtain %u-bit DMA "
- "for consistent allocations, aborting\n", 47);
- goto err_out_release_regions;
- }
using_dac = 1;
}
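dma_set_mask_and_coherent() sets both the streaming and the coherent DMA mask in one call, so the separate dma_set_coherent_mask() legs and their error handling disappear while the 47-bit-then-32-bit fallback remains. Sketch of the surviving ladder:

err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
if (err) {
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		goto err_out_release_regions;	/* no usable DMA configuration */
}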
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index 80f46dbd5117..4720a952725d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2011 Cisco Systems, Inc. All rights reserved.
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.h b/drivers/net/ethernet/cisco/enic/enic_pp.h
index a09ff392c1c6..20a2687713ef 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.h
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.h
@@ -1,20 +1,5 @@
-/*
- * Copyright 2011 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2011 Cisco Systems, Inc. All rights reserved. */
#ifndef _ENIC_PP_H_
#define _ENIC_PP_H_
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c
index 40b20817ddd5..1c48aebdbab0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.c
+++ b/drivers/net/ethernet/cisco/enic/enic_res.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index 81f98a8b60e9..b8ee42d297aa 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _ENIC_RES_H_
diff --git a/drivers/net/ethernet/cisco/enic/rq_enet_desc.h b/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
index e6dd30988d6f..0ab5fd6b8d46 100644
--- a/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/rq_enet_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _RQ_ENET_DESC_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c
index 519323460f26..27c885e91552 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index 4e6aa65857f7..eed5bf59e5d2 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_CQ_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 45015931b335..12a83fa1302d 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 714fc1ed79e3..6273794b923b 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_DEV_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index fcc4a3ccdd94..db56d778877a 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_DEVCMD_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h
index 7d6fbb5635a4..5acc236069de 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_enet.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_ENIC_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c
index 23604e3d4455..25319f072a04 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_intr.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.h b/drivers/net/ethernet/cisco/enic/vnic_intr.h
index 2b1636392294..33a72aa10b26 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_intr.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_intr.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_INTR_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_nic.h b/drivers/net/ethernet/cisco/enic/vnic_nic.h
index 84ff8ca17fcb..04fee45b5d39 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_nic.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_nic.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_NIC_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_resource.h b/drivers/net/ethernet/cisco/enic/vnic_resource.h
index 4e45f88ac1d4..b4776e334d63 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_resource.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_resource.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_RESOURCE_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index a3e7b003ada1..5ae80551f17c 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
@@ -216,4 +203,3 @@ void vnic_rq_clean(struct vnic_rq *rq,
vnic_dev_clear_desc_ring(&rq->ring);
}
-
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 0413103ebe94..0bc595abc03b 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_RQ_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rss.h b/drivers/net/ethernet/cisco/enic/vnic_rss.h
index 881fa18542b3..4dcf0e61cb13 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rss.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rss.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef _VNIC_RSS_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_stats.h b/drivers/net/ethernet/cisco/enic/vnic_stats.h
index 74c81ed6fdab..2dd04322d760 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_stats.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_stats.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_STATS_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.c b/drivers/net/ethernet/cisco/enic/vnic_vic.c
index 24ef8cd40545..20fcb20b42ed 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.c
@@ -1,20 +1,5 @@
-/*
- * Copyright 2010 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2010 Cisco Systems, Inc. All rights reserved.
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_vic.h b/drivers/net/ethernet/cisco/enic/vnic_vic.h
index 057776908828..b51c1c52f8bf 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_vic.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_vic.h
@@ -1,20 +1,5 @@
-/*
- * Copyright 2010 Cisco Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2010 Cisco Systems, Inc. All rights reserved. */
#ifndef _VNIC_VIC_H_
#define _VNIC_VIC_H_
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index eb75891974df..29c7900349b2 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 01209613d57d..75c526911074 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _VNIC_WQ_H_
diff --git a/drivers/net/ethernet/cisco/enic/wq_enet_desc.h b/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
index c7021e3a631f..425e46a804ee 100644
--- a/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/wq_enet_desc.h
@@ -1,20 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#ifndef _WQ_ENET_DESC_H_
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 941f175fb911..5715b9ab2712 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -68,7 +68,6 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define DEFAULT_GMAC_RXQ_ORDER 9
#define DEFAULT_GMAC_TXQ_ORDER 8
#define DEFAULT_RX_BUF_ORDER 11
-#define DEFAULT_NAPI_WEIGHT 64
#define TX_MAX_FRAGS 16
#define TX_QUEUE_NUM 1 /* max: 6 */
#define RX_MAX_ALLOC_ORDER 2
@@ -305,21 +304,21 @@ static void gmac_speed_set(struct net_device *netdev)
switch (phydev->speed) {
case 1000:
status.bits.speed = GMAC_SPEED_1000;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(phydev->interface))
status.bits.mii_rmii = GMAC_PHY_RGMII_1000;
netdev_dbg(netdev, "connect %s to RGMII @ 1Gbit\n",
phydev_name(phydev));
break;
case 100:
status.bits.speed = GMAC_SPEED_100;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(phydev->interface))
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
netdev_dbg(netdev, "connect %s to RGMII @ 100 Mbit\n",
phydev_name(phydev));
break;
case 10:
status.bits.speed = GMAC_SPEED_10;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(phydev->interface))
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
netdev_dbg(netdev, "connect %s to RGMII @ 10 Mbit\n",
phydev_name(phydev));
@@ -389,6 +388,9 @@ static int gmac_setup_phy(struct net_device *netdev)
status.bits.mii_rmii = GMAC_PHY_GMII;
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
netdev_dbg(netdev,
"RGMII: set GMAC0 and GMAC1 to MII/RGMII mode\n");
status.bits.mii_rmii = GMAC_PHY_RGMII_100_10;
@@ -2105,7 +2107,9 @@ static void gmac_get_pauseparam(struct net_device *netdev,
}
static void gmac_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
@@ -2123,7 +2127,9 @@ static void gmac_get_ringparam(struct net_device *netdev,
}
static int gmac_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
int err = 0;
@@ -2356,11 +2362,13 @@ static void gemini_port_save_mac_addr(struct gemini_ethernet_port *port)
static int gemini_ethernet_port_probe(struct platform_device *pdev)
{
char *port_names[2] = { "ethernet0", "ethernet1" };
+ struct device_node *np = pdev->dev.of_node;
struct gemini_ethernet_port *port;
struct device *dev = &pdev->dev;
struct gemini_ethernet *geth;
struct net_device *netdev;
struct device *parent;
+ u8 mac[ETH_ALEN];
unsigned int id;
int irq;
int ret;
@@ -2463,8 +2471,13 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
port->freeq_refill = 0;
- netif_napi_add(netdev, &port->napi, gmac_napi_poll,
- DEFAULT_NAPI_WEIGHT);
+ netif_napi_add(netdev, &port->napi, gmac_napi_poll);
+
+ ret = of_get_mac_address(np, mac);
+ if (!ret) {
+ dev_info(dev, "Setting macaddr from DT %pM\n", mac);
+ memcpy(port->mac_addr, mac, ETH_ALEN);
+ }
if (is_valid_ether_addr((void *)port->mac_addr)) {
eth_hw_addr_set(netdev, (u8 *)port->mac_addr);
diff --git a/drivers/net/ethernet/davicom/Kconfig b/drivers/net/ethernet/davicom/Kconfig
index 7af86b6d4150..02e0caff98e3 100644
--- a/drivers/net/ethernet/davicom/Kconfig
+++ b/drivers/net/ethernet/davicom/Kconfig
@@ -3,6 +3,19 @@
# Davicom device configuration
#
+config NET_VENDOR_DAVICOM
+ bool "Davicom devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Davicom devices. If you say Y, you will be asked
+ for your specific card in the following selections.
+
+if NET_VENDOR_DAVICOM
+
config DM9000
tristate "DM9000 support"
depends on ARM || MIPS || COLDFIRE || NIOS2 || COMPILE_TEST
@@ -22,3 +35,21 @@ config DM9000_FORCE_SIMPLE_PHY_POLL
bit to determine if the link is up or down instead of the more
costly MII PHY reads. Note, this will not work if the chip is
operating with an external PHY.
+
+config DM9051
+ tristate "DM9051 SPI support"
+ depends on SPI
+ select CRC32
+ select MDIO
+ select PHYLIB
+ select REGMAP_SPI
+ help
+ Support for DM9051 SPI chipset.
+
+ To compile this driver as a module, choose M here. The module
+ will be called dm9051.
+
+	  The host's SPI master must access the DM9051 using SPI mode 0.
+
+endif # NET_VENDOR_DAVICOM
diff --git a/drivers/net/ethernet/davicom/Makefile b/drivers/net/ethernet/davicom/Makefile
index 173c87d21076..225f85bc1f53 100644
--- a/drivers/net/ethernet/davicom/Makefile
+++ b/drivers/net/ethernet/davicom/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_DM9000) += dm9000.o
+obj-$(CONFIG_DM9051) += dm9051.o
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 0985ab216566..05a89ab6766c 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -28,8 +28,7 @@
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <asm/delay.h>
#include <asm/irq.h>
@@ -540,8 +539,8 @@ static void dm9000_get_drvinfo(struct net_device *dev,
{
struct board_info *dm = to_dm9000_board(dev);
- strlcpy(info->driver, CARDNAME, sizeof(info->driver));
- strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
+ strscpy(info->driver, CARDNAME, sizeof(info->driver));
+ strscpy(info->bus_info, to_platform_device(dm->dev)->name,
sizeof(info->bus_info));
}
@@ -1012,7 +1011,7 @@ static void dm9000_send_packet(struct net_device *dev,
* Hardware start transmission.
* Send a packet to media from the upper layer.
*/
-static int
+static netdev_tx_t
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned long flags;
@@ -1394,9 +1393,9 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
if (!pdata)
return ERR_PTR(-ENOMEM);
- if (of_find_property(np, "davicom,ext-phy", NULL))
+ if (of_property_read_bool(np, "davicom,ext-phy"))
pdata->flags |= DM9000_PLATF_EXT_PHY;
- if (of_find_property(np, "davicom,no-eeprom", NULL))
+ if (of_property_read_bool(np, "davicom,no-eeprom"))
pdata->flags |= DM9000_PLATF_NO_EEPROM;
ret = of_get_mac_address(np, pdata->dev_addr);
@@ -1421,8 +1420,7 @@ dm9000_probe(struct platform_device *pdev)
int iosize;
int i;
u32 id_val;
- int reset_gpios;
- enum of_gpio_flags flags;
+ struct gpio_desc *reset_gpio;
struct regulator *power;
bool inv_mac_addr = false;
u8 addr[ETH_ALEN];
@@ -1442,20 +1440,24 @@ dm9000_probe(struct platform_device *pdev)
dev_dbg(dev, "regulator enabled\n");
}
- reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
- &flags);
- if (gpio_is_valid(reset_gpios)) {
- ret = devm_gpio_request_one(dev, reset_gpios, flags,
- "dm9000_reset");
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(reset_gpio);
+ if (ret) {
+ dev_err(dev, "failed to request reset gpio: %d\n", ret);
+ goto out_regulator_disable;
+ }
+
+ if (reset_gpio) {
+ ret = gpiod_set_consumer_name(reset_gpio, "dm9000_reset");
if (ret) {
- dev_err(dev, "failed to request reset gpio %d: %d\n",
- reset_gpios, ret);
+ dev_err(dev, "failed to set reset gpio name: %d\n",
+ ret);
goto out_regulator_disable;
}
/* According to manual PWRST# Low Period Min 1ms */
msleep(2);
- gpio_set_value(reset_gpios, 1);
+ gpiod_set_value_cansleep(reset_gpio, 0);
/* Needs 3ms to read eeprom when PWRST is deasserted */
msleep(4);
}
diff --git a/drivers/net/ethernet/davicom/dm9051.c b/drivers/net/ethernet/davicom/dm9051.c
new file mode 100644
index 000000000000..70728b2e5f18
--- /dev/null
+++ b/drivers/net/ethernet/davicom/dm9051.c
@@ -0,0 +1,1262 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Davicom Semiconductor,Inc.
+ * Davicom DM9051 SPI Fast Ethernet Linux driver
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#include "dm9051.h"
+
+#define DRVNAME_9051 "dm9051"
+
+/**
+ * struct rx_ctl_mach - rx activities record
+ * @status_err_counter: rx status error counter
+ * @large_err_counter: rx get large packet length error counter
+ * @rx_err_counter: receive packet error counter
+ * @tx_err_counter: transmit packet error counter
+ * @fifo_rst_counter: reset operation counter
+ *
+ * Keeps track of the driver operation statistics
+ */
+struct rx_ctl_mach {
+ u16 status_err_counter;
+ u16 large_err_counter;
+ u16 rx_err_counter;
+ u16 tx_err_counter;
+ u16 fifo_rst_counter;
+};
+
+/**
+ * struct dm9051_rxctrl - dm9051 driver rx control
+ * @hash_table: Multicast hash-table data
+ * @rcr_all: KS_RXCR1 register setting
+ *
+ * The settings needed to control the receive filtering,
+ * such as the multicast hash filter and the receive register settings
+ */
+struct dm9051_rxctrl {
+ u16 hash_table[4];
+ u8 rcr_all;
+};
+
+/**
+ * struct dm9051_rxhdr - rx packet data header
+ * @headbyte: lead byte equal to 0x01 notifies a valid packet
+ * @status: status bits for the received packet
+ * @rxlen: packet length
+ *
+ * Each Rx packet entered into the FIFO memory starts with these
+ * four header bytes, followed by the Ethernet packet data and an
+ * appended 4-byte CRC. The Rx header and CRC data are used only for
+ * checking and are finally dropped by this driver
+ */
+struct dm9051_rxhdr {
+ u8 headbyte;
+ u8 status;
+ __le16 rxlen;
+};
+
+/**
+ * struct board_info - maintain the saved data
+ * @spidev: spi device structure
+ * @ndev: net device structure
+ * @mdiobus: mii bus structure
+ * @phydev: phy device structure
+ * @txq: tx queue structure
+ * @regmap_dm: regmap for register read/write
+ * @regmap_dmbulk: extra regmap for bulk read/write
+ * @rxctrl_work: Work queue for updating RX mode and multicast lists
+ * @tx_work: Work queue for tx packets
+ * @pause: ethtool pause parameter structure
+ * @spi_lockm: between threads lock structure
+ * @reg_mutex: regmap access lock structure
+ * @bc: rx control statistics structure
+ * @rxhdr: rx header structure
+ * @rctl: rx control setting structure
+ * @msg_enable: message level value
+ * @imr_all: to store the operating imr value for register DM9051_IMR
+ * @lcr_all: to store the operating lcr value for register DM9051_LMCR
+ *
+ * The saved data variables, kept up to date for later retrieval and use
+ */
+struct board_info {
+ u32 msg_enable;
+ struct spi_device *spidev;
+ struct net_device *ndev;
+ struct mii_bus *mdiobus;
+ struct phy_device *phydev;
+ struct sk_buff_head txq;
+ struct regmap *regmap_dm;
+ struct regmap *regmap_dmbulk;
+ struct work_struct rxctrl_work;
+ struct work_struct tx_work;
+ struct ethtool_pauseparam pause;
+ struct mutex spi_lockm;
+ struct mutex reg_mutex;
+ struct rx_ctl_mach bc;
+ struct dm9051_rxhdr rxhdr;
+ struct dm9051_rxctrl rctl;
+ u8 imr_all;
+ u8 lcr_all;
+};
+
+static int dm9051_set_reg(struct board_info *db, unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, reg, val);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d set reg %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_update_bits(struct board_info *db, unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ int ret;
+
+ ret = regmap_update_bits(db->regmap_dm, reg, mask, val);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d update bits reg %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+/* skb buffer exhausted, just discard the received data
+ */
+static int dm9051_dumpblk(struct board_info *db, u8 reg, size_t count)
+{
+ struct net_device *ndev = db->ndev;
+ unsigned int rb;
+ int ret;
+
+	/* no skb buffer available;
+	 * both reg and &rb must be non-incrementing,
+	 * so read one byte at a time via regmap_read
+ */
+ do {
+ ret = regmap_read(db->regmap_dm, reg, &rb);
+ if (ret < 0) {
+ netif_err(db, drv, ndev, "%s: error %d dumping read reg %02x\n",
+ __func__, ret, reg);
+ break;
+ }
+ } while (--count);
+
+ return ret;
+}
+
+static int dm9051_set_regs(struct board_info *db, unsigned int reg, const void *val,
+ size_t val_count)
+{
+ int ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, reg, val, val_count);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d bulk writing regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_get_regs(struct board_info *db, unsigned int reg, void *val,
+ size_t val_count)
+{
+ int ret;
+
+ ret = regmap_bulk_read(db->regmap_dmbulk, reg, val, val_count);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d bulk reading regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_write_mem(struct board_info *db, unsigned int reg, const void *buff,
+ size_t len)
+{
+ int ret;
+
+ ret = regmap_noinc_write(db->regmap_dm, reg, buff, len);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d noinc writing regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_read_mem(struct board_info *db, unsigned int reg, void *buff,
+ size_t len)
+{
+ int ret;
+
+ ret = regmap_noinc_read(db->regmap_dm, reg, buff, len);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d noinc reading regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+/* waiting for tx-end rather than tx-req
+ * turns out to be faster
+ */
+static int dm9051_nsr_poll(struct board_info *db)
+{
+ unsigned int mval;
+ int ret;
+
+ ret = regmap_read_poll_timeout(db->regmap_dm, DM9051_NSR, mval,
+ mval & (NSR_TX2END | NSR_TX1END), 1, 20);
+ if (ret == -ETIMEDOUT)
+ netdev_err(db->ndev, "timeout in checking for tx end\n");
+ return ret;
+}
+
+static int dm9051_epcr_poll(struct board_info *db)
+{
+ unsigned int mval;
+ int ret;
+
+ ret = regmap_read_poll_timeout(db->regmap_dm, DM9051_EPCR, mval,
+ !(mval & EPCR_ERRE), 100, 10000);
+ if (ret == -ETIMEDOUT)
+		netdev_err(db->ndev, "timeout waiting for eeprom/phy operation\n");
+ return ret;
+}
+
+static int dm9051_irq_flag(struct board_info *db)
+{
+ struct spi_device *spi = db->spidev;
+ int irq_type = irq_get_trigger_type(spi->irq);
+
+ if (irq_type)
+ return irq_type;
+
+ return IRQF_TRIGGER_LOW;
+}
+
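+/* map the interrupt trigger type to the INTCR polarity setting */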
+static unsigned int dm9051_intcr_value(struct board_info *db)
+{
+ return (dm9051_irq_flag(db) == IRQF_TRIGGER_LOW) ?
+ INTCR_POL_LOW : INTCR_POL_HIGH;
+}
+
+static int dm9051_set_fcr(struct board_info *db)
+{
+ u8 fcr = 0;
+
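+	/* translate the ethtool pause settings into FCR flow-control bits */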
+ if (db->pause.rx_pause)
+ fcr |= FCR_BKPM | FCR_FLCE;
+ if (db->pause.tx_pause)
+ fcr |= FCR_TXPEN;
+
+ return dm9051_set_reg(db, DM9051_FCR, fcr);
+}
+
+static int dm9051_set_recv(struct board_info *db)
+{
+ int ret;
+
+ ret = dm9051_set_regs(db, DM9051_MAR, db->rctl.hash_table, sizeof(db->rctl.hash_table));
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_RCR, db->rctl.rcr_all); /* enable rx */
+}
+
+static int dm9051_core_reset(struct board_info *db)
+{
+ int ret;
+
+ db->bc.fifo_rst_counter++;
+
+ ret = regmap_write(db->regmap_dm, DM9051_NCR, NCR_RST); /* NCR reset */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_MBNDRY, MBNDRY_BYTE); /* MemBound */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_PPCR, PPCR_PAUSE_COUNT); /* Pause Count */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_LMCR, db->lcr_all); /* LEDMode1 */
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_INTCR, dm9051_intcr_value(db));
+}
+
+static int dm9051_update_fcr(struct board_info *db)
+{
+ u8 fcr = 0;
+
+ if (db->pause.rx_pause)
+ fcr |= FCR_BKPM | FCR_FLCE;
+ if (db->pause.tx_pause)
+ fcr |= FCR_TXPEN;
+
+ return dm9051_update_bits(db, DM9051_FCR, FCR_RXTX_BITS, fcr);
+}
+
+static int dm9051_disable_interrupt(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_IMR, IMR_PAR); /* disable int */
+}
+
+static int dm9051_enable_interrupt(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_IMR, db->imr_all); /* enable int */
+}
+
+static int dm9051_stop_mrcmd(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_ISR, ISR_STOP_MRCMD); /* to stop mrcmd */
+}
+
+static int dm9051_clear_interrupt(struct board_info *db)
+{
+ return dm9051_update_bits(db, DM9051_ISR, ISR_CLR_INT, ISR_CLR_INT);
+}
+
+static int dm9051_eeprom_read(struct board_info *db, int offset, u8 *to)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, offset);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_ERPRR);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+ if (ret)
+ return ret;
+
+ return regmap_bulk_read(db->regmap_dmbulk, DM9051_EPDRL, to, 2);
+}
+
+static int dm9051_eeprom_write(struct board_info *db, int offset, u8 *data)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, offset);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, DM9051_EPDRL, data, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_WEP | EPCR_ERPRW);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ return regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+}
+
+static int dm9051_phyread(void *context, unsigned int reg, unsigned int *val)
+{
+ struct board_info *db = context;
+ int ret;
+
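+	/* select the phy register via EPAR; DM9051_PHY marks a phy (not eeprom) access */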
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, DM9051_PHY | reg);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_ERPRR | EPCR_EPOS);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+ if (ret)
+ return ret;
+
+	/* val is 4 bytes of data; clear it to zero since the following
+	 * regmap_bulk_read only fills the lower 2 bytes
+ */
+ *val = 0;
+ return regmap_bulk_read(db->regmap_dmbulk, DM9051_EPDRL, val, 2);
+}
+
+static int dm9051_phywrite(void *context, unsigned int reg, unsigned int val)
+{
+ struct board_info *db = context;
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, DM9051_PHY | reg);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, DM9051_EPDRL, &val, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_EPOS | EPCR_ERPRW);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ return regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+}
+
+static int dm9051_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct board_info *db = bus->priv;
+ unsigned int val = 0xffff;
+ int ret;
+
+ if (addr == DM9051_PHY_ADDR) {
+ ret = dm9051_phyread(db, regnum, &val);
+ if (ret)
+ return ret;
+ }
+
+ return val;
+}
+
+static int dm9051_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ struct board_info *db = bus->priv;
+
+ if (addr == DM9051_PHY_ADDR)
+ return dm9051_phywrite(db, regnum, val);
+
+ return -ENODEV;
+}
+
+static void dm9051_reg_lock_mutex(void *dbcontext)
+{
+ struct board_info *db = dbcontext;
+
+ mutex_lock(&db->reg_mutex);
+}
+
+static void dm9051_reg_unlock_mutex(void *dbcontext)
+{
+ struct board_info *db = dbcontext;
+
+ mutex_unlock(&db->reg_mutex);
+}
+
+static struct regmap_config regconfigdm = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .reg_stride = 1,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = 0,
+ .write_flag_mask = DM_SPI_WR,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .lock = dm9051_reg_lock_mutex,
+ .unlock = dm9051_reg_unlock_mutex,
+};
+
+static struct regmap_config regconfigdmbulk = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .reg_stride = 1,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = 0,
+ .write_flag_mask = DM_SPI_WR,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .lock = dm9051_reg_lock_mutex,
+ .unlock = dm9051_reg_unlock_mutex,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static int dm9051_map_init(struct spi_device *spi, struct board_info *db)
+{
+	/* create two regmap instances,
+	 * splitting read/write and bulk_read/bulk_write into individual
+	 * regmaps to avoid conflicting regmap accesses
+ */
+ regconfigdm.lock_arg = db;
+ db->regmap_dm = devm_regmap_init_spi(db->spidev, &regconfigdm);
+ if (IS_ERR(db->regmap_dm))
+ return PTR_ERR(db->regmap_dm);
+
+ regconfigdmbulk.lock_arg = db;
+ db->regmap_dmbulk = devm_regmap_init_spi(db->spidev, &regconfigdmbulk);
+ if (IS_ERR(db->regmap_dmbulk))
+ return PTR_ERR(db->regmap_dmbulk);
+
+ return 0;
+}
+
+static int dm9051_map_chipid(struct board_info *db)
+{
+ struct device *dev = &db->spidev->dev;
+ unsigned short wid;
+ u8 buff[6];
+ int ret;
+
+ ret = dm9051_get_regs(db, DM9051_VIDL, buff, sizeof(buff));
+ if (ret < 0)
+ return ret;
+
+ wid = get_unaligned_le16(buff + 2);
+ if (wid != DM9051_ID) {
+ dev_err(dev, "chipid error as %04x !\n", wid);
+ return -ENODEV;
+ }
+
+ dev_info(dev, "chip %04x found\n", wid);
+ return 0;
+}
+
+/* Read the DM9051_PAR registers, which hold the mac address loaded from EEPROM at power-on
+ */
+static int dm9051_map_etherdev_par(struct net_device *ndev, struct board_info *db)
+{
+ u8 addr[ETH_ALEN];
+ int ret;
+
+ ret = dm9051_get_regs(db, DM9051_PAR, addr, sizeof(addr));
+ if (ret < 0)
+ return ret;
+
+ if (!is_valid_ether_addr(addr)) {
+ eth_hw_addr_random(ndev);
+
+ ret = dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&db->spidev->dev, "Use random MAC address\n");
+ return 0;
+ }
+
+ eth_hw_addr_set(ndev, addr);
+ return 0;
+}
+
+/* ethtool-ops
+ */
+static void dm9051_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, DRVNAME_9051, sizeof(info->driver));
+}
+
+static void dm9051_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ db->msg_enable = value;
+}
+
+static u32 dm9051_get_msglevel(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ return db->msg_enable;
+}
+
+static int dm9051_get_eeprom_len(struct net_device *dev)
+{
+ return 128;
+}
+
+static int dm9051_get_eeprom(struct net_device *ndev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int offset = ee->offset;
+ int len = ee->len;
+ int i, ret;
+
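+	/* eeprom words are 16 bits wide, so offset and length must be even */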
+ if ((len | offset) & 1)
+ return -EINVAL;
+
+ ee->magic = DM_EEPROM_MAGIC;
+
+ for (i = 0; i < len; i += 2) {
+ ret = dm9051_eeprom_read(db, (offset + i) / 2, data + i);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int dm9051_set_eeprom(struct net_device *ndev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int offset = ee->offset;
+ int len = ee->len;
+ int i, ret;
+
+ if ((len | offset) & 1)
+ return -EINVAL;
+
+ if (ee->magic != DM_EEPROM_MAGIC)
+ return -EINVAL;
+
+ for (i = 0; i < len; i += 2) {
+ ret = dm9051_eeprom_write(db, (offset + i) / 2, data + i);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static void dm9051_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ *pause = db->pause;
+}
+
+static int dm9051_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ db->pause = *pause;
+
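+	/* with autoneg disabled, program the FCR directly;
+	 * otherwise let the phy negotiate symmetric pause
+	 */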
+ if (pause->autoneg == AUTONEG_DISABLE)
+ return dm9051_update_fcr(db);
+
+ phy_set_sym_pause(db->phydev, pause->rx_pause, pause->tx_pause,
+ pause->autoneg);
+ phy_start_aneg(db->phydev);
+ return 0;
+}
+
+static const struct ethtool_ops dm9051_ethtool_ops = {
+ .get_drvinfo = dm9051_get_drvinfo,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_msglevel = dm9051_get_msglevel,
+ .set_msglevel = dm9051_set_msglevel,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = dm9051_get_eeprom_len,
+ .get_eeprom = dm9051_get_eeprom,
+ .set_eeprom = dm9051_set_eeprom,
+ .get_pauseparam = dm9051_get_pauseparam,
+ .set_pauseparam = dm9051_set_pauseparam,
+};
+
+static int dm9051_all_start(struct board_info *db)
+{
+ int ret;
+
+ /* GPR power on of the internal phy
+ */
+ ret = dm9051_set_reg(db, DM9051_GPR, 0);
+ if (ret)
+ return ret;
+
+	/* dm9051 chip registers cannot be accessed within 1 ms
+	 * after GPR power-on, so a 1 ms delay is essential
+ */
+ msleep(1);
+
+ ret = dm9051_core_reset(db);
+ if (ret)
+ return ret;
+
+ return dm9051_enable_interrupt(db);
+}
+
+static int dm9051_all_stop(struct board_info *db)
+{
+ int ret;
+
+	/* GPR power-off of the internal phy;
+	 * the internal phy can still be accessed after this GPR power-off control
+ */
+ ret = dm9051_set_reg(db, DM9051_GPR, GPR_PHY_OFF);
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_RCR, RCR_RX_DISABLE);
+}
+
+/* fifo reset when an rx error is found
+ */
+static int dm9051_all_restart(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ int ret;
+
+ ret = dm9051_core_reset(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_enable_interrupt(db);
+ if (ret)
+ return ret;
+
+ netdev_dbg(ndev, " rxstatus_Er & rxlen_Er %d, RST_c %d\n",
+ db->bc.status_err_counter + db->bc.large_err_counter,
+ db->bc.fifo_rst_counter);
+
+ ret = dm9051_set_recv(db);
+ if (ret)
+ return ret;
+
+ return dm9051_set_fcr(db);
+}
+
+/* read packets from the fifo memory
+ * return value,
+ * > 0 - number of packets read, the caller can repeat the rx operation
+ * 0 - no error, the caller must stop further rx operation
+ * -EBUSY - read data error, the caller must escape from the rx operation
+ */
+static int dm9051_loop_rx(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ unsigned int rxbyte;
+ int ret, rxlen;
+ struct sk_buff *skb;
+ u8 *rdptr;
+ int scanrr = 0;
+
+ do {
+ ret = dm9051_read_mem(db, DM_SPI_MRCMDX, &rxbyte, 2);
+ if (ret)
+ return ret;
+
+ if ((rxbyte & GENMASK(7, 0)) != DM9051_PKT_RDY)
+ break; /* exhaust-empty */
+
+ ret = dm9051_read_mem(db, DM_SPI_MRCMD, &db->rxhdr, DM_RXHDR_SIZE);
+ if (ret)
+ return ret;
+
+ ret = dm9051_stop_mrcmd(db);
+ if (ret)
+ return ret;
+
+ rxlen = le16_to_cpu(db->rxhdr.rxlen);
+ if (db->rxhdr.status & RSR_ERR_BITS || rxlen > DM9051_PKT_MAX) {
+ netdev_dbg(ndev, "rxhdr-byte (%02x)\n",
+ db->rxhdr.headbyte);
+
+ if (db->rxhdr.status & RSR_ERR_BITS) {
+ db->bc.status_err_counter++;
+ netdev_dbg(ndev, "check rxstatus-error (%02x)\n",
+ db->rxhdr.status);
+ } else {
+ db->bc.large_err_counter++;
+ netdev_dbg(ndev, "check rxlen large-error (%d > %d)\n",
+ rxlen, DM9051_PKT_MAX);
+ }
+ return dm9051_all_restart(db);
+ }
+
+ skb = dev_alloc_skb(rxlen);
+ if (!skb) {
+ ret = dm9051_dumpblk(db, DM_SPI_MRCMD, rxlen);
+ if (ret)
+ return ret;
+ return scanrr;
+ }
+
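+		/* rxlen includes the trailing 4-byte CRC: account only
+		 * rxlen - 4 bytes as packet data; the CRC is read into the
+		 * skb tailroom and dropped
+		 */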
+ rdptr = skb_put(skb, rxlen - 4);
+ ret = dm9051_read_mem(db, DM_SPI_MRCMD, rdptr, rxlen);
+ if (ret) {
+ db->bc.rx_err_counter++;
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ret = dm9051_stop_mrcmd(db);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ skb->protocol = eth_type_trans(skb, db->ndev);
+ if (db->ndev->features & NETIF_F_RXCSUM)
+ skb_checksum_none_assert(skb);
+ netif_rx(skb);
+ db->ndev->stats.rx_bytes += rxlen;
+ db->ndev->stats.rx_packets++;
+ scanrr++;
+ } while (!ret);
+
+ return scanrr;
+}
+
+/* transmit a packet,
+ * return value,
+ * 0 - success
+ * -ETIMEDOUT - timeout error
+ */
+static int dm9051_single_tx(struct board_info *db, u8 *buff, unsigned int len)
+{
+ int ret;
+
+ ret = dm9051_nsr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_write_mem(db, DM_SPI_MWCMD, buff, len);
+ if (ret)
+ return ret;
+
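+	/* write the packet length to TXPLL/TXPLH, then issue the tx request */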
+ ret = dm9051_set_regs(db, DM9051_TXPLL, &len, 2);
+ if (ret < 0)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_TCR, TCR_TXREQ);
+}
+
+static int dm9051_loop_tx(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ int ntx = 0;
+ int ret;
+
+ while (!skb_queue_empty(&db->txq)) {
+ struct sk_buff *skb;
+ unsigned int len;
+
+ skb = skb_dequeue(&db->txq);
+ if (skb) {
+ ntx++;
+ ret = dm9051_single_tx(db, skb->data, skb->len);
+ len = skb->len;
+ dev_kfree_skb(skb);
+ if (ret < 0) {
+ db->bc.tx_err_counter++;
+ return 0;
+ }
+ ndev->stats.tx_bytes += len;
+ ndev->stats.tx_packets++;
+ }
+
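+		/* wake the tx queue once the backlog drains below the low-water mark */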
+ if (netif_queue_stopped(ndev) &&
+ (skb_queue_len(&db->txq) < DM9051_TX_QUE_LO_WATER))
+ netif_wake_queue(ndev);
+ }
+
+ return ntx;
+}
+
+static irqreturn_t dm9051_rx_threaded_irq(int irq, void *pw)
+{
+ struct board_info *db = pw;
+ int result, result_tx;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_disable_interrupt(db);
+ if (result)
+ goto out_unlock;
+
+ result = dm9051_clear_interrupt(db);
+ if (result)
+ goto out_unlock;
+
+ do {
+ result = dm9051_loop_rx(db); /* threaded irq rx */
+ if (result < 0)
+ goto out_unlock;
+ result_tx = dm9051_loop_tx(db); /* more tx better performance */
+ if (result_tx < 0)
+ goto out_unlock;
+ } while (result > 0);
+
+ dm9051_enable_interrupt(db);
+
+	/* exit with the mutex unlocked when an rx or tx error occurs
+ */
+out_unlock:
+ mutex_unlock(&db->spi_lockm);
+
+ return IRQ_HANDLED;
+}
+
+static void dm9051_tx_delay(struct work_struct *work)
+{
+ struct board_info *db = container_of(work, struct board_info, tx_work);
+ int result;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_loop_tx(db);
+ if (result < 0)
+ netdev_err(db->ndev, "transmit packet error\n");
+
+ mutex_unlock(&db->spi_lockm);
+}
+
+static void dm9051_rxctl_delay(struct work_struct *work)
+{
+ struct board_info *db = container_of(work, struct board_info, rxctrl_work);
+ struct net_device *ndev = db->ndev;
+ int result;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+ if (result < 0)
+ goto out_unlock;
+
+ dm9051_set_recv(db);
+
+	/* unlock the mutex and return from this function if a regmap access fails
+ */
+out_unlock:
+ mutex_unlock(&db->spi_lockm);
+}
+
+/* Open network device
+ * Called when the network device is marked active, such as a user executing
+ * 'ifconfig up' on the device
+ */
+static int dm9051_open(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ struct spi_device *spi = db->spidev;
+ int ret;
+
+ db->imr_all = IMR_PAR | IMR_PRM;
+ db->lcr_all = LMCR_MODE1;
+ db->rctl.rcr_all = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
+ memset(db->rctl.hash_table, 0, sizeof(db->rctl.hash_table));
+
+ ndev->irq = spi->irq; /* by dts */
+ ret = request_threaded_irq(spi->irq, NULL, dm9051_rx_threaded_irq,
+ dm9051_irq_flag(db) | IRQF_ONESHOT,
+ ndev->name, db);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to get irq\n");
+ return ret;
+ }
+
+ phy_support_sym_pause(db->phydev);
+ phy_start(db->phydev);
+
+ /* flow control parameters init */
+ db->pause.rx_pause = true;
+ db->pause.tx_pause = true;
+ db->pause.autoneg = AUTONEG_DISABLE;
+
+ if (db->phydev->autoneg)
+ db->pause.autoneg = AUTONEG_ENABLE;
+
+ ret = dm9051_all_start(db);
+ if (ret) {
+ phy_stop(db->phydev);
+ free_irq(spi->irq, db);
+ return ret;
+ }
+
+ netif_wake_queue(ndev);
+
+ return 0;
+}
+
+/* Close network device
+ * Called to close down a network device which has been active. Cancel any
+ * work, shutdown the RX and TX process and then place the chip into a low
+ * power state while it is not being used
+ */
+static int dm9051_stop(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int ret;
+
+ ret = dm9051_all_stop(db);
+ if (ret)
+ return ret;
+
+ flush_work(&db->tx_work);
+ flush_work(&db->rxctrl_work);
+
+ phy_stop(db->phydev);
+
+ free_irq(db->spidev->irq, db);
+
+ netif_stop_queue(ndev);
+
+ skb_queue_purge(&db->txq);
+
+ return 0;
+}
+
+/* event: queue the packet and conditionally schedule the tx work
+ */
+static netdev_tx_t dm9051_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ skb_queue_tail(&db->txq, skb);
+ if (skb_queue_len(&db->txq) > DM9051_TX_QUE_HI_WATER)
+ netif_stop_queue(ndev); /* enforce limit queue size */
+
+ schedule_work(&db->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+/* event: update the rx mode and schedule the rxctrl work when needed
+ */
+static void dm9051_set_rx_mode(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ struct dm9051_rxctrl rxctrl;
+ struct netdev_hw_addr *ha;
+ u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
+ u32 hash_val;
+
+ memset(&rxctrl, 0, sizeof(rxctrl));
+
+ /* rx control */
+ if (ndev->flags & IFF_PROMISC) {
+ rcr |= RCR_PRMSC;
+ netdev_dbg(ndev, "set_multicast rcr |= RCR_PRMSC, rcr= %02x\n", rcr);
+ }
+
+ if (ndev->flags & IFF_ALLMULTI) {
+ rcr |= RCR_ALL;
+ netdev_dbg(ndev, "set_multicast rcr |= RCR_ALLMULTI, rcr= %02x\n", rcr);
+ }
+
+ rxctrl.rcr_all = rcr;
+
+ /* broadcast address */
+ rxctrl.hash_table[0] = 0;
+ rxctrl.hash_table[1] = 0;
+ rxctrl.hash_table[2] = 0;
+ rxctrl.hash_table[3] = 0x8000;
+
+ /* the multicast address in Hash Table : 64 bits */
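+	/* the low 6 bits of the little-endian crc select one of the 64 hash bits */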
+ netdev_for_each_mc_addr(ha, ndev) {
+ hash_val = ether_crc_le(ETH_ALEN, ha->addr) & GENMASK(5, 0);
+ rxctrl.hash_table[hash_val / 16] |= BIT(0) << (hash_val % 16);
+ }
+
+ /* schedule work to do the actual set of the data if needed */
+
+ if (memcmp(&db->rctl, &rxctrl, sizeof(rxctrl))) {
+ memcpy(&db->rctl, &rxctrl, sizeof(rxctrl));
+ schedule_work(&db->rxctrl_work);
+ }
+}
+
+/* event: write the new mac address into the mac address registers directly
+ */
+static int dm9051_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(ndev, p);
+ if (ret < 0)
+ return ret;
+
+ eth_commit_mac_addr_change(ndev, p);
+ return dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+}
+
+static const struct net_device_ops dm9051_netdev_ops = {
+ .ndo_open = dm9051_open,
+ .ndo_stop = dm9051_stop,
+ .ndo_start_xmit = dm9051_start_xmit,
+ .ndo_set_rx_mode = dm9051_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = dm9051_set_mac_address,
+};
+
+static void dm9051_operation_clear(struct board_info *db)
+{
+ db->bc.status_err_counter = 0;
+ db->bc.large_err_counter = 0;
+ db->bc.rx_err_counter = 0;
+ db->bc.tx_err_counter = 0;
+ db->bc.fifo_rst_counter = 0;
+}
+
+static int dm9051_mdio_register(struct board_info *db)
+{
+ struct spi_device *spi = db->spidev;
+ int ret;
+
+ db->mdiobus = devm_mdiobus_alloc(&spi->dev);
+ if (!db->mdiobus)
+ return -ENOMEM;
+
+ db->mdiobus->priv = db;
+ db->mdiobus->read = dm9051_mdio_read;
+ db->mdiobus->write = dm9051_mdio_write;
+ db->mdiobus->name = "dm9051-mdiobus";
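+	/* only phy address 1 (the internal phy) is scanned on this mdio bus */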
+ db->mdiobus->phy_mask = (u32)~BIT(1);
+ db->mdiobus->parent = &spi->dev;
+ snprintf(db->mdiobus->id, MII_BUS_ID_SIZE,
+ "dm9051-%s.%u", dev_name(&spi->dev), spi_get_chipselect(spi, 0));
+
+ ret = devm_mdiobus_register(&spi->dev, db->mdiobus);
+ if (ret)
+ dev_err(&spi->dev, "Could not register MDIO bus\n");
+
+ return ret;
+}
+
+static void dm9051_handle_link_change(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ phy_print_status(db->phydev);
+
+	/* only write the pause settings to the mac; since the mac and phy are
+	 * integrated, link state, speed and duplex are already in sync
+ */
+ if (db->phydev->link) {
+ if (db->phydev->pause) {
+ db->pause.rx_pause = true;
+ db->pause.tx_pause = true;
+ }
+ dm9051_update_fcr(db);
+ }
+}
+
+/* connect the phy in poll mode
+ */
+static int dm9051_phy_connect(struct board_info *db)
+{
+ char phy_id[MII_BUS_ID_SIZE + 3];
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+ db->mdiobus->id, DM9051_PHY_ADDR);
+
+ db->phydev = phy_connect(db->ndev, phy_id, dm9051_handle_link_change,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(db->phydev))
+ return PTR_ERR_OR_ZERO(db->phydev);
+ return 0;
+}
+
+static int dm9051_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct net_device *ndev;
+ struct board_info *db;
+ int ret;
+
+ ndev = devm_alloc_etherdev(dev, sizeof(struct board_info));
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, dev);
+ dev_set_drvdata(dev, ndev);
+
+ db = netdev_priv(ndev);
+
+ db->msg_enable = 0;
+ db->spidev = spi;
+ db->ndev = ndev;
+
+ ndev->netdev_ops = &dm9051_netdev_ops;
+ ndev->ethtool_ops = &dm9051_ethtool_ops;
+
+ mutex_init(&db->spi_lockm);
+ mutex_init(&db->reg_mutex);
+
+ INIT_WORK(&db->rxctrl_work, dm9051_rxctl_delay);
+ INIT_WORK(&db->tx_work, dm9051_tx_delay);
+
+ ret = dm9051_map_init(spi, db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_map_chipid(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_map_etherdev_par(ndev, db);
+ if (ret < 0)
+ return ret;
+
+ ret = dm9051_mdio_register(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_phy_connect(db);
+ if (ret)
+ return ret;
+
+ dm9051_operation_clear(db);
+ skb_queue_head_init(&db->txq);
+
+ ret = devm_register_netdev(dev, ndev);
+ if (ret) {
+ phy_disconnect(db->phydev);
+ return dev_err_probe(dev, ret, "device register failed");
+ }
+
+ return 0;
+}
+
+static void dm9051_drv_remove(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct board_info *db = to_dm9051_board(ndev);
+
+ phy_disconnect(db->phydev);
+}
+
+static const struct of_device_id dm9051_match_table[] = {
+ { .compatible = "davicom,dm9051" },
+ {}
+};
+
+static const struct spi_device_id dm9051_id_table[] = {
+ { "dm9051", 0 },
+ {}
+};
+
+static struct spi_driver dm9051_driver = {
+ .driver = {
+ .name = DRVNAME_9051,
+ .of_match_table = dm9051_match_table,
+ },
+ .probe = dm9051_probe,
+ .remove = dm9051_drv_remove,
+ .id_table = dm9051_id_table,
+};
+module_spi_driver(dm9051_driver);
+
+MODULE_AUTHOR("Joseph CHANG <joseph_chang@davicom.com.tw>");
+MODULE_DESCRIPTION("Davicom DM9051 network SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/davicom/dm9051.h b/drivers/net/ethernet/davicom/dm9051.h
new file mode 100644
index 000000000000..fef3120edd7c
--- /dev/null
+++ b/drivers/net/ethernet/davicom/dm9051.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Davicom Semiconductor,Inc.
+ * Davicom DM9051 SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _DM9051_H_
+#define _DM9051_H_
+
+#include <linux/bits.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+#define DM9051_ID 0x9051
+
+#define DM9051_NCR 0x00
+#define DM9051_NSR 0x01
+#define DM9051_TCR 0x02
+#define DM9051_RCR 0x05
+#define DM9051_BPTR 0x08
+#define DM9051_FCR 0x0A
+#define DM9051_EPCR 0x0B
+#define DM9051_EPAR 0x0C
+#define DM9051_EPDRL 0x0D
+#define DM9051_EPDRH 0x0E
+#define DM9051_PAR 0x10
+#define DM9051_MAR 0x16
+#define DM9051_GPCR 0x1E
+#define DM9051_GPR 0x1F
+
+#define DM9051_VIDL 0x28
+#define DM9051_VIDH 0x29
+#define DM9051_PIDL 0x2A
+#define DM9051_PIDH 0x2B
+#define DM9051_SMCR 0x2F
+#define DM9051_ATCR 0x30
+#define DM9051_SPIBCR 0x38
+#define DM9051_INTCR 0x39
+#define DM9051_PPCR 0x3D
+
+#define DM9051_MPCR 0x55
+#define DM9051_LMCR 0x57
+#define DM9051_MBNDRY 0x5E
+
+#define DM9051_MRRL 0x74
+#define DM9051_MRRH 0x75
+#define DM9051_MWRL 0x7A
+#define DM9051_MWRH 0x7B
+#define DM9051_TXPLL 0x7C
+#define DM9051_TXPLH 0x7D
+#define DM9051_ISR 0x7E
+#define DM9051_IMR 0x7F
+
+#define DM_SPI_MRCMDX 0x70
+#define DM_SPI_MRCMD 0x72
+#define DM_SPI_MWCMD 0x78
+
+#define DM_SPI_WR 0x80
+
+/* dm9051 Ethernet controller registers bits
+ */
+/* 0x00 */
+#define NCR_WAKEEN BIT(6)
+#define NCR_FDX BIT(3)
+#define NCR_RST BIT(0)
+/* 0x01 */
+#define NSR_SPEED BIT(7)
+#define NSR_LINKST BIT(6)
+#define NSR_WAKEST BIT(5)
+#define NSR_TX2END BIT(3)
+#define NSR_TX1END BIT(2)
+/* 0x02 */
+#define TCR_DIS_JABBER_TIMER BIT(6) /* for Jabber Packet support */
+#define TCR_TXREQ BIT(0)
+/* 0x05 */
+#define RCR_DIS_WATCHDOG_TIMER BIT(6) /* for Jabber Packet support */
+#define RCR_DIS_LONG BIT(5)
+#define RCR_DIS_CRC BIT(4)
+#define RCR_ALL BIT(3)
+#define RCR_PRMSC BIT(1)
+#define RCR_RXEN BIT(0)
+#define RCR_RX_DISABLE (RCR_DIS_LONG | RCR_DIS_CRC)
+/* 0x06 */
+#define RSR_RF BIT(7)
+#define RSR_MF BIT(6)
+#define RSR_LCS BIT(5)
+#define RSR_RWTO BIT(4)
+#define RSR_PLE BIT(3)
+#define RSR_AE BIT(2)
+#define RSR_CE BIT(1)
+#define RSR_FOE BIT(0)
+#define RSR_ERR_BITS (RSR_RF | RSR_LCS | RSR_RWTO | RSR_PLE | \
+ RSR_AE | RSR_CE | RSR_FOE)
+/* 0x0A */
+#define FCR_TXPEN BIT(5)
+#define FCR_BKPM BIT(3)
+#define FCR_FLCE BIT(0)
+#define FCR_RXTX_BITS (FCR_TXPEN | FCR_BKPM | FCR_FLCE)
+/* 0x0B */
+#define EPCR_WEP BIT(4)
+#define EPCR_EPOS BIT(3)
+#define EPCR_ERPRR BIT(2)
+#define EPCR_ERPRW BIT(1)
+#define EPCR_ERRE BIT(0)
+/* 0x1E */
+#define GPCR_GEP_CNTL BIT(0)
+/* 0x1F */
+#define GPR_PHY_OFF BIT(0)
+/* 0x30 */
+#define ATCR_AUTO_TX BIT(7)
+/* 0x39 */
+#define INTCR_POL_LOW (1 << 0)
+#define INTCR_POL_HIGH (0 << 0)
+/* 0x3D */
+/* Pause Packet Control Register - default = 1 */
+#define PPCR_PAUSE_COUNT 0x08
+/* 0x55 */
+#define MPCR_RSTTX BIT(1)
+#define MPCR_RSTRX BIT(0)
+/* 0x57 */
+/* LEDMode Control Register - LEDMode1 */
+/* Value 0x81 : bit[7] = 1, bit[2] = 0, bit[1:0] = 01b */
+#define LMCR_NEWMOD BIT(7)
+#define LMCR_TYPED1 BIT(1)
+#define LMCR_TYPED0 BIT(0)
+#define LMCR_MODE1 (LMCR_NEWMOD | LMCR_TYPED0)
+/* 0x5E */
+#define MBNDRY_BYTE BIT(7)
+/* 0xFE */
+#define ISR_MBS BIT(7)
+#define ISR_LNKCHG BIT(5)
+#define ISR_ROOS BIT(3)
+#define ISR_ROS BIT(2)
+#define ISR_PTS BIT(1)
+#define ISR_PRS BIT(0)
+#define ISR_CLR_INT (ISR_LNKCHG | ISR_ROOS | ISR_ROS | \
+ ISR_PTS | ISR_PRS)
+#define ISR_STOP_MRCMD (ISR_MBS)
+/* 0xFF */
+#define IMR_PAR BIT(7)
+#define IMR_LNKCHGI BIT(5)
+#define IMR_PTM BIT(1)
+#define IMR_PRM BIT(0)
+
+/* Const
+ */
+#define DM9051_PHY_ADDR 1 /* PHY id */
+#define DM9051_PHY 0x40 /* PHY address 0x01 */
+#define DM9051_PKT_RDY 0x01 /* Packet ready to receive */
+#define DM9051_PKT_MAX 1536 /* Received packet max size */
+#define DM9051_TX_QUE_HI_WATER 50
+#define DM9051_TX_QUE_LO_WATER 25
+#define DM_EEPROM_MAGIC 0x9051
+
+#define DM_RXHDR_SIZE sizeof(struct dm9051_rxhdr)
+
+static inline struct board_info *to_dm9051_board(struct net_device *ndev)
+{
+ return netdev_priv(ndev);
+}
+
+#endif /* _DM9051_H_ */
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index 79dc336ce709..078a12f07e96 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -104,21 +104,6 @@ config TULIP_DM910X
def_bool y
depends on TULIP && SPARC
-config DE4X5
- tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
- depends on (PCI || EISA)
- depends on VIRT_TO_BUS || ALPHA || PPC || SPARC
- select CRC32
- help
- This is support for the DIGITAL series of PCI/EISA Ethernet cards.
- These include the DE425, DE434, DE435, DE450 and DE500 models. If
- you have a network card of this type, say Y. More specific
- information is contained in
- <file:Documentation/networking/device_drivers/ethernet/dec/de4x5.rst>.
-
- To compile this driver as a module, choose M here. The module will
- be called de4x5.
-
config WINBOND_840
tristate "Winbond W89c840 Ethernet support"
depends on PCI
diff --git a/drivers/net/ethernet/dec/tulip/Makefile b/drivers/net/ethernet/dec/tulip/Makefile
index 8aab37564d5d..d4f1d21d29a0 100644
--- a/drivers/net/ethernet/dec/tulip/Makefile
+++ b/drivers/net/ethernet/dec/tulip/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_DM9102) += dmfe.o
obj-$(CONFIG_WINBOND_840) += winbond-840.o
obj-$(CONFIG_DE2104X) += de2104x.o
obj-$(CONFIG_TULIP) += tulip.o
-obj-$(CONFIG_DE4X5) += de4x5.o
obj-$(CONFIG_ULI526X) += uli526x.o
# Declare multi-part drivers.
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index d51b3d24a0c8..cd3dc4b89518 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1606,8 +1606,8 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
{
struct de_private *de = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
}
static int de_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
deleted file mode 100644
index 71730ef4cd57..000000000000
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ /dev/null
@@ -1,5591 +0,0 @@
-/* de4x5.c: A DIGITAL DC21x4x DECchip and DE425/DE434/DE435/DE450/DE500
- ethernet driver for Linux.
-
- Copyright 1994, 1995 Digital Equipment Corporation.
-
- Testing resources for this driver have been made available
- in part by NASA Ames Research Center (mjacob@nas.nasa.gov).
-
- The author may be reached at davies@maniac.ultranet.com.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms of the GNU General Public License as published by the
- Free Software Foundation; either version 2 of the License, or (at your
- option) any later version.
-
- THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 675 Mass Ave, Cambridge, MA 02139, USA.
-
- Originally, this driver was written for the Digital Equipment
- Corporation series of EtherWORKS ethernet cards:
-
- DE425 TP/COAX EISA
- DE434 TP PCI
- DE435 TP/COAX/AUI PCI
- DE450 TP/COAX/AUI PCI
- DE500 10/100 PCI Fasternet
-
- but it will now attempt to support all cards which conform to the
- Digital Semiconductor SROM Specification. The driver currently
- recognises the following chips:
-
- DC21040 (no SROM)
- DC21041[A]
- DC21140[A]
- DC21142
- DC21143
-
- So far the driver is known to work with the following cards:
-
- KINGSTON
- Linksys
- ZNYX342
- SMC8432
- SMC9332 (w/new SROM)
- ZNYX31[45]
- ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
-
- The driver has been tested on a relatively busy network using the DE425,
- DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
- 16M of data to a DECstation 5000/200 as follows:
-
- TCP UDP
- TX RX TX RX
- DE425 1030k 997k 1170k 1128k
- DE434 1063k 995k 1170k 1125k
- DE435 1063k 995k 1170k 1125k
- DE500 1063k 998k 1170k 1125k in 10Mb/s mode
-
- All values are typical (in kBytes/sec) from a sample of 4 for each
- measurement. Their error is +/-20k on a quiet (private) network and also
- depend on what load the CPU has.
-
- =========================================================================
- This driver has been written substantially from scratch, although its
- inheritance of style and stack interface from 'ewrk3.c' and in turn from
- Donald Becker's 'lance.c' should be obvious. With the module autoload of
- every usable DECchip board, I pinched Donald's 'next_module' field to
- link my modules together.
-
- Up to 15 EISA cards can be supported under this driver, limited primarily
- by the available IRQ lines. I have checked different configurations of
- multiple depca, EtherWORKS 3 cards and de4x5 cards and have not found a
- problem yet (provided you have at least depca.c v0.38) ...
-
- PCI support has been added to allow the driver to work with the DE434,
- DE435, DE450 and DE500 cards. The I/O accesses are a bit of a kludge due
- to the differences in the EISA and PCI CSR address offsets from the base
- address.
-
- The ability to load this driver as a loadable module has been included
- and used extensively during the driver development (to save those long
- reboot sequences). Loadable module support under PCI and EISA has been
- achieved by letting the driver autoprobe as if it were compiled into the
- kernel. Do make sure you're not sharing interrupts with anything that
- cannot accommodate interrupt sharing!
-
- To utilise this ability, you have to do 8 things:
-
- 0) have a copy of the loadable modules code installed on your system.
- 1) copy de4x5.c from the /linux/drivers/net directory to your favourite
- temporary directory.
- 2) for fixed autoprobes (not recommended), edit the source code near
- line 5594 to reflect the I/O address you're using, or assign these when
- loading by:
-
- insmod de4x5 io=0xghh where g = bus number
- hh = device number
-
- NB: autoprobing for modules is now supported by default. You may just
- use:
-
- insmod de4x5
-
- to load all available boards. For a specific board, still use
- the 'io=?' above.
- 3) compile de4x5.c, but include -DMODULE in the command line to ensure
- that the correct bits are compiled (see end of source code).
- 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
- kernel with the de4x5 configuration turned off and reboot.
- 5) insmod de4x5 [io=0xghh]
- 6) run the net startup bits for your new eth?? interface(s) manually
- (usually /etc/rc.inet[12] at boot time).
- 7) enjoy!
-
- To unload a module, turn off the associated interface(s)
- 'ifconfig eth?? down' then 'rmmod de4x5'.
-
- Automedia detection is included so that in principle you can disconnect
- from, e.g. TP, reconnect to BNC and things will still work (after a
- pause whilst the driver figures out where its media went). My tests
- using ping showed that it appears to work....
-
- By default, the driver will now autodetect any DECchip based card.
- Should you have a need to restrict the driver to DIGITAL only cards, you
- can compile with a DEC_ONLY define, or if loading as a module, use the
- 'dec_only=1' parameter.
-
- I've changed the timing routines to use the kernel timer and scheduling
- functions so that the hangs and other assorted problems that occurred
- while autosensing the media should be gone. A bonus for the DC21040
- auto media sense algorithm is that it can now use one that is more in
- line with the rest (the DC21040 chip doesn't have a hardware timer).
- The downside is the 1 'jiffies' (10ms) resolution.
-
- IEEE 802.3u MII interface code has been added in anticipation that some
- products may use it in the future.
-
- The SMC9332 card has a non-compliant SROM which needs fixing - I have
- patched this driver to detect it because the SROM format used complies
- to a previous DEC-STD format.
-
- I have removed the buffer copies needed for receive on Intels. I cannot
- remove them for Alphas since the Tulip hardware only does longword
- aligned DMA transfers and the Alphas get alignment traps with non
- longword aligned data copies (which makes them really slow). No comment.
-
- I have added SROM decoding routines to make this driver work with any
- card that supports the Digital Semiconductor SROM spec. This will help
- all cards running the dc2114x series chips in particular. Cards using
- the dc2104x chips should run correctly with the basic driver. I'm in
- debt to <mjacob@feral.com> for the testing and feedback that helped get
- this feature working. So far we have tested KINGSTON, SMC8432, SMC9332
- (with the latest SROM complying with the SROM spec V3: their first was
- broken), ZNYX342 and LinkSys. ZYNX314 (dual 21041 MAC) and ZNYX 315
- (quad 21041 MAC) cards also appear to work despite their incorrectly
- wired IRQs.
-
- I have added a temporary fix for interrupt problems when some SCSI cards
- share the same interrupt as the DECchip based cards. The problem occurs
- because the SCSI card wants to grab the interrupt as a fast interrupt
- (runs the service routine with interrupts turned off) vs. this card
- which really needs to run the service routine with interrupts turned on.
- This driver will now add the interrupt service routine as a fast
- interrupt if it is bounced from the slow interrupt. THIS IS NOT A
- RECOMMENDED WAY TO RUN THE DRIVER and has been done for a limited time
- until people sort out their compatibility issues and the kernel
- interrupt service code is fixed. YOU SHOULD SEPARATE OUT THE FAST
- INTERRUPT CARDS FROM THE SLOW INTERRUPT CARDS to ensure that they do not
- run on the same interrupt. PCMCIA/CardBus is another can of worms...
-
- Finally, I think I have really fixed the module loading problem with
- more than one DECchip based card. As a side effect, I don't mess with
- the device structure any more which means that if more than 1 card in
- 2.0.x is installed (4 in 2.1.x), the user will have to edit
- linux/drivers/net/Space.c to make room for them. Hence, module loading
- is the preferred way to use this driver, since it doesn't have this
- limitation.
-
- Where SROM media detection is used and full duplex is specified in the
- SROM, the feature is ignored unless lp->params.fdx is set at compile
- time OR during a module load (insmod de4x5 args='eth??:fdx' [see
- below]). This is because there is no way to automatically detect full
- duplex links except through autonegotiation. When I include the
- autonegotiation feature in the SROM autoconf code, this detection will
- occur automatically for that case.
-
- Command line arguments are now allowed, similar to passing arguments
- through LILO. This will allow a per adapter board set up of full duplex
- and media. The only lexical constraints are: the board name (dev->name)
- appears in the list before its parameters. The list of parameters ends
- either at the end of the parameter list or with another board name. The
- following parameters are allowed:
-
- fdx for full duplex
- autosense to set the media/speed; with the following
- sub-parameters:
- TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO
-
- Case sensitivity is important for the sub-parameters. They *must* be
- upper case. Examples:
-
- insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
-
- For a compiled in driver, at or above line 548, place e.g.
- #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
-
- Yes, I know full duplex isn't permissible on BNC or AUI; they're just
- examples. By default, full duplex is turned off and AUTO is the default
- autosense setting. In reality, I expect only the full duplex option to
- be used. Note the use of single quotes in the two examples above and the
- lack of commas to separate items. ALSO, you must get the requested media
- correct in relation to what the adapter SROM says it has. There's no way
- to determine this in advance other than by trial and error and common
- sense, e.g. call a BNC connectored port 'BNC', not '10Mb'.
-
- Changed the bus probing. EISA used to be done first, followed by PCI.
- Most people probably don't even know what a de425 is today and the EISA
- probe has messed up some SCSI cards in the past, so now PCI is always
- probed first followed by EISA if a) the architecture allows EISA and
- either b) there have been no PCI cards detected or c) an EISA probe is
- forced by the user. To force a probe include "force_eisa" in your
- insmod "args" line; for built-in kernels either change the driver to do
- this automatically or include #define DE4X5_FORCE_EISA on or before
- line 1040 in the driver.
-
- TO DO:
- ------
-
- Revision History
- ----------------
-
- Version Date Description
-
- 0.1 17-Nov-94 Initial writing. ALPHA code release.
- 0.2 13-Jan-95 Added PCI support for DE435's.
- 0.21 19-Jan-95 Added auto media detection.
- 0.22 10-Feb-95 Fix interrupt handler call <chris@cosy.sbg.ac.at>.
- Fix recognition bug reported by <bkm@star.rl.ac.uk>.
- Add request/release_region code.
- Add loadable modules support for PCI.
- Clean up loadable modules support.
- 0.23 28-Feb-95 Added DC21041 and DC21140 support.
- Fix missed frame counter value and initialisation.
- Fixed EISA probe.
- 0.24 11-Apr-95 Change delay routine to use <linux/udelay>.
- Change TX_BUFFS_AVAIL macro.
- Change media autodetection to allow manual setting.
- Completed DE500 (DC21140) support.
- 0.241 18-Apr-95 Interim release without DE500 Autosense Algorithm.
- 0.242 10-May-95 Minor changes.
- 0.30 12-Jun-95 Timer fix for DC21140.
- Portability changes.
- Add ALPHA changes from <jestabro@ant.tay1.dec.com>.
- Add DE500 semi automatic autosense.
- Add Link Fail interrupt TP failure detection.
- Add timer based link change detection.
- Plugged a memory leak in de4x5_queue_pkt().
- 0.31 13-Jun-95 Fixed PCI stuff for 1.3.1.
- 0.32 26-Jun-95 Added verify_area() calls in de4x5_ioctl() from a
- suggestion by <heiko@colossus.escape.de>.
- 0.33 8-Aug-95 Add shared interrupt support (not released yet).
- 0.331 21-Aug-95 Fix de4x5_open() with fast CPUs.
- Fix de4x5_interrupt().
- Fix dc21140_autoconf() mess.
- No shared interrupt support.
- 0.332 11-Sep-95 Added MII management interface routines.
- 0.40 5-Mar-96 Fix setup frame timeout <maartenb@hpkuipc.cern.ch>.
- Add kernel timer code (h/w is too flaky).
- Add MII based PHY autosense.
- Add new multicasting code.
- Add new autosense algorithms for media/mode
- selection using kernel scheduling/timing.
- Re-formatted.
- Made changes suggested by <jeff@router.patch.net>:
- Change driver to detect all DECchip based cards
- with DEC_ONLY restriction a special case.
- Changed driver to autoprobe as a module. No irq
- checking is done now - assume BIOS is good!
- Added SMC9332 detection <manabe@Roy.dsl.tutics.ac.jp>
- 0.41 21-Mar-96 Don't check for get_hw_addr checksum unless DEC card
- only <niles@axp745gsfc.nasa.gov>
- Fix for multiple PCI cards reported by <jos@xos.nl>
- Duh, put the IRQF_SHARED flag into request_interrupt().
- Fix SMC ethernet address in enet_det[].
- Print chip name instead of "UNKNOWN" during boot.
- 0.42 26-Apr-96 Fix MII write TA bit error.
- Fix bug in dc21040 and dc21041 autosense code.
- Remove buffer copies on receive for Intels.
- Change sk_buff handling during media disconnects to
- eliminate DUP packets.
- Add dynamic TX thresholding.
- Change all chips to use perfect multicast filtering.
- Fix alloc_device() bug <jari@markkus2.fimr.fi>
- 0.43 21-Jun-96 Fix unconnected media TX retry bug.
- Add Accton to the list of broken cards.
- Fix TX under-run bug for non DC21140 chips.
- Fix boot command probe bug in alloc_device() as
- reported by <koen.gadeyne@barco.com> and
- <orava@nether.tky.hut.fi>.
- Add cache locks to prevent a race condition as
- reported by <csd@microplex.com> and
- <baba@beckman.uiuc.edu>.
- Upgraded alloc_device() code.
- 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion
- with <csd@microplex.com>
- 0.44 13-Aug-96 Fix RX overflow bug in 2114[023] chips.
- Fix EISA probe bugs reported by <os2@kpi.kharkov.ua>
- and <michael@compurex.com>.
- 0.441 9-Sep-96 Change dc21041_autoconf() to probe quiet BNC media
- with a loopback packet.
- 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported
- by <bhat@mundook.cs.mu.OZ.AU>
- 0.45 8-Dec-96 Include endian functions for PPC use, from work
- by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>.
- 0.451 28-Dec-96 Added fix to allow autoprobe for modules after
- suggestion from <mjacob@feral.com>.
- 0.5 30-Jan-97 Added SROM decoding functions.
- Updated debug flags.
- Fix sleep/wakeup calls for PCI cards, bug reported
- by <cross@gweep.lkg.dec.com>.
- Added multi-MAC, one SROM feature from discussion
- with <mjacob@feral.com>.
- Added full module autoprobe capability.
- Added attempt to use an SMC9332 with broken SROM.
- Added fix for ZYNX multi-mac cards that didn't
- get their IRQs wired correctly.
- 0.51 13-Feb-97 Added endian fixes for the SROM accesses from
- <paubert@iram.es>
- Fix init_connection() to remove extra device reset.
- Fix MAC/PHY reset ordering in dc21140m_autoconf().
- Fix initialisation problem with lp->timeout in
- typeX_infoblock() from <paubert@iram.es>.
- Fix MII PHY reset problem from work done by
- <paubert@iram.es>.
- 0.52 26-Apr-97 Some changes may not credit the right people -
- a disk crash meant I lost some mail.
- Change RX interrupt routine to drop rather than
- defer packets to avoid hang reported by
- <g.thomas@opengroup.org>.
- Fix srom_exec() to return for COMPACT and type 1
- infoblocks.
- Added DC21142 and DC21143 functions.
- Added byte counters from <phil@tazenda.demon.co.uk>
- Added IRQF_DISABLED temporary fix from
- <mjacob@feral.com>.
- 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during
- module load: bug reported by
- <Piete.Brooks@cl.cam.ac.uk>
- Fix multi-MAC, one SROM, to work with 2114x chips:
- bug reported by <cmetz@inner.net>.
- Make above search independent of BIOS device scan
- direction.
- Completed DC2114[23] autosense functions.
- 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by
- <robin@intercore.com>
- Fix type1_infoblock() bug introduced in 0.53, from
- problem reports by
- <parmee@postecss.ncrfran.france.ncr.com> and
- <jo@ice.dillingen.baynet.de>.
- Added argument list to set up each board from either
- a module's command line or a compiled in #define.
- Added generic MII PHY functionality to deal with
- newer PHY chips.
- Fix the mess in 2.1.67.
- 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by
- <redhat@cococo.net>.
- Fix bug in pci_probe() for 64 bit systems reported
- by <belliott@accessone.com>.
- 0.533 9-Jan-98 Fix more 64 bit bugs reported by <jal@cs.brown.edu>.
- 0.534 24-Jan-98 Fix last (?) endian bug from <geert@linux-m68k.org>
- 0.535 21-Feb-98 Fix Ethernet Address PROM reset bug for DC21040.
- 0.536 21-Mar-98 Change pci_probe() to use the pci_dev structure.
- **Incompatible with 2.0.x from here.**
- 0.540 5-Jul-98 Atomicize assertion of dev->interrupt for SMP
- from <lma@varesearch.com>
- Add TP, AUI and BNC cases to 21140m_autoconf() for
- case where a 21140 under SROM control uses, e.g. AUI
- from problem report by <delchini@lpnp09.in2p3.fr>
- Add MII parallel detection to 2114x_autoconf() for
- case where no autonegotiation partner exists from
- problem report by <mlapsley@ndirect.co.uk>.
- Add ability to force connection type directly even
- when using SROM control from problem report by
- <earl@exis.net>.
- Updated the PCI interface to conform with the latest
- version. I hope nothing is broken...
- Add TX done interrupt modification from suggestion
- by <Austin.Donnelly@cl.cam.ac.uk>.
- Fix is_anc_capable() bug reported by
- <Austin.Donnelly@cl.cam.ac.uk>.
- Fix type[13]_infoblock() bug: during MII search, PHY
- lp->rst not run because lp->ibn not initialised -
- from report & fix by <paubert@iram.es>.
- Fix probe bug with EISA & PCI cards present from
- report by <eirik@netcom.com>.
- 0.541 24-Aug-98 Fix compiler problems associated with i386-string
- ops from multiple bug reports and temporary fix
- from <paubert@iram.es>.
- Fix pci_probe() to correctly emulate the old
- pcibios_find_class() function.
- Add an_exception() for old ZYNX346 and fix compile
- warning on PPC & SPARC, from <ecd@skynet.be>.
- Fix lastPCI to correctly work with compiled in
- kernels and modules from bug report by
- <Zlatko.Calusic@CARNet.hr> et al.
- 0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages
- when media is unconnected.
- Change dev->interrupt to lp->interrupt to ensure
- alignment for Alpha's and avoid their unaligned
- access traps. This flag is merely for log messages:
- should do something more definitive though...
- 0.543 30-Dec-98 Add SMP spin locking.
- 0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using
- a 21143 by <mmporter@home.com>.
- Change PCI/EISA bus probing order.
- 0.545 28-Nov-99 Further Moto SROM bug fix from
- <mporter@eng.mcd.mot.com>
- Remove double checking for DEBUG_RX in de4x5_dbg_rx()
- from report by <geert@linux-m68k.org>
- 0.546 22-Feb-01 Fixes Alpha XP1000 oops. The srom_search function
- was causing a page fault when initializing the
- variable 'pb', on a non de4x5 PCI device, in this
- case a PCI bridge (DEC chip 21152). The value of
- 'pb' is now only initialized if a de4x5 chip is
- present.
- <france@handhelds.org>
- 0.547 08-Nov-01 Use library crc32 functions by <Matt_Domsch@dell.com>
- 0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and
- generic DMA APIs. Fixed DE425 support on Alpha.
- <maz@wild-wind.fr.eu.org>
- =========================================================================
-*/
-
-#include <linux/compat.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/eisa.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/crc32.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/time.h>
-#include <linux/types.h>
-#include <linux/unistd.h>
-#include <linux/ctype.h>
-#include <linux/dma-mapping.h>
-#include <linux/moduleparam.h>
-#include <linux/bitops.h>
-#include <linux/gfp.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <asm/byteorder.h>
-#include <asm/unaligned.h>
-#include <linux/uaccess.h>
-#ifdef CONFIG_PPC_PMAC
-#include <asm/machdep.h>
-#endif /* CONFIG_PPC_PMAC */
-
-#include "de4x5.h"
-
-static const char version[] =
- KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
-
-#define c_char const char
-
-/*
-** MII Information
-*/
-struct phy_table {
- int reset; /* Hard reset required? */
- int id; /* IEEE OUI */
- int ta; /* One cycle TA time - 802.3u is confusing here */
- struct { /* Non autonegotiation (parallel) speed det. */
- int reg;
- int mask;
- int value;
- } spd;
-};
-
-struct mii_phy {
- int reset; /* Hard reset required? */
- int id; /* IEEE OUI */
- int ta; /* One cycle TA time */
- struct { /* Non autonegotiation (parallel) speed det. */
- int reg;
- int mask;
- int value;
- } spd;
- int addr; /* MII address for the PHY */
- u_char *gep; /* Start of GEP sequence block in SROM */
- u_char *rst; /* Start of reset sequence in SROM */
- u_int mc; /* Media Capabilities */
- u_int ana; /* NWay Advertisement */
- u_int fdx; /* Full DupleX capabilities for each media */
- u_int ttm; /* Transmit Threshold Mode for each media */
- u_int mci; /* 21142 MII Connector Interrupt info */
-};
-
-#define DE4X5_MAX_PHY 8 /* Allow up to 8 attached PHY devices per board */
-
-struct sia_phy {
- u_char mc; /* Media Code */
- u_char ext; /* csr13-15 valid when set */
- int csr13; /* SIA Connectivity Register */
- int csr14; /* SIA TX/RX Register */
- int csr15; /* SIA General Register */
- int gepc; /* SIA GEP Control Information */
- int gep; /* SIA GEP Data */
-};
-
-/*
-** Define the known universe of PHY devices that can be
-** recognised by this driver.
-*/
-static struct phy_table phy_info[] = {
- {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}}, /* National TX */
- {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}}, /* Broadcom T4 */
- {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}}, /* SEEQ T4 */
- {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}, /* Cypress T4 */
- {0, 0x7810 , 1, {0x14, 0x0800, 0x0800}} /* Level One LTX970 */
-};
-
-/*
-** These GENERIC values assume that the PHY devices follow 802.3u and
-** allow parallel detection to set the link partner ability register.
-** Detection of 100Base-TX [H/F Duplex] and 100Base-T4 is supported.
-*/
-#define GENERIC_REG 0x05 /* Autoneg. Link Partner Advertisement Reg. */
-#define GENERIC_MASK MII_ANLPA_100M /* All 100Mb/s Technologies */
-#define GENERIC_VALUE MII_ANLPA_100M /* 100B-TX, 100B-TX FDX, 100B-T4 */
-
-/*
-** Define special SROM detection cases
-*/
-static c_char enet_det[][ETH_ALEN] = {
- {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
-};
-
-#define SMC 1
-#define ACCTON 2
-
-/*
-** SROM Repair definitions. If a broken SROM is detected a card may
-** use this information to help figure out what to do. This is a
-** "stab in the dark" and so far for SMC9332's only.
-*/
-static c_char srom_repair_info[][100] = {
- {0x00,0x1e,0x00,0x00,0x00,0x08, /* SMC9332 */
- 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
- 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
- 0x00,0x18,}
-};
-
-
-#ifdef DE4X5_DEBUG
-static int de4x5_debug = DE4X5_DEBUG;
-#else
-/*static int de4x5_debug = (DEBUG_MII | DEBUG_SROM | DEBUG_PCICFG | DEBUG_MEDIA | DEBUG_VERSION);*/
-static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
-#endif
-
-/*
-** Allow per adapter set up. For modules this is simply a command line
-** parameter, e.g.:
-** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
-**
-** For a compiled in driver, place e.g.
-** #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
-** here
-*/
-#ifdef DE4X5_PARM
-static char *args = DE4X5_PARM;
-#else
-static char *args;
-#endif
-
-struct parameters {
- bool fdx;
- int autosense;
-};
-
-#define DE4X5_AUTOSENSE_MS 250 /* msec autosense tick (DE500) */
-
-#define DE4X5_NDA 0xffe0 /* No Device (I/O) Address */
-
-/*
-** Ethernet PROM defines
-*/
-#define PROBE_LENGTH 32
-#define ETH_PROM_SIG 0xAA5500FFUL
-
-/*
-** Ethernet Info
-*/
-#define PKT_BUF_SZ 1536 /* Buffer size for each Tx/Rx buffer */
-#define IEEE802_3_SZ 1518 /* Packet + CRC */
-#define MAX_PKT_SZ 1514 /* Maximum ethernet packet length */
-#define MAX_DAT_SZ 1500 /* Maximum ethernet data length */
-#define MIN_DAT_SZ 1 /* Minimum ethernet data length */
-#define PKT_HDR_LEN 14 /* Addresses and data length info */
-#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
-#define QUEUE_PKT_TIMEOUT (3*HZ) /* 3 second timeout */
-
-
-/*
-** EISA bus defines
-*/
-#define DE4X5_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
-#define DE4X5_EISA_TOTAL_SIZE 0x100 /* I/O address extent */
-
-#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}
-
-#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
-#define DE4X5_NAME_LENGTH 8
-
-static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
-
-/*
-** Ethernet PROM defines for DC21040
-*/
-#define PROBE_LENGTH 32
-#define ETH_PROM_SIG 0xAA5500FFUL
-
-/*
-** PCI Bus defines
-*/
-#define PCI_MAX_BUS_NUM 8
-#define DE4X5_PCI_TOTAL_SIZE 0x80 /* I/O address extent */
-#define DE4X5_CLASS_CODE 0x00020000 /* Network controller, Ethernet */
-
-/*
-** Memory Alignment. Each descriptor is 4 longwords long. To force a
-** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
-** DESC_ALIGN. ALIGN aligns the start address of the private memory area
-** and hence the RX descriptor ring's first entry.
-*/
-#define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
-#define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */
-#define DE4X5_ALIGN16 ((u_long)16 - 1) /* 4 longword align */
-#define DE4X5_ALIGN32 ((u_long)32 - 1) /* 8 longword align */
-#define DE4X5_ALIGN64 ((u_long)64 - 1) /* 16 longword align */
-#define DE4X5_ALIGN128 ((u_long)128 - 1) /* 32 longword align */
-
-#define DE4X5_ALIGN DE4X5_ALIGN32 /* Keep the DC21040 happy... */
-#define DE4X5_CACHE_ALIGN CAL_16LONG
-#define DESC_SKIP_LEN DSL_0 /* Must agree with DESC_ALIGN */
-/*#define DESC_ALIGN u32 dummy[4]; / * Must agree with DESC_SKIP_LEN */
-#define DESC_ALIGN
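A minimal standalone sketch of the alignment idiom behind the DE4X5_ALIGNnn masks above (the helper name round_up_masked is illustrative only); the same (addr + mask) & ~mask pattern appears later in de4x5_hw_init() when the RX buffers are placed:

#include <stdio.h>

#define ALIGN_MASK_32 ((unsigned long)32 - 1)	/* mirrors DE4X5_ALIGN32 */

/* Round addr up to the next (mask + 1)-byte boundary and clear the low bits. */
static unsigned long round_up_masked(unsigned long addr, unsigned long mask)
{
	return (addr + mask) & ~mask;
}

int main(void)
{
	/* 0x1005 rounds up to 0x1020, an 8 longword (32 byte) boundary */
	printf("0x%lx\n", round_up_masked(0x1005UL, ALIGN_MASK_32));
	return 0;
}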
-
-#ifndef DEC_ONLY /* See README.de4x5 for using this */
-static int dec_only;
-#else
-static int dec_only = 1;
-#endif
-
-/*
-** DE4X5 IRQ ENABLE/DISABLE
-*/
-#define ENABLE_IRQs { \
- imr |= lp->irq_en;\
- outl(imr, DE4X5_IMR); /* Enable the IRQs */\
-}
-
-#define DISABLE_IRQs {\
- imr = inl(DE4X5_IMR);\
- imr &= ~lp->irq_en;\
- outl(imr, DE4X5_IMR); /* Disable the IRQs */\
-}
-
-#define UNMASK_IRQs {\
- imr |= lp->irq_mask;\
- outl(imr, DE4X5_IMR); /* Unmask the IRQs */\
-}
-
-#define MASK_IRQs {\
- imr = inl(DE4X5_IMR);\
- imr &= ~lp->irq_mask;\
- outl(imr, DE4X5_IMR); /* Mask the IRQs */\
-}
-
-/*
-** DE4X5 START/STOP
-*/
-#define START_DE4X5 {\
- omr = inl(DE4X5_OMR);\
- omr |= OMR_ST | OMR_SR;\
- outl(omr, DE4X5_OMR); /* Enable the TX and/or RX */\
-}
-
-#define STOP_DE4X5 {\
- omr = inl(DE4X5_OMR);\
- omr &= ~(OMR_ST|OMR_SR);\
- outl(omr, DE4X5_OMR); /* Disable the TX and/or RX */ \
-}
-
-/*
-** DE4X5 SIA RESET
-*/
-#define RESET_SIA outl(0, DE4X5_SICR); /* Reset SIA connectivity regs */
-
-/*
-** DE500 AUTOSENSE TIMER INTERVAL (MILLISECS)
-*/
-#define DE4X5_AUTOSENSE_MS 250
-
-/*
-** SROM Structure
-*/
-struct de4x5_srom {
- char sub_vendor_id[2];
- char sub_system_id[2];
- char reserved[12];
- char id_block_crc;
- char reserved2;
- char version;
- char num_controllers;
- char ieee_addr[6];
- char info[100];
- short chksum;
-};
-#define SUB_VENDOR_ID 0x500a
-
-/*
-** DE4X5 Descriptors. Make sure that all the RX buffers are contiguous
-** and have sizes of both a power of 2 and a multiple of 4.
-** A size of 256 bytes for each buffer could be chosen because over 90% of
-** all packets in our network are <256 bytes long and 64 longword alignment
-** is possible. 1536 showed better 'ttcp' performance. Take your pick. 32 TX
-** descriptors are needed for machines with an ALPHA CPU.
-*/
-#define NUM_RX_DESC 8 /* Number of RX descriptors */
-#define NUM_TX_DESC 32 /* Number of TX descriptors */
-#define RX_BUFF_SZ 1536 /* Power of 2 for kmalloc and */
- /* Multiple of 4 for DC21040 */
- /* Allows 512 byte alignment */
-struct de4x5_desc {
- volatile __le32 status;
- __le32 des1;
- __le32 buf;
- __le32 next;
- DESC_ALIGN
-};
-
-/*
-** The DE4X5 private structure
-*/
-#define DE4X5_PKT_STAT_SZ 16
-#define DE4X5_PKT_BIN_SZ 128 /* Should be >=100 unless you
- increase DE4X5_PKT_STAT_SZ */
-
-struct pkt_stats {
- u_int bins[DE4X5_PKT_STAT_SZ]; /* Private stats counters */
- u_int unicast;
- u_int multicast;
- u_int broadcast;
- u_int excessive_collisions;
- u_int tx_underruns;
- u_int excessive_underruns;
- u_int rx_runt_frames;
- u_int rx_collision;
- u_int rx_dribble;
- u_int rx_overflow;
-};
-
-struct de4x5_private {
- char adapter_name[80]; /* Adapter name */
- u_long interrupt; /* Aligned ISR flag */
- struct de4x5_desc *rx_ring; /* RX descriptor ring */
- struct de4x5_desc *tx_ring; /* TX descriptor ring */
- struct sk_buff *tx_skb[NUM_TX_DESC]; /* TX skb for freeing when sent */
- struct sk_buff *rx_skb[NUM_RX_DESC]; /* RX skb's */
- int rx_new, rx_old; /* RX descriptor ring pointers */
- int tx_new, tx_old; /* TX descriptor ring pointers */
- char setup_frame[SETUP_FRAME_LEN]; /* Holds MCA and PA info. */
- char frame[64]; /* Min sized packet for loopback*/
- spinlock_t lock; /* Adapter specific spinlock */
- struct net_device_stats stats; /* Public stats */
- struct pkt_stats pktStats; /* Private stats counters */
- char rxRingSize;
- char txRingSize;
- int bus; /* EISA or PCI */
- int bus_num; /* PCI Bus number */
- int device; /* Device number on PCI bus */
- int state; /* Adapter OPENED or CLOSED */
- int chipset; /* DC21040, DC21041 or DC21140 */
- s32 irq_mask; /* Interrupt Mask (Enable) bits */
- s32 irq_en; /* Summary interrupt bits */
- int media; /* Media (eg TP), mode (eg 100B)*/
- int c_media; /* Remember the last media conn */
- bool fdx; /* media full duplex flag */
- int linkOK; /* Link is OK */
- int autosense; /* Allow/disallow autosensing */
- bool tx_enable; /* Enable descriptor polling */
- int setup_f; /* Setup frame filtering type */
- int local_state; /* State within a 'media' state */
- struct mii_phy phy[DE4X5_MAX_PHY]; /* List of attached PHY devices */
- struct sia_phy sia; /* SIA PHY Information */
- int active; /* Index to active PHY device */
- int mii_cnt; /* Number of attached PHY's */
- int timeout; /* Scheduling counter */
- struct timer_list timer; /* Timer info for kernel */
- int tmp; /* Temporary global per card */
- struct {
- u_long lock; /* Lock the cache accesses */
- s32 csr0; /* Saved Bus Mode Register */
- s32 csr6; /* Saved Operating Mode Reg. */
- s32 csr7; /* Saved IRQ Mask Register */
- s32 gep; /* Saved General Purpose Reg. */
- s32 gepc; /* Control info for GEP */
- s32 csr13; /* Saved SIA Connectivity Reg. */
- s32 csr14; /* Saved SIA TX/RX Register */
- s32 csr15; /* Saved SIA General Register */
- int save_cnt; /* Flag if state already saved */
- struct sk_buff_head queue; /* Save the (re-ordered) skb's */
- } cache;
- struct de4x5_srom srom; /* A copy of the SROM */
- int cfrv; /* Card CFRV copy */
- int rx_ovf; /* Check for 'RX overflow' tag */
- bool useSROM; /* For non-DEC card use SROM */
- bool useMII; /* Infoblock using the MII */
- int asBitValid; /* Autosense bits in GEP? */
- int asPolarity; /* 0 => asserted high */
- int asBit; /* Autosense bit number in GEP */
- int defMedium; /* SROM default medium */
- int tcount; /* Last infoblock number */
- int infoblock_init; /* Initialised this infoblock? */
- int infoleaf_offset; /* SROM infoleaf for controller */
- s32 infoblock_csr6; /* csr6 value in SROM infoblock */
- int infoblock_media; /* infoblock media */
- int (*infoleaf_fn)(struct net_device *); /* Pointer to infoleaf function */
- u_char *rst; /* Pointer to Type 5 reset info */
- u_char ibn; /* Infoblock number */
- struct parameters params; /* Command line/ #defined params */
- struct device *gendev; /* Generic device */
- dma_addr_t dma_rings; /* DMA handle for rings */
- int dma_size; /* Size of the DMA area */
- char *rx_bufs; /* rx bufs on alpha, sparc, ... */
-};
-
-/*
-** To get around certain poxy cards that don't provide an SROM
-** for the second and more DECchip, I have to key off the first
-** chip's address. I'll assume there's not a bad SROM iff:
-**
-** o the chipset is the same
-** o the bus number is the same and > 0
-** o the sum of all the returned hw address bytes is 0 or 0x5fa
-**
-** Also have to save the irq for those cards whose hardware designers
-** can't follow the PCI to PCI Bridge Architecture spec.
-*/
-static struct {
- int chipset;
- int bus;
- int irq;
- u_char addr[ETH_ALEN];
-} last = {0,};
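A small hypothetical sketch of the bad-SROM keying rule described above; the helper name, parameters and the sample id in main() are illustrative only. A later chip is assumed to share the first chip's SROM only when the chipset matches, the bus number matches and is greater than 0, and the returned hardware address bytes sum to 0 or 0x5fa:

#include <stdio.h>

/* Mirror the three conditions listed in the comment above. */
static int shares_first_chips_srom(int chipset, int bus,
				   const unsigned char addr[6],
				   int last_chipset, int last_bus)
{
	int i, sum = 0;

	for (i = 0; i < 6; i++)
		sum += addr[i];

	return chipset == last_chipset &&
	       bus == last_bus && bus > 0 &&
	       (sum == 0 || sum == 0x5fa);
}

int main(void)
{
	const unsigned char blank[6] = { 0 };	/* all-zero address, sum == 0 */
	int id = 0x0914;			/* placeholder chipset id */

	printf("%d\n", shares_first_chips_srom(id, 1, blank, id, 1));
	return 0;
}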
-
-/*
-** The transmit ring full condition is described by the tx_old and tx_new
-** pointers by:
-** tx_old = tx_new Empty ring
-** tx_old = tx_new+1 Full ring
-** tx_old+txRingSize = tx_new+1 Full ring (wrapped condition)
-*/
-#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
- lp->tx_old+lp->txRingSize-lp->tx_new-1:\
- lp->tx_old -lp->tx_new-1)
-
-#define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
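A minimal sketch (helper and indices named for illustration) of the TX_BUFFS_AVAIL arithmetic the comment above describes; the -1 keeps one slot unused so a full ring (tx_old == tx_new + 1, modulo the ring size) can be told apart from an empty one (tx_old == tx_new):

#include <stdio.h>

/* Free TX descriptors for a ring of 'size' slots with consumer index
 * tx_old and producer index tx_new, mirroring TX_BUFFS_AVAIL.
 */
static int tx_buffs_avail(int tx_old, int tx_new, int size)
{
	if (tx_old <= tx_new)
		return tx_old + size - tx_new - 1;
	return tx_old - tx_new - 1;
}

int main(void)
{
	printf("%d\n", tx_buffs_avail(0, 0, 32));	/* empty ring: 31 free */
	printf("%d\n", tx_buffs_avail(5, 4, 32));	/* full ring: 0 free */
	return 0;
}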
-
-/*
-** Public Functions
-*/
-static int de4x5_open(struct net_device *dev);
-static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t de4x5_interrupt(int irq, void *dev_id);
-static int de4x5_close(struct net_device *dev);
-static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
-static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
-static void set_multicast_list(struct net_device *dev);
-static int de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq,
- void __user *data, int cmd);
-
-/*
-** Private functions
-*/
-static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev);
-static int de4x5_init(struct net_device *dev);
-static int de4x5_sw_reset(struct net_device *dev);
-static int de4x5_rx(struct net_device *dev);
-static int de4x5_tx(struct net_device *dev);
-static void de4x5_ast(struct timer_list *t);
-static int de4x5_txur(struct net_device *dev);
-static int de4x5_rx_ovfc(struct net_device *dev);
-
-static int autoconf_media(struct net_device *dev);
-static void create_packet(struct net_device *dev, char *frame, int len);
-static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
-static int dc21040_autoconf(struct net_device *dev);
-static int dc21041_autoconf(struct net_device *dev);
-static int dc21140m_autoconf(struct net_device *dev);
-static int dc2114x_autoconf(struct net_device *dev);
-static int srom_autoconf(struct net_device *dev);
-static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *));
-static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int));
-static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
-static int test_for_100Mb(struct net_device *dev, int msec);
-static int wait_for_link(struct net_device *dev);
-static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
-static int is_spd_100(struct net_device *dev);
-static int is_100_up(struct net_device *dev);
-static int is_10_up(struct net_device *dev);
-static int is_anc_capable(struct net_device *dev);
-static int ping_media(struct net_device *dev, int msec);
-static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len);
-static void de4x5_free_rx_buffs(struct net_device *dev);
-static void de4x5_free_tx_buffs(struct net_device *dev);
-static void de4x5_save_skbs(struct net_device *dev);
-static void de4x5_rst_desc_ring(struct net_device *dev);
-static void de4x5_cache_state(struct net_device *dev, int flag);
-static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
-static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
-static struct sk_buff *de4x5_get_cache(struct net_device *dev);
-static void de4x5_setup_intr(struct net_device *dev);
-static void de4x5_init_connection(struct net_device *dev);
-static int de4x5_reset_phy(struct net_device *dev);
-static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr);
-static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
-static int test_tp(struct net_device *dev, s32 msec);
-static int EISA_signature(char *name, struct device *device);
-static void PCI_signature(char *name, struct de4x5_private *lp);
-static void DevicePresent(struct net_device *dev, u_long iobase);
-static void enet_addr_rst(u_long aprom_addr);
-static int de4x5_bad_srom(struct de4x5_private *lp);
-static short srom_rd(u_long address, u_char offset);
-static void srom_latch(u_int command, u_long address);
-static void srom_command(u_int command, u_long address);
-static void srom_address(u_int command, u_long address, u_char offset);
-static short srom_data(u_int command, u_long address);
-/*static void srom_busy(u_int command, u_long address);*/
-static void sendto_srom(u_int command, u_long addr);
-static int getfrom_srom(u_long addr);
-static int srom_map_media(struct net_device *dev);
-static int srom_infoleaf_info(struct net_device *dev);
-static void srom_init(struct net_device *dev);
-static void srom_exec(struct net_device *dev, u_char *p);
-static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
-static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
-static int mii_rdata(u_long ioaddr);
-static void mii_wdata(int data, int len, u_long ioaddr);
-static void mii_ta(u_long rw, u_long ioaddr);
-static int mii_swap(int data, int len);
-static void mii_address(u_char addr, u_long ioaddr);
-static void sendto_mii(u32 command, int data, u_long ioaddr);
-static int getfrom_mii(u32 command, u_long ioaddr);
-static int mii_get_oui(u_char phyaddr, u_long ioaddr);
-static int mii_get_phy(struct net_device *dev);
-static void SetMulticastFilter(struct net_device *dev);
-static int get_hw_addr(struct net_device *dev);
-static void srom_repair(struct net_device *dev, int card);
-static int test_bad_enet(struct net_device *dev, int status);
-static int an_exception(struct de4x5_private *lp);
-static char *build_setup_frame(struct net_device *dev, int mode);
-static void disable_ast(struct net_device *dev);
-static long de4x5_switch_mac_port(struct net_device *dev);
-static int gep_rd(struct net_device *dev);
-static void gep_wr(s32 data, struct net_device *dev);
-static void yawn(struct net_device *dev, int state);
-static void de4x5_parse_params(struct net_device *dev);
-static void de4x5_dbg_open(struct net_device *dev);
-static void de4x5_dbg_mii(struct net_device *dev, int k);
-static void de4x5_dbg_media(struct net_device *dev);
-static void de4x5_dbg_srom(struct de4x5_srom *p);
-static void de4x5_dbg_rx(struct sk_buff *skb, int len);
-static int dc21041_infoleaf(struct net_device *dev);
-static int dc21140_infoleaf(struct net_device *dev);
-static int dc21142_infoleaf(struct net_device *dev);
-static int dc21143_infoleaf(struct net_device *dev);
-static int type0_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type1_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type2_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type3_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type4_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int type5_infoblock(struct net_device *dev, u_char count, u_char *p);
-static int compact_infoblock(struct net_device *dev, u_char count, u_char *p);
-
-/*
-** Note now that module autoprobing is allowed under EISA and PCI. The
-** IRQ lines will not be auto-detected; instead I'll rely on the BIOSes
-** to "do the right thing".
-*/
-
-static int io=0x0;/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED */
-
-module_param_hw(io, int, ioport, 0);
-module_param(de4x5_debug, int, 0);
-module_param(dec_only, int, 0);
-module_param(args, charp, 0);
-
-MODULE_PARM_DESC(io, "de4x5 I/O base address");
-MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
-MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
-MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
-MODULE_LICENSE("GPL");
-
-/*
-** List the SROM infoleaf functions and chipsets
-*/
-struct InfoLeaf {
- int chipset;
- int (*fn)(struct net_device *);
-};
-static struct InfoLeaf infoleaf_array[] = {
- {DC21041, dc21041_infoleaf},
- {DC21140, dc21140_infoleaf},
- {DC21142, dc21142_infoleaf},
- {DC21143, dc21143_infoleaf}
-};
-#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array)
-
-/*
-** List the SROM info block functions
-*/
-static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
- type0_infoblock,
- type1_infoblock,
- type2_infoblock,
- type3_infoblock,
- type4_infoblock,
- type5_infoblock,
- compact_infoblock
-};
-
-#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1)
-
-/*
-** Miscellaneous defines...
-*/
-#define RESET_DE4X5 {\
- int i;\
- i=inl(DE4X5_BMR);\
- mdelay(1);\
- outl(i | BMR_SWR, DE4X5_BMR);\
- mdelay(1);\
- outl(i, DE4X5_BMR);\
- mdelay(1);\
- for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
- mdelay(1);\
-}
-
-#define PHY_HARD_RESET {\
- outl(GEP_HRST, DE4X5_GEP); /* Hard RESET the PHY dev. */\
- mdelay(1); /* Assert for 1ms */\
- outl(0x00, DE4X5_GEP);\
- mdelay(2); /* Wait for 2ms */\
-}
-
-static const struct net_device_ops de4x5_netdev_ops = {
- .ndo_open = de4x5_open,
- .ndo_stop = de4x5_close,
- .ndo_start_xmit = de4x5_queue_pkt,
- .ndo_get_stats = de4x5_get_stats,
- .ndo_set_rx_mode = set_multicast_list,
- .ndo_siocdevprivate = de4x5_siocdevprivate,
- .ndo_set_mac_address= eth_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-
-static int
-de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
-{
- char name[DE4X5_NAME_LENGTH + 1];
- struct de4x5_private *lp = netdev_priv(dev);
- struct pci_dev *pdev = NULL;
- int i, status=0;
-
- dev_set_drvdata(gendev, dev);
-
- /* Ensure we're not sleeping */
- if (lp->bus == EISA) {
- outb(WAKEUP, PCI_CFPM);
- } else {
- pdev = to_pci_dev (gendev);
- pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
- }
- mdelay(10);
-
- RESET_DE4X5;
-
- if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
- return -ENXIO; /* Hardware could not reset */
- }
-
- /*
- ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
- */
- lp->useSROM = false;
- if (lp->bus == PCI) {
- PCI_signature(name, lp);
- } else {
- EISA_signature(name, gendev);
- }
-
- if (*name == '\0') { /* Not found a board signature */
- return -ENXIO;
- }
-
- dev->base_addr = iobase;
- printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);
-
- status = get_hw_addr(dev);
- printk(", h/w address %pM\n", dev->dev_addr);
-
- if (status != 0) {
- printk(" which has an Ethernet PROM CRC error.\n");
- return -ENXIO;
- } else {
- skb_queue_head_init(&lp->cache.queue);
- lp->cache.gepc = GEP_INIT;
- lp->asBit = GEP_SLNK;
- lp->asPolarity = GEP_SLNK;
- lp->asBitValid = ~0;
- lp->timeout = -1;
- lp->gendev = gendev;
- spin_lock_init(&lp->lock);
- timer_setup(&lp->timer, de4x5_ast, 0);
- de4x5_parse_params(dev);
-
- /*
- ** Choose correct autosensing in case someone messed up
- */
- lp->autosense = lp->params.autosense;
- if (lp->chipset != DC21140) {
- if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
- lp->params.autosense = TP;
- }
- if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
- lp->params.autosense = BNC;
- }
- }
- lp->fdx = lp->params.fdx;
- sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));
-
- lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
-#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
- lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
-#endif
- lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
- &lp->dma_rings, GFP_ATOMIC);
- if (lp->rx_ring == NULL) {
- return -ENOMEM;
- }
-
- lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
-
- /*
- ** Set up the RX descriptor ring (Intels)
- ** Allocate contiguous receive buffers, long word aligned (Alphas)
- */
-#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
- for (i=0; i<NUM_RX_DESC; i++) {
- lp->rx_ring[i].status = 0;
- lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
- lp->rx_ring[i].buf = 0;
- lp->rx_ring[i].next = 0;
- lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
- }
-
-#else
- {
- dma_addr_t dma_rx_bufs;
-
- dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC)
- * sizeof(struct de4x5_desc);
- dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
- lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC
- + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN);
- for (i=0; i<NUM_RX_DESC; i++) {
- lp->rx_ring[i].status = 0;
- lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
- lp->rx_ring[i].buf =
- cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ);
- lp->rx_ring[i].next = 0;
- lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
- }
-
- }
-#endif
-
- barrier();
-
- lp->rxRingSize = NUM_RX_DESC;
- lp->txRingSize = NUM_TX_DESC;
-
- /* Write the end of list marker to the descriptor lists */
- lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
- lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
-
- /* Tell the adapter where the TX/RX rings are located. */
- outl(lp->dma_rings, DE4X5_RRBA);
- outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
- DE4X5_TRBA);
-
- /* Initialise the IRQ mask and Enable/Disable */
- lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
- lp->irq_en = IMR_NIM | IMR_AIM;
-
- /* Create a loopback packet frame for later media probing */
- create_packet(dev, lp->frame, sizeof(lp->frame));
-
- /* Check if the RX overflow bug needs testing for */
- i = lp->cfrv & 0x000000fe;
- if ((lp->chipset == DC21140) && (i == 0x20)) {
- lp->rx_ovf = 1;
- }
-
- /* Initialise the SROM pointers if possible */
- if (lp->useSROM) {
- lp->state = INITIALISED;
- if (srom_infoleaf_info(dev)) {
- dma_free_coherent (gendev, lp->dma_size,
- lp->rx_ring, lp->dma_rings);
- return -ENXIO;
- }
- srom_init(dev);
- }
-
- lp->state = CLOSED;
-
- /*
- ** Check for an MII interface
- */
- if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
- mii_get_phy(dev);
- }
-
- printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
- ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
- }
-
- if (de4x5_debug & DEBUG_VERSION) {
- printk(version);
- }
-
- /* The DE4X5-specific entries in the device structure. */
- SET_NETDEV_DEV(dev, gendev);
- dev->netdev_ops = &de4x5_netdev_ops;
- dev->mem_start = 0;
-
- /* Fill in the generic fields of the device structure. */
- if ((status = register_netdev (dev))) {
- dma_free_coherent (gendev, lp->dma_size,
- lp->rx_ring, lp->dma_rings);
- return status;
- }
-
- /* Let the adapter sleep to save power */
- yawn(dev, SLEEP);
-
- return status;
-}
-
-
-static int
-de4x5_open(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i, status = 0;
- s32 omr;
-
- /* Allocate the RX buffers */
- for (i=0; i<lp->rxRingSize; i++) {
- if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
- de4x5_free_rx_buffs(dev);
- return -EAGAIN;
- }
- }
-
- /*
- ** Wake up the adapter
- */
- yawn(dev, WAKEUP);
-
- /*
- ** Re-initialize the DE4X5...
- */
- status = de4x5_init(dev);
- spin_lock_init(&lp->lock);
- lp->state = OPEN;
- de4x5_dbg_open(dev);
-
- if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
- lp->adapter_name, dev)) {
- printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq);
- if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
- lp->adapter_name, dev)) {
- printk("\n Cannot get IRQ- reconfigure your hardware.\n");
- disable_ast(dev);
- de4x5_free_rx_buffs(dev);
- de4x5_free_tx_buffs(dev);
- yawn(dev, SLEEP);
- lp->state = CLOSED;
- return -EAGAIN;
- } else {
- printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
- printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
- }
- }
-
- lp->interrupt = UNMASK_INTERRUPTS;
- netif_trans_update(dev); /* prevent tx timeout */
-
- START_DE4X5;
-
- de4x5_setup_intr(dev);
-
- if (de4x5_debug & DEBUG_OPEN) {
- printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
- printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
- printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
- printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
- printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
- printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
- printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
- printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
- }
-
- return status;
-}
-
-/*
-** Initialize the DE4X5 operating conditions. NB: a chip problem with the
-** DC21140 requires using perfect filtering mode for that chip. Since I can't
-** see why I'd want > 14 multicast addresses, I have changed all chips to use
-** the perfect filtering mode. Keep the DMA burst length at 8: there seems
-** to be data corruption problems if it is larger (UDP errors seen from a
-** ttcp source).
-*/
-static int
-de4x5_init(struct net_device *dev)
-{
- /* Lock out other processes whilst setting up the hardware */
- netif_stop_queue(dev);
-
- de4x5_sw_reset(dev);
-
- /* Autoconfigure the connected port */
- autoconf_media(dev);
-
- return 0;
-}
-
-static int
-de4x5_sw_reset(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i, j, status = 0;
- s32 bmr, omr;
-
- /* Select the MII or SRL port now and RESET the MAC */
- if (!lp->useSROM) {
- if (lp->phy[lp->active].id != 0) {
- lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
- } else {
- lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
- }
- de4x5_switch_mac_port(dev);
- }
-
- /*
- ** Set the programmable burst length to 8 longwords for all the DC21140
- ** Fasternet chips and 4 longwords for all others: DMA errors result
- ** without these values. Cache align 16 long.
- */
- bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
- bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
- outl(bmr, DE4X5_BMR);
-
- omr = inl(DE4X5_OMR) & ~OMR_PR; /* Turn off promiscuous mode */
- if (lp->chipset == DC21140) {
- omr |= (OMR_SDP | OMR_SB);
- }
- lp->setup_f = PERFECT;
- outl(lp->dma_rings, DE4X5_RRBA);
- outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
- DE4X5_TRBA);
-
- lp->rx_new = lp->rx_old = 0;
- lp->tx_new = lp->tx_old = 0;
-
- for (i = 0; i < lp->rxRingSize; i++) {
- lp->rx_ring[i].status = cpu_to_le32(R_OWN);
- }
-
- for (i = 0; i < lp->txRingSize; i++) {
- lp->tx_ring[i].status = cpu_to_le32(0);
- }
-
- barrier();
-
- /* Build the setup frame depending on filtering mode */
- SetMulticastFilter(dev);
-
- load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
- outl(omr|OMR_ST, DE4X5_OMR);
-
- /* Poll for setup frame completion (adapter interrupts are disabled now) */
-
- for (j=0, i=0;(i<500) && (j==0);i++) { /* Up to 500ms delay */
- mdelay(1);
- if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
- }
- outl(omr, DE4X5_OMR); /* Stop everything! */
-
- if (j == 0) {
- printk("%s: Setup frame timed out, status %08x\n", dev->name,
- inl(DE4X5_STS));
- status = -EIO;
- }
-
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- lp->tx_old = lp->tx_new;
-
- return status;
-}
-
-/*
-** Writes a socket buffer address to the next available transmit descriptor.
-*/
-static netdev_tx_t
-de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_long flags = 0;
-
- netif_stop_queue(dev);
- if (!lp->tx_enable) /* Cannot send for now */
- goto tx_err;
-
- /*
- ** Clean out the TX ring asynchronously to interrupts - sometimes the
- ** interrupts are lost by delayed descriptor status updates relative to
- ** the irq assertion, especially with a busy PCI bus.
- */
- spin_lock_irqsave(&lp->lock, flags);
- de4x5_tx(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- /* Test if cache is already locked - requeue skb if so */
- if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
- goto tx_err;
-
- /* Transmit descriptor ring full or stale skb */
- if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
- if (lp->interrupt) {
- de4x5_putb_cache(dev, skb); /* Requeue the buffer */
- } else {
- de4x5_put_cache(dev, skb);
- }
- if (de4x5_debug & DEBUG_TX) {
- printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
- }
- } else if (skb->len > 0) {
- /* If we already have stuff queued locally, use that first */
- if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
- de4x5_put_cache(dev, skb);
- skb = de4x5_get_cache(dev);
- }
-
- while (skb && !netif_queue_stopped(dev) &&
- (u_long) lp->tx_skb[lp->tx_new] <= 1) {
- spin_lock_irqsave(&lp->lock, flags);
- netif_stop_queue(dev);
- load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
- lp->stats.tx_bytes += skb->len;
- outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
-
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
-
- if (TX_BUFFS_AVAIL) {
- netif_start_queue(dev); /* Another pkt may be queued */
- }
- skb = de4x5_get_cache(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
- }
- if (skb) de4x5_putb_cache(dev, skb);
- }
-
- lp->cache.lock = 0;
-
- return NETDEV_TX_OK;
-tx_err:
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
-}
-
-/*
-** The DE4X5 interrupt handler.
-**
-** I/O Read/Writes through intermediate PCI bridges are never 'posted',
-** so that the asserted interrupt always has some real data to work with -
-** if these I/O accesses are ever changed to memory accesses, ensure the
-** STS write is read immediately to complete the transaction if the adapter
-** is not on bus 0. Lost interrupts can still occur when the PCI bus load
-** is high and descriptor status bits cannot be set before the associated
-** interrupt is asserted and this routine entered.
-*/
-static irqreturn_t
-de4x5_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct de4x5_private *lp;
- s32 imr, omr, sts, limit;
- u_long iobase;
- unsigned int handled = 0;
-
- lp = netdev_priv(dev);
- spin_lock(&lp->lock);
- iobase = dev->base_addr;
-
- DISABLE_IRQs; /* Ensure non re-entrancy */
-
- if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
- printk("%s: Re-entering the interrupt handler.\n", dev->name);
-
- synchronize_irq(dev->irq);
-
- for (limit=0; limit<8; limit++) {
- sts = inl(DE4X5_STS); /* Read IRQ status */
- outl(sts, DE4X5_STS); /* Reset the board interrupts */
-
- if (!(sts & lp->irq_mask)) break;/* All done */
- handled = 1;
-
- if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */
- de4x5_rx(dev);
-
- if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */
- de4x5_tx(dev);
-
- if (sts & STS_LNF) { /* TP Link has failed */
- lp->irq_mask &= ~IMR_LFM;
- }
-
- if (sts & STS_UNF) { /* Transmit underrun */
- de4x5_txur(dev);
- }
-
- if (sts & STS_SE) { /* Bus Error */
- STOP_DE4X5;
- printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
- dev->name, sts);
- spin_unlock(&lp->lock);
- return IRQ_HANDLED;
- }
- }
-
- /* Load the TX ring with any locally stored packets */
- if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
- while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
- de4x5_queue_pkt(de4x5_get_cache(dev), dev);
- }
- lp->cache.lock = 0;
- }
-
- lp->interrupt = UNMASK_INTERRUPTS;
- ENABLE_IRQs;
- spin_unlock(&lp->lock);
-
- return IRQ_RETVAL(handled);
-}
-
-static int
-de4x5_rx(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int entry;
- s32 status;
-
- for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
- entry=lp->rx_new) {
- status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
-
- if (lp->rx_ovf) {
- if (inl(DE4X5_MFC) & MFC_FOCM) {
- de4x5_rx_ovfc(dev);
- break;
- }
- }
-
- if (status & RD_FS) { /* Remember the start of frame */
- lp->rx_old = entry;
- }
-
- if (status & RD_LS) { /* Valid frame status */
- if (lp->tx_enable) lp->linkOK++;
- if (status & RD_ES) { /* There was an error. */
- lp->stats.rx_errors++; /* Update the error stats. */
- if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
- if (status & RD_CE) lp->stats.rx_crc_errors++;
- if (status & RD_OF) lp->stats.rx_fifo_errors++;
- if (status & RD_TL) lp->stats.rx_length_errors++;
- if (status & RD_RF) lp->pktStats.rx_runt_frames++;
- if (status & RD_CS) lp->pktStats.rx_collision++;
- if (status & RD_DB) lp->pktStats.rx_dribble++;
- if (status & RD_OF) lp->pktStats.rx_overflow++;
- } else { /* A valid frame received */
- struct sk_buff *skb;
- short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
- >> 16) - 4;
-
- if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
- printk("%s: Insufficient memory; nuking packet.\n",
- dev->name);
- lp->stats.rx_dropped++;
- } else {
- de4x5_dbg_rx(skb, pkt_len);
-
- /* Push up the protocol stack */
- skb->protocol=eth_type_trans(skb,dev);
- de4x5_local_stats(dev, skb->data, pkt_len);
- netif_rx(skb);
-
- /* Update stats */
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pkt_len;
- }
- }
-
- /* Change buffer ownership for this frame, back to the adapter */
- for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
- lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
- barrier();
- }
- lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
- barrier();
- }
-
- /*
- ** Update entry information
- */
- lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
- }
-
- return 0;
-}
-
-static inline void
-de4x5_free_tx_buff(struct de4x5_private *lp, int entry)
-{
- dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
- le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
- DMA_TO_DEVICE);
- if ((u_long) lp->tx_skb[entry] > 1)
- dev_kfree_skb_irq(lp->tx_skb[entry]);
- lp->tx_skb[entry] = NULL;
-}
-
-/*
-** Buffer sent - check for TX buffer errors.
-*/
-static int
-de4x5_tx(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int entry;
- s32 status;
-
- for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
- status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
- if (status < 0) { /* Buffer not sent yet */
- break;
- } else if (status != 0x7fffffff) { /* Not setup frame */
- if (status & TD_ES) { /* An error happened */
- lp->stats.tx_errors++;
- if (status & TD_NC) lp->stats.tx_carrier_errors++;
- if (status & TD_LC) lp->stats.tx_window_errors++;
- if (status & TD_UF) lp->stats.tx_fifo_errors++;
- if (status & TD_EC) lp->pktStats.excessive_collisions++;
- if (status & TD_DE) lp->stats.tx_aborted_errors++;
-
- if (TX_PKT_PENDING) {
- outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */
- }
- } else { /* Packet sent */
- lp->stats.tx_packets++;
- if (lp->tx_enable) lp->linkOK++;
- }
- /* Update the collision counter */
- lp->stats.collisions += ((status & TD_EC) ? 16 :
- ((status & TD_CC) >> 3));
-
- /* Free the buffer. */
- if (lp->tx_skb[entry] != NULL)
- de4x5_free_tx_buff(lp, entry);
- }
-
- /* Update all the pointers */
- lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
- }
-
- /* Any resources available? */
- if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
- if (lp->interrupt)
- netif_wake_queue(dev);
- else
- netif_start_queue(dev);
- }
-
- return 0;
-}
-
-static void
-de4x5_ast(struct timer_list *t)
-{
- struct de4x5_private *lp = from_timer(lp, t, timer);
- struct net_device *dev = dev_get_drvdata(lp->gendev);
- int next_tick = DE4X5_AUTOSENSE_MS;
- int dt;
-
- if (lp->useSROM)
- next_tick = srom_autoconf(dev);
- else if (lp->chipset == DC21140)
- next_tick = dc21140m_autoconf(dev);
- else if (lp->chipset == DC21041)
- next_tick = dc21041_autoconf(dev);
- else if (lp->chipset == DC21040)
- next_tick = dc21040_autoconf(dev);
- lp->linkOK = 0;
-
- dt = (next_tick * HZ) / 1000;
-
- if (!dt)
- dt = 1;
-
- mod_timer(&lp->timer, jiffies + dt);
-}
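
The autosense timer above reschedules itself every next_tick milliseconds, converting to timer ticks by rounding down but never below one tick. A minimal standalone sketch of that conversion follows; the HZ value of 250 is purely illustrative (the real value depends on the kernel configuration).

#include <stdio.h>

#define HZ 250			/* illustrative only; the real value comes from the kernel config */

/* Convert a delay in milliseconds to timer ticks, never returning 0,
 * mirroring the dt calculation in de4x5_ast() above. */
static int ms_to_ticks(int ms)
{
	int dt = (ms * HZ) / 1000;

	return dt ? dt : 1;
}

int main(void)
{
	printf("%d ms -> %d ticks\n", 3000, ms_to_ticks(3000));	/* 750 */
	printf("%d ms -> %d ticks\n", 1, ms_to_ticks(1));	/* clamped to 1 */
	return 0;
}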
-
-static int
-de4x5_txur(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int omr;
-
- omr = inl(DE4X5_OMR);
- if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
- omr &= ~(OMR_ST|OMR_SR);
- outl(omr, DE4X5_OMR);
- while (inl(DE4X5_STS) & STS_TS);
- if ((omr & OMR_TR) < OMR_TR) {
- omr += 0x4000;
- } else {
- omr |= OMR_SF;
- }
- outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
- }
-
- return 0;
-}
-
-static int
-de4x5_rx_ovfc(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int omr;
-
- omr = inl(DE4X5_OMR);
- outl(omr & ~OMR_SR, DE4X5_OMR);
- while (inl(DE4X5_STS) & STS_RS);
-
- for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
- lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
- lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
- }
-
- outl(omr, DE4X5_OMR);
-
- return 0;
-}
-
-static int
-de4x5_close(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 imr, omr;
-
- disable_ast(dev);
-
- netif_stop_queue(dev);
-
- if (de4x5_debug & DEBUG_CLOSE) {
- printk("%s: Shutting down ethercard, status was %8.8x.\n",
- dev->name, inl(DE4X5_STS));
- }
-
- /*
- ** We stop the DE4X5 here... mask interrupts and stop TX & RX
- */
- DISABLE_IRQs;
- STOP_DE4X5;
-
- /* Free the associated irq */
- free_irq(dev->irq, dev);
- lp->state = CLOSED;
-
- /* Free any socket buffers */
- de4x5_free_rx_buffs(dev);
- de4x5_free_tx_buffs(dev);
-
- /* Put the adapter to sleep to save power */
- yawn(dev, SLEEP);
-
- return 0;
-}
-
-static struct net_device_stats *
-de4x5_get_stats(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
-
- return &lp->stats;
-}
-
-static void
-de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
- for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
- if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
- lp->pktStats.bins[i]++;
- i = DE4X5_PKT_STAT_SZ;
- }
- }
- if (is_multicast_ether_addr(buf)) {
- if (is_broadcast_ether_addr(buf)) {
- lp->pktStats.broadcast++;
- } else {
- lp->pktStats.multicast++;
- }
- } else if (ether_addr_equal(buf, dev->dev_addr)) {
- lp->pktStats.unicast++;
- }
-
- lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
- if (lp->pktStats.bins[0] == 0) { /* Reset counters */
- memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
- }
-}
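
The loop above drops each received frame into a size bin of width DE4X5_PKT_BIN_SZ. A standalone sketch of the same binning scan follows; the bin width of 128 bytes and bin count of 8 are assumptions for illustration, not values taken from the driver header.

#include <stdio.h>

#define PKT_STAT_SZ	8	/* assumed bin count, for illustration only */
#define PKT_BIN_SZ	128	/* assumed bin width in bytes, for illustration only */

/* Return the index of the first size bin that pkt_len falls under, or 0 when
 * the frame is larger than every bin - mirroring the fall-through behaviour
 * of the scan in de4x5_local_stats() above. */
static int size_bin(int pkt_len)
{
	int i;

	for (i = 1; i < PKT_STAT_SZ - 1; i++)
		if (pkt_len < i * PKT_BIN_SZ)
			return i;
	return 0;
}

int main(void)
{
	printf("64-byte frame   -> bin %d\n", size_bin(64));	/* bin 1 */
	printf("1500-byte frame -> bin %d\n", size_bin(1500));	/* too big for any bin */
	return 0;
}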
-
-/*
-** Removes the TD_IC flag from previous descriptor to improve TX performance.
-** If the flag is changed on a descriptor that is being read by the hardware,
-** I assume PCI transaction ordering will mean you are either successful or
-** just miss asserting the change to the hardware. Anyway you're messing with
-** a descriptor you don't own, but this shouldn't kill the chip provided
-** the descriptor register is read only to the hardware.
-*/
-static void
-load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1);
- dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE);
-
- lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
- lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
- lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
- lp->tx_skb[lp->tx_new] = skb;
- lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
- barrier();
-
- lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
- barrier();
-}
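
load_packet() shows the usual ring-descriptor handoff: fill in every field first, issue a write barrier, and only then flip the ownership bit so the DMA engine never sees a half-built descriptor. Below is a minimal, self-contained sketch of that ordering using a hypothetical descriptor layout (not the real de4x5_desc) and a compiler builtin standing in for the driver's barrier().

#include <stdint.h>

#define DESC_OWN	0x80000000u	/* hypothetical ownership bit */

struct fake_desc {			/* hypothetical layout, for illustration only */
	volatile uint32_t status;
	uint32_t des1;
	uint32_t buf;
};

/* Publish a buffer to the (imaginary) DMA engine: fill the descriptor,
 * then make the writes visible before handing over ownership. */
static void publish(struct fake_desc *d, uint32_t buf_dma, uint32_t flags)
{
	d->buf = buf_dma;
	d->des1 = flags;
	__sync_synchronize();		/* stand-in for the driver's barrier() */
	d->status = DESC_OWN;		/* ownership flips last */
}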
-
-/*
-** Set or clear the multicast filter for this adaptor.
-*/
-static void
-set_multicast_list(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- /* First, double check that the adapter is open */
- if (lp->state == OPEN) {
- if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
- u32 omr;
- omr = inl(DE4X5_OMR);
- omr |= OMR_PR;
- outl(omr, DE4X5_OMR);
- } else {
- SetMulticastFilter(dev);
- load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
- SETUP_FRAME_LEN, (struct sk_buff *)1);
-
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- netif_trans_update(dev); /* prevent tx timeout */
- }
- }
-}
-
-/*
-** Calculate the hash code and update the logical address filter
-** from a list of ethernet multicast addresses.
-** Little endian crc one liner from Matt Thomas, DEC.
-*/
-static void
-SetMulticastFilter(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- u_long iobase = dev->base_addr;
- int i, bit, byte;
- u16 hashcode;
- u32 omr, crc;
- char *pa;
- unsigned char *addrs;
-
- omr = inl(DE4X5_OMR);
- omr &= ~(OMR_PR | OMR_PM);
- pa = build_setup_frame(dev, ALL); /* Build the basic frame */
-
- if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
- omr |= OMR_PM; /* Pass all multicasts */
- } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
- netdev_for_each_mc_addr(ha, dev) {
- crc = ether_crc_le(ETH_ALEN, ha->addr);
- hashcode = crc & DE4X5_HASH_BITS; /* hashcode is 9 LSb of CRC */
-
- byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
- bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */
-
- byte <<= 1; /* calc offset into setup frame */
- if (byte & 0x02) {
- byte -= 1;
- }
- lp->setup_frame[byte] |= bit;
- }
- } else { /* Perfect filtering */
- netdev_for_each_mc_addr(ha, dev) {
- addrs = ha->addr;
- for (i=0; i<ETH_ALEN; i++) {
- *(pa + (i&1)) = *addrs++;
- if (i & 0x01) pa += 4;
- }
- }
- }
- outl(omr, DE4X5_OMR);
-}
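
The hash-filter branch above takes the 9 least-significant bits of the little-endian Ethernet CRC-32 of the address and maps them onto the interleaved 16-bit words of the setup frame. The standalone sketch below reproduces that mapping; the bitwise CRC is the standard reflected-polynomial Ethernet CRC, and treating 0x01ff as the 9-bit mask is an assumption consistent with the comment in the code above.

#include <stdio.h>
#include <stdint.h>

/* Bitwise little-endian Ethernet CRC-32 (reflected polynomial 0xedb88320). */
static uint32_t crc32_le_bits(const uint8_t *p, int len)
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint16_t hashcode = crc32_le_bits(mcast, 6) & 0x01ff;	/* 9 LSBs of the CRC */
	int byte = hashcode >> 3;				/* which filter byte */
	int bit = 1 << (hashcode & 0x07);			/* which bit in it */

	byte <<= 1;						/* offset into the setup frame */
	if (byte & 0x02)
		byte -= 1;

	printf("hash %u -> setup_frame[%d] |= 0x%02x\n", hashcode, byte, bit);
	return 0;
}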
-
-#ifdef CONFIG_EISA
-
-static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
-
-static int de4x5_eisa_probe(struct device *gendev)
-{
- struct eisa_device *edev;
- u_long iobase;
- u_char irq, regval;
- u_short vendor;
- u32 cfid;
- int status, device;
- struct net_device *dev;
- struct de4x5_private *lp;
-
- edev = to_eisa_device (gendev);
- iobase = edev->base_addr;
-
- if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5"))
- return -EBUSY;
-
- if (!request_region (iobase + DE4X5_EISA_IO_PORTS,
- DE4X5_EISA_TOTAL_SIZE, "de4x5")) {
- status = -EBUSY;
- goto release_reg_1;
- }
-
- if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
- status = -ENOMEM;
- goto release_reg_2;
- }
- lp = netdev_priv(dev);
-
- cfid = (u32) inl(PCI_CFID);
- lp->cfrv = (u_short) inl(PCI_CFRV);
- device = (cfid >> 8) & 0x00ffff00;
- vendor = (u_short) cfid;
-
- /* Read the EISA Configuration Registers */
- regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
-#ifdef CONFIG_ALPHA
- /* Looks like the Jensen firmware (rev 2.2) doesn't really
- * care about the EISA configuration, and thus doesn't
- * configure the PLX bridge properly. Oh well... Simply mimic
- * the EISA config file to sort it out. */
-
- /* EISA REG1: Assert DecChip 21040 HW Reset */
- outb (ER1_IAM | 1, EISA_REG1);
- mdelay (1);
-
- /* EISA REG1: Deassert DecChip 21040 HW Reset */
- outb (ER1_IAM, EISA_REG1);
- mdelay (1);
-
- /* EISA REG3: R/W Burst Transfer Enable */
- outb (ER3_BWE | ER3_BRE, EISA_REG3);
-
-	/* 32-bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */
- outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
-#endif
- irq = de4x5_irq[(regval >> 1) & 0x03];
-
- if (is_DC2114x) {
- device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
- }
- lp->chipset = device;
- lp->bus = EISA;
-
- /* Write the PCI Configuration Registers */
- outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
- outl(0x00006000, PCI_CFLT);
- outl(iobase, PCI_CBIO);
-
- DevicePresent(dev, EISA_APROM);
-
- dev->irq = irq;
-
- if (!(status = de4x5_hw_init (dev, iobase, gendev))) {
- return 0;
- }
-
- free_netdev (dev);
- release_reg_2:
- release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
- release_reg_1:
- release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
-
- return status;
-}
-
-static int de4x5_eisa_remove(struct device *device)
-{
- struct net_device *dev;
- u_long iobase;
-
- dev = dev_get_drvdata(device);
- iobase = dev->base_addr;
-
- unregister_netdev (dev);
- free_netdev (dev);
- release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
- release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
-
- return 0;
-}
-
-static const struct eisa_device_id de4x5_eisa_ids[] = {
- { "DEC4250", 0 }, /* 0 is the board name index... */
- { "" }
-};
-MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
-
-static struct eisa_driver de4x5_eisa_driver = {
- .id_table = de4x5_eisa_ids,
- .driver = {
- .name = "de4x5",
- .probe = de4x5_eisa_probe,
- .remove = de4x5_eisa_remove,
- }
-};
-#endif
-
-#ifdef CONFIG_PCI
-
-/*
-** This function searches the current bus (which is >0) for a DECchip with an
-** SROM, so that in multiport cards that have one SROM shared between multiple
-** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
-** For single port cards this is a time waster...
-*/
-static void
-srom_search(struct net_device *dev, struct pci_dev *pdev)
-{
- u_char pb;
- u_short vendor, status;
- u_int irq = 0, device;
- u_long iobase = 0; /* Clear upper 32 bits in Alphas */
- int i, j;
- struct de4x5_private *lp = netdev_priv(dev);
- struct pci_dev *this_dev;
-
- list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
- vendor = this_dev->vendor;
- device = this_dev->device << 8;
- if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
-
- /* Get the chip configuration revision register */
- pb = this_dev->bus->number;
-
- /* Set the device number information */
- lp->device = PCI_SLOT(this_dev->devfn);
- lp->bus_num = pb;
-
- /* Set the chipset information */
- if (is_DC2114x) {
- device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
- ? DC21142 : DC21143);
- }
- lp->chipset = device;
-
- /* Get the board I/O address (64 bits on sparc64) */
- iobase = pci_resource_start(this_dev, 0);
-
- /* Fetch the IRQ to be used */
- irq = this_dev->irq;
- if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
-
- /* Check if I/O accesses are enabled */
- pci_read_config_word(this_dev, PCI_COMMAND, &status);
- if (!(status & PCI_COMMAND_IO)) continue;
-
- /* Search for a valid SROM attached to this DECchip */
- DevicePresent(dev, DE4X5_APROM);
- for (j=0, i=0; i<ETH_ALEN; i++) {
- j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
- }
- if (j != 0 && j != 6 * 0xff) {
- last.chipset = device;
- last.bus = pb;
- last.irq = irq;
- for (i=0; i<ETH_ALEN; i++) {
- last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
- }
- return;
- }
- }
-}
-
-/*
-** PCI bus I/O device probe
-** NB: PCI I/O accesses and Bus Mastering are enabled by the PCI BIOS, not
-** the driver. Some PCI BIOSes, pre V2.1, need the slot + features to be
-** enabled by the user first in the set up utility. Hence we just check for
-** enabled features and silently ignore the card if they're not.
-**
-** STOP PRESS: Some BIOSes __require__ the driver to enable the bus mastering
-** bit. Here, check for I/O accesses and then set BM. If you put the card in
-** a non BM slot, you're on your own (and complain to the PC vendor that your
-** PC doesn't conform to the PCI standard)!
-**
-** This function is only compatible with the *latest* 2.1.x kernels. For 2.0.x
-** kernels use the V0.535[n] drivers.
-*/
-
-static int de4x5_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- u_char pb, pbus = 0, dev_num, dnum = 0, timer;
- u_short vendor, status;
- u_int irq = 0, device;
- u_long iobase = 0; /* Clear upper 32 bits in Alphas */
- int error;
- struct net_device *dev;
- struct de4x5_private *lp;
-
- dev_num = PCI_SLOT(pdev->devfn);
- pb = pdev->bus->number;
-
- if (io) { /* probe a single PCI device */
- pbus = (u_short)(io >> 8);
- dnum = (u_short)(io & 0xff);
- if ((pbus != pb) || (dnum != dev_num))
- return -ENODEV;
- }
-
- vendor = pdev->vendor;
- device = pdev->device << 8;
- if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x))
- return -ENODEV;
-
- /* Ok, the device seems to be for us. */
- if ((error = pci_enable_device (pdev)))
- return error;
-
- if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
- error = -ENOMEM;
- goto disable_dev;
- }
-
- lp = netdev_priv(dev);
- lp->bus = PCI;
- lp->bus_num = 0;
-
- /* Search for an SROM on this bus */
- if (lp->bus_num != pb) {
- lp->bus_num = pb;
- srom_search(dev, pdev);
- }
-
- /* Get the chip configuration revision register */
- lp->cfrv = pdev->revision;
-
- /* Set the device number information */
- lp->device = dev_num;
- lp->bus_num = pb;
-
- /* Set the chipset information */
- if (is_DC2114x) {
- device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
- }
- lp->chipset = device;
-
- /* Get the board I/O address (64 bits on sparc64) */
- iobase = pci_resource_start(pdev, 0);
-
- /* Fetch the IRQ to be used */
- irq = pdev->irq;
- if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
- error = -ENODEV;
- goto free_dev;
- }
-
- /* Check if I/O accesses and Bus Mastering are enabled */
- pci_read_config_word(pdev, PCI_COMMAND, &status);
-#ifdef __powerpc__
- if (!(status & PCI_COMMAND_IO)) {
- status |= PCI_COMMAND_IO;
- pci_write_config_word(pdev, PCI_COMMAND, status);
- pci_read_config_word(pdev, PCI_COMMAND, &status);
- }
-#endif /* __powerpc__ */
- if (!(status & PCI_COMMAND_IO)) {
- error = -ENODEV;
- goto free_dev;
- }
-
- if (!(status & PCI_COMMAND_MASTER)) {
- status |= PCI_COMMAND_MASTER;
- pci_write_config_word(pdev, PCI_COMMAND, status);
- pci_read_config_word(pdev, PCI_COMMAND, &status);
- }
- if (!(status & PCI_COMMAND_MASTER)) {
- error = -ENODEV;
- goto free_dev;
- }
-
- /* Check the latency timer for values >= 0x60 */
- pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer);
- if (timer < 0x60) {
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60);
- }
-
- DevicePresent(dev, DE4X5_APROM);
-
- if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) {
- error = -EBUSY;
- goto free_dev;
- }
-
- dev->irq = irq;
-
- if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
- goto release;
- }
-
- return 0;
-
- release:
- release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
- free_dev:
- free_netdev (dev);
- disable_dev:
- pci_disable_device (pdev);
- return error;
-}
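
The probe above pokes PCI_COMMAND by hand to turn on I/O decoding and bus mastering. In a current kernel the same effect is normally obtained through the PCI core helpers; the fragment below is a hedged sketch of that idiom (the function name is hypothetical and error handling is trimmed), not the driver's actual code.

#include <linux/pci.h>

/* Sketch: enable the device and bus mastering via the PCI core instead of
 * editing PCI_COMMAND directly. */
static int example_pci_setup(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);		/* turns on I/O and/or memory decoding */
	if (err)
		return err;

	err = pci_request_regions(pdev, "de4x5");
	if (err) {
		pci_disable_device(pdev);
		return err;
	}

	pci_set_master(pdev);			/* sets PCI_COMMAND_MASTER for us */
	return 0;
}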
-
-static void de4x5_pci_remove(struct pci_dev *pdev)
-{
- struct net_device *dev;
- u_long iobase;
-
- dev = pci_get_drvdata(pdev);
- iobase = dev->base_addr;
-
- unregister_netdev (dev);
- free_netdev (dev);
- release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
- pci_disable_device (pdev);
-}
-
-static const struct pci_device_id de4x5_pci_tbl[] = {
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
- { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
- { },
-};
-
-static struct pci_driver de4x5_pci_driver = {
- .name = "de4x5",
- .id_table = de4x5_pci_tbl,
- .probe = de4x5_pci_probe,
- .remove = de4x5_pci_remove,
-};
-
-#endif
-
-/*
-** Auto configure the media here rather than setting the port at compile
-** time. This routine is called by de4x5_init() and when a loss of media is
-** detected (excessive collisions, loss of carrier, no carrier or link fail
-** [TP] or no recent receive activity) to check whether the user has been
-** sneaky and changed the port on us.
-*/
-static int
-autoconf_media(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- disable_ast(dev);
-
- lp->c_media = AUTO; /* Bogus last media */
- inl(DE4X5_MFC); /* Zero the lost frames counter */
- lp->media = INIT;
- lp->tcount = 0;
-
- de4x5_ast(&lp->timer);
-
- return lp->media;
-}
-
-/*
-** Autoconfigure the media when using the DC21040. AUI cannot be distinguished
-** from BNC as the port has a jumper to set thick or thin wire. When set for
-** BNC, the BNC port will indicate activity if it's not terminated correctly.
-** The only way to test for that is to place a loopback packet onto the
-** network and watch for errors. Since we're messing with the interrupt mask
-** register, disable the board interrupts and do not allow any more packets to
-** be queued to the hardware. Re-enable everything only when the media is
-** found.
-** I may have to "age out" locally queued packets so that the higher layer
-** timeouts don't effectively duplicate packets on the network.
-*/
-static int
-dc21040_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int next_tick = DE4X5_AUTOSENSE_MS;
- s32 imr;
-
- switch (lp->media) {
- case INIT:
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->timeout = -1;
- de4x5_save_skbs(dev);
- if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
- lp->media = TP;
- } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
- lp->media = BNC_AUI;
- } else if (lp->autosense == EXT_SIA) {
- lp->media = EXT_SIA;
- } else {
- lp->media = NC;
- }
- lp->local_state = 0;
- next_tick = dc21040_autoconf(dev);
- break;
-
- case TP:
- next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
- TP_SUSPECT, test_tp);
- break;
-
- case TP_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
- break;
-
- case BNC:
- case AUI:
- case BNC_AUI:
- next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
- BNC_AUI_SUSPECT, ping_media);
- break;
-
- case BNC_AUI_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
- break;
-
- case EXT_SIA:
- next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
- NC, EXT_SIA_SUSPECT, ping_media);
- break;
-
- case EXT_SIA_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
- break;
-
- case NC:
- /* default to TP for all */
- reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tx_enable = false;
- break;
- }
-
- return next_tick;
-}
-
-static int
-dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
- int next_state, int suspect_state,
- int (*fn)(struct net_device *, int))
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int next_tick = DE4X5_AUTOSENSE_MS;
- int linkBad;
-
- switch (lp->local_state) {
- case 0:
- reset_init_sia(dev, csr13, csr14, csr15);
- lp->local_state++;
- next_tick = 500;
- break;
-
- case 1:
- if (!lp->tx_enable) {
- linkBad = fn(dev, timeout);
- if (linkBad < 0) {
- next_tick = linkBad & ~TIMER_CB;
- } else {
- if (linkBad && (lp->autosense == AUTO)) {
- lp->local_state = 0;
- lp->media = next_state;
- } else {
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = suspect_state;
- next_tick = 3000;
- }
- break;
- }
-
- return next_tick;
-}
-
-static int
-de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state,
- int (*fn)(struct net_device *, int),
- int (*asfn)(struct net_device *))
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int next_tick = DE4X5_AUTOSENSE_MS;
- int linkBad;
-
- switch (lp->local_state) {
- case 1:
- if (lp->linkOK) {
- lp->media = prev_state;
- } else {
- lp->local_state++;
- next_tick = asfn(dev);
- }
- break;
-
- case 2:
- linkBad = fn(dev, timeout);
- if (linkBad < 0) {
- next_tick = linkBad & ~TIMER_CB;
- } else if (!linkBad) {
- lp->local_state--;
- lp->media = prev_state;
- } else {
- lp->media = INIT;
- lp->tcount++;
- }
- }
-
- return next_tick;
-}
-
-/*
-** Autoconfigure the media when using the DC21041. AUI needs to be tested
-** before BNC, because the BNC port will indicate activity if it's not
-** terminated correctly. The only way to test for that is to place a loopback
-** packet onto the network and watch for errors. Since we're messing with
-** the interrupt mask register, disable the board interrupts and do not allow
-** any more packets to be queued to the hardware. Re-enable everything only
-** when the media is found.
-*/
-static int
-dc21041_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 sts, irqs, irq_mask, imr, omr;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- switch (lp->media) {
- case INIT:
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->timeout = -1;
- de4x5_save_skbs(dev); /* Save non transmitted skb's */
- if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
- lp->media = TP; /* On chip auto negotiation is broken */
- } else if (lp->autosense == TP) {
- lp->media = TP;
- } else if (lp->autosense == BNC) {
- lp->media = BNC;
- } else if (lp->autosense == AUI) {
- lp->media = AUI;
- } else {
- lp->media = NC;
- }
- lp->local_state = 0;
- next_tick = dc21041_autoconf(dev);
- break;
-
- case TP_NW:
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */
- outl(omr | OMR_FDX, DE4X5_OMR);
- }
- irqs = STS_LNF | STS_LNP;
- irq_mask = IMR_LFM | IMR_LPM;
- sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (sts & STS_LNP) {
- lp->media = ANS;
- } else {
- lp->media = AUI;
- }
- next_tick = dc21041_autoconf(dev);
- }
- break;
-
- case ANS:
- if (!lp->tx_enable) {
- irqs = STS_LNP;
- irq_mask = IMR_LPM;
- sts = test_ans(dev, irqs, irq_mask, 3000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
- lp->media = TP;
- next_tick = dc21041_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = ANS_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case ANS_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
- break;
-
- case TP:
- if (!lp->tx_enable) {
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for TP */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = STS_LNF | STS_LNP;
- irq_mask = IMR_LFM | IMR_LPM;
- sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
- if (inl(DE4X5_SISR) & SISR_NRA) {
- lp->media = AUI; /* Non selected port activity */
- } else {
- lp->media = BNC;
- }
- next_tick = dc21041_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = TP_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case TP_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
- break;
-
- case AUI:
- if (!lp->tx_enable) {
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
- lp->media = BNC;
- next_tick = dc21041_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = AUI_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case AUI_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
- break;
-
- case BNC:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- lp->local_state++; /* Ensure media connected */
- next_tick = dc21041_autoconf(dev);
- }
- break;
-
- case 1:
- if (!lp->tx_enable) {
- if ((sts = ping_media(dev, 3000)) < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (sts) {
- lp->local_state = 0;
- lp->media = NC;
- } else {
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = BNC_SUSPECT;
- next_tick = 3000;
- }
- break;
- }
- break;
-
- case BNC_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
- break;
-
- case NC:
- omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */
- outl(omr | OMR_FDX, DE4X5_OMR);
- reset_init_sia(dev, 0xef01, 0xffff, 0x0008);/* Initialise the SIA */
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tx_enable = false;
- break;
- }
-
- return next_tick;
-}
-
-/*
-** Some autonegotiation chips are broken in that they do not return the
-** acknowledge bit (anlpa & MII_ANLPA_ACK) in the link partner advertisement
-** register, except at the first power up negotiation.
-*/
-static int
-dc21140m_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int ana, anlpa, cap, cr, slnk, sr;
- int next_tick = DE4X5_AUTOSENSE_MS;
- u_long imr, omr, iobase = dev->base_addr;
-
- switch(lp->media) {
- case INIT:
- if (lp->timeout < 0) {
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->linkOK = 0;
- de4x5_save_skbs(dev); /* Save non transmitted skb's */
- }
- if ((next_tick = de4x5_reset_phy(dev)) < 0) {
- next_tick &= ~TIMER_CB;
- } else {
- if (lp->useSROM) {
- if (srom_map_media(dev) < 0) {
- lp->tcount++;
- return next_tick;
- }
- srom_exec(dev, lp->phy[lp->active].gep);
- if (lp->infoblock_media == ANS) {
- ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
- mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- }
- } else {
- lp->tmp = MII_SR_ASSC; /* Fake out the MII speed set */
- SET_10Mb;
- if (lp->autosense == _100Mb) {
- lp->media = _100Mb;
- } else if (lp->autosense == _10Mb) {
- lp->media = _10Mb;
- } else if ((lp->autosense == AUTO) &&
- ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
- ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
- ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
- mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- lp->media = ANS;
- } else if (lp->autosense == AUTO) {
- lp->media = SPD_DET;
- } else if (is_spd_100(dev) && is_100_up(dev)) {
- lp->media = _100Mb;
- } else {
- lp->media = NC;
- }
- }
- lp->local_state = 0;
- next_tick = dc21140m_autoconf(dev);
- }
- break;
-
- case ANS:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
- }
- cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
- if (cr < 0) {
- next_tick = cr & ~TIMER_CB;
- } else {
- if (cr) {
- lp->local_state = 0;
- lp->media = SPD_DET;
- } else {
- lp->local_state++;
- }
- next_tick = dc21140m_autoconf(dev);
- }
- break;
-
- case 1:
- if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
- next_tick = sr & ~TIMER_CB;
- } else {
- lp->media = SPD_DET;
- lp->local_state = 0;
- if (sr) { /* Success! */
- lp->tmp = MII_SR_ASSC;
- anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
- ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- if (!(anlpa & MII_ANLPA_RF) &&
- (cap = anlpa & MII_ANLPA_TAF & ana)) {
- if (cap & MII_ANA_100M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
- lp->media = _100Mb;
- } else if (cap & MII_ANA_10M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
-
- lp->media = _10Mb;
- }
- }
- } /* Auto Negotiation failed to finish */
- next_tick = dc21140m_autoconf(dev);
- } /* Auto Negotiation failed to start */
- break;
- }
- break;
-
- case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
- if (lp->timeout < 0) {
- lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
- (~gep_rd(dev) & GEP_LNP));
- SET_100Mb_PDET;
- }
- if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
- next_tick = slnk & ~TIMER_CB;
- } else {
- if (is_spd_100(dev) && is_100_up(dev)) {
- lp->media = _100Mb;
- } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
- lp->media = _10Mb;
- } else {
- lp->media = NC;
- }
- next_tick = dc21140m_autoconf(dev);
- }
- break;
-
- case _100Mb: /* Set 100Mb/s */
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_100Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- case BNC:
- case AUI:
- case _10Mb: /* Set 10Mb/s */
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_10Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- case NC:
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tx_enable = false;
- break;
- }
-
- return next_tick;
-}
-
-/*
-** This routine may be merged into dc21140m_autoconf() sometime as I'm
-** changing how I figure out the media - but trying to keep it backwards
-** compatible with the de500-xa and de500-aa.
-** Whether it's BNC, AUI, SYM or MII is sorted out in the infoblock
-** functions and set during de4x5_mac_port() and/or de4x5_reset_phy().
-** This routine just has to figure out whether 10Mb/s or 100Mb/s is
-** active.
-** When autonegotiation is working, the ANS part searches the SROM for
-** the highest common speed (TP) link that both can run and if that can
-** be full duplex. That infoblock is executed and then the link speed set.
-**
-** Only _10Mb and _100Mb are tested here.
-*/
-static int
-dc2114x_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- switch (lp->media) {
- case INIT:
- if (lp->timeout < 0) {
- DISABLE_IRQs;
- lp->tx_enable = false;
- lp->linkOK = 0;
- lp->timeout = -1;
- de4x5_save_skbs(dev); /* Save non transmitted skb's */
- if (lp->params.autosense & ~AUTO) {
- srom_map_media(dev); /* Fixed media requested */
- if (lp->media != lp->params.autosense) {
- lp->tcount++;
- lp->media = INIT;
- return next_tick;
- }
- lp->media = INIT;
- }
- }
- if ((next_tick = de4x5_reset_phy(dev)) < 0) {
- next_tick &= ~TIMER_CB;
- } else {
- if (lp->autosense == _100Mb) {
- lp->media = _100Mb;
- } else if (lp->autosense == _10Mb) {
- lp->media = _10Mb;
- } else if (lp->autosense == TP) {
- lp->media = TP;
- } else if (lp->autosense == BNC) {
- lp->media = BNC;
- } else if (lp->autosense == AUI) {
- lp->media = AUI;
- } else {
- lp->media = SPD_DET;
- if ((lp->infoblock_media == ANS) &&
- ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
- ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
- ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
- mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- lp->media = ANS;
- }
- }
- lp->local_state = 0;
- next_tick = dc2114x_autoconf(dev);
- }
- break;
-
- case ANS:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
- }
- cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
- if (cr < 0) {
- next_tick = cr & ~TIMER_CB;
- } else {
- if (cr) {
- lp->local_state = 0;
- lp->media = SPD_DET;
- } else {
- lp->local_state++;
- }
- next_tick = dc2114x_autoconf(dev);
- }
- break;
-
- case 1:
- sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
- if (sr < 0) {
- next_tick = sr & ~TIMER_CB;
- } else {
- lp->media = SPD_DET;
- lp->local_state = 0;
- if (sr) { /* Success! */
- lp->tmp = MII_SR_ASSC;
- anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
- ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
- if (!(anlpa & MII_ANLPA_RF) &&
- (cap = anlpa & MII_ANLPA_TAF & ana)) {
- if (cap & MII_ANA_100M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
- lp->media = _100Mb;
- } else if (cap & MII_ANA_10M) {
- lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
- lp->media = _10Mb;
- }
- }
- } /* Auto Negotiation failed to finish */
- next_tick = dc2114x_autoconf(dev);
- } /* Auto Negotiation failed to start */
- break;
- }
- break;
-
- case AUI:
- if (!lp->tx_enable) {
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
- lp->media = BNC;
- next_tick = dc2114x_autoconf(dev);
- } else {
- lp->local_state = 1;
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = AUI_SUSPECT;
- next_tick = 3000;
- }
- break;
-
- case AUI_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
- break;
-
- case BNC:
- switch (lp->local_state) {
- case 0:
- if (lp->timeout < 0) {
- omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
- outl(omr & ~OMR_FDX, DE4X5_OMR);
- }
- irqs = 0;
- irq_mask = 0;
- sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
- if (sts < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- lp->local_state++; /* Ensure media connected */
- next_tick = dc2114x_autoconf(dev);
- }
- break;
-
- case 1:
- if (!lp->tx_enable) {
- if ((sts = ping_media(dev, 3000)) < 0) {
- next_tick = sts & ~TIMER_CB;
- } else {
- if (sts) {
- lp->local_state = 0;
- lp->tcount++;
- lp->media = INIT;
- } else {
- de4x5_init_connection(dev);
- }
- }
- } else if (!lp->linkOK && (lp->autosense == AUTO)) {
- lp->media = BNC_SUSPECT;
- next_tick = 3000;
- }
- break;
- }
- break;
-
- case BNC_SUSPECT:
- next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
- break;
-
- case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
- if (srom_map_media(dev) < 0) {
- lp->tcount++;
- lp->media = INIT;
- return next_tick;
- }
- if (lp->media == _100Mb) {
- if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
- lp->media = SPD_DET;
- return slnk & ~TIMER_CB;
- }
- } else {
- if (wait_for_link(dev) < 0) {
- lp->media = SPD_DET;
- return PDET_LINK_WAIT;
- }
- }
- if (lp->media == ANS) { /* Do MII parallel detection */
- if (is_spd_100(dev)) {
- lp->media = _100Mb;
- } else {
- lp->media = _10Mb;
- }
- next_tick = dc2114x_autoconf(dev);
- } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
- (((lp->media == _10Mb) || (lp->media == TP) ||
- (lp->media == BNC) || (lp->media == AUI)) &&
- is_10_up(dev))) {
- next_tick = dc2114x_autoconf(dev);
- } else {
- lp->tcount++;
- lp->media = INIT;
- }
- break;
-
- case _10Mb:
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_10Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- case _100Mb:
- next_tick = 3000;
- if (!lp->tx_enable) {
- SET_100Mb;
- de4x5_init_connection(dev);
- } else {
- if (!lp->linkOK && (lp->autosense == AUTO)) {
- if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
- lp->media = INIT;
- lp->tcount++;
- next_tick = DE4X5_AUTOSENSE_MS;
- }
- }
- }
- break;
-
- default:
- lp->tcount++;
-	printk("Huh?: media:%02x\n", lp->media);
- lp->media = INIT;
- break;
- }
-
- return next_tick;
-}
-
-static int
-srom_autoconf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- return lp->infoleaf_fn(dev);
-}
-
-/*
-** This mapping keeps the original media codes and FDX flag unchanged.
-** While it isn't strictly necessary, it helps me for the moment...
-** The early return avoids a media state / SROM media space clash.
-*/
-static int
-srom_map_media(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- lp->fdx = false;
- if (lp->infoblock_media == lp->media)
- return 0;
-
- switch(lp->infoblock_media) {
- case SROM_10BASETF:
- if (!lp->params.fdx) return -1;
- lp->fdx = true;
- fallthrough;
-
- case SROM_10BASET:
- if (lp->params.fdx && !lp->fdx) return -1;
- if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
- lp->media = _10Mb;
- } else {
- lp->media = TP;
- }
- break;
-
- case SROM_10BASE2:
- lp->media = BNC;
- break;
-
- case SROM_10BASE5:
- lp->media = AUI;
- break;
-
- case SROM_100BASETF:
- if (!lp->params.fdx) return -1;
- lp->fdx = true;
- fallthrough;
-
- case SROM_100BASET:
- if (lp->params.fdx && !lp->fdx) return -1;
- lp->media = _100Mb;
- break;
-
- case SROM_100BASET4:
- lp->media = _100Mb;
- break;
-
- case SROM_100BASEFF:
- if (!lp->params.fdx) return -1;
- lp->fdx = true;
- fallthrough;
-
- case SROM_100BASEF:
- if (lp->params.fdx && !lp->fdx) return -1;
- lp->media = _100Mb;
- break;
-
- case ANS:
- lp->media = ANS;
- lp->fdx = lp->params.fdx;
- break;
-
- default:
- printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
- lp->infoblock_media);
- return -1;
- }
-
- return 0;
-}
-
-static void
-de4x5_init_connection(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_long flags = 0;
-
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media; /* Stop scrolling media messages */
- }
-
- spin_lock_irqsave(&lp->lock, flags);
- de4x5_rst_desc_ring(dev);
- de4x5_setup_intr(dev);
- lp->tx_enable = true;
- spin_unlock_irqrestore(&lp->lock, flags);
- outl(POLL_DEMAND, DE4X5_TPD);
-
- netif_wake_queue(dev);
-}
-
-/*
-** General PHY reset function. Some MII devices don't reset correctly
-** since their MII address pins can float at voltages that are dependent
-** on the signal pin use. Do a double reset to ensure a reset.
-*/
-static int
-de4x5_reset_phy(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int next_tick = 0;
-
- if ((lp->useSROM) || (lp->phy[lp->active].id)) {
- if (lp->timeout < 0) {
- if (lp->useSROM) {
- if (lp->phy[lp->active].rst) {
- srom_exec(dev, lp->phy[lp->active].rst);
- srom_exec(dev, lp->phy[lp->active].rst);
- } else if (lp->rst) { /* Type 5 infoblock reset */
- srom_exec(dev, lp->rst);
- srom_exec(dev, lp->rst);
- }
- } else {
- PHY_HARD_RESET;
- }
- if (lp->useMII) {
- mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
- }
- }
- if (lp->useMII) {
- next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
- }
- } else if (lp->chipset == DC21140) {
- PHY_HARD_RESET;
- }
-
- return next_tick;
-}
-
-static int
-test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 sts, csr12;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */
- reset_init_sia(dev, csr13, csr14, csr15);
- }
-
- /* set up the interrupt mask */
- outl(irq_mask, DE4X5_IMR);
-
- /* clear all pending interrupts */
- sts = inl(DE4X5_STS);
- outl(sts, DE4X5_STS);
-
- /* clear csr12 NRA and SRA bits */
- if ((lp->chipset == DC21041) || lp->useSROM) {
- csr12 = inl(DE4X5_SISR);
- outl(csr12, DE4X5_SISR);
- }
- }
-
- sts = inl(DE4X5_STS) & ~TIMER_CB;
-
- if (!(sts & irqs) && --lp->timeout) {
- sts = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return sts;
-}
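
test_media() and the other probe helpers overload their return value: a plain status when the test has finished, or a re-poll delay in milliseconds with the TIMER_CB flag OR'd in when the caller should try again later. The small standalone sketch below shows how a caller decodes that; the TIMER_CB value used here is an assumption for illustration, not taken from de4x5.h.

#include <stdio.h>

#define TIMER_CB	0x80000000u	/* assumed flag value, for illustration only */

/* Decode a probe-helper result: either reschedule after 'delay' ms or act on
 * the final status - the convention the autoconf state machines above rely on
 * when they test "sts < 0" and mask with ~TIMER_CB. */
static void handle_result(unsigned int sts)
{
	if (sts & TIMER_CB)
		printf("not finished - re-poll in %u ms\n", sts & ~TIMER_CB);
	else
		printf("probe finished, status 0x%x\n", sts);
}

int main(void)
{
	handle_result(100 | TIMER_CB);	/* still waiting */
	handle_result(0x0004);		/* done */
	return 0;
}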
-
-static int
-test_tp(struct net_device *dev, s32 msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int sisr;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- }
-
- sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
-
- if (sisr && --lp->timeout) {
- sisr = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return sisr;
-}
-
-/*
-** Samples the 100Mb Link State Signal. The sample interval is important
-** because too fast a rate can give erroneous results and confuse the
-** speed sense algorithm.
-*/
-#define SAMPLE_INTERVAL 500 /* ms */
-#define SAMPLE_DELAY 2000 /* ms */
-static int
-test_for_100Mb(struct net_device *dev, int msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-    int gep = 0, ret = ((lp->chipset & ~0x00ff) == DC2114x ? -1 : GEP_SLNK);
-
- if (lp->timeout < 0) {
- if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
- if (msec > SAMPLE_DELAY) {
- lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
- gep = SAMPLE_DELAY | TIMER_CB;
- return gep;
- } else {
- lp->timeout = msec/SAMPLE_INTERVAL;
- }
- }
-
- if (lp->phy[lp->active].id || lp->useSROM) {
- gep = is_100_up(dev) | is_spd_100(dev);
- } else {
- gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
- }
- if (!(gep & ret) && --lp->timeout) {
- gep = SAMPLE_INTERVAL | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return gep;
-}
-
-static int
-wait_for_link(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- if (lp->timeout < 0) {
- lp->timeout = 1;
- }
-
- if (lp->timeout--) {
- return TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return 0;
-}
-
-/*
-**
-**
-*/
-static int
-test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int test;
- u_long iobase = dev->base_addr;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- }
-
- reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
- test = (reg ^ (pol ? ~0 : 0)) & mask;
-
- if (test && --lp->timeout) {
- reg = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return reg;
-}
-
-static int
-is_spd_100(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int spd;
-
- if (lp->useMII) {
- spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
- spd = ~(spd ^ lp->phy[lp->active].spd.value);
- spd &= lp->phy[lp->active].spd.mask;
- } else if (!lp->useSROM) { /* de500-xa */
- spd = ((~gep_rd(dev)) & GEP_SLNK);
- } else {
- if ((lp->ibn == 2) || !lp->asBitValid)
- return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
-
- spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
- (lp->linkOK & ~lp->asBitValid);
- }
-
- return spd;
-}
-
-static int
-is_100_up(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->useMII) {
- /* Double read for sticky bits & temporary drops */
- mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
- return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
- } else if (!lp->useSROM) { /* de500-xa */
- return (~gep_rd(dev)) & GEP_SLNK;
- } else {
- if ((lp->ibn == 2) || !lp->asBitValid)
- return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
-
- return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
- (lp->linkOK & ~lp->asBitValid);
- }
-}
-
-static int
-is_10_up(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->useMII) {
- /* Double read for sticky bits & temporary drops */
- mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
- return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
- } else if (!lp->useSROM) { /* de500-xa */
- return (~gep_rd(dev)) & GEP_LNP;
- } else {
- if ((lp->ibn == 2) || !lp->asBitValid)
- return ((lp->chipset & ~0x00ff) == DC2114x) ?
- (~inl(DE4X5_SISR)&SISR_LS10):
- 0;
-
- return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
- (lp->linkOK & ~lp->asBitValid);
- }
-}
-
-static int
-is_anc_capable(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
- return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
- } else {
- return 0;
- }
-}
-
-/*
-** Send a packet onto the media and watch for send errors that indicate the
-** media is bad or unconnected.
-*/
-static int
-ping_media(struct net_device *dev, int msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int sisr;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
-
- lp->tmp = lp->tx_new; /* Remember the ring position */
- load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- outl(POLL_DEMAND, DE4X5_TPD);
- }
-
- sisr = inl(DE4X5_SISR);
-
- if ((!(sisr & SISR_NCR)) &&
- ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
- (--lp->timeout)) {
- sisr = 100 | TIMER_CB;
- } else {
- if ((!(sisr & SISR_NCR)) &&
- !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
- lp->timeout) {
- sisr = 0;
- } else {
- sisr = 1;
- }
- lp->timeout = -1;
- }
-
- return sisr;
-}
-
-/*
-** This function does 2 things: on Intels it kmalloc's another buffer to
-** replace the one about to be passed up. On Alpha's it kmallocs a buffer
-** into which the packet is copied.
-*/
-static struct sk_buff *
-de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- struct sk_buff *p;
-
-#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
- struct sk_buff *ret;
- u_long i=0, tmp;
-
- p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
- if (!p) return NULL;
-
- tmp = virt_to_bus(p->data);
- i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
- skb_reserve(p, i);
- lp->rx_ring[index].buf = cpu_to_le32(tmp + i);
-
- ret = lp->rx_skb[index];
- lp->rx_skb[index] = p;
-
- if ((u_long) ret > 1) {
- skb_put(ret, len);
- }
-
- return ret;
-
-#else
- if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */
-
- p = netdev_alloc_skb(dev, len + 2);
- if (!p) return NULL;
-
- skb_reserve(p, 2); /* Align */
- if (index < lp->rx_old) { /* Wrapped buffer */
- short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
- skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, tlen);
- skb_put_data(p, lp->rx_bufs, len - tlen);
- } else { /* Linear buffer */
- skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, len);
- }
-
- return p;
-#endif
-}
-
-static void
-de4x5_free_rx_buffs(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
- for (i=0; i<lp->rxRingSize; i++) {
- if ((u_long) lp->rx_skb[i] > 1) {
- dev_kfree_skb(lp->rx_skb[i]);
- }
- lp->rx_ring[i].status = 0;
- lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */
- }
-}
-
-static void
-de4x5_free_tx_buffs(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
- for (i=0; i<lp->txRingSize; i++) {
- if (lp->tx_skb[i])
- de4x5_free_tx_buff(lp, i);
- lp->tx_ring[i].status = 0;
- }
-
- /* Unload the locally queued packets */
- __skb_queue_purge(&lp->cache.queue);
-}
-
-/*
-** When a user pulls a connection, the DECchip can end up in a
-** 'running - waiting for end of transmission' state. This means that we
-** have to perform a chip soft reset to ensure that we can synchronize
-** the hardware and software and make any media probes using a loopback
-** packet meaningful.
-*/
-static void
-de4x5_save_skbs(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 omr;
-
- if (!lp->cache.save_cnt) {
- STOP_DE4X5;
- de4x5_tx(dev); /* Flush any sent skb's */
- de4x5_free_tx_buffs(dev);
- de4x5_cache_state(dev, DE4X5_SAVE_STATE);
- de4x5_sw_reset(dev);
- de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
- lp->cache.save_cnt++;
- START_DE4X5;
- }
-}
-
-static void
-de4x5_rst_desc_ring(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i;
- s32 omr;
-
- if (lp->cache.save_cnt) {
- STOP_DE4X5;
- outl(lp->dma_rings, DE4X5_RRBA);
- outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
- DE4X5_TRBA);
-
- lp->rx_new = lp->rx_old = 0;
- lp->tx_new = lp->tx_old = 0;
-
- for (i = 0; i < lp->rxRingSize; i++) {
- lp->rx_ring[i].status = cpu_to_le32(R_OWN);
- }
-
- for (i = 0; i < lp->txRingSize; i++) {
- lp->tx_ring[i].status = cpu_to_le32(0);
- }
-
- barrier();
- lp->cache.save_cnt--;
- START_DE4X5;
- }
-}
-
-static void
-de4x5_cache_state(struct net_device *dev, int flag)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- switch(flag) {
- case DE4X5_SAVE_STATE:
- lp->cache.csr0 = inl(DE4X5_BMR);
- lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
- lp->cache.csr7 = inl(DE4X5_IMR);
- break;
-
- case DE4X5_RESTORE_STATE:
- outl(lp->cache.csr0, DE4X5_BMR);
- outl(lp->cache.csr6, DE4X5_OMR);
- outl(lp->cache.csr7, DE4X5_IMR);
- if (lp->chipset == DC21140) {
- gep_wr(lp->cache.gepc, dev);
- gep_wr(lp->cache.gep, dev);
- } else {
- reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
- lp->cache.csr15);
- }
- break;
- }
-}
-
-static void
-de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- __skb_queue_tail(&lp->cache.queue, skb);
-}
-
-static void
-de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- __skb_queue_head(&lp->cache.queue, skb);
-}
-
-static struct sk_buff *
-de4x5_get_cache(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- return __skb_dequeue(&lp->cache.queue);
-}
-
-/*
-** Check the Auto Negotiation State. Return OK when a link pass interrupt
-** is received and the auto-negotiation status is NWAY OK.
-*/
-static int
-test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 sts, ans;
-
- if (lp->timeout < 0) {
- lp->timeout = msec/100;
- outl(irq_mask, DE4X5_IMR);
-
- /* clear all pending interrupts */
- sts = inl(DE4X5_STS);
- outl(sts, DE4X5_STS);
- }
-
- ans = inl(DE4X5_SISR) & SISR_ANS;
- sts = inl(DE4X5_STS) & ~TIMER_CB;
-
- if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
- sts = 100 | TIMER_CB;
- } else {
- lp->timeout = -1;
- }
-
- return sts;
-}
-
-static void
-de4x5_setup_intr(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 imr, sts;
-
- if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */
- imr = 0;
- UNMASK_IRQs;
- sts = inl(DE4X5_STS); /* Reset any pending (stale) interrupts */
- outl(sts, DE4X5_STS);
- ENABLE_IRQs;
- }
-}
-
-/*
-**
-*/
-static void
-reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- RESET_SIA;
- if (lp->useSROM) {
- if (lp->ibn == 3) {
- srom_exec(dev, lp->phy[lp->active].rst);
- srom_exec(dev, lp->phy[lp->active].gep);
- outl(1, DE4X5_SICR);
- return;
- } else {
- csr15 = lp->cache.csr15;
- csr14 = lp->cache.csr14;
- csr13 = lp->cache.csr13;
- outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
- outl(csr15 | lp->cache.gep, DE4X5_SIGR);
- }
- } else {
- outl(csr15, DE4X5_SIGR);
- }
- outl(csr14, DE4X5_STRR);
- outl(csr13, DE4X5_SICR);
-
- mdelay(10);
-}
-
-/*
-** Create a loopback ethernet packet
-*/
-static void
-create_packet(struct net_device *dev, char *frame, int len)
-{
- int i;
- char *buf = frame;
-
- for (i=0; i<ETH_ALEN; i++) { /* Use this source address */
- *buf++ = dev->dev_addr[i];
- }
- for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */
- *buf++ = dev->dev_addr[i];
- }
-
- *buf++ = 0; /* Packet length (2 bytes) */
- *buf++ = 1;
-}
-
-/*
-** Look for a particular board name in the EISA configuration space
-*/
-static int
-EISA_signature(char *name, struct device *device)
-{
- int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
- struct eisa_device *edev;
-
- *name = '\0';
- edev = to_eisa_device (device);
- i = edev->id.driver_data;
-
- if (i >= 0 && i < siglen) {
- strcpy (name, de4x5_signatures[i]);
- status = 1;
- }
-
- return status; /* return the device name string */
-}
-
-/*
-** Look for a particular board name in the PCI configuration space
-*/
-static void
-PCI_signature(char *name, struct de4x5_private *lp)
-{
- int i, siglen = ARRAY_SIZE(de4x5_signatures);
-
- if (lp->chipset == DC21040) {
- strcpy(name, "DE434/5");
- return;
- } else { /* Search for a DEC name in the SROM */
- int tmp = *((char *)&lp->srom + 19) * 3;
- strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
- }
- name[8] = '\0';
- for (i=0; i<siglen; i++) {
- if (strstr(name,de4x5_signatures[i])!=NULL) break;
- }
- if (i == siglen) {
- if (dec_only) {
- *name = '\0';
- } else { /* Use chip name to avoid confusion */
- strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
- ((lp->chipset == DC21041) ? "DC21041" :
- ((lp->chipset == DC21140) ? "DC21140" :
- ((lp->chipset == DC21142) ? "DC21142" :
- ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
- )))))));
- }
- if (lp->chipset != DC21041) {
- lp->useSROM = true; /* card is not recognisably DEC */
- }
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- lp->useSROM = true;
- }
-}
-
-/*
-** Set up the Ethernet PROM counter to the start of the Ethernet address on
-** the DC21040, else read the SROM for the other chips.
-** The SROM may not be present in a multi-MAC card, so first read the
-** MAC address and check for a bad address. If there is a bad one then exit
-** immediately with the prior srom contents intact (the h/w address will
-** be fixed up later).
-*/
-static void
-DevicePresent(struct net_device *dev, u_long aprom_addr)
-{
- int i, j=0;
- struct de4x5_private *lp = netdev_priv(dev);
-
- if (lp->chipset == DC21040) {
- if (lp->bus == EISA) {
- enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */
- } else {
- outl(0, aprom_addr); /* Reset Ethernet Address ROM Pointer */
- }
- } else { /* Read new srom */
- u_short tmp;
- __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD);
- for (i=0; i<(ETH_ALEN>>1); i++) {
- tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
- j += tmp; /* for check for 0:0:0:0:0:0 or ff:ff:ff:ff:ff:ff */
- *p = cpu_to_le16(tmp);
- }
- if (j == 0 || j == 3 * 0xffff) {
- /* could get 0 only from all-0 and 3 * 0xffff only from all-1 */
- return;
- }
-
- p = (__le16 *)&lp->srom;
- for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
- tmp = srom_rd(aprom_addr, i);
- *p++ = cpu_to_le16(tmp);
- }
- de4x5_dbg_srom(&lp->srom);
- }
-}
-
-/*
-** Since the write on the Enet PROM register doesn't seem to reset the PROM
-** pointer correctly (at least on my DE425 EISA card), this routine should do
-** it...from depca.c.
-*/
-static void
-enet_addr_rst(u_long aprom_addr)
-{
- union {
- struct {
- u32 a;
- u32 b;
- } llsig;
- char Sig[sizeof(u32) << 1];
- } dev;
- short sigLength=0;
- s8 data;
- int i, j;
-
- dev.llsig.a = ETH_PROM_SIG;
- dev.llsig.b = ETH_PROM_SIG;
- sigLength = sizeof(u32) << 1;
-
- for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
- data = inb(aprom_addr);
- if (dev.Sig[j] == data) { /* track signature */
- j++;
- } else { /* lost signature; begin search again */
- if (data == dev.Sig[0]) { /* rare case.... */
- j=1;
- } else {
- j=0;
- }
- }
- }
-}
-
-/*
-** For the bad status case with no SROM, add one to the previous
-** address. The increment must propagate backwards in case one or more
-** of the bytes is 0xff. Only the last 3 bytes need to be checked,
-** since the first three are invariant - assigned to an organisation.
-*/
-static int
-get_hw_addr(struct net_device *dev)
-{
- u_long iobase = dev->base_addr;
- int broken, i, k, tmp, status = 0;
- u_short j,chksum;
- struct de4x5_private *lp = netdev_priv(dev);
- u8 addr[ETH_ALEN];
-
- broken = de4x5_bad_srom(lp);
-
- for (i=0,k=0,j=0;j<3;j++) {
- k <<= 1;
- if (k > 0xffff) k-=0xffff;
-
- if (lp->bus == PCI) {
- if (lp->chipset == DC21040) {
- while ((tmp = inl(DE4X5_APROM)) < 0);
- k += (u_char) tmp;
- addr[i++] = (u_char) tmp;
- while ((tmp = inl(DE4X5_APROM)) < 0);
- k += (u_short) (tmp << 8);
- addr[i++] = (u_char) tmp;
- } else if (!broken) {
- addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
- addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
- } else if ((broken == SMC) || (broken == ACCTON)) {
- addr[i] = *((u_char *)&lp->srom + i); i++;
- addr[i] = *((u_char *)&lp->srom + i); i++;
- }
- } else {
- k += (u_char) (tmp = inb(EISA_APROM));
- addr[i++] = (u_char) tmp;
- k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
- addr[i++] = (u_char) tmp;
- }
-
- if (k > 0xffff) k-=0xffff;
- }
- if (k == 0xffff) k=0;
-
- eth_hw_addr_set(dev, addr);
-
- if (lp->bus == PCI) {
- if (lp->chipset == DC21040) {
- while ((tmp = inl(DE4X5_APROM)) < 0);
- chksum = (u_char) tmp;
- while ((tmp = inl(DE4X5_APROM)) < 0);
- chksum |= (u_short) (tmp << 8);
- if ((k != chksum) && (dec_only)) status = -1;
- }
- } else {
- chksum = (u_char) inb(EISA_APROM);
- chksum |= (u_short) (inb(EISA_APROM) << 8);
- if ((k != chksum) && (dec_only)) status = -1;
- }
-
- /* If possible, try to fix a broken card - SMC only so far */
- srom_repair(dev, broken);
-
-#ifdef CONFIG_PPC_PMAC
- /*
- ** If the address starts with 00 a0, we have to bit-reverse
- ** each byte of the address.
- */
- if ( machine_is(powermac) &&
- (dev->dev_addr[0] == 0) &&
- (dev->dev_addr[1] == 0xa0) )
- {
- for (i = 0; i < ETH_ALEN; ++i)
- {
- int x = dev->dev_addr[i];
- x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
- x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
- addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
- }
- eth_hw_addr_set(dev, addr);
- }
-#endif /* CONFIG_PPC_PMAC */
-
- /* Test for a bad enet address */
- status = test_bad_enet(dev, status);
-
- return status;
-}
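
The CONFIG_PPC_PMAC block in get_hw_addr() above reverses the bit order of each address byte in three masked-swap steps (nibbles, then bit pairs, then single bits). A stand-alone sketch of the same trick, with names that are illustrative rather than taken from the driver:

#include <stdio.h>

/* Reverse the bit order of one byte using the same three swap steps
 * as the PPC_PMAC fix-up above: nibbles, bit pairs, then single bits. */
static unsigned char bit_reverse(unsigned char x)
{
        x = ((x & 0x0f) << 4) | ((x & 0xf0) >> 4);
        x = ((x & 0x33) << 2) | ((x & 0xcc) >> 2);
        x = ((x & 0x55) << 1) | ((x & 0xaa) >> 1);
        return x;
}

int main(void)
{
        /* 0xa0 (1010 0000) becomes 0x05 (0000 0101). */
        printf("%02x -> %02x\n", 0xa0, bit_reverse(0xa0));
        return 0;
}
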
-
-/*
-** Test for enet addresses in the first 32 bytes.
-*/
-static int
-de4x5_bad_srom(struct de4x5_private *lp)
-{
- int i, status = 0;
-
- for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
- if (!memcmp(&lp->srom, &enet_det[i], 3) &&
- !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) {
- if (i == 0) {
- status = SMC;
- } else if (i == 1) {
- status = ACCTON;
- }
- break;
- }
- }
-
- return status;
-}
-
-static void
-srom_repair(struct net_device *dev, int card)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- switch(card) {
- case SMC:
- memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
- memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
- memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
- lp->useSROM = true;
- break;
- }
-}
-
-/*
-** Assume that the IRQs do not follow the PCI spec - this seems to be
-** true so far (2 for 2).
-*/
-static int
-test_bad_enet(struct net_device *dev, int status)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i, tmp;
-
- for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
- if ((tmp == 0) || (tmp == 0x5fa)) {
- if ((lp->chipset == last.chipset) &&
- (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
- eth_addr_inc(last.addr);
- eth_hw_addr_set(dev, last.addr);
-
- if (!an_exception(lp)) {
- dev->irq = last.irq;
- }
-
- status = 0;
- }
- } else if (!status) {
- last.chipset = lp->chipset;
- last.bus = lp->bus_num;
- last.irq = dev->irq;
- for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
- }
-
- return status;
-}
-
-/*
-** List of board exceptions with correctly wired IRQs
-*/
-static int
-an_exception(struct de4x5_private *lp)
-{
- if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
- (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
- return -1;
- }
-
- return 0;
-}
-
-/*
-** SROM Read
-*/
-static short
-srom_rd(u_long addr, u_char offset)
-{
- sendto_srom(SROM_RD | SROM_SR, addr);
-
- srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
- srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
- srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
-
- return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
-}
-
-static void
-srom_latch(u_int command, u_long addr)
-{
- sendto_srom(command, addr);
- sendto_srom(command | DT_CLK, addr);
- sendto_srom(command, addr);
-}
-
-static void
-srom_command(u_int command, u_long addr)
-{
- srom_latch(command, addr);
- srom_latch(command, addr);
- srom_latch((command & 0x0000ff00) | DT_CS, addr);
-}
-
-static void
-srom_address(u_int command, u_long addr, u_char offset)
-{
- int i, a;
-
- a = offset << 2;
- for (i=0; i<6; i++, a <<= 1) {
- srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
- }
- udelay(1);
-
- i = (getfrom_srom(addr) >> 3) & 0x01;
-}
-
-static short
-srom_data(u_int command, u_long addr)
-{
- int i;
- short word = 0;
- s32 tmp;
-
- for (i=0; i<16; i++) {
- sendto_srom(command | DT_CLK, addr);
- tmp = getfrom_srom(addr);
- sendto_srom(command, addr);
-
- word = (word << 1) | ((tmp >> 3) & 0x01);
- }
-
- sendto_srom(command & 0x0000ff00, addr);
-
- return word;
-}
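
Taken together, srom_rd() and the helpers above bit-bang a Microwire-style (93C46-class) serial EEPROM read: srom_command() clocks out the start bit and the READ opcode, srom_address() shifts the 6-bit word address out MSB first (the offset << 2 pre-shift lines bit 5 of the offset up with bit 7 of the working value), and srom_data() clocks the 16 data bits back in MSB first. A small stand-alone sketch of that address bit ordering (illustrative only, not driver code):

#include <stdio.h>

/* Reproduce the bit ordering used by srom_address() above: offset << 2
 * puts offset bit 5 at bit 7, so six left shifts emit address bits
 * 5..0, most significant bit first. */
int main(void)
{
        unsigned int offset = 0x2a;     /* 6-bit word address 101010 */
        unsigned int a = offset << 2;
        int i;

        for (i = 0; i < 6; i++, a <<= 1)
                putchar((a & 0x80) ? '1' : '0');    /* prints 101010 */
        putchar('\n');
        return 0;
}
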
-
-/*
-static void
-srom_busy(u_int command, u_long addr)
-{
- sendto_srom((command & 0x0000ff00) | DT_CS, addr);
-
- while (!((getfrom_srom(addr) >> 3) & 0x01)) {
- mdelay(1);
- }
-
- sendto_srom(command & 0x0000ff00, addr);
-}
-*/
-
-static void
-sendto_srom(u_int command, u_long addr)
-{
- outl(command, addr);
- udelay(1);
-}
-
-static int
-getfrom_srom(u_long addr)
-{
- s32 tmp;
-
- tmp = inl(addr);
- udelay(1);
-
- return tmp;
-}
-
-static int
-srom_infoleaf_info(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i, count;
- u_char *p;
-
- /* Find the infoleaf decoder function that matches this chipset */
- for (i=0; i<INFOLEAF_SIZE; i++) {
- if (lp->chipset == infoleaf_array[i].chipset) break;
- }
- if (i == INFOLEAF_SIZE) {
- lp->useSROM = false;
- printk("%s: Cannot find correct chipset for SROM decoding!\n",
- dev->name);
- return -ENXIO;
- }
-
- lp->infoleaf_fn = infoleaf_array[i].fn;
-
- /* Find the information offset that this function should use */
- count = *((u_char *)&lp->srom + 19);
- p = (u_char *)&lp->srom + 26;
-
- if (count > 1) {
- for (i=count; i; --i, p+=3) {
- if (lp->device == *p) break;
- }
- if (i == 0) {
- lp->useSROM = false;
- printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
- dev->name, lp->device);
- return -ENXIO;
- }
- }
-
- lp->infoleaf_offset = get_unaligned_le16(p + 1);
-
- return 0;
-}
-
-/*
-** This routine loads any type 1 or 3 MII info into the mii device
-** struct and executes any type 5 code to reset PHY devices for this
-** controller.
-** The info for the MII devices will be valid since the index used
-** will follow the discovery process from MII address 1-31 then 0.
-*/
-static void
-srom_init(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- u_char count;
-
- p+=2;
- if (lp->chipset == DC21140) {
- lp->cache.gepc = (*p++ | GEP_CTRL);
- gep_wr(lp->cache.gepc, dev);
- }
-
- /* Block count */
- count = *p++;
-
- /* Jump the infoblocks to find types */
- for (;count; --count) {
- if (*p < 128) {
- p += COMPACT_LEN;
- } else if (*(p+1) == 5) {
- type5_infoblock(dev, 1, p);
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 4) {
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 3) {
- type3_infoblock(dev, 1, p);
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 2) {
- p += ((*p & BLOCK_LEN) + 1);
- } else if (*(p+1) == 1) {
- type1_infoblock(dev, 1, p);
- p += ((*p & BLOCK_LEN) + 1);
- } else {
- p += ((*p & BLOCK_LEN) + 1);
- }
- }
-}
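
A note on the walk above (and in the compact/typeN_infoblock() handlers below): each SROM infoblock stores its own length in the low bits of its first byte (masked with BLOCK_LEN) and its type in the second byte, while a first byte below 128 marks the older fixed-size COMPACT format - which is why every handler advances by either COMPACT_LEN or (*p & BLOCK_LEN) + 1.
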
-
-/*
-** A generic routine that writes GEP control, data and reset information
-** to the GEP register (21140) or csr15 GEP portion (2114[23]).
-*/
-static void
-srom_exec(struct net_device *dev, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- u_char count = (p ? *p++ : 0);
- u_short *w = (u_short *)p;
-
- if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
-
- if (lp->chipset != DC21140) RESET_SIA;
-
- while (count--) {
- gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
- *p++ : get_unaligned_le16(w++)), dev);
- mdelay(2); /* 2ms per action */
- }
-
- if (lp->chipset != DC21140) {
- outl(lp->cache.csr14, DE4X5_STRR);
- outl(lp->cache.csr13, DE4X5_SICR);
- }
-}
-
-/*
-** Basically this function is a NOP since it will never be called,
-** unless I implement the DC21041 SROM functions. There's no need
-** since the existing code will be satisfactory for all boards.
-*/
-static int
-dc21041_infoleaf(struct net_device *dev)
-{
- return DE4X5_AUTOSENSE_MS;
-}
-
-static int
-dc21140_infoleaf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char count = 0;
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- /* Read the connection type */
- p+=2;
-
- /* GEP control */
- lp->cache.gepc = (*p++ | GEP_CTRL);
-
- /* Block count */
- count = *p++;
-
- /* Recursively figure out the info blocks */
- if (*p < 128) {
- next_tick = dc_infoblock[COMPACT](dev, count, p);
- } else {
- next_tick = dc_infoblock[*(p+1)](dev, count, p);
- }
-
- if (lp->tcount == count) {
- lp->media = NC;
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tcount = 0;
- lp->tx_enable = false;
- }
-
- return next_tick & ~TIMER_CB;
-}
-
-static int
-dc21142_infoleaf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char count = 0;
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- /* Read the connection type */
- p+=2;
-
- /* Block count */
- count = *p++;
-
- /* Recursively figure out the info blocks */
- if (*p < 128) {
- next_tick = dc_infoblock[COMPACT](dev, count, p);
- } else {
- next_tick = dc_infoblock[*(p+1)](dev, count, p);
- }
-
- if (lp->tcount == count) {
- lp->media = NC;
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tcount = 0;
- lp->tx_enable = false;
- }
-
- return next_tick & ~TIMER_CB;
-}
-
-static int
-dc21143_infoleaf(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char count = 0;
- u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
- int next_tick = DE4X5_AUTOSENSE_MS;
-
- /* Read the connection type */
- p+=2;
-
- /* Block count */
- count = *p++;
-
- /* Recursively figure out the info blocks */
- if (*p < 128) {
- next_tick = dc_infoblock[COMPACT](dev, count, p);
- } else {
- next_tick = dc_infoblock[*(p+1)](dev, count, p);
- }
- if (lp->tcount == count) {
- lp->media = NC;
- if (lp->media != lp->c_media) {
- de4x5_dbg_media(dev);
- lp->c_media = lp->media;
- }
- lp->media = INIT;
- lp->tcount = 0;
- lp->tx_enable = false;
- }
-
- return next_tick & ~TIMER_CB;
-}
-
-/*
-** The compact infoblock is only designed for DC21140[A] chips, so
-** we'll reuse the dc21140m_autoconf function. Non MII media only.
-*/
-static int
-compact_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char flags, csr6;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+COMPACT_LEN) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
- } else {
- return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = COMPACT;
- lp->active = 0;
- gep_wr(lp->cache.gepc, dev);
- lp->infoblock_media = (*p++) & COMPACT_MC;
- lp->cache.gep = *p++;
- csr6 = *p++;
- flags = *p++;
-
- lp->asBitValid = (flags & 0x80) ? 0 : -1;
- lp->defMedium = (flags & 0x40) ? -1 : 0;
- lp->asBit = 1 << ((csr6 >> 1) & 0x07);
- lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
- lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc21140m_autoconf(dev);
-}
-
-/*
-** This block describes non MII media for the DC21140[A] only.
-*/
-static int
-type0_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 0;
- lp->active = 0;
- gep_wr(lp->cache.gepc, dev);
- p+=2;
- lp->infoblock_media = (*p++) & BLOCK0_MC;
- lp->cache.gep = *p++;
- csr6 = *p++;
- flags = *p++;
-
- lp->asBitValid = (flags & 0x80) ? 0 : -1;
- lp->defMedium = (flags & 0x40) ? -1 : 0;
- lp->asBit = 1 << ((csr6 >> 1) & 0x07);
- lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
- lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc21140m_autoconf(dev);
-}
-
-/* These functions are under construction! */
-
-static int
-type1_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- p += 2;
- if (lp->state == INITIALISED) {
- lp->ibn = 1;
- lp->active = *p++;
- lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
- lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
- lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ttm = get_unaligned_le16(p);
- return 0;
- } else if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 1;
- lp->active = *p;
- lp->infoblock_csr6 = OMR_MII_100;
- lp->useMII = true;
- lp->infoblock_media = ANS;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc21140m_autoconf(dev);
-}
-
-static int
-type2_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 2;
- lp->active = 0;
- p += 2;
- lp->infoblock_media = (*p) & MEDIA_CODE;
-
- if ((*p++) & EXT_FIELD) {
- lp->cache.csr13 = get_unaligned_le16(p); p += 2;
- lp->cache.csr14 = get_unaligned_le16(p); p += 2;
- lp->cache.csr15 = get_unaligned_le16(p); p += 2;
- } else {
- lp->cache.csr13 = CSR13;
- lp->cache.csr14 = CSR14;
- lp->cache.csr15 = CSR15;
- }
- lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
- lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16);
- lp->infoblock_csr6 = OMR_SIA;
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc2114x_autoconf(dev);
-}
-
-static int
-type3_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- p += 2;
- if (lp->state == INITIALISED) {
- lp->ibn = 3;
- lp->active = *p++;
- if (MOTO_SROM_BUG) lp->active = 0;
- /* if (MOTO_SROM_BUG) statement indicates lp->active could
- * be 8 (i.e. the size of array lp->phy) */
- if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy)))
- return -EINVAL;
- lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
- lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
- lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
- lp->phy[lp->active].mci = *p;
- return 0;
- } else if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 3;
- lp->active = *p;
- if (MOTO_SROM_BUG) lp->active = 0;
- lp->infoblock_csr6 = OMR_MII_100;
- lp->useMII = true;
- lp->infoblock_media = ANS;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc2114x_autoconf(dev);
-}
-
-static int
-type4_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- if ((lp->media == INIT) && (lp->timeout < 0)) {
- lp->ibn = 4;
- lp->active = 0;
- p+=2;
- lp->infoblock_media = (*p++) & MEDIA_CODE;
- lp->cache.csr13 = CSR13; /* Hard coded defaults */
- lp->cache.csr14 = CSR14;
- lp->cache.csr15 = CSR15;
- lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
- lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
- csr6 = *p++;
- flags = *p++;
-
- lp->asBitValid = (flags & 0x80) ? 0 : -1;
- lp->defMedium = (flags & 0x40) ? -1 : 0;
- lp->asBit = 1 << ((csr6 >> 1) & 0x07);
- lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
- lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
- lp->useMII = false;
-
- de4x5_switch_mac_port(dev);
- }
-
- return dc2114x_autoconf(dev);
-}
-
-/*
-** This block type provides information for resetting external devices
-** (chips) through the General Purpose Register.
-*/
-static int
-type5_infoblock(struct net_device *dev, u_char count, u_char *p)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_char len = (*p & BLOCK_LEN)+1;
-
- /* Recursively figure out the info blocks */
- if (--count > lp->tcount) {
- if (*(p+len) < 128) {
- return dc_infoblock[COMPACT](dev, count, p+len);
- } else {
- return dc_infoblock[*(p+len+1)](dev, count, p+len);
- }
- }
-
- /* Must be initializing to run this code */
- if ((lp->state == INITIALISED) || (lp->media == INIT)) {
- p+=2;
- lp->rst = p;
- srom_exec(dev, lp->rst);
- }
-
- return DE4X5_AUTOSENSE_MS;
-}
-
-/*
-** MII Read/Write
-*/
-
-static int
-mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
-{
- mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
- mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
- mii_wdata(MII_STRD, 4, ioaddr); /* SFD and Read operation */
- mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
- mii_address(phyreg, ioaddr); /* PHY Register to read */
- mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */
-
- return mii_rdata(ioaddr); /* Read data */
-}
-
-static void
-mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
-{
- mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
- mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
- mii_wdata(MII_STWR, 4, ioaddr); /* SFD and Write operation */
- mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
- mii_address(phyreg, ioaddr); /* PHY Register to write */
- mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
- data = mii_swap(data, 16); /* Swap data bit ordering */
- mii_wdata(data, 16, ioaddr); /* Write data */
-}
-
-static int
-mii_rdata(u_long ioaddr)
-{
- int i;
- s32 tmp = 0;
-
- for (i=0; i<16; i++) {
- tmp <<= 1;
- tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
- }
-
- return tmp;
-}
-
-static void
-mii_wdata(int data, int len, u_long ioaddr)
-{
- int i;
-
- for (i=0; i<len; i++) {
- sendto_mii(MII_MWR | MII_WR, data, ioaddr);
- data >>= 1;
- }
-}
-
-static void
-mii_address(u_char addr, u_long ioaddr)
-{
- int i;
-
- addr = mii_swap(addr, 5);
- for (i=0; i<5; i++) {
- sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
- addr >>= 1;
- }
-}
-
-static void
-mii_ta(u_long rw, u_long ioaddr)
-{
- if (rw == MII_STWR) {
- sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
- sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
- } else {
- getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
- }
-}
-
-static int
-mii_swap(int data, int len)
-{
- int i, tmp = 0;
-
- for (i=0; i<len; i++) {
- tmp <<= 1;
- tmp |= (data & 1);
- data >>= 1;
- }
-
- return tmp;
-}
-
-static void
-sendto_mii(u32 command, int data, u_long ioaddr)
-{
- u32 j;
-
- j = (data & 1) << 17;
- outl(command | j, ioaddr);
- udelay(1);
- outl(command | MII_MDC | j, ioaddr);
- udelay(1);
-}
-
-static int
-getfrom_mii(u32 command, u_long ioaddr)
-{
- outl(command, ioaddr);
- udelay(1);
- outl(command | MII_MDC, ioaddr);
- udelay(1);
-
- return (inl(ioaddr) >> 19) & 1;
-}
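
mii_rd() and mii_wr() above bit-bang a standard IEEE 802.3 clause-22 management frame over MDIO: a 34-bit preamble (as the comments note), a 4-bit start/opcode field, the 5-bit PHY and register addresses sent MSB first (hence the mii_swap() calls before the LSB-first shift loops), a turnaround, then 16 data bits. A stand-alone sketch of the 14 header bits of a clause-22 read, using the generic field values rather than the driver's MII_* constants:

#include <stdio.h>
#include <stdint.h>

/* Build the 14 bits that follow the preamble of a clause-22 read:
 * ST(01) OP(10 = read) PHYAD[4:0] REGAD[4:0], transmitted MSB first. */
static uint16_t mdio_read_header(unsigned int phyad, unsigned int regad)
{
        return (0x1u << 12)             /* ST  = 01           */
             | (0x2u << 10)             /* OP  = 10 (read)    */
             | ((phyad & 0x1f) << 5)    /* PHY address        */
             | (regad & 0x1f);          /* register address   */
}

int main(void)
{
        /* e.g. PHY 1, register 2 (PHY identifier high word) */
        printf("header = 0x%04x\n", mdio_read_header(1, 2));
        return 0;
}
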
-
-/*
-** Here are 3 ways to calculate the OUI from the ID registers.
-*/
-static int
-mii_get_oui(u_char phyaddr, u_long ioaddr)
-{
-/*
- union {
- u_short reg;
- u_char breg[2];
- } a;
- int i, r2, r3, ret=0;*/
- int r2;
-
- /* Read r2 and r3 */
- r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
- mii_rd(MII_ID1, phyaddr, ioaddr);
- /* SEEQ and Cypress way * /
- / * Shuffle r2 and r3 * /
- a.reg=0;
- r3 = ((r3>>10)|(r2<<6))&0x0ff;
- r2 = ((r2>>2)&0x3fff);
-
- / * Bit reverse r3 * /
- for (i=0;i<8;i++) {
- ret<<=1;
- ret |= (r3&1);
- r3>>=1;
- }
-
- / * Bit reverse r2 * /
- for (i=0;i<16;i++) {
- a.reg<<=1;
- a.reg |= (r2&1);
- r2>>=1;
- }
-
- / * Swap r2 bytes * /
- i=a.breg[0];
- a.breg[0]=a.breg[1];
- a.breg[1]=i;
-
- return (a.reg<<8)|ret; */ /* SEEQ and Cypress way */
-/* return (r2<<6)|(u_int)(r3>>10); */ /* NATIONAL and BROADCOM way */
- return r2; /* (I did it) My way */
-}
-
-/*
-** The SROM spec forces us to search addresses [1-31 0]. Bummer.
-*/
-static int
-mii_get_phy(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- int i, j, k, n, limit=ARRAY_SIZE(phy_info);
- int id;
-
- lp->active = 0;
- lp->useMII = true;
-
- /* Search the MII address space for possible PHY devices */
- for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
- lp->phy[lp->active].addr = i;
- if (i==0) n++; /* Count cycles */
- while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
- id = mii_get_oui(i, DE4X5_MII);
- if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
- for (j=0; j<limit; j++) { /* Search PHY table */
- if (id != phy_info[j].id) continue; /* ID match? */
- for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
- if (k < DE4X5_MAX_PHY) {
- memcpy((char *)&lp->phy[k],
- (char *)&phy_info[j], sizeof(struct phy_table));
- lp->phy[k].addr = i;
- lp->mii_cnt++;
- lp->active++;
- } else {
- goto purgatory; /* Stop the search */
- }
- break;
- }
- if ((j == limit) && (i < DE4X5_MAX_MII)) {
- for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
- if (k < DE4X5_MAX_PHY) {
- lp->phy[k].addr = i;
- lp->phy[k].id = id;
- lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
- lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
- lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
- lp->mii_cnt++;
- lp->active++;
- printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
- j = de4x5_debug;
- de4x5_debug |= DEBUG_MII;
- de4x5_dbg_mii(dev, k);
- de4x5_debug = j;
- printk("\n");
- } else {
- goto purgatory;
- }
- }
- }
- purgatory:
- lp->active = 0;
- if (lp->phy[0].id) { /* Reset the PHY devices */
- for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/
- mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
- while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
-
- de4x5_dbg_mii(dev, k);
- }
- }
- if (!lp->mii_cnt) lp->useMII = false;
-
- return lp->mii_cnt;
-}
-
-static char *
-build_setup_frame(struct net_device *dev, int mode)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
- char *pa = lp->setup_frame;
-
- /* Initialise the setup frame */
- if (mode == ALL) {
- memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
- }
-
- if (lp->setup_f == HASH_PERF) {
- for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
- *(pa + i) = dev->dev_addr[i]; /* Host address */
- if (i & 0x01) pa += 2;
- }
- *(lp->setup_frame + (DE4X5_HASH_TABLE_LEN >> 3) - 3) = 0x80;
- } else {
- for (i=0; i<ETH_ALEN; i++) { /* Host address */
- *(pa + (i&1)) = dev->dev_addr[i];
- if (i & 0x01) pa += 4;
- }
- for (i=0; i<ETH_ALEN; i++) { /* Broadcast address */
- *(pa + (i&1)) = (char) 0xff;
- if (i & 0x01) pa += 4;
- }
- }
-
- return pa; /* Points to the next entry */
-}
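
A note on the layout built above: in perfect-filtering mode each 6-byte address (first the host address, then the broadcast address) is emitted as three 16-bit pieces, one per 32-bit setup-frame slot with only the low two bytes of each slot used, which is the format the 21x4x expects when the setup frame is later queued with load_packet(); in hash/perfect mode only the host address and a single hash-table bit are filled in here.
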
-
-static void
-disable_ast(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- del_timer_sync(&lp->timer);
-}
-
-static long
-de4x5_switch_mac_port(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
- s32 omr;
-
- STOP_DE4X5;
-
- /* Assert the OMR_PS bit in CSR6 */
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
- OMR_FDX));
- omr |= lp->infoblock_csr6;
- if (omr & OMR_PS) omr |= OMR_HBD;
- outl(omr, DE4X5_OMR);
-
- /* Soft Reset */
- RESET_DE4X5;
-
- /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */
- if (lp->chipset == DC21140) {
- gep_wr(lp->cache.gepc, dev);
- gep_wr(lp->cache.gep, dev);
- } else if ((lp->chipset & ~0x0ff) == DC2114x) {
- reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
- }
-
- /* Restore CSR6 */
- outl(omr, DE4X5_OMR);
-
- /* Reset CSR8 */
- inl(DE4X5_MFC);
-
- return omr;
-}
-
-static void
-gep_wr(s32 data, struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->chipset == DC21140) {
- outl(data, DE4X5_GEP);
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
- }
-}
-
-static int
-gep_rd(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (lp->chipset == DC21140) {
- return inl(DE4X5_GEP);
- } else if ((lp->chipset & ~0x00ff) == DC2114x) {
- return inl(DE4X5_SIGR) & 0x000fffff;
- }
-
- return 0;
-}
-
-static void
-yawn(struct net_device *dev, int state)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;
-
- if(lp->bus == EISA) {
- switch(state) {
- case WAKEUP:
- outb(WAKEUP, PCI_CFPM);
- mdelay(10);
- break;
-
- case SNOOZE:
- outb(SNOOZE, PCI_CFPM);
- break;
-
- case SLEEP:
- outl(0, DE4X5_SICR);
- outb(SLEEP, PCI_CFPM);
- break;
- }
- } else {
- struct pci_dev *pdev = to_pci_dev (lp->gendev);
- switch(state) {
- case WAKEUP:
- pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
- mdelay(10);
- break;
-
- case SNOOZE:
- pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE);
- break;
-
- case SLEEP:
- outl(0, DE4X5_SICR);
- pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP);
- break;
- }
- }
-}
-
-static void
-de4x5_parse_params(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- char *p, *q, t;
-
- lp->params.fdx = false;
- lp->params.autosense = AUTO;
-
- if (args == NULL) return;
-
- if ((p = strstr(args, dev->name))) {
- if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
- t = *q;
- *q = '\0';
-
- if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
-
- if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
- if (strstr(p, "TP_NW")) {
- lp->params.autosense = TP_NW;
- } else if (strstr(p, "TP")) {
- lp->params.autosense = TP;
- } else if (strstr(p, "BNC_AUI")) {
- lp->params.autosense = BNC;
- } else if (strstr(p, "BNC")) {
- lp->params.autosense = BNC;
- } else if (strstr(p, "AUI")) {
- lp->params.autosense = AUI;
- } else if (strstr(p, "10Mb")) {
- lp->params.autosense = _10Mb;
- } else if (strstr(p, "100Mb")) {
- lp->params.autosense = _100Mb;
- } else if (strstr(p, "AUTO")) {
- lp->params.autosense = AUTO;
- }
- }
- *q = t;
- }
-}
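
Since the parser above relies purely on strstr(), only the substrings matter: a module argument such as args="eth0:fdx autosense=TP" (separator shown for illustration) would enable full duplex and force TP sensing for eth0, with everything from the next "eth" substring onwards treated as belonging to another interface.
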
-
-static void
-de4x5_dbg_open(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- int i;
-
- if (de4x5_debug & DEBUG_OPEN) {
- printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
- printk("\tphysical address: %pM\n", dev->dev_addr);
- printk("Descriptor head addresses:\n");
- printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
- printk("Descriptor addresses:\nRX: ");
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
- }
- }
- printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
- printk("TX: ");
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
- }
- }
- printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
- printk("Descriptor buffers:\nRX: ");
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf));
- }
- }
- printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf));
- printk("TX: ");
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
- }
- }
- printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
- printk("Ring size:\nRX: %d\nTX: %d\n",
- (short)lp->rxRingSize,
- (short)lp->txRingSize);
- }
-}
-
-static void
-de4x5_dbg_mii(struct net_device *dev, int k)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- u_long iobase = dev->base_addr;
-
- if (de4x5_debug & DEBUG_MII) {
- printk("\nMII device address: %d\n", lp->phy[k].addr);
- printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
- printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
- printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
- printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
- if (lp->phy[k].id != BROADCOM_T4) {
- printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
- printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
- }
- printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
- if (lp->phy[k].id != BROADCOM_T4) {
- printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
- printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
- } else {
- printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
- }
- }
-}
-
-static void
-de4x5_dbg_media(struct net_device *dev)
-{
- struct de4x5_private *lp = netdev_priv(dev);
-
- if (lp->media != lp->c_media) {
- if (de4x5_debug & DEBUG_MEDIA) {
- printk("%s: media is %s%s\n", dev->name,
- (lp->media == NC ? "unconnected, link down or incompatible connection" :
- (lp->media == TP ? "TP" :
- (lp->media == ANS ? "TP/Nway" :
- (lp->media == BNC ? "BNC" :
- (lp->media == AUI ? "AUI" :
- (lp->media == BNC_AUI ? "BNC/AUI" :
- (lp->media == EXT_SIA ? "EXT SIA" :
- (lp->media == _100Mb ? "100Mb/s" :
- (lp->media == _10Mb ? "10Mb/s" :
- "???"
- ))))))))), (lp->fdx?" full duplex.":"."));
- }
- lp->c_media = lp->media;
- }
-}
-
-static void
-de4x5_dbg_srom(struct de4x5_srom *p)
-{
- int i;
-
- if (de4x5_debug & DEBUG_SROM) {
- printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
- printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id));
- printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
- printk("SROM version: %02x\n", (u_char)(p->version));
- printk("# controllers: %02x\n", (u_char)(p->num_controllers));
-
- printk("Hardware Address: %pM\n", p->ieee_addr);
- printk("CRC checksum: %04x\n", (u_short)(p->chksum));
- for (i=0; i<64; i++) {
- printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
- }
- }
-}
-
-static void
-de4x5_dbg_rx(struct sk_buff *skb, int len)
-{
- int i, j;
-
- if (de4x5_debug & DEBUG_RX) {
- printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n",
- skb->data, &skb->data[6],
- (u_char)skb->data[12],
- (u_char)skb->data[13],
- len);
- for (j=0; len>0;j+=16, len-=16) {
- printk(" %03x: ",j);
- for (i=0; i<16 && i<len; i++) {
- printk("%02x ",(u_char)skb->data[i+j]);
- }
- printk("\n");
- }
- }
-}
-
-/*
-** Perform the driver-private IOCTL calls here. Some are privileged operations
-** for which CAP_NET_ADMIN is required. In the normal course of events
-** this function is only used for my testing.
-*/
-static int
-de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
-{
- struct de4x5_private *lp = netdev_priv(dev);
- struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
- u_long iobase = dev->base_addr;
- int i, j, status = 0;
- s32 omr;
- union {
- u8 addr[144];
- u16 sval[72];
- u32 lval[36];
- } tmp;
- u_long flags = 0;
-
- if (cmd != SIOCDEVPRIVATE || in_compat_syscall())
- return -EOPNOTSUPP;
-
- switch(ioc->cmd) {
- case DE4X5_GET_HWADDR: /* Get the hardware address */
- ioc->len = ETH_ALEN;
- for (i=0; i<ETH_ALEN; i++) {
- tmp.addr[i] = dev->dev_addr[i];
- }
- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
- break;
-
- case DE4X5_SET_HWADDR: /* Set the hardware address */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
- if (netif_queue_stopped(dev))
- return -EBUSY;
- netif_stop_queue(dev);
- eth_hw_addr_set(dev, tmp.addr);
- build_setup_frame(dev, PHYS_ADDR_ONLY);
- /* Set up the descriptor and give ownership to the card */
- load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
- SETUP_FRAME_LEN, (struct sk_buff *)1);
- lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
- outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- netif_wake_queue(dev); /* Unlock the TX ring */
- break;
-
- case DE4X5_SAY_BOO: /* Say "Boo!" to the kernel log file */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- printk("%s: Boo!\n", dev->name);
- break;
-
- case DE4X5_MCA_EN: /* Enable pass all multicast addressing */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- omr = inl(DE4X5_OMR);
- omr |= OMR_PM;
- outl(omr, DE4X5_OMR);
- break;
-
- case DE4X5_GET_STATS: /* Get the driver statistics */
- {
- struct pkt_stats statbuf;
- ioc->len = sizeof(statbuf);
- spin_lock_irqsave(&lp->lock, flags);
- memcpy(&statbuf, &lp->pktStats, ioc->len);
- spin_unlock_irqrestore(&lp->lock, flags);
- if (copy_to_user(ioc->data, &statbuf, ioc->len))
- return -EFAULT;
- break;
- }
- case DE4X5_CLR_STATS: /* Zero out the driver statistics */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- spin_lock_irqsave(&lp->lock, flags);
- memset(&lp->pktStats, 0, sizeof(lp->pktStats));
- spin_unlock_irqrestore(&lp->lock, flags);
- break;
-
- case DE4X5_GET_OMR: /* Get the OMR Register contents */
- tmp.addr[0] = inl(DE4X5_OMR);
- if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT;
- break;
-
- case DE4X5_SET_OMR: /* Set the OMR Register contents */
- if (!capable(CAP_NET_ADMIN)) return -EPERM;
- if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT;
- outl(tmp.addr[0], DE4X5_OMR);
- break;
-
- case DE4X5_GET_REG: /* Get the DE4X5 Registers */
- j = 0;
- tmp.lval[0] = inl(DE4X5_STS); j+=4;
- tmp.lval[1] = inl(DE4X5_BMR); j+=4;
- tmp.lval[2] = inl(DE4X5_IMR); j+=4;
- tmp.lval[3] = inl(DE4X5_OMR); j+=4;
- tmp.lval[4] = inl(DE4X5_SISR); j+=4;
- tmp.lval[5] = inl(DE4X5_SICR); j+=4;
- tmp.lval[6] = inl(DE4X5_STRR); j+=4;
- tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
- ioc->len = j;
- if (copy_to_user(ioc->data, tmp.lval, ioc->len))
- return -EFAULT;
- break;
-
-#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
-/*
- case DE4X5_DUMP:
- j = 0;
- tmp.addr[j++] = dev->irq;
- for (i=0; i<ETH_ALEN; i++) {
- tmp.addr[j++] = dev->dev_addr[i];
- }
- tmp.addr[j++] = lp->rxRingSize;
- tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
- tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
-
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
- }
- }
- tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
- }
- }
- tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
-
- for (i=0;i<lp->rxRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
- }
- }
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
- for (i=0;i<lp->txRingSize-1;i++){
- if (i < 3) {
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
- }
- }
- tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
-
- for (i=0;i<lp->rxRingSize;i++){
- tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4;
- }
- for (i=0;i<lp->txRingSize;i++){
- tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4;
- }
-
- tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_RRBA); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_TRBA); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
- tmp.lval[j>>2] = lp->chipset; j+=4;
- if (lp->chipset == DC21140) {
- tmp.lval[j>>2] = gep_rd(dev); j+=4;
- } else {
- tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
- tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
- }
- tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
- if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
- tmp.lval[j>>2] = lp->active; j+=4;
- tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_ID1,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- if (lp->phy[lp->active].id != BROADCOM_T4) {
- tmp.lval[j>>2]=mii_rd(MII_ANA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(MII_ANLPA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- }
- tmp.lval[j>>2]=mii_rd(0x10,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- if (lp->phy[lp->active].id != BROADCOM_T4) {
- tmp.lval[j>>2]=mii_rd(0x11,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- tmp.lval[j>>2]=mii_rd(0x12,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- } else {
- tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
- }
- }
-
- tmp.addr[j++] = lp->txRingSize;
- tmp.addr[j++] = netif_queue_stopped(dev);
-
- ioc->len = j;
- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
- break;
-
-*/
- default:
- return -EOPNOTSUPP;
- }
-
- return status;
-}
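
For illustration, a hypothetical user-space sketch of driving the DE4X5_GET_HWADDR private ioctl handled above. The cmd/len/data field names are taken from the handler; the actual struct de4x5_ioctl definition and command values live in de4x5.h and are only assumed to be available here:

/* Hypothetical sketch - struct de4x5_ioctl and DE4X5_GET_HWADDR are
 * assumed to come from the driver's de4x5.h header. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <net/if.h>
#include "de4x5.h"

int main(void)
{
        unsigned char addr[6];
        struct ifreq ifr;
        struct de4x5_ioctl *ioc = (struct de4x5_ioctl *)&ifr.ifr_ifru;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int i;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ioc->cmd = DE4X5_GET_HWADDR;    /* private command, see de4x5.h */
        ioc->len = sizeof(addr);
        ioc->data = addr;               /* user buffer the driver copies into */

        if (ioctl(fd, SIOCDEVPRIVATE, &ifr) == 0)
                for (i = 0; i < 6; i++)
                        printf("%02x%c", addr[i], i == 5 ? '\n' : ':');
        close(fd);
        return 0;
}
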
-
-static int __init de4x5_module_init (void)
-{
- int err = 0;
-
-#ifdef CONFIG_PCI
- err = pci_register_driver(&de4x5_pci_driver);
-#endif
-#ifdef CONFIG_EISA
- err |= eisa_driver_register (&de4x5_eisa_driver);
-#endif
-
- return err;
-}
-
-static void __exit de4x5_module_exit (void)
-{
-#ifdef CONFIG_PCI
- pci_unregister_driver (&de4x5_pci_driver);
-#endif
-#ifdef CONFIG_EISA
- eisa_driver_unregister (&de4x5_eisa_driver);
-#endif
-}
-
-module_init (de4x5_module_init);
-module_exit (de4x5_module_exit);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.h b/drivers/net/ethernet/dec/tulip/de4x5.h
deleted file mode 100644
index 1bfdc9b117f6..000000000000
--- a/drivers/net/ethernet/dec/tulip/de4x5.h
+++ /dev/null
@@ -1,1017 +0,0 @@
-/*
- Copyright 1994 Digital Equipment Corporation.
-
- This software may be used and distributed according to the terms of the
- GNU General Public License, incorporated herein by reference.
-
- The author may be reached as davies@wanton.lkg.dec.com or Digital
- Equipment Corporation, 550 King Street, Littleton MA 01460.
-
- =========================================================================
-*/
-
-/*
-** DC21040 CSR<1..15> Register Address Map
-*/
-#define DE4X5_BMR iobase+(0x000 << lp->bus) /* Bus Mode Register */
-#define DE4X5_TPD iobase+(0x008 << lp->bus) /* Transmit Poll Demand Reg */
-#define DE4X5_RPD iobase+(0x010 << lp->bus) /* Receive Poll Demand Reg */
-#define DE4X5_RRBA iobase+(0x018 << lp->bus) /* RX Ring Base Address Reg */
-#define DE4X5_TRBA iobase+(0x020 << lp->bus) /* TX Ring Base Address Reg */
-#define DE4X5_STS iobase+(0x028 << lp->bus) /* Status Register */
-#define DE4X5_OMR iobase+(0x030 << lp->bus) /* Operation Mode Register */
-#define DE4X5_IMR iobase+(0x038 << lp->bus) /* Interrupt Mask Register */
-#define DE4X5_MFC iobase+(0x040 << lp->bus) /* Missed Frame Counter */
-#define DE4X5_APROM iobase+(0x048 << lp->bus) /* Ethernet Address PROM */
-#define DE4X5_BROM iobase+(0x048 << lp->bus) /* Boot ROM Register */
-#define DE4X5_SROM iobase+(0x048 << lp->bus) /* Serial ROM Register */
-#define DE4X5_MII iobase+(0x048 << lp->bus) /* MII Interface Register */
-#define DE4X5_DDR iobase+(0x050 << lp->bus) /* Data Diagnostic Register */
-#define DE4X5_FDR iobase+(0x058 << lp->bus) /* Full Duplex Register */
-#define DE4X5_GPT iobase+(0x058 << lp->bus) /* General Purpose Timer Reg.*/
-#define DE4X5_GEP iobase+(0x060 << lp->bus) /* General Purpose Register */
-#define DE4X5_SISR iobase+(0x060 << lp->bus) /* SIA Status Register */
-#define DE4X5_SICR iobase+(0x068 << lp->bus) /* SIA Connectivity Register */
-#define DE4X5_STRR iobase+(0x070 << lp->bus) /* SIA TX/RX Register */
-#define DE4X5_SIGR iobase+(0x078 << lp->bus) /* SIA General Register */
-
-/*
-** EISA Register Address Map
-*/
-#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
-#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
-#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
-#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
-#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
-#define EISA_CR iobase+0x0c84 /* EISA Control Register */
-#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */
-#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */
-#define EISA_REG2 iobase+0x0c8a /* EISA Configuration Register 2 */
-#define EISA_REG3 iobase+0x0c8f /* EISA Configuration Register 3 */
-#define EISA_APROM iobase+0x0c90 /* Ethernet Address PROM */
-
-/*
-** PCI/EISA Configuration Registers Address Map
-*/
-#define PCI_CFID iobase+0x0008 /* PCI Configuration ID Register */
-#define PCI_CFCS iobase+0x000c /* PCI Command/Status Register */
-#define PCI_CFRV iobase+0x0018 /* PCI Revision Register */
-#define PCI_CFLT iobase+0x001c /* PCI Latency Timer Register */
-#define PCI_CBIO iobase+0x0028 /* PCI Base I/O Register */
-#define PCI_CBMA iobase+0x002c /* PCI Base Memory Address Register */
-#define PCI_CBER iobase+0x0030 /* PCI Expansion ROM Base Address Reg. */
-#define PCI_CFIT iobase+0x003c /* PCI Configuration Interrupt Register */
-#define PCI_CFDA iobase+0x0040 /* PCI Driver Area Register */
-#define PCI_CFDD iobase+0x0041 /* PCI Driver Dependent Area Register */
-#define PCI_CFPM iobase+0x0043 /* PCI Power Management Area Register */
-
-/*
-** EISA Configuration Register 0 bit definitions
-*/
-#define ER0_BSW 0x80 /* EISA Bus Slave Width, 1: 32 bits */
-#define ER0_BMW 0x40 /* EISA Bus Master Width, 1: 32 bits */
-#define ER0_EPT 0x20 /* EISA PREEMPT Time, 0: 23 BCLKs */
-#define ER0_ISTS 0x10 /* Interrupt Status (X) */
-#define ER0_LI 0x08 /* Latch Interrupts */
-#define ER0_INTL 0x06 /* INTerrupt Level */
-#define ER0_INTT 0x01 /* INTerrupt Type, 0: Level, 1: Edge */
-
-/*
-** EISA Configuration Register 1 bit definitions
-*/
-#define ER1_IAM 0xe0 /* ISA Address Mode */
-#define ER1_IAE 0x10 /* ISA Addressing Enable */
-#define ER1_UPIN 0x0f /* User Pins */
-
-/*
-** EISA Configuration Register 2 bit definitions
-*/
-#define ER2_BRS 0xc0 /* Boot ROM Size */
-#define ER2_BRA 0x3c /* Boot ROM Address <16:13> */
-
-/*
-** EISA Configuration Register 3 bit definitions
-*/
-#define ER3_BWE 0x40 /* Burst Write Enable */
-#define ER3_BRE 0x04 /* Burst Read Enable */
-#define ER3_LSR 0x02 /* Local Software Reset */
-
-/*
-** PCI Configuration ID Register (PCI_CFID). The Device IDs are left
-** shifted 8 bits to allow detection of DC21142 and DC21143 variants with
-** the configuration revision register step number.
-*/
-#define CFID_DID 0xff00 /* Device ID */
-#define CFID_VID 0x00ff /* Vendor ID */
-#define DC21040_DID 0x0200 /* Unique Device ID # */
-#define DC21040_VID 0x1011 /* DC21040 Manufacturer */
-#define DC21041_DID 0x1400 /* Unique Device ID # */
-#define DC21041_VID 0x1011 /* DC21041 Manufacturer */
-#define DC21140_DID 0x0900 /* Unique Device ID # */
-#define DC21140_VID 0x1011 /* DC21140 Manufacturer */
-#define DC2114x_DID 0x1900 /* Unique Device ID # */
-#define DC2114x_VID 0x1011 /* DC2114[23] Manufacturer */
-
-/*
-** Chipset defines
-*/
-#define DC21040 DC21040_DID
-#define DC21041 DC21041_DID
-#define DC21140 DC21140_DID
-#define DC2114x DC2114x_DID
-#define DC21142 (DC2114x_DID | 0x0010)
-#define DC21143 (DC2114x_DID | 0x0030)
-#define DC2114x_BRK 0x0020 /* CFRV break between DC21142 & DC21143 */
-
-#define is_DC21040 ((vendor == DC21040_VID) && (device == DC21040_DID))
-#define is_DC21041 ((vendor == DC21041_VID) && (device == DC21041_DID))
-#define is_DC21140 ((vendor == DC21140_VID) && (device == DC21140_DID))
-#define is_DC2114x ((vendor == DC2114x_VID) && (device == DC2114x_DID))
-#define is_DC21142 ((vendor == DC2114x_VID) && (device == DC21142))
-#define is_DC21143 ((vendor == DC2114x_VID) && (device == DC21143))
-
-/*
-** PCI Configuration Command/Status Register (PCI_CFCS)
-*/
-#define CFCS_DPE 0x80000000 /* Detected Parity Error (S) */
-#define CFCS_SSE 0x40000000 /* Signal System Error (S) */
-#define CFCS_RMA 0x20000000 /* Receive Master Abort (S) */
-#define CFCS_RTA 0x10000000 /* Receive Target Abort (S) */
-#define CFCS_DST 0x06000000 /* DEVSEL Timing (S) */
-#define CFCS_DPR 0x01000000 /* Data Parity Report (S) */
-#define CFCS_FBB 0x00800000 /* Fast Back-To-Back (S) */
-#define CFCS_SEE 0x00000100 /* System Error Enable (C) */
-#define CFCS_PER 0x00000040 /* Parity Error Response (C) */
-#define CFCS_MO 0x00000004 /* Master Operation (C) */
-#define CFCS_MSA 0x00000002 /* Memory Space Access (C) */
-#define CFCS_IOSA 0x00000001 /* I/O Space Access (C) */
-
-/*
-** PCI Configuration Revision Register (PCI_CFRV)
-*/
-#define CFRV_BC 0xff000000 /* Base Class */
-#define CFRV_SC 0x00ff0000 /* Subclass */
-#define CFRV_RN 0x000000f0 /* Revision Number */
-#define CFRV_SN 0x0000000f /* Step Number */
-#define BASE_CLASS 0x02000000 /* Indicates Network Controller */
-#define SUB_CLASS 0x00000000 /* Indicates Ethernet Controller */
-#define STEP_NUMBER 0x00000020 /* Increments for future chips */
-#define REV_NUMBER 0x00000003 /* 0x00, 0x01, 0x02, 0x03: Rev in Step */
-#define CFRV_MASK 0xffff0000 /* Register mask */
-
-/*
-** PCI Configuration Latency Timer Register (PCI_CFLT)
-*/
-#define CFLT_BC 0x0000ff00 /* Latency Timer bits */
-
-/*
-** PCI Configuration Base I/O Address Register (PCI_CBIO)
-*/
-#define CBIO_MASK -128 /* Base I/O Address Mask */
-#define CBIO_IOSI 0x00000001 /* I/O Space Indicator (RO, value is 1) */
-
-/*
-** PCI Configuration Card Information Structure Register (PCI_CCIS)
-*/
-#define CCIS_ROMI 0xf0000000 /* ROM Image */
-#define CCIS_ASO 0x0ffffff8 /* Address Space Offset */
-#define CCIS_ASI 0x00000007 /* Address Space Indicator */
-
-/*
-** PCI Configuration Subsystem ID Register (PCI_SSID)
-*/
-#define SSID_SSID 0xffff0000 /* Subsystem ID */
-#define SSID_SVID 0x0000ffff /* Subsystem Vendor ID */
-
-/*
-** PCI Configuration Expansion ROM Base Address Register (PCI_CBER)
-*/
-#define CBER_MASK 0xfffffc00 /* Expansion ROM Base Address Mask */
-#define CBER_ROME 0x00000001 /* ROM Enable */
-
-/*
-** PCI Configuration Interrupt Register (PCI_CFIT)
-*/
-#define CFIT_MXLT 0xff000000 /* MAX_LAT Value (0.25us periods) */
-#define CFIT_MNGT 0x00ff0000 /* MIN_GNT Value (0.25us periods) */
-#define CFIT_IRQP 0x0000ff00 /* Interrupt Pin */
-#define CFIT_IRQL 0x000000ff /* Interrupt Line */
-
-/*
-** PCI Configuration Power Management Area Register (PCI_CFPM)
-*/
-#define SLEEP 0x80 /* Power Saving Sleep Mode */
-#define SNOOZE 0x40 /* Power Saving Snooze Mode */
-#define WAKEUP 0x00 /* Power Saving Wakeup */
-
-#define PCI_CFDA_DSU 0x41 /* 8 bit Configuration Space Address */
-#define PCI_CFDA_PSM 0x43 /* 8 bit Configuration Space Address */
-
-/*
-** DC21040 Bus Mode Register (DE4X5_BMR)
-*/
-#define BMR_RML 0x00200000 /* [Memory] Read Multiple */
-#define BMR_DBO 0x00100000 /* Descriptor Byte Ordering (Endian) */
-#define BMR_TAP 0x000e0000 /* Transmit Automatic Polling */
-#define BMR_DAS 0x00010000 /* Diagnostic Address Space */
-#define BMR_CAL 0x0000c000 /* Cache Alignment */
-#define BMR_PBL 0x00003f00 /* Programmable Burst Length */
-#define BMR_BLE 0x00000080 /* Big/Little Endian */
-#define BMR_DSL 0x0000007c /* Descriptor Skip Length */
-#define BMR_BAR 0x00000002 /* Bus ARbitration */
-#define BMR_SWR 0x00000001 /* Software Reset */
-
- /* Timings here are for 10BASE-T/AUI only*/
-#define TAP_NOPOLL 0x00000000 /* No automatic polling */
-#define TAP_200US 0x00020000 /* TX automatic polling every 200us */
-#define TAP_800US 0x00040000 /* TX automatic polling every 800us */
-#define TAP_1_6MS 0x00060000 /* TX automatic polling every 1.6ms */
-#define TAP_12_8US 0x00080000 /* TX automatic polling every 12.8us */
-#define TAP_25_6US 0x000a0000 /* TX automatic polling every 25.6us */
-#define TAP_51_2US 0x000c0000 /* TX automatic polling every 51.2us */
-#define TAP_102_4US 0x000e0000 /* TX automatic polling every 102.4us */
-
-#define CAL_NOUSE 0x00000000 /* Not used */
-#define CAL_8LONG 0x00004000 /* 8-longword alignment */
-#define CAL_16LONG 0x00008000 /* 16-longword alignment */
-#define CAL_32LONG 0x0000c000 /* 32-longword alignment */
-
-#define PBL_0 0x00000000 /* DMA burst length = amount in RX FIFO */
-#define PBL_1 0x00000100 /* 1 longword DMA burst length */
-#define PBL_2 0x00000200 /* 2 longwords DMA burst length */
-#define PBL_4 0x00000400 /* 4 longwords DMA burst length */
-#define PBL_8 0x00000800 /* 8 longwords DMA burst length */
-#define PBL_16 0x00001000 /* 16 longwords DMA burst length */
-#define PBL_32 0x00002000 /* 32 longwords DMA burst length */
-
-#define DSL_0 0x00000000 /* 0 longword / descriptor */
-#define DSL_1 0x00000004 /* 1 longword / descriptor */
-#define DSL_2 0x00000008 /* 2 longwords / descriptor */
-#define DSL_4 0x00000010 /* 4 longwords / descriptor */
-#define DSL_8 0x00000020 /* 8 longwords / descriptor */
-#define DSL_16 0x00000040 /* 16 longwords / descriptor */
-#define DSL_32 0x00000080 /* 32 longwords / descriptor */
-
-/*
-** DC21040 Transmit Poll Demand Register (DE4X5_TPD)
-*/
-#define TPD 0x00000001 /* Transmit Poll Demand */
-
-/*
-** DC21040 Receive Poll Demand Register (DE4X5_RPD)
-*/
-#define RPD 0x00000001 /* Receive Poll Demand */
-
-/*
-** DC21040 Receive Ring Base Address Register (DE4X5_RRBA)
-*/
-#define RRBA 0xfffffffc /* RX Descriptor List Start Address */
-
-/*
-** DC21040 Transmit Ring Base Address Register (DE4X5_TRBA)
-*/
-#define TRBA 0xfffffffc /* TX Descriptor List Start Address */
-
-/*
-** Status Register (DE4X5_STS)
-*/
-#define STS_GPI 0x04000000 /* General Purpose Port Interrupt */
-#define STS_BE 0x03800000 /* Bus Error Bits */
-#define STS_TS 0x00700000 /* Transmit Process State */
-#define STS_RS 0x000e0000 /* Receive Process State */
-#define STS_NIS 0x00010000 /* Normal Interrupt Summary */
-#define STS_AIS 0x00008000 /* Abnormal Interrupt Summary */
-#define STS_ER 0x00004000 /* Early Receive */
-#define STS_FBE 0x00002000 /* Fatal Bus Error */
-#define STS_SE 0x00002000 /* System Error */
-#define STS_LNF 0x00001000 /* Link Fail */
-#define STS_FD 0x00000800 /* Full-Duplex Short Frame Received */
-#define STS_TM 0x00000800 /* Timer Expired (DC21041) */
-#define STS_ETI 0x00000400 /* Early Transmit Interrupt */
-#define STS_AT 0x00000400 /* AUI/TP Pin */
-#define STS_RWT 0x00000200 /* Receive Watchdog Time-Out */
-#define STS_RPS 0x00000100 /* Receive Process Stopped */
-#define STS_RU 0x00000080 /* Receive Buffer Unavailable */
-#define STS_RI 0x00000040 /* Receive Interrupt */
-#define STS_UNF 0x00000020 /* Transmit Underflow */
-#define STS_LNP 0x00000010 /* Link Pass */
-#define STS_ANC 0x00000010 /* Autonegotiation Complete */
-#define STS_TJT 0x00000008 /* Transmit Jabber Time-Out */
-#define STS_TU 0x00000004 /* Transmit Buffer Unavailable */
-#define STS_TPS 0x00000002 /* Transmit Process Stopped */
-#define STS_TI 0x00000001 /* Transmit Interrupt */
-
-#define EB_PAR 0x00000000 /* Parity Error */
-#define EB_MA 0x00800000 /* Master Abort */
-#define EB_TA 0x01000000 /* Target Abort */
-#define EB_RES0 0x01800000 /* Reserved */
-#define EB_RES1 0x02000000 /* Reserved */
-
-#define TS_STOP 0x00000000 /* Stopped */
-#define TS_FTD 0x00100000 /* Fetch Transmit Descriptor */
-#define TS_WEOT 0x00200000 /* Wait for End Of Transmission */
-#define TS_QDAT 0x00300000 /* Queue skb data into TX FIFO */
-#define TS_RES 0x00400000 /* Reserved */
-#define TS_SPKT 0x00500000 /* Setup Packet */
-#define TS_SUSP 0x00600000 /* Suspended */
-#define TS_CLTD 0x00700000 /* Close Transmit Descriptor */
-
-#define RS_STOP 0x00000000 /* Stopped */
-#define RS_FRD 0x00020000 /* Fetch Receive Descriptor */
-#define RS_CEOR 0x00040000 /* Check for End of Receive Packet */
-#define RS_WFRP 0x00060000 /* Wait for Receive Packet */
-#define RS_SUSP 0x00080000 /* Suspended */
-#define RS_CLRD 0x000a0000 /* Close Receive Descriptor */
-#define RS_FLUSH 0x000c0000 /* Flush RX FIFO */
-#define RS_QRFS 0x000e0000 /* Queue RX FIFO into RX Skb */
-
-#define INT_CANCEL 0x0001ffff /* For zeroing all interrupt sources */
-
-/*
-** Operation Mode Register (DE4X5_OMR)
-*/
-#define OMR_SC 0x80000000 /* Special Capture Effect Enable */
-#define OMR_RA 0x40000000 /* Receive All */
-#define OMR_SDP 0x02000000 /* SD Polarity - MUST BE ASSERTED */
-#define OMR_SCR 0x01000000 /* Scrambler Mode */
-#define OMR_PCS 0x00800000 /* PCS Function */
-#define OMR_TTM 0x00400000 /* Transmit Threshold Mode */
-#define OMR_SF 0x00200000 /* Store and Forward */
-#define OMR_HBD 0x00080000 /* HeartBeat Disable */
-#define OMR_PS 0x00040000 /* Port Select */
-#define OMR_CA 0x00020000 /* Capture Effect Enable */
-#define OMR_BP 0x00010000 /* Back Pressure */
-#define OMR_TR 0x0000c000 /* Threshold Control Bits */
-#define OMR_ST 0x00002000 /* Start/Stop Transmission Command */
-#define OMR_FC 0x00001000 /* Force Collision Mode */
-#define OMR_OM 0x00000c00 /* Operating Mode */
-#define OMR_FDX 0x00000200 /* Full Duplex Mode */
-#define OMR_FKD 0x00000100 /* Flaky Oscillator Disable */
-#define OMR_PM 0x00000080 /* Pass All Multicast */
-#define OMR_PR 0x00000040 /* Promiscuous Mode */
-#define OMR_SB 0x00000020 /* Start/Stop Backoff Counter */
-#define OMR_IF 0x00000010 /* Inverse Filtering */
-#define OMR_PB 0x00000008 /* Pass Bad Frames */
-#define OMR_HO 0x00000004 /* Hash Only Filtering Mode */
-#define OMR_SR 0x00000002 /* Start/Stop Receive */
-#define OMR_HP 0x00000001 /* Hash/Perfect Receive Filtering Mode */
-
-#define TR_72 0x00000000 /* Threshold set to 72 (128) bytes */
-#define TR_96 0x00004000 /* Threshold set to 96 (256) bytes */
-#define TR_128 0x00008000 /* Threshold set to 128 (512) bytes */
-#define TR_160 0x0000c000 /* Threshold set to 160 (1024) bytes */
-
-#define OMR_DEF (OMR_SDP)
-#define OMR_SIA (OMR_SDP | OMR_TTM)
-#define OMR_SYM (OMR_SDP | OMR_SCR | OMR_PCS | OMR_HBD | OMR_PS)
-#define OMR_MII_10 (OMR_SDP | OMR_TTM | OMR_PS)
-#define OMR_MII_100 (OMR_SDP | OMR_HBD | OMR_PS)
-
-/*
-** DC21040 Interrupt Mask Register (DE4X5_IMR)
-*/
-#define IMR_GPM 0x04000000 /* General Purpose Port Mask */
-#define IMR_NIM 0x00010000 /* Normal Interrupt Summary Mask */
-#define IMR_AIM 0x00008000 /* Abnormal Interrupt Summary Mask */
-#define IMR_ERM 0x00004000 /* Early Receive Mask */
-#define IMR_FBM 0x00002000 /* Fatal Bus Error Mask */
-#define IMR_SEM 0x00002000 /* System Error Mask */
-#define IMR_LFM 0x00001000 /* Link Fail Mask */
-#define IMR_FDM 0x00000800 /* Full-Duplex (Short Frame) Mask */
-#define IMR_TMM 0x00000800 /* Timer Expired Mask (DC21041) */
-#define IMR_ETM 0x00000400 /* Early Transmit Interrupt Mask */
-#define IMR_ATM 0x00000400 /* AUI/TP Switch Mask */
-#define IMR_RWM 0x00000200 /* Receive Watchdog Time-Out Mask */
-#define IMR_RSM 0x00000100 /* Receive Stopped Mask */
-#define IMR_RUM 0x00000080 /* Receive Buffer Unavailable Mask */
-#define IMR_RIM 0x00000040 /* Receive Interrupt Mask */
-#define IMR_UNM 0x00000020 /* Underflow Interrupt Mask */
-#define IMR_ANM 0x00000010 /* Autonegotiation Complete Mask */
-#define IMR_LPM 0x00000010 /* Link Pass */
-#define IMR_TJM 0x00000008 /* Transmit Time-Out Jabber Mask */
-#define IMR_TUM 0x00000004 /* Transmit Buffer Unavailable Mask */
-#define IMR_TSM 0x00000002 /* Transmission Stopped Mask */
-#define IMR_TIM 0x00000001 /* Transmit Interrupt Mask */
-
-/*
-** Missed Frames and FIFO Overflow Counters (DE4X5_MFC)
-*/
-#define MFC_FOCO 0x10000000 /* FIFO Overflow Counter Overflow Bit */
-#define MFC_FOC 0x0ffe0000 /* FIFO Overflow Counter Bits */
-#define MFC_OVFL 0x00010000 /* Missed Frames Counter Overflow Bit */
-#define MFC_CNTR 0x0000ffff /* Missed Frames Counter Bits */
-#define MFC_FOCM 0x1ffe0000 /* FIFO Overflow Counter Mask */
-
-/*
-** DC21040 Ethernet Address PROM (DE4X5_APROM)
-*/
-#define APROM_DN 0x80000000 /* Data Not Valid */
-#define APROM_DT 0x000000ff /* Address Byte */
-
-/*
-** DC21041 Boot/Ethernet Address ROM (DE4X5_BROM)
-*/
-#define BROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
-#define BROM_RD 0x00004000 /* Read from Boot ROM */
-#define BROM_WR 0x00002000 /* Write to Boot ROM */
-#define BROM_BR 0x00001000 /* Select Boot ROM when set */
-#define BROM_SR 0x00000800 /* Select Serial ROM when set */
-#define BROM_REG 0x00000400 /* External Register Select */
-#define BROM_DT 0x000000ff /* Data Byte */
-
-/*
-** DC21041 Serial/Ethernet Address ROM (DE4X5_SROM, DE4X5_MII)
-*/
-#define MII_MDI 0x00080000 /* MII Management Data In */
-#define MII_MDO 0x00060000 /* MII Management Mode/Data Out */
-#define MII_MRD 0x00040000 /* MII Management Define Read Mode */
-#define MII_MWR 0x00000000 /* MII Management Define Write Mode */
-#define MII_MDT 0x00020000 /* MII Management Data Out */
-#define MII_MDC 0x00010000 /* MII Management Clock */
-#define MII_RD 0x00004000 /* Read from MII */
-#define MII_WR 0x00002000 /* Write to MII */
-#define MII_SEL 0x00000800 /* Select MII when RESET */
-
-#define SROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
-#define SROM_RD 0x00004000 /* Read from Boot ROM */
-#define SROM_WR 0x00002000 /* Write to Boot ROM */
-#define SROM_BR 0x00001000 /* Select Boot ROM when set */
-#define SROM_SR 0x00000800 /* Select Serial ROM when set */
-#define SROM_REG 0x00000400 /* External Register Select */
-#define SROM_DT 0x000000ff /* Data Byte */
-
-#define DT_OUT 0x00000008 /* Serial Data Out */
-#define DT_IN 0x00000004 /* Serial Data In */
-#define DT_CLK 0x00000002 /* Serial ROM Clock */
-#define DT_CS 0x00000001 /* Serial ROM Chip Select */
-
-#define MII_PREAMBLE 0xffffffff /* MII Management Preamble */
-#define MII_TEST 0xaaaaaaaa /* MII Test Signal */
-#define MII_STRD 0x06 /* Start of Frame+Op Code: use low nibble */
-#define MII_STWR 0x0a /* Start of Frame+Op Code: use low nibble */
-
-#define MII_CR 0x00 /* MII Management Control Register */
-#define MII_SR 0x01 /* MII Management Status Register */
-#define MII_ID0 0x02 /* PHY Identifier Register 0 */
-#define MII_ID1 0x03 /* PHY Identifier Register 1 */
-#define MII_ANA 0x04 /* Auto Negotiation Advertisement */
-#define MII_ANLPA 0x05 /* Auto Negotiation Link Partner Ability */
-#define MII_ANE 0x06 /* Auto Negotiation Expansion */
-#define MII_ANP 0x07 /* Auto Negotiation Next Page TX */
-
-#define DE4X5_MAX_MII 32 /* Maximum address of MII PHY devices */
-
-/*
-** MII Management Control Register
-*/
-#define MII_CR_RST 0x8000 /* RESET the PHY chip */
-#define MII_CR_LPBK 0x4000 /* Loopback enable */
-#define MII_CR_SPD 0x2000 /* 0: 10Mb/s; 1: 100Mb/s */
-#define MII_CR_10 0x0000 /* Set 10Mb/s */
-#define MII_CR_100 0x2000 /* Set 100Mb/s */
-#define MII_CR_ASSE 0x1000 /* Auto Speed Select Enable */
-#define MII_CR_PD 0x0800 /* Power Down */
-#define MII_CR_ISOL 0x0400 /* Isolate Mode */
-#define MII_CR_RAN 0x0200 /* Restart Auto Negotiation */
-#define MII_CR_FDM 0x0100 /* Full Duplex Mode */
-#define MII_CR_CTE 0x0080 /* Collision Test Enable */
-
-/*
-** MII Management Status Register
-*/
-#define MII_SR_T4C 0x8000 /* 100BASE-T4 capable */
-#define MII_SR_TXFD 0x4000 /* 100BASE-TX Full Duplex capable */
-#define MII_SR_TXHD 0x2000 /* 100BASE-TX Half Duplex capable */
-#define MII_SR_TFD 0x1000 /* 10BASE-T Full Duplex capable */
-#define MII_SR_THD 0x0800 /* 10BASE-T Half Duplex capable */
-#define MII_SR_ASSC 0x0020 /* Auto Speed Selection Complete*/
-#define MII_SR_RFD 0x0010 /* Remote Fault Detected */
-#define MII_SR_ANC 0x0008 /* Auto Negotiation capable */
-#define MII_SR_LKS 0x0004 /* Link Status */
-#define MII_SR_JABD 0x0002 /* Jabber Detect */
-#define MII_SR_XC 0x0001 /* Extended Capabilities */
-
-/*
-** MII Management Auto Negotiation Advertisement Register
-*/
-#define MII_ANA_TAF 0x03e0 /* Technology Ability Field */
-#define MII_ANA_T4AM 0x0200 /* T4 Technology Ability Mask */
-#define MII_ANA_TXAM 0x0180 /* TX Technology Ability Mask */
-#define MII_ANA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
-#define MII_ANA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
-#define MII_ANA_100M 0x0380 /* 100Mb Technology Ability Mask */
-#define MII_ANA_10M 0x0060 /* 10Mb Technology Ability Mask */
-#define MII_ANA_CSMA 0x0001 /* CSMA-CD Capable */
-
-/*
-** MII Management Auto Negotiation Remote End Register
-*/
-#define MII_ANLPA_NP 0x8000 /* Next Page (Enable) */
-#define MII_ANLPA_ACK 0x4000 /* Remote Acknowledge */
-#define MII_ANLPA_RF 0x2000 /* Remote Fault */
-#define MII_ANLPA_TAF 0x03e0 /* Technology Ability Field */
-#define MII_ANLPA_T4AM 0x0200 /* T4 Technology Ability Mask */
-#define MII_ANLPA_TXAM 0x0180 /* TX Technology Ability Mask */
-#define MII_ANLPA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
-#define MII_ANLPA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
-#define MII_ANLPA_100M 0x0380 /* 100Mb Technology Ability Mask */
-#define MII_ANLPA_10M 0x0060 /* 10Mb Technology Ability Mask */
-#define MII_ANLPA_CSMA 0x0001 /* CSMA-CD Capable */
-
-/*
-** SROM Media Definitions (ABG SROM Section)
-*/
-#define MEDIA_NWAY 0x0080 /* Nway (Auto Negotiation) on PHY */
-#define MEDIA_MII 0x0040 /* MII Present on the adapter */
-#define MEDIA_FIBRE 0x0008 /* Fibre Media present */
-#define MEDIA_AUI 0x0004 /* AUI Media present */
-#define MEDIA_TP 0x0002 /* TP Media present */
-#define MEDIA_BNC 0x0001 /* BNC Media present */
-
-/*
-** SROM Definitions (Digital Semiconductor Format)
-*/
-#define SROM_SSVID 0x0000 /* Sub-system Vendor ID offset */
-#define SROM_SSID 0x0002 /* Sub-system ID offset */
-#define SROM_CISPL 0x0004 /* CardBus CIS Pointer low offset */
-#define SROM_CISPH 0x0006 /* CardBus CIS Pointer high offset */
-#define SROM_IDCRC 0x0010 /* ID Block CRC offset*/
-#define SROM_RSVD2 0x0011 /* ID Reserved 2 offset */
-#define SROM_SFV 0x0012 /* SROM Format Version offset */
-#define SROM_CCNT 0x0013 /* Controller Count offset */
-#define SROM_HWADD 0x0014 /* Hardware Address offset */
-#define SROM_MRSVD 0x007c /* Manufacturer Reserved offset*/
-#define SROM_CRC 0x007e /* SROM CRC offset */
-
-/*
-** SROM Media Connection Definitions
-*/
-#define SROM_10BT 0x0000 /* 10BASE-T half duplex */
-#define SROM_10BTN 0x0100 /* 10BASE-T with Nway */
-#define SROM_10BTF 0x0204 /* 10BASE-T full duplex */
-#define SROM_10BTNLP 0x0400 /* 10BASE-T without Link Pass test */
-#define SROM_10B2 0x0001 /* 10BASE-2 (BNC) */
-#define SROM_10B5 0x0002 /* 10BASE-5 (AUI) */
-#define SROM_100BTH 0x0003 /* 100BASE-T half duplex */
-#define SROM_100BTF 0x0205 /* 100BASE-T full duplex */
-#define SROM_100BT4 0x0006 /* 100BASE-T4 */
-#define SROM_100BFX 0x0007 /* 100BASE-FX half duplex (Fiber) */
-#define SROM_M10BT 0x0009 /* MII 10BASE-T half duplex */
-#define SROM_M10BTF 0x020a /* MII 10BASE-T full duplex */
-#define SROM_M100BT 0x000d /* MII 100BASE-T half duplex */
-#define SROM_M100BTF 0x020e /* MII 100BASE-T full duplex */
-#define SROM_M100BT4 0x000f /* MII 100BASE-T4 */
-#define SROM_M100BF 0x0010 /* MII 100BASE-FX half duplex */
-#define SROM_M100BFF 0x0211 /* MII 100BASE-FX full duplex */
-#define SROM_PDA 0x0800 /* Powerup & Dynamic Autosense */
-#define SROM_PAO 0x8800 /* Powerup Autosense Only */
-#define SROM_NSMI 0xffff /* No Selected Media Information */
-
-/*
-** SROM Media Definitions
-*/
-#define SROM_10BASET 0x0000 /* 10BASE-T half duplex */
-#define SROM_10BASE2 0x0001 /* 10BASE-2 (BNC) */
-#define SROM_10BASE5 0x0002 /* 10BASE-5 (AUI) */
-#define SROM_100BASET 0x0003 /* 100BASE-T half duplex */
-#define SROM_10BASETF 0x0004 /* 10BASE-T full duplex */
-#define SROM_100BASETF 0x0005 /* 100BASE-T full duplex */
-#define SROM_100BASET4 0x0006 /* 100BASE-T4 */
-#define SROM_100BASEF 0x0007 /* 100BASE-FX half duplex */
-#define SROM_100BASEFF 0x0008 /* 100BASE-FX full duplex */
-
-#define BLOCK_LEN 0x7f /* Extended blocks length mask */
-#define EXT_FIELD 0x40 /* Extended blocks extension field bit */
-#define MEDIA_CODE 0x3f /* Extended blocks media code mask */
-
-/*
-** SROM Compact Format Block Masks
-*/
-#define COMPACT_FI 0x80 /* Format Indicator */
-#define COMPACT_LEN 0x04 /* Length */
-#define COMPACT_MC 0x3f /* Media Code */
-
-/*
-** SROM Extended Format Block Type 0 Masks
-*/
-#define BLOCK0_FI 0x80 /* Format Indicator */
-#define BLOCK0_MCS 0x80 /* Media Code byte Sign */
-#define BLOCK0_MC 0x3f /* Media Code */
-
-/*
-** DC21040 Full Duplex Register (DE4X5_FDR)
-*/
-#define FDR_FDACV 0x0000ffff /* Full Duplex Auto Configuration Value */
-
-/*
-** DC21041 General Purpose Timer Register (DE4X5_GPT)
-*/
-#define GPT_CON 0x00010000 /* One shot: 0, Continuous: 1 */
-#define GPT_VAL 0x0000ffff /* Timer Value */
-
-/*
-** DC21140 General Purpose Register (DE4X5_GEP) (hardware dependent bits)
-*/
-/* Valid ONLY for DE500 hardware */
-#define GEP_LNP 0x00000080 /* Link Pass (input) */
-#define GEP_SLNK 0x00000040 /* SYM LINK (input) */
-#define GEP_SDET 0x00000020 /* Signal Detect (input) */
-#define GEP_HRST 0x00000010 /* Hard RESET (to PHY) (output) */
-#define GEP_FDXD 0x00000008 /* Full Duplex Disable (output) */
-#define GEP_PHYL 0x00000004 /* PHY Loopback (output) */
-#define GEP_FLED 0x00000002 /* Force Activity LED on (output) */
-#define GEP_MODE 0x00000001 /* 0: 10Mb/s, 1: 100Mb/s */
-#define GEP_INIT 0x0000011f /* Setup inputs (0) and outputs (1) */
-#define GEP_CTRL 0x00000100 /* GEP control bit */
-
-/*
-** SIA Register Defaults
-*/
-#define CSR13 0x00000001
-#define CSR14 0x0003ff7f /* Autonegotiation disabled */
-#define CSR15 0x00000008
-
-/*
-** SIA Status Register (DE4X5_SISR)
-*/
-#define SISR_LPC 0xffff0000 /* Link Partner's Code Word */
-#define SISR_LPN 0x00008000 /* Link Partner Negotiable */
-#define SISR_ANS 0x00007000 /* Auto Negotiation Arbitration State */
-#define SISR_NSN 0x00000800 /* Non Stable NLPs Detected (DC21041) */
-#define SISR_TRF 0x00000800 /* Transmit Remote Fault */
-#define SISR_NSND 0x00000400 /* Non Stable NLPs Detected (DC21142) */
-#define SISR_ANR_FDS 0x00000400 /* Auto Negotiate Restart/Full Duplex Sel.*/
-#define SISR_TRA 0x00000200 /* 10BASE-T Receive Port Activity */
-#define SISR_NRA 0x00000200 /* Non Selected Port Receive Activity */
-#define SISR_ARA 0x00000100 /* AUI Receive Port Activity */
-#define SISR_SRA 0x00000100 /* Selected Port Receive Activity */
-#define SISR_DAO 0x00000080 /* PLL All One */
-#define SISR_DAZ 0x00000040 /* PLL All Zero */
-#define SISR_DSP 0x00000020 /* PLL Self-Test Pass */
-#define SISR_DSD 0x00000010 /* PLL Self-Test Done */
-#define SISR_APS 0x00000008 /* Auto Polarity State */
-#define SISR_LKF 0x00000004 /* Link Fail Status */
-#define SISR_LS10 0x00000004 /* 10Mb/s Link Fail Status */
-#define SISR_NCR 0x00000002 /* Network Connection Error */
-#define SISR_LS100 0x00000002 /* 100Mb/s Link Fail Status */
-#define SISR_PAUI 0x00000001 /* AUI_TP Indication */
-#define SISR_MRA 0x00000001 /* MII Receive Port Activity */
-
-#define ANS_NDIS 0x00000000 /* Nway disable */
-#define ANS_TDIS 0x00001000 /* Transmit Disable */
-#define ANS_ADET 0x00002000 /* Ability Detect */
-#define ANS_ACK 0x00003000 /* Acknowledge */
-#define ANS_CACK 0x00004000 /* Complete Acknowledge */
-#define ANS_NWOK 0x00005000 /* Nway OK - FLP Link Good */
-#define ANS_LCHK 0x00006000 /* Link Check */
-
-#define SISR_RST 0x00000301 /* CSR12 reset */
-#define SISR_ANR 0x00001301 /* Autonegotiation restart */
-
-/*
-** SIA Connectivity Register (DE4X5_SICR)
-*/
-#define SICR_SDM 0xffff0000 /* SIA Diagnostics Mode */
-#define SICR_OE57 0x00008000 /* Output Enable 5 6 7 */
-#define SICR_OE24 0x00004000 /* Output Enable 2 4 */
-#define SICR_OE13 0x00002000 /* Output Enable 1 3 */
-#define SICR_IE 0x00001000 /* Input Enable */
-#define SICR_EXT 0x00000000 /* SIA MUX Select External SIA Mode */
-#define SICR_D_SIA 0x00000400 /* SIA MUX Select Diagnostics - SIA Sigs */
-#define SICR_DPLL 0x00000800 /* SIA MUX Select Diagnostics - DPLL Sigs*/
-#define SICR_APLL 0x00000a00 /* SIA MUX Select Diagnostics - DPLL Sigs*/
-#define SICR_D_RxM 0x00000c00 /* SIA MUX Select Diagnostics - RxM Sigs */
-#define SICR_M_RxM 0x00000d00 /* SIA MUX Select Diagnostics - RxM Sigs */
-#define SICR_LNKT 0x00000e00 /* SIA MUX Select Diagnostics - Link Test*/
-#define SICR_SEL 0x00000f00 /* SIA MUX Select AUI or TP with LEDs */
-#define SICR_ASE 0x00000080 /* APLL Start Enable*/
-#define SICR_SIM 0x00000040 /* Serial Interface Input Multiplexer */
-#define SICR_ENI 0x00000020 /* Encoder Input Multiplexer */
-#define SICR_EDP 0x00000010 /* SIA PLL External Input Enable */
-#define SICR_AUI 0x00000008 /* 10Base-T (0) or AUI (1) */
-#define SICR_CAC 0x00000004 /* CSR Auto Configuration */
-#define SICR_PS 0x00000002 /* Pin AUI/TP Selection */
-#define SICR_SRL 0x00000001 /* SIA Reset */
-#define SIA_RESET 0x00000000 /* SIA Reset Value */
-
-/*
-** SIA Transmit and Receive Register (DE4X5_STRR)
-*/
-#define STRR_TAS 0x00008000 /* 10Base-T/AUI Autosensing Enable */
-#define STRR_SPP 0x00004000 /* Set Polarity Plus */
-#define STRR_APE 0x00002000 /* Auto Polarity Enable */
-#define STRR_LTE 0x00001000 /* Link Test Enable */
-#define STRR_SQE 0x00000800 /* Signal Quality Enable */
-#define STRR_CLD 0x00000400 /* Collision Detect Enable */
-#define STRR_CSQ 0x00000200 /* Collision Squelch Enable */
-#define STRR_RSQ 0x00000100 /* Receive Squelch Enable */
-#define STRR_ANE 0x00000080 /* Auto Negotiate Enable */
-#define STRR_HDE 0x00000040 /* Half Duplex Enable */
-#define STRR_CPEN 0x00000030 /* Compensation Enable */
-#define STRR_LSE 0x00000008 /* Link Pulse Send Enable */
-#define STRR_DREN 0x00000004 /* Driver Enable */
-#define STRR_LBK 0x00000002 /* Loopback Enable */
-#define STRR_ECEN 0x00000001 /* Encoder Enable */
-#define STRR_RESET 0xffffffff /* Reset value for STRR */
-
-/*
-** SIA General Register (DE4X5_SIGR)
-*/
-#define SIGR_RMI 0x40000000 /* Receive Match Interrupt */
-#define SIGR_GI1 0x20000000 /* General Port Interrupt 1 */
-#define SIGR_GI0 0x10000000 /* General Port Interrupt 0 */
-#define SIGR_CWE 0x08000000 /* Control Write Enable */
-#define SIGR_RME 0x04000000 /* Receive Match Enable */
-#define SIGR_GEI1 0x02000000 /* GEP Interrupt Enable on Port 1 */
-#define SIGR_GEI0 0x01000000 /* GEP Interrupt Enable on Port 0 */
-#define SIGR_LGS3 0x00800000 /* LED/GEP3 Select */
-#define SIGR_LGS2 0x00400000 /* LED/GEP2 Select */
-#define SIGR_LGS1 0x00200000 /* LED/GEP1 Select */
-#define SIGR_LGS0 0x00100000 /* LED/GEP0 Select */
-#define SIGR_MD 0x000f0000 /* General Purpose Mode and Data */
-#define SIGR_LV2 0x00008000 /* General Purpose LED2 value */
-#define SIGR_LE2 0x00004000 /* General Purpose LED2 enable */
-#define SIGR_FRL 0x00002000 /* Force Receiver Low */
-#define SIGR_DPST 0x00001000 /* PLL Self Test Start */
-#define SIGR_LSD 0x00000800 /* LED Stretch Disable */
-#define SIGR_FLF 0x00000400 /* Force Link Fail */
-#define SIGR_FUSQ 0x00000200 /* Force Unsquelch */
-#define SIGR_TSCK 0x00000100 /* Test Clock */
-#define SIGR_LV1 0x00000080 /* General Purpose LED1 value */
-#define SIGR_LE1 0x00000040 /* General Purpose LED1 enable */
-#define SIGR_RWR 0x00000020 /* Receive Watchdog Release */
-#define SIGR_RWD 0x00000010 /* Receive Watchdog Disable */
-#define SIGR_ABM 0x00000008 /* BNC: 0, AUI:1 */
-#define SIGR_JCK 0x00000004 /* Jabber Clock */
-#define SIGR_HUJ 0x00000002 /* Host Unjab */
-#define SIGR_JBD 0x00000001 /* Jabber Disable */
-#define SIGR_RESET 0xffff0000 /* Reset value for SIGR */
-
-/*
-** Receive Descriptor Bit Summary
-*/
-#define R_OWN 0x80000000 /* Own Bit */
-#define RD_FF 0x40000000 /* Filtering Fail */
-#define RD_FL 0x3fff0000 /* Frame Length */
-#define RD_ES 0x00008000 /* Error Summary */
-#define RD_LE 0x00004000 /* Length Error */
-#define RD_DT 0x00003000 /* Data Type */
-#define RD_RF 0x00000800 /* Runt Frame */
-#define RD_MF 0x00000400 /* Multicast Frame */
-#define RD_FS 0x00000200 /* First Descriptor */
-#define RD_LS 0x00000100 /* Last Descriptor */
-#define RD_TL 0x00000080 /* Frame Too Long */
-#define RD_CS 0x00000040 /* Collision Seen */
-#define RD_FT 0x00000020 /* Frame Type */
-#define RD_RJ 0x00000010 /* Receive Watchdog */
-#define RD_RE 0x00000008 /* Report on MII Error */
-#define RD_DB 0x00000004 /* Dribbling Bit */
-#define RD_CE 0x00000002 /* CRC Error */
-#define RD_OF 0x00000001 /* Overflow */
-
-#define RD_RER 0x02000000 /* Receive End Of Ring */
-#define RD_RCH 0x01000000 /* Second Address Chained */
-#define RD_RBS2 0x003ff800 /* Buffer 2 Size */
-#define RD_RBS1 0x000007ff /* Buffer 1 Size */
-
-/*
-** Transmit Descriptor Bit Summary
-*/
-#define T_OWN 0x80000000 /* Own Bit */
-#define TD_ES 0x00008000 /* Error Summary */
-#define TD_TO 0x00004000 /* Transmit Jabber Time-Out */
-#define TD_LO 0x00000800 /* Loss Of Carrier */
-#define TD_NC 0x00000400 /* No Carrier */
-#define TD_LC 0x00000200 /* Late Collision */
-#define TD_EC 0x00000100 /* Excessive Collisions */
-#define TD_HF 0x00000080 /* Heartbeat Fail */
-#define TD_CC 0x00000078 /* Collision Counter */
-#define TD_LF 0x00000004 /* Link Fail */
-#define TD_UF 0x00000002 /* Underflow Error */
-#define TD_DE 0x00000001 /* Deferred */
-
-#define TD_IC 0x80000000 /* Interrupt On Completion */
-#define TD_LS 0x40000000 /* Last Segment */
-#define TD_FS 0x20000000 /* First Segment */
-#define TD_FT1 0x10000000 /* Filtering Type */
-#define TD_SET 0x08000000 /* Setup Packet */
-#define TD_AC 0x04000000 /* Add CRC Disable */
-#define TD_TER 0x02000000 /* Transmit End Of Ring */
-#define TD_TCH 0x01000000 /* Second Address Chained */
-#define TD_DPD 0x00800000 /* Disabled Padding */
-#define TD_FT0 0x00400000 /* Filtering Type */
-#define TD_TBS2 0x003ff800 /* Buffer 2 Size */
-#define TD_TBS1 0x000007ff /* Buffer 1 Size */
-
-#define PERFECT_F 0x00000000
-#define HASH_F TD_FT0
-#define INVERSE_F TD_FT1
-#define HASH_O_F (TD_FT1 | TD_FT0)
-
-/*
-** Media / mode state machine definitions
-** User selectable:
-*/
-#define TP 0x0040 /* 10Base-T (now equiv to _10Mb) */
-#define TP_NW 0x0002 /* 10Base-T with Nway */
-#define BNC 0x0004 /* Thinwire */
-#define AUI 0x0008 /* Thickwire */
-#define BNC_AUI 0x0010 /* BNC/AUI on DC21040 indistinguishable */
-#define _10Mb 0x0040 /* 10Mb/s Ethernet */
-#define _100Mb 0x0080 /* 100Mb/s Ethernet */
-#define AUTO 0x4000 /* Auto sense the media or speed */
-
-/*
-** Internal states
-*/
-#define NC 0x0000 /* No Connection */
-#define ANS 0x0020 /* Intermediate AutoNegotiation State */
-#define SPD_DET 0x0100 /* Parallel speed detection */
-#define INIT 0x0200 /* Initial state */
-#define EXT_SIA 0x0400 /* External SIA for motherboard chip */
-#define ANS_SUSPECT 0x0802 /* Suspect the ANS (TP) port is down */
-#define TP_SUSPECT 0x0803 /* Suspect the TP port is down */
-#define BNC_AUI_SUSPECT 0x0804 /* Suspect the BNC or AUI port is down */
-#define EXT_SIA_SUSPECT 0x0805 /* Suspect the EXT SIA port is down */
-#define BNC_SUSPECT 0x0806 /* Suspect the BNC port is down */
-#define AUI_SUSPECT 0x0807 /* Suspect the AUI port is down */
-#define MII 0x1000 /* MII on the 21143 */
-
-#define TIMER_CB 0x80000000 /* Timer callback detection */
-
-/*
-** DE4X5 DEBUG Options
-*/
-#define DEBUG_NONE 0x0000 /* No DEBUG messages */
-#define DEBUG_VERSION 0x0001 /* Print version message */
-#define DEBUG_MEDIA 0x0002 /* Print media messages */
-#define DEBUG_TX 0x0004 /* Print TX (queue_pkt) messages */
-#define DEBUG_RX 0x0008 /* Print RX (de4x5_rx) messages */
-#define DEBUG_SROM 0x0010 /* Print SROM messages */
-#define DEBUG_MII 0x0020 /* Print MII messages */
-#define DEBUG_OPEN 0x0040 /* Print de4x5_open() messages */
-#define DEBUG_CLOSE 0x0080 /* Print de4x5_close() messages */
-#define DEBUG_PCICFG 0x0100
-#define DEBUG_ALL 0x01ff
-
-/*
-** Miscellaneous
-*/
-#define PCI 0
-#define EISA 1
-
-#define DE4X5_HASH_TABLE_LEN 512 /* Bits */
-#define DE4X5_HASH_BITS 0x01ff /* 9 LS bits */
-
-#define SETUP_FRAME_LEN 192 /* Bytes */
-#define IMPERF_PA_OFFSET 156 /* Bytes */
-
-#define POLL_DEMAND 1
-
-#define LOST_MEDIA_THRESHOLD 3
-
-#define MASK_INTERRUPTS 1
-#define UNMASK_INTERRUPTS 0
-
-#define DE4X5_STRLEN 8
-
-#define DE4X5_INIT 0 /* Initialisation time */
-#define DE4X5_RUN 1 /* Run time */
-
-#define DE4X5_SAVE_STATE 0
-#define DE4X5_RESTORE_STATE 1
-
-/*
-** Address Filtering Modes
-*/
-#define PERFECT 0 /* 16 perfect physical addresses */
-#define HASH_PERF 1 /* 1 perfect, 512 multicast addresses */
-#define PERFECT_REJ 2 /* Reject 16 perfect physical addresses */
-#define ALL_HASH 3 /* Hashes all physical & multicast addrs */
-
-#define ALL 0 /* Clear out all the setup frame */
-#define PHYS_ADDR_ONLY 1 /* Update the physical address only */
-
-/*
-** Adapter state
-*/
-#define INITIALISED 0 /* After h/w initialised and mem alloc'd */
-#define CLOSED 1 /* Ready for opening */
-#define OPEN 2 /* Running */
-
-/*
-** Various wait times
-*/
-#define PDET_LINK_WAIT 1200 /* msecs to wait for link detect bits */
-#define ANS_FINISH_WAIT 1000 /* msecs to wait for link detect bits */
-
-/*
-** IEEE OUIs for various PHY vendor/chip combos - Reg 2 values only. Since
-** the vendors seem split 50-50 on how to calculate the OUI register values
-** anyway, just reading Reg2 seems reasonable for now [see de4x5_get_oui()].
-*/
-#define NATIONAL_TX 0x2000
-#define BROADCOM_T4 0x03e0
-#define SEEQ_T4 0x0016
-#define CYPRESS_T4 0x0014
-
-/*
-** Speed Selection stuff
-*/
-#define SET_10Mb {\
- if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
- omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
- if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
- mii_wr(MII_CR_10|(lp->fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
- }\
- omr |= ((lp->fdx ? OMR_FDX : 0) | OMR_TTM);\
- outl(omr, DE4X5_OMR);\
- if (!lp->useSROM) lp->cache.gep = 0;\
- } else if (lp->useSROM && !lp->useMII) {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | (lp->infoblock_csr6 & ~(OMR_SCR | OMR_HBD)), DE4X5_OMR);\
- } else {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | OMR_SDP | OMR_TTM, DE4X5_OMR);\
- lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD);\
- gep_wr(lp->cache.gep, dev);\
- }\
-}
-
-#define SET_100Mb {\
- if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
- int fdx=0;\
- if (lp->phy[lp->active].id == NATIONAL_TX) {\
- mii_wr(mii_rd(0x18, lp->phy[lp->active].addr, DE4X5_MII) & ~0x2000,\
- 0x18, lp->phy[lp->active].addr, DE4X5_MII);\
- }\
- omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
- sr = mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);\
- if (!(sr & MII_ANA_T4AM) && lp->fdx) fdx=1;\
- if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
- mii_wr(MII_CR_100|(fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
- }\
- if (fdx) omr |= OMR_FDX;\
- outl(omr, DE4X5_OMR);\
- if (!lp->useSROM) lp->cache.gep = 0;\
- } else if (lp->useSROM && !lp->useMII) {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | lp->infoblock_csr6, DE4X5_OMR);\
- } else {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- omr |= (lp->fdx ? OMR_FDX : 0);\
- outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);\
- lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD) | GEP_MODE;\
- gep_wr(lp->cache.gep, dev);\
- }\
-}
-
-/* FIX ME so I don't jam 10Mb networks */
-#define SET_100Mb_PDET {\
- if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
- mii_wr(MII_CR_100|MII_CR_ASSE, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
- omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- outl(omr, DE4X5_OMR);\
- } else if (lp->useSROM && !lp->useMII) {\
- omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- outl(omr, DE4X5_OMR);\
- } else {\
- omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
- outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS, DE4X5_OMR);\
- lp->cache.gep = (GEP_FDXD | GEP_MODE);\
- gep_wr(lp->cache.gep, dev);\
- }\
-}
-
-/*
-** Include the IOCTL stuff
-*/
-#include <linux/sockios.h>
-
-struct de4x5_ioctl {
- unsigned short cmd; /* Command to run */
- unsigned short len; /* Length of the data buffer */
- unsigned char __user *data; /* Pointer to the data buffer */
-};
-
-/*
-** Recognised commands for the driver
-*/
-#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
-#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
-/* 0x03 and 0x04 were used before and are obsoleted now. Don't use them. */
-#define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
-#define DE4X5_GET_MCA 0x06 /* Get a multicast address */
-#define DE4X5_SET_MCA 0x07 /* Set a multicast address */
-#define DE4X5_CLR_MCA 0x08 /* Clear a multicast address */
-#define DE4X5_MCA_EN 0x09 /* Enable a multicast address group */
-#define DE4X5_GET_STATS 0x0a /* Get the driver statistics */
-#define DE4X5_CLR_STATS 0x0b /* Zero out the driver statistics */
-#define DE4X5_GET_OMR 0x0c /* Get the OMR Register contents */
-#define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */
-#define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */
-
-#define MOTO_SROM_BUG (lp->active == 8 && (get_unaligned_le32(dev->dev_addr) & 0x00ffffff) == 0x3e0008)
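The removed de4x5.h also carried the driver's private ioctl interface. As a rough illustration only (the dispatch code lived in the deleted de4x5.c, so the way the request is packed into ifr_ifru below is an assumption, not something shown in this diff), a userspace caller would typically have issued these commands through SIOCDEVPRIVATE on the interface:

	/* Hypothetical sketch of the old de4x5 private-ioctl call from userspace.
	 * DE4X5_GET_HWADDR (0x01) and the request layout mirror the removed header;
	 * the packing into ifr_ifru is assumed. */
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/sockios.h>

	struct de4x5_ioctl_req {            /* mirrors the removed struct de4x5_ioctl */
		unsigned short cmd;
		unsigned short len;
		unsigned char *data;
	};

	int main(void)
	{
		unsigned char hwaddr[6] = {0};
		struct de4x5_ioctl_req req = { .cmd = 0x01, .len = sizeof(hwaddr), .data = hwaddr };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		memcpy(&ifr.ifr_ifru, &req, sizeof(req));   /* assumed packing */
		if (ioctl(fd, SIOCDEVPRIVATE, &ifr) == 0)
			printf("%02x:%02x:...\n", hwaddr[0], hwaddr[1]);
		close(fd);
		return 0;
	}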
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 83f1727d1423..3188ba7b450f 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -1074,8 +1074,8 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
{
struct dmfe_board_info *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int dmfe_ethtool_set_wol(struct net_device *dev,
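The strlcpy() -> strscpy() conversions in this and the following tulip-family drivers are mechanical, but the semantics differ slightly: strscpy() returns the number of bytes copied, or -E2BIG on truncation, and never walks the whole source the way strlcpy()'s strlen()-style return value can. A minimal kernel-style sketch of checking the result (buffer and source names are illustrative):

	/* Sketch only: detecting truncation with strscpy(). */
	char name[16];
	ssize_t n;

	n = strscpy(name, some_long_source_string, sizeof(name));
	if (n == -E2BIG)
		pr_warn("name truncated to \"%s\"\n", name);  /* dest is still NUL-terminated */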
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
index ba0a69b363f8..d5657ff15e3c 100644
--- a/drivers/net/ethernet/dec/tulip/eeprom.c
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -117,8 +117,8 @@ static void tulip_build_fake_mediatable(struct tulip_private *tp)
0x00, 0x06 /* ttm bit map */
};
- tp->mtable = kmalloc(sizeof(struct mediatable) +
- sizeof(struct medialeaf), GFP_KERNEL);
+ tp->mtable = devm_kmalloc(&tp->pdev->dev, sizeof(struct mediatable) +
+ sizeof(struct medialeaf), GFP_KERNEL);
if (tp->mtable == NULL)
return; /* Horrible, impossible failure. */
@@ -224,7 +224,8 @@ subsequent_board:
return;
}
- mtable = kmalloc(struct_size(mtable, mleaf, count), GFP_KERNEL);
+ mtable = devm_kmalloc(&tp->pdev->dev, struct_size(mtable, mleaf, count),
+ GFP_KERNEL);
if (mtable == NULL)
return; /* Horrible, impossible failure. */
last_mediatable = tp->mtable = mtable;
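Switching the media-table allocations to devm_kmalloc() ties their lifetime to the underlying struct device, so the driver no longer needs a matching kfree() on probe error paths or in remove. A minimal sketch of the pattern, assuming a probe routine with access to the PCI device (names are illustrative):

	/* Device-managed allocation: freed automatically when the device is unbound. */
	static int my_probe(struct pci_dev *pdev)
	{
		struct my_table *t;

		t = devm_kmalloc(&pdev->dev, sizeof(*t), GFP_KERNEL);
		if (!t)
			return -ENOMEM;
		/* ... use t; no kfree() needed in error paths or in remove() ... */
		return 0;
	}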
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 3fb39e32e1b4..653bde48ef44 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -21,7 +21,7 @@ void pnic_do_nway(struct net_device *dev)
struct tulip_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->base_addr;
u32 phy_reg = ioread32(ioaddr + 0xB8);
- u32 new_csr6 = tp->csr6 & ~0x40C40200;
+ u32 new_csr6;
if (phy_reg & 0x78000000) { /* Ignore baseT4 */
if (phy_reg & 0x20000000) dev->if_port = 5;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 79df5a72877b..ecfad43df45a 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -858,8 +858,8 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct tulip_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
@@ -1389,7 +1389,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* And back to business
*/
- i = pci_enable_device(pdev);
+ i = pcim_enable_device(pdev);
if (i) {
pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
return i;
@@ -1398,7 +1398,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
irq = pdev->irq;
/* alloc_etherdev ensures aligned and zeroed private structures */
- dev = alloc_etherdev (sizeof (*tp));
+ dev = devm_alloc_etherdev(&pdev->dev, sizeof(*tp));
if (!dev)
return -ENOMEM;
@@ -1408,18 +1408,18 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_name(pdev),
(unsigned long long)pci_resource_len (pdev, 0),
(unsigned long long)pci_resource_start (pdev, 0));
- goto err_out_free_netdev;
+ return -ENODEV;
}
/* grab all resources from both PIO and MMIO regions, as we
* don't want anyone else messing around with our hardware */
- if (pci_request_regions (pdev, DRV_NAME))
- goto err_out_free_netdev;
+ if (pci_request_regions(pdev, DRV_NAME))
+ return -ENODEV;
- ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
+ ioaddr = pcim_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
if (!ioaddr)
- goto err_out_free_res;
+ return -ENODEV;
/*
* initialize private data structure 'tp'
@@ -1428,12 +1428,12 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp = netdev_priv(dev);
tp->dev = dev;
- tp->rx_ring = dma_alloc_coherent(&pdev->dev,
- sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
- &tp->rx_ring_dma, GFP_KERNEL);
+ tp->rx_ring = dmam_alloc_coherent(&pdev->dev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ &tp->rx_ring_dma, GFP_KERNEL);
if (!tp->rx_ring)
- goto err_out_mtable;
+ return -ENODEV;
tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
@@ -1689,12 +1689,13 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &tulip_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_TULIP_NAPI
- netif_napi_add(dev, &tp->napi, tulip_poll, 16);
+ netif_napi_add_weight(dev, &tp->napi, tulip_poll, 16);
#endif
dev->ethtool_ops = &ops;
- if (register_netdev(dev))
- goto err_out_free_ring;
+ i = register_netdev(dev);
+ if (i)
+ return i;
pci_set_drvdata(pdev, dev);
@@ -1769,23 +1770,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tulip_set_power_state (tp, 0, 1);
return 0;
-
-err_out_free_ring:
- dma_free_coherent(&pdev->dev,
- sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
- tp->rx_ring, tp->rx_ring_dma);
-
-err_out_mtable:
- kfree (tp->mtable);
- pci_iounmap(pdev, ioaddr);
-
-err_out_free_res:
- pci_release_regions (pdev);
-
-err_out_free_netdev:
- free_netdev (dev);
- return -ENODEV;
}
@@ -1885,24 +1869,11 @@ static int __maybe_unused tulip_resume(struct device *dev_d)
static void tulip_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
- struct tulip_private *tp;
if (!dev)
return;
- tp = netdev_priv(dev);
unregister_netdev(dev);
- dma_free_coherent(&pdev->dev,
- sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
- tp->rx_ring, tp->rx_ring_dma);
- kfree (tp->mtable);
- pci_iounmap(pdev, tp->base_addr);
- free_netdev (dev);
- pci_release_regions (pdev);
- pci_disable_device(pdev);
-
- /* pci_power_off (pdev, -1); */
}
#ifdef CONFIG_NET_POLL_CONTROLLER
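The tulip_core.c hunks above replace hand-rolled error unwinding with device-managed (devres) resources: pcim_enable_device() makes the later region request and iomap managed, devm_alloc_etherdev() and dmam_alloc_coherent() free the netdev and descriptor rings automatically, and remove() shrinks to unregister_netdev(). A condensed sketch of the resulting probe shape, with illustrative names and sizes (my_priv, RING_BYTES):

	/* Sketch of a devres-based PCI probe; every error path simply returns. */
	static int my_tulip_like_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	{
		struct net_device *dev;
		void __iomem *ioaddr;
		dma_addr_t rings_dma;
		void *rings;
		int err;

		err = pcim_enable_device(pdev);
		if (err)
			return err;
		if (pci_request_regions(pdev, "my_tulip_like"))   /* released by devres */
			return -ENODEV;
		ioaddr = pcim_iomap(pdev, 0, 0);
		if (!ioaddr)
			return -ENODEV;
		dev = devm_alloc_etherdev(&pdev->dev, sizeof(struct my_priv));
		if (!dev)
			return -ENOMEM;
		rings = dmam_alloc_coherent(&pdev->dev, RING_BYTES, &rings_dma, GFP_KERNEL);
		if (!rings)
			return -ENOMEM;
		/* ... initialise and register_netdev(dev); nothing to unwind by hand ... */
		return 0;
	}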
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 77d9058431e3..ff080ab0f116 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -971,8 +971,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct uli526x_board_info *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 86b1d23eba83..37fba39c0056 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -474,8 +474,6 @@ err_out_netdev:
No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
made udelay() unreliable.
- The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
- deprecated.
*/
#define eeprom_delay(ee_addr) ioread32(ee_addr)
@@ -1376,8 +1374,8 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index a301f7e6a440..db6615aa921b 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -814,7 +814,6 @@ rio_free_tx (struct net_device *dev, int irq)
{
struct netdev_private *np = netdev_priv(dev);
int entry = np->old_tx % TX_RING_SIZE;
- int tx_use = 0;
unsigned long flag = 0;
if (irq)
@@ -839,7 +838,6 @@ rio_free_tx (struct net_device *dev, int irq)
np->tx_skbuff[entry] = NULL;
entry = (entry + 1) % TX_RING_SIZE;
- tx_use++;
}
if (irq)
spin_unlock(&np->tx_lock);
@@ -1235,8 +1233,8 @@ static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, "dl2k", sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, "dl2k", sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int rio_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index c710dc17be90..aaf0eda96292 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -340,7 +340,7 @@ enum wake_event_bits {
struct netdev_desc {
__le32 next_desc;
__le32 status;
- struct desc_frag { __le32 addr, length; } frag[1];
+ struct desc_frag { __le32 addr, length; } frag;
};
/* Bits in netdev_desc.status */
@@ -980,8 +980,8 @@ static void tx_timeout(struct net_device *dev, unsigned int txqueue)
le32_to_cpu(np->tx_ring[i].next_desc),
le32_to_cpu(np->tx_ring[i].status),
(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
- le32_to_cpu(np->tx_ring[i].frag[0].length));
+ le32_to_cpu(np->tx_ring[i].frag.addr),
+ le32_to_cpu(np->tx_ring[i].frag.length));
}
printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
ioread32(np->base + TxListPtr),
@@ -1027,7 +1027,7 @@ static void init_ring(struct net_device *dev)
np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
np->rx_ring[i].status = 0;
- np->rx_ring[i].frag[0].length = 0;
+ np->rx_ring[i].frag.length = 0;
np->rx_skbuff[i] = NULL;
}
@@ -1039,16 +1039,16 @@ static void init_ring(struct net_device *dev)
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header. */
- np->rx_ring[i].frag[0].addr = cpu_to_le32(
+ np->rx_ring[i].frag.addr = cpu_to_le32(
dma_map_single(&np->pci_dev->dev, skb->data,
np->rx_buf_sz, DMA_FROM_DEVICE));
if (dma_mapping_error(&np->pci_dev->dev,
- np->rx_ring[i].frag[0].addr)) {
+ np->rx_ring[i].frag.addr)) {
dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL;
break;
}
- np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
+ np->rx_ring[i].frag.length = cpu_to_le32(np->rx_buf_sz | LastFrag);
}
np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1097,12 +1097,12 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
txdesc->next_desc = 0;
txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
- txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
+ txdesc->frag.addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev,
skb->data, skb->len, DMA_TO_DEVICE));
if (dma_mapping_error(&np->pci_dev->dev,
- txdesc->frag[0].addr))
+ txdesc->frag.addr))
goto drop_frame;
- txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
+ txdesc->frag.length = cpu_to_le32 (skb->len | LastFrag);
/* Increment cur_tx before tasklet_schedule() */
np->cur_tx++;
@@ -1151,7 +1151,7 @@ reset_tx (struct net_device *dev)
skb = np->tx_skbuff[i];
if (skb) {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ le32_to_cpu(np->tx_ring[i].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
np->tx_skbuff[i] = NULL;
@@ -1271,12 +1271,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
skb = np->tx_skbuff[entry];
/* Free the original skb. */
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[entry].frag[0].addr),
+ le32_to_cpu(np->tx_ring[entry].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
- np->tx_ring[entry].frag[0].addr = 0;
- np->tx_ring[entry].frag[0].length = 0;
+ np->tx_ring[entry].frag.addr = 0;
+ np->tx_ring[entry].frag.length = 0;
}
spin_unlock(&np->lock);
} else {
@@ -1290,12 +1290,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
skb = np->tx_skbuff[entry];
/* Free the original skb. */
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[entry].frag[0].addr),
+ le32_to_cpu(np->tx_ring[entry].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
- np->tx_ring[entry].frag[0].addr = 0;
- np->tx_ring[entry].frag[0].length = 0;
+ np->tx_ring[entry].frag.addr = 0;
+ np->tx_ring[entry].frag.length = 0;
}
spin_unlock(&np->lock);
}
@@ -1372,16 +1372,16 @@ static void rx_poll(struct tasklet_struct *t)
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
dma_sync_single_for_cpu(&np->pci_dev->dev,
- le32_to_cpu(desc->frag[0].addr),
+ le32_to_cpu(desc->frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
dma_sync_single_for_device(&np->pci_dev->dev,
- le32_to_cpu(desc->frag[0].addr),
+ le32_to_cpu(desc->frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
} else {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(desc->frag[0].addr),
+ le32_to_cpu(desc->frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
@@ -1414,7 +1414,6 @@ static void refill_rx (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
int entry;
- int cnt = 0;
/* Refill the Rx ring buffers. */
for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
@@ -1427,21 +1426,20 @@ static void refill_rx (struct net_device *dev)
if (skb == NULL)
break; /* Better luck next round. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- np->rx_ring[entry].frag[0].addr = cpu_to_le32(
+ np->rx_ring[entry].frag.addr = cpu_to_le32(
dma_map_single(&np->pci_dev->dev, skb->data,
np->rx_buf_sz, DMA_FROM_DEVICE));
if (dma_mapping_error(&np->pci_dev->dev,
- np->rx_ring[entry].frag[0].addr)) {
+ np->rx_ring[entry].frag.addr)) {
dev_kfree_skb_irq(skb);
np->rx_skbuff[entry] = NULL;
break;
}
}
/* Perhaps we need not reset this field. */
- np->rx_ring[entry].frag[0].length =
+ np->rx_ring[entry].frag.length =
cpu_to_le32(np->rx_buf_sz | LastFrag);
np->rx_ring[entry].status = 0;
- cnt++;
}
}
static void netdev_error(struct net_device *dev, int intr_status)
@@ -1644,8 +1642,8 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_link_ksettings(struct net_device *dev,
@@ -1870,14 +1868,14 @@ static int netdev_close(struct net_device *dev)
(int)(np->tx_ring_dma));
for (i = 0; i < TX_RING_SIZE; i++)
printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
- i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
- np->tx_ring[i].frag[0].length);
+ i, np->tx_ring[i].status, np->tx_ring[i].frag.addr,
+ np->tx_ring[i].frag.length);
printk(KERN_DEBUG " Rx ring %8.8x:\n",
(int)(np->rx_ring_dma));
for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
- i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
- np->rx_ring[i].frag[0].length);
+ i, np->rx_ring[i].status, np->rx_ring[i].frag.addr,
+ np->rx_ring[i].frag.length);
}
}
#endif /* __i386__ debugging only */
@@ -1892,19 +1890,19 @@ static int netdev_close(struct net_device *dev)
skb = np->rx_skbuff[i];
if (skb) {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->rx_ring[i].frag[0].addr),
+ le32_to_cpu(np->rx_ring[i].frag.addr),
np->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL;
}
- np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
+ np->rx_ring[i].frag.addr = cpu_to_le32(0xBADF00D0); /* poison */
}
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_ring[i].next_desc = 0;
skb = np->tx_skbuff[i];
if (skb) {
dma_unmap_single(&np->pci_dev->dev,
- le32_to_cpu(np->tx_ring[i].frag[0].addr),
+ le32_to_cpu(np->tx_ring[i].frag.addr),
skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
np->tx_skbuff[i] = NULL;
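The sundance descriptor only ever carries a single fragment, so the fake one-element array becomes a plain member; the in-memory layout is unchanged and only the access syntax moves from frag[0].addr to frag.addr, which also keeps array-bounds checking (FORTIFY/UBSAN-style) quiet. A small standalone illustration of the equivalence, using hypothetical type names:

	/* Layout is identical; only the accessor changes. */
	#include <assert.h>
	#include <stdint.h>

	struct desc_old { uint32_t status; struct { uint32_t addr, length; } frag[1]; };
	struct desc_new { uint32_t status; struct { uint32_t addr, length; } frag; };

	int main(void)
	{
		_Static_assert(sizeof(struct desc_old) == sizeof(struct desc_new), "same layout");
		struct desc_new d = { .frag.addr = 0x1000, .frag.length = 64 };
		assert(d.frag.length == 64);       /* was d.frag[0].length */
		return 0;
	}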
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 92462ed87bc4..151ca9573be9 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -550,11 +550,11 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
/* free the buffer */
dev_kfree_skb(skb);
- spin_unlock_irqrestore(&bp->lock, flags);
-
return NETDEV_TX_OK;
}
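The dnet change above drops the lock before freeing the skb rather than after: dev_kfree_skb() can do non-trivial work (destructors, recycling), and there is no reason to hold bp->lock with interrupts disabled across it once the buffer has been handed to hardware. The general shape, sketched with illustrative names:

	/* Keep the critical section to the hardware/bookkeeping work only. */
	spin_lock_irqsave(&bp->lock, flags);
	/* ... queue the buffer to the device, update ring indices ... */
	skb_tx_timestamp(skb);
	spin_unlock_irqrestore(&bp->lock, flags);

	dev_kfree_skb(skb);        /* safe outside the lock; the skb is no longer shared */
	return NETDEV_TX_OK;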
@@ -725,8 +725,8 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev)
static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, "0", sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, "0", sizeof(info->bus_info));
}
static const struct ethtool_ops dnet_ethtool_ops = {
@@ -788,7 +788,7 @@ static int dnet_probe(struct platform_device *pdev)
}
dev->netdev_ops = &dnet_netdev_ops;
- netif_napi_add(dev, &bp->napi, dnet_poll, 64);
+ netif_napi_add(dev, &bp->napi, dnet_poll);
dev->ethtool_ops = &dnet_ethtool_ops;
dev->base_addr = (unsigned long)bp->regs;
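netif_napi_add() no longer takes a weight argument; callers that used the default of 64 (like dnet here) simply drop it and get NAPI_POLL_WEIGHT, while drivers that genuinely need a non-default budget switch to netif_napi_add_weight(), as the tulip hunk above does. Sketched usage, assuming a driver-private structure holding the napi instance:

	/* Default budget (NAPI_POLL_WEIGHT == 64): */
	netif_napi_add(dev, &priv->napi, my_poll);

	/* Explicit, non-default budget only where it is really needed: */
	netif_napi_add_weight(dev, &priv->napi, my_poll, 16);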
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 46e3a05e9582..c2c5c589a5e3 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -558,7 +558,6 @@ err_unmap:
err_release_regions:
pci_release_regions(dev);
err_disable_dev:
- pci_clear_master(dev);
pci_disable_device(dev);
return err;
@@ -577,7 +576,6 @@ static void ec_bhf_remove(struct pci_dev *dev)
free_netdev(net_dev);
pci_release_regions(dev);
- pci_clear_master(dev);
pci_disable_device(dev);
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8689d4a51fe5..61fe9625bed1 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -101,8 +101,7 @@
#define MAX_ROCE_EQS 5
#define MAX_MSIX_VECTORS 32
#define MIN_MSIX_VECTORS 1
-#define BE_NAPI_WEIGHT 64
-#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
+#define MAX_RX_POST NAPI_POLL_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
#define MAX_NUM_POST_ERX_DB 255u
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 528eb0f223b1..61adcebeef01 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -135,7 +135,8 @@ static int be_mcc_notify(struct be_adapter *adapter)
/* To check if valid bit is set, check the entire word as we don't know
* the endianness of the data (old entry is host endian while a new entry is
- * little endian) */
+ * little endian)
+ */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
u32 flags;
@@ -248,7 +249,8 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
u8 opcode = 0, subsystem = 0;
/* Just swap the status to host endian; mcc tag is opaquely copied
- * from mcc_wrb */
+ * from mcc_wrb
+ */
be_dws_le_to_cpu(compl, 4);
base_status = base_status(compl->status);
@@ -657,8 +659,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
return 0;
}
-/*
- * Insert the mailbox address into the doorbell in two steps
+/* Insert the mailbox address into the doorbell in two steps
* Polls on the mbox doorbell till a command completion (or a timeout) occurs
*/
static int be_mbox_notify_wait(struct be_adapter *adapter)
@@ -802,7 +803,7 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
req_hdr->subsystem = subsystem;
req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
req_hdr->version = 0;
- fill_wrb_tags(wrb, (ulong) req_hdr);
+ fill_wrb_tags(wrb, (ulong)req_hdr);
wrb->payload_length = cmd_len;
if (mem) {
wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
@@ -832,8 +833,8 @@ static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
- struct be_mcc_wrb *wrb
- = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
+ struct be_mcc_wrb *wrb = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
+
memset(wrb, 0, sizeof(*wrb));
return wrb;
}
@@ -896,7 +897,7 @@ static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
memcpy(dest_wrb, wrb, sizeof(*wrb));
if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
- fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
+ fill_wrb_tags(dest_wrb, (ulong)embedded_payload(wrb));
return dest_wrb;
}
@@ -1114,7 +1115,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
err:
mutex_unlock(&adapter->mcc_lock);
- if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
+ if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
return status;
@@ -1803,7 +1804,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
total_size = buf_len;
- get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
+ get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60 * 1024;
get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
get_fat_cmd.size,
&get_fat_cmd.dma, GFP_ATOMIC);
@@ -1813,7 +1814,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
mutex_lock(&adapter->mcc_lock);
while (total_size) {
- buf_size = min(total_size, (u32)60*1024);
+ buf_size = min(total_size, (u32)60 * 1024);
total_size -= buf_size;
wrb = wrb_from_mccq(adapter);
@@ -1878,9 +1879,9 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
if (!status) {
struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
- strlcpy(adapter->fw_ver, resp->firmware_version_string,
+ strscpy(adapter->fw_ver, resp->firmware_version_string,
sizeof(adapter->fw_ver));
- strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
+ strscpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
sizeof(adapter->fw_on_flash));
}
err:
@@ -2287,7 +2288,7 @@ err:
/* Uses sync mcc */
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
- u8 page_num, u8 *data)
+ u8 page_num, u32 off, u32 len, u8 *data)
{
struct be_dma_mem cmd;
struct be_mcc_wrb *wrb;
@@ -2321,10 +2322,10 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
req->port = cpu_to_le32(adapter->hba_port_num);
req->page_num = cpu_to_le32(page_num);
status = be_mcc_notify_wait(adapter);
- if (!status) {
+ if (!status && len > 0) {
struct be_cmd_resp_port_type *resp = cmd.va;
- memcpy(data, resp->page_data, PAGE_DATA_LEN);
+ memcpy(data, resp->page_data + off, len);
}
err:
mutex_unlock(&adapter->mcc_lock);
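be_cmd_read_port_transceiver_data() now takes an offset and length so callers can copy just the slice of the 256-byte EEPROM page they need instead of always pulling PAGE_DATA_LEN bytes; the ethtool module-EEPROM path later in this patch uses that to split one request across pages A0 and A2. A standalone sketch of that split arithmetic (PAGE_DATA_LEN assumed to be 256, as in the driver):

	/* How a [offset, offset+len) request maps onto two 256-byte SFP pages. */
	#include <stdio.h>

	#define PAGE_DATA_LEN 256

	int main(void)
	{
		unsigned int offset = 200, len = 100;        /* example request */
		unsigned int begin = offset, end = offset + len;

		if (begin < PAGE_DATA_LEN)
			printf("page A0: off=%u len=%u\n", begin,
			       (end < PAGE_DATA_LEN ? end : PAGE_DATA_LEN) - begin);
		if (end > PAGE_DATA_LEN)
			printf("page A2: off=%u len=%u\n",
			       begin < PAGE_DATA_LEN ? 0 : begin - PAGE_DATA_LEN,
			       end - (begin < PAGE_DATA_LEN ? PAGE_DATA_LEN : begin));
		return 0;
	}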
@@ -2373,7 +2374,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
be_dws_cpu_to_le(ctxt, sizeof(req->context));
req->write_offset = cpu_to_le32(data_offset);
- strlcpy(req->object_name, obj_name, sizeof(req->object_name));
+ strscpy(req->object_name, obj_name, sizeof(req->object_name));
req->descriptor_count = cpu_to_le32(1);
req->buf_len = cpu_to_le32(data_size);
req->addr_low = cpu_to_le32((cmd->dma +
@@ -2415,7 +2416,7 @@ int be_cmd_query_cable_type(struct be_adapter *adapter)
int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
switch (adapter->phy.interface_type) {
case PHY_TYPE_QSFP:
@@ -2440,11 +2441,11 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter)
int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
- strlcpy(adapter->phy.vendor_name, page_data +
+ strscpy(adapter->phy.vendor_name, page_data +
SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
- strlcpy(adapter->phy.vendor_pn,
+ strscpy(adapter->phy.vendor_pn,
page_data + SFP_VENDOR_PN_OFFSET,
SFP_VENDOR_NAME_LEN - 1);
}
@@ -2473,7 +2474,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
OPCODE_COMMON_DELETE_OBJECT,
sizeof(*req), wrb, NULL);
- strlcpy(req->object_name, obj_name, sizeof(req->object_name));
+ strscpy(req->object_name, obj_name, sizeof(req->object_name));
status = be_mcc_notify_wait(adapter);
err:
@@ -3362,7 +3363,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
req->pattern = cpu_to_le64(pattern);
req->byte_count = cpu_to_le32(byte_cnt);
for (i = 0; i < byte_cnt; i++) {
- req->snd_buff[i] = (u8)(pattern >> (j*8));
+ req->snd_buff[i] = (u8)(pattern >> (j * 8));
j++;
if (j > 7)
j = 0;
@@ -3846,7 +3847,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
req->hdr.domain = domain;
req->mac_count = mac_count;
if (mac_count)
- memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
+ memcpy(req->mac, mac_array, ETH_ALEN * mac_count);
status = be_mcc_notify_wait(adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index db1f3b908582..e2085c68c0ee 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2427,7 +2427,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
u32 *state);
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
- u8 page_num, u8 *data);
+ u8 page_num, u32 off, u32 len, u8 *data);
int be_cmd_query_cable_type(struct be_adapter *adapter);
int be_cmd_query_sfp_info(struct be_adapter *adapter);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f9955308b93d..a29de29bdf23 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -220,15 +220,15 @@ static void be_get_drvinfo(struct net_device *netdev,
{
struct be_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
- strlcpy(drvinfo->fw_version, adapter->fw_ver,
+ strscpy(drvinfo->fw_version, adapter->fw_ver,
sizeof(drvinfo->fw_version));
else
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%s [%s]", adapter->fw_ver, adapter->fw_on_flash);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
@@ -389,10 +389,10 @@ static void be_get_ethtool_stats(struct net_device *netdev,
struct be_rx_stats *stats = rx_stats(rxo);
do {
- start = u64_stats_fetch_begin_irq(&stats->sync);
+ start = u64_stats_fetch_begin(&stats->sync);
data[base] = stats->rx_bytes;
data[base + 1] = stats->rx_pkts;
- } while (u64_stats_fetch_retry_irq(&stats->sync, start));
+ } while (u64_stats_fetch_retry(&stats->sync, start));
for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
p = (u8 *)stats + et_rx_stats[i].offset;
@@ -405,19 +405,19 @@ static void be_get_ethtool_stats(struct net_device *netdev,
struct be_tx_stats *stats = tx_stats(txo);
do {
- start = u64_stats_fetch_begin_irq(&stats->sync_compl);
+ start = u64_stats_fetch_begin(&stats->sync_compl);
data[base] = stats->tx_compl;
- } while (u64_stats_fetch_retry_irq(&stats->sync_compl, start));
+ } while (u64_stats_fetch_retry(&stats->sync_compl, start));
do {
- start = u64_stats_fetch_begin_irq(&stats->sync);
+ start = u64_stats_fetch_begin(&stats->sync);
for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
p = (u8 *)stats + et_tx_stats[i].offset;
data[base + i] =
(et_tx_stats[i].size == sizeof(u64)) ?
*(u64 *)p : *(u32 *)p;
}
- } while (u64_stats_fetch_retry_irq(&stats->sync, start));
+ } while (u64_stats_fetch_retry(&stats->sync, start));
base += ETHTOOL_TXSTATS_NUM;
}
}
@@ -683,7 +683,9 @@ static int be_get_link_ksettings(struct net_device *netdev,
}
static void be_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1342,7 +1344,7 @@ static int be_get_module_info(struct net_device *netdev,
return -EOPNOTSUPP;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- page_data);
+ 0, PAGE_DATA_LEN, page_data);
if (!status) {
if (!page_data[SFP_PLUS_SFF_8472_COMP]) {
modinfo->type = ETH_MODULE_SFF_8079;
@@ -1360,25 +1362,32 @@ static int be_get_module_eeprom(struct net_device *netdev,
{
struct be_adapter *adapter = netdev_priv(netdev);
int status;
+ u32 begin, end;
if (!check_privilege(adapter, MAX_PRIVILEGES))
return -EOPNOTSUPP;
- status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
- data);
- if (status)
- goto err;
+ begin = eeprom->offset;
+ end = eeprom->offset + eeprom->len;
+
+ if (begin < PAGE_DATA_LEN) {
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin,
+ min_t(u32, end, PAGE_DATA_LEN) - begin,
+ data);
+ if (status)
+ goto err;
+
+ data += PAGE_DATA_LEN - begin;
+ begin = PAGE_DATA_LEN;
+ }
- if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) {
- status = be_cmd_read_port_transceiver_data(adapter,
- TR_PAGE_A2,
- data +
- PAGE_DATA_LEN);
+ if (end > PAGE_DATA_LEN) {
+ status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2,
+ begin - PAGE_DATA_LEN,
+ end - begin, data);
if (status)
goto err;
}
- if (eeprom->offset)
- memcpy(data, data + eeprom->offset, eeprom->len);
err:
return be_cmd_status(status);
}
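
The reworked be_get_module_eeprom() above now honours the requested offset and length and splits any read that crosses the 256-byte boundary between SFF-8472 pages A0 and A2 into two transceiver reads. Below is a minimal userspace sketch of that split logic; the PAGE_DATA_LEN value of 256 and the stub read_page() helper are assumptions for illustration only, not part of the patch.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_DATA_LEN 256u	/* bytes per EEPROM page (A0/A2), as in benet */

    /* Stub standing in for be_cmd_read_port_transceiver_data(): just reports
     * which page/offset/length would be requested from the firmware.
     */
    static int read_page(int page, uint32_t off, uint32_t len, uint8_t *buf)
    {
    	printf("read page %s: off=%u len=%u\n", page ? "A2" : "A0", off, len);
    	(void)buf;
    	return 0;
    }

    /* Split an ethtool EEPROM request across pages A0 and A2, mirroring the
     * reworked be_get_module_eeprom() above (min_t() replaced by a ternary).
     */
    static int read_module_eeprom(uint32_t offset, uint32_t len, uint8_t *data)
    {
    	uint32_t begin = offset;
    	uint32_t end = offset + len;
    	int status;

    	if (begin < PAGE_DATA_LEN) {
    		uint32_t chunk = (end < PAGE_DATA_LEN ? end : PAGE_DATA_LEN) - begin;

    		status = read_page(0, begin, chunk, data);
    		if (status)
    			return status;
    		data += chunk;
    		begin = PAGE_DATA_LEN;
    	}

    	if (end > PAGE_DATA_LEN)
    		return read_page(1, begin - PAGE_DATA_LEN, end - begin, data);

    	return 0;
    }

    int main(void)
    {
    	uint8_t buf[64];

    	/* request spanning the A0/A2 boundary: 16 bytes from each page */
    	return read_module_eeprom(240, 32, buf);
    }

A request at offset 240 with length 32 thus issues one 16-byte read from page A0 and one 16-byte read from page A2.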
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d51f24c9e1b8..7e408bcc88de 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -16,7 +16,6 @@
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
-#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>
@@ -665,10 +664,10 @@ static void be_get_stats64(struct net_device *netdev,
const struct be_rx_stats *rx_stats = rx_stats(rxo);
do {
- start = u64_stats_fetch_begin_irq(&rx_stats->sync);
+ start = u64_stats_fetch_begin(&rx_stats->sync);
pkts = rx_stats(rxo)->rx_pkts;
bytes = rx_stats(rxo)->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
+ } while (u64_stats_fetch_retry(&rx_stats->sync, start));
stats->rx_packets += pkts;
stats->rx_bytes += bytes;
stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
@@ -680,10 +679,10 @@ static void be_get_stats64(struct net_device *netdev,
const struct be_tx_stats *tx_stats = tx_stats(txo);
do {
- start = u64_stats_fetch_begin_irq(&tx_stats->sync);
+ start = u64_stats_fetch_begin(&tx_stats->sync);
pkts = tx_stats(txo)->tx_pkts;
bytes = tx_stats(txo)->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
+ } while (u64_stats_fetch_retry(&tx_stats->sync, start));
stats->tx_packets += pkts;
stats->tx_bytes += bytes;
}
@@ -737,9 +736,9 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
static int be_gso_hdr_len(struct sk_buff *skb)
{
if (skb->encapsulation)
- return skb_inner_transport_offset(skb) +
- inner_tcp_hdrlen(skb);
- return skb_transport_offset(skb) + tcp_hdrlen(skb);
+ return skb_inner_tcp_all_headers(skb);
+
+ return skb_tcp_all_headers(skb);
}
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
@@ -1125,7 +1124,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
struct be_wrb_params
*wrb_params)
{
- struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+ struct vlan_ethhdr *veh = skb_vlan_eth_hdr(skb);
unsigned int eth_hdr_len;
struct iphdr *ip;
@@ -2155,16 +2154,16 @@ static int be_get_new_eqd(struct be_eq_obj *eqo)
for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
do {
- start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
+ start = u64_stats_fetch_begin(&rxo->stats.sync);
rx_pkts += rxo->stats.rx_pkts;
- } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
+ } while (u64_stats_fetch_retry(&rxo->stats.sync, start));
}
for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
do {
- start = u64_stats_fetch_begin_irq(&txo->stats.sync);
+ start = u64_stats_fetch_begin(&txo->stats.sync);
tx_pkts += txo->stats.tx_reqs;
- } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
+ } while (u64_stats_fetch_retry(&txo->stats.sync, start));
}
/* Skip, if wrapped around or first calculation */
@@ -2982,8 +2981,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
return -ENOMEM;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
eqo->affinity_mask);
- netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
- BE_NAPI_WEIGHT);
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll);
}
return 0;
}
@@ -3178,7 +3176,7 @@ static irqreturn_t be_intx(int irq, void *dev)
}
be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
- /* Return IRQ_HANDLED only for the the first spurious intr
+ /* Return IRQ_HANDLED only for the first spurious intr
* after a valid intr to stop the kernel from branding
* this irq as a bad one!
*/
@@ -3491,7 +3489,7 @@ static int be_msix_register(struct be_adapter *adapter)
if (status)
goto err_msix;
- irq_set_affinity_hint(vec, eqo->affinity_mask);
+ irq_update_affinity_hint(vec, eqo->affinity_mask);
}
return 0;
@@ -3552,7 +3550,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
/* MSIx */
for_all_evt_queues(adapter, eqo, i) {
vec = be_msix_vec_get(adapter, eqo);
- irq_set_affinity_hint(vec, NULL);
+ irq_update_affinity_hint(vec, NULL);
free_irq(vec, eqo);
}
@@ -5194,7 +5192,8 @@ static void be_netdev_init(struct net_device *netdev)
netdev->hw_features |= NETIF_F_RXHASH;
netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -5203,7 +5202,7 @@ static void be_netdev_init(struct net_device *netdev)
netdev->flags |= IFF_MULTICAST;
- netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
+ netif_set_tso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
netdev->netdev_ops = &be_netdev_ops;
@@ -5726,8 +5725,6 @@ static void be_remove(struct pci_dev *pdev)
be_unmap_pci_bars(adapter);
be_drv_cleanup(adapter);
- pci_disable_pcie_error_reporting(pdev);
-
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -5840,20 +5837,11 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
SET_NETDEV_DEV(netdev, &pdev->dev);
status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!status) {
- netdev->features |= NETIF_F_HIGHDMA;
- } else {
- status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (status) {
- dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
- goto free_netdev;
- }
+ if (status) {
+ dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
+ goto free_netdev;
}
- status = pci_enable_pcie_error_reporting(pdev);
- if (!status)
- dev_info(&pdev->dev, "PCIe error reporting enabled\n");
-
status = be_map_pci_bars(adapter);
if (status)
goto free_netdev;
@@ -5898,7 +5886,6 @@ drv_cleanup:
unmap_bars:
be_unmap_pci_bars(adapter);
free_netdev:
- pci_disable_pcie_error_reporting(pdev);
free_netdev(netdev);
rel_reg:
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/engleder/Kconfig b/drivers/net/ethernet/engleder/Kconfig
new file mode 100644
index 000000000000..3df6bf476ae7
--- /dev/null
+++ b/drivers/net/ethernet/engleder/Kconfig
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Engleder network device configuration
+#
+
+config NET_VENDOR_ENGLEDER
+ bool "Engleder devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Engleder devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_ENGLEDER
+
+config TSNEP
+ tristate "TSN endpoint support"
+ depends on HAS_IOMEM && HAS_DMA
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select PHYLIB
+ select PAGE_POOL
+ help
+ Support for the Engleder TSN endpoint Ethernet MAC IP Core.
+
+ To compile this driver as a module, choose M here. The module will be
+ called tsnep.
+
+config TSNEP_SELFTESTS
+ bool "TSN endpoint self test support"
+ default n
+ depends on TSNEP
+ help
+ This enables self test support within the TSN endpoint driver.
+
+ If unsure, say N.
+
+endif # NET_VENDOR_ENGLEDER
diff --git a/drivers/net/ethernet/engleder/Makefile b/drivers/net/ethernet/engleder/Makefile
new file mode 100644
index 000000000000..b98135f65eb7
--- /dev/null
+++ b/drivers/net/ethernet/engleder/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Engleder Ethernet drivers
+#
+
+obj-$(CONFIG_TSNEP) += tsnep.o
+
+tsnep-objs := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o tsnep_tc.o \
+ tsnep_rxnfc.o tsnep_xdp.o
+tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
new file mode 100644
index 000000000000..11b29f56aaf9
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#ifndef _TSNEP_H
+#define _TSNEP_H
+
+#include "tsnep_hw.h"
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/ethtool.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/miscdevice.h>
+
+#define TSNEP "tsnep"
+
+#define TSNEP_RING_SIZE 256
+#define TSNEP_RING_MASK (TSNEP_RING_SIZE - 1)
+#define TSNEP_RING_RX_REFILL 16
+#define TSNEP_RING_RX_REUSE (TSNEP_RING_SIZE - TSNEP_RING_SIZE / 4)
+#define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE)
+#define TSNEP_RING_PAGE_COUNT (TSNEP_RING_SIZE / TSNEP_RING_ENTRIES_PER_PAGE)
+
+struct tsnep_gcl {
+ void __iomem *addr;
+
+ u64 base_time;
+ u64 cycle_time;
+ u64 cycle_time_extension;
+
+ struct tsnep_gcl_operation operation[TSNEP_GCL_COUNT];
+ int count;
+
+ u64 change_limit;
+
+ u64 start_time;
+ bool change;
+};
+
+enum tsnep_rxnfc_filter_type {
+ TSNEP_RXNFC_ETHER_TYPE,
+};
+
+struct tsnep_rxnfc_filter {
+ enum tsnep_rxnfc_filter_type type;
+ union {
+ u16 ether_type;
+ };
+};
+
+struct tsnep_rxnfc_rule {
+ struct list_head list;
+ struct tsnep_rxnfc_filter filter;
+ int queue_index;
+ int location;
+};
+
+struct tsnep_tx_entry {
+ struct tsnep_tx_desc *desc;
+ struct tsnep_tx_desc_wb *desc_wb;
+ dma_addr_t desc_dma;
+ bool owner_user_flag;
+
+ u32 properties;
+
+ u32 type;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ bool zc;
+ };
+ size_t len;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+};
+
+struct tsnep_tx {
+ struct tsnep_adapter *adapter;
+ void __iomem *addr;
+ int queue_index;
+
+ void *page[TSNEP_RING_PAGE_COUNT];
+ dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
+
+ struct tsnep_tx_entry entry[TSNEP_RING_SIZE];
+ int write;
+ int read;
+ u32 owner_counter;
+ int increment_owner_counter;
+ struct xsk_buff_pool *xsk_pool;
+
+ u32 packets;
+ u32 bytes;
+ u32 dropped;
+};
+
+struct tsnep_rx_entry {
+ struct tsnep_rx_desc *desc;
+ struct tsnep_rx_desc_wb *desc_wb;
+ dma_addr_t desc_dma;
+
+ u32 properties;
+
+ union {
+ struct page *page;
+ struct xdp_buff *xdp;
+ };
+ size_t len;
+ dma_addr_t dma;
+};
+
+struct tsnep_rx {
+ struct tsnep_adapter *adapter;
+ void __iomem *addr;
+ int queue_index;
+ int tx_queue_index;
+
+ void *page[TSNEP_RING_PAGE_COUNT];
+ dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
+
+ struct tsnep_rx_entry entry[TSNEP_RING_SIZE];
+ int write;
+ int read;
+ u32 owner_counter;
+ int increment_owner_counter;
+ struct page_pool *page_pool;
+ struct page **page_buffer;
+ struct xsk_buff_pool *xsk_pool;
+ struct xdp_buff **xdp_batch;
+
+ u32 packets;
+ u32 bytes;
+ u32 dropped;
+ u32 multicast;
+ u32 alloc_failed;
+
+ struct xdp_rxq_info xdp_rxq;
+ struct xdp_rxq_info xdp_rxq_zc;
+};
+
+struct tsnep_queue {
+ struct tsnep_adapter *adapter;
+ char name[IFNAMSIZ + 9];
+
+ struct tsnep_tx *tx;
+ struct tsnep_rx *rx;
+
+ struct napi_struct napi;
+
+ int irq;
+ u32 irq_mask;
+ void __iomem *irq_delay_addr;
+ u8 irq_delay;
+};
+
+struct tsnep_adapter {
+ struct net_device *netdev;
+ u8 mac_address[ETH_ALEN];
+ struct mii_bus *mdiobus;
+ bool suppress_preamble;
+ phy_interface_t phy_mode;
+ struct phy_device *phydev;
+ int msg_enable;
+
+ struct platform_device *pdev;
+ struct device *dmadev;
+ void __iomem *addr;
+
+ bool gate_control;
+ /* gate control lock */
+ struct mutex gate_control_lock;
+ bool gate_control_active;
+ struct tsnep_gcl gcl[2];
+ int next_gcl;
+
+ struct hwtstamp_config hwtstamp_config;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ /* ptp clock lock */
+ spinlock_t ptp_lock;
+
+ /* RX flow classification rules lock */
+ struct mutex rxnfc_lock;
+ struct list_head rxnfc_rules;
+ int rxnfc_count;
+ int rxnfc_max;
+
+ struct bpf_prog *xdp_prog;
+
+ int num_tx_queues;
+ struct tsnep_tx tx[TSNEP_MAX_QUEUES];
+ int num_rx_queues;
+ struct tsnep_rx rx[TSNEP_MAX_QUEUES];
+
+ int num_queues;
+ struct tsnep_queue queue[TSNEP_MAX_QUEUES];
+};
+
+extern const struct ethtool_ops tsnep_ethtool_ops;
+
+int tsnep_ptp_init(struct tsnep_adapter *adapter);
+void tsnep_ptp_cleanup(struct tsnep_adapter *adapter);
+int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+
+int tsnep_tc_init(struct tsnep_adapter *adapter);
+void tsnep_tc_cleanup(struct tsnep_adapter *adapter);
+int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data);
+
+int tsnep_rxnfc_init(struct tsnep_adapter *adapter);
+void tsnep_rxnfc_cleanup(struct tsnep_adapter *adapter);
+int tsnep_rxnfc_get_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+int tsnep_rxnfc_get_all(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+
+int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack);
+int tsnep_xdp_setup_pool(struct tsnep_adapter *adapter,
+ struct xsk_buff_pool *pool, u16 queue_id);
+
+#if IS_ENABLED(CONFIG_TSNEP_SELFTESTS)
+int tsnep_ethtool_get_test_count(void);
+void tsnep_ethtool_get_test_strings(u8 *data);
+void tsnep_ethtool_self_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data);
+#else
+static inline int tsnep_ethtool_get_test_count(void)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void tsnep_ethtool_get_test_strings(u8 *data)
+{
+ /* not enabled */
+}
+
+static inline void tsnep_ethtool_self_test(struct net_device *dev,
+ struct ethtool_test *eth_test,
+ u64 *data)
+{
+ /* not enabled */
+}
+#endif /* CONFIG_TSNEP_SELFTESTS */
+
+void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time);
+int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs);
+u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue);
+int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool);
+void tsnep_disable_xsk(struct tsnep_queue *queue);
+
+#endif /* _TSNEP_H */
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
new file mode 100644
index 000000000000..716815dad7d2
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -0,0 +1,486 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+static const char tsnep_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "rx_bytes",
+ "rx_dropped",
+ "rx_multicast",
+ "rx_alloc_failed",
+ "rx_phy_errors",
+ "rx_forwarded_phy_errors",
+ "rx_invalid_frame_errors",
+ "tx_packets",
+ "tx_bytes",
+ "tx_dropped",
+};
+
+struct tsnep_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_multicast;
+ u64 rx_alloc_failed;
+ u64 rx_phy_errors;
+ u64 rx_forwarded_phy_errors;
+ u64 rx_invalid_frame_errors;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+};
+
+#define TSNEP_STATS_COUNT (sizeof(struct tsnep_stats) / sizeof(u64))
+
+static const char tsnep_rx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_%d_packets",
+ "rx_%d_bytes",
+ "rx_%d_dropped",
+ "rx_%d_multicast",
+ "rx_%d_alloc_failed",
+ "rx_%d_no_descriptor_errors",
+ "rx_%d_buffer_too_small_errors",
+ "rx_%d_fifo_overflow_errors",
+ "rx_%d_invalid_frame_errors",
+};
+
+struct tsnep_rx_queue_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+ u64 rx_multicast;
+ u64 rx_alloc_failed;
+ u64 rx_no_descriptor_errors;
+ u64 rx_buffer_too_small_errors;
+ u64 rx_fifo_overflow_errors;
+ u64 rx_invalid_frame_errors;
+};
+
+#define TSNEP_RX_QUEUE_STATS_COUNT (sizeof(struct tsnep_rx_queue_stats) / \
+ sizeof(u64))
+
+static const char tsnep_tx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+ "tx_%d_packets",
+ "tx_%d_bytes",
+ "tx_%d_dropped",
+};
+
+struct tsnep_tx_queue_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+};
+
+#define TSNEP_TX_QUEUE_STATS_COUNT (sizeof(struct tsnep_tx_queue_stats) / \
+ sizeof(u64))
+
+static void tsnep_ethtool_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ strscpy(drvinfo->driver, TSNEP, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(&adapter->pdev->dev),
+ sizeof(drvinfo->bus_info));
+}
+
+static int tsnep_ethtool_get_regs_len(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int len;
+ int num_additional_queues;
+
+ len = TSNEP_MAC_SIZE;
+
+ /* the first queue pair is within TSNEP_MAC_SIZE; only queues beyond the
+ * first pair extend the register length by TSNEP_QUEUE_SIZE each
+ */
+ num_additional_queues =
+ max(adapter->num_tx_queues, adapter->num_rx_queues) - 1;
+ len += TSNEP_QUEUE_SIZE * num_additional_queues;
+
+ return len;
+}
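+
As a worked example of the register-length computation above (a standalone sketch; the queue count of 4 is illustrative, the sizes come from tsnep_hw.h later in this patch):

    #include <stdio.h>

    #define TSNEP_MAC_SIZE   0x4000
    #define TSNEP_QUEUE_SIZE 0x1000

    int main(void)
    {
    	int num_tx_queues = 4, num_rx_queues = 4;	/* illustrative only */
    	int max_queues = num_tx_queues > num_rx_queues ?
    			 num_tx_queues : num_rx_queues;
    	/* the first queue pair lives inside TSNEP_MAC_SIZE */
    	int len = TSNEP_MAC_SIZE + TSNEP_QUEUE_SIZE * (max_queues - 1);

    	printf("regs len = 0x%x\n", len);		/* 0x7000 */
    	return 0;
    }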
+
+static void tsnep_ethtool_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs,
+ void *p)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ regs->version = 1;
+
+ memcpy_fromio(p, adapter->addr, regs->len);
+}
+
+static u32 tsnep_ethtool_get_msglevel(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->msg_enable;
+}
+
+static void tsnep_ethtool_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ adapter->msg_enable = data;
+}
+
+static void tsnep_ethtool_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count = adapter->num_rx_queues;
+ int tx_count = adapter->num_tx_queues;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(data, tsnep_stats_strings, sizeof(tsnep_stats_strings));
+ data += sizeof(tsnep_stats_strings);
+
+ for (i = 0; i < rx_count; i++) {
+ for (j = 0; j < TSNEP_RX_QUEUE_STATS_COUNT; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ tsnep_rx_queue_stats_strings[j], i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < tx_count; i++) {
+ for (j = 0; j < TSNEP_TX_QUEUE_STATS_COUNT; j++) {
+ snprintf(data, ETH_GSTRING_LEN,
+ tsnep_tx_queue_stats_strings[j], i);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ case ETH_SS_TEST:
+ tsnep_ethtool_get_test_strings(data);
+ break;
+ }
+}
+
+static void tsnep_ethtool_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count = adapter->num_rx_queues;
+ int tx_count = adapter->num_tx_queues;
+ struct tsnep_stats tsnep_stats;
+ struct tsnep_rx_queue_stats tsnep_rx_queue_stats;
+ struct tsnep_tx_queue_stats tsnep_tx_queue_stats;
+ u32 reg;
+ int i;
+
+ memset(&tsnep_stats, 0, sizeof(tsnep_stats));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ tsnep_stats.rx_packets += adapter->rx[i].packets;
+ tsnep_stats.rx_bytes += adapter->rx[i].bytes;
+ tsnep_stats.rx_dropped += adapter->rx[i].dropped;
+ tsnep_stats.rx_multicast += adapter->rx[i].multicast;
+ tsnep_stats.rx_alloc_failed += adapter->rx[i].alloc_failed;
+ }
+ reg = ioread32(adapter->addr + ECM_STAT);
+ tsnep_stats.rx_phy_errors =
+ (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
+ tsnep_stats.rx_forwarded_phy_errors =
+ (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
+ tsnep_stats.rx_invalid_frame_errors =
+ (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tsnep_stats.tx_packets += adapter->tx[i].packets;
+ tsnep_stats.tx_bytes += adapter->tx[i].bytes;
+ tsnep_stats.tx_dropped += adapter->tx[i].dropped;
+ }
+ memcpy(data, &tsnep_stats, sizeof(tsnep_stats));
+ data += TSNEP_STATS_COUNT;
+
+ for (i = 0; i < rx_count; i++) {
+ memset(&tsnep_rx_queue_stats, 0, sizeof(tsnep_rx_queue_stats));
+ tsnep_rx_queue_stats.rx_packets = adapter->rx[i].packets;
+ tsnep_rx_queue_stats.rx_bytes = adapter->rx[i].bytes;
+ tsnep_rx_queue_stats.rx_dropped = adapter->rx[i].dropped;
+ tsnep_rx_queue_stats.rx_multicast = adapter->rx[i].multicast;
+ tsnep_rx_queue_stats.rx_alloc_failed =
+ adapter->rx[i].alloc_failed;
+ reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
+ TSNEP_RX_STATISTIC);
+ tsnep_rx_queue_stats.rx_no_descriptor_errors =
+ (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
+ TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
+ tsnep_rx_queue_stats.rx_buffer_too_small_errors =
+ (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
+ TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
+ tsnep_rx_queue_stats.rx_fifo_overflow_errors =
+ (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
+ TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
+ tsnep_rx_queue_stats.rx_invalid_frame_errors =
+ (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
+ TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
+ memcpy(data, &tsnep_rx_queue_stats,
+ sizeof(tsnep_rx_queue_stats));
+ data += TSNEP_RX_QUEUE_STATS_COUNT;
+ }
+
+ for (i = 0; i < tx_count; i++) {
+ memset(&tsnep_tx_queue_stats, 0, sizeof(tsnep_tx_queue_stats));
+ tsnep_tx_queue_stats.tx_packets += adapter->tx[i].packets;
+ tsnep_tx_queue_stats.tx_bytes += adapter->tx[i].bytes;
+ tsnep_tx_queue_stats.tx_dropped += adapter->tx[i].dropped;
+ memcpy(data, &tsnep_tx_queue_stats,
+ sizeof(tsnep_tx_queue_stats));
+ data += TSNEP_TX_QUEUE_STATS_COUNT;
+ }
+}
+
+static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int rx_count;
+ int tx_count;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ rx_count = adapter->num_rx_queues;
+ tx_count = adapter->num_tx_queues;
+ return TSNEP_STATS_COUNT +
+ TSNEP_RX_QUEUE_STATS_COUNT * rx_count +
+ TSNEP_TX_QUEUE_STATS_COUNT * tx_count;
+ case ETH_SS_TEST:
+ return tsnep_ethtool_get_test_count();
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsnep_ethtool_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ return 0;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->rxnfc_count;
+ cmd->data = adapter->rxnfc_max;
+ cmd->data |= RX_CLS_LOC_SPECIAL;
+ return 0;
+ case ETHTOOL_GRXCLSRULE:
+ return tsnep_rxnfc_get_rule(adapter, cmd);
+ case ETHTOOL_GRXCLSRLALL:
+ return tsnep_rxnfc_get_all(adapter, cmd, rule_locs);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsnep_ethtool_set_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return tsnep_rxnfc_add_rule(adapter, cmd);
+ case ETHTOOL_SRXCLSRLDEL:
+ return tsnep_rxnfc_del_rule(adapter, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void tsnep_ethtool_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ ch->max_rx = adapter->num_rx_queues;
+ ch->max_tx = adapter->num_tx_queues;
+ ch->rx_count = adapter->num_rx_queues;
+ ch->tx_count = adapter->num_tx_queues;
+}
+
+static int tsnep_ethtool_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static struct tsnep_queue *tsnep_get_queue_with_tx(struct tsnep_adapter *adapter,
+ int index)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ if (adapter->queue[i].tx) {
+ if (index == 0)
+ return &adapter->queue[i];
+
+ index--;
+ }
+ }
+
+ return NULL;
+}
+
+static struct tsnep_queue *tsnep_get_queue_with_rx(struct tsnep_adapter *adapter,
+ int index)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ if (adapter->queue[i].rx) {
+ if (index == 0)
+ return &adapter->queue[i];
+
+ index--;
+ }
+ }
+
+ return NULL;
+}
+
+static int tsnep_ethtool_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct tsnep_queue *queue;
+
+ queue = tsnep_get_queue_with_rx(adapter, 0);
+ if (queue)
+ ec->rx_coalesce_usecs = tsnep_get_irq_coalesce(queue);
+
+ queue = tsnep_get_queue_with_tx(adapter, 0);
+ if (queue)
+ ec->tx_coalesce_usecs = tsnep_get_irq_coalesce(queue);
+
+ return 0;
+}
+
+static int tsnep_ethtool_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i;
+ int retval;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ /* RX coalesce has priority for queues with TX and RX */
+ if (adapter->queue[i].rx)
+ retval = tsnep_set_irq_coalesce(&adapter->queue[i],
+ ec->rx_coalesce_usecs);
+ else
+ retval = tsnep_set_irq_coalesce(&adapter->queue[i],
+ ec->tx_coalesce_usecs);
+ if (retval != 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+static int tsnep_ethtool_get_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct tsnep_queue *queue_with_rx;
+ struct tsnep_queue *queue_with_tx;
+
+ if (queue >= max(adapter->num_tx_queues, adapter->num_rx_queues))
+ return -EINVAL;
+
+ queue_with_rx = tsnep_get_queue_with_rx(adapter, queue);
+ if (queue_with_rx)
+ ec->rx_coalesce_usecs = tsnep_get_irq_coalesce(queue_with_rx);
+
+ queue_with_tx = tsnep_get_queue_with_tx(adapter, queue);
+ if (queue_with_tx)
+ ec->tx_coalesce_usecs = tsnep_get_irq_coalesce(queue_with_tx);
+
+ return 0;
+}
+
+static int tsnep_ethtool_set_per_queue_coalesce(struct net_device *netdev,
+ u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct tsnep_queue *queue_with_rx;
+ struct tsnep_queue *queue_with_tx;
+ int retval;
+
+ if (queue >= max(adapter->num_tx_queues, adapter->num_rx_queues))
+ return -EINVAL;
+
+ queue_with_rx = tsnep_get_queue_with_rx(adapter, queue);
+ if (queue_with_rx) {
+ retval = tsnep_set_irq_coalesce(queue_with_rx, ec->rx_coalesce_usecs);
+ if (retval != 0)
+ return retval;
+ }
+
+ /* RX coalesce has priority for queues with TX and RX */
+ queue_with_tx = tsnep_get_queue_with_tx(adapter, queue);
+ if (queue_with_tx && !queue_with_tx->rx) {
+ retval = tsnep_set_irq_coalesce(queue_with_tx, ec->tx_coalesce_usecs);
+ if (retval != 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+const struct ethtool_ops tsnep_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .get_drvinfo = tsnep_ethtool_get_drvinfo,
+ .get_regs_len = tsnep_ethtool_get_regs_len,
+ .get_regs = tsnep_ethtool_get_regs,
+ .get_msglevel = tsnep_ethtool_get_msglevel,
+ .set_msglevel = tsnep_ethtool_set_msglevel,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .self_test = tsnep_ethtool_self_test,
+ .get_strings = tsnep_ethtool_get_strings,
+ .get_ethtool_stats = tsnep_ethtool_get_ethtool_stats,
+ .get_sset_count = tsnep_ethtool_get_sset_count,
+ .get_rxnfc = tsnep_ethtool_get_rxnfc,
+ .set_rxnfc = tsnep_ethtool_set_rxnfc,
+ .get_channels = tsnep_ethtool_get_channels,
+ .get_ts_info = tsnep_ethtool_get_ts_info,
+ .get_coalesce = tsnep_ethtool_get_coalesce,
+ .set_coalesce = tsnep_ethtool_set_coalesce,
+ .get_per_queue_coalesce = tsnep_ethtool_get_per_queue_coalesce,
+ .set_per_queue_coalesce = tsnep_ethtool_set_per_queue_coalesce,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h
new file mode 100644
index 000000000000..55e1caf193a6
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_hw.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+/* Hardware definition of TSNEP and EtherCAT MAC device */
+
+#ifndef _TSNEP_HW_H
+#define _TSNEP_HW_H
+
+#include <linux/types.h>
+
+/* type */
+#define ECM_TYPE 0x0000
+#define ECM_REVISION_MASK 0x000000FF
+#define ECM_REVISION_SHIFT 0
+#define ECM_VERSION_MASK 0x0000FF00
+#define ECM_VERSION_SHIFT 8
+#define ECM_QUEUE_COUNT_MASK 0x00070000
+#define ECM_QUEUE_COUNT_SHIFT 16
+#define ECM_GATE_CONTROL 0x02000000
+
+/* system time */
+#define ECM_SYSTEM_TIME_LOW 0x0008
+#define ECM_SYSTEM_TIME_HIGH 0x000C
+
+/* clock */
+#define ECM_CLOCK_RATE 0x0010
+#define ECM_CLOCK_RATE_OFFSET_MASK 0x7FFFFFFF
+#define ECM_CLOCK_RATE_OFFSET_SIGN 0x80000000
+
+/* interrupt */
+#define ECM_INT_ENABLE 0x0018
+#define ECM_INT_ACTIVE 0x001C
+#define ECM_INT_ACKNOWLEDGE 0x001C
+#define ECM_INT_LINK 0x00000020
+#define ECM_INT_TX_0 0x00000100
+#define ECM_INT_RX_0 0x00000200
+#define ECM_INT_TXRX_SHIFT 2
+#define ECM_INT_ALL 0x7FFFFFFF
+#define ECM_INT_DISABLE 0x80000000
+
+/* reset */
+#define ECM_RESET 0x0020
+#define ECM_RESET_COMMON 0x00000001
+#define ECM_RESET_CHANNEL 0x00000100
+#define ECM_RESET_TXRX 0x00010000
+
+/* counter */
+#define ECM_COUNTER_LOW 0x0028
+#define ECM_COUNTER_HIGH 0x002C
+
+/* interrupt delay */
+#define ECM_INT_DELAY 0x0030
+#define ECM_INT_DELAY_MASK 0xF0
+#define ECM_INT_DELAY_SHIFT 4
+#define ECM_INT_DELAY_BASE_US 16
+#define ECM_INT_DELAY_OFFSET 1
+
+/* control and status */
+#define ECM_STATUS 0x0080
+#define ECM_LINK_MODE_OFF 0x01000000
+#define ECM_LINK_MODE_100 0x02000000
+#define ECM_LINK_MODE_1000 0x04000000
+#define ECM_NO_LINK 0x01000000
+#define ECM_LINK_MODE_MASK 0x06000000
+
+/* management data */
+#define ECM_MD_CONTROL 0x0084
+#define ECM_MD_STATUS 0x0084
+#define ECM_MD_PREAMBLE 0x00000001
+#define ECM_MD_READ 0x00000004
+#define ECM_MD_WRITE 0x00000002
+#define ECM_MD_ADDR_MASK 0x000000F8
+#define ECM_MD_ADDR_SHIFT 3
+#define ECM_MD_PHY_ADDR_MASK 0x00001F00
+#define ECM_MD_PHY_ADDR_SHIFT 8
+#define ECM_MD_BUSY 0x00000001
+#define ECM_MD_DATA_MASK 0xFFFF0000
+#define ECM_MD_DATA_SHIFT 16
+
+/* statistic */
+#define ECM_STAT 0x00B0
+#define ECM_STAT_RX_ERR_MASK 0x000000FF
+#define ECM_STAT_RX_ERR_SHIFT 0
+#define ECM_STAT_INV_FRM_MASK 0x0000FF00
+#define ECM_STAT_INV_FRM_SHIFT 8
+#define ECM_STAT_FWD_RX_ERR_MASK 0x00FF0000
+#define ECM_STAT_FWD_RX_ERR_SHIFT 16
+
+/* tsnep */
+#define TSNEP_MAC_SIZE 0x4000
+#define TSNEP_QUEUE_SIZE 0x1000
+#define TSNEP_QUEUE(n) ({ typeof(n) __n = (n); \
+ (__n) == 0 ? \
+ 0 : \
+ TSNEP_MAC_SIZE + TSNEP_QUEUE_SIZE * ((__n) - 1); })
+#define TSNEP_MAX_QUEUES 8
+#define TSNEP_MAX_FRAME_SIZE (2 * 1024) /* hardware actually supports 16k */
+#define TSNEP_DESC_SIZE 256
+#define TSNEP_DESC_OFFSET 128
+
+/* tsnep register */
+#define TSNEP_INFO 0x0100
+#define TSNEP_INFO_TX_TIME 0x00010000
+#define TSNEP_CONTROL 0x0108
+#define TSNEP_CONTROL_TX_RESET 0x00000001
+#define TSNEP_CONTROL_TX_ENABLE 0x00000002
+#define TSNEP_CONTROL_TX_DMA_ERROR 0x00000010
+#define TSNEP_CONTROL_TX_DESC_ERROR 0x00000020
+#define TSNEP_CONTROL_RX_RESET 0x00000100
+#define TSNEP_CONTROL_RX_ENABLE 0x00000200
+#define TSNEP_CONTROL_RX_DISABLE 0x00000400
+#define TSNEP_CONTROL_RX_DMA_ERROR 0x00001000
+#define TSNEP_CONTROL_RX_DESC_ERROR 0x00002000
+#define TSNEP_TX_DESC_ADDR_LOW 0x0140
+#define TSNEP_TX_DESC_ADDR_HIGH 0x0144
+#define TSNEP_RX_DESC_ADDR_LOW 0x0180
+#define TSNEP_RX_DESC_ADDR_HIGH 0x0184
+#define TSNEP_RESET_OWNER_COUNTER 0x01
+#define TSNEP_RX_STATISTIC 0x0190
+#define TSNEP_RX_STATISTIC_NO_DESC_MASK 0x000000FF
+#define TSNEP_RX_STATISTIC_NO_DESC_SHIFT 0
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK 0x0000FF00
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT 8
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK 0x00FF0000
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT 16
+#define TSNEP_RX_STATISTIC_INVALID_FRAME_MASK 0xFF000000
+#define TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT 24
+#define TSNEP_RX_STATISTIC_NO_DESC 0x0190
+#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL 0x0191
+#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW 0x0192
+#define TSNEP_RX_STATISTIC_INVALID_FRAME 0x0193
+#define TSNEP_MAC_ADDRESS_LOW 0x0800
+#define TSNEP_MAC_ADDRESS_HIGH 0x0804
+#define TSNEP_RX_FILTER 0x0806
+#define TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS 0x0001
+#define TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS 0x0002
+#define TSNEP_GC 0x0808
+#define TSNEP_GC_ENABLE_A 0x00000002
+#define TSNEP_GC_ENABLE_B 0x00000004
+#define TSNEP_GC_DISABLE 0x00000008
+#define TSNEP_GC_ENABLE_TIMEOUT 0x00000010
+#define TSNEP_GC_ACTIVE_A 0x00000002
+#define TSNEP_GC_ACTIVE_B 0x00000004
+#define TSNEP_GC_CHANGE_AB 0x00000008
+#define TSNEP_GC_TIMEOUT_ACTIVE 0x00000010
+#define TSNEP_GC_TIMEOUT_SIGNAL 0x00000020
+#define TSNEP_GC_LIST_ERROR 0x00000080
+#define TSNEP_GC_OPEN 0x00FF0000
+#define TSNEP_GC_OPEN_SHIFT 16
+#define TSNEP_GC_NEXT_OPEN 0xFF000000
+#define TSNEP_GC_NEXT_OPEN_SHIFT 24
+#define TSNEP_GC_TIMEOUT 131072
+#define TSNEP_GC_TIME 0x080C
+#define TSNEP_GC_CHANGE 0x0810
+#define TSNEP_GCL_A 0x2000
+#define TSNEP_GCL_B 0x2800
+#define TSNEP_GCL_SIZE SZ_2K
+#define TSNEP_RX_ASSIGN 0x0840
+#define TSNEP_RX_ASSIGN_ACTIVE 0x00000001
+#define TSNEP_RX_ASSIGN_QUEUE_MASK 0x00000006
+#define TSNEP_RX_ASSIGN_QUEUE_SHIFT 1
+#define TSNEP_RX_ASSIGN_OFFSET 1
+#define TSNEP_RX_ASSIGN_ETHER_TYPE 0x0880
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_OFFSET 2
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT 2
+
+/* tsnep gate control list operation */
+struct tsnep_gcl_operation {
+ u32 properties;
+ u32 interval;
+};
+
+#define TSNEP_GCL_COUNT (TSNEP_GCL_SIZE / sizeof(struct tsnep_gcl_operation))
+#define TSNEP_GCL_MASK 0x000000FF
+#define TSNEP_GCL_INSERT 0x20000000
+#define TSNEP_GCL_CHANGE 0x40000000
+#define TSNEP_GCL_LAST 0x80000000
+#define TSNEP_GCL_MIN_INTERVAL 32
+
+/* tsnep TX/RX descriptor */
+#define TSNEP_DESC_SIZE 256
+#define TSNEP_DESC_SIZE_DATA_AFTER 2048
+#define TSNEP_DESC_OFFSET 128
+#define TSNEP_DESC_OWNER_COUNTER_MASK 0xC0000000
+#define TSNEP_DESC_OWNER_COUNTER_SHIFT 30
+#define TSNEP_DESC_LENGTH_MASK 0x00003FFF
+#define TSNEP_DESC_INTERRUPT_FLAG 0x00040000
+#define TSNEP_DESC_EXTENDED_WRITEBACK_FLAG 0x00080000
+#define TSNEP_DESC_NO_LINK_FLAG 0x01000000
+
+/* tsnep TX descriptor */
+struct tsnep_tx_desc {
+ __le32 properties;
+ __le32 more_properties;
+ __le32 reserved[2];
+ __le64 next;
+ __le64 tx;
+};
+
+#define TSNEP_TX_DESC_OWNER_MASK 0xE0000000
+#define TSNEP_TX_DESC_OWNER_USER_FLAG 0x20000000
+#define TSNEP_TX_DESC_LAST_FRAGMENT_FLAG 0x00010000
+#define TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG 0x00020000
+
+/* tsnep TX descriptor writeback */
+struct tsnep_tx_desc_wb {
+ __le32 properties;
+ __le32 reserved1;
+ __le64 counter;
+ __le64 timestamp;
+ __le32 dma_delay;
+ __le32 reserved2;
+};
+
+#define TSNEP_TX_DESC_UNDERRUN_ERROR_FLAG 0x00010000
+#define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_MASK 0x0000FFFC
+#define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_SHIFT 2
+#define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_MASK 0xFFFC0000
+#define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_SHIFT 18
+#define TSNEP_TX_DESC_DMA_DELAY_NS 64
+
+/* tsnep RX descriptor */
+struct tsnep_rx_desc {
+ __le32 properties;
+ __le32 reserved[3];
+ __le64 next;
+ __le64 rx;
+};
+
+#define TSNEP_RX_DESC_BUFFER_SIZE_MASK 0x00003FFC
+
+/* tsnep RX descriptor writeback */
+struct tsnep_rx_desc_wb {
+ __le32 properties;
+ __le32 reserved[7];
+};
+
+/* tsnep RX inline meta */
+struct tsnep_rx_inline {
+ __le64 counter;
+ __le64 timestamp;
+};
+
+#define TSNEP_RX_INLINE_METADATA_SIZE (sizeof(struct tsnep_rx_inline))
+
+#endif /* _TSNEP_HW_H */
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
new file mode 100644
index 000000000000..84751bb303a6
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -0,0 +1,2617 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+/* TSN endpoint Ethernet MAC driver
+ *
+ * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
+ * communication. It is designed for endpoints within TSN (Time Sensitive
+ * Networking) networks; e.g., for PLCs in the industrial automation case.
+ *
+ * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
+ * by the driver.
+ *
+ * More information can be found here:
+ * - www.embedded-experts.at/tsn
+ * - www.engleder-embedded.com
+ */
+
+#include "tsnep.h"
+#include "tsnep_hw.h"
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/iopoll.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <net/xdp_sock_drv.h>
+
+#define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define TSNEP_HEADROOM ALIGN(TSNEP_RX_OFFSET, 4)
+#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+/* XSK buffer shall store at least a Q-in-Q frame */
+#define TSNEP_XSK_RX_BUF_SIZE (ALIGN(TSNEP_RX_INLINE_METADATA_SIZE + \
+ ETH_FRAME_LEN + ETH_FCS_LEN + \
+ VLAN_HLEN * 2, 4))
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
+#else
+#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
+#endif
+#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))
+
+#define TSNEP_COALESCE_USECS_DEFAULT 64
+#define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
+ ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
+
+#define TSNEP_TX_TYPE_SKB BIT(0)
+#define TSNEP_TX_TYPE_SKB_FRAG BIT(1)
+#define TSNEP_TX_TYPE_XDP_TX BIT(2)
+#define TSNEP_TX_TYPE_XDP_NDO BIT(3)
+#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
+#define TSNEP_TX_TYPE_XSK BIT(4)
+
+#define TSNEP_XDP_TX BIT(0)
+#define TSNEP_XDP_REDIRECT BIT(1)
+
+static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
+{
+ iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
+}
+
+static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
+{
+ mask |= ECM_INT_DISABLE;
+ iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
+}
+
+static irqreturn_t tsnep_irq(int irq, void *arg)
+{
+ struct tsnep_adapter *adapter = arg;
+ u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);
+
+ /* acknowledge interrupt */
+ if (active != 0)
+ iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);
+
+ /* handle link interrupt */
+ if ((active & ECM_INT_LINK) != 0)
+ phy_mac_interrupt(adapter->netdev->phydev);
+
+ /* handle TX/RX queue 0 interrupt */
+ if ((active & adapter->queue[0].irq_mask) != 0) {
+ tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+ napi_schedule(&adapter->queue[0].napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
+{
+ struct tsnep_queue *queue = arg;
+
+ /* handle TX/RX queue interrupt */
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ napi_schedule(&queue->napi);
+
+ return IRQ_HANDLED;
+}
+
+int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs)
+{
+ if (usecs > TSNEP_COALESCE_USECS_MAX)
+ return -ERANGE;
+
+ usecs /= ECM_INT_DELAY_BASE_US;
+ usecs <<= ECM_INT_DELAY_SHIFT;
+ usecs &= ECM_INT_DELAY_MASK;
+
+ queue->irq_delay &= ~ECM_INT_DELAY_MASK;
+ queue->irq_delay |= usecs;
+ iowrite8(queue->irq_delay, queue->irq_delay_addr);
+
+ return 0;
+}
+
+u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue)
+{
+ u32 usecs;
+
+ usecs = (queue->irq_delay & ECM_INT_DELAY_MASK);
+ usecs >>= ECM_INT_DELAY_SHIFT;
+ usecs *= ECM_INT_DELAY_BASE_US;
+
+ return usecs;
+}
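+
A small standalone sketch of the quantization performed by the two coalesce helpers above, assuming the ECM_INT_DELAY_* values defined in tsnep_hw.h (a 4-bit delay field in units of 16 us):

    #include <stdio.h>

    #define ECM_INT_DELAY_BASE_US 16u

    int main(void)
    {
    	unsigned int requested = 100;
    	unsigned int field = requested / ECM_INT_DELAY_BASE_US;	/* 6 */
    	unsigned int readback = field * ECM_INT_DELAY_BASE_US;		/* 96 us */
    	unsigned int max = 15 * ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1;

    	/* a request of 100 us is quantized down to 96 us; the maximum
    	 * representable value is 255 us (TSNEP_COALESCE_USECS_MAX)
    	 */
    	printf("requested=%u readback=%u max=%u\n", requested, readback, max);
    	return 0;
    }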
+
+static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct tsnep_adapter *adapter = bus->priv;
+ u32 md;
+ int retval;
+
+ md = ECM_MD_READ;
+ if (!adapter->suppress_preamble)
+ md |= ECM_MD_PREAMBLE;
+ md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
+ md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
+ iowrite32(md, adapter->addr + ECM_MD_CONTROL);
+ retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
+ !(md & ECM_MD_BUSY), 16, 1000);
+ if (retval != 0)
+ return retval;
+
+ return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
+}
+
+static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct tsnep_adapter *adapter = bus->priv;
+ u32 md;
+ int retval;
+
+ md = ECM_MD_WRITE;
+ if (!adapter->suppress_preamble)
+ md |= ECM_MD_PREAMBLE;
+ md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
+ md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
+ md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
+ iowrite32(md, adapter->addr + ECM_MD_CONTROL);
+ retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
+ !(md & ECM_MD_BUSY), 16, 1000);
+ if (retval != 0)
+ return retval;
+
+ return 0;
+}
+
+static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
+{
+ u32 mode;
+
+ switch (adapter->phydev->speed) {
+ case SPEED_100:
+ mode = ECM_LINK_MODE_100;
+ break;
+ case SPEED_1000:
+ mode = ECM_LINK_MODE_1000;
+ break;
+ default:
+ mode = ECM_LINK_MODE_OFF;
+ break;
+ }
+ iowrite32(mode, adapter->addr + ECM_STATUS);
+}
+
+static void tsnep_phy_link_status_change(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+
+ if (phydev->link)
+ tsnep_set_link_mode(adapter);
+
+ phy_print_status(netdev->phydev);
+}
+
+static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
+{
+ int retval;
+
+ retval = phy_loopback(adapter->phydev, enable);
+
+ /* PHY link state changes are not signaled while loopback is enabled;
+ * waiting for one would only delay a working loopback anyway, so
+ * ensure that loopback works immediately by setting the link mode
+ * directly.
+ */
+ if (!retval && enable)
+ tsnep_set_link_mode(adapter);
+
+ return retval;
+}
+
+static int tsnep_phy_open(struct tsnep_adapter *adapter)
+{
+ struct phy_device *phydev;
+ struct ethtool_eee ethtool_eee;
+ int retval;
+
+ retval = phy_connect_direct(adapter->netdev, adapter->phydev,
+ tsnep_phy_link_status_change,
+ adapter->phy_mode);
+ if (retval)
+ return retval;
+ phydev = adapter->netdev->phydev;
+
+ /* MAC supports only 100Mbps|1000Mbps full duplex
+ * SPE (Single Pair Ethernet) is also an option but not implemented yet
+ */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ /* disable EEE autoneg, EEE not supported by TSNEP */
+ memset(&ethtool_eee, 0, sizeof(ethtool_eee));
+ phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);
+
+ adapter->phydev->irq = PHY_MAC_INTERRUPT;
+ phy_start(adapter->phydev);
+
+ return 0;
+}
+
+static void tsnep_phy_close(struct tsnep_adapter *adapter)
+{
+ phy_stop(adapter->netdev->phydev);
+ phy_disconnect(adapter->netdev->phydev);
+}
+
+static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ int i;
+
+ memset(tx->entry, 0, sizeof(tx->entry));
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ if (tx->page[i]) {
+ dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
+ tx->page_dma[i]);
+ tx->page[i] = NULL;
+ tx->page_dma[i] = 0;
+ }
+ }
+}
+
+static int tsnep_tx_ring_create(struct tsnep_tx *tx)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ struct tsnep_tx_entry *next_entry;
+ int i, j;
+ int retval;
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ tx->page[i] =
+ dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
+ GFP_KERNEL);
+ if (!tx->page[i]) {
+ retval = -ENOMEM;
+ goto alloc_failed;
+ }
+ for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
+ entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
+ entry->desc_wb = (struct tsnep_tx_desc_wb *)
+ (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
+ entry->desc = (struct tsnep_tx_desc *)
+ (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
+ entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
+ entry->owner_user_flag = false;
+ }
+ }
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &tx->entry[i];
+ next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK];
+ entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
+ }
+
+ return 0;
+
+alloc_failed:
+ tsnep_tx_ring_cleanup(tx);
+ return retval;
+}
+
+static void tsnep_tx_init(struct tsnep_tx *tx)
+{
+ dma_addr_t dma;
+
+ dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
+ iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
+ iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
+ tx->write = 0;
+ tx->read = 0;
+ tx->owner_counter = 1;
+ tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+}
+
+static void tsnep_tx_enable(struct tsnep_tx *tx)
+{
+ struct netdev_queue *nq;
+
+ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+
+ __netif_tx_lock_bh(nq);
+ netif_tx_wake_queue(nq);
+ __netif_tx_unlock_bh(nq);
+}
+
+static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi)
+{
+ struct netdev_queue *nq;
+ u32 val;
+
+ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+
+ __netif_tx_lock_bh(nq);
+ netif_tx_stop_queue(nq);
+ __netif_tx_unlock_bh(nq);
+
+ /* wait until TX is done in hardware */
+ readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
+ ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
+ 1000000);
+
+ /* wait until TX is also done in software */
+ while (READ_ONCE(tx->read) != tx->write) {
+ napi_schedule(napi);
+ napi_synchronize(napi);
+ }
+}
+
+static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
+ bool last)
+{
+ struct tsnep_tx_entry *entry = &tx->entry[index];
+
+ entry->properties = 0;
+ /* xdpf and zc are union with skb */
+ if (entry->skb) {
+ entry->properties = length & TSNEP_DESC_LENGTH_MASK;
+ entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
+ if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+ (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
+ entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
+
+ /* toggle user flag to prevent false acknowledge
+ *
+ * Only the first fragment is acknowledged. For all other
+ * fragments no acknowledge is done and the last written owner
+ * counter stays in the writeback descriptor. Therefore, it is
+ * possible that the last written owner counter is identical to
+ * the new incremented owner counter and a false acknowledge is
+ * detected before the real acknowledge has been done by
+ * hardware.
+ *
+ * The user flag is used to prevent this situation. The user
+ * flag is copied to the writeback descriptor by the hardware
+ * and is used as additional acknowledge data. By toggling the
+ * user flag only for the first fragment (which is
+ * acknowledged), it is guaranteed that the last acknowledge
+ * done for this descriptor has used a different user flag and
+ * cannot be detected as false acknowledge.
+ */
+ entry->owner_user_flag = !entry->owner_user_flag;
+ }
+ if (last)
+ entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
+ if (index == tx->increment_owner_counter) {
+ tx->owner_counter++;
+ if (tx->owner_counter == 4)
+ tx->owner_counter = 1;
+ tx->increment_owner_counter--;
+ if (tx->increment_owner_counter < 0)
+ tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+ }
+ entry->properties |=
+ (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
+ TSNEP_DESC_OWNER_COUNTER_MASK;
+ if (entry->owner_user_flag)
+ entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
+ entry->desc->more_properties =
+ __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);
+
+ /* descriptor properties shall be written last, because valid data is
+ * signaled there
+ */
+ dma_wmb();
+
+ entry->desc->properties = __cpu_to_le32(entry->properties);
+}
+
+static int tsnep_tx_desc_available(struct tsnep_tx *tx)
+{
+ if (tx->read <= tx->write)
+ return TSNEP_RING_SIZE - tx->write + tx->read - 1;
+ else
+ return tx->read - tx->write - 1;
+}
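+
A quick numeric check of the ring-occupancy helper above (a standalone sketch; the ring size of 256 is TSNEP_RING_SIZE from tsnep.h):

    #include <stdio.h>

    #define RING_SIZE 256

    static int desc_available(int read, int write)
    {
    	if (read <= write)
    		return RING_SIZE - write + read - 1;
    	else
    		return read - write - 1;
    }

    int main(void)
    {
    	/* write is ahead of read: 15 free entries remain */
    	printf("%d\n", desc_available(10, 250));	/* 15 */
    	/* read has wrapped past write */
    	printf("%d\n", desc_available(250, 10));	/* 239 */
    	return 0;
    }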
+
+static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ unsigned int len;
+ dma_addr_t dma;
+ int map_len = 0;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK];
+
+ if (!i) {
+ len = skb_headlen(skb);
+ dma = dma_map_single(dmadev, skb->data, len,
+ DMA_TO_DEVICE);
+
+ entry->type = TSNEP_TX_TYPE_SKB;
+ } else {
+ len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
+ dma = skb_frag_dma_map(dmadev,
+ &skb_shinfo(skb)->frags[i - 1],
+ 0, len, DMA_TO_DEVICE);
+
+ entry->type = TSNEP_TX_TYPE_SKB_FRAG;
+ }
+ if (dma_mapping_error(dmadev, dma))
+ return -ENOMEM;
+
+ entry->len = len;
+ dma_unmap_addr_set(entry, dma, dma);
+
+ entry->desc->tx = __cpu_to_le64(dma);
+
+ map_len += len;
+ }
+
+ return map_len;
+}
+
+static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ int map_len = 0;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(index + i) & TSNEP_RING_MASK];
+
+ if (entry->len) {
+ if (entry->type & TSNEP_TX_TYPE_SKB)
+ dma_unmap_single(dmadev,
+ dma_unmap_addr(entry, dma),
+ dma_unmap_len(entry, len),
+ DMA_TO_DEVICE);
+ else if (entry->type &
+ (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_XDP_NDO))
+ dma_unmap_page(dmadev,
+ dma_unmap_addr(entry, dma),
+ dma_unmap_len(entry, len),
+ DMA_TO_DEVICE);
+ map_len += entry->len;
+ entry->len = 0;
+ }
+ }
+
+ return map_len;
+}
+
+static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
+ struct tsnep_tx *tx)
+{
+ int count = 1;
+ struct tsnep_tx_entry *entry;
+ int length;
+ int i;
+ int retval;
+
+ if (skb_shinfo(skb)->nr_frags > 0)
+ count += skb_shinfo(skb)->nr_frags;
+
+ if (tsnep_tx_desc_available(tx) < count) {
+ /* ring is full; this shall not happen because the queue is stopped
+ * below whenever it gets full
+ */
+ netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ entry = &tx->entry[tx->write];
+ entry->skb = skb;
+
+ retval = tsnep_tx_map(skb, tx, count);
+ if (retval < 0) {
+ tsnep_tx_unmap(tx, tx->write, count);
+ dev_kfree_skb_any(entry->skb);
+ entry->skb = NULL;
+
+ tx->dropped++;
+
+ return NETDEV_TX_OK;
+ }
+ length = retval;
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ for (i = 0; i < count; i++)
+ tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
+ i == count - 1);
+ tx->write = (tx->write + count) & TSNEP_RING_MASK;
+
+ skb_tx_timestamp(skb);
+
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
+
+ if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
+ /* ring can get full with next frame */
+ netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx,
+ struct skb_shared_info *shinfo, int count, u32 type)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ struct page *page;
+ skb_frag_t *frag;
+ unsigned int len;
+ int map_len = 0;
+ dma_addr_t dma;
+ void *data;
+ int i;
+
+ frag = NULL;
+ len = xdpf->len;
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK];
+ if (type & TSNEP_TX_TYPE_XDP_NDO) {
+ data = unlikely(frag) ? skb_frag_address(frag) :
+ xdpf->data;
+ dma = dma_map_single(dmadev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dmadev, dma))
+ return -ENOMEM;
+
+ entry->type = TSNEP_TX_TYPE_XDP_NDO;
+ } else {
+ page = unlikely(frag) ? skb_frag_page(frag) :
+ virt_to_page(xdpf->data);
+ dma = page_pool_get_dma_addr(page);
+ if (unlikely(frag))
+ dma += skb_frag_off(frag);
+ else
+ dma += sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(dmadev, dma, len,
+ DMA_BIDIRECTIONAL);
+
+ entry->type = TSNEP_TX_TYPE_XDP_TX;
+ }
+
+ entry->len = len;
+ dma_unmap_addr_set(entry, dma, dma);
+
+ entry->desc->tx = __cpu_to_le64(dma);
+
+ map_len += len;
+
+ if (i + 1 < count) {
+ frag = &shinfo->frags[i];
+ len = skb_frag_size(frag);
+ }
+ }
+
+ return map_len;
+}
+
+/* This function requires __netif_tx_lock is held by the caller. */
+static bool tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf,
+ struct tsnep_tx *tx, u32 type)
+{
+ struct skb_shared_info *shinfo = xdp_get_shared_info_from_frame(xdpf);
+ struct tsnep_tx_entry *entry;
+ int count, length, retval, i;
+
+ count = 1;
+ if (unlikely(xdp_frame_has_frags(xdpf)))
+ count += shinfo->nr_frags;
+
+ /* ensure that the TX ring is not filled up by XDP; at least
+ * MAX_SKB_FRAGS + 1 descriptors stay available for the normal TX path,
+ * which stops the queue there if necessary
+ */
+ if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count))
+ return false;
+
+ entry = &tx->entry[tx->write];
+ entry->xdpf = xdpf;
+
+ retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type);
+ if (retval < 0) {
+ tsnep_tx_unmap(tx, tx->write, count);
+ entry->xdpf = NULL;
+
+ tx->dropped++;
+
+ return false;
+ }
+ length = retval;
+
+ for (i = 0; i < count; i++)
+ tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length,
+ i == count - 1);
+ tx->write = (tx->write + count) & TSNEP_RING_MASK;
+
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ return true;
+}
+
+static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
+{
+ iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
+}
+
+static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
+ struct xdp_buff *xdp,
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ bool xmit;
+
+ if (unlikely(!xdpf))
+ return false;
+
+ __netif_tx_lock(tx_nq, smp_processor_id());
+
+ xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX);
+
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ if (xmit)
+ txq_trans_cond_update(tx_nq);
+
+ __netif_tx_unlock(tx_nq);
+
+ return xmit;
+}
+
+static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx)
+{
+ struct tsnep_tx_entry *entry;
+ dma_addr_t dma;
+
+ entry = &tx->entry[tx->write];
+ entry->zc = true;
+
+ dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr);
+ xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len);
+
+ entry->type = TSNEP_TX_TYPE_XSK;
+ entry->len = xdpd->len;
+
+ entry->desc->tx = __cpu_to_le64(dma);
+
+ return xdpd->len;
+}
+
+static void tsnep_xdp_xmit_frame_ring_zc(struct xdp_desc *xdpd,
+ struct tsnep_tx *tx)
+{
+ int length;
+
+ length = tsnep_xdp_tx_map_zc(xdpd, tx);
+
+ tsnep_tx_activate(tx, tx->write, length, true);
+ tx->write = (tx->write + 1) & TSNEP_RING_MASK;
+}
+
+static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx)
+{
+ int desc_available = tsnep_tx_desc_available(tx);
+ struct xdp_desc *descs = tx->xsk_pool->tx_descs;
+ int batch, i;
+
+ /* ensure that the TX ring is not filled up by XDP; MAX_SKB_FRAGS + 1
+ * descriptors are always kept available for the normal TX path, which
+ * stops the queue if necessary
+ */
+ if (desc_available <= (MAX_SKB_FRAGS + 1))
+ return;
+ desc_available -= MAX_SKB_FRAGS + 1;
+
+ batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available);
+ for (i = 0; i < batch; i++)
+ tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx);
+
+ if (batch) {
+ /* descriptor properties shall be valid before hardware is
+ * notified
+ */
+ dma_wmb();
+
+ tsnep_xdp_xmit_flush(tx);
+ }
+}
+
+static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
+{
+ struct tsnep_tx_entry *entry;
+ struct netdev_queue *nq;
+ int xsk_frames = 0;
+ int budget = 128;
+ int length;
+ int count;
+
+ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+ __netif_tx_lock(nq, smp_processor_id());
+
+ do {
+ if (tx->read == tx->write)
+ break;
+
+ entry = &tx->entry[tx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_TX_DESC_OWNER_MASK) !=
+ (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
+ break;
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
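+ /* a frame uses one descriptor for the linear part plus one
+ * descriptor per fragment
+ */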
+ count = 1;
+ if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+ skb_shinfo(entry->skb)->nr_frags > 0)
+ count += skb_shinfo(entry->skb)->nr_frags;
+ else if ((entry->type & TSNEP_TX_TYPE_XDP) &&
+ xdp_frame_has_frags(entry->xdpf))
+ count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags;
+
+ length = tsnep_tx_unmap(tx, tx->read, count);
+
+ if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+ (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ (__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
+ struct skb_shared_hwtstamps hwtstamps;
+ u64 timestamp;
+
+ if (skb_shinfo(entry->skb)->tx_flags &
+ SKBTX_HW_TSTAMP_USE_CYCLES)
+ timestamp =
+ __le64_to_cpu(entry->desc_wb->counter);
+ else
+ timestamp =
+ __le64_to_cpu(entry->desc_wb->timestamp);
+
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(timestamp);
+
+ skb_tstamp_tx(entry->skb, &hwtstamps);
+ }
+
+ if (entry->type & TSNEP_TX_TYPE_SKB)
+ napi_consume_skb(entry->skb, napi_budget);
+ else if (entry->type & TSNEP_TX_TYPE_XDP)
+ xdp_return_frame_rx_napi(entry->xdpf);
+ else
+ xsk_frames++;
+ /* xdpf and zc are in a union with skb */
+ entry->skb = NULL;
+
+ tx->read = (tx->read + count) & TSNEP_RING_MASK;
+
+ tx->packets++;
+ tx->bytes += length + ETH_FCS_LEN;
+
+ budget--;
+ } while (likely(budget));
+
+ if (tx->xsk_pool) {
+ if (xsk_frames)
+ xsk_tx_completed(tx->xsk_pool, xsk_frames);
+ if (xsk_uses_need_wakeup(tx->xsk_pool))
+ xsk_set_tx_need_wakeup(tx->xsk_pool);
+ tsnep_xdp_xmit_zc(tx);
+ }
+
+ if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
+ netif_tx_queue_stopped(nq)) {
+ netif_tx_wake_queue(nq);
+ }
+
+ __netif_tx_unlock(nq);
+
+ return budget != 0;
+}
+
+static bool tsnep_tx_pending(struct tsnep_tx *tx)
+{
+ struct tsnep_tx_entry *entry;
+ struct netdev_queue *nq;
+ bool pending = false;
+
+ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+ __netif_tx_lock(nq, smp_processor_id());
+
+ if (tx->read != tx->write) {
+ entry = &tx->entry[tx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_TX_DESC_OWNER_MASK) ==
+ (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
+ pending = true;
+ }
+
+ __netif_tx_unlock(nq);
+
+ return pending;
+}
+
+static int tsnep_tx_open(struct tsnep_tx *tx)
+{
+ int retval;
+
+ retval = tsnep_tx_ring_create(tx);
+ if (retval)
+ return retval;
+
+ tsnep_tx_init(tx);
+
+ return 0;
+}
+
+static void tsnep_tx_close(struct tsnep_tx *tx)
+{
+ tsnep_tx_ring_cleanup(tx);
+}
+
+static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ struct tsnep_rx_entry *entry;
+ int i;
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &rx->entry[i];
+ if (!rx->xsk_pool && entry->page)
+ page_pool_put_full_page(rx->page_pool, entry->page,
+ false);
+ if (rx->xsk_pool && entry->xdp)
+ xsk_buff_free(entry->xdp);
+ /* xdp is in a union with page */
+ entry->page = NULL;
+ }
+
+ if (rx->page_pool)
+ page_pool_destroy(rx->page_pool);
+
+ memset(rx->entry, 0, sizeof(rx->entry));
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ if (rx->page[i]) {
+ dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
+ rx->page_dma[i]);
+ rx->page[i] = NULL;
+ rx->page_dma[i] = 0;
+ }
+ }
+}
+
+static int tsnep_rx_ring_create(struct tsnep_rx *rx)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ struct tsnep_rx_entry *entry;
+ struct page_pool_params pp_params = { 0 };
+ struct tsnep_rx_entry *next_entry;
+ int i, j;
+ int retval;
+
+ for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
+ rx->page[i] =
+ dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
+ GFP_KERNEL);
+ if (!rx->page[i]) {
+ retval = -ENOMEM;
+ goto failed;
+ }
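+ /* every descriptor slot within the DMA coherent page consists of a
+ * write-back area followed by the descriptor itself
+ */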
+ for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
+ entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
+ entry->desc_wb = (struct tsnep_rx_desc_wb *)
+ (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
+ entry->desc = (struct tsnep_rx_desc *)
+ (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
+ entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
+ }
+ }
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.order = 0;
+ pp_params.pool_size = TSNEP_RING_SIZE;
+ pp_params.nid = dev_to_node(dmadev);
+ pp_params.dev = dmadev;
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
+ pp_params.offset = TSNEP_RX_OFFSET;
+ rx->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx->page_pool)) {
+ retval = PTR_ERR(rx->page_pool);
+ rx->page_pool = NULL;
+ goto failed;
+ }
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ entry = &rx->entry[i];
+ next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK];
+ entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
+ }
+
+ return 0;
+
+failed:
+ tsnep_rx_ring_cleanup(rx);
+ return retval;
+}
+
+static void tsnep_rx_init(struct tsnep_rx *rx)
+{
+ dma_addr_t dma;
+
+ dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
+ iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
+ iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
+ rx->write = 0;
+ rx->read = 0;
+ rx->owner_counter = 1;
+ rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+}
+
+static void tsnep_rx_enable(struct tsnep_rx *rx)
+{
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
+}
+
+static void tsnep_rx_disable(struct tsnep_rx *rx)
+{
+ u32 val;
+
+ iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
+ readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
+ ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
+ 1000000);
+}
+
+static int tsnep_rx_desc_available(struct tsnep_rx *rx)
+{
+ if (rx->read <= rx->write)
+ return TSNEP_RING_SIZE - rx->write + rx->read - 1;
+ else
+ return rx->read - rx->write - 1;
+}
+
+static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx)
+{
+ struct page **page;
+
+ /* last entry of page_buffer is always zero, because ring cannot be
+ * filled completely
+ */
+ page = rx->page_buffer;
+ while (*page) {
+ page_pool_put_full_page(rx->page_pool, *page, false);
+ *page = NULL;
+ page++;
+ }
+}
+
+static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx)
+{
+ int i;
+
+ /* alloc for all ring entries except the last one, because ring cannot
+ * be filled completely
+ */
+ for (i = 0; i < TSNEP_RING_SIZE - 1; i++) {
+ rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool);
+ if (!rx->page_buffer[i]) {
+ tsnep_rx_free_page_buffer(rx);
+
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
+ struct page *page)
+{
+ entry->page = page;
+ entry->len = TSNEP_MAX_RX_BUF_SIZE;
+ entry->dma = page_pool_get_dma_addr(entry->page);
+ entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET);
+}
+
+static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
+ return -ENOMEM;
+ tsnep_rx_set_page(rx, entry, page);
+
+ return 0;
+}
+
+static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+ struct tsnep_rx_entry *read = &rx->entry[rx->read];
+
+ tsnep_rx_set_page(rx, entry, read->page);
+ read->page = NULL;
+}
+
+static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+
+ /* TSNEP_MAX_RX_BUF_SIZE and TSNEP_XSK_RX_BUF_SIZE are multiples of 4 */
+ entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
+ entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
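+ /* the owner counter cycles through 1..3 and is advanced roughly once
+ * per ring traversal, so stale write-backs of earlier traversals are
+ * not mistaken for new ones
+ */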
+ if (index == rx->increment_owner_counter) {
+ rx->owner_counter++;
+ if (rx->owner_counter == 4)
+ rx->owner_counter = 1;
+ rx->increment_owner_counter--;
+ if (rx->increment_owner_counter < 0)
+ rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
+ }
+ entry->properties |=
+ (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
+ TSNEP_DESC_OWNER_COUNTER_MASK;
+
+ /* descriptor properties shall be written last, because valid data is
+ * signaled there
+ */
+ dma_wmb();
+
+ entry->desc->properties = __cpu_to_le32(entry->properties);
+}
+
+static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse)
+{
+ bool alloc_failed = false;
+ int i, index;
+
+ for (i = 0; i < count && !alloc_failed; i++) {
+ index = (rx->write + i) & TSNEP_RING_MASK;
+
+ if (unlikely(tsnep_rx_alloc_buffer(rx, index))) {
+ rx->alloc_failed++;
+ alloc_failed = true;
+
+ /* reuse only if no other allocation was successful */
+ if (i == 0 && reuse)
+ tsnep_rx_reuse_buffer(rx, index);
+ else
+ break;
+ }
+
+ tsnep_rx_activate(rx, index);
+ }
+
+ if (i)
+ rx->write = (rx->write + i) & TSNEP_RING_MASK;
+
+ return i;
+}
+
+static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
+{
+ int desc_refilled;
+
+ desc_refilled = tsnep_rx_alloc(rx, count, reuse);
+ if (desc_refilled)
+ tsnep_rx_enable(rx);
+
+ return desc_refilled;
+}
+
+static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
+ struct xdp_buff *xdp)
+{
+ entry->xdp = xdp;
+ entry->len = TSNEP_XSK_RX_BUF_SIZE;
+ entry->dma = xsk_buff_xdp_get_dma(entry->xdp);
+ entry->desc->rx = __cpu_to_le64(entry->dma);
+}
+
+static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index)
+{
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+ struct tsnep_rx_entry *read = &rx->entry[rx->read];
+
+ tsnep_rx_set_xdp(rx, entry, read->xdp);
+ read->xdp = NULL;
+}
+
+static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse)
+{
+ u32 allocated;
+ int i;
+
+ allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count);
+ for (i = 0; i < allocated; i++) {
+ int index = (rx->write + i) & TSNEP_RING_MASK;
+ struct tsnep_rx_entry *entry = &rx->entry[index];
+
+ tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]);
+ tsnep_rx_activate(rx, index);
+ }
+ if (i == 0) {
+ rx->alloc_failed++;
+
+ if (reuse) {
+ tsnep_rx_reuse_buffer_zc(rx, rx->write);
+ tsnep_rx_activate(rx, rx->write);
+ }
+ }
+
+ if (i)
+ rx->write = (rx->write + i) & TSNEP_RING_MASK;
+
+ return i;
+}
+
+static void tsnep_rx_free_zc(struct tsnep_rx *rx)
+{
+ int i;
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ struct tsnep_rx_entry *entry = &rx->entry[i];
+
+ if (entry->xdp)
+ xsk_buff_free(entry->xdp);
+ entry->xdp = NULL;
+ }
+}
+
+static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse)
+{
+ int desc_refilled;
+
+ desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse);
+ if (desc_refilled)
+ tsnep_rx_enable(rx);
+
+ return desc_refilled;
+}
+
+static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
+ struct xdp_buff *xdp, int *status,
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+{
+ unsigned int length;
+ unsigned int sync;
+ u32 act;
+
+ length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ return false;
+ case XDP_TX:
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ goto out_failure;
+ *status |= TSNEP_XDP_TX;
+ return true;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
+ goto out_failure;
+ *status |= TSNEP_XDP_REDIRECT;
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+out_failure:
+ trace_xdp_exception(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ /* due to xdp_adjust_tail the frame length may have changed: the DMA
+ * sync for_device must cover the maximum length the CPU has touched
+ */
+ sync = xdp->data_end - xdp->data_hard_start -
+ XDP_PACKET_HEADROOM;
+ sync = max(sync, length);
+ page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
+ sync, true);
+ return true;
+ }
+}
+
+static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog,
+ struct xdp_buff *xdp, int *status,
+ struct netdev_queue *tx_nq,
+ struct tsnep_tx *tx)
+{
+ u32 act;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ /* XDP_REDIRECT is the main action for zero-copy */
+ if (likely(act == XDP_REDIRECT)) {
+ if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
+ goto out_failure;
+ *status |= TSNEP_XDP_REDIRECT;
+ return true;
+ }
+
+ switch (act) {
+ case XDP_PASS:
+ return false;
+ case XDP_TX:
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ goto out_failure;
+ *status |= TSNEP_XDP_TX;
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+out_failure:
+ trace_xdp_exception(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ xsk_buff_free(xdp);
+ return true;
+ }
+}
+
+static void tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status,
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+{
+ if (status & TSNEP_XDP_TX) {
+ __netif_tx_lock(tx_nq, smp_processor_id());
+ tsnep_xdp_xmit_flush(tx);
+ __netif_tx_unlock(tx_nq);
+ }
+
+ if (status & TSNEP_XDP_REDIRECT)
+ xdp_do_flush();
+}
+
+static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
+ int length)
+{
+ struct sk_buff *skb;
+
+ skb = napi_build_skb(page_address(page), PAGE_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, TSNEP_RX_OFFSET + TSNEP_RX_INLINE_METADATA_SIZE);
+ __skb_put(skb, length - ETH_FCS_LEN);
+
+ if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ struct tsnep_rx_inline *rx_inline =
+ (struct tsnep_rx_inline *)(page_address(page) +
+ TSNEP_RX_OFFSET);
+
+ skb_shinfo(skb)->tx_flags |=
+ SKBTX_HW_TSTAMP_NETDEV;
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->netdev_data = rx_inline;
+ }
+
+ skb_record_rx_queue(skb, rx->queue_index);
+ skb->protocol = eth_type_trans(skb, rx->adapter->netdev);
+
+ return skb;
+}
+
+static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi,
+ struct page *page, int length)
+{
+ struct sk_buff *skb;
+
+ skb = tsnep_build_skb(rx, page, length);
+ if (skb) {
+ page_pool_release_page(rx->page_pool, page);
+
+ rx->packets++;
+ rx->bytes += length;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ rx->multicast++;
+
+ napi_gro_receive(napi, skb);
+ } else {
+ page_pool_recycle_direct(rx->page_pool, page);
+
+ rx->dropped++;
+ }
+}
+
+static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
+ int budget)
+{
+ struct device *dmadev = rx->adapter->dmadev;
+ enum dma_data_direction dma_dir;
+ struct tsnep_rx_entry *entry;
+ struct netdev_queue *tx_nq;
+ struct bpf_prog *prog;
+ struct xdp_buff xdp;
+ struct tsnep_tx *tx;
+ int desc_available;
+ int xdp_status = 0;
+ int done = 0;
+ int length;
+
+ desc_available = tsnep_rx_desc_available(rx);
+ dma_dir = page_pool_get_dma_dir(rx->page_pool);
+ prog = READ_ONCE(rx->adapter->xdp_prog);
+ if (prog) {
+ tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
+ rx->tx_queue_index);
+ tx = &rx->adapter->tx[rx->tx_queue_index];
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
+ }
+
+ while (likely(done < budget) && (rx->read != rx->write)) {
+ entry = &rx->entry[rx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_OWNER_COUNTER_MASK) !=
+ (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+ break;
+ done++;
+
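+ /* refill in batches during processing so that the hardware does not
+ * run out of RX buffers
+ */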
+ if (desc_available >= TSNEP_RING_RX_REFILL) {
+ bool reuse = desc_available >= TSNEP_RING_RX_REUSE;
+
+ desc_available -= tsnep_rx_refill(rx, desc_available,
+ reuse);
+ if (!entry->page) {
+ /* buffer has been reused for refill to prevent
+ * empty RX ring, thus buffer cannot be used for
+ * RX processing
+ */
+ rx->read = (rx->read + 1) & TSNEP_RING_MASK;
+ desc_available++;
+
+ rx->dropped++;
+
+ continue;
+ }
+ }
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
+ prefetch(page_address(entry->page) + TSNEP_RX_OFFSET);
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+ dma_sync_single_range_for_cpu(dmadev, entry->dma,
+ TSNEP_RX_OFFSET, length, dma_dir);
+
+ /* RX metadata with timestamps is in front of actual data,
+ * subtract metadata size to get length of actual data and
+ * consider metadata size as offset of actual data during RX
+ * processing
+ */
+ length -= TSNEP_RX_INLINE_METADATA_SIZE;
+
+ rx->read = (rx->read + 1) & TSNEP_RING_MASK;
+ desc_available++;
+
+ if (prog) {
+ bool consume;
+
+ xdp_prepare_buff(&xdp, page_address(entry->page),
+ XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
+ length, false);
+
+ consume = tsnep_xdp_run_prog(rx, prog, &xdp,
+ &xdp_status, tx_nq, tx);
+ if (consume) {
+ rx->packets++;
+ rx->bytes += length;
+
+ entry->page = NULL;
+
+ continue;
+ }
+ }
+
+ tsnep_rx_page(rx, napi, entry->page, length);
+ entry->page = NULL;
+ }
+
+ if (xdp_status)
+ tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
+
+ if (desc_available)
+ tsnep_rx_refill(rx, desc_available, false);
+
+ return done;
+}
+
+static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
+ int budget)
+{
+ struct tsnep_rx_entry *entry;
+ struct netdev_queue *tx_nq;
+ struct bpf_prog *prog;
+ struct tsnep_tx *tx;
+ int desc_available;
+ int xdp_status = 0;
+ struct page *page;
+ int done = 0;
+ int length;
+
+ desc_available = tsnep_rx_desc_available(rx);
+ prog = READ_ONCE(rx->adapter->xdp_prog);
+ if (prog) {
+ tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
+ rx->tx_queue_index);
+ tx = &rx->adapter->tx[rx->tx_queue_index];
+ }
+
+ while (likely(done < budget) && (rx->read != rx->write)) {
+ entry = &rx->entry[rx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_OWNER_COUNTER_MASK) !=
+ (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+ break;
+ done++;
+
+ if (desc_available >= TSNEP_RING_RX_REFILL) {
+ bool reuse = desc_available >= TSNEP_RING_RX_REUSE;
+
+ desc_available -= tsnep_rx_refill_zc(rx, desc_available,
+ reuse);
+ if (!entry->xdp) {
+ /* buffer has been reused for refill to prevent
+ * empty RX ring, thus buffer cannot be used for
+ * RX processing
+ */
+ rx->read = (rx->read + 1) & TSNEP_RING_MASK;
+ desc_available++;
+
+ rx->dropped++;
+
+ continue;
+ }
+ }
+
+ /* descriptor properties shall be read first, because valid data
+ * is signaled there
+ */
+ dma_rmb();
+
+ prefetch(entry->xdp->data);
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+ xsk_buff_set_size(entry->xdp, length);
+ xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
+
+ /* RX metadata with timestamps is in front of actual data,
+ * subtract metadata size to get length of actual data and
+ * consider metadata size as offset of actual data during RX
+ * processing
+ */
+ length -= TSNEP_RX_INLINE_METADATA_SIZE;
+
+ rx->read = (rx->read + 1) & TSNEP_RING_MASK;
+ desc_available++;
+
+ if (prog) {
+ bool consume;
+
+ entry->xdp->data += TSNEP_RX_INLINE_METADATA_SIZE;
+ entry->xdp->data_meta += TSNEP_RX_INLINE_METADATA_SIZE;
+
+ consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp,
+ &xdp_status, tx_nq, tx);
+ if (consume) {
+ rx->packets++;
+ rx->bytes += length;
+
+ entry->xdp = NULL;
+
+ continue;
+ }
+ }
+
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (page) {
+ memcpy(page_address(page) + TSNEP_RX_OFFSET,
+ entry->xdp->data - TSNEP_RX_INLINE_METADATA_SIZE,
+ length + TSNEP_RX_INLINE_METADATA_SIZE);
+ tsnep_rx_page(rx, napi, page, length);
+ } else {
+ rx->dropped++;
+ }
+ xsk_buff_free(entry->xdp);
+ entry->xdp = NULL;
+ }
+
+ if (xdp_status)
+ tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
+
+ if (desc_available)
+ desc_available -= tsnep_rx_refill_zc(rx, desc_available, false);
+
+ if (xsk_uses_need_wakeup(rx->xsk_pool)) {
+ if (desc_available)
+ xsk_set_rx_need_wakeup(rx->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx->xsk_pool);
+
+ return done;
+ }
+
+ return desc_available ? budget : done;
+}
+
+static bool tsnep_rx_pending(struct tsnep_rx *rx)
+{
+ struct tsnep_rx_entry *entry;
+
+ if (rx->read != rx->write) {
+ entry = &rx->entry[rx->read];
+ if ((__le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_OWNER_COUNTER_MASK) ==
+ (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
+ return true;
+ }
+
+ return false;
+}
+
+static int tsnep_rx_open(struct tsnep_rx *rx)
+{
+ int desc_available;
+ int retval;
+
+ retval = tsnep_rx_ring_create(rx);
+ if (retval)
+ return retval;
+
+ tsnep_rx_init(rx);
+
+ desc_available = tsnep_rx_desc_available(rx);
+ if (rx->xsk_pool)
+ retval = tsnep_rx_alloc_zc(rx, desc_available, false);
+ else
+ retval = tsnep_rx_alloc(rx, desc_available, false);
+ if (retval != desc_available) {
+ retval = -ENOMEM;
+
+ goto alloc_failed;
+ }
+
+ /* prealloc pages to prevent allocation failures when XSK pool is
+ * disabled at runtime
+ */
+ if (rx->xsk_pool) {
+ retval = tsnep_rx_alloc_page_buffer(rx);
+ if (retval)
+ goto alloc_failed;
+ }
+
+ return 0;
+
+alloc_failed:
+ tsnep_rx_ring_cleanup(rx);
+ return retval;
+}
+
+static void tsnep_rx_close(struct tsnep_rx *rx)
+{
+ if (rx->xsk_pool)
+ tsnep_rx_free_page_buffer(rx);
+
+ tsnep_rx_ring_cleanup(rx);
+}
+
+static void tsnep_rx_reopen(struct tsnep_rx *rx)
+{
+ struct page **page = rx->page_buffer;
+ int i;
+
+ tsnep_rx_init(rx);
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ struct tsnep_rx_entry *entry = &rx->entry[i];
+
+ /* defined initial values for properties are required for
+ * correct owner counter checking
+ */
+ entry->desc->properties = 0;
+ entry->desc_wb->properties = 0;
+
+ /* prevent allocation failures by reusing kept pages */
+ if (*page) {
+ tsnep_rx_set_page(rx, entry, *page);
+ tsnep_rx_activate(rx, rx->write);
+ rx->write++;
+
+ *page = NULL;
+ page++;
+ }
+ }
+}
+
+static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
+{
+ struct page **page = rx->page_buffer;
+ u32 allocated;
+ int i;
+
+ tsnep_rx_init(rx);
+
+ /* alloc all ring entries except the last one, because the ring cannot
+ * be filled completely; as many buffers as possible is enough, because
+ * a wakeup is done as soon as new buffers are available
+ */
+ allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch,
+ TSNEP_RING_SIZE - 1);
+
+ for (i = 0; i < TSNEP_RING_SIZE; i++) {
+ struct tsnep_rx_entry *entry = &rx->entry[i];
+
+ /* keep pages to prevent allocation failures when xsk is
+ * disabled
+ */
+ if (entry->page) {
+ *page = entry->page;
+ entry->page = NULL;
+
+ page++;
+ }
+
+ /* defined initial values for properties are required for
+ * correct owner counter checking
+ */
+ entry->desc->properties = 0;
+ entry->desc_wb->properties = 0;
+
+ if (allocated) {
+ tsnep_rx_set_xdp(rx, entry,
+ rx->xdp_batch[allocated - 1]);
+ tsnep_rx_activate(rx, rx->write);
+ rx->write++;
+
+ allocated--;
+ }
+ }
+}
+
+static bool tsnep_pending(struct tsnep_queue *queue)
+{
+ if (queue->tx && tsnep_tx_pending(queue->tx))
+ return true;
+
+ if (queue->rx && tsnep_rx_pending(queue->rx))
+ return true;
+
+ return false;
+}
+
+static int tsnep_poll(struct napi_struct *napi, int budget)
+{
+ struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
+ napi);
+ bool complete = true;
+ int done = 0;
+
+ if (queue->tx)
+ complete = tsnep_tx_poll(queue->tx, budget);
+
+ if (queue->rx) {
+ done = queue->rx->xsk_pool ?
+ tsnep_rx_poll_zc(queue->rx, napi, budget) :
+ tsnep_rx_poll(queue->rx, napi, budget);
+ if (done >= budget)
+ complete = false;
+ }
+
+ /* if not all work has been completed, return the budget and keep polling */
+ if (!complete)
+ return budget;
+
+ if (likely(napi_complete_done(napi, done))) {
+ tsnep_enable_irq(queue->adapter, queue->irq_mask);
+
+ /* reschedule if work is already pending to prevent stale packets
+ * which are transmitted or received after polling but before the
+ * interrupt is enabled
+ */
+ if (tsnep_pending(queue)) {
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ napi_schedule(napi);
+ }
+ }
+
+ return min(done, budget - 1);
+}
+
+static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
+{
+ const char *name = netdev_name(queue->adapter->netdev);
+ irq_handler_t handler;
+ void *dev;
+ int retval;
+
+ if (first) {
+ sprintf(queue->name, "%s-mac", name);
+ handler = tsnep_irq;
+ dev = queue->adapter;
+ } else {
+ if (queue->tx && queue->rx)
+ sprintf(queue->name, "%s-txrx-%d", name,
+ queue->rx->queue_index);
+ else if (queue->tx)
+ sprintf(queue->name, "%s-tx-%d", name,
+ queue->tx->queue_index);
+ else
+ sprintf(queue->name, "%s-rx-%d", name,
+ queue->rx->queue_index);
+ handler = tsnep_irq_txrx;
+ dev = queue;
+ }
+
+ retval = request_irq(queue->irq, handler, 0, queue->name, dev);
+ if (retval) {
+ /* if name is empty, then interrupt won't be freed */
+ memset(queue->name, 0, sizeof(queue->name));
+ }
+
+ return retval;
+}
+
+static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
+{
+ void *dev;
+
+ if (!strlen(queue->name))
+ return;
+
+ if (first)
+ dev = queue->adapter;
+ else
+ dev = queue;
+
+ free_irq(queue->irq, dev);
+ memset(queue->name, 0, sizeof(queue->name));
+}
+
+static void tsnep_queue_close(struct tsnep_queue *queue, bool first)
+{
+ struct tsnep_rx *rx = queue->rx;
+
+ tsnep_free_irq(queue, first);
+
+ if (rx) {
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg(&rx->xdp_rxq);
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc))
+ xdp_rxq_info_unreg(&rx->xdp_rxq_zc);
+ }
+
+ netif_napi_del(&queue->napi);
+}
+
+static int tsnep_queue_open(struct tsnep_adapter *adapter,
+ struct tsnep_queue *queue, bool first)
+{
+ struct tsnep_rx *rx = queue->rx;
+ struct tsnep_tx *tx = queue->tx;
+ int retval;
+
+ netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll);
+
+ if (rx) {
+ /* choose TX queue for XDP_TX */
+ if (tx)
+ rx->tx_queue_index = tx->queue_index;
+ else if (rx->queue_index < adapter->num_tx_queues)
+ rx->tx_queue_index = rx->queue_index;
+ else
+ rx->tx_queue_index = 0;
+
+ /* prepare both memory models to eliminate possible registration
+ * errors when memory model is switched between page pool and
+ * XSK pool during runtime
+ */
+ retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev,
+ rx->queue_index, queue->napi.napi_id);
+ if (retval)
+ goto failed;
+ retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx->page_pool);
+ if (retval)
+ goto failed;
+ retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev,
+ rx->queue_index, queue->napi.napi_id);
+ if (retval)
+ goto failed;
+ retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ if (retval)
+ goto failed;
+ if (rx->xsk_pool)
+ xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc);
+ }
+
+ retval = tsnep_request_irq(queue, first);
+ if (retval) {
+ netif_err(adapter, drv, adapter->netdev,
+ "can't get assigned irq %d.\n", queue->irq);
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ tsnep_queue_close(queue, first);
+
+ return retval;
+}
+
+static void tsnep_queue_enable(struct tsnep_queue *queue)
+{
+ napi_enable(&queue->napi);
+ tsnep_enable_irq(queue->adapter, queue->irq_mask);
+
+ if (queue->tx)
+ tsnep_tx_enable(queue->tx);
+
+ if (queue->rx)
+ tsnep_rx_enable(queue->rx);
+}
+
+static void tsnep_queue_disable(struct tsnep_queue *queue)
+{
+ if (queue->tx)
+ tsnep_tx_disable(queue->tx, &queue->napi);
+
+ napi_disable(&queue->napi);
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+
+ /* disable RX after NAPI polling has been disabled, because RX can be
+ * enabled during NAPI polling
+ */
+ if (queue->rx)
+ tsnep_rx_disable(queue->rx);
+}
+
+static int tsnep_netdev_open(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i, retval;
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ if (adapter->queue[i].tx) {
+ retval = tsnep_tx_open(adapter->queue[i].tx);
+ if (retval)
+ goto failed;
+ }
+ if (adapter->queue[i].rx) {
+ retval = tsnep_rx_open(adapter->queue[i].rx);
+ if (retval)
+ goto failed;
+ }
+
+ retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0);
+ if (retval)
+ goto failed;
+ }
+
+ retval = netif_set_real_num_tx_queues(adapter->netdev,
+ adapter->num_tx_queues);
+ if (retval)
+ goto failed;
+ retval = netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
+ if (retval)
+ goto failed;
+
+ tsnep_enable_irq(adapter, ECM_INT_LINK);
+ retval = tsnep_phy_open(adapter);
+ if (retval)
+ goto phy_failed;
+
+ for (i = 0; i < adapter->num_queues; i++)
+ tsnep_queue_enable(&adapter->queue[i]);
+
+ return 0;
+
+phy_failed:
+ tsnep_disable_irq(adapter, ECM_INT_LINK);
+failed:
+ for (i = 0; i < adapter->num_queues; i++) {
+ tsnep_queue_close(&adapter->queue[i], i == 0);
+
+ if (adapter->queue[i].rx)
+ tsnep_rx_close(adapter->queue[i].rx);
+ if (adapter->queue[i].tx)
+ tsnep_tx_close(adapter->queue[i].tx);
+ }
+ return retval;
+}
+
+static int tsnep_netdev_close(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ tsnep_disable_irq(adapter, ECM_INT_LINK);
+ tsnep_phy_close(adapter);
+
+ for (i = 0; i < adapter->num_queues; i++) {
+ tsnep_queue_disable(&adapter->queue[i]);
+
+ tsnep_queue_close(&adapter->queue[i], i == 0);
+
+ if (adapter->queue[i].rx)
+ tsnep_rx_close(adapter->queue[i].rx);
+ if (adapter->queue[i].tx)
+ tsnep_tx_close(adapter->queue[i].tx);
+ }
+
+ return 0;
+}
+
+int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool)
+{
+ bool running = netif_running(queue->adapter->netdev);
+ u32 frame_size;
+
+ frame_size = xsk_pool_get_rx_frame_size(pool);
+ if (frame_size < TSNEP_XSK_RX_BUF_SIZE)
+ return -EOPNOTSUPP;
+
+ queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE,
+ sizeof(*queue->rx->page_buffer),
+ GFP_KERNEL);
+ if (!queue->rx->page_buffer)
+ return -ENOMEM;
+ queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE,
+ sizeof(*queue->rx->xdp_batch),
+ GFP_KERNEL);
+ if (!queue->rx->xdp_batch) {
+ kfree(queue->rx->page_buffer);
+ queue->rx->page_buffer = NULL;
+
+ return -ENOMEM;
+ }
+
+ xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc);
+
+ if (running)
+ tsnep_queue_disable(queue);
+
+ queue->tx->xsk_pool = pool;
+ queue->rx->xsk_pool = pool;
+
+ if (running) {
+ tsnep_rx_reopen_xsk(queue->rx);
+ tsnep_queue_enable(queue);
+ }
+
+ return 0;
+}
+
+void tsnep_disable_xsk(struct tsnep_queue *queue)
+{
+ bool running = netif_running(queue->adapter->netdev);
+
+ if (running)
+ tsnep_queue_disable(queue);
+
+ tsnep_rx_free_zc(queue->rx);
+
+ queue->rx->xsk_pool = NULL;
+ queue->tx->xsk_pool = NULL;
+
+ if (running) {
+ tsnep_rx_reopen(queue->rx);
+ tsnep_queue_enable(queue);
+ }
+
+ kfree(queue->rx->xdp_batch);
+ queue->rx->xdp_batch = NULL;
+ kfree(queue->rx->page_buffer);
+ queue->rx->page_buffer = NULL;
+}
+
+static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ u16 queue_mapping = skb_get_queue_mapping(skb);
+
+ if (queue_mapping >= adapter->num_tx_queues)
+ queue_mapping = 0;
+
+ return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
+}
+
+static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
+ int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+ if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
+ return tsnep_ptp_ioctl(netdev, ifr, cmd);
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
+}
+
+static void tsnep_netdev_set_multicast(struct net_device *netdev)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ u16 rx_filter = 0;
+
+ /* configured MAC address and broadcasts are never filtered */
+ if (netdev->flags & IFF_PROMISC) {
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
+ } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
+ rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
+ }
+ iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
+}
+
+static void tsnep_netdev_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ u32 reg;
+ u32 val;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ stats->tx_packets += adapter->tx[i].packets;
+ stats->tx_bytes += adapter->tx[i].bytes;
+ stats->tx_dropped += adapter->tx[i].dropped;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ stats->rx_packets += adapter->rx[i].packets;
+ stats->rx_bytes += adapter->rx[i].bytes;
+ stats->rx_dropped += adapter->rx[i].dropped;
+ stats->multicast += adapter->rx[i].multicast;
+
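+ /* add RX drop and error counters maintained by the hardware per
+ * queue
+ */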
+ reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
+ TSNEP_RX_STATISTIC);
+ val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
+ TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
+ stats->rx_dropped += val;
+ val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
+ TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
+ stats->rx_dropped += val;
+ val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
+ TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_fifo_errors += val;
+ val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
+ TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_frame_errors += val;
+ }
+
+ reg = ioread32(adapter->addr + ECM_STAT);
+ val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
+ stats->rx_errors += val;
+ val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
+ stats->rx_errors += val;
+ stats->rx_crc_errors += val;
+ val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
+ stats->rx_errors += val;
+}
+
+static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
+{
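+ /* the MAC address is split across a 32 bit low and a 16 bit high
+ * register
+ */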
+ iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
+ iowrite16(*(u16 *)(addr + sizeof(u32)),
+ adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
+
+ ether_addr_copy(adapter->mac_address, addr);
+ netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
+ addr);
+}
+
+static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *sock_addr = addr;
+ int retval;
+
+ retval = eth_prepare_mac_addr_change(netdev, sock_addr);
+ if (retval)
+ return retval;
+ eth_hw_addr_set(netdev, sock_addr->sa_data);
+ tsnep_mac_set_address(adapter, sock_addr->sa_data);
+
+ return 0;
+}
+
+static int tsnep_netdev_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ netdev_features_t changed = netdev->features ^ features;
+ bool enable;
+ int retval = 0;
+
+ if (changed & NETIF_F_LOOPBACK) {
+ enable = !!(features & NETIF_F_LOOPBACK);
+ retval = tsnep_phy_loopback(adapter, enable);
+ }
+
+ return retval;
+}
+
+static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
+ const struct skb_shared_hwtstamps *hwtstamps,
+ bool cycles)
+{
+ struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
+ u64 timestamp;
+
+ if (cycles)
+ timestamp = __le64_to_cpu(rx_inline->counter);
+ else
+ timestamp = __le64_to_cpu(rx_inline->timestamp);
+
+ return ns_to_ktime(timestamp);
+}
+
+static int tsnep_netdev_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack);
+ case XDP_SETUP_XSK_POOL:
+ return tsnep_xdp_setup_pool(adapter, bpf->xsk.pool,
+ bpf->xsk.queue_id);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct tsnep_tx *tsnep_xdp_get_tx(struct tsnep_adapter *adapter, u32 cpu)
+{
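+ /* select a TX queue based on the executing CPU by folding the CPU
+ * number into the range of available TX queues
+ */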
+ if (cpu >= TSNEP_MAX_QUEUES)
+ cpu &= TSNEP_MAX_QUEUES - 1;
+
+ while (cpu >= adapter->num_tx_queues)
+ cpu -= adapter->num_tx_queues;
+
+ return &adapter->tx[cpu];
+}
+
+static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **xdp, u32 flags)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+ u32 cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ struct tsnep_tx *tx;
+ int nxmit;
+ bool xmit;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ tx = tsnep_xdp_get_tx(adapter, cpu);
+ nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index);
+
+ __netif_tx_lock(nq, cpu);
+
+ for (nxmit = 0; nxmit < n; nxmit++) {
+ xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx,
+ TSNEP_TX_TYPE_XDP_NDO);
+ if (!xmit)
+ break;
+
+ /* avoid transmit queue timeout since we share it with the slow
+ * path
+ */
+ txq_trans_cond_update(nq);
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ tsnep_xdp_xmit_flush(tx);
+
+ __netif_tx_unlock(nq);
+
+ return nxmit;
+}
+
+static int tsnep_netdev_xsk_wakeup(struct net_device *dev, u32 queue_id,
+ u32 flags)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+ struct tsnep_queue *queue;
+
+ if (queue_id >= adapter->num_rx_queues ||
+ queue_id >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ queue = &adapter->queue[queue_id];
+
+ if (!napi_if_scheduled_mark_missed(&queue->napi))
+ napi_schedule(&queue->napi);
+
+ return 0;
+}
+
+static const struct net_device_ops tsnep_netdev_ops = {
+ .ndo_open = tsnep_netdev_open,
+ .ndo_stop = tsnep_netdev_close,
+ .ndo_start_xmit = tsnep_netdev_xmit_frame,
+ .ndo_eth_ioctl = tsnep_netdev_ioctl,
+ .ndo_set_rx_mode = tsnep_netdev_set_multicast,
+ .ndo_get_stats64 = tsnep_netdev_get_stats64,
+ .ndo_set_mac_address = tsnep_netdev_set_mac_address,
+ .ndo_set_features = tsnep_netdev_set_features,
+ .ndo_get_tstamp = tsnep_netdev_get_tstamp,
+ .ndo_setup_tc = tsnep_tc_setup,
+ .ndo_bpf = tsnep_netdev_bpf,
+ .ndo_xdp_xmit = tsnep_netdev_xdp_xmit,
+ .ndo_xsk_wakeup = tsnep_netdev_xsk_wakeup,
+};
+
+static int tsnep_mac_init(struct tsnep_adapter *adapter)
+{
+ int retval;
+
+ /* initialize RX filtering; at minimum the configured MAC address and
+ * broadcasts are not filtered
+ */
+ iowrite16(0, adapter->addr + TSNEP_RX_FILTER);
+
+ /* try to get MAC address in the following order:
+ * - device tree
+ * - valid MAC address already set
+ * - MAC address register if valid
+ * - random MAC address
+ */
+ retval = of_get_mac_address(adapter->pdev->dev.of_node,
+ adapter->mac_address);
+ if (retval == -EPROBE_DEFER)
+ return retval;
+ if (retval && !is_valid_ether_addr(adapter->mac_address)) {
+ *(u32 *)adapter->mac_address =
+ ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
+ *(u16 *)(adapter->mac_address + sizeof(u32)) =
+ ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
+ if (!is_valid_ether_addr(adapter->mac_address))
+ eth_random_addr(adapter->mac_address);
+ }
+
+ tsnep_mac_set_address(adapter, adapter->mac_address);
+ eth_hw_addr_set(adapter->netdev, adapter->mac_address);
+
+ return 0;
+}
+
+static int tsnep_mdio_init(struct tsnep_adapter *adapter)
+{
+ struct device_node *np = adapter->pdev->dev.of_node;
+ int retval;
+
+ if (np) {
+ np = of_get_child_by_name(np, "mdio");
+ if (!np)
+ return 0;
+
+ adapter->suppress_preamble =
+ of_property_read_bool(np, "suppress-preamble");
+ }
+
+ adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
+ if (!adapter->mdiobus) {
+ retval = -ENOMEM;
+
+ goto out;
+ }
+
+ adapter->mdiobus->priv = (void *)adapter;
+ adapter->mdiobus->parent = &adapter->pdev->dev;
+ adapter->mdiobus->read = tsnep_mdiobus_read;
+ adapter->mdiobus->write = tsnep_mdiobus_write;
+ adapter->mdiobus->name = TSNEP "-mdiobus";
+ snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
+ adapter->pdev->name);
+
+ /* do not scan broadcast address */
+ adapter->mdiobus->phy_mask = 0x0000001;
+
+ retval = of_mdiobus_register(adapter->mdiobus, np);
+
+out:
+ of_node_put(np);
+
+ return retval;
+}
+
+static int tsnep_phy_init(struct tsnep_adapter *adapter)
+{
+ struct device_node *phy_node;
+ int retval;
+
+ retval = of_get_phy_mode(adapter->pdev->dev.of_node,
+ &adapter->phy_mode);
+ if (retval)
+ adapter->phy_mode = PHY_INTERFACE_MODE_GMII;
+
+ phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
+ 0);
+ adapter->phydev = of_phy_find_device(phy_node);
+ of_node_put(phy_node);
+ if (!adapter->phydev && adapter->mdiobus)
+ adapter->phydev = phy_find_first(adapter->mdiobus);
+ if (!adapter->phydev)
+ return -EIO;
+
+ return 0;
+}
+
+static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
+{
+ u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
+ char name[8];
+ int i;
+ int retval;
+
+ /* one TX/RX queue pair for netdev is mandatory */
+ if (platform_irq_count(adapter->pdev) == 1)
+ retval = platform_get_irq(adapter->pdev, 0);
+ else
+ retval = platform_get_irq_byname(adapter->pdev, "mac");
+ if (retval < 0)
+ return retval;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 1;
+ adapter->num_queues = 1;
+ adapter->queue[0].adapter = adapter;
+ adapter->queue[0].irq = retval;
+ adapter->queue[0].tx = &adapter->tx[0];
+ adapter->queue[0].tx->adapter = adapter;
+ adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0);
+ adapter->queue[0].tx->queue_index = 0;
+ adapter->queue[0].rx = &adapter->rx[0];
+ adapter->queue[0].rx->adapter = adapter;
+ adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0);
+ adapter->queue[0].rx->queue_index = 0;
+ adapter->queue[0].irq_mask = irq_mask;
+ adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY;
+ retval = tsnep_set_irq_coalesce(&adapter->queue[0],
+ TSNEP_COALESCE_USECS_DEFAULT);
+ if (retval < 0)
+ return retval;
+
+ adapter->netdev->irq = adapter->queue[0].irq;
+
+ /* add additional TX/RX queue pairs only if dedicated interrupt is
+ * available
+ */
+ for (i = 1; i < queue_count; i++) {
+ sprintf(name, "txrx-%d", i);
+ retval = platform_get_irq_byname_optional(adapter->pdev, name);
+ if (retval < 0)
+ break;
+
+ adapter->num_tx_queues++;
+ adapter->num_rx_queues++;
+ adapter->num_queues++;
+ adapter->queue[i].adapter = adapter;
+ adapter->queue[i].irq = retval;
+ adapter->queue[i].tx = &adapter->tx[i];
+ adapter->queue[i].tx->adapter = adapter;
+ adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i);
+ adapter->queue[i].tx->queue_index = i;
+ adapter->queue[i].rx = &adapter->rx[i];
+ adapter->queue[i].rx->adapter = adapter;
+ adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i);
+ adapter->queue[i].rx->queue_index = i;
+ adapter->queue[i].irq_mask =
+ irq_mask << (ECM_INT_TXRX_SHIFT * i);
+ adapter->queue[i].irq_delay_addr =
+ adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i;
+ retval = tsnep_set_irq_coalesce(&adapter->queue[i],
+ TSNEP_COALESCE_USECS_DEFAULT);
+ if (retval < 0)
+ return retval;
+ }
+
+ return 0;
+}
+
+static int tsnep_probe(struct platform_device *pdev)
+{
+ struct tsnep_adapter *adapter;
+ struct net_device *netdev;
+ struct resource *io;
+ u32 type;
+ int revision;
+ int version;
+ int queue_count;
+ int retval;
+
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct tsnep_adapter),
+ TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
+ if (!netdev)
+ return -ENODEV;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ adapter = netdev_priv(netdev);
+ platform_set_drvdata(pdev, adapter);
+ adapter->pdev = pdev;
+ adapter->dmadev = &pdev->dev;
+ adapter->netdev = netdev;
+ adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;
+
+ mutex_init(&adapter->gate_control_lock);
+ mutex_init(&adapter->rxnfc_lock);
+ INIT_LIST_HEAD(&adapter->rxnfc_rules);
+
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adapter->addr = devm_ioremap_resource(&pdev->dev, io);
+ if (IS_ERR(adapter->addr))
+ return PTR_ERR(adapter->addr);
+ netdev->mem_start = io->start;
+ netdev->mem_end = io->end;
+
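+ /* the type register reports IP version, revision, queue count and
+ * supported features
+ */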
+ type = ioread32(adapter->addr + ECM_TYPE);
+ revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
+ version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
+ queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
+ adapter->gate_control = type & ECM_GATE_CONTROL;
+ adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;
+
+ tsnep_disable_irq(adapter, ECM_INT_ALL);
+
+ retval = tsnep_queue_init(adapter, queue_count);
+ if (retval)
+ return retval;
+
+ retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
+ DMA_BIT_MASK(64));
+ if (retval) {
+ dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
+ return retval;
+ }
+
+ retval = tsnep_mac_init(adapter);
+ if (retval)
+ return retval;
+
+ retval = tsnep_mdio_init(adapter);
+ if (retval)
+ goto mdio_init_failed;
+
+ retval = tsnep_phy_init(adapter);
+ if (retval)
+ goto phy_init_failed;
+
+ retval = tsnep_ptp_init(adapter);
+ if (retval)
+ goto ptp_init_failed;
+
+ retval = tsnep_tc_init(adapter);
+ if (retval)
+ goto tc_init_failed;
+
+ retval = tsnep_rxnfc_init(adapter);
+ if (retval)
+ goto rxnfc_init_failed;
+
+ netdev->netdev_ops = &tsnep_netdev_ops;
+ netdev->ethtool_ops = &tsnep_ethtool_ops;
+ netdev->features = NETIF_F_SG;
+ netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;
+
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ retval = register_netdev(netdev);
+ if (retval)
+ goto register_failed;
+
+ dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
+ revision);
+ if (adapter->gate_control)
+ dev_info(&adapter->pdev->dev, "gate control detected\n");
+
+ return 0;
+
+register_failed:
+ tsnep_rxnfc_cleanup(adapter);
+rxnfc_init_failed:
+ tsnep_tc_cleanup(adapter);
+tc_init_failed:
+ tsnep_ptp_cleanup(adapter);
+ptp_init_failed:
+phy_init_failed:
+ if (adapter->mdiobus)
+ mdiobus_unregister(adapter->mdiobus);
+mdio_init_failed:
+ return retval;
+}
+
+static int tsnep_remove(struct platform_device *pdev)
+{
+ struct tsnep_adapter *adapter = platform_get_drvdata(pdev);
+
+ unregister_netdev(adapter->netdev);
+
+ tsnep_rxnfc_cleanup(adapter);
+
+ tsnep_tc_cleanup(adapter);
+
+ tsnep_ptp_cleanup(adapter);
+
+ if (adapter->mdiobus)
+ mdiobus_unregister(adapter->mdiobus);
+
+ tsnep_disable_irq(adapter, ECM_INT_ALL);
+
+ return 0;
+}
+
+static const struct of_device_id tsnep_of_match[] = {
+ { .compatible = "engleder,tsnep", },
+{ },
+};
+MODULE_DEVICE_TABLE(of, tsnep_of_match);
+
+static struct platform_driver tsnep_driver = {
+ .driver = {
+ .name = TSNEP,
+ .of_match_table = tsnep_of_match,
+ },
+ .probe = tsnep_probe,
+ .remove = tsnep_remove,
+};
+module_platform_driver(tsnep_driver);
+
+MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
+MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/engleder/tsnep_ptp.c b/drivers/net/ethernet/engleder/tsnep_ptp.c
new file mode 100644
index 000000000000..54fbf0126815
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_ptp.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time)
+{
+ u32 high_before;
+ u32 low;
+ u32 high;
+
+ /* read high dword twice to detect overrun */
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ do {
+ low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while (high != high_before);
+ *time = (((u64)high) << 32) | ((u64)low);
+}
+
+int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (!ifr)
+ return -EINVAL;
+
+ if (cmd == SIOCSHWTSTAMP) {
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ memcpy(&adapter->hwtstamp_config, &config,
+ sizeof(adapter->hwtstamp_config));
+ }
+
+ if (copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
+ sizeof(adapter->hwtstamp_config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int tsnep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ bool negative = false;
+ u64 rate_offset;
+
+ if (scaled_ppm < 0) {
+ scaled_ppm = -scaled_ppm;
+ negative = true;
+ }
+
+ /* convert from 16 bit to 32 bit binary fractional, divide by 1000000
+ * to eliminate ppm and multiply by 8 to compensate for the 8ns clock
+ * cycle time; the calculation simplifies because 15625 * 8 = 1000000 / 8
+ */
+ rate_offset = scaled_ppm;
+ rate_offset <<= 16 - 3;
+ rate_offset = div_u64(rate_offset, 15625);
+
+ rate_offset &= ECM_CLOCK_RATE_OFFSET_MASK;
+ if (negative)
+ rate_offset |= ECM_CLOCK_RATE_OFFSET_SIGN;
+ iowrite32(rate_offset & 0xFFFFFFFF, adapter->addr + ECM_CLOCK_RATE);
+
+ return 0;
+}
+
+static int tsnep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u64 system_time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ptp_lock, flags);
+
+ tsnep_get_system_time(adapter, &system_time);
+
+ system_time += delta;
+
+ /* high dword is buffered in hardware and synchronously written to
+ * system time when low dword is written
+ */
+ iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ iowrite32(system_time & 0xFFFFFFFF,
+ adapter->addr + ECM_SYSTEM_TIME_LOW);
+
+ spin_unlock_irqrestore(&adapter->ptp_lock, flags);
+
+ return 0;
+}
+
+static int tsnep_ptp_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u32 high_before;
+ u32 low;
+ u32 high;
+ u64 system_time;
+
+ /* read high dword twice to detect overrun */
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ do {
+ ptp_read_system_prets(sts);
+ low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ ptp_read_system_postts(sts);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while (high != high_before);
+ system_time = (((u64)high) << 32) | ((u64)low);
+
+ *ts = ns_to_timespec64(system_time);
+
+ return 0;
+}
+
+static int tsnep_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u64 system_time = timespec64_to_ns(ts);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->ptp_lock, flags);
+
+ /* high dword is buffered in hardware and synchronously written to
+ * system time when low dword is written
+ */
+ iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ iowrite32(system_time & 0xFFFFFFFF,
+ adapter->addr + ECM_SYSTEM_TIME_LOW);
+
+ spin_unlock_irqrestore(&adapter->ptp_lock, flags);
+
+ return 0;
+}
+
+static int tsnep_ptp_getcyclesx64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter,
+ ptp_clock_info);
+ u32 high_before;
+ u32 low;
+ u32 high;
+ u64 counter;
+
+ /* read high dword twice to detect overrun */
+ high = ioread32(adapter->addr + ECM_COUNTER_HIGH);
+ do {
+ ptp_read_system_prets(sts);
+ low = ioread32(adapter->addr + ECM_COUNTER_LOW);
+ ptp_read_system_postts(sts);
+ high_before = high;
+ high = ioread32(adapter->addr + ECM_COUNTER_HIGH);
+ } while (high != high_before);
+ counter = (((u64)high) << 32) | ((u64)low);
+
+ *ts = ns_to_timespec64(counter);
+
+ return 0;
+}
+
+int tsnep_ptp_init(struct tsnep_adapter *adapter)
+{
+ int retval = 0;
+
+ adapter->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ snprintf(adapter->ptp_clock_info.name, 16, "%s", TSNEP);
+ adapter->ptp_clock_info.owner = THIS_MODULE;
+ /* at most 2^-1ns adjustment every clock cycle for 8ns clock cycle time,
+ * stay slightly below because only bits below 2^-1ns are supported
+ */
+ adapter->ptp_clock_info.max_adj = (500000000 / 8 - 1);
+ adapter->ptp_clock_info.adjfine = tsnep_ptp_adjfine;
+ adapter->ptp_clock_info.adjtime = tsnep_ptp_adjtime;
+ adapter->ptp_clock_info.gettimex64 = tsnep_ptp_gettimex64;
+ adapter->ptp_clock_info.settime64 = tsnep_ptp_settime64;
+ adapter->ptp_clock_info.getcyclesx64 = tsnep_ptp_getcyclesx64;
+
+ spin_lock_init(&adapter->ptp_lock);
+
+ adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info,
+ &adapter->pdev->dev);
+ if (IS_ERR(adapter->ptp_clock)) {
+ netdev_err(adapter->netdev, "ptp_clock_register failed\n");
+
+ retval = PTR_ERR(adapter->ptp_clock);
+ adapter->ptp_clock = NULL;
+ } else if (adapter->ptp_clock) {
+ netdev_info(adapter->netdev, "PHC added\n");
+ }
+
+ return retval;
+}
+
+void tsnep_ptp_cleanup(struct tsnep_adapter *adapter)
+{
+ if (adapter->ptp_clock) {
+ ptp_clock_unregister(adapter->ptp_clock);
+ netdev_info(adapter->netdev, "PHC removed\n");
+ }
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_rxnfc.c b/drivers/net/ethernet/engleder/tsnep_rxnfc.c
new file mode 100644
index 000000000000..9ac2a0cf3833
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_rxnfc.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+
+static void tsnep_enable_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ u8 rx_assign;
+ void __iomem *addr;
+
+ rx_assign = TSNEP_RX_ASSIGN_ACTIVE;
+ rx_assign |= (rule->queue_index << TSNEP_RX_ASSIGN_QUEUE_SHIFT) &
+ TSNEP_RX_ASSIGN_QUEUE_MASK;
+
+ addr = adapter->addr + TSNEP_RX_ASSIGN_ETHER_TYPE +
+ TSNEP_RX_ASSIGN_ETHER_TYPE_OFFSET * rule->location;
+ iowrite16(rule->filter.ether_type, addr);
+
+ /* enable rule after all settings are done */
+ addr = adapter->addr + TSNEP_RX_ASSIGN +
+ TSNEP_RX_ASSIGN_OFFSET * rule->location;
+ iowrite8(rx_assign, addr);
+}
+
+static void tsnep_disable_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ void __iomem *addr;
+
+ addr = adapter->addr + TSNEP_RX_ASSIGN +
+ TSNEP_RX_ASSIGN_OFFSET * rule->location;
+ iowrite8(0, addr);
+}
+
+static struct tsnep_rxnfc_rule *tsnep_get_rule(struct tsnep_adapter *adapter,
+ int location)
+{
+ struct tsnep_rxnfc_rule *rule;
+
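+ /* the rule list is kept sorted by location, stop once past the requested one */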
+ list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
+ if (rule->location == location)
+ return rule;
+ if (rule->location > location)
+ break;
+ }
+
+ return NULL;
+}
+
+static void tsnep_add_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ struct tsnep_rxnfc_rule *pred, *cur;
+
+ tsnep_enable_rule(adapter, rule);
+
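+ /* insert such that the rule list stays sorted by location */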
+ pred = NULL;
+ list_for_each_entry(cur, &adapter->rxnfc_rules, list) {
+ if (cur->location >= rule->location)
+ break;
+ pred = cur;
+ }
+
+ list_add(&rule->list, pred ? &pred->list : &adapter->rxnfc_rules);
+ adapter->rxnfc_count++;
+}
+
+static void tsnep_delete_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ tsnep_disable_rule(adapter, rule);
+
+ list_del(&rule->list);
+ adapter->rxnfc_count--;
+
+ kfree(rule);
+}
+
+static void tsnep_flush_rules(struct tsnep_adapter *adapter)
+{
+ struct tsnep_rxnfc_rule *rule, *tmp;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ list_for_each_entry_safe(rule, tmp, &adapter->rxnfc_rules, list)
+ tsnep_delete_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+}
+
+int tsnep_rxnfc_get_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct tsnep_rxnfc_rule *rule = NULL;
+
+ cmd->data = adapter->rxnfc_max;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ rule = tsnep_get_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -ENOENT;
+ }
+
+ fsp->flow_type = ETHER_FLOW;
+ fsp->ring_cookie = rule->queue_index;
+
+ if (rule->filter.type == TSNEP_RXNFC_ETHER_TYPE) {
+ fsp->h_u.ether_spec.h_proto = htons(rule->filter.ether_type);
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
+ }
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+}
+
+int tsnep_rxnfc_get_all(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct tsnep_rxnfc_rule *rule;
+ int count = 0;
+
+ cmd->data = adapter->rxnfc_max;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
+ if (count == cmd->rule_cnt) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -EMSGSIZE;
+ }
+
+ rule_locs[count] = rule->location;
+ count++;
+ }
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ cmd->rule_cnt = count;
+
+ return 0;
+}
+
+static int tsnep_rxnfc_find_location(struct tsnep_adapter *adapter)
+{
+ struct tsnep_rxnfc_rule *tmp;
+ int location = 0;
+
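+ /* the rule list is sorted by location, so the first gap is the lowest free location */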
+ list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
+ if (tmp->location == location)
+ location++;
+ else
+ return location;
+ }
+
+ if (location >= adapter->rxnfc_max)
+ return -ENOSPC;
+
+ return location;
+}
+
+static void tsnep_rxnfc_init_rule(struct tsnep_rxnfc_rule *rule,
+ const struct ethtool_rx_flow_spec *fsp)
+{
+ INIT_LIST_HEAD(&rule->list);
+
+ rule->queue_index = fsp->ring_cookie;
+ rule->location = fsp->location;
+
+ rule->filter.type = TSNEP_RXNFC_ETHER_TYPE;
+ rule->filter.ether_type = ntohs(fsp->h_u.ether_spec.h_proto);
+}
+
+static int tsnep_rxnfc_check_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ struct net_device *dev = adapter->netdev;
+ struct tsnep_rxnfc_rule *tmp;
+
+ list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
+ if (!memcmp(&rule->filter, &tmp->filter, sizeof(rule->filter)) &&
+ tmp->location != rule->location) {
+ netdev_dbg(dev, "rule already exists\n");
+
+ return -EEXIST;
+ }
+ }
+
+ return 0;
+}
+
+int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct tsnep_rxnfc_rule *rule, *old_rule;
+ int retval;
+
+ /* only EtherType is supported */
+ if (fsp->flow_type != ETHER_FLOW ||
+ !is_zero_ether_addr(fsp->m_u.ether_spec.h_dest) ||
+ !is_zero_ether_addr(fsp->m_u.ether_spec.h_source) ||
+ fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK) {
+ netdev_dbg(netdev, "only ethernet protocol is supported\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (fsp->ring_cookie >
+ (TSNEP_RX_ASSIGN_QUEUE_MASK >> TSNEP_RX_ASSIGN_QUEUE_SHIFT)) {
+ netdev_dbg(netdev, "invalid action\n");
+
+ return -EINVAL;
+ }
+
+ if (fsp->location != RX_CLS_LOC_ANY &&
+ fsp->location >= adapter->rxnfc_max) {
+ netdev_dbg(netdev, "invalid location\n");
+
+ return -EINVAL;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ if (fsp->location == RX_CLS_LOC_ANY) {
+ retval = tsnep_rxnfc_find_location(adapter);
+ if (retval < 0)
+ goto failed;
+ fsp->location = retval;
+ }
+
+ tsnep_rxnfc_init_rule(rule, fsp);
+
+ retval = tsnep_rxnfc_check_rule(adapter, rule);
+ if (retval)
+ goto failed;
+
+ old_rule = tsnep_get_rule(adapter, fsp->location);
+ if (old_rule)
+ tsnep_delete_rule(adapter, old_rule);
+
+ tsnep_add_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+
+failed:
+ mutex_unlock(&adapter->rxnfc_lock);
+ kfree(rule);
+ return retval;
+}
+
+int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct tsnep_rxnfc_rule *rule;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ rule = tsnep_get_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -ENOENT;
+ }
+
+ tsnep_delete_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+}
+
+int tsnep_rxnfc_init(struct tsnep_adapter *adapter)
+{
+ int i;
+
+ /* disable all rules */
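+ /* one 32 bit write clears the assign registers of multiple consecutive rules */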
+ for (i = 0; i < adapter->rxnfc_max;
+ i += sizeof(u32) / TSNEP_RX_ASSIGN_OFFSET)
+ iowrite32(0, adapter->addr + TSNEP_RX_ASSIGN + i);
+
+ return 0;
+}
+
+void tsnep_rxnfc_cleanup(struct tsnep_adapter *adapter)
+{
+ tsnep_flush_rules(adapter);
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_selftests.c b/drivers/net/ethernet/engleder/tsnep_selftests.c
new file mode 100644
index 000000000000..1581d6b22232
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_selftests.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#include <net/pkt_sched.h>
+
+enum tsnep_test {
+ TSNEP_TEST_ENABLE = 0,
+ TSNEP_TEST_TAPRIO,
+ TSNEP_TEST_TAPRIO_CHANGE,
+ TSNEP_TEST_TAPRIO_EXTENSION,
+};
+
+static const char tsnep_test_strings[][ETH_GSTRING_LEN] = {
+ "Enable timeout (offline)",
+ "TAPRIO (offline)",
+ "TAPRIO change (offline)",
+ "TAPRIO extension (offline)",
+};
+
+#define TSNEP_TEST_COUNT (sizeof(tsnep_test_strings) / ETH_GSTRING_LEN)
+
+static bool enable_gc_timeout(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+ if (!(ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_ACTIVE))
+ return false;
+
+ return true;
+}
+
+static bool gc_timeout_signaled(struct tsnep_adapter *adapter)
+{
+ if (ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_SIGNAL)
+ return true;
+
+ return false;
+}
+
+static bool ack_gc_timeout(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+ if (ioread32(adapter->addr + TSNEP_GC) &
+ (TSNEP_GC_TIMEOUT_ACTIVE | TSNEP_GC_TIMEOUT_SIGNAL))
+ return false;
+ return true;
+}
+
+static bool enable_gc(struct tsnep_adapter *adapter, bool a)
+{
+ u8 enable;
+ u8 active;
+
+ if (a) {
+ enable = TSNEP_GC_ENABLE_A;
+ active = TSNEP_GC_ACTIVE_A;
+ } else {
+ enable = TSNEP_GC_ENABLE_B;
+ active = TSNEP_GC_ACTIVE_B;
+ }
+
+ iowrite8(enable, adapter->addr + TSNEP_GC);
+ if (!(ioread32(adapter->addr + TSNEP_GC) & active))
+ return false;
+
+ return true;
+}
+
+static bool disable_gc(struct tsnep_adapter *adapter)
+{
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ if (ioread32(adapter->addr + TSNEP_GC) &
+ (TSNEP_GC_ACTIVE_A | TSNEP_GC_ACTIVE_B))
+ return false;
+
+ return true;
+}
+
+static bool gc_delayed_enable(struct tsnep_adapter *adapter, bool a, int delay)
+{
+ u64 before, after;
+ u32 time;
+ bool enabled;
+
+ if (!disable_gc(adapter))
+ return false;
+
+ before = ktime_get_ns();
+
+ if (!enable_gc_timeout(adapter))
+ return false;
+
+ /* set the start time after the timeout, so the timeout guarantees that
+ * enable is blocked if it happens too late
+ */
+ time = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ time += TSNEP_GC_TIMEOUT;
+ iowrite32(time, adapter->addr + TSNEP_GC_TIME);
+
+ ndelay(delay);
+
+ enabled = enable_gc(adapter, a);
+ after = ktime_get_ns();
+
+ if (delay > TSNEP_GC_TIMEOUT) {
+ /* timeout must have blocked enable */
+ if (enabled)
+ return false;
+ } else if ((after - before) < TSNEP_GC_TIMEOUT * 14 / 16) {
+ /* timeout must not have blocked enable */
+ if (!enabled)
+ return false;
+ }
+
+ if (enabled) {
+ if (gc_timeout_signaled(adapter))
+ return false;
+ } else {
+ if (!gc_timeout_signaled(adapter))
+ return false;
+ if (!ack_gc_timeout(adapter))
+ return false;
+ }
+
+ if (!disable_gc(adapter))
+ return false;
+
+ return true;
+}
+
+static bool tsnep_test_gc_enable(struct tsnep_adapter *adapter)
+{
+ int i;
+
+ iowrite32(0x80000001, adapter->addr + TSNEP_GCL_A + 0);
+ iowrite32(100000, adapter->addr + TSNEP_GCL_A + 4);
+
+ for (i = 0; i < 200000; i += 100) {
+ if (!gc_delayed_enable(adapter, true, i))
+ return false;
+ }
+
+ iowrite32(0x80000001, adapter->addr + TSNEP_GCL_B + 0);
+ iowrite32(100000, adapter->addr + TSNEP_GCL_B + 4);
+
+ for (i = 0; i < 200000; i += 100) {
+ if (!gc_delayed_enable(adapter, false, i))
+ return false;
+ }
+
+ return true;
+}
+
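+/* move the base time to the first cycle start that lies at least ms
+ * milliseconds in the future
+ */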
+static void delay_base_time(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ u64 system_time;
+ u64 base_time = ktime_to_ns(qopt->base_time);
+ u64 n;
+
+ tsnep_get_system_time(adapter, &system_time);
+ system_time += ms * 1000000;
+ n = div64_u64(system_time - base_time, qopt->cycle_time);
+
+ qopt->base_time = ktime_add_ns(qopt->base_time,
+ (n + 1) * qopt->cycle_time);
+}
+
+static void get_gate_state(struct tsnep_adapter *adapter, u32 *gc, u32 *gc_time,
+ u64 *system_time)
+{
+ u32 time_high_before;
+ u32 time_low;
+ u32 time_high;
+ u32 gc_time_before;
+
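+ /* retry until the system time high dword and the gate control time
+ * are read consistently
+ */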
+ time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME);
+ do {
+ time_low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW);
+ *gc = ioread32(adapter->addr + TSNEP_GC);
+
+ gc_time_before = *gc_time;
+ *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME);
+ time_high_before = time_high;
+ time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH);
+ } while ((time_high != time_high_before) ||
+ (*gc_time != gc_time_before));
+
+ *system_time = (((u64)time_high) << 32) | ((u64)time_low);
+}
+
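+/* determine the index of the currently active operation and the start time of
+ * the next one
+ */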
+static int get_operation(struct tsnep_gcl *gcl, u64 system_time, u64 *next)
+{
+ u64 n = div64_u64(system_time - gcl->base_time, gcl->cycle_time);
+ u64 cycle_start = gcl->base_time + gcl->cycle_time * n;
+ int i;
+
+ *next = cycle_start;
+ for (i = 0; i < gcl->count; i++) {
+ *next += gcl->operation[i].interval;
+ if (*next > system_time)
+ break;
+ }
+
+ return i;
+}
+
+static bool check_gate(struct tsnep_adapter *adapter)
+{
+ u32 gc_time;
+ u32 gc;
+ u64 system_time;
+ struct tsnep_gcl *curr;
+ struct tsnep_gcl *prev;
+ u64 next_time;
+ u8 gate_open;
+ u8 next_gate_open;
+
+ get_gate_state(adapter, &gc, &gc_time, &system_time);
+
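+ /* the active gate control list is the current one, the other one is the
+ * previous one
+ */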
+ if (gc & TSNEP_GC_ACTIVE_A) {
+ curr = &adapter->gcl[0];
+ prev = &adapter->gcl[1];
+ } else if (gc & TSNEP_GC_ACTIVE_B) {
+ curr = &adapter->gcl[1];
+ prev = &adapter->gcl[0];
+ } else {
+ return false;
+ }
+ if (curr->start_time <= system_time) {
+ /* GCL is already active */
+ int index;
+
+ index = get_operation(curr, system_time, &next_time);
+ gate_open = curr->operation[index].properties & TSNEP_GCL_MASK;
+ if (index == curr->count - 1)
+ index = 0;
+ else
+ index++;
+ next_gate_open =
+ curr->operation[index].properties & TSNEP_GCL_MASK;
+ } else if (curr->change) {
+ /* operation of previous GCL is active */
+ int index;
+ u64 start_before;
+ u64 n;
+
+ index = get_operation(prev, system_time, &next_time);
+ next_time = curr->start_time;
+ start_before = prev->base_time;
+ n = div64_u64(curr->start_time - start_before,
+ prev->cycle_time);
+ start_before += n * prev->cycle_time;
+ if (curr->start_time == start_before)
+ start_before -= prev->cycle_time;
+ if (((start_before + prev->cycle_time_extension) >=
+ curr->start_time) &&
+ (curr->start_time - prev->cycle_time_extension <=
+ system_time)) {
+ /* extend */
+ index = prev->count - 1;
+ }
+ gate_open = prev->operation[index].properties & TSNEP_GCL_MASK;
+ next_gate_open =
+ curr->operation[0].properties & TSNEP_GCL_MASK;
+ } else {
+ /* GCL is waiting for start */
+ next_time = curr->start_time;
+ gate_open = 0xFF;
+ next_gate_open = curr->operation[0].properties & TSNEP_GCL_MASK;
+ }
+
+ if (gc_time != (next_time & 0xFFFFFFFF)) {
+ dev_err(&adapter->pdev->dev, "gate control time 0x%x!=0x%llx\n",
+ gc_time, next_time);
+ return false;
+ }
+ if (((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT) != gate_open) {
+ dev_err(&adapter->pdev->dev,
+ "gate control open 0x%02x!=0x%02x\n",
+ ((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT),
+ gate_open);
+ return false;
+ }
+ if (((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT) !=
+ next_gate_open) {
+ dev_err(&adapter->pdev->dev,
+ "gate control next open 0x%02x!=0x%02x\n",
+ ((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT),
+ next_gate_open);
+ return false;
+ }
+
+ return true;
+}
+
+static bool check_gate_duration(struct tsnep_adapter *adapter, s64 ms)
+{
+ ktime_t start = ktime_get();
+
+ do {
+ if (!check_gate(adapter))
+ return false;
+ } while (ktime_ms_delta(ktime_get(), start) < ms);
+
+ return true;
+}
+
+static bool enable_check_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ int retval;
+
+ retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, qopt);
+ if (retval)
+ return false;
+
+ if (!check_gate_duration(adapter, ms))
+ return false;
+
+ return true;
+}
+
+static bool disable_taprio(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload qopt;
+ int retval;
+
+ memset(&qopt, 0, sizeof(qopt));
+ qopt.enable = 0;
+ retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, &qopt);
+ if (retval)
+ return false;
+
+ return true;
+}
+
+static bool run_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt, s64 ms)
+{
+ if (!enable_check_taprio(adapter, qopt, ms))
+ return false;
+
+ if (!disable_taprio(adapter))
+ return false;
+
+ return true;
+}
+
+static bool tsnep_test_taprio(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1500000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x02;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0x03;
+ qopt->entries[1].interval = 800000;
+ qopt->entries[2].gate_mask = 0x07;
+ qopt->entries[2].interval = 240000;
+ qopt->entries[3].gate_mask = 0x01;
+ qopt->entries[3].interval = 80000;
+ qopt->entries[4].gate_mask = 0x04;
+ qopt->entries[4].interval = 70000;
+ qopt->entries[5].gate_mask = 0x06;
+ qopt->entries[5].interval = 60000;
+ qopt->entries[6].gate_mask = 0x0F;
+ qopt->entries[6].interval = 50000;
+ qopt->num_entries = 7;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 411854;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x17;
+ qopt->entries[0].interval = 23842;
+ qopt->entries[1].gate_mask = 0x16;
+ qopt->entries[1].interval = 13482;
+ qopt->entries[2].gate_mask = 0x15;
+ qopt->entries[2].interval = 49428;
+ qopt->entries[3].gate_mask = 0x14;
+ qopt->entries[3].interval = 38189;
+ qopt->entries[4].gate_mask = 0x13;
+ qopt->entries[4].interval = 92321;
+ qopt->entries[5].gate_mask = 0x12;
+ qopt->entries[5].interval = 71239;
+ qopt->entries[6].gate_mask = 0x11;
+ qopt->entries[6].interval = 69932;
+ qopt->entries[7].gate_mask = 0x10;
+ qopt->entries[7].interval = 53421;
+ qopt->num_entries = 8;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ delay_base_time(adapter, qopt, 12);
+ qopt->cycle_time = 125000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x27;
+ qopt->entries[0].interval = 15000;
+ qopt->entries[1].gate_mask = 0x26;
+ qopt->entries[1].interval = 15000;
+ qopt->entries[2].gate_mask = 0x25;
+ qopt->entries[2].interval = 12500;
+ qopt->entries[3].gate_mask = 0x24;
+ qopt->entries[3].interval = 17500;
+ qopt->entries[4].gate_mask = 0x23;
+ qopt->entries[4].interval = 10000;
+ qopt->entries[5].gate_mask = 0x22;
+ qopt->entries[5].interval = 11000;
+ qopt->entries[6].gate_mask = 0x21;
+ qopt->entries[6].interval = 9000;
+ qopt->entries[7].gate_mask = 0x20;
+ qopt->entries[7].interval = 10000;
+ qopt->entries[8].gate_mask = 0x20;
+ qopt->entries[8].interval = 12500;
+ qopt->entries[9].gate_mask = 0x20;
+ qopt->entries[9].interval = 12500;
+ qopt->num_entries = 10;
+ if (!run_taprio(adapter, qopt, 100))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+static bool tsnep_test_taprio_change(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 100000;
+ qopt->cycle_time_extension = 0;
+ qopt->entries[0].gate_mask = 0x30;
+ qopt->entries[0].interval = 20000;
+ qopt->entries[1].gate_mask = 0x31;
+ qopt->entries[1].interval = 80000;
+ qopt->num_entries = 2;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to identical */
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ delay_base_time(adapter, qopt, 17);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to same cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x42;
+ qopt->entries[1].gate_mask = 0x43;
+ delay_base_time(adapter, qopt, 2);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x54;
+ qopt->entries[0].interval = 33333;
+ qopt->entries[1].gate_mask = 0x55;
+ qopt->entries[1].interval = 66667;
+ delay_base_time(adapter, qopt, 23);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->entries[0].gate_mask = 0x66;
+ qopt->entries[0].interval = 50000;
+ qopt->entries[1].gate_mask = 0x67;
+ qopt->entries[1].interval = 25000;
+ qopt->entries[2].gate_mask = 0x68;
+ qopt->entries[2].interval = 25000;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to multiple of cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 200000;
+ qopt->entries[0].gate_mask = 0x79;
+ qopt->entries[0].interval = 50000;
+ qopt->entries[1].gate_mask = 0x7A;
+ qopt->entries[1].interval = 150000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->entries[0].gate_mask = 0x7B;
+ qopt->entries[0].interval = 125000;
+ qopt->entries[1].gate_mask = 0x7C;
+ qopt->entries[1].interval = 250000;
+ qopt->entries[2].gate_mask = 0x7D;
+ qopt->entries[2].interval = 375000;
+ qopt->entries[3].gate_mask = 0x7E;
+ qopt->entries[3].interval = 250000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 3);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to shorter cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 333333;
+ qopt->entries[0].gate_mask = 0x8F;
+ qopt->entries[0].interval = 166666;
+ qopt->entries[1].gate_mask = 0x80;
+ qopt->entries[1].interval = 166667;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 11);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 62500;
+ qopt->entries[0].gate_mask = 0x81;
+ qopt->entries[0].interval = 31250;
+ qopt->entries[1].gate_mask = 0x82;
+ qopt->entries[1].interval = 15625;
+ qopt->entries[2].gate_mask = 0x83;
+ qopt->entries[2].interval = 15625;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 1);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to longer cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 400000;
+ qopt->entries[0].gate_mask = 0x84;
+ qopt->entries[0].interval = 100000;
+ qopt->entries[1].gate_mask = 0x85;
+ qopt->entries[1].interval = 100000;
+ qopt->entries[2].gate_mask = 0x86;
+ qopt->entries[2].interval = 100000;
+ qopt->entries[3].gate_mask = 0x87;
+ qopt->entries[3].interval = 100000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 7);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1700000;
+ qopt->entries[0].gate_mask = 0x88;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0x89;
+ qopt->entries[1].interval = 300000;
+ qopt->entries[2].gate_mask = 0x8A;
+ qopt->entries[2].interval = 600000;
+ qopt->entries[3].gate_mask = 0x8B;
+ qopt->entries[3].interval = 100000;
+ qopt->entries[4].gate_mask = 0x8C;
+ qopt->entries[4].interval = 500000;
+ qopt->num_entries = 5;
+ delay_base_time(adapter, qopt, 6);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ if (!disable_taprio(adapter))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+static bool tsnep_test_taprio_extension(struct tsnep_adapter *adapter)
+{
+ struct tc_taprio_qopt_offload *qopt;
+ int i;
+
+ qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL);
+ if (!qopt)
+ return false;
+ for (i = 0; i < 255; i++)
+ qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES;
+
+ qopt->enable = 1;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 100000;
+ qopt->cycle_time_extension = 50000;
+ qopt->entries[0].gate_mask = 0x90;
+ qopt->entries[0].interval = 20000;
+ qopt->entries[1].gate_mask = 0x91;
+ qopt->entries[1].interval = 80000;
+ qopt->num_entries = 2;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase */
+ qopt->base_time = ktime_set(0, 50000);
+ qopt->entries[0].gate_mask = 0x92;
+ qopt->entries[0].interval = 33000;
+ qopt->entries[1].gate_mask = 0x93;
+ qopt->entries[1].interval = 67000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 2);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase and longer cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0x94;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x95;
+ qopt->entries[1].interval = 600000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 7);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 700000);
+ qopt->cycle_time = 2000000;
+ qopt->cycle_time_extension = 1900000;
+ qopt->entries[0].gate_mask = 0x96;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x97;
+ qopt->entries[1].interval = 1600000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 9);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different phase and shorter cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1500000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0x98;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0x99;
+ qopt->entries[1].interval = 600000;
+ qopt->entries[2].gate_mask = 0x9A;
+ qopt->entries[2].interval = 500000;
+ qopt->num_entries = 3;
+ delay_base_time(adapter, qopt, 3);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 100000);
+ qopt->cycle_time = 500000;
+ qopt->cycle_time_extension = 300000;
+ qopt->entries[0].gate_mask = 0x9B;
+ qopt->entries[0].interval = 150000;
+ qopt->entries[1].gate_mask = 0x9C;
+ qopt->entries[1].interval = 350000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 9);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ /* change to different cycle time */
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 1000000;
+ qopt->cycle_time_extension = 700000;
+ qopt->entries[0].gate_mask = 0xAD;
+ qopt->entries[0].interval = 400000;
+ qopt->entries[1].gate_mask = 0xAE;
+ qopt->entries[1].interval = 300000;
+ qopt->entries[2].gate_mask = 0xAF;
+ qopt->entries[2].interval = 300000;
+ qopt->num_entries = 3;
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 400000;
+ qopt->cycle_time_extension = 100000;
+ qopt->entries[0].gate_mask = 0xA0;
+ qopt->entries[0].interval = 200000;
+ qopt->entries[1].gate_mask = 0xA1;
+ qopt->entries[1].interval = 200000;
+ qopt->num_entries = 2;
+ delay_base_time(adapter, qopt, 19);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 500000;
+ qopt->cycle_time_extension = 499999;
+ qopt->entries[0].gate_mask = 0xB2;
+ qopt->entries[0].interval = 100000;
+ qopt->entries[1].gate_mask = 0xB3;
+ qopt->entries[1].interval = 100000;
+ qopt->entries[2].gate_mask = 0xB4;
+ qopt->entries[2].interval = 100000;
+ qopt->entries[3].gate_mask = 0xB5;
+ qopt->entries[3].interval = 200000;
+ qopt->num_entries = 4;
+ delay_base_time(adapter, qopt, 19);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+ qopt->base_time = ktime_set(0, 0);
+ qopt->cycle_time = 6000000;
+ qopt->cycle_time_extension = 5999999;
+ qopt->entries[0].gate_mask = 0xC6;
+ qopt->entries[0].interval = 1000000;
+ qopt->entries[1].gate_mask = 0xC7;
+ qopt->entries[1].interval = 1000000;
+ qopt->entries[2].gate_mask = 0xC8;
+ qopt->entries[2].interval = 1000000;
+ qopt->entries[3].gate_mask = 0xC9;
+ qopt->entries[3].interval = 1500000;
+ qopt->entries[4].gate_mask = 0xCA;
+ qopt->entries[4].interval = 1500000;
+ qopt->num_entries = 5;
+ delay_base_time(adapter, qopt, 1);
+ if (!enable_check_taprio(adapter, qopt, 100))
+ goto failed;
+
+ if (!disable_taprio(adapter))
+ goto failed;
+
+ kfree(qopt);
+
+ return true;
+
+failed:
+ disable_taprio(adapter);
+ kfree(qopt);
+
+ return false;
+}
+
+int tsnep_ethtool_get_test_count(void)
+{
+ return TSNEP_TEST_COUNT;
+}
+
+void tsnep_ethtool_get_test_strings(u8 *data)
+{
+ memcpy(data, tsnep_test_strings, sizeof(tsnep_test_strings));
+}
+
+void tsnep_ethtool_self_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ eth_test->len = TSNEP_TEST_COUNT;
+
+ if (eth_test->flags != ETH_TEST_FL_OFFLINE) {
+ /* no tests are done online */
+ data[TSNEP_TEST_ENABLE] = 0;
+ data[TSNEP_TEST_TAPRIO] = 0;
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 0;
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 0;
+
+ return;
+ }
+
+ if (tsnep_test_gc_enable(adapter)) {
+ data[TSNEP_TEST_ENABLE] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_ENABLE] = 1;
+ }
+
+ if (tsnep_test_taprio(adapter)) {
+ data[TSNEP_TEST_TAPRIO] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO] = 1;
+ }
+
+ if (tsnep_test_taprio_change(adapter)) {
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO_CHANGE] = 1;
+ }
+
+ if (tsnep_test_taprio_extension(adapter)) {
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 0;
+ } else {
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ data[TSNEP_TEST_TAPRIO_EXTENSION] = 1;
+ }
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_tc.c b/drivers/net/ethernet/engleder/tsnep_tc.c
new file mode 100644
index 000000000000..d083e6684f12
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_tc.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include "tsnep.h"
+
+#include <net/pkt_sched.h>
+
+/* save one operation at the end for the additional operation needed at a list change */
+#define TSNEP_MAX_GCL_NUM (TSNEP_GCL_COUNT - 1)
+
+static int tsnep_validate_gcl(struct tc_taprio_qopt_offload *qopt)
+{
+ int i;
+ u64 cycle_time;
+
+ if (!qopt->cycle_time)
+ return -ERANGE;
+ if (qopt->num_entries > TSNEP_MAX_GCL_NUM)
+ return -EINVAL;
+ cycle_time = 0;
+ for (i = 0; i < qopt->num_entries; i++) {
+ if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES)
+ return -EINVAL;
+ if (qopt->entries[i].gate_mask & ~TSNEP_GCL_MASK)
+ return -EINVAL;
+ if (qopt->entries[i].interval < TSNEP_GCL_MIN_INTERVAL)
+ return -EINVAL;
+ cycle_time += qopt->entries[i].interval;
+ }
+ if (qopt->cycle_time != cycle_time)
+ return -EINVAL;
+ if (qopt->cycle_time_extension >= qopt->cycle_time)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void tsnep_write_gcl_operation(struct tsnep_gcl *gcl, int index,
+ u32 properties, u32 interval, bool flush)
+{
+ void __iomem *addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * index;
+
+ gcl->operation[index].properties = properties;
+ gcl->operation[index].interval = interval;
+
+ iowrite32(properties, addr);
+ iowrite32(interval, addr + sizeof(u32));
+
+ if (flush) {
+ /* flush write with read access */
+ ioread32(addr);
+ }
+}
+
+static u64 tsnep_change_duration(struct tsnep_gcl *gcl, int index)
+{
+ u64 duration;
+ int count;
+
+ /* change needs to be triggered one or two operations before start of
+ * new gate control list
+ * - change is triggered at start of operation (minimum one operation)
+ * - operation with adjusted interval is inserted on demand to exactly
+ * meet the start of the new gate control list (optional)
+ *
+ * additionally properties are read directly after start of previous
+ * operation
+ *
+ * therefore, three operations need to be considered for the limit
+ */
+ duration = 0;
+ count = 3;
+ while (count) {
+ duration += gcl->operation[index].interval;
+
+ index--;
+ if (index < 0)
+ index = gcl->count - 1;
+
+ count--;
+ }
+
+ return duration;
+}
+
+static void tsnep_write_gcl(struct tsnep_gcl *gcl,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ int i;
+ u32 properties;
+ u64 extend;
+ u64 cut;
+
+ gcl->base_time = ktime_to_ns(qopt->base_time);
+ gcl->cycle_time = qopt->cycle_time;
+ gcl->cycle_time_extension = qopt->cycle_time_extension;
+
+ for (i = 0; i < qopt->num_entries; i++) {
+ properties = qopt->entries[i].gate_mask;
+ if (i == (qopt->num_entries - 1))
+ properties |= TSNEP_GCL_LAST;
+
+ tsnep_write_gcl_operation(gcl, i, properties,
+ qopt->entries[i].interval, true);
+ }
+ gcl->count = qopt->num_entries;
+
+ /* calculate change limit; i.e., the time needed between enable and
+ * start of new gate control list
+ */
+
+ /* case 1: extend cycle time for change
+ * - change duration of last operation
+ * - cycle time extension
+ */
+ extend = tsnep_change_duration(gcl, gcl->count - 1);
+ extend += gcl->cycle_time_extension;
+
+ /* case 2: cut cycle time for change
+ * - maximum change duration
+ */
+ cut = 0;
+ for (i = 0; i < gcl->count; i++)
+ cut = max(cut, tsnep_change_duration(gcl, i));
+
+ /* use maximum, because the actual case (extend or cut) can be
+ * determined only after limit is known (chicken-and-egg problem)
+ */
+ gcl->change_limit = max(extend, cut);
+}
+
+static u64 tsnep_gcl_start_after(struct tsnep_gcl *gcl, u64 limit)
+{
+ u64 start = gcl->base_time;
+ u64 n;
+
+ if (start <= limit) {
+ n = div64_u64(limit - start, gcl->cycle_time);
+ start += (n + 1) * gcl->cycle_time;
+ }
+
+ return start;
+}
+
+static u64 tsnep_gcl_start_before(struct tsnep_gcl *gcl, u64 limit)
+{
+ u64 start = gcl->base_time;
+ u64 n;
+
+ n = div64_u64(limit - start, gcl->cycle_time);
+ start += n * gcl->cycle_time;
+ if (start == limit)
+ start -= gcl->cycle_time;
+
+ return start;
+}
+
+static u64 tsnep_set_gcl_change(struct tsnep_gcl *gcl, int index, u64 change,
+ bool insert)
+{
+ /* previous operation triggers change and properties are evaluated at
+ * start of operation
+ */
+ if (index == 0)
+ index = gcl->count - 1;
+ else
+ index = index - 1;
+ change -= gcl->operation[index].interval;
+
+ /* optionally change to new list with additional operation in between */
+ if (insert) {
+ void __iomem *addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * index;
+
+ gcl->operation[index].properties |= TSNEP_GCL_INSERT;
+ iowrite32(gcl->operation[index].properties, addr);
+ }
+
+ return change;
+}
+
+static void tsnep_clean_gcl(struct tsnep_gcl *gcl)
+{
+ int i;
+ u32 mask = TSNEP_GCL_LAST | TSNEP_GCL_MASK;
+ void __iomem *addr;
+
+ /* search for insert operation and reset properties */
+ for (i = 0; i < gcl->count; i++) {
+ if (gcl->operation[i].properties & ~mask) {
+ addr = gcl->addr +
+ sizeof(struct tsnep_gcl_operation) * i;
+
+ gcl->operation[i].properties &= mask;
+ iowrite32(gcl->operation[i].properties, addr);
+
+ break;
+ }
+ }
+}
+
+static u64 tsnep_insert_gcl_operation(struct tsnep_gcl *gcl, int ref,
+ u64 change, u32 interval)
+{
+ u32 properties;
+
+ properties = gcl->operation[ref].properties & TSNEP_GCL_MASK;
+ /* change to new list directly after inserted operation */
+ properties |= TSNEP_GCL_CHANGE;
+
+ /* last operation of list is reserved to insert operation */
+ tsnep_write_gcl_operation(gcl, TSNEP_GCL_COUNT - 1, properties,
+ interval, false);
+
+ return tsnep_set_gcl_change(gcl, ref, change, true);
+}
+
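+/* extend the last operation of the previous cycle so that the new list starts
+ * right after it
+ */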
+static u64 tsnep_extend_gcl(struct tsnep_gcl *gcl, u64 start, u32 extension)
+{
+ int ref = gcl->count - 1;
+ u32 interval = gcl->operation[ref].interval + extension;
+
+ start -= gcl->operation[ref].interval;
+
+ return tsnep_insert_gcl_operation(gcl, ref, start, interval);
+}
+
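+/* cut the last cycle of the previous list down to cycle_time so that the new
+ * list can start on time
+ */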
+static u64 tsnep_cut_gcl(struct tsnep_gcl *gcl, u64 start, u64 cycle_time)
+{
+ u64 sum = 0;
+ int i;
+
+ /* find the operation which shall be cut */
+ for (i = 0; i < gcl->count; i++) {
+ u64 sum_tmp = sum + gcl->operation[i].interval;
+ u64 interval;
+
+ /* sum up operations as long as cycle time is not exceeded */
+ if (sum_tmp > cycle_time)
+ break;
+
+ /* remaining interval must be big enough for hardware */
+ interval = cycle_time - sum_tmp;
+ if (interval > 0 && interval < TSNEP_GCL_MIN_INTERVAL)
+ break;
+
+ sum = sum_tmp;
+ }
+ if (sum == cycle_time) {
+ /* no need to cut operation itself or whole cycle
+ * => change exactly at operation
+ */
+ return tsnep_set_gcl_change(gcl, i, start + sum, false);
+ }
+ return tsnep_insert_gcl_operation(gcl, i, start + sum,
+ cycle_time - sum);
+}
+
+static int tsnep_enable_gcl(struct tsnep_adapter *adapter,
+ struct tsnep_gcl *gcl, struct tsnep_gcl *curr)
+{
+ u64 system_time;
+ u64 timeout;
+ u64 limit;
+
+ /* estimate the timeout limit after the timeout has been enabled; the actual
+ * timeout limit in hardware will be earlier than this estimate, so we are
+ * on the safe side
+ */
+ tsnep_get_system_time(adapter, &system_time);
+ timeout = system_time + TSNEP_GC_TIMEOUT;
+
+ if (curr)
+ limit = timeout + curr->change_limit;
+ else
+ limit = timeout;
+
+ gcl->start_time = tsnep_gcl_start_after(gcl, limit);
+
+ /* the gate control time register is only 32 bit, so the start time must be
+ * in the near future (far future start times are not supported by the driver)
+ */
+ if ((gcl->start_time - system_time) >= U32_MAX)
+ return -EAGAIN;
+
+ if (curr) {
+ /* change gate control list */
+ u64 last;
+ u64 change;
+
+ last = tsnep_gcl_start_before(curr, gcl->start_time);
+ if ((last + curr->cycle_time) == gcl->start_time)
+ change = tsnep_cut_gcl(curr, last,
+ gcl->start_time - last);
+ else if (((gcl->start_time - last) <=
+ curr->cycle_time_extension) ||
+ ((gcl->start_time - last) <= TSNEP_GCL_MIN_INTERVAL))
+ change = tsnep_extend_gcl(curr, last,
+ gcl->start_time - last);
+ else
+ change = tsnep_cut_gcl(curr, last,
+ gcl->start_time - last);
+
+ WARN_ON(change <= timeout);
+ gcl->change = true;
+ iowrite32(change & 0xFFFFFFFF, adapter->addr + TSNEP_GC_CHANGE);
+ } else {
+ /* start gate control list */
+ WARN_ON(gcl->start_time <= timeout);
+ gcl->change = false;
+ iowrite32(gcl->start_time & 0xFFFFFFFF,
+ adapter->addr + TSNEP_GC_TIME);
+ }
+
+ return 0;
+}
+
+static int tsnep_taprio(struct tsnep_adapter *adapter,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ struct tsnep_gcl *gcl;
+ struct tsnep_gcl *curr;
+ int retval;
+
+ if (!adapter->gate_control)
+ return -EOPNOTSUPP;
+
+ if (!qopt->enable) {
+ /* disable gate control if active */
+ mutex_lock(&adapter->gate_control_lock);
+
+ if (adapter->gate_control_active) {
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ adapter->gate_control_active = false;
+ }
+
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return 0;
+ }
+
+ retval = tsnep_validate_gcl(qopt);
+ if (retval)
+ return retval;
+
+ mutex_lock(&adapter->gate_control_lock);
+
+ gcl = &adapter->gcl[adapter->next_gcl];
+ tsnep_write_gcl(gcl, qopt);
+
+ /* select current gate control list if active */
+ if (adapter->gate_control_active) {
+ if (adapter->next_gcl == 0)
+ curr = &adapter->gcl[1];
+ else
+ curr = &adapter->gcl[0];
+ } else {
+ curr = NULL;
+ }
+
+ for (;;) {
+ /* start the timeout which discards a late enable; this helps to ensure
+ * that the start/change time is still in the future at enable
+ */
+ iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC);
+
+ retval = tsnep_enable_gcl(adapter, gcl, curr);
+ if (retval) {
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return retval;
+ }
+
+ /* enable gate control list */
+ if (adapter->next_gcl == 0)
+ iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
+ else
+ iowrite8(TSNEP_GC_ENABLE_B, adapter->addr + TSNEP_GC);
+
+ /* done if timeout did not happen */
+ if (!(ioread32(adapter->addr + TSNEP_GC) &
+ TSNEP_GC_TIMEOUT_SIGNAL))
+ break;
+
+ /* timeout is acknowledged with any enable */
+ iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC);
+
+ if (curr)
+ tsnep_clean_gcl(curr);
+
+ /* retry because of timeout */
+ }
+
+ adapter->gate_control_active = true;
+
+ if (adapter->next_gcl == 0)
+ adapter->next_gcl = 1;
+ else
+ adapter->next_gcl = 0;
+
+ mutex_unlock(&adapter->gate_control_lock);
+
+ return 0;
+}
+
+static int tsnep_tc_query_caps(struct tsnep_adapter *adapter,
+ struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (!adapter->gate_control)
+ return -EOPNOTSUPP;
+
+ caps->gate_mask_per_txq = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return tsnep_tc_query_caps(adapter, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return tsnep_taprio(adapter, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int tsnep_tc_init(struct tsnep_adapter *adapter)
+{
+ if (!adapter->gate_control)
+ return 0;
+
+ /* open all gates */
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ iowrite32(TSNEP_GC_OPEN | TSNEP_GC_NEXT_OPEN, adapter->addr + TSNEP_GC);
+
+ adapter->gcl[0].addr = adapter->addr + TSNEP_GCL_A;
+ adapter->gcl[1].addr = adapter->addr + TSNEP_GCL_B;
+
+ return 0;
+}
+
+void tsnep_tc_cleanup(struct tsnep_adapter *adapter)
+{
+ if (!adapter->gate_control)
+ return;
+
+ if (adapter->gate_control_active) {
+ iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC);
+ adapter->gate_control_active = false;
+ }
+}
diff --git a/drivers/net/ethernet/engleder/tsnep_xdp.c b/drivers/net/ethernet/engleder/tsnep_xdp.c
new file mode 100644
index 000000000000..c0513848c547
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_xdp.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include <linux/if_vlan.h>
+#include <net/xdp_sock_drv.h>
+
+#include "tsnep.h"
+
+int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct bpf_prog *old_prog;
+
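+ /* swap in the new program and drop the reference to the old one */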
+ old_prog = xchg(&adapter->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+static int tsnep_xdp_enable_pool(struct tsnep_adapter *adapter,
+ struct xsk_buff_pool *pool, u16 queue_id)
+{
+ struct tsnep_queue *queue;
+ int retval;
+
+ if (queue_id >= adapter->num_rx_queues ||
+ queue_id >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ queue = &adapter->queue[queue_id];
+ if (queue->rx->queue_index != queue_id ||
+ queue->tx->queue_index != queue_id) {
+ netdev_err(adapter->netdev,
+ "XSK support only for TX/RX queue pairs\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ retval = xsk_pool_dma_map(pool, adapter->dmadev,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (retval) {
+ netdev_err(adapter->netdev, "failed to map XSK pool\n");
+
+ return retval;
+ }
+
+ retval = tsnep_enable_xsk(queue, pool);
+ if (retval) {
+ xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
+
+ return retval;
+ }
+
+ return 0;
+}
+
+static int tsnep_xdp_disable_pool(struct tsnep_adapter *adapter, u16 queue_id)
+{
+ struct xsk_buff_pool *pool;
+ struct tsnep_queue *queue;
+
+ if (queue_id >= adapter->num_rx_queues ||
+ queue_id >= adapter->num_tx_queues)
+ return -EINVAL;
+
+ pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
+ if (!pool)
+ return -EINVAL;
+
+ queue = &adapter->queue[queue_id];
+
+ tsnep_disable_xsk(queue);
+
+ xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC);
+
+ return 0;
+}
+
+int tsnep_xdp_setup_pool(struct tsnep_adapter *adapter,
+ struct xsk_buff_pool *pool, u16 queue_id)
+{
+ return pool ? tsnep_xdp_enable_pool(adapter, pool, queue_id) :
+ tsnep_xdp_disable_pool(adapter, queue_id);
+}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index b1c8ffea6ad2..95cbad198b4b 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -945,7 +945,9 @@ static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
}
static void ethoc_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ethoc *priv = netdev_priv(dev);
@@ -961,7 +963,9 @@ static void ethoc_get_ringparam(struct net_device *dev,
}
static int ethoc_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ethoc *priv = netdev_priv(dev);
@@ -1074,14 +1078,11 @@ static int ethoc_probe(struct platform_device *pdev)
/* obtain device IRQ number */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&pdev->dev, "cannot obtain IRQ\n");
- ret = -ENXIO;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
goto free;
- }
- netdev->irq = res->start;
+ netdev->irq = ret;
/* setup driver-private data */
priv = netdev_priv(netdev);
@@ -1223,7 +1224,7 @@ static int ethoc_probe(struct platform_device *pdev)
netdev->ethtool_ops = &ethoc_ethtool_ops;
/* setup NAPI */
- netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ethoc_poll);
spin_lock_init(&priv->lock);
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 323340826dab..f1eb660aaee2 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -608,13 +608,12 @@ static s32 nps_enet_probe(struct platform_device *pdev)
/* Get IRQ number */
priv->irq = platform_get_irq(pdev, 0);
if (priv->irq < 0) {
- dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
err = -ENODEV;
goto out_netdev;
}
- netif_napi_add(ndev, &priv->napi, nps_enet_poll,
- NPS_ENET_NAPI_POLL_WEIGHT);
+ netif_napi_add_weight(ndev, &priv->napi, nps_enet_poll,
+ NPS_ENET_NAPI_POLL_WEIGHT);
/* Register the driver. Should be the last thing in probe */
err = register_netdev(ndev);
diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
index 3d1e9a302148..c699bd6bcbb9 100644
--- a/drivers/net/ethernet/faraday/Kconfig
+++ b/drivers/net/ethernet/faraday/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_FARADAY
bool "Faraday devices"
default y
- depends on ARM || NDS32 || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -19,24 +19,22 @@ if NET_VENDOR_FARADAY
config FTMAC100
tristate "Faraday FTMAC100 10/100 Ethernet support"
- depends on ARM || NDS32 || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
depends on !64BIT || BROKEN
select MII
help
This driver supports the FTMAC100 10/100 Ethernet controller
- from Faraday. It is used on Faraday A320, Andes AG101 and some
- other ARM/NDS32 SoC's.
+ from Faraday. It is used on Faraday A320 and some other ARM SoC's.
config FTGMAC100
tristate "Faraday FTGMAC100 Gigabit Ethernet support"
- depends on ARM || NDS32 || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
depends on !64BIT || BROKEN
select PHYLIB
select MDIO_ASPEED if MACH_ASPEED_G6
select CRC32
help
This driver supports the FTGMAC100 Gigabit Ethernet controller
- from Faraday. It is used on Faraday A369, Andes AG102 and some
- other ARM/NDS32 SoC's.
+ from Faraday. It is used on Faraday A369 and some other ARM SoC's.
endif # NET_VENDOR_FARADAY
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 97c5d70de76e..a03879a27b04 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -989,117 +989,6 @@ static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
return 0;
}
-static void ftgmac100_adjust_link(struct net_device *netdev)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
- struct phy_device *phydev = netdev->phydev;
- bool tx_pause, rx_pause;
- int new_speed;
-
- /* We store "no link" as speed 0 */
- if (!phydev->link)
- new_speed = 0;
- else
- new_speed = phydev->speed;
-
- /* Grab pause settings from PHY if configured to do so */
- if (priv->aneg_pause) {
- rx_pause = tx_pause = phydev->pause;
- if (phydev->asym_pause)
- tx_pause = !rx_pause;
- } else {
- rx_pause = priv->rx_pause;
- tx_pause = priv->tx_pause;
- }
-
- /* Link hasn't changed, do nothing */
- if (phydev->speed == priv->cur_speed &&
- phydev->duplex == priv->cur_duplex &&
- rx_pause == priv->rx_pause &&
- tx_pause == priv->tx_pause)
- return;
-
- /* Print status if we have a link or we had one and just lost it,
- * don't print otherwise.
- */
- if (new_speed || priv->cur_speed)
- phy_print_status(phydev);
-
- priv->cur_speed = new_speed;
- priv->cur_duplex = phydev->duplex;
- priv->rx_pause = rx_pause;
- priv->tx_pause = tx_pause;
-
- /* Link is down, do nothing else */
- if (!new_speed)
- return;
-
- /* Disable all interrupts */
- iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
-
- /* Reset the adapter asynchronously */
- schedule_work(&priv->reset_task);
-}
-
-static int ftgmac100_mii_probe(struct net_device *netdev)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
- struct platform_device *pdev = to_platform_device(priv->dev);
- struct device_node *np = pdev->dev.of_node;
- struct phy_device *phydev;
- phy_interface_t phy_intf;
- int err;
-
- /* Default to RGMII. It's a gigabit part after all */
- err = of_get_phy_mode(np, &phy_intf);
- if (err)
- phy_intf = PHY_INTERFACE_MODE_RGMII;
-
- /* Aspeed only supports these. I don't know about other IP
- * block vendors so I'm going to just let them through for
- * now. Note that this is only a warning if for some obscure
- * reason the DT really means to lie about it or it's a newer
- * part we don't know about.
- *
- * On the Aspeed SoC there are additionally straps and SCU
- * control bits that could tell us what the interface is
- * (or allow us to configure it while the IP block is held
- * in reset). For now I chose to keep this driver away from
- * those SoC specific bits and assume the device-tree is
- * right and the SCU has been configured properly by pinmux
- * or the firmware.
- */
- if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
- netdev_warn(netdev,
- "Unsupported PHY mode %s !\n",
- phy_modes(phy_intf));
- }
-
- phydev = phy_find_first(priv->mii_bus);
- if (!phydev) {
- netdev_info(netdev, "%s: no PHY found\n", netdev->name);
- return -ENODEV;
- }
-
- phydev = phy_connect(netdev, phydev_name(phydev),
- &ftgmac100_adjust_link, phy_intf);
-
- if (IS_ERR(phydev)) {
- netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
- return PTR_ERR(phydev);
- }
-
- /* Indicate that we support PAUSE frames (see comment in
- * Documentation/networking/phy.rst)
- */
- phy_support_asym_pause(phydev);
-
- /* Display what we found */
- phy_attached_info(phydev);
-
- return 0;
-}
-
static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
struct net_device *netdev = bus->priv;
@@ -1174,12 +1063,15 @@ static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
static void ftgmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
-static void ftgmac100_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static void
+ftgmac100_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct ftgmac100 *priv = netdev_priv(netdev);
@@ -1190,8 +1082,11 @@ static void ftgmac100_get_ringparam(struct net_device *netdev,
ering->tx_pending = priv->tx_q_entries;
}
-static int ftgmac100_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ering)
+static int
+ftgmac100_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct ftgmac100 *priv = netdev_priv(netdev);
@@ -1404,10 +1299,8 @@ static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
return err;
}
-static void ftgmac100_reset_task(struct work_struct *work)
+static void ftgmac100_reset(struct ftgmac100 *priv)
{
- struct ftgmac100 *priv = container_of(work, struct ftgmac100,
- reset_task);
struct net_device *netdev = priv->netdev;
int err;
@@ -1453,6 +1346,134 @@ static void ftgmac100_reset_task(struct work_struct *work)
rtnl_unlock();
}
+static void ftgmac100_reset_task(struct work_struct *work)
+{
+ struct ftgmac100 *priv = container_of(work, struct ftgmac100,
+ reset_task);
+
+ ftgmac100_reset(priv);
+}
+
+static void ftgmac100_adjust_link(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ bool tx_pause, rx_pause;
+ int new_speed;
+
+ /* We store "no link" as speed 0 */
+ if (!phydev->link)
+ new_speed = 0;
+ else
+ new_speed = phydev->speed;
+
+ /* Grab pause settings from PHY if configured to do so */
+ if (priv->aneg_pause) {
+ rx_pause = tx_pause = phydev->pause;
+ if (phydev->asym_pause)
+ tx_pause = !rx_pause;
+ } else {
+ rx_pause = priv->rx_pause;
+ tx_pause = priv->tx_pause;
+ }
+
+ /* Link hasn't changed, do nothing */
+ if (phydev->speed == priv->cur_speed &&
+ phydev->duplex == priv->cur_duplex &&
+ rx_pause == priv->rx_pause &&
+ tx_pause == priv->tx_pause)
+ return;
+
+ /* Print status if we have a link or we had one and just lost it,
+ * don't print otherwise.
+ */
+ if (new_speed || priv->cur_speed)
+ phy_print_status(phydev);
+
+ priv->cur_speed = new_speed;
+ priv->cur_duplex = phydev->duplex;
+ priv->rx_pause = rx_pause;
+ priv->tx_pause = tx_pause;
+
+ /* Link is down, do nothing else */
+ if (!new_speed)
+ return;
+
+ /* Disable all interrupts */
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+
+ /* Release the PHY lock to allow ftgmac100_reset to acquire it, keeping the
+ * lock order consistent to prevent a deadlock.
+ */
+ if (netdev->phydev)
+ mutex_unlock(&netdev->phydev->lock);
+
+ ftgmac100_reset(priv);
+
+ if (netdev->phydev)
+ mutex_lock(&netdev->phydev->lock);
+}
+
+static int ftgmac100_mii_probe(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct platform_device *pdev = to_platform_device(priv->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct phy_device *phydev;
+ phy_interface_t phy_intf;
+ int err;
+
+ /* Default to RGMII. It's a gigabit part after all */
+ err = of_get_phy_mode(np, &phy_intf);
+ if (err)
+ phy_intf = PHY_INTERFACE_MODE_RGMII;
+
+ /* Aspeed only supports these. I don't know about other IP
+ * block vendors so I'm going to just let them through for
+ * now. Note that this is only a warning if for some obscure
+ * reason the DT really means to lie about it or it's a newer
+ * part we don't know about.
+ *
+ * On the Aspeed SoC there are additionally straps and SCU
+ * control bits that could tell us what the interface is
+ * (or allow us to configure it while the IP block is held
+ * in reset). For now I chose to keep this driver away from
+ * those SoC specific bits and assume the device-tree is
+ * right and the SCU has been configured properly by pinmux
+ * or the firmware.
+ */
+ if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
+ netdev_warn(netdev,
+ "Unsupported PHY mode %s !\n",
+ phy_modes(phy_intf));
+ }
+
+ phydev = phy_find_first(priv->mii_bus);
+ if (!phydev) {
+ netdev_info(netdev, "%s: no PHY found\n", netdev->name);
+ return -ENODEV;
+ }
+
+ phydev = phy_connect(netdev, phydev_name(phydev),
+ &ftgmac100_adjust_link, phy_intf);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
+ return PTR_ERR(phydev);
+ }
+
+ /* Indicate that we support PAUSE frames (see comment in
+ * Documentation/networking/phy.rst)
+ */
+ phy_support_asym_pause(phydev);
+
+ /* Display what we found */
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
static int ftgmac100_open(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
@@ -1485,7 +1506,7 @@ static int ftgmac100_open(struct net_device *netdev)
goto err_hw;
/* Initialize NAPI */
- netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ftgmac100_poll);
/* Grab our interrupt */
err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
@@ -1680,10 +1701,14 @@ err_register_mdiobus:
static void ftgmac100_phy_disconnect(struct net_device *netdev)
{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
if (!netdev->phydev)
return;
phy_disconnect(netdev->phydev);
+ if (of_phy_is_fixed_link(priv->dev->of_node))
+ of_phy_deregister_fixed_link(priv->dev->of_node);
}
static void ftgmac100_destroy_mdio(struct net_device *netdev)
@@ -1743,6 +1768,19 @@ cleanup_clk:
return rc;
}
+static bool ftgmac100_has_child_node(struct device_node *np, const char *name)
+{
+ struct device_node *child_np = of_get_child_by_name(np, name);
+ bool ret = false;
+
+ if (child_np) {
+ ret = true;
+ of_node_put(child_np);
+ }
+
+ return ret;
+}
+
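The new helper exists because of_get_child_by_name() returns the child node with its reference count raised, so a caller that only needs a presence check must drop that reference or it leaks. A minimal sketch of the same pattern (hypothetical function, not part of this patch):

	#include <linux/of.h>

	/* Presence check for an "mdio" child that does not leak the
	 * reference taken by of_get_child_by_name().
	 */
	static bool example_has_mdio_child(struct device_node *np)
	{
		struct device_node *child = of_get_child_by_name(np, "mdio");

		if (!child)
			return false;

		of_node_put(child);	/* balance the implicit get */
		return true;
	}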
static int ftgmac100_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -1814,11 +1852,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->rxdes0_edorr_mask = BIT(30);
priv->txdes0_edotr_mask = BIT(30);
priv->is_aspeed = true;
- /* Disable ast2600 problematic HW arbitration */
- if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
- iowrite32(FTGMAC100_TM_DEFAULT,
- priv->base + FTGMAC100_OFFSET_TM);
- }
} else {
priv->rxdes0_edorr_mask = BIT(15);
priv->txdes0_edotr_mask = BIT(15);
@@ -1838,6 +1871,26 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = -EINVAL;
goto err_phy_connect;
}
+ } else if (np && of_phy_is_fixed_link(np)) {
+ struct phy_device *phy;
+
+ err = of_phy_register_fixed_link(np);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register fixed PHY\n");
+ goto err_phy_connect;
+ }
+
+ phy = of_phy_get_and_connect(priv->netdev, np,
+ &ftgmac100_adjust_link);
+ if (!phy) {
+ dev_err(&pdev->dev, "Failed to connect to fixed PHY\n");
+ of_phy_deregister_fixed_link(np);
+ err = -EINVAL;
+ goto err_phy_connect;
+ }
+
+ /* Display what we found */
+ phy_attached_info(phy);
} else if (np && of_get_property(np, "phy-handle", NULL)) {
struct phy_device *phy;
@@ -1867,7 +1920,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
/* Display what we found */
phy_attached_info(phy);
- } else if (np && !of_get_child_by_name(np, "mdio")) {
+ } else if (np && !ftgmac100_has_child_node(np, "mdio")) {
/* Support legacy ASPEED devicetree descriptions that describe a
* MAC with an embedded MDIO controller but have no "mdio"
* child node. Automatically scan the MDIO bus for available
@@ -1890,6 +1943,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = ftgmac100_setup_clk(priv);
if (err)
goto err_phy_connect;
+
+ /* Disable ast2600 problematic HW arbitration */
+ if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
+ iowrite32(FTGMAC100_TM_DEFAULT,
+ priv->base + FTGMAC100_OFFSET_TM);
}
/* Default ring sizes */
@@ -1907,6 +1965,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
/* AST2400 doesn't have working HW checksum generation */
if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
+ /* AST2600 tx checksum with NCSI is broken */
+ if (priv->use_ncsi && of_device_is_compatible(np, "aspeed,ast2600-mac"))
+ netdev->hw_features &= ~NETIF_F_HW_CSUM;
+
if (np && of_get_property(np, "no-hw-checksum", NULL))
netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 8a341e2d5833..139fe66f8bcd 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -11,6 +11,8 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -27,8 +29,8 @@
#define RX_QUEUE_ENTRIES 128 /* must be power of 2 */
#define TX_QUEUE_ENTRIES 16 /* must be power of 2 */
-#define MAX_PKT_SIZE 1518
#define RX_BUF_SIZE 2044 /* must be smaller than 0x7ff */
+#define MAX_PKT_SIZE RX_BUF_SIZE /* multi-segment not supported */
#if MAX_PKT_SIZE > 0x7ff
#error invalid MAX_PKT_SIZE
@@ -159,6 +161,7 @@ static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac)
static int ftmac100_start_hw(struct ftmac100 *priv)
{
struct net_device *netdev = priv->netdev;
+ unsigned int maccr = MACCR_ENABLE_ALL;
if (ftmac100_reset(priv))
return -EIO;
@@ -175,7 +178,17 @@ static int ftmac100_start_hw(struct ftmac100 *priv)
ftmac100_set_mac(priv, netdev->dev_addr);
- iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR);
+ /* See ftmac100_change_mtu() */
+ if (netdev->mtu > ETH_DATA_LEN)
+ maccr |= FTMAC100_MACCR_RX_FTL;
+
+ /* Add other bits as needed */
+ if (netdev->flags & IFF_PROMISC)
+ maccr |= FTMAC100_MACCR_RCV_ALL;
+ if (netdev->flags & IFF_ALLMULTI)
+ maccr |= FTMAC100_MACCR_RX_MULTIPKT;
+
+ iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
return 0;
}
@@ -218,11 +231,6 @@ static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes)
return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR);
}
-static bool ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes)
-{
- return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL);
-}
-
static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes)
{
return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT);
@@ -337,13 +345,7 @@ static bool ftmac100_rx_packet_error(struct ftmac100 *priv,
error = true;
}
- if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) {
- if (net_ratelimit())
- netdev_info(netdev, "rx frame too long\n");
-
- netdev->stats.rx_length_errors++;
- error = true;
- } else if (unlikely(ftmac100_rxdes_runt(rxdes))) {
+ if (unlikely(ftmac100_rxdes_runt(rxdes))) {
if (net_ratelimit())
netdev_info(netdev, "rx runt\n");
@@ -356,6 +358,11 @@ static bool ftmac100_rx_packet_error(struct ftmac100 *priv,
netdev->stats.rx_length_errors++;
error = true;
}
+ /*
+ * FTMAC100_RXDES0_FTL is not an error, it just indicates that the
+ * frame is longer than 1518 octets. Receiving these is possible when
+ * we told the hardware not to drop them, via FTMAC100_MACCR_RX_FTL.
+ */
return error;
}
@@ -400,12 +407,13 @@ static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
return true;
}
- /*
- * It is impossible to get multi-segment packets
- * because we always provide big enough receive buffers.
- */
+ /* We don't support multi-segment packets for now, so drop them. */
ret = ftmac100_rxdes_last_segment(rxdes);
- BUG_ON(!ret);
+ if (unlikely(!ret)) {
+ netdev->stats.rx_length_errors++;
+ ftmac100_rx_drop_packet(priv);
+ return true;
+ }
/* start processing */
skb = netdev_alloc_skb_ip_align(netdev, 128);
@@ -807,8 +815,8 @@ static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
static void ftmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static int ftmac100_get_link_ksettings(struct net_device *netdev,
@@ -1037,6 +1045,28 @@ static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int c
return generic_mii_ioctl(&priv->mii, data, cmd, NULL);
}
+static int ftmac100_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct ftmac100 *priv = netdev_priv(netdev);
+ unsigned int maccr;
+
+ maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR);
+ if (mtu > ETH_DATA_LEN) {
+ /* process long packets in the driver */
+ maccr |= FTMAC100_MACCR_RX_FTL;
+ } else {
+ /* Let the controller drop incoming packets greater
+ * than 1518 (that is 1500 + 14 Ethernet + 4 FCS).
+ */
+ maccr &= ~FTMAC100_MACCR_RX_FTL;
+ }
+ iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
+
+ netdev->mtu = mtu;
+
+ return 0;
+}
+
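Worked example for the new limits, using the defines earlier in this patch: RX_BUF_SIZE is 2044 and VLAN_ETH_HLEN is 18 (14-byte Ethernet header plus 4-byte VLAN tag), so the probe hunk below advertises netdev->max_mtu = 2044 - 18 = 2026. Any MTU above ETH_DATA_LEN (1500) also sets FTMAC100_MACCR_RX_FTL, telling the MAC to stop dropping frames longer than 1518 octets so the driver can receive them itself.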
static const struct net_device_ops ftmac100_netdev_ops = {
.ndo_open = ftmac100_open,
.ndo_stop = ftmac100_stop,
@@ -1044,6 +1074,7 @@ static const struct net_device_ops ftmac100_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_eth_ioctl = ftmac100_do_ioctl,
+ .ndo_change_mtu = ftmac100_change_mtu,
};
/******************************************************************************
@@ -1075,6 +1106,11 @@ static int ftmac100_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
netdev->ethtool_ops = &ftmac100_ethtool_ops;
netdev->netdev_ops = &ftmac100_netdev_ops;
+ netdev->max_mtu = MAX_PKT_SIZE - VLAN_ETH_HLEN;
+
+ err = platform_get_ethdev_address(&pdev->dev, netdev);
+ if (err == -EPROBE_DEFER)
+ goto defer_get_mac;
platform_set_drvdata(pdev, netdev);
@@ -1086,7 +1122,7 @@ static int ftmac100_probe(struct platform_device *pdev)
spin_lock_init(&priv->tx_lock);
/* initialize NAPI */
- netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ftmac100_poll);
/* map io memory */
priv->res = request_mem_region(res->start, resource_size(res),
@@ -1137,6 +1173,7 @@ err_ioremap:
release_resource(priv->res);
err_req_mem:
netif_napi_del(&priv->napi);
+defer_get_mac:
free_netdev(netdev);
err_alloc_etherdev:
return err;
diff --git a/drivers/net/ethernet/faraday/ftmac100.h b/drivers/net/ethernet/faraday/ftmac100.h
index fe986f1673fc..8af32f9070f4 100644
--- a/drivers/net/ethernet/faraday/ftmac100.h
+++ b/drivers/net/ethernet/faraday/ftmac100.h
@@ -122,9 +122,9 @@
* Transmit descriptor, aligned to 16 bytes
*/
struct ftmac100_txdes {
- unsigned int txdes0;
- unsigned int txdes1;
- unsigned int txdes2; /* TXBUF_BADR */
+ __le32 txdes0;
+ __le32 txdes1;
+ __le32 txdes2; /* TXBUF_BADR */
unsigned int txdes3; /* not used by HW */
} __attribute__ ((aligned(16)));
@@ -143,9 +143,9 @@ struct ftmac100_txdes {
* Receive descriptor, aligned to 16 bytes
*/
struct ftmac100_rxdes {
- unsigned int rxdes0;
- unsigned int rxdes1;
- unsigned int rxdes2; /* RXBUF_BADR */
+ __le32 rxdes0;
+ __le32 rxdes1;
+ __le32 rxdes2; /* RXBUF_BADR */
unsigned int rxdes3; /* not used by HW */
} __attribute__ ((aligned(16)));
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index b3939a5f7b03..ed18450fd2cc 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1809,8 +1809,8 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index e04e1c5cb013..1c78f66a89da 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -9,7 +9,7 @@ config NET_VENDOR_FREESCALE
depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
- ARCH_LAYERSCAPE || COMPILE_TEST
+ ARCH_LAYERSCAPE || ARCH_S32 || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -23,15 +23,17 @@ if NET_VENDOR_FREESCALE
config FEC
tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
- ARCH_MXC || SOC_IMX28 || COMPILE_TEST)
+ ARCH_MXC || ARCH_S32 || SOC_IMX28 || COMPILE_TEST)
default ARCH_MXC || SOC_IMX28 if ARM
depends on PTP_1588_CLOCK_OPTIONAL
select CRC32
select PHYLIB
+ select PAGE_POOL
+ select PAGE_POOL_STATS
imply NET_SELFTESTS
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
- controller on some Motorola ColdFire and Freescale i.MX processors.
+ controller on some Motorola ColdFire and Freescale i.MX/S32 processors.
config FEC_MPC52xx
tristate "FEC MPC52xx driver"
@@ -69,6 +71,7 @@ config FSL_XGMAC_MDIO
tristate "Freescale XGMAC MDIO"
select PHYLIB
depends on OF
+ select MDIO_DEVRES
select OF_MDIO
help
This driver supports the MDIO bus on the Fman 10G Ethernet MACs, and
diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig
index 0e1439fd00bd..2b560661c82a 100644
--- a/drivers/net/ethernet/freescale/dpaa/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa/Kconfig
@@ -2,8 +2,8 @@
menuconfig FSL_DPAA_ETH
tristate "DPAA Ethernet"
depends on FSL_DPAA && FSL_FMAN
- select PHYLIB
- select FIXED_PHY
+ select PHYLINK
+ select PCS_LYNX
help
Data Path Acceleration Architecture Ethernet driver,
supporting the Freescale QorIQ chips.
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 6b2927d863e2..431f8917dc39 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1,32 +1,7 @@
-/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -222,12 +197,15 @@ static int dpaa_rx_extra_headroom;
#define dpaa_get_max_mtu() \
(dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
+static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed);
+
static int dpaa_netdev_init(struct net_device *net_dev,
const struct net_device_ops *dpaa_ops,
u16 tx_timeout)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
+ struct mac_device *mac_dev = priv->mac_dev;
struct dpaa_percpu_priv *percpu_priv;
const u8 *mac_addr;
int i, err;
@@ -241,10 +219,10 @@ static int dpaa_netdev_init(struct net_device *net_dev,
}
net_dev->netdev_ops = dpaa_ops;
- mac_addr = priv->mac_dev->addr;
+ mac_addr = mac_dev->addr;
- net_dev->mem_start = priv->mac_dev->res->start;
- net_dev->mem_end = priv->mac_dev->res->end;
+ net_dev->mem_start = (unsigned long)priv->mac_dev->res->start;
+ net_dev->mem_end = (unsigned long)priv->mac_dev->res->end;
net_dev->min_mtu = ETH_MIN_MTU;
net_dev->max_mtu = dpaa_get_max_mtu();
@@ -266,12 +244,16 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->features |= net_dev->hw_features;
net_dev->vlan_features = net_dev->features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
if (is_valid_ether_addr(mac_addr)) {
memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
eth_hw_addr_set(net_dev, mac_addr);
} else {
eth_hw_addr_random(net_dev);
- err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
+ err = mac_dev->change_addr(mac_dev->fman_mac,
(const enet_addr_t *)net_dev->dev_addr);
if (err) {
dev_err(dev, "Failed to set random MAC address\n");
@@ -286,12 +268,27 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->needed_headroom = priv->tx_headroom;
net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
+ /* The rest of the config is filled in by the mac device already */
+ mac_dev->phylink_config.dev = &net_dev->dev;
+ mac_dev->phylink_config.type = PHYLINK_NETDEV;
+ mac_dev->update_speed = dpaa_eth_cgr_set_speed;
+ mac_dev->phylink = phylink_create(&mac_dev->phylink_config,
+ dev_fwnode(mac_dev->dev),
+ mac_dev->phy_if,
+ mac_dev->phylink_ops);
+ if (IS_ERR(mac_dev->phylink)) {
+ err = PTR_ERR(mac_dev->phylink);
+ dev_err_probe(dev, err, "Could not create phylink\n");
+ return err;
+ }
+
/* start without the RUNNING flag, phylib controls it later */
netif_carrier_off(net_dev);
err = register_netdev(net_dev);
if (err < 0) {
dev_err(dev, "register_netdev() = %d\n", err);
+ phylink_destroy(mac_dev->phylink);
return err;
}
@@ -302,7 +299,8 @@ static int dpaa_stop(struct net_device *net_dev)
{
struct mac_device *mac_dev;
struct dpaa_priv *priv;
- int i, err, error;
+ int i, error;
+ int err = 0;
priv = netdev_priv(net_dev);
mac_dev = priv->mac_dev;
@@ -313,10 +311,8 @@ static int dpaa_stop(struct net_device *net_dev)
*/
msleep(200);
- err = mac_dev->stop(mac_dev);
- if (err < 0)
- netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
- err);
+ phylink_stop(mac_dev->phylink);
+ mac_dev->disable(mac_dev->fman_mac);
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
error = fman_port_disable(mac_dev->port[i]);
@@ -324,8 +320,7 @@ static int dpaa_stop(struct net_device *net_dev)
err = error;
}
- if (net_dev->phydev)
- phy_disconnect(net_dev->phydev);
+ phylink_disconnect_phy(mac_dev->phylink);
net_dev->phydev = NULL;
msleep(200);
@@ -851,12 +846,12 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
- /* Set different thresholds based on the MAC speed.
- * This may turn suboptimal if the MAC is reconfigured at a speed
- * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
- * In such cases, we ought to reconfigure the threshold, too.
+ /* Set different thresholds based on the configured MAC speed.
+ * This may become suboptimal if the MAC is reconfigured at another
+ * speed, so MACs must call dpaa_eth_cgr_set_speed in their link_up
+ * callback.
*/
- if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
+ if (priv->mac_dev->phylink_config.mac_capabilities & MAC_10000FD)
cs_th = DPAA_CS_THRESHOLD_10G;
else
cs_th = DPAA_CS_THRESHOLD_1G;
@@ -883,6 +878,31 @@ out_error:
return err;
}
+static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed)
+{
+ struct net_device *net_dev = to_net_dev(mac_dev->phylink_config.dev);
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct qm_mcc_initcgr opts = { };
+ u32 cs_th;
+ int err;
+
+ opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
+ switch (speed) {
+ case SPEED_10000:
+ cs_th = DPAA_CS_THRESHOLD_10G;
+ break;
+ case SPEED_1000:
+ default:
+ cs_th = DPAA_CS_THRESHOLD_1G;
+ break;
+ }
+ qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, cs_th, 1);
+
+ err = qman_update_cgr_safe(&priv->cgr_data.cgr, &opts);
+ if (err)
+ netdev_err(net_dev, "could not update speed: %d\n", err);
+}
+
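To illustrate the comment above about MACs calling dpaa_eth_cgr_set_speed from their link_up callback: a hedged sketch (not taken from this series; the config-to-mac helper is hypothetical) of how a FMan MAC's phylink mac_link_up() implementation could propagate the negotiated speed through the new update_speed hook:

	static void example_mac_link_up(struct phylink_config *config,
					struct phy_device *phy, unsigned int mode,
					phy_interface_t interface, int speed,
					int duplex, bool tx_pause, bool rx_pause)
	{
		struct mac_device *mac_dev = example_config_to_mac(config); /* hypothetical */

		/* ... program the MAC for speed/duplex/pause here ... */

		/* let the DPAA core retune the CGR threshold for the new rate */
		if (mac_dev->update_speed)
			mac_dev->update_speed(mac_dev, speed);
	}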
static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
struct dpaa_fq *fq,
const struct qman_fq *template)
@@ -1463,13 +1483,8 @@ static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
parse_result = (struct fman_prs_result *)parse_results;
/* If we're dealing with VLAN, get the real Ethernet type */
- if (ethertype == ETH_P_8021Q) {
- /* We can't always assume the MAC header is set correctly
- * by the stack, so reset to beginning of skb->data
- */
- skb_reset_mac_header(skb);
- ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
- }
+ if (ethertype == ETH_P_8021Q)
+ ethertype = ntohs(skb_vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
/* Fill in the relevant L3 parse result fields
* and read the L4 protocol type
@@ -2325,7 +2340,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
txq = netdev_get_tx_queue(net_dev, queue_mapping);
/* LLTX requires us to do our own update of trans_start */
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
@@ -2395,6 +2410,9 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
cleaned = qman_p_poll_dqrr(np->p, budget);
+ if (np->xdp_act & XDP_REDIRECT)
+ xdp_do_flush();
+
if (cleaned < budget) {
napi_complete_done(napi, cleaned);
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
@@ -2402,9 +2420,6 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
}
- if (np->xdp_act & XDP_REDIRECT)
- xdp_do_flush();
-
return cleaned;
}
@@ -2531,7 +2546,7 @@ static int dpaa_xdp_xmit_frame(struct net_device *net_dev,
/* Bump the trans_start */
txq = netdev_get_tx_queue(net_dev, smp_processor_id());
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
err = dpaa_xmit(priv, percpu_stats, smp_processor_id(), &fd);
if (err) {
@@ -2623,7 +2638,7 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
}
break;
default:
- bpf_warn_invalid_xdp_action(xdp_act);
+ bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
@@ -2899,54 +2914,6 @@ static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
}
}
-static void dpaa_adjust_link(struct net_device *net_dev)
-{
- struct mac_device *mac_dev;
- struct dpaa_priv *priv;
-
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
- mac_dev->adjust_link(mac_dev);
-}
-
-/* The Aquantia PHYs are capable of performing rate adaptation */
-#define PHY_VEND_AQUANTIA 0x03a1b400
-
-static int dpaa_phy_init(struct net_device *net_dev)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- struct mac_device *mac_dev;
- struct phy_device *phy_dev;
- struct dpaa_priv *priv;
-
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
-
- phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
- &dpaa_adjust_link, 0,
- mac_dev->phy_if);
- if (!phy_dev) {
- netif_err(priv, ifup, net_dev, "init_phy() failed\n");
- return -ENODEV;
- }
-
- /* Unless the PHY is capable of rate adaptation */
- if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
- ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
- /* remove any features not supported by the controller */
- ethtool_convert_legacy_u32_to_link_mode(mask,
- mac_dev->if_support);
- linkmode_and(phy_dev->supported, phy_dev->supported, mask);
- }
-
- phy_support_asym_pause(phy_dev);
-
- mac_dev->phy_dev = phy_dev;
- net_dev->phydev = phy_dev;
-
- return 0;
-}
-
static int dpaa_open(struct net_device *net_dev)
{
struct mac_device *mac_dev;
@@ -2957,7 +2924,8 @@ static int dpaa_open(struct net_device *net_dev)
mac_dev = priv->mac_dev;
dpaa_eth_napi_enable(priv);
- err = dpaa_phy_init(net_dev);
+ err = phylink_of_phy_connect(mac_dev->phylink,
+ mac_dev->dev->of_node, 0);
if (err)
goto phy_init_failed;
@@ -2967,11 +2935,12 @@ static int dpaa_open(struct net_device *net_dev)
goto mac_start_failed;
}
- err = priv->mac_dev->start(mac_dev);
+ err = priv->mac_dev->enable(mac_dev->fman_mac);
if (err < 0) {
- netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
+ netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err);
goto mac_start_failed;
}
+ phylink_start(mac_dev->phylink);
netif_tx_start_all_queues(net_dev);
@@ -2980,6 +2949,7 @@ static int dpaa_open(struct net_device *net_dev)
mac_start_failed:
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
fman_port_disable(mac_dev->port[i]);
+ phylink_disconnect_phy(mac_dev->phylink);
phy_init_failed:
dpaa_eth_napi_disable(priv);
@@ -3135,10 +3105,12 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
{
int ret = -EINVAL;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
if (cmd == SIOCGMIIREG) {
if (net_dev->phydev)
- return phy_mii_ioctl(net_dev->phydev, rq, cmd);
+ return phylink_mii_ioctl(priv->mac_dev->phylink, rq,
+ cmd);
}
if (cmd == SIOCSHWTSTAMP)
@@ -3173,8 +3145,7 @@ static int dpaa_napi_add(struct net_device *net_dev)
for_each_possible_cpu(cpu) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- netif_napi_add(net_dev, &percpu_priv->np.napi,
- dpaa_eth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(net_dev, &percpu_priv->np.napi, dpaa_eth_poll);
}
return 0;
@@ -3542,6 +3513,7 @@ static int dpaa_remove(struct platform_device *pdev)
dev_set_drvdata(dev, NULL);
unregister_netdev(net_dev);
+ phylink_destroy(priv->mac_dev->phylink);
err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index daf894a97050..35b8cea7f886 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -1,31 +1,6 @@
-/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
*/
#ifndef __DPAA_H
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index ee62d25cac81..4fee74c024bd 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -1,32 +1,6 @@
-/* Copyright 2008-2016 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
*/
#include <linux/init.h>
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
index 409c1dc39430..889f89df9930 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
@@ -1,32 +1,6 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
*/
#undef TRACE_SYSTEM
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 763d2c7b5fb1..9c71cbbb13d8 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -1,32 +1,6 @@
-/* Copyright 2008-2016 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2016 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -80,35 +54,27 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
static int dpaa_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *cmd)
{
- if (!net_dev->phydev)
- return 0;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
- phy_ethtool_ksettings_get(net_dev->phydev, cmd);
-
- return 0;
+ return phylink_ethtool_ksettings_get(mac_dev->phylink, cmd);
}
static int dpaa_set_link_ksettings(struct net_device *net_dev,
const struct ethtool_link_ksettings *cmd)
{
- int err;
-
- if (!net_dev->phydev)
- return -ENODEV;
-
- err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
- if (err < 0)
- netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
- return err;
+ return phylink_ethtool_ksettings_set(mac_dev->phylink, cmd);
}
static void dpaa_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, KBUILD_MODNAME,
+ strscpy(drvinfo->driver, KBUILD_MODNAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
sizeof(drvinfo->bus_info));
}
@@ -125,80 +91,28 @@ static void dpaa_set_msglevel(struct net_device *net_dev,
static int dpaa_nway_reset(struct net_device *net_dev)
{
- int err;
-
- if (!net_dev->phydev)
- return -ENODEV;
-
- err = 0;
- if (net_dev->phydev->autoneg) {
- err = phy_start_aneg(net_dev->phydev);
- if (err < 0)
- netdev_err(net_dev, "phy_start_aneg() = %d\n",
- err);
- }
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
- return err;
+ return phylink_ethtool_nway_reset(mac_dev->phylink);
}
static void dpaa_get_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *epause)
{
- struct mac_device *mac_dev;
- struct dpaa_priv *priv;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
-
- if (!net_dev->phydev)
- return;
-
- epause->autoneg = mac_dev->autoneg_pause;
- epause->rx_pause = mac_dev->rx_pause_active;
- epause->tx_pause = mac_dev->tx_pause_active;
+ phylink_ethtool_get_pauseparam(mac_dev->phylink, epause);
}
static int dpaa_set_pauseparam(struct net_device *net_dev,
struct ethtool_pauseparam *epause)
{
- struct mac_device *mac_dev;
- struct phy_device *phydev;
- bool rx_pause, tx_pause;
- struct dpaa_priv *priv;
- int err;
-
- priv = netdev_priv(net_dev);
- mac_dev = priv->mac_dev;
-
- phydev = net_dev->phydev;
- if (!phydev) {
- netdev_err(net_dev, "phy device not initialized\n");
- return -ENODEV;
- }
-
- if (!phy_validate_pause(phydev, epause))
- return -EINVAL;
-
- /* The MAC should know how to handle PAUSE frame autonegotiation before
- * adjust_link is triggered by a forced renegotiation of sym/asym PAUSE
- * settings.
- */
- mac_dev->autoneg_pause = !!epause->autoneg;
- mac_dev->rx_pause_req = !!epause->rx_pause;
- mac_dev->tx_pause_req = !!epause->tx_pause;
-
- /* Determine the sym/asym advertised PAUSE capabilities from the desired
- * rx/tx pause settings.
- */
-
- phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct mac_device *mac_dev = priv->mac_dev;
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);
-
- return err;
+ return phylink_ethtool_set_pauseparam(mac_dev->phylink, epause);
}
static int dpaa_get_sset_count(struct net_device *net_dev, int type)
@@ -489,11 +403,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
info->phc_index = -1;
fman_node = of_get_parent(mac_node);
- if (fman_node)
+ if (fman_node) {
ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+ of_node_put(fman_node);
+ }
- if (ptp_node)
+ if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
+ of_node_put(ptp_node);
+ }
if (ptp_dev)
ptp = platform_get_drvdata(ptp_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index 3d9842af7f10..1b05ba8d1cbf 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
obj-$(CONFIG_FSL_DPAA2_SWITCH) += fsl-dpaa2-switch.o
-fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o dpaa2-xsk.o
fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
index 8356af4631fd..1af254caeb0d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -98,14 +98,14 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
int i;
seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name);
- seq_printf(file, "%s%16s%16s%16s%16s%16s%16s\n",
- "CHID", "CPU", "Deq busy", "Frames", "CDANs",
+ seq_printf(file, "%s %5s%16s%16s%16s%16s%16s%16s\n",
+ "IDX", "CHID", "CPU", "Deq busy", "Frames", "CDANs",
"Avg Frm/CDAN", "Buf count");
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
- seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu%16d\n",
- ch->ch_id,
+ seq_printf(file, "%3s%d%6d%16d%16llu%16llu%16llu%16llu%16d\n",
+ "CH#", i, ch->ch_id,
ch->nctx.desired_cpu,
ch->stats.dequeue_portal_busy,
ch->stats.frames,
@@ -119,6 +119,51 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_ch);
+static int dpaa2_dbg_bp_show(struct seq_file *file, void *offset)
+{
+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private;
+ int i, j, num_queues, buf_cnt;
+ struct dpaa2_eth_bp *bp;
+ char ch_name[10];
+ int err;
+
+ /* Print out the header */
+ seq_printf(file, "Buffer pool info for %s:\n", priv->net_dev->name);
+ seq_printf(file, "%s %10s%15s", "IDX", "BPID", "Buf count");
+ num_queues = dpaa2_eth_queue_count(priv);
+ for (i = 0; i < num_queues; i++) {
+ snprintf(ch_name, sizeof(ch_name), "CH#%d", i);
+ seq_printf(file, "%10s", ch_name);
+ }
+ seq_printf(file, "\n");
+
+ /* For each buffer pool, print out its BPID, the number of buffers in
+ * that buffer pool and the channels which are using it.
+ */
+ for (i = 0; i < priv->num_bps; i++) {
+ bp = priv->bp[i];
+
+ err = dpaa2_io_query_bp_count(NULL, bp->bpid, &buf_cnt);
+ if (err) {
+ netdev_warn(priv->net_dev, "Buffer count query error %d\n", err);
+ return err;
+ }
+
+ seq_printf(file, "%3s%d%10d%15d", "BP#", i, bp->bpid, buf_cnt);
+ for (j = 0; j < num_queues; j++) {
+ if (priv->channel[j]->bp == bp)
+ seq_printf(file, "%10s", "x");
+ else
+ seq_printf(file, "%10s", "");
+ }
+ seq_printf(file, "\n");
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_bp);
+
void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
{
struct fsl_mc_device *dpni_dev;
@@ -139,6 +184,10 @@ void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
/* per-fq stats file */
debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_fops);
+
+ /* per buffer pool stats file */
+ debugfs_create_file("bp_stats", 0444, dir, priv, &dpaa2_dbg_bp_fops);
+
}
void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
index 7fefe1574b6a..76f808d38066 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
@@ -37,18 +37,9 @@ static int dpaa2_eth_dl_info_get(struct devlink *devlink,
struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
char buf[10];
- int err;
-
- err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
- if (err)
- return err;
scnprintf(buf, 10, "%d.%d", priv->dpni_ver_major, priv->dpni_ver_minor);
- err = devlink_info_version_running_put(req, "dpni", buf);
- if (err)
- return err;
-
- return 0;
+ return devlink_info_version_running_put(req, "dpni", buf);
}
static struct dpaa2_eth_trap_item *
@@ -226,25 +217,16 @@ int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv)
{
struct devlink_port *devlink_port = &priv->devlink_port;
struct devlink_port_attrs attrs = {};
- int err;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
devlink_port_attrs_set(devlink_port, &attrs);
-
- err = devlink_port_register(priv->devlink, devlink_port, 0);
- if (err)
- return err;
-
- devlink_port_type_eth_set(devlink_port, priv->net_dev);
-
- return 0;
+ return devlink_port_register(priv->devlink, devlink_port, 0);
}
void dpaa2_eth_dl_port_del(struct dpaa2_eth_priv *priv)
{
struct devlink_port *devlink_port = &priv->devlink_port;
- devlink_port_type_clear(devlink_port);
devlink_port_unregister(devlink_port);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
index 5fb5f14e01ec..9b43fadb9b11 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
@@ -73,6 +73,14 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd,
TP_ARGS(netdev, fd)
);
+/* Tx (egress) XSK fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_xsk_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
/* Rx fd */
DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
TP_PROTO(struct net_device *netdev,
@@ -81,6 +89,14 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd,
TP_ARGS(netdev, fd)
);
+/* Rx XSK fd */
+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_xsk_fd,
+ TP_PROTO(struct net_device *netdev,
+ const struct dpaa2_fd *fd),
+
+ TP_ARGS(netdev, fd)
+);
+
/* Tx confirmation fd */
DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
TP_PROTO(struct net_device *netdev,
@@ -90,57 +106,81 @@ DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd,
);
/* Log data about raw buffers. Useful for tracing DPBP content. */
-TRACE_EVENT(dpaa2_eth_buf_seed,
- /* Trace function prototype */
- TP_PROTO(struct net_device *netdev,
- /* virtual address and size */
- void *vaddr,
- size_t size,
- /* dma map address and size */
- dma_addr_t dma_addr,
- size_t map_size,
- /* buffer pool id, if relevant */
- u16 bpid),
-
- /* Repeat argument list here */
- TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
-
- /* A structure containing the relevant information we want
- * to record. Declare name and type for each normal element,
- * name, type and size for arrays. Use __string for variable
- * length strings.
- */
- TP_STRUCT__entry(
- __field(void *, vaddr)
- __field(size_t, size)
- __field(dma_addr_t, dma_addr)
- __field(size_t, map_size)
- __field(u16, bpid)
- __string(name, netdev->name)
- ),
-
- /* The function that assigns values to the above declared
- * fields
- */
- TP_fast_assign(
- __entry->vaddr = vaddr;
- __entry->size = size;
- __entry->dma_addr = dma_addr;
- __entry->map_size = map_size;
- __entry->bpid = bpid;
- __assign_str(name, netdev->name);
- ),
-
- /* This is what gets printed when the trace event is
- * triggered.
- */
- TP_printk(TR_BUF_FMT,
- __get_str(name),
- __entry->vaddr,
- __entry->size,
- &__entry->dma_addr,
- __entry->map_size,
- __entry->bpid)
+DECLARE_EVENT_CLASS(dpaa2_eth_buf,
+ /* Trace function prototype */
+ TP_PROTO(struct net_device *netdev,
+ /* virtual address and size */
+ void *vaddr,
+ size_t size,
+ /* dma map address and size */
+ dma_addr_t dma_addr,
+ size_t map_size,
+ /* buffer pool id, if relevant */
+ u16 bpid),
+
+ /* Repeat argument list here */
+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid),
+
+ /* A structure containing the relevant information we want
+ * to record. Declare name and type for each normal element,
+ * name, type and size for arrays. Use __string for variable
+ * length strings.
+ */
+ TP_STRUCT__entry(
+ __field(void *, vaddr)
+ __field(size_t, size)
+ __field(dma_addr_t, dma_addr)
+ __field(size_t, map_size)
+ __field(u16, bpid)
+ __string(name, netdev->name)
+ ),
+
+ /* The function that assigns values to the above declared
+ * fields
+ */
+ TP_fast_assign(
+ __entry->vaddr = vaddr;
+ __entry->size = size;
+ __entry->dma_addr = dma_addr;
+ __entry->map_size = map_size;
+ __entry->bpid = bpid;
+ __assign_str(name, netdev->name);
+ ),
+
+ /* This is what gets printed when the trace event is
+ * triggered.
+ */
+ TP_printk(TR_BUF_FMT,
+ __get_str(name),
+ __entry->vaddr,
+ __entry->size,
+ &__entry->dma_addr,
+ __entry->map_size,
+ __entry->bpid)
+);
+
+/* Main memory buff seeding */
+DEFINE_EVENT(dpaa2_eth_buf, dpaa2_eth_buf_seed,
+ TP_PROTO(struct net_device *netdev,
+ void *vaddr,
+ size_t size,
+ dma_addr_t dma_addr,
+ size_t map_size,
+ u16 bpid),
+
+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid)
+);
+
+/* UMEM buff seeding on AF_XDP fast path */
+DEFINE_EVENT(dpaa2_eth_buf, dpaa2_xsk_buf_seed,
+ TP_PROTO(struct net_device *netdev,
+ void *vaddr,
+ size_t size,
+ dma_addr_t dma_addr,
+ size_t map_size,
+ u16 bpid),
+
+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid)
);
/* If only one event of a certain type needs to be declared, use TRACE_EVENT().
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 8e643567abce..a62cffaf6ff1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2022 NXP
*/
#include <linux/init.h>
#include <linux/module.h>
@@ -8,7 +8,6 @@
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
-#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/fsl/mc.h>
@@ -18,6 +17,8 @@
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
+#include <net/tso.h>
+#include <net/xdp_sock_drv.h>
#include "dpaa2-eth.h"
@@ -34,8 +35,77 @@ MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);
-static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
- dma_addr_t iova_addr)
+static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
+{
+ priv->features = 0;
+
+ if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR,
+ DPNI_PTP_ONESTEP_VER_MINOR) >= 0)
+ priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT;
+}
+
+static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp)
+{
+ struct dpni_single_step_cfg cfg;
+
+ cfg.en = 1;
+ cfg.ch_update = udp;
+ cfg.offset = offset;
+ cfg.peer_delay = 0;
+
+ if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
+ WARN_ONCE(1, "Failed to set single step register");
+}
+
+static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp)
+{
+ u32 val = 0;
+
+ val = DPAA2_PTP_SINGLE_STEP_ENABLE |
+ DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);
+
+ if (udp)
+ val |= DPAA2_PTP_SINGLE_STEP_CH;
+
+ if (priv->onestep_reg_base)
+ writel(val, priv->onestep_reg_base);
+}
+
+static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_single_step_cfg ptp_cfg;
+
+ priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect;
+
+ if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT))
+ return;
+
+ if (dpni_get_single_step_cfg(priv->mc_io, 0,
+ priv->mc_token, &ptp_cfg)) {
+ dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
+ return;
+ }
+
+ if (!ptp_cfg.ptp_onestep_reg_base) {
+ dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
+ return;
+ }
+
+ priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
+ sizeof(u32));
+ if (!priv->onestep_reg_base) {
+ dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
+ return;
+ }
+
+ priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
+}
+
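Condensed view of how the helpers above fit together, assuming the callback pointer and ptp_correction_off live in struct dpaa2_eth_priv as the later hunks suggest: the update method is chosen once at setup time, and the transmit hot path only touches the hardware when the one-step correction offset actually changes.

	/* setup: pick direct MMIO writes when the MC exposes the one-step
	 * register, otherwise keep the indirect dpni command path */
	dpaa2_eth_detect_features(priv);
	dpaa2_ptp_onestep_reg_update_method(priv);

	/* transmit hot path (see dpaa2_eth_enable_tx_tstamp() below) */
	if (priv->ptp_correction_off != offset1) {
		priv->dpaa2_set_onestep_params_cb(priv, offset1, udp);
		priv->ptp_correction_off = offset1;
	}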
+void *dpaa2_iova_to_virt(struct iommu_domain *domain,
+ dma_addr_t iova_addr)
{
phys_addr_t phys_addr;
@@ -209,23 +279,33 @@ static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
* be released in the pool
*/
static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
- int count)
+ int count, bool xsk_zc)
{
struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_eth_swa *swa;
+ struct xdp_buff *xdp_buff;
void *vaddr;
int i;
for (i = 0; i < count; i++) {
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
- dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
- DMA_BIDIRECTIONAL);
- free_pages((unsigned long)vaddr, 0);
+
+ if (!xsk_zc) {
+ dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)vaddr, 0);
+ } else {
+ swa = (struct dpaa2_eth_swa *)
+ (vaddr + DPAA2_ETH_RX_HWA_SIZE);
+ xdp_buff = swa->xsk.xdp_buff;
+ xsk_buff_free(xdp_buff);
+ }
}
}
-static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- dma_addr_t addr)
+void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr)
{
int retries = 0;
int err;
@@ -234,7 +314,7 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
return;
- while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+ while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
ch->recycled_bufs,
ch->recycled_bufs_cnt)) == -EBUSY) {
if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
@@ -243,7 +323,8 @@ static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
}
if (err) {
- dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt);
+ dpaa2_eth_free_bufs(priv, ch->recycled_bufs,
+ ch->recycled_bufs_cnt, ch->xsk_zc);
ch->buf_count -= ch->recycled_bufs_cnt;
}
@@ -307,10 +388,10 @@ static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
fq->xdp_tx_fds.num = 0;
}
-static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- struct dpaa2_fd *fd,
- void *buf_start, u16 queue_id)
+void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id)
{
struct dpaa2_faead *faead;
struct dpaa2_fd *dest_fd;
@@ -374,7 +455,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
break;
default:
- bpf_warn_invalid_xdp_action(xdp_act);
+ bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
@@ -415,19 +496,15 @@ out:
return xdp_act;
}
-static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
- const struct dpaa2_fd *fd,
- void *fd_vaddr)
+struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd, u32 fd_length,
+ void *fd_vaddr)
{
u16 fd_offset = dpaa2_fd_get_offset(fd);
- struct dpaa2_eth_priv *priv = ch->priv;
- u32 fd_length = dpaa2_fd_get_len(fd);
struct sk_buff *skb = NULL;
unsigned int skb_len;
- if (fd_length > priv->rx_copybreak)
- return NULL;
-
skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);
skb = napi_alloc_skb(&ch->napi, skb_len);
@@ -444,11 +521,66 @@ static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
return skb;
}
+static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
+{
+ struct dpaa2_eth_priv *priv = ch->priv;
+ u32 fd_length = dpaa2_fd_get_len(fd);
+
+ if (fd_length > priv->rx_copybreak)
+ return NULL;
+
+ return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr);
+}
+
+void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd, void *vaddr,
+ struct dpaa2_eth_fq *fq,
+ struct rtnl_link_stats64 *percpu_stats,
+ struct sk_buff *skb)
+{
+ struct dpaa2_fas *fas;
+ u32 status = 0;
+
+ fas = dpaa2_get_fas(vaddr, false);
+ prefetch(fas);
+ prefetch(skb->data);
+
+ /* Get the timestamp value */
+ if (priv->rx_tstamp) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ __le64 *ts = dpaa2_get_ts(vaddr, false);
+ u64 ns;
+
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps->hwtstamp = ns_to_ktime(ns);
+ }
+
+ /* Check if we need to validate the L4 csum */
+ if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
+ status = le32_to_cpu(fas->status);
+ dpaa2_eth_validate_rx_csum(priv, status, skb);
+ }
+
+ skb->protocol = eth_type_trans(skb, priv->net_dev);
+ skb_record_rx_queue(skb, fq->flowid);
+
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
+
+ list_add_tail(&skb->list, ch->rx_list);
+}
+
/* Main Rx frame processing routine */
-static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- const struct dpaa2_fd *fd,
- struct dpaa2_eth_fq *fq)
+void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq)
{
dma_addr_t addr = dpaa2_fd_get_addr(fd);
u8 fd_format = dpaa2_fd_get_format(fd);
@@ -457,9 +589,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
struct device *dev = priv->net_dev->dev.parent;
- struct dpaa2_fas *fas;
void *buf_data;
- u32 status = 0;
u32 xdp_act;
/* Tracing point */
@@ -469,8 +599,6 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
- fas = dpaa2_get_fas(vaddr, false);
- prefetch(fas);
buf_data = vaddr + dpaa2_fd_get_offset(fd);
prefetch(buf_data);
@@ -508,35 +636,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
if (unlikely(!skb))
goto err_build_skb;
- prefetch(skb->data);
-
- /* Get the timestamp value */
- if (priv->rx_tstamp) {
- struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
- __le64 *ts = dpaa2_get_ts(vaddr, false);
- u64 ns;
-
- memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-
- ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
- shhwtstamps->hwtstamp = ns_to_ktime(ns);
- }
-
- /* Check if we need to validate the L4 csum */
- if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
- status = le32_to_cpu(fas->status);
- dpaa2_eth_validate_rx_csum(priv, status, skb);
- }
-
- skb->protocol = eth_type_trans(skb, priv->net_dev);
- skb_record_rx_queue(skb, fq->flowid);
-
- percpu_stats->rx_packets++;
- percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
- ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
-
- list_add_tail(&skb->list, ch->rx_list);
-
+ dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
return;
err_build_skb:
@@ -695,7 +795,6 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
{
struct ptp_tstamp origin_timestamp;
- struct dpni_single_step_cfg cfg;
u8 msgtype, twostep, udp;
struct dpaa2_faead *faead;
struct dpaa2_fas *fas;
@@ -749,17 +848,48 @@ static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
htonl(origin_timestamp.sec_lsb);
*(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
- cfg.en = 1;
- cfg.ch_update = udp;
- cfg.offset = offset1;
- cfg.peer_delay = 0;
+ if (priv->ptp_correction_off == offset1)
+ return;
+
+ priv->dpaa2_set_onestep_params_cb(priv, offset1, udp);
+ priv->ptp_correction_off = offset1;
- if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
- &cfg))
- WARN_ONCE(1, "Failed to set single step register");
}
}
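dpaa2_eth_enable_tx_tstamp() now remembers the last one-step correction offset it programmed and skips the hardware update when the offset has not changed. A hedged sketch of that write-avoidance pattern (the register access is stubbed out; names and values are illustrative):

#include <limits.h>
#include <stdio.h>

static unsigned int last_correction_off = UINT_MAX;	/* "never written" sentinel */

static void write_onestep_reg(unsigned int offset, int udp)
{
	/* stand-in for the real register/firmware write */
	printf("program one-step correction offset=%u udp=%d\n", offset, udp);
}

/* Only touch the hardware when the offset actually changes. */
static void set_correction_offset(unsigned int offset, int udp)
{
	if (offset == last_correction_off)
		return;
	write_onestep_reg(offset, udp);
	last_correction_off = offset;
}

int main(void)
{
	set_correction_offset(42, 1);	/* programs the register */
	set_correction_offset(42, 1);	/* cached: no write */
	set_correction_offset(48, 1);	/* offset changed: programs again */
	return 0;
}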
+void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+ void *sgt_buf = NULL;
+ int sgt_buf_size;
+
+ sgt_cache = this_cpu_ptr(priv->sgt_cache);
+ sgt_buf_size = priv->tx_data_offset +
+ DPAA2_ETH_SG_ENTRIES_MAX * sizeof(struct dpaa2_sg_entry);
+
+ if (sgt_cache->count == 0)
+ sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
+ else
+ sgt_buf = sgt_cache->buf[--sgt_cache->count];
+ if (!sgt_buf)
+ return NULL;
+
+ memset(sgt_buf, 0, sgt_buf_size);
+
+ return sgt_buf;
+}
+
+void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
+{
+ struct dpaa2_eth_sgt_cache *sgt_cache;
+
+ sgt_cache = this_cpu_ptr(priv->sgt_cache);
+ if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
+ skb_free_frag(sgt_buf);
+ else
+ sgt_cache->buf[sgt_cache->count++] = sgt_buf;
+}
+
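dpaa2_eth_sgt_get() and dpaa2_eth_sgt_recycle() put a small fixed-size free list in front of the page-frag allocator so SGT buffers are reused instead of reallocated on every Tx. The same caching shape, reduced to self-contained C (single-threaded here, whereas the driver keeps one cache per CPU; sizes are illustrative):

#include <stdlib.h>
#include <string.h>

#define SGT_CACHE_SIZE 16	/* illustrative, akin to DPAA2_ETH_SGT_CACHE_SIZE */
#define SGT_BUF_SIZE   256	/* illustrative buffer size */

static void *cache_buf[SGT_CACHE_SIZE];
static int cache_count;

static void *sgt_get(void)
{
	void *buf;

	if (cache_count)
		buf = cache_buf[--cache_count];	/* reuse a cached buffer */
	else
		buf = malloc(SGT_BUF_SIZE);	/* fall back to the allocator */
	if (buf)
		memset(buf, 0, SGT_BUF_SIZE);	/* always hand out zeroed memory */
	return buf;
}

static void sgt_recycle(void *buf)
{
	if (cache_count >= SGT_CACHE_SIZE)
		free(buf);			/* cache full: give it back */
	else
		cache_buf[cache_count++] = buf;
}

int main(void)
{
	void *a = sgt_get();
	sgt_recycle(a);		/* goes back into the cache */
	void *b = sgt_get();	/* reuses the same buffer */
	sgt_recycle(b);
	return 0;
}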
/* Create a frame descriptor based on a fragmented skb */
static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
struct sk_buff *skb,
@@ -805,12 +935,11 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
/* Prepare the HW SGT structure */
sgt_buf_size = priv->tx_data_offset +
sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
- sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
+ sgt_buf = dpaa2_eth_sgt_get(priv);
if (unlikely(!sgt_buf)) {
err = -ENOMEM;
goto sgt_buf_alloc_failed;
}
- memset(sgt_buf, 0, sgt_buf_size);
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
@@ -846,6 +975,7 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
err = -ENOMEM;
goto dma_map_single_failed;
}
+ memset(fd, 0, sizeof(struct dpaa2_fd));
dpaa2_fd_set_offset(fd, priv->tx_data_offset);
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, addr);
@@ -855,7 +985,7 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
return 0;
dma_map_single_failed:
- skb_free_frag(sgt_buf);
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
sgt_buf_alloc_failed:
dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
@@ -875,7 +1005,6 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
void **swa_addr)
{
struct device *dev = priv->net_dev->dev.parent;
- struct dpaa2_eth_sgt_cache *sgt_cache;
struct dpaa2_sg_entry *sgt;
struct dpaa2_eth_swa *swa;
dma_addr_t addr, sgt_addr;
@@ -884,18 +1013,10 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
int err;
/* Prepare the HW SGT structure */
- sgt_cache = this_cpu_ptr(priv->sgt_cache);
sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
-
- if (sgt_cache->count == 0)
- sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
- GFP_ATOMIC);
- else
- sgt_buf = sgt_cache->buf[--sgt_cache->count];
+ sgt_buf = dpaa2_eth_sgt_get(priv);
if (unlikely(!sgt_buf))
return -ENOMEM;
-
- sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
@@ -923,6 +1044,7 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
goto sgt_map_failed;
}
+ memset(fd, 0, sizeof(struct dpaa2_fd));
dpaa2_fd_set_offset(fd, priv->tx_data_offset);
dpaa2_fd_set_format(fd, dpaa2_fd_sg);
dpaa2_fd_set_addr(fd, sgt_addr);
@@ -934,10 +1056,7 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
sgt_map_failed:
dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
data_map_failed:
- if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
- kfree(sgt_buf);
- else
- sgt_cache->buf[sgt_cache->count++] = sgt_buf;
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
return err;
}
@@ -978,6 +1097,7 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
if (unlikely(dma_mapping_error(dev, addr)))
return -ENOMEM;
+ memset(fd, 0, sizeof(struct dpaa2_fd));
dpaa2_fd_set_addr(fd, addr);
dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
dpaa2_fd_set_len(fd, skb->len);
@@ -994,9 +1114,10 @@ static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
* This can be called either from dpaa2_eth_tx_conf() or on the error path of
* dpaa2_eth_tx().
*/
-static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_fq *fq,
- const struct dpaa2_fd *fd, bool in_napi)
+void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *fq,
+ const struct dpaa2_fd *fd, bool in_napi)
{
struct device *dev = priv->net_dev->dev.parent;
dma_addr_t fd_addr, sg_addr;
@@ -1005,9 +1126,10 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_swa *swa;
u8 fd_format = dpaa2_fd_get_format(fd);
u32 fd_len = dpaa2_fd_get_len(fd);
-
- struct dpaa2_eth_sgt_cache *sgt_cache;
struct dpaa2_sg_entry *sgt;
+ int should_free_skb = 1;
+ void *tso_hdr;
+ int i;
fd_addr = dpaa2_fd_get_addr(fd);
buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
@@ -1039,6 +1161,33 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
/* Unmap the SGT buffer */
dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
DMA_BIDIRECTIONAL);
+ } else if (swa->type == DPAA2_ETH_SWA_SW_TSO) {
+ skb = swa->tso.skb;
+
+ sgt = (struct dpaa2_sg_entry *)(buffer_start +
+ priv->tx_data_offset);
+
+ /* Unmap the SGT buffer */
+ dma_unmap_single(dev, fd_addr, swa->tso.sgt_size,
+ DMA_BIDIRECTIONAL);
+
+ /* Unmap and free the header */
+ tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt));
+ dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
+ DMA_TO_DEVICE);
+ kfree(tso_hdr);
+
+ /* Unmap the other SG entries for the data */
+ for (i = 1; i < swa->tso.num_sg; i++)
+ dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
+ dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
+
+ if (!swa->tso.is_last_fd)
+ should_free_skb = 0;
+ } else if (swa->type == DPAA2_ETH_SWA_XSK) {
+ /* Unmap the SGT Buffer */
+ dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size,
+ DMA_BIDIRECTIONAL);
} else {
skb = swa->single.skb;
@@ -1056,6 +1205,12 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
return;
}
+ if (swa->type == DPAA2_ETH_SWA_XSK) {
+ ch->xsk_tx_pkts_sent++;
+ dpaa2_eth_sgt_recycle(priv, buffer_start);
+ return;
+ }
+
if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
fq->dq_frames++;
fq->dq_bytes += fd_len;
@@ -1067,55 +1222,195 @@ static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
}
/* Get the timestamp value */
- if (skb->cb[0] == TX_TSTAMP) {
- struct skb_shared_hwtstamps shhwtstamps;
- __le64 *ts = dpaa2_get_ts(buffer_start, true);
- u64 ns;
-
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-
- ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
- shhwtstamps.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(skb, &shhwtstamps);
- } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
- mutex_unlock(&priv->onestep_tstamp_lock);
+ if (swa->type != DPAA2_ETH_SWA_SW_TSO) {
+ if (skb->cb[0] == TX_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ __le64 *ts = dpaa2_get_ts(buffer_start, true);
+ u64 ns;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+ ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ mutex_unlock(&priv->onestep_tstamp_lock);
+ }
}
/* Free SGT buffer allocated on tx */
- if (fd_format != dpaa2_fd_single) {
- sgt_cache = this_cpu_ptr(priv->sgt_cache);
- if (swa->type == DPAA2_ETH_SWA_SG) {
- skb_free_frag(buffer_start);
- } else {
- if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
- kfree(buffer_start);
- else
- sgt_cache->buf[sgt_cache->count++] = buffer_start;
+ if (fd_format != dpaa2_fd_single)
+ dpaa2_eth_sgt_recycle(priv, buffer_start);
+
+ /* Move on with skb release. If we are just confirming multiple FDs
+ * from the same TSO skb then only the last one will need to free the
+ * skb.
+ */
+ if (should_free_skb)
+ napi_consume_skb(skb, in_napi);
+}
+
+static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb, struct dpaa2_fd *fd,
+ int *num_fds, u32 *total_fds_len)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ int hdr_len, total_len, data_left, fd_len;
+ int num_sge, err, i, sgt_buf_size;
+ struct dpaa2_fd *fd_start = fd;
+ struct dpaa2_sg_entry *sgt;
+ struct dpaa2_eth_swa *swa;
+ dma_addr_t sgt_addr, addr;
+ dma_addr_t tso_hdr_dma;
+ unsigned int index = 0;
+ struct tso_t tso;
+ char *tso_hdr;
+ void *sgt_buf;
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ hdr_len = tso_start(skb, &tso);
+ *total_fds_len = 0;
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ /* Prepare the HW SGT structure for this frame */
+ sgt_buf = dpaa2_eth_sgt_get(priv);
+ if (unlikely(!sgt_buf)) {
+ netdev_err(priv->net_dev, "dpaa2_eth_sgt_get() failed\n");
+ err = -ENOMEM;
+ goto err_sgt_get;
+ }
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Determine the data length of this frame */
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+ fd_len = data_left + hdr_len;
+
+ /* Prepare packet headers: MAC + IP + TCP */
+ tso_hdr = kmalloc(TSO_HEADER_SIZE, GFP_ATOMIC);
+ if (!tso_hdr) {
+ err = -ENOMEM;
+ goto err_alloc_tso_hdr;
+ }
+
+ tso_build_hdr(skb, tso_hdr, &tso, data_left, total_len == 0);
+ tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, tso_hdr_dma)) {
+ netdev_err(priv->net_dev, "dma_map_single(tso_hdr) failed\n");
+ err = -ENOMEM;
+ goto err_map_tso_hdr;
}
+
+ /* Setup the SG entry for the header */
+ dpaa2_sg_set_addr(sgt, tso_hdr_dma);
+ dpaa2_sg_set_len(sgt, hdr_len);
+ dpaa2_sg_set_final(sgt, data_left <= 0);
+
+ /* Compose the SG entries for each fragment of data */
+ num_sge = 1;
+ while (data_left > 0) {
+ int size;
+
+ /* Move to the next SG entry */
+ sgt++;
+ size = min_t(int, tso.size, data_left);
+
+ addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, addr)) {
+ netdev_err(priv->net_dev, "dma_map_single(tso.data) failed\n");
+ err = -ENOMEM;
+ goto err_map_data;
+ }
+ dpaa2_sg_set_addr(sgt, addr);
+ dpaa2_sg_set_len(sgt, size);
+ dpaa2_sg_set_final(sgt, size == data_left);
+
+ num_sge++;
+
+ /* Build the data for the __next__ fragment */
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+
+ /* Store the skb backpointer in the SGT buffer */
+ sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry);
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->type = DPAA2_ETH_SWA_SW_TSO;
+ swa->tso.skb = skb;
+ swa->tso.num_sg = num_sge;
+ swa->tso.sgt_size = sgt_buf_size;
+ swa->tso.is_last_fd = total_len == 0 ? 1 : 0;
+
+ /* Separately map the SGT buffer */
+ sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, sgt_addr))) {
+ netdev_err(priv->net_dev, "dma_map_single(sgt_buf) failed\n");
+ err = -ENOMEM;
+ goto err_map_sgt;
+ }
+
+ /* Setup the frame descriptor */
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, sgt_addr);
+ dpaa2_fd_set_len(fd, fd_len);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ *total_fds_len += fd_len;
+ /* Advance to the next frame descriptor */
+ fd++;
+ index++;
}
- /* Move on with skb release */
- napi_consume_skb(skb, in_napi);
+ *num_fds = index;
+
+ return 0;
+
+err_map_sgt:
+err_map_data:
+ /* Unmap all the data S/G entries for the current FD */
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+ for (i = 1; i < num_sge; i++)
+ dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
+ dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
+
+ /* Unmap the header entry */
+ dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
+err_map_tso_hdr:
+ kfree(tso_hdr);
+err_alloc_tso_hdr:
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
+err_sgt_get:
+ /* Free all the other FDs that were already fully created */
+ for (i = 0; i < index; i++)
+ dpaa2_eth_free_tx_fd(priv, NULL, NULL, &fd_start[i], false);
+
+ return err;
}
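dpaa2_eth_build_gso_fd() follows the usual software-TSO shape: each frame carries a rebuilt MAC/IP/TCP header plus at most gso_size bytes of payload, so an skb with hdr_len bytes of headers yields roughly (skb->len - hdr_len) / gso_size frames, rounded up. The slicing arithmetic alone, as a runnable sketch (the example sizes are hypothetical):

#include <stdio.h>

/* Count the frames a gso-sized split produces and their on-wire lengths. */
static int count_tso_frames(int skb_len, int hdr_len, int gso_size)
{
	int total_len = skb_len - hdr_len;	/* payload still to send */
	int num_frames = 0;

	while (total_len > 0) {
		int data_left = total_len < gso_size ? total_len : gso_size;

		total_len -= data_left;
		printf("frame %d: %d header + %d payload bytes\n",
		       num_frames, hdr_len, data_left);
		num_frames++;
	}
	return num_frames;
}

int main(void)
{
	/* e.g. a 6094-byte skb with 54 bytes of headers and MSS 1460 */
	printf("frames: %d\n", count_tso_frames(6094, 54, 1460));
	return 0;
}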
static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- struct dpaa2_fd fd;
- struct rtnl_link_stats64 *percpu_stats;
+ int total_enqueued = 0, retries = 0, enqueued;
struct dpaa2_eth_drv_stats *percpu_extras;
+ struct rtnl_link_stats64 *percpu_stats;
+ unsigned int needed_headroom;
+ int num_fds = 1, max_retries;
struct dpaa2_eth_fq *fq;
struct netdev_queue *nq;
+ struct dpaa2_fd *fd;
u16 queue_mapping;
- unsigned int needed_headroom;
- u32 fd_len;
+ void *swa = NULL;
u8 prio = 0;
int err, i;
- void *swa;
+ u32 fd_len;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
percpu_extras = this_cpu_ptr(priv->percpu_extras);
+ fd = (this_cpu_ptr(priv->fd))->array;
needed_headroom = dpaa2_eth_needed_headroom(skb);
@@ -1130,20 +1425,28 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
}
/* Setup the FD fields */
- memset(&fd, 0, sizeof(fd));
- if (skb_is_nonlinear(skb)) {
- err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa);
+ if (skb_is_gso(skb)) {
+ err = dpaa2_eth_build_gso_fd(priv, skb, fd, &num_fds, &fd_len);
+ percpu_extras->tx_sg_frames += num_fds;
+ percpu_extras->tx_sg_bytes += fd_len;
+ percpu_extras->tx_tso_frames += num_fds;
+ percpu_extras->tx_tso_bytes += fd_len;
+ } else if (skb_is_nonlinear(skb)) {
+ err = dpaa2_eth_build_sg_fd(priv, skb, fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
+ fd_len = dpaa2_fd_get_len(fd);
} else if (skb_headroom(skb) < needed_headroom) {
- err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa);
+ err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
percpu_extras->tx_converted_sg_frames++;
percpu_extras->tx_converted_sg_bytes += skb->len;
+ fd_len = dpaa2_fd_get_len(fd);
} else {
- err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa);
+ err = dpaa2_eth_build_single_fd(priv, skb, fd, &swa);
+ fd_len = dpaa2_fd_get_len(fd);
}
if (unlikely(err)) {
@@ -1151,11 +1454,12 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
goto err_build_fd;
}
- if (skb->cb[0])
- dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb);
+ if (swa && skb->cb[0])
+ dpaa2_eth_enable_tx_tstamp(priv, fd, swa, skb);
/* Tracing point */
- trace_dpaa2_tx_fd(net_dev, &fd);
+ for (i = 0; i < num_fds; i++)
+ trace_dpaa2_tx_fd(net_dev, &fd[i]);
/* TxConf FQ selection relies on queue id from the stack.
* In case of a forwarded frame from another DPNI interface, we choose
@@ -1175,27 +1479,32 @@ static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
queue_mapping %= dpaa2_eth_queue_count(priv);
}
fq = &priv->fq[queue_mapping];
-
- fd_len = dpaa2_fd_get_len(&fd);
nq = netdev_get_tx_queue(net_dev, queue_mapping);
netdev_tx_sent_queue(nq, fd_len);
/* Everything that happens after this enqueues might race with
* the Tx confirmation callback for this frame
*/
- for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
- err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
- if (err != -EBUSY)
- break;
+ max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
+ while (total_enqueued < num_fds && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fd[total_enqueued],
+ prio, num_fds - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ retries++;
+ continue;
+ }
+
+ total_enqueued += enqueued;
}
- percpu_extras->tx_portal_busy += i;
+ percpu_extras->tx_portal_busy += retries;
+
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
- dpaa2_eth_free_tx_fd(priv, fq, &fd, false);
+ dpaa2_eth_free_tx_fd(priv, NULL, fq, fd, false);
netdev_tx_completed_queue(nq, 1, fd_len);
} else {
- percpu_stats->tx_packets++;
+ percpu_stats->tx_packets += total_enqueued;
percpu_stats->tx_bytes += fd_len;
}
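The enqueue loop above tolerates partial acceptance: each call may push only some of the num_fds descriptors, -EBUSY is retried, and the retry budget scales with the number of frames. A sketch of that bounded retry-with-progress pattern (the enqueue callback is a stand-in, not the driver's, and the hard-error break is an addition for safety in this sketch):

#include <errno.h>

#define ENQUEUE_RETRIES 10	/* illustrative, akin to DPAA2_ETH_ENQUEUE_RETRIES */

/* Stand-in for priv->enqueue(): pretend the portal accepts at most
 * 4 frames per call and is never persistently busy. */
static int try_enqueue(int first, int count, int *done)
{
	(void)first;
	*done = count > 4 ? 4 : count;
	return 0;
}

static int enqueue_all(int num_fds)
{
	int total = 0, retries = 0, done = 0, err = 0;
	int max_retries = num_fds * ENQUEUE_RETRIES;

	while (total < num_fds && retries < max_retries) {
		err = try_enqueue(total, num_fds - total, &done);
		if (err == -EBUSY) {
			retries++;	/* busy: count it and retry */
			continue;
		}
		if (err < 0)
			break;		/* hard error: stop */
		total += done;		/* keep partial progress */
	}
	return total ? total : err;
}

int main(void)
{
	return enqueue_all(10) == 10 ? 0 : 1;	/* all 10 FDs eventually accepted */
}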
@@ -1285,7 +1594,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
- dpaa2_eth_free_tx_fd(priv, fq, fd, true);
+ dpaa2_eth_free_tx_fd(priv, ch, fq, fd, true);
if (likely(!fd_errors))
return;
@@ -1363,44 +1672,76 @@ static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
* to the specified buffer pool
*/
static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch, u16 bpid)
+ struct dpaa2_eth_channel *ch)
{
+ struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD];
struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ struct dpaa2_eth_swa *swa;
struct page *page;
dma_addr_t addr;
int retries = 0;
- int i, err;
-
- for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
- /* Allocate buffer visible to WRIOP + skb shared info +
- * alignment padding
- */
- /* allocate one page for each Rx buffer. WRIOP sees
- * the entire page except for a tailroom reserved for
- * skb shared info
+ int i = 0, err;
+ u32 batch;
+
+ /* Allocate buffers visible to WRIOP */
+ if (!ch->xsk_zc) {
+ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
+ /* Also allocate skb shared info and alignment padding.
+ * There is one page for each Rx buffer. WRIOP sees
+ * the entire page except for a tailroom reserved for
+ * skb shared info
+ */
+ page = dev_alloc_pages(0);
+ if (!page)
+ goto err_alloc;
+
+ addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ goto err_map;
+
+ buf_array[i] = addr;
+
+ /* tracing point */
+ trace_dpaa2_eth_buf_seed(priv->net_dev,
+ page_address(page),
+ DPAA2_ETH_RX_BUF_RAW_SIZE,
+ addr, priv->rx_buf_size,
+ ch->bp->bpid);
+ }
+ } else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) {
+ /* Allocate XSK buffers for AF_XDP fast path in batches
+ * of DPAA2_ETH_BUFS_PER_CMD. Bail out if the UMEM cannot
+ * provide enough buffers at the moment
*/
- page = dev_alloc_pages(0);
- if (!page)
+ batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs,
+ DPAA2_ETH_BUFS_PER_CMD);
+ if (!batch)
goto err_alloc;
- addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(dev, addr)))
- goto err_map;
+ for (i = 0; i < batch; i++) {
+ swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start +
+ DPAA2_ETH_RX_HWA_SIZE);
+ swa->xsk.xdp_buff = xdp_buffs[i];
- buf_array[i] = addr;
+ addr = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]);
+ if (unlikely(dma_mapping_error(dev, addr)))
+ goto err_map;
- /* tracing point */
- trace_dpaa2_eth_buf_seed(priv->net_dev,
- page, DPAA2_ETH_RX_BUF_RAW_SIZE,
- addr, priv->rx_buf_size,
- bpid);
+ buf_array[i] = addr;
+
+ trace_dpaa2_xsk_buf_seed(priv->net_dev,
+ xdp_buffs[i]->data_hard_start,
+ DPAA2_ETH_RX_BUF_RAW_SIZE,
+ addr, priv->rx_buf_size,
+ ch->bp->bpid);
+ }
}
release_bufs:
/* In case the portal is busy, retry until successful */
- while ((err = dpaa2_io_service_release(ch->dpio, bpid,
+ while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
buf_array, i)) == -EBUSY) {
if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
break;
@@ -1411,14 +1752,19 @@ release_bufs:
* not much else we can do about it
*/
if (err) {
- dpaa2_eth_free_bufs(priv, buf_array, i);
+ dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc);
return 0;
}
return i;
err_map:
- __free_pages(page, 0);
+ if (!ch->xsk_zc) {
+ __free_pages(page, 0);
+ } else {
+ for (; i < batch; i++)
+ xsk_buff_free(xdp_buffs[i]);
+ }
err_alloc:
/* If we managed to allocate at least some buffers,
* release them to hardware
@@ -1429,39 +1775,64 @@ err_alloc:
return 0;
}
-static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch)
{
- int i, j;
+ int i;
int new_count;
- for (j = 0; j < priv->num_channels; j++) {
- for (i = 0; i < DPAA2_ETH_NUM_BUFS;
- i += DPAA2_ETH_BUFS_PER_CMD) {
- new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
- priv->channel[j]->buf_count += new_count;
+ for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) {
+ new_count = dpaa2_eth_add_bufs(priv, ch);
+ ch->buf_count += new_count;
- if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
- return -ENOMEM;
- }
- }
+ if (new_count < DPAA2_ETH_BUFS_PER_CMD)
+ return -ENOMEM;
}
return 0;
}
+static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct dpaa2_eth_channel *channel;
+ int i, err = 0;
+
+ for (i = 0; i < priv->num_channels; i++) {
+ channel = priv->channel[i];
+
+ err = dpaa2_eth_seed_pool(priv, channel);
+
+ /* Not much to do; the buffer pool, though not filled up,
+ * may still contain some buffers which would enable us
+ * to limp on.
+ */
+ if (err)
+ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
+ channel->bp->dev->obj_desc.id,
+ channel->bp->bpid);
+ }
+}
+
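dpaa2_eth_seed_pool() fills a channel's buffer pool in fixed-size chunks and treats a short chunk as out of memory, while dpaa2_eth_seed_pools() merely logs the failure and limps on. The chunked-fill loop on its own, as a small sketch (the counts are illustrative and the allocator is stubbed):

#include <stdio.h>

#define NUM_BUFS      1024	/* illustrative, akin to DPAA2_ETH_NUM_BUFS */
#define BUFS_PER_CMD  7		/* illustrative, akin to DPAA2_ETH_BUFS_PER_CMD */

/* Stand-in for dpaa2_eth_add_bufs(): pretend allocation always succeeds. */
static int add_bufs(void)
{
	return BUFS_PER_CMD;
}

static int seed_pool(int *buf_count)
{
	for (int i = 0; i < NUM_BUFS; i += BUFS_PER_CMD) {
		int added = add_bufs();

		*buf_count += added;
		if (added < BUFS_PER_CMD)
			return -1;	/* short chunk: report out of memory */
	}
	return 0;
}

int main(void)
{
	int count = 0;

	printf("seed: %d, buffers: %d\n", seed_pool(&count), count);
	return 0;
}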
/*
- * Drain the specified number of buffers from the DPNI's private buffer pool.
+ * Drain the specified number of buffers from one of the DPNI's private buffer
+ * pools.
* @count must not exceed DPAA2_ETH_BUFS_PER_CMD
*/
-static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
+static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid,
+ int count)
{
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
+ bool xsk_zc = false;
int retries = 0;
- int ret;
+ int i, ret;
+
+ for (i = 0; i < priv->num_channels; i++)
+ if (priv->channel[i]->bp->bpid == bpid)
+ xsk_zc = priv->channel[i]->xsk_zc;
do {
- ret = dpaa2_io_service_acquire(NULL, priv->bpid,
- buf_array, count);
+ ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count);
if (ret < 0) {
if (ret == -EBUSY &&
retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
@@ -1469,28 +1840,40 @@ static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
return;
}
- dpaa2_eth_free_bufs(priv, buf_array, ret);
+ dpaa2_eth_free_bufs(priv, buf_array, ret, xsk_zc);
retries = 0;
} while (ret);
}
-static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid)
{
int i;
- dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
- dpaa2_eth_drain_bufs(priv, 1);
+ /* Drain the buffer pool */
+ dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD);
+ dpaa2_eth_drain_bufs(priv, bpid, 1);
+ /* Reset to zero the buffer count of all channels which were
+ * using this buffer pool.
+ */
for (i = 0; i < priv->num_channels; i++)
- priv->channel[i]->buf_count = 0;
+ if (priv->channel[i]->bp->bpid == bpid)
+ priv->channel[i]->buf_count = 0;
+}
+
+static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_bps; i++)
+ dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid);
}
/* Function is called from softirq context only, so we don't need to guard
* the access to percpu count
*/
static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- u16 bpid)
+ struct dpaa2_eth_channel *ch)
{
int new_count;
@@ -1498,7 +1881,7 @@ static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
return 0;
do {
- new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
+ new_count = dpaa2_eth_add_bufs(priv, ch);
if (unlikely(!new_count)) {
/* Out of memory; abort for now, we'll try later on */
break;
@@ -1523,7 +1906,7 @@ static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
count = sgt_cache->count;
for (i = 0; i < count; i++)
- kfree(sgt_cache->buf[i]);
+ skb_free_frag(sgt_cache->buf[i]);
sgt_cache->count = 0;
}
}
@@ -1562,6 +1945,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
struct dpaa2_eth_fq *fq, *txc_fq = NULL;
struct netdev_queue *nq;
int store_cleaned, work_done;
+ bool work_done_zc = false;
struct list_head rx_list;
int retries = 0;
u16 flowid;
@@ -1574,13 +1958,22 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
INIT_LIST_HEAD(&rx_list);
ch->rx_list = &rx_list;
+ if (ch->xsk_zc) {
+ work_done_zc = dpaa2_xsk_tx(priv, ch);
+ /* If we reached the XSK Tx per NAPI threshold, we're done */
+ if (work_done_zc) {
+ work_done = budget;
+ goto out;
+ }
+ }
+
do {
err = dpaa2_eth_pull_channel(ch);
if (unlikely(err))
break;
/* Refill pool if appropriate */
- dpaa2_eth_refill_pool(priv, ch, priv->bpid);
+ dpaa2_eth_refill_pool(priv, ch);
store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
if (store_cleaned <= 0)
@@ -1600,10 +1993,15 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
if (rx_cleaned >= budget ||
txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
work_done = budget;
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
goto out;
}
} while (store_cleaned);
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
+
/* Update NET DIM with the values for this CDAN */
dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
ch->stats.bytes_per_cdan);
@@ -1626,6 +2024,11 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
out:
netif_receive_skb_list(ch->rx_list);
+ if (ch->xsk_tx_pkts_sent) {
+ xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent);
+ ch->xsk_tx_pkts_sent = 0;
+ }
+
if (txc_fq && txc_fq->dq_frames) {
nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
netdev_tx_completed_queue(nq, txc_fq->dq_frames,
@@ -1634,9 +2037,7 @@ out:
txc_fq->dq_bytes = 0;
}
- if (ch->xdp.res & XDP_REDIRECT)
- xdp_do_flush_map();
- else if (rx_cleaned && ch->xdp.res & XDP_TX)
+ if (rx_cleaned && ch->xdp.res & XDP_TX)
dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
return work_done;
@@ -1749,8 +2150,11 @@ static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
/* When we manage the MAC/PHY using phylink there is no need
* to manually update the netif_carrier.
+ * We can avoid locking because we are called from the "link changed"
+ * IRQ handler, which is the same as the "endpoint changed" IRQ handler
+ * (the writer to priv->mac), so we cannot race with it.
*/
- if (dpaa2_eth_is_type_phy(priv))
+ if (dpaa2_mac_is_type_phy(priv->mac))
goto out;
/* Check link state; speed / duplex changes are not treated yet */
@@ -1779,15 +2183,9 @@ static int dpaa2_eth_open(struct net_device *net_dev)
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int err;
- err = dpaa2_eth_seed_pool(priv, priv->bpid);
- if (err) {
- /* Not much to do; the buffer pool, though not filled up,
- * may still contain some buffers which would enable us
- * to limp on.
- */
- netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
- priv->dpbp_dev->obj_desc.id, priv->bpid);
- }
+ dpaa2_eth_seed_pools(priv);
+
+ mutex_lock(&priv->mac_lock);
if (!dpaa2_eth_is_type_phy(priv)) {
/* We'll only start the txqs when the link is actually ready;
@@ -1807,18 +2205,21 @@ static int dpaa2_eth_open(struct net_device *net_dev)
err = dpni_enable(priv->mc_io, 0, priv->mc_token);
if (err < 0) {
+ mutex_unlock(&priv->mac_lock);
netdev_err(net_dev, "dpni_enable() failed\n");
goto enable_err;
}
if (dpaa2_eth_is_type_phy(priv))
- phylink_start(priv->mac->phylink);
+ dpaa2_mac_start(priv->mac);
+
+ mutex_unlock(&priv->mac_lock);
return 0;
enable_err:
dpaa2_eth_disable_ch_napi(priv);
- dpaa2_eth_drain_pool(priv);
+ dpaa2_eth_drain_pools(priv);
return err;
}
@@ -1885,13 +2286,17 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
int dpni_enabled = 0;
int retries = 10;
+ mutex_lock(&priv->mac_lock);
+
if (dpaa2_eth_is_type_phy(priv)) {
- phylink_stop(priv->mac->phylink);
+ dpaa2_mac_stop(priv->mac);
} else {
netif_tx_stop_all_queues(net_dev);
netif_carrier_off(net_dev);
}
+ mutex_unlock(&priv->mac_lock);
+
/* On dpni_disable(), the MC firmware will:
* - stop MAC Rx and wait for all Rx frames to be enqueued to software
* - cut off WRIOP dequeues from egress FQs and wait until transmission
@@ -1922,7 +2327,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
dpaa2_eth_disable_ch_napi(priv);
/* Empty the buffer pool */
- dpaa2_eth_drain_pool(priv);
+ dpaa2_eth_drain_pools(priv);
/* Empty the Scatter-Gather Buffer cache */
dpaa2_eth_sgt_cache_drain(priv);
@@ -2207,6 +2612,9 @@ static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
config.rx_filter = HWTSTAMP_FILTER_ALL;
}
+ if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ dpaa2_ptp_onestep_reg_update_method(priv);
+
return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
@@ -2214,12 +2622,20 @@ static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ int err;
if (cmd == SIOCSHWTSTAMP)
return dpaa2_eth_ts_ioctl(dev, rq, cmd);
- if (dpaa2_eth_is_type_phy(priv))
- return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
+ mutex_lock(&priv->mac_lock);
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
+ mutex_unlock(&priv->mac_lock);
+ return err;
+ }
+
+ mutex_unlock(&priv->mac_lock);
return -EOPNOTSUPP;
}
@@ -2328,7 +2744,7 @@ static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
need_update = (!!priv->xdp_prog != !!prog);
if (up)
- dpaa2_eth_stop(dev);
+ dev_close(dev);
/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
* Also, when switching between xdp/non-xdp modes we need to reconfigure
@@ -2356,7 +2772,7 @@ static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
}
if (up) {
- err = dpaa2_eth_open(dev);
+ err = dev_open(dev, NULL);
if (err)
return err;
}
@@ -2367,7 +2783,7 @@ out_err:
if (prog)
bpf_prog_sub(prog, priv->num_channels);
if (up)
- dpaa2_eth_open(dev);
+ dev_open(dev, NULL);
return err;
}
@@ -2377,6 +2793,8 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return dpaa2_eth_setup_xdp(dev, xdp->prog);
+ case XDP_SETUP_XSK_POOL:
+ return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id);
default:
return -EINVAL;
}
@@ -2607,6 +3025,7 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_change_mtu = dpaa2_eth_change_mtu,
.ndo_bpf = dpaa2_eth_xdp,
.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
+ .ndo_xsk_wakeup = dpaa2_xsk_wakeup,
.ndo_setup_tc = dpaa2_eth_setup_tc,
.ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
.ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
@@ -2621,7 +3040,11 @@ static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
/* Update NAPI statistics */
ch->stats.cdan++;
- napi_schedule(&ch->napi);
+ /* NAPI can also be scheduled from the AF_XDP Tx path. Mark it as missed
+ * so that it can be rescheduled again.
+ */
+ if (!napi_if_scheduled_mark_missed(&ch->napi))
+ napi_schedule(&ch->napi);
}
/* Allocate and configure a DPCON object */
@@ -2634,10 +3057,12 @@ static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
FSL_MC_POOL_DPCON, &dpcon);
if (err) {
- if (err == -ENXIO)
+ if (err == -ENXIO) {
+ dev_dbg(dev, "Waiting for DPCON\n");
err = -EPROBE_DEFER;
- else
+ } else {
dev_info(dev, "Not enough DPCONs, will go on as-is\n");
+ }
return ERR_PTR(err);
}
@@ -2747,7 +3172,9 @@ static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
channel = dpaa2_eth_alloc_channel(priv);
if (IS_ERR_OR_NULL(channel)) {
err = PTR_ERR_OR_ZERO(channel);
- if (err != -EPROBE_DEFER)
+ if (err == -EPROBE_DEFER)
+ dev_dbg(dev, "waiting for affine channel\n");
+ else
dev_info(dev,
"No affine channel for cpu %d and above\n", i);
goto err_alloc_ch;
@@ -2930,13 +3357,14 @@ static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
dpaa2_eth_set_fq_affinity(priv);
}
-/* Allocate and configure one buffer pool for each interface */
-static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
+/* Allocate and configure a buffer pool */
+struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv)
{
- int err;
- struct fsl_mc_device *dpbp_dev;
struct device *dev = priv->net_dev->dev.parent;
+ struct fsl_mc_device *dpbp_dev;
struct dpbp_attr dpbp_attrs;
+ struct dpaa2_eth_bp *bp;
+ int err;
err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
&dpbp_dev);
@@ -2945,12 +3373,16 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
err = -EPROBE_DEFER;
else
dev_err(dev, "DPBP device allocation failed\n");
- return err;
+ return ERR_PTR(err);
}
- priv->dpbp_dev = dpbp_dev;
+ bp = kzalloc(sizeof(*bp), GFP_KERNEL);
+ if (!bp) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
- err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
+ err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id,
&dpbp_dev->mc_handle);
if (err) {
dev_err(dev, "dpbp_open() failed\n");
@@ -2975,9 +3407,11 @@ static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
dev_err(dev, "dpbp_get_attributes() failed\n");
goto err_get_attr;
}
- priv->bpid = dpbp_attrs.bpid;
- return 0;
+ bp->dev = dpbp_dev;
+ bp->bpid = dpbp_attrs.bpid;
+
+ return bp;
err_get_attr:
dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
@@ -2985,17 +3419,58 @@ err_enable:
err_reset:
dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
err_open:
+ kfree(bp);
+err_alloc:
fsl_mc_object_free(dpbp_dev);
- return err;
+ return ERR_PTR(err);
+}
+
+static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_bp *bp;
+ int i;
+
+ bp = dpaa2_eth_allocate_dpbp(priv);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp;
+ priv->num_bps++;
+
+ for (i = 0; i < priv->num_channels; i++)
+ priv->channel[i]->bp = bp;
+
+ return 0;
+}
+
+void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp)
+{
+ int idx_bp;
+
+ /* Find the index at which this BP is stored */
+ for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++)
+ if (priv->bp[idx_bp] == bp)
+ break;
+
+ /* Drain the pool and disable the associated MC object */
+ dpaa2_eth_drain_pool(priv, bp->bpid);
+ dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle);
+ dpbp_close(priv->mc_io, 0, bp->dev->mc_handle);
+ fsl_mc_object_free(bp->dev);
+ kfree(bp);
+
+ /* Move the last in-use DPBP into this position */
+ priv->bp[idx_bp] = priv->bp[priv->num_bps - 1];
+ priv->num_bps--;
}
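dpaa2_eth_free_dpbp() removes an entry from priv->bp[] by overwriting the freed slot with the last in-use pool, keeping the array dense without preserving order. The pattern in isolation (plain C, illustrative values):

#include <stdio.h>

#define MAX_POOLS 9	/* illustrative, akin to DPAA2_ETH_MAX_BPS */

static int pools[MAX_POOLS] = { 10, 11, 12, 13 };
static int num_pools = 4;

/* Unordered removal: O(1), keeps the used entries packed at the front. */
static void remove_pool(int target)
{
	int i;

	for (i = 0; i < num_pools; i++)
		if (pools[i] == target)
			break;
	if (i == num_pools)
		return;				/* not found */

	pools[i] = pools[num_pools - 1];	/* move the last entry down */
	num_pools--;
}

int main(void)
{
	remove_pool(11);
	for (int i = 0; i < num_pools; i++)
		printf("%d ", pools[i]);	/* prints: 10 13 12 */
	printf("\n");
	return 0;
}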
-static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv)
{
- dpaa2_eth_drain_pool(priv);
- dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
- dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
- fsl_mc_object_free(priv->dpbp_dev);
+ int i;
+
+ for (i = 0; i < priv->num_bps; i++)
+ dpaa2_eth_free_dpbp(priv, priv->bp[i]);
}
static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
@@ -3336,7 +3811,7 @@ static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
priv->dpni_ver_major, priv->dpni_ver_minor,
DPNI_VER_MAJOR, DPNI_VER_MINOR);
- err = -ENOTSUPP;
+ err = -EOPNOTSUPP;
goto close;
}
@@ -3880,15 +4355,16 @@ out:
*/
static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
{
+ struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
struct net_device *net_dev = priv->net_dev;
+ struct dpni_pools_cfg pools_params = { 0 };
struct device *dev = net_dev->dev.parent;
- struct dpni_pools_cfg pools_params;
struct dpni_error_cfg err_cfg;
int err = 0;
int i;
pools_params.num_dpbp = 1;
- pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
+ pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
pools_params.pools[0].backup_pool = 0;
pools_params.pools[0].buffer_size = priv->rx_buf_size;
err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
@@ -4100,6 +4576,8 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
return err;
}
+ dpaa2_eth_detect_features(priv);
+
/* Capabilities listing */
supported |= IFF_LIVE_ADDR_CHANGE;
@@ -4115,8 +4593,15 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
net_dev->features = NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_SG | NETIF_F_HIGHDMA |
- NETIF_F_LLTX | NETIF_F_HW_TC;
+ NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
+ net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
net_dev->hw_features = net_dev->features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+ if (priv->dpni_attrs.wriop_version >= DPAA2_WRIOP_VERSION(3, 0, 0) &&
+ priv->dpni_attrs.num_queues <= 8)
+ net_dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
if (priv->dpni_attrs.vlan_filter_entries)
net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -4149,8 +4634,10 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);
- if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
+ if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) {
+ netdev_dbg(priv->net_dev, "waiting for mac\n");
return PTR_ERR(dpmac_dev);
+ }
if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
return 0;
@@ -4166,22 +4653,29 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
err = dpaa2_mac_open(mac);
if (err)
goto err_free_mac;
- priv->mac = mac;
- if (dpaa2_eth_is_type_phy(priv)) {
+ if (dpaa2_mac_is_type_phy(mac)) {
err = dpaa2_mac_connect(mac);
- if (err && err != -EPROBE_DEFER)
- netdev_err(priv->net_dev, "Error connecting to the MAC endpoint: %pe",
- ERR_PTR(err));
- if (err)
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ netdev_dbg(priv->net_dev,
+ "could not connect to MAC\n");
+ else
+ netdev_err(priv->net_dev,
+ "Error connecting to the MAC endpoint: %pe",
+ ERR_PTR(err));
goto err_close_mac;
+ }
}
+ mutex_lock(&priv->mac_lock);
+ priv->mac = mac;
+ mutex_unlock(&priv->mac_lock);
+
return 0;
err_close_mac:
dpaa2_mac_close(mac);
- priv->mac = NULL;
err_free_mac:
kfree(mac);
return err;
@@ -4189,15 +4683,21 @@ err_free_mac:
static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
- if (dpaa2_eth_is_type_phy(priv))
- dpaa2_mac_disconnect(priv->mac);
+ struct dpaa2_mac *mac;
+
+ mutex_lock(&priv->mac_lock);
+ mac = priv->mac;
+ priv->mac = NULL;
+ mutex_unlock(&priv->mac_lock);
- if (!dpaa2_eth_has_mac(priv))
+ if (!mac)
return;
- dpaa2_mac_close(priv->mac);
- kfree(priv->mac);
- priv->mac = NULL;
+ if (dpaa2_mac_is_type_phy(mac))
+ dpaa2_mac_disconnect(mac);
+
+ dpaa2_mac_close(mac);
+ kfree(mac);
}
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
@@ -4207,6 +4707,7 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
struct net_device *net_dev = dev_get_drvdata(dev);
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ bool had_mac;
int err;
err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
@@ -4223,12 +4724,15 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
dpaa2_eth_update_tx_fqids(priv);
- rtnl_lock();
- if (dpaa2_eth_has_mac(priv))
+ /* We can avoid locking because the "endpoint changed" IRQ
+ * handler is the only one who changes priv->mac at runtime,
+ * so we are not racing with anyone.
+ */
+ had_mac = !!priv->mac;
+ if (had_mac)
dpaa2_eth_disconnect_mac(priv);
else
dpaa2_eth_connect_mac(priv);
- rtnl_unlock();
}
return IRQ_HANDLED;
@@ -4246,7 +4750,7 @@ static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
}
irq = ls_dev->irqs[0];
- err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
+ err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
NULL, dpni_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(&ls_dev->dev), &ls_dev->dev);
@@ -4273,7 +4777,7 @@ static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
return 0;
free_irq:
- devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
+ devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
free_mc_irq:
fsl_mc_free_irqs(ls_dev);
@@ -4288,8 +4792,7 @@ static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
- netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll);
}
}
@@ -4325,6 +4828,9 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv = netdev_priv(net_dev);
priv->net_dev = net_dev;
+ SET_NETDEV_DEVLINK_PORT(net_dev, &priv->devlink_port);
+
+ mutex_init(&priv->mac_lock);
priv->iommu_domain = iommu_get_domain_for_dev(dev);
@@ -4338,7 +4844,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
}
INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
-
+ mutex_init(&priv->onestep_tstamp_lock);
skb_queue_head_init(&priv->tx_skbs);
priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
@@ -4347,10 +4853,12 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
&priv->mc_io);
if (err) {
- if (err == -ENXIO)
+ if (err == -ENXIO) {
+ dev_dbg(dev, "waiting for MC portal\n");
err = -EPROBE_DEFER;
- else
+ } else {
dev_err(dev, "MC portal allocation failed\n");
+ }
goto err_portal_alloc;
}
@@ -4365,7 +4873,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
dpaa2_eth_setup_fqs(priv);
- err = dpaa2_eth_setup_dpbp(priv);
+ err = dpaa2_eth_setup_default_dpbp(priv);
if (err)
goto err_dpbp_setup;
@@ -4397,6 +4905,13 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
goto err_alloc_sgt_cache;
}
+ priv->fd = alloc_percpu(*priv->fd);
+ if (!priv->fd) {
+ dev_err(dev, "alloc_percpu(fds) failed\n");
+ err = -ENOMEM;
+ goto err_alloc_fds;
+ }
+
err = dpaa2_eth_netdev_init(net_dev);
if (err)
goto err_netdev_init;
@@ -4424,6 +4939,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
}
#endif
+ err = dpaa2_eth_connect_mac(priv);
+ if (err)
+ goto err_connect_mac;
+
err = dpaa2_eth_setup_irqs(dpni_dev);
if (err) {
netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
@@ -4436,10 +4955,6 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv->do_link_poll = true;
}
- err = dpaa2_eth_connect_mac(priv);
- if (err)
- goto err_connect_mac;
-
err = dpaa2_eth_dl_alloc(priv);
if (err)
goto err_dl_register;
@@ -4473,17 +4988,19 @@ err_dl_port_add:
err_dl_trap_register:
dpaa2_eth_dl_free(priv);
err_dl_register:
- dpaa2_eth_disconnect_mac(priv);
-err_connect_mac:
if (priv->do_link_poll)
kthread_stop(priv->poll_thread);
else
fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
+ dpaa2_eth_disconnect_mac(priv);
+err_connect_mac:
dpaa2_eth_free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
+ free_percpu(priv->fd);
+err_alloc_fds:
free_percpu(priv->sgt_cache);
err_alloc_sgt_cache:
free_percpu(priv->percpu_extras);
@@ -4492,7 +5009,7 @@ err_alloc_percpu_extras:
err_alloc_percpu_stats:
dpaa2_eth_del_ch_napi(priv);
err_bind:
- dpaa2_eth_free_dpbp(priv);
+ dpaa2_eth_free_dpbps(priv);
err_dpbp_setup:
dpaa2_eth_free_dpio(priv);
err_dpio_setup:
@@ -4523,9 +5040,6 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
#ifdef CONFIG_DEBUG_FS
dpaa2_dbg_remove(priv);
#endif
- rtnl_lock();
- dpaa2_eth_disconnect_mac(priv);
- rtnl_unlock();
unregister_netdev(net_dev);
@@ -4538,15 +5052,19 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
else
fsl_mc_free_irqs(ls_dev);
+ dpaa2_eth_disconnect_mac(priv);
dpaa2_eth_free_rings(priv);
+ free_percpu(priv->fd);
free_percpu(priv->sgt_cache);
free_percpu(priv->percpu_stats);
free_percpu(priv->percpu_extras);
dpaa2_eth_del_ch_napi(priv);
- dpaa2_eth_free_dpbp(priv);
+ dpaa2_eth_free_dpbps(priv);
dpaa2_eth_free_dpio(priv);
dpaa2_eth_free_dpni(priv);
+ if (priv->onestep_reg_base)
+ iounmap(priv->onestep_reg_base);
fsl_mc_portal_free(priv->mc_io);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index e54e70ebdd05..d56d7a13262e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2022 NXP
*/
#ifndef __DPAA2_ETH_H
@@ -53,6 +53,12 @@
*/
#define DPAA2_ETH_TXCONF_PER_NAPI 256
+/* Maximum number of Tx frames to be processed in a single NAPI
+ * call when AF_XDP is running. Bind it to DPAA2_ETH_TXCONF_PER_NAPI
+ * to maximize the throughput.
+ */
+#define DPAA2_ETH_TX_ZC_PER_NAPI DPAA2_ETH_TXCONF_PER_NAPI
+
/* Buffer quota per channel. We want to keep in check the number of ingress frames
* in flight: for small sized frames, congestion group taildrop may kick in
* first; for large sizes, Rx FQ taildrop threshold will ensure only a
@@ -109,6 +115,14 @@
#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256
#define DPAA2_ETH_RX_BUF_ALIGN 64
+/* The firmware allows assigning multiple buffer pools to a single DPNI -
+ * maximum 8 DPBP objects. By default, only the first DPBP (idx 0) is used for
+ * all queues. Thus, when enabling AF_XDP we must accommodate up to 9 DPBP
+ * objects: the default one and 8 other distinct buffer pools, one for each queue.
+ */
+#define DPAA2_ETH_DEFAULT_BP_IDX 0
+#define DPAA2_ETH_MAX_BPS 9
+
/* We are accommodating a skb backpointer and some S/G info
* in the frame's software annotation. The hardware
* options are either 0 or 64, so we choose the latter.
@@ -122,6 +136,8 @@ enum dpaa2_eth_swa_type {
DPAA2_ETH_SWA_SINGLE,
DPAA2_ETH_SWA_SG,
DPAA2_ETH_SWA_XDP,
+ DPAA2_ETH_SWA_XSK,
+ DPAA2_ETH_SWA_SW_TSO,
};
/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */
@@ -142,6 +158,16 @@ struct dpaa2_eth_swa {
int dma_size;
struct xdp_frame *xdpf;
} xdp;
+ struct {
+ struct xdp_buff *xdp_buff;
+ int sgt_size;
+ } xsk;
+ struct {
+ struct sk_buff *skb;
+ int num_sg;
+ int sgt_size;
+ int is_last_fd;
+ } tso;
};
};
@@ -354,6 +380,8 @@ struct dpaa2_eth_drv_stats {
__u64 tx_conf_bytes;
__u64 tx_sg_frames;
__u64 tx_sg_bytes;
+ __u64 tx_tso_frames;
+ __u64 tx_tso_bytes;
__u64 rx_sg_frames;
__u64 rx_sg_bytes;
/* Linear skbs sent as a S/G FD due to insufficient headroom */
@@ -412,12 +440,19 @@ enum dpaa2_eth_fq_type {
};
struct dpaa2_eth_priv;
+struct dpaa2_eth_channel;
+struct dpaa2_eth_fq;
struct dpaa2_eth_xdp_fds {
struct dpaa2_fd fds[DEV_MAP_BULK_SIZE];
ssize_t num;
};
+typedef void dpaa2_eth_consume_cb_t(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq);
+
struct dpaa2_eth_fq {
u32 fqid;
u32 tx_qdbin;
@@ -430,10 +465,7 @@ struct dpaa2_eth_fq {
struct dpaa2_eth_channel *channel;
enum dpaa2_eth_fq_type type;
- void (*consume)(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- const struct dpaa2_fd *fd,
- struct dpaa2_eth_fq *fq);
+ dpaa2_eth_consume_cb_t *consume;
struct dpaa2_eth_fq_stats stats;
struct dpaa2_eth_xdp_fds xdp_redirect_fds;
@@ -445,6 +477,11 @@ struct dpaa2_eth_ch_xdp {
unsigned int res;
};
+struct dpaa2_eth_bp {
+ struct fsl_mc_device *dev;
+ int bpid;
+};
+
struct dpaa2_eth_channel {
struct dpaa2_io_notification_ctx nctx;
struct fsl_mc_device *dpcon;
@@ -463,6 +500,11 @@ struct dpaa2_eth_channel {
/* Buffers to be recycled back in the buffer pool */
u64 recycled_bufs[DPAA2_ETH_BUFS_PER_CMD];
int recycled_bufs_cnt;
+
+ bool xsk_zc;
+ int xsk_tx_pkts_sent;
+ struct xsk_buff_pool *xsk_pool;
+ struct dpaa2_eth_bp *bp;
};
struct dpaa2_eth_dist_fields {
@@ -493,8 +535,15 @@ struct dpaa2_eth_trap_data {
struct dpaa2_eth_priv *priv;
};
+#define DPAA2_ETH_SG_ENTRIES_MAX (PAGE_SIZE / sizeof(struct scatterlist))
+
#define DPAA2_ETH_DEFAULT_COPYBREAK 512
+#define DPAA2_ETH_ENQUEUE_MAX_FDS 256
+struct dpaa2_eth_fds {
+ struct dpaa2_fd array[DPAA2_ETH_ENQUEUE_MAX_FDS];
+};
+
/* Driver private data */
struct dpaa2_eth_priv {
struct net_device *net_dev;
@@ -510,20 +559,25 @@ struct dpaa2_eth_priv {
u8 num_channels;
struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
struct dpaa2_eth_sgt_cache __percpu *sgt_cache;
-
+ unsigned long features;
struct dpni_attr dpni_attrs;
u16 dpni_ver_major;
u16 dpni_ver_minor;
u16 tx_data_offset;
-
- struct fsl_mc_device *dpbp_dev;
+ void __iomem *onestep_reg_base;
+ u8 ptp_correction_off;
+ void (*dpaa2_set_onestep_params_cb)(struct dpaa2_eth_priv *priv,
+ u32 offset, u8 udp);
u16 rx_buf_size;
- u16 bpid;
struct iommu_domain *iommu_domain;
enum hwtstamp_tx_types tx_tstamp_type; /* Tx timestamping type */
bool rx_tstamp; /* Rx timestamping enabled */
+ /* Buffer pool management */
+ struct dpaa2_eth_bp *bp[DPAA2_ETH_MAX_BPS];
+ int num_bps;
+
u16 tx_qdid;
struct fsl_mc_io *mc_io;
/* Cores which have an affine DPIO/DPCON.
@@ -561,6 +615,8 @@ struct dpaa2_eth_priv {
#endif
struct dpaa2_mac *mac;
+ /* Serializes changes to priv->mac */
+ struct mutex mac_lock;
struct workqueue_struct *dpaa2_ptp_wq;
struct work_struct tx_onestep_tstamp;
struct sk_buff_head tx_skbs;
@@ -577,6 +633,8 @@ struct dpaa2_eth_priv {
struct devlink_port devlink_port;
u32 rx_copybreak;
+
+ struct dpaa2_eth_fds __percpu *fd;
};
struct dpaa2_eth_devlink_priv {
@@ -655,6 +713,13 @@ enum dpaa2_eth_rx_dist {
#define DPAA2_ETH_DIST_L4DST BIT(8)
#define DPAA2_ETH_DIST_ALL (~0ULL)
+#define DPNI_PTP_ONESTEP_VER_MAJOR 8
+#define DPNI_PTP_ONESTEP_VER_MINOR 2
+#define DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT BIT(0)
+#define DPAA2_PTP_SINGLE_STEP_ENABLE BIT(31)
+#define DPAA2_PTP_SINGLE_STEP_CH BIT(7)
+#define DPAA2_PTP_SINGLE_CORRECTION_OFF(v) ((v) << 8)
+
#define DPNI_PAUSE_VER_MAJOR 7
#define DPNI_PAUSE_VER_MINOR 13
#define dpaa2_eth_has_pause_support(priv) \
@@ -705,16 +770,15 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
{
- if (priv->mac &&
- (priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
- priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
- return true;
+ lockdep_assert_held(&priv->mac_lock);
- return false;
+ return dpaa2_mac_is_type_phy(priv->mac);
}
static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
{
+ lockdep_assert_held(&priv->mac_lock);
+
return priv->mac ? true : false;
}
@@ -743,4 +807,54 @@ void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv);
struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
struct dpaa2_fapr *fapr);
+
+struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp);
+
+struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd, u32 fd_length,
+ void *fd_vaddr);
+
+void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd, void *vaddr,
+ struct dpaa2_eth_fq *fq,
+ struct rtnl_link_stats64 *percpu_stats,
+ struct sk_buff *skb);
+
+void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq);
+
+void *dpaa2_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr);
+void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr);
+
+void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id);
+
+int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
+int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid);
+
+void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *fq,
+ const struct dpaa2_fd *fd, bool in_napi);
+bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch);
+
+/* SGT (Scatter-Gather Table) cache management */
+void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv);
+
+void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf);
+
#endif /* __DPAA2_ETH_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 3fdbf87dccb1..e80e9388c71f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP
- * Copyright 2020 NXP
+ * Copyright 2016-2022 NXP
*/
#include <linux/net_tstamp.h>
@@ -44,6 +43,8 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
"[drv] tx conf bytes",
"[drv] tx sg frames",
"[drv] tx sg bytes",
+ "[drv] tx tso frames",
+ "[drv] tx tso bytes",
"[drv] rx sg frames",
"[drv] rx sg bytes",
"[drv] tx converted sg frames",
@@ -84,11 +85,16 @@ static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = -EOPNOTSUPP;
+
+ mutex_lock(&priv->mac_lock);
if (dpaa2_eth_is_type_phy(priv))
- return phylink_ethtool_nway_reset(priv->mac->phylink);
+ err = phylink_ethtool_nway_reset(priv->mac->phylink);
+
+ mutex_unlock(&priv->mac_lock);
- return -EOPNOTSUPP;
+ return err;
}
static int
@@ -96,10 +102,18 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
struct ethtool_link_ksettings *link_settings)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
- if (dpaa2_eth_is_type_phy(priv))
- return phylink_ethtool_ksettings_get(priv->mac->phylink,
- link_settings);
+ mutex_lock(&priv->mac_lock);
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = phylink_ethtool_ksettings_get(priv->mac->phylink,
+ link_settings);
+ mutex_unlock(&priv->mac_lock);
+ return err;
+ }
+
+ mutex_unlock(&priv->mac_lock);
link_settings->base.autoneg = AUTONEG_DISABLE;
if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
@@ -114,11 +128,17 @@ dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
const struct ethtool_link_ksettings *link_settings)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err = -EOPNOTSUPP;
+
+ mutex_lock(&priv->mac_lock);
+
+ if (dpaa2_eth_is_type_phy(priv))
+ err = phylink_ethtool_ksettings_set(priv->mac->phylink,
+ link_settings);
- if (!dpaa2_eth_is_type_phy(priv))
- return -ENOTSUPP;
+ mutex_unlock(&priv->mac_lock);
- return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
+ return err;
}
static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
@@ -127,11 +147,16 @@ static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
u64 link_options = priv->link_state.options;
+ mutex_lock(&priv->mac_lock);
+
if (dpaa2_eth_is_type_phy(priv)) {
phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
+ mutex_unlock(&priv->mac_lock);
return;
}
+ mutex_unlock(&priv->mac_lock);
+
pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
pause->autoneg = AUTONEG_DISABLE;
@@ -150,9 +175,17 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
return -EOPNOTSUPP;
}
- if (dpaa2_eth_is_type_phy(priv))
- return phylink_ethtool_set_pauseparam(priv->mac->phylink,
- pause);
+ mutex_lock(&priv->mac_lock);
+
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = phylink_ethtool_set_pauseparam(priv->mac->phylink,
+ pause);
+ mutex_unlock(&priv->mac_lock);
+ return err;
+ }
+
+ mutex_unlock(&priv->mac_lock);
+
if (pause->autoneg)
return -EOPNOTSUPP;
@@ -184,7 +217,6 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
- struct dpaa2_eth_priv *priv = netdev_priv(netdev);
u8 *p = data;
int i;
@@ -198,22 +230,17 @@ static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- if (dpaa2_eth_has_mac(priv))
- dpaa2_mac_get_strings(p);
+ dpaa2_mac_get_strings(p);
break;
}
}
static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
- int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-
switch (sset) {
case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
- if (dpaa2_eth_has_mac(priv))
- num_ss_stats += dpaa2_mac_get_sset_count();
- return num_ss_stats;
+ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS +
+ dpaa2_mac_get_sset_count();
default:
return -EOPNOTSUPP;
}
@@ -225,17 +252,8 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
struct ethtool_stats *stats,
u64 *data)
{
- int i = 0;
- int j, k, err;
- int num_cnt;
- union dpni_statistics dpni_stats;
- u32 fcnt, bcnt;
- u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
- u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
- u32 buf_cnt;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- struct dpaa2_eth_drv_stats *extras;
- struct dpaa2_eth_ch_stats *ch_stats;
+ union dpni_statistics dpni_stats;
int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
sizeof(dpni_stats.page_0),
sizeof(dpni_stats.page_1),
@@ -245,6 +263,13 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
sizeof(dpni_stats.page_5),
sizeof(dpni_stats.page_6),
};
+ u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
+ u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
+ struct dpaa2_eth_ch_stats *ch_stats;
+ struct dpaa2_eth_drv_stats *extras;
+ u32 buf_cnt, buf_cnt_total = 0;
+ int j, k, err, num_cnt, i = 0;
+ u32 fcnt, bcnt;
memset(data, 0,
sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));
@@ -306,15 +331,22 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
*(data + i++) = fcnt_tx_total;
*(data + i++) = bcnt_tx_total;
- err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
- if (err) {
- netdev_warn(net_dev, "Buffer count query error %d\n", err);
- return;
+ for (j = 0; j < priv->num_bps; j++) {
+ err = dpaa2_io_query_bp_count(NULL, priv->bp[j]->bpid, &buf_cnt);
+ if (err) {
+ netdev_warn(net_dev, "Buffer count query error %d\n", err);
+ return;
+ }
+ buf_cnt_total += buf_cnt;
}
- *(data + i++) = buf_cnt;
+ *(data + i++) = buf_cnt_total;
+
+ mutex_lock(&priv->mac_lock);
if (dpaa2_eth_has_mac(priv))
dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
+
+ mutex_unlock(&priv->mac_lock);
}
static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
@@ -874,6 +906,29 @@ restore_rx_usecs:
return err;
}
+static void dpaa2_eth_get_channels(struct net_device *net_dev,
+ struct ethtool_channels *channels)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int queue_count = dpaa2_eth_queue_count(priv);
+
+ channels->max_rx = queue_count;
+ channels->max_tx = queue_count;
+ channels->rx_count = queue_count;
+ channels->tx_count = queue_count;
+
+ /* Tx confirmation and Rx error */
+ channels->max_other = queue_count + 1;
+ channels->max_combined = channels->max_rx +
+ channels->max_tx +
+ channels->max_other;
+ /* Tx conf and Rx err */
+ channels->other_count = queue_count + 1;
+ channels->combined_count = channels->rx_count +
+ channels->tx_count +
+ channels->other_count;
+}
+
const struct ethtool_ops dpaa2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
@@ -894,4 +949,5 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.set_tunable = dpaa2_eth_set_tunable,
.get_coalesce = dpaa2_eth_get_coalesce,
.set_coalesce = dpaa2_eth_set_coalesce,
+ .get_channels = dpaa2_eth_get_channels,
};

The new .get_channels callback only reports a fixed topology derived from the number of queue pairs, so the values it exposes can be reproduced with plain arithmetic. A minimal standalone sketch of the same computation, where queue_count is a hypothetical stand-in for dpaa2_eth_queue_count():

#include <stdio.h>

int main(void)
{
	/* Each queue pair contributes one Rx and one Tx channel; the extra
	 * "other" channels cover the Tx confirmation queues plus one Rx
	 * error queue, mirroring the driver's get_channels arithmetic.
	 */
	unsigned int queue_count = 8;	/* hypothetical */
	unsigned int rx = queue_count;
	unsigned int tx = queue_count;
	unsigned int other = queue_count + 1;

	printf("rx=%u tx=%u other=%u combined=%u\n",
	       rx, tx, other, rx + tx + other);
	return 0;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index ef8f0a055024..b1871e6c4006 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -2,6 +2,8 @@
/* Copyright 2019 NXP */
#include <linux/acpi.h>
+#include <linux/pcs-lynx.h>
+#include <linux/phy/phy.h>
#include <linux/property.h>
#include "dpaa2-eth.h"
@@ -10,6 +12,28 @@
#define phylink_to_dpaa2_mac(config) \
container_of((config), struct dpaa2_mac, phylink_config)
+#define DPMAC_PROTOCOL_CHANGE_VER_MAJOR 4
+#define DPMAC_PROTOCOL_CHANGE_VER_MINOR 8
+
+#define DPAA2_MAC_FEATURE_PROTOCOL_CHANGE BIT(0)
+
+static int dpaa2_mac_cmp_ver(struct dpaa2_mac *mac,
+ u16 ver_major, u16 ver_minor)
+{
+ if (mac->ver_major == ver_major)
+ return mac->ver_minor - ver_minor;
+ return mac->ver_major - ver_major;
+}
+
+static void dpaa2_mac_detect_features(struct dpaa2_mac *mac)
+{
+ mac->features = 0;
+
+ if (dpaa2_mac_cmp_ver(mac, DPMAC_PROTOCOL_CHANGE_VER_MAJOR,
+ DPMAC_PROTOCOL_CHANGE_VER_MINOR) >= 0)
+ mac->features |= DPAA2_MAC_FEATURE_PROTOCOL_CHANGE;
+}
+
static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
{
*if_mode = PHY_INTERFACE_MODE_NA;
@@ -37,10 +61,33 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
return 0;
}
+static enum dpmac_eth_if dpmac_eth_if_mode(phy_interface_t if_mode)
+{
+ switch (if_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return DPMAC_ETH_IF_RGMII;
+ case PHY_INTERFACE_MODE_USXGMII:
+ return DPMAC_ETH_IF_USXGMII;
+ case PHY_INTERFACE_MODE_QSGMII:
+ return DPMAC_ETH_IF_QSGMII;
+ case PHY_INTERFACE_MODE_SGMII:
+ return DPMAC_ETH_IF_SGMII;
+ case PHY_INTERFACE_MODE_10GBASER:
+ return DPMAC_ETH_IF_XFI;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ return DPMAC_ETH_IF_1000BASEX;
+ default:
+ return DPMAC_ETH_IF_MII;
+ }
+}
+
static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
u16 dpmac_id)
{
- struct fwnode_handle *fwnode, *parent, *child = NULL;
+ struct fwnode_handle *fwnode, *parent = NULL, *child = NULL;
struct device_node *dpmacs = NULL;
int err;
u32 id;
@@ -53,6 +100,13 @@ static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
parent = of_fwnode_handle(dpmacs);
} else if (is_acpi_node(fwnode)) {
parent = fwnode;
+ } else {
+		/* The root dprc device has not yet finalized its probe,
+ * thus the fwnode field is not yet set. Defer probe if we are
+ * facing this situation.
+ */
+ dev_dbg(dev, "dprc not finished probing\n");
+ return ERR_PTR(-EPROBE_DEFER);
}
fwnode_for_each_child_node(parent, child) {
@@ -90,86 +144,12 @@ static int dpaa2_mac_get_if_mode(struct fwnode_handle *dpmac_node,
return err;
}
-static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
- phy_interface_t interface)
-{
- switch (interface) {
- /* We can switch between SGMII and 1000BASE-X at runtime with
- * pcs-lynx
- */
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- if (mac->pcs &&
- (mac->if_mode == PHY_INTERFACE_MODE_SGMII ||
- mac->if_mode == PHY_INTERFACE_MODE_1000BASEX))
- return false;
- return interface != mac->if_mode;
-
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_USXGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- return (interface != mac->if_mode);
- default:
- return true;
- }
-}
-
-static void dpaa2_mac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+static struct phylink_pcs *dpaa2_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
struct dpaa2_mac *mac = phylink_to_dpaa2_mac(config);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- dpaa2_mac_phy_mode_mismatch(mac, state->interface)) {
- goto empty_set;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_USXGMII:
- phylink_set_10g_modes(mask);
- if (state->interface == PHY_INTERFACE_MODE_10GBASER)
- break;
- phylink_set(mask, 5000baseT_Full);
- phylink_set(mask, 2500baseT_Full);
- fallthrough;
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 1000baseT_Full);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
- break;
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 10baseT_Full);
- break;
- default:
- goto empty_set;
- }
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- return;
-
-empty_set:
- linkmode_zero(supported);
+ return mac->pcs;
}
static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
@@ -179,7 +159,8 @@ static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
struct dpmac_link_state *dpmac_state = &mac->state;
int err;
- if (state->an_enabled)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ state->advertising))
dpmac_state->options |= DPMAC_LINK_OPT_AUTONEG;
else
dpmac_state->options &= ~DPMAC_LINK_OPT_AUTONEG;
@@ -189,6 +170,19 @@ static void dpaa2_mac_config(struct phylink_config *config, unsigned int mode,
if (err)
netdev_err(mac->net_dev, "%s: dpmac_set_link_state() = %d\n",
__func__, err);
+
+ if (!mac->serdes_phy)
+ return;
+
+	/* This happens only if we support changing the protocol at runtime */
+ err = dpmac_set_protocol(mac->mc_io, 0, mac->mc_dev->mc_handle,
+ dpmac_eth_if_mode(state->interface));
+ if (err)
+ netdev_err(mac->net_dev, "dpmac_set_protocol() = %d\n", err);
+
+ err = phy_set_mode_ext(mac->serdes_phy, PHY_MODE_ETHERNET, state->interface);
+ if (err)
+ netdev_err(mac->net_dev, "phy_set_mode_ext() = %d\n", err);
}
static void dpaa2_mac_link_up(struct phylink_config *config,
@@ -243,7 +237,7 @@ static void dpaa2_mac_link_down(struct phylink_config *config,
}
static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
- .validate = dpaa2_mac_validate,
+ .mac_select_pcs = dpaa2_mac_select_pcs,
.mac_config = dpaa2_mac_config,
.mac_link_up = dpaa2_mac_link_up,
.mac_link_down = dpaa2_mac_link_down,
@@ -271,8 +265,10 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac,
mdiodev = fwnode_mdio_find_device(node);
fwnode_handle_put(node);
- if (!mdiodev)
+ if (!mdiodev) {
+ netdev_dbg(mac->net_dev, "missing PCS device\n");
return -EPROBE_DEFER;
+ }
mac->pcs = lynx_pcs_create(mdiodev);
if (!mac->pcs) {
@@ -286,20 +282,86 @@ static int dpaa2_pcs_create(struct dpaa2_mac *mac,
static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
{
- struct lynx_pcs *pcs = mac->pcs;
+ struct phylink_pcs *phylink_pcs = mac->pcs;
- if (pcs) {
- struct device *dev = &pcs->mdio->dev;
- lynx_pcs_destroy(pcs);
+ if (phylink_pcs) {
+ struct mdio_device *mdio = lynx_get_mdio_device(phylink_pcs);
+ struct device *dev = &mdio->dev;
+
+ lynx_pcs_destroy(phylink_pcs);
put_device(dev);
mac->pcs = NULL;
}
}
+static void dpaa2_mac_set_supported_interfaces(struct dpaa2_mac *mac)
+{
+ int intf, err;
+
+	/* We support the current interface mode and, if we have a PCS,
+ * similar interface modes that do not require the SerDes lane to be
+ * reconfigured.
+ */
+ __set_bit(mac->if_mode, mac->phylink_config.supported_interfaces);
+ if (mac->pcs) {
+ switch (mac->if_mode) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (!mac->serdes_phy)
+ return;
+
+	/* If we have access to the SerDes phy/lane, ask the SerDes
+ * driver what interfaces are supported based on the current PLL
+ * configuration.
+ */
+ for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) {
+ if (intf == PHY_INTERFACE_MODE_NA)
+ continue;
+
+ err = phy_validate(mac->serdes_phy, PHY_MODE_ETHERNET, intf, NULL);
+ if (err)
+ continue;
+
+ __set_bit(intf, mac->phylink_config.supported_interfaces);
+ }
+}
+
+void dpaa2_mac_start(struct dpaa2_mac *mac)
+{
+ ASSERT_RTNL();
+
+ if (mac->serdes_phy)
+ phy_power_on(mac->serdes_phy);
+
+ phylink_start(mac->phylink);
+}
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac)
+{
+ ASSERT_RTNL();
+
+ phylink_stop(mac->phylink);
+
+ if (mac->serdes_phy)
+ phy_power_off(mac->serdes_phy);
+}
+
int dpaa2_mac_connect(struct dpaa2_mac *mac)
{
struct net_device *net_dev = mac->net_dev;
struct fwnode_handle *dpmac_node;
+ struct phy *serdes_phy = NULL;
struct phylink *phylink;
int err;
@@ -316,6 +378,20 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
return -EINVAL;
mac->if_mode = err;
+ if (mac->features & DPAA2_MAC_FEATURE_PROTOCOL_CHANGE &&
+ !phy_interface_mode_is_rgmii(mac->if_mode) &&
+ is_of_node(dpmac_node)) {
+ serdes_phy = of_phy_get(to_of_node(dpmac_node), NULL);
+
+ if (serdes_phy == ERR_PTR(-ENODEV))
+ serdes_phy = NULL;
+ else if (IS_ERR(serdes_phy))
+ return PTR_ERR(serdes_phy);
+ else
+ phy_init(serdes_phy);
+ }
+ mac->serdes_phy = serdes_phy;
+
/* The MAC does not have the capability to add RGMII delays so
* error out if the interface mode requests them and there is no PHY
* to act upon them
@@ -336,9 +412,16 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
return err;
}
+ memset(&mac->phylink_config, 0, sizeof(mac->phylink_config));
mac->phylink_config.dev = &net_dev->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
+ mac->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD | MAC_2500FD | MAC_5000FD |
+ MAC_10000FD;
+
+ dpaa2_mac_set_supported_interfaces(mac);
+
phylink = phylink_create(&mac->phylink_config,
dpmac_node, mac->if_mode,
&dpaa2_mac_phylink_ops);
@@ -348,10 +431,9 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
}
mac->phylink = phylink;
- if (mac->pcs)
- phylink_set_pcs(mac->phylink, &mac->pcs->pcs);
-
+ rtnl_lock();
err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0);
+ rtnl_unlock();
if (err) {
netdev_err(net_dev, "phylink_fwnode_phy_connect() = %d\n", err);
goto err_phylink_destroy;
@@ -369,18 +451,21 @@ err_pcs_destroy:
void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
{
- if (!mac->phylink)
- return;
-
+ rtnl_lock();
phylink_disconnect_phy(mac->phylink);
+ rtnl_unlock();
+
phylink_destroy(mac->phylink);
dpaa2_pcs_destroy(mac);
+ of_phy_put(mac->serdes_phy);
+ mac->serdes_phy = NULL;
}
int dpaa2_mac_open(struct dpaa2_mac *mac)
{
struct fsl_mc_device *dpmac_dev = mac->mc_dev;
struct net_device *net_dev = mac->net_dev;
+ struct fwnode_handle *fw_node;
int err;
err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
@@ -397,10 +482,24 @@ int dpaa2_mac_open(struct dpaa2_mac *mac)
goto err_close_dpmac;
}
+ err = dpmac_get_api_version(mac->mc_io, 0, &mac->ver_major, &mac->ver_minor);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_api_version() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ dpaa2_mac_detect_features(mac);
+
/* Find the device node representing the MAC device and link the device
* behind the associated netdev to it.
*/
- mac->fw_node = dpaa2_mac_get_node(&mac->mc_dev->dev, mac->attr.id);
+ fw_node = dpaa2_mac_get_node(&mac->mc_dev->dev, mac->attr.id);
+ if (IS_ERR(fw_node)) {
+ err = PTR_ERR(fw_node);
+ goto err_close_dpmac;
+ }
+
+ mac->fw_node = fw_node;
net_dev->dev.of_node = to_of_node(mac->fw_node);
return 0;
@@ -463,7 +562,7 @@ void dpaa2_mac_get_strings(u8 *data)
int i;
for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
- strlcpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
+ strscpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
}
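The feature detection added to dpaa2-mac.c boils down to a two-level (major, minor) comparison against the 4.8 cutoff for runtime protocol change. A standalone sketch of that check, with a hypothetical firmware version as input:

#include <stdio.h>

#define PROTOCOL_CHANGE_MAJOR 4
#define PROTOCOL_CHANGE_MINOR 8

/* Same semantics as dpaa2_mac_cmp_ver(): compare major first, then minor. */
static int cmp_ver(int major, int minor, int ref_major, int ref_minor)
{
	if (major == ref_major)
		return minor - ref_minor;
	return major - ref_major;
}

int main(void)
{
	int major = 4, minor = 7;	/* hypothetical DPMAC API version */

	printf("protocol change supported: %s\n",
	       cmp_ver(major, minor, PROTOCOL_CHANGE_MAJOR,
		       PROTOCOL_CHANGE_MINOR) >= 0 ? "yes" : "no");
	return 0;
}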
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 7842cbb2207a..c1ec9efd413a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -7,7 +7,6 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phylink.h>
-#include <linux/pcs-lynx.h>
#include "dpmac.h"
#include "dpmac-cmd.h"
@@ -18,17 +17,27 @@ struct dpaa2_mac {
struct net_device *net_dev;
struct fsl_mc_io *mc_io;
struct dpmac_attr attr;
+ u16 ver_major, ver_minor;
+ unsigned long features;
struct phylink_config phylink_config;
struct phylink *phylink;
phy_interface_t if_mode;
enum dpmac_link_type if_link_type;
- struct lynx_pcs *pcs;
+ struct phylink_pcs *pcs;
struct fwnode_handle *fw_node;
+
+ struct phy *serdes_phy;
};
-bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
- struct fsl_mc_io *mc_io);
+static inline bool dpaa2_mac_is_type_phy(struct dpaa2_mac *mac)
+{
+ if (!mac)
+ return false;
+
+ return mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
+ mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE;
+}
int dpaa2_mac_open(struct dpaa2_mac *mac);
@@ -44,4 +53,8 @@ void dpaa2_mac_get_strings(u8 *data);
void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data);
+void dpaa2_mac_start(struct dpaa2_mac *mac);
+
+void dpaa2_mac_stop(struct dpaa2_mac *mac);
+
#endif /* DPAA2_MAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 32b5faa87bb8..90d23ab1ce9d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/msi.h>
#include <linux/fsl/mc.h>
#include "dpaa2-ptp.h"
@@ -129,7 +128,6 @@ static irqreturn_t dpaa2_ptp_irq_handler_thread(int irq, void *priv)
static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
{
struct device *dev = &mc_dev->dev;
- struct fsl_mc_device_irq *irq;
struct ptp_qoriq *ptp_qoriq;
struct device_node *node;
void __iomem *base;
@@ -168,7 +166,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
base = of_iomap(node, 0);
if (!base) {
err = -ENOMEM;
- goto err_close;
+ goto err_put;
}
err = fsl_mc_allocate_irqs(mc_dev);
@@ -177,8 +175,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
goto err_unmap;
}
- irq = mc_dev->irqs[0];
- ptp_qoriq->irq = irq->msi_desc->irq;
+ ptp_qoriq->irq = mc_dev->irqs[0]->virq;
err = request_threaded_irq(ptp_qoriq->irq, NULL,
dpaa2_ptp_irq_handler_thread,
@@ -212,6 +209,8 @@ err_free_mc_irq:
fsl_mc_free_irqs(mc_dev);
err_unmap:
iounmap(base);
+err_put:
+ of_node_put(node);
err_close:
dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
err_free_mcp:
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
index 720c9230cab5..6bc1988be311 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
@@ -60,11 +60,18 @@ dpaa2_switch_get_link_ksettings(struct net_device *netdev,
{
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
struct dpsw_link_state state = {0};
- int err = 0;
+ int err;
+
+ mutex_lock(&port_priv->mac_lock);
+
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ err = phylink_ethtool_ksettings_get(port_priv->mac->phylink,
+ link_ksettings);
+ mutex_unlock(&port_priv->mac_lock);
+ return err;
+ }
- if (dpaa2_switch_port_is_type_phy(port_priv))
- return phylink_ethtool_ksettings_get(port_priv->mac->phylink,
- link_ksettings);
+ mutex_unlock(&port_priv->mac_lock);
err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
port_priv->ethsw_data->dpsw_handle,
@@ -99,9 +106,16 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev,
bool if_running;
int err = 0, ret;
- if (dpaa2_switch_port_is_type_phy(port_priv))
- return phylink_ethtool_ksettings_set(port_priv->mac->phylink,
- link_ksettings);
+ mutex_lock(&port_priv->mac_lock);
+
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ err = phylink_ethtool_ksettings_set(port_priv->mac->phylink,
+ link_ksettings);
+ mutex_unlock(&port_priv->mac_lock);
+ return err;
+ }
+
+ mutex_unlock(&port_priv->mac_lock);
/* Interface needs to be down to change link settings */
if_running = netif_running(netdev);
@@ -145,14 +159,9 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev,
static int
dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset)
{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
- int num_ss_stats = DPAA2_SWITCH_NUM_COUNTERS;
-
switch (sset) {
case ETH_SS_STATS:
- if (port_priv->mac)
- num_ss_stats += dpaa2_mac_get_sset_count();
- return num_ss_stats;
+ return DPAA2_SWITCH_NUM_COUNTERS + dpaa2_mac_get_sset_count();
default:
return -EOPNOTSUPP;
}
@@ -161,7 +170,6 @@ dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset)
static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
- struct ethsw_port_priv *port_priv = netdev_priv(netdev);
u8 *p = data;
int i;
@@ -172,8 +180,7 @@ static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- if (port_priv->mac)
- dpaa2_mac_get_strings(p);
+ dpaa2_mac_get_strings(p);
break;
}
}
@@ -196,8 +203,12 @@ static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev,
dpaa2_switch_ethtool_counters[i].name, err);
}
- if (port_priv->mac)
+ mutex_lock(&port_priv->mac_lock);
+
+ if (dpaa2_switch_port_has_mac(port_priv))
dpaa2_mac_get_ethtool_stats(port_priv->mac, data + i);
+
+ mutex_unlock(&port_priv->mac_lock);
}
const struct ethtool_ops dpaa2_switch_port_ethtool_ops = {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index d6eefbbf163f..c39b866e2582 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -132,6 +132,7 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
dev_err(dev, "DMA mapping failed\n");
+ kfree(cmd_buff);
return -EFAULT;
}
@@ -142,6 +143,7 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
DMA_TO_DEVICE);
if (err) {
dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
+ kfree(cmd_buff);
return err;
}
@@ -172,6 +174,7 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
dev_err(dev, "DMA mapping failed\n");
+ kfree(cmd_buff);
return -EFAULT;
}
@@ -182,6 +185,7 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
DMA_TO_DEVICE);
if (err) {
dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
+ kfree(cmd_buff);
return err;
}
@@ -532,6 +536,7 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
struct netlink_ext_ack *extack = cls->common.extack;
+ int ret = -EOPNOTSUPP;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
@@ -561,9 +566,10 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
}
*vlan = (u16)match.key->vlan_id;
+ ret = 0;
}
- return 0;
+ return ret;
}
static int
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index d039457928b0..f4ae4289c41a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/iommu.h>
@@ -394,7 +393,8 @@ static int dpaa2_switch_dellink(struct ethsw_core *ethsw, u16 vid)
for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
ppriv_local = ethsw->ports[i];
- ppriv_local->vlans[vid] = 0;
+ if (ppriv_local)
+ ppriv_local->vlans[vid] = 0;
}
return 0;
@@ -602,8 +602,11 @@ static int dpaa2_switch_port_link_state_update(struct net_device *netdev)
/* When we manage the MAC/PHY using phylink there is no need
* to manually update the netif_carrier.
+ * We can avoid locking because we are called from the "link changed"
+ * IRQ handler, which is the same as the "endpoint changed" IRQ handler
+ * (the writer to port_priv->mac), so we cannot race with it.
*/
- if (dpaa2_switch_port_is_type_phy(port_priv))
+ if (dpaa2_mac_is_type_phy(port_priv->mac))
return 0;
/* Interrupts are received even though no one issued an 'ifconfig up'
@@ -683,6 +686,8 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
struct ethsw_core *ethsw = port_priv->ethsw_data;
int err;
+ mutex_lock(&port_priv->mac_lock);
+
if (!dpaa2_switch_port_is_type_phy(port_priv)) {
/* Explicitly set carrier off, otherwise
* netif_carrier_ok() will return true and cause 'ip link show'
@@ -696,6 +701,7 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
port_priv->ethsw_data->dpsw_handle,
port_priv->idx);
if (err) {
+ mutex_unlock(&port_priv->mac_lock);
netdev_err(netdev, "dpsw_if_enable err %d\n", err);
return err;
}
@@ -703,7 +709,9 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
dpaa2_switch_enable_ctrl_if_napi(ethsw);
if (dpaa2_switch_port_is_type_phy(port_priv))
- phylink_start(port_priv->mac->phylink);
+ dpaa2_mac_start(port_priv->mac);
+
+ mutex_unlock(&port_priv->mac_lock);
return 0;
}
@@ -714,13 +722,17 @@ static int dpaa2_switch_port_stop(struct net_device *netdev)
struct ethsw_core *ethsw = port_priv->ethsw_data;
int err;
+ mutex_lock(&port_priv->mac_lock);
+
if (dpaa2_switch_port_is_type_phy(port_priv)) {
- phylink_stop(port_priv->mac->phylink);
+ dpaa2_mac_stop(port_priv->mac);
} else {
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
}
+ mutex_unlock(&port_priv->mac_lock);
+
err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
port_priv->ethsw_data->dpsw_handle,
port_priv->idx);
@@ -1449,9 +1461,8 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
err = dpaa2_mac_open(mac);
if (err)
goto err_free_mac;
- port_priv->mac = mac;
- if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ if (dpaa2_mac_is_type_phy(mac)) {
err = dpaa2_mac_connect(mac);
if (err) {
netdev_err(port_priv->netdev,
@@ -1461,11 +1472,14 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
}
}
+ mutex_lock(&port_priv->mac_lock);
+ port_priv->mac = mac;
+ mutex_unlock(&port_priv->mac_lock);
+
return 0;
err_close_mac:
dpaa2_mac_close(mac);
- port_priv->mac = NULL;
err_free_mac:
kfree(mac);
return err;
@@ -1473,15 +1487,21 @@ err_free_mac:
static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
{
- if (dpaa2_switch_port_is_type_phy(port_priv))
- dpaa2_mac_disconnect(port_priv->mac);
+ struct dpaa2_mac *mac;
- if (!dpaa2_switch_port_has_mac(port_priv))
+ mutex_lock(&port_priv->mac_lock);
+ mac = port_priv->mac;
+ port_priv->mac = NULL;
+ mutex_unlock(&port_priv->mac_lock);
+
+ if (!mac)
return;
- dpaa2_mac_close(port_priv->mac);
- kfree(port_priv->mac);
- port_priv->mac = NULL;
+ if (dpaa2_mac_is_type_phy(mac))
+ dpaa2_mac_disconnect(mac);
+
+ dpaa2_mac_close(mac);
+ kfree(mac);
}
static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
@@ -1491,6 +1511,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
struct ethsw_port_priv *port_priv;
u32 status = ~0;
int err, if_id;
+ bool had_mac;
err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
DPSW_IRQ_INDEX_IF, &status);
@@ -1508,12 +1529,15 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
}
if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
- rtnl_lock();
- if (dpaa2_switch_port_has_mac(port_priv))
+ /* We can avoid locking because the "endpoint changed" IRQ
+		 * handler is the only one that changes port_priv->mac at runtime,
+ * so we are not racing with anyone.
+ */
+ had_mac = !!port_priv->mac;
+ if (had_mac)
dpaa2_switch_port_disconnect_mac(port_priv);
else
dpaa2_switch_port_connect_mac(port_priv);
- rtnl_unlock();
}
out:
@@ -1553,8 +1577,7 @@ static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
- err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
- NULL,
+ err = devm_request_threaded_irq(dev, irq->virq, NULL,
dpaa2_switch_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(dev), dev);
@@ -1580,7 +1603,7 @@ static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
return 0;
free_devm_irq:
- devm_free_irq(dev, irq->msi_desc->irq, dev);
+ devm_free_irq(dev, irq->virq, dev);
free_irq:
fsl_mc_free_irqs(sw_dev);
return err;
@@ -1896,9 +1919,11 @@ static int dpaa2_switch_port_del_vlan(struct ethsw_port_priv *port_priv, u16 vid
/* Delete VLAN from switch if it is no longer configured on
* any port
*/
- for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
- if (ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
+ for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
+ if (ethsw->ports[i] &&
+ ethsw->ports[i]->vlans[vid] & ETHSW_VLAN_MEMBER)
return 0; /* Found a port member in VID */
+ }
ethsw->vlans[vid] &= ~ETHSW_VLAN_GLOBAL;
@@ -2930,9 +2955,7 @@ static void dpaa2_switch_remove_port(struct ethsw_core *ethsw,
{
struct ethsw_port_priv *port_priv = ethsw->ports[port_idx];
- rtnl_lock();
dpaa2_switch_port_disconnect_mac(port_priv);
- rtnl_unlock();
free_netdev(port_priv->netdev);
ethsw->ports[port_idx] = NULL;
}
@@ -3251,6 +3274,8 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
port_priv->netdev = port_netdev;
port_priv->ethsw_data = ethsw;
+ mutex_init(&port_priv->mac_lock);
+
port_priv->idx = port_idx;
port_priv->stp_state = BR_STATE_FORWARDING;
@@ -3368,9 +3393,8 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
* different queues for each switch ports.
*/
for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
- netif_napi_add(ethsw->ports[0]->netdev,
- &ethsw->fq[i].napi, dpaa2_switch_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(ethsw->ports[0]->netdev, &ethsw->fq[i].napi,
+ dpaa2_switch_poll);
/* Setup IRQs */
err = dpaa2_switch_setup_irqs(sw_dev);
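The reworked dpaa2_switch_port_disconnect_mac() above follows a common teardown idiom: snapshot the pointer and clear it under the lock, then destroy the object outside the critical section, so readers holding mac_lock only ever see NULL or a fully connected MAC. A hedged userspace sketch of that idiom using pthreads (the types and the free() call are illustrative, not driver code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct port {
	pthread_mutex_t lock;
	void *mac;	/* stands in for struct dpaa2_mac * */
};

static void port_disconnect_mac(struct port *port)
{
	void *mac;

	/* Steal the pointer under the lock... */
	pthread_mutex_lock(&port->lock);
	mac = port->mac;
	port->mac = NULL;
	pthread_mutex_unlock(&port->lock);

	if (!mac)
		return;

	/* ...and tear it down outside the critical section. */
	free(mac);
}

int main(void)
{
	struct port port = { .lock = PTHREAD_MUTEX_INITIALIZER, .mac = malloc(16) };

	port_disconnect_mac(&port);
	port_disconnect_mac(&port);	/* second call sees NULL and returns early */
	printf("mac=%p\n", port.mac);
	return 0;
}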
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
index 0002dca4d417..42b3ca73f55d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
@@ -161,6 +161,8 @@ struct ethsw_port_priv {
struct dpaa2_switch_filter_block *filter_block;
struct dpaa2_mac *mac;
+ /* Protects against changes to port_priv->mac */
+ struct mutex mac_lock;
};
/* Switch data */
@@ -230,12 +232,7 @@ static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
static inline bool
dpaa2_switch_port_is_type_phy(struct ethsw_port_priv *port_priv)
{
- if (port_priv->mac &&
- (port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
- port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
- return true;
-
- return false;
+ return dpaa2_mac_is_type_phy(port_priv->mac);
}
static inline bool dpaa2_switch_port_has_mac(struct ethsw_port_priv *port_priv)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
new file mode 100644
index 000000000000..051748b997f3
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
@@ -0,0 +1,454 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2022 NXP
+ */
+#include <linux/filter.h>
+#include <linux/compiler.h>
+#include <linux/bpf_trace.h>
+#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
+
+#include "dpaa2-eth.h"
+
+static void dpaa2_eth_setup_consume_func(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ enum dpaa2_eth_fq_type type,
+ dpaa2_eth_consume_cb_t *consume)
+{
+ struct dpaa2_eth_fq *fq;
+ int i;
+
+ for (i = 0; i < priv->num_fqs; i++) {
+ fq = &priv->fq[i];
+
+ if (fq->type != type)
+ continue;
+ if (fq->channel != ch)
+ continue;
+
+ fq->consume = consume;
+ }
+}
+
+static u32 dpaa2_xsk_run_xdp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *rx_fq,
+ struct dpaa2_fd *fd, void *vaddr)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff *xdp_buff;
+ struct dpaa2_eth_swa *swa;
+ u32 xdp_act = XDP_PASS;
+ int err;
+
+ xdp_prog = READ_ONCE(ch->xdp.prog);
+ if (!xdp_prog)
+ goto out;
+
+ swa = (struct dpaa2_eth_swa *)(vaddr + DPAA2_ETH_RX_HWA_SIZE +
+ ch->xsk_pool->umem->headroom);
+ xdp_buff = swa->xsk.xdp_buff;
+
+ xdp_buff->data_hard_start = vaddr;
+ xdp_buff->data = vaddr + dpaa2_fd_get_offset(fd);
+ xdp_buff->data_end = xdp_buff->data + dpaa2_fd_get_len(fd);
+ xdp_set_data_meta_invalid(xdp_buff);
+ xdp_buff->rxq = &ch->xdp_rxq;
+
+ xsk_buff_dma_sync_for_cpu(xdp_buff, ch->xsk_pool);
+ xdp_act = bpf_prog_run_xdp(xdp_prog, xdp_buff);
+
+ /* xdp.data pointer may have changed */
+ dpaa2_fd_set_offset(fd, xdp_buff->data - vaddr);
+ dpaa2_fd_set_len(fd, xdp_buff->data_end - xdp_buff->data);
+
+ if (likely(xdp_act == XDP_REDIRECT)) {
+ err = xdp_do_redirect(priv->net_dev, xdp_buff, xdp_prog);
+ if (unlikely(err)) {
+ ch->stats.xdp_drop++;
+ dpaa2_eth_recycle_buf(priv, ch, addr);
+ } else {
+ ch->buf_count--;
+ ch->stats.xdp_redirect++;
+ }
+
+ goto xdp_redir;
+ }
+
+ switch (xdp_act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+ fallthrough;
+ case XDP_DROP:
+ dpaa2_eth_recycle_buf(priv, ch, addr);
+ ch->stats.xdp_drop++;
+ break;
+ }
+
+xdp_redir:
+ ch->xdp.res |= xdp_act;
+out:
+ return xdp_act;
+}
+
+/* Rx frame processing routine for the AF_XDP fast path */
+static void dpaa2_xsk_rx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ struct rtnl_link_stats64 *percpu_stats;
+ u32 fd_length = dpaa2_fd_get_len(fd);
+ struct sk_buff *skb;
+ void *vaddr;
+ u32 xdp_act;
+
+ trace_dpaa2_rx_xsk_fd(priv->net_dev, fd);
+
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ if (fd_format != dpaa2_fd_single) {
+ WARN_ON(priv->xdp_prog);
+ /* AF_XDP doesn't support any other formats */
+ goto err_frame_format;
+ }
+
+ xdp_act = dpaa2_xsk_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
+ if (xdp_act != XDP_PASS) {
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ return;
+ }
+
+ /* Build skb */
+ skb = dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, vaddr);
+ if (!skb)
+ /* Nothing else we can do, recycle the buffer and
+ * drop the frame.
+ */
+ goto err_alloc_skb;
+
+ /* Send the skb to the Linux networking stack */
+ dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
+
+ return;
+
+err_alloc_skb:
+ dpaa2_eth_recycle_buf(priv, ch, addr);
+err_frame_format:
+ percpu_stats->rx_dropped++;
+}
+
+static void dpaa2_xsk_set_bp_per_qdbin(struct dpaa2_eth_priv *priv,
+ struct dpni_pools_cfg *pools_params)
+{
+ int curr_bp = 0, i, j;
+
+ pools_params->pool_options = DPNI_POOL_ASSOC_QDBIN;
+ for (i = 0; i < priv->num_bps; i++) {
+ for (j = 0; j < priv->num_channels; j++)
+ if (priv->bp[i] == priv->channel[j]->bp)
+ pools_params->pools[curr_bp].priority_mask |= (1 << j);
+ if (!pools_params->pools[curr_bp].priority_mask)
+ continue;
+
+ pools_params->pools[curr_bp].dpbp_id = priv->bp[i]->bpid;
+ pools_params->pools[curr_bp].buffer_size = priv->rx_buf_size;
+ pools_params->pools[curr_bp++].backup_pool = 0;
+ }
+ pools_params->num_dpbp = curr_bp;
+}
+
+static int dpaa2_xsk_disable_pool(struct net_device *dev, u16 qid)
+{
+ struct xsk_buff_pool *pool = xsk_get_pool_from_qid(dev, qid);
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpni_pools_cfg pools_params = { 0 };
+ struct dpaa2_eth_channel *ch;
+ int err;
+ bool up;
+
+ ch = priv->channel[qid];
+ if (!ch->xsk_pool)
+ return -EINVAL;
+
+ up = netif_running(dev);
+ if (up)
+ dev_close(dev);
+
+ xsk_pool_dma_unmap(pool, 0);
+ err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq,
+ MEM_TYPE_PAGE_ORDER0, NULL);
+ if (err)
+		netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n",
+ err);
+
+ dpaa2_eth_free_dpbp(priv, ch->bp);
+
+ ch->xsk_zc = false;
+ ch->xsk_pool = NULL;
+ ch->xsk_tx_pkts_sent = 0;
+ ch->bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
+
+ dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_eth_rx);
+
+ dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params);
+ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
+ if (err)
+ netdev_err(dev, "dpni_set_pools() failed\n");
+
+ if (up) {
+ err = dev_open(dev, NULL);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_xsk_enable_pool(struct net_device *dev,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpni_pools_cfg pools_params = { 0 };
+ struct dpaa2_eth_channel *ch;
+ int err, err2;
+ bool up;
+
+ if (priv->dpni_attrs.wriop_version < DPAA2_WRIOP_VERSION(3, 0, 0)) {
+		netdev_err(dev, "AF_XDP zero-copy not supported on devices with WRIOP version < 3.0.0\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->dpni_attrs.num_queues > 8) {
+		netdev_err(dev, "AF_XDP zero-copy not supported on DPNI with more than 8 queues\n");
+ return -EOPNOTSUPP;
+ }
+
+ up = netif_running(dev);
+ if (up)
+ dev_close(dev);
+
+ err = xsk_pool_dma_map(pool, priv->net_dev->dev.parent, 0);
+ if (err) {
+ netdev_err(dev, "xsk_pool_dma_map() failed (err = %d)\n",
+ err);
+ goto err_dma_unmap;
+ }
+
+ ch = priv->channel[qid];
+ err = xdp_rxq_info_reg_mem_model(&ch->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (err) {
+ netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed (err = %d)\n", err);
+ goto err_mem_model;
+ }
+ xsk_pool_set_rxq_info(pool, &ch->xdp_rxq);
+
+ priv->bp[priv->num_bps] = dpaa2_eth_allocate_dpbp(priv);
+ if (IS_ERR(priv->bp[priv->num_bps])) {
+ err = PTR_ERR(priv->bp[priv->num_bps]);
+ goto err_bp_alloc;
+ }
+ ch->xsk_zc = true;
+ ch->xsk_pool = pool;
+ ch->bp = priv->bp[priv->num_bps++];
+
+ dpaa2_eth_setup_consume_func(priv, ch, DPAA2_RX_FQ, dpaa2_xsk_rx);
+
+ dpaa2_xsk_set_bp_per_qdbin(priv, &pools_params);
+ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
+ if (err) {
+ netdev_err(dev, "dpni_set_pools() failed\n");
+ goto err_set_pools;
+ }
+
+ if (up) {
+ err = dev_open(dev, NULL);
+ if (err)
+ return err;
+ }
+
+ return 0;
+
+err_set_pools:
+ err2 = dpaa2_xsk_disable_pool(dev, qid);
+ if (err2)
+ netdev_err(dev, "dpaa2_xsk_disable_pool() failed %d\n", err2);
+err_bp_alloc:
+ err2 = xdp_rxq_info_reg_mem_model(&priv->channel[qid]->xdp_rxq,
+ MEM_TYPE_PAGE_ORDER0, NULL);
+ if (err2)
+		netdev_err(dev, "xdp_rxq_info_reg_mem_model() failed with %d\n", err2);
+err_mem_model:
+ xsk_pool_dma_unmap(pool, 0);
+err_dma_unmap:
+ if (up)
+ dev_open(dev, NULL);
+
+ return err;
+}
+
+int dpaa2_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid)
+{
+ return pool ? dpaa2_xsk_enable_pool(dev, pool, qid) :
+ dpaa2_xsk_disable_pool(dev, qid);
+}
+
+int dpaa2_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_eth_channel *ch = priv->channel[qid];
+
+ if (!priv->link_state.up)
+ return -ENETDOWN;
+
+ if (!priv->xdp_prog)
+ return -EINVAL;
+
+ if (!ch->xsk_zc)
+ return -EINVAL;
+
+ /* We do not have access to a per channel SW interrupt, so instead we
+ * schedule a NAPI instance.
+ */
+ if (!napi_if_scheduled_mark_missed(&ch->napi))
+ napi_schedule(&ch->napi);
+
+ return 0;
+}
+
+static int dpaa2_xsk_tx_build_fd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ struct xdp_desc *xdp_desc)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpaa2_sg_entry *sgt;
+ struct dpaa2_eth_swa *swa;
+ void *sgt_buf = NULL;
+ dma_addr_t sgt_addr;
+ int sgt_buf_size;
+ dma_addr_t addr;
+ int err = 0;
+
+ /* Prepare the HW SGT structure */
+ sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
+ sgt_buf = dpaa2_eth_sgt_get(priv);
+ if (unlikely(!sgt_buf))
+ return -ENOMEM;
+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+ /* Get the address of the XSK Tx buffer */
+ addr = xsk_buff_raw_get_dma(ch->xsk_pool, xdp_desc->addr);
+ xsk_buff_raw_dma_sync_for_device(ch->xsk_pool, addr, xdp_desc->len);
+
+ /* Fill in the HW SGT structure */
+ dpaa2_sg_set_addr(sgt, addr);
+ dpaa2_sg_set_len(sgt, xdp_desc->len);
+ dpaa2_sg_set_final(sgt, true);
+
+ /* Store the necessary info in the SGT buffer */
+ swa = (struct dpaa2_eth_swa *)sgt_buf;
+ swa->type = DPAA2_ETH_SWA_XSK;
+ swa->xsk.sgt_size = sgt_buf_size;
+
+ /* Separately map the SGT buffer */
+ sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, sgt_addr))) {
+ err = -ENOMEM;
+ goto sgt_map_failed;
+ }
+
+ /* Initialize FD fields */
+ memset(fd, 0, sizeof(struct dpaa2_fd));
+ dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+ dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+ dpaa2_fd_set_addr(fd, sgt_addr);
+ dpaa2_fd_set_len(fd, xdp_desc->len);
+ dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+ return 0;
+
+sgt_map_failed:
+ dpaa2_eth_sgt_recycle(priv, sgt_buf);
+
+ return err;
+}
+
+bool dpaa2_xsk_tx(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch)
+{
+ struct xdp_desc *xdp_descs = ch->xsk_pool->tx_descs;
+ struct dpaa2_eth_drv_stats *percpu_extras;
+ struct rtnl_link_stats64 *percpu_stats;
+ int budget = DPAA2_ETH_TX_ZC_PER_NAPI;
+ int total_enqueued, enqueued;
+ int retries, max_retries;
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_fd *fds;
+ int batch, i, err;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_extras = this_cpu_ptr(priv->percpu_extras);
+ fds = (this_cpu_ptr(priv->fd))->array;
+
+ /* Use the FQ with the same idx as the affine CPU */
+ fq = &priv->fq[ch->nctx.desired_cpu];
+
+ batch = xsk_tx_peek_release_desc_batch(ch->xsk_pool, budget);
+ if (!batch)
+ return false;
+
+ /* Create a FD for each XSK frame to be sent */
+ for (i = 0; i < batch; i++) {
+ err = dpaa2_xsk_tx_build_fd(priv, ch, &fds[i], &xdp_descs[i]);
+ if (err) {
+ batch = i;
+ break;
+ }
+
+ trace_dpaa2_tx_xsk_fd(priv->net_dev, &fds[i]);
+ }
+
+ /* Enqueue all the created FDs */
+ max_retries = batch * DPAA2_ETH_ENQUEUE_RETRIES;
+ total_enqueued = 0;
+ enqueued = 0;
+ retries = 0;
+ while (total_enqueued < batch && retries < max_retries) {
+ err = priv->enqueue(priv, fq, &fds[total_enqueued], 0,
+ batch - total_enqueued, &enqueued);
+ if (err == -EBUSY) {
+ retries++;
+ continue;
+ }
+
+ total_enqueued += enqueued;
+ }
+ percpu_extras->tx_portal_busy += retries;
+
+ /* Update statistics */
+ percpu_stats->tx_packets += total_enqueued;
+ for (i = 0; i < total_enqueued; i++)
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
+ for (i = total_enqueued; i < batch; i++) {
+ dpaa2_eth_free_tx_fd(priv, ch, fq, &fds[i], false);
+ percpu_stats->tx_errors++;
+ }
+
+ xsk_tx_release(ch->xsk_pool);
+
+ return total_enqueued == budget;
+}
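The zero-copy Tx routine above enqueues a whole batch of frame descriptors and retries partial enqueues until the batch is accepted or a retry budget proportional to the batch size is used up. A standalone sketch of that loop structure, with a fake enqueue callback standing in for priv->enqueue():

#include <stdio.h>

#define ENQUEUE_RETRIES 10

/* Stand-in portal: accepts at most 3 frames per call and never reports
 * -EBUSY, so the retry branch below stays idle in this toy run.
 */
static int fake_enqueue(int requested, int *enqueued)
{
	*enqueued = requested < 3 ? requested : 3;
	return 0;
}

int main(void)
{
	int batch = 8, total_enqueued = 0, enqueued = 0;
	int retries = 0, max_retries = batch * ENQUEUE_RETRIES;

	while (total_enqueued < batch && retries < max_retries) {
		int err = fake_enqueue(batch - total_enqueued, &enqueued);

		if (err == -16) {	/* -EBUSY in the driver */
			retries++;
			continue;
		}
		total_enqueued += enqueued;
	}

	printf("enqueued %d of %d frames, %d retries\n",
	       total_enqueued, batch, retries);
	return 0;
}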
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
index a24b20f76938..e9ac2ecef3be 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h
@@ -19,11 +19,15 @@
#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
+#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c)
+
#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD_V2(0x0c3)
#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
+#define DPMAC_CMDID_SET_PROTOCOL DPMAC_CMD(0x0c7)
+
/* Macros for accessing command fields smaller than 1byte */
#define DPMAC_MASK(field) \
GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
@@ -70,4 +74,12 @@ struct dpmac_rsp_get_counter {
__le64 counter;
};
+struct dpmac_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+struct dpmac_cmd_set_protocol {
+ u8 eth_if;
+};
#endif /* _FSL_DPMAC_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.c b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
index d5997b654562..f440a4c3b70c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.c
@@ -181,3 +181,57 @@ int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
return 0;
}
+
+/**
+ * dpmac_get_api_version() - Get Data Path MAC version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path mac API
+ * @minor_ver: Minor version of data path mac API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct dpmac_rsp_get_api_version *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
+
+/**
+ * dpmac_set_protocol() - Reconfigure the DPMAC protocol
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @protocol: New protocol the DPMAC should be reconfigured to use.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_eth_if protocol)
+{
+ struct dpmac_cmd_set_protocol *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PROTOCOL,
+ cmd_flags, token);
+ cmd_params = (struct dpmac_cmd_set_protocol *)cmd.params;
+ cmd_params->eth_if = protocol;
+
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac.h b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
index 8f7ceb731282..17488819ef68 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpmac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpmac.h
@@ -205,4 +205,9 @@ enum dpmac_counter_id {
int dpmac_get_counter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
enum dpmac_counter_id id, u64 *value);
+int dpmac_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+int dpmac_set_protocol(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ enum dpmac_eth_if protocol);
#endif /* __FSL_DPMAC_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 9f80bdfeedec..be9492b8d5dc 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -13,10 +13,12 @@
#define DPNI_VER_MINOR 0
#define DPNI_CMD_BASE_VERSION 1
#define DPNI_CMD_2ND_VERSION 2
+#define DPNI_CMD_3RD_VERSION 3
#define DPNI_CMD_ID_OFFSET 4
#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION)
#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_2ND_VERSION)
+#define DPNI_CMD_V3(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_3RD_VERSION)
#define DPNI_CMDID_OPEN DPNI_CMD(0x801)
#define DPNI_CMDID_CLOSE DPNI_CMD(0x800)
@@ -39,7 +41,7 @@
#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016)
#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017)
-#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200)
+#define DPNI_CMDID_SET_POOLS DPNI_CMD_V3(0x200)
#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B)
#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210)
@@ -98,7 +100,7 @@
#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
#define DPNI_CMDID_SET_SINGLE_STEP_CFG DPNI_CMD(0x279)
-#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD(0x27a)
+#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD_V2(0x27a)
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
@@ -115,14 +117,19 @@ struct dpni_cmd_open {
};
#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order))
+
+struct dpni_cmd_pool {
+ __le16 dpbp_id;
+ u8 priority_mask;
+ u8 pad;
+};
+
struct dpni_cmd_set_pools {
- /* cmd word 0 */
u8 num_dpbp;
u8 backup_pool_mask;
- __le16 pad;
- /* cmd word 0..4 */
- __le32 dpbp_id[DPNI_MAX_DPBP];
- /* cmd word 4..6 */
+ u8 pad;
+ u8 pool_options;
+ struct dpni_cmd_pool pool[DPNI_MAX_DPBP];
__le16 buffer_size[DPNI_MAX_DPBP];
};
@@ -658,12 +665,16 @@ struct dpni_cmd_single_step_cfg {
__le16 flags;
__le16 offset;
__le32 peer_delay;
+ __le32 ptp_onestep_reg_base;
+ __le32 pad0;
};
struct dpni_rsp_single_step_cfg {
__le16 flags;
__le16 offset;
__le32 peer_delay;
+ __le32 ptp_onestep_reg_base;
+ __le32 pad0;
};
struct dpni_cmd_enable_vlan_filter {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index d6afada99fb6..02601a283b59 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -173,8 +173,12 @@ int dpni_set_pools(struct fsl_mc_io *mc_io,
token);
cmd_params = (struct dpni_cmd_set_pools *)cmd.params;
cmd_params->num_dpbp = cfg->num_dpbp;
+ cmd_params->pool_options = cfg->pool_options;
for (i = 0; i < DPNI_MAX_DPBP; i++) {
- cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id);
+ cmd_params->pool[i].dpbp_id =
+ cpu_to_le16(cfg->pools[i].dpbp_id);
+ cmd_params->pool[i].priority_mask =
+ cfg->pools[i].priority_mask;
cmd_params->buffer_size[i] =
cpu_to_le16(cfg->pools[i].buffer_size);
cmd_params->backup_pool_mask |=
@@ -2136,6 +2140,8 @@ int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
ptp_cfg->ch_update = dpni_get_field(le16_to_cpu(rsp_params->flags),
PTP_CH_UPDATE) ? 1 : 0;
ptp_cfg->peer_delay = le32_to_cpu(rsp_params->peer_delay);
+ ptp_cfg->ptp_onestep_reg_base =
+ le32_to_cpu(rsp_params->ptp_onestep_reg_base);
return err;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index 7de0562bbf59..5c0a1d5ac934 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -92,19 +92,28 @@ int dpni_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
+#define DPNI_POOL_ASSOC_QPRI 0
+#define DPNI_POOL_ASSOC_QDBIN 1
+
/**
* struct dpni_pools_cfg - Structure representing buffer pools configuration
* @num_dpbp: Number of DPBPs
+ * @pool_options: Buffer assignment options.
+ *	This field is a combination of the DPNI_POOL_ASSOC_* flags
* @pools: Array of buffer pools parameters; The number of valid entries
* must match 'num_dpbp' value
* @pools.dpbp_id: DPBP object ID
+ * @pools.priority_mask: Priority mask that indicates the TCs used with this
+ *		buffer pool. If set to 0x00, the MC will assume the value 0xff.
* @pools.buffer_size: Buffer size
* @pools.backup_pool: Backup pool
*/
struct dpni_pools_cfg {
u8 num_dpbp;
+ u8 pool_options;
struct {
int dpbp_id;
+ u8 priority_mask;
u16 buffer_size;
int backup_pool;
} pools[DPNI_MAX_DPBP];
@@ -1074,12 +1083,18 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
* @peer_delay: For peer-to-peer transparent clocks add this value to the
* correction field in addition to the transient time update.
* The value expresses nanoseconds.
+ * @ptp_onestep_reg_base: 1588 SINGLE_STEP register base address. This address
+ *			  is used to update the register contents directly.
+ *			  The user has to create an address mapping for it.
+ *
+ *
*/
struct dpni_single_step_cfg {
u8 en;
u8 ch_update;
u16 offset;
u32 peer_delay;
+ u32 ptp_onestep_reg_base;
};
int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
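With DPNI_POOL_ASSOC_QDBIN, each entry in dpni_pools_cfg carries a bitmask of the queues (channels) that draw buffers from that pool, as dpaa2_xsk_set_bp_per_qdbin() builds it in the new dpaa2-xsk.c. A standalone sketch of assembling such masks for a hypothetical channel-to-pool assignment:

#include <stdio.h>

#define NUM_CHANNELS 4

int main(void)
{
	/* Hypothetical assignment: channel index -> buffer pool index */
	int chan_to_pool[NUM_CHANNELS] = { 0, 0, 1, 0 };
	int num_pools = 2;

	for (int pool = 0; pool < num_pools; pool++) {
		unsigned char priority_mask = 0;

		for (int chan = 0; chan < NUM_CHANNELS; chan++)
			if (chan_to_pool[chan] == pool)
				priority_mask |= 1 << chan;

		printf("pool %d: priority_mask=0x%02x\n", pool, priority_mask);
	}
	return 0;
}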
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index cdc0ff89388a..4d75e6807e92 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -1,7 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
+config FSL_ENETC_CORE
+ tristate
+ help
+ This module supports common functionality between the PF and VF
+ drivers for the NXP ENETC controller.
+
+ If compiled as module (M), the module name is fsl-enetc-core.
+
config FSL_ENETC
tristate "ENETC PF driver"
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
+ select MDIO_DEVRES
+ select FSL_ENETC_CORE
select FSL_ENETC_IERB
select FSL_ENETC_MDIO
select PHYLINK
@@ -16,7 +26,8 @@ config FSL_ENETC
config FSL_ENETC_VF
tristate "ENETC VF driver"
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
+ select FSL_ENETC_CORE
select FSL_ENETC_MDIO
select PHYLINK
select DIMLIB
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index a139f2e9d59f..b13cbbabb2ea 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -1,15 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
-common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
+obj-$(CONFIG_FSL_ENETC_CORE) += fsl-enetc-core.o
+fsl-enetc-core-y := enetc.o enetc_cbdr.o enetc_ethtool.o
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
-fsl-enetc-y := enetc_pf.o $(common-objs)
+fsl-enetc-y := enetc_pf.o
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
-fsl-enetc-vf-y := enetc_vf.o $(common-objs)
-fsl-enetc-vf-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
+fsl-enetc-vf-y := enetc_vf.o
obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
fsl-enetc-ierb-y := enetc_ierb.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 504e12554079..3c4fa26f0f9b 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -11,14 +11,33 @@
#include <net/pkt_sched.h>
#include <net/tso.h>
+u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg)
+{
+ return enetc_port_rd(&si->hw, reg);
+}
+EXPORT_SYMBOL_GPL(enetc_port_mac_rd);
+
+void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val)
+{
+ enetc_port_wr(&si->hw, reg, val);
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_port_wr(&si->hw, reg + ENETC_PMAC_OFFSET, val);
+}
+EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
+
+static void enetc_change_preemptible_tcs(struct enetc_ndev_priv *priv,
+ u8 preemptible_tcs)
+{
+ priv->preemptible_tcs = preemptible_tcs;
+ enetc_mm_commit_preemptible_tcs(priv);
+}
+
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
int num_tx_rings = priv->num_tx_rings;
- int i;
- for (i = 0; i < priv->num_rx_rings; i++)
- if (priv->rx_ring[i]->xdp.prog)
- return num_tx_rings - num_possible_cpus();
+ if (priv->xdp_prog)
+ return num_tx_rings - num_possible_cpus();
return num_tx_rings;
}
@@ -172,7 +191,8 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
}
tx_swbd->do_twostep_tstamp = do_twostep_tstamp;
- tx_swbd->check_wb = tx_swbd->do_twostep_tstamp;
+ tx_swbd->qbv_en = !!(priv->active_offloads & ENETC_F_QBV);
+ tx_swbd->check_wb = tx_swbd->do_twostep_tstamp || tx_swbd->qbv_en;
if (do_vlan || do_onestep_tstamp || do_twostep_tstamp)
flags |= ENETC_TXBD_FLAGS_EX;
@@ -242,8 +262,8 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
if (udp)
val |= ENETC_PM0_SINGLE_STEP_CH;
- enetc_port_wr(hw, ENETC_PM0_SINGLE_STEP, val);
- enetc_port_wr(hw, ENETC_PM1_SINGLE_STEP, val);
+ enetc_port_mac_wr(priv->si, ENETC_PM0_SINGLE_STEP,
+ val);
} else if (do_twostep_tstamp) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
@@ -650,6 +670,7 @@ netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
return enetc_start_xmit(skb, ndev);
}
+EXPORT_SYMBOL_GPL(enetc_xmit);
static irqreturn_t enetc_msix(int irq, void *data)
{
@@ -792,9 +813,9 @@ static void enetc_recycle_xdp_tx_buff(struct enetc_bdr *tx_ring,
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
+ int tx_frm_cnt = 0, tx_byte_cnt = 0, tx_win_drop = 0;
struct net_device *ndev = tx_ring->ndev;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int tx_frm_cnt = 0, tx_byte_cnt = 0;
struct enetc_tx_swbd *tx_swbd;
int i, bds_to_clean;
bool do_twostep_tstamp;
@@ -821,6 +842,10 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
&tstamp);
do_twostep_tstamp = true;
}
+
+ if (tx_swbd->qbv_en &&
+ txbd->wb.status & ENETC_TXBD_STATS_WIN)
+ tx_win_drop++;
}
if (tx_swbd->is_xdp_tx)
@@ -873,6 +898,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
tx_ring->next_to_clean = i;
tx_ring->stats.packets += tx_frm_cnt;
tx_ring->stats.bytes += tx_byte_cnt;
+ tx_ring->stats.win_drop += tx_win_drop;
if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
__netif_subqueue_stopped(ndev, tx_ring->index) &&
@@ -1299,6 +1325,10 @@ static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
xdp_tx_swbd->xdp_frame = NULL;
n++;
+
+ if (!xdp_frame_has_frags(xdp_frame))
+ goto out;
+
xdp_tx_swbd = &xdp_tx_arr[n];
shinfo = xdp_get_shared_info_from_frame(xdp_frame);
@@ -1328,7 +1358,7 @@ static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
n++;
xdp_tx_swbd = &xdp_tx_arr[n];
}
-
+out:
xdp_tx_arr[n - 1].is_eof = true;
xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
@@ -1378,22 +1408,19 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
return xdp_tx_frm_cnt;
}
+EXPORT_SYMBOL_GPL(enetc_xdp_xmit);
static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
struct xdp_buff *xdp_buff, u16 size)
{
struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;
- struct skb_shared_info *shinfo;
/* To be used for XDP_TX */
rx_swbd->len = size;
xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
rx_ring->buffer_offset, size, false);
-
- shinfo = xdp_get_shared_info_from_buff(xdp_buff);
- shinfo->nr_frags = 0;
}
static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
@@ -1401,11 +1428,23 @@ static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
{
struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
- skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];
+ skb_frag_t *frag;
/* To be used for XDP_TX */
rx_swbd->len = size;
+ if (!xdp_buff_has_frags(xdp_buff)) {
+ xdp_buff_set_frags_flag(xdp_buff);
+ shinfo->xdp_frags_size = size;
+ shinfo->nr_frags = 0;
+ } else {
+ shinfo->xdp_frags_size += size;
+ }
+
+ if (page_is_pfmemalloc(rx_swbd->page))
+ xdp_buff_set_frag_pfmemalloc(xdp_buff);
+
+ frag = &shinfo->frags[shinfo->nr_frags];
skb_frag_off_set(frag, rx_swbd->page_offset);
skb_frag_size_set(frag, size);
__skb_frag_set_page(frag, rx_swbd->page);
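
enetc_add_rx_buff_to_xdp() now grows a multi-buffer xdp_buff instead of relying on a pre-zeroed nr_frags: the first chained buffer sets the frags flag and resets the shared info, later buffers only extend xdp_frags_size, and pfmemalloc pages taint the whole buffer. A condensed, driver-agnostic sketch of the same pattern using the generic XDP helpers (page and offset stand in for the driver's software BD bookkeeping):

/* Sketch: append one Rx buffer of 'size' bytes as a fragment of an
 * xdp_buff that may become multi-buffer.
 */
static void example_xdp_add_frag(struct xdp_buff *xdp, struct page *page,
                                 unsigned int offset, unsigned int size)
{
        struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
        skb_frag_t *frag;

        if (!xdp_buff_has_frags(xdp)) {
                xdp_buff_set_frags_flag(xdp);
                shinfo->nr_frags = 0;
                shinfo->xdp_frags_size = size;
        } else {
                shinfo->xdp_frags_size += size;
        }

        if (page_is_pfmemalloc(page))
                xdp_buff_set_frag_pfmemalloc(xdp);

        frag = &shinfo->frags[shinfo->nr_frags++];
        skb_frag_off_set(frag, offset);
        skb_frag_size_set(frag, size);
        __skb_frag_set_page(frag, page);
}
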
@@ -1483,23 +1522,6 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first,
rx_ring->stats.xdp_drops++;
}
-static void enetc_xdp_free(struct enetc_bdr *rx_ring, int rx_ring_first,
- int rx_ring_last)
-{
- while (rx_ring_first != rx_ring_last) {
- struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[rx_ring_first];
-
- if (rx_swbd->page) {
- dma_unmap_page(rx_ring->dev, rx_swbd->dma, PAGE_SIZE,
- rx_swbd->dir);
- __free_page(rx_swbd->page);
- rx_swbd->page = NULL;
- }
- enetc_bdr_idx_inc(rx_ring, &rx_ring_first);
- }
- rx_ring->stats.xdp_redirect_failures++;
-}
-
static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
struct napi_struct *napi, int work_limit,
struct bpf_prog *prog)
@@ -1521,8 +1543,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
int orig_i, orig_cleaned_cnt;
struct xdp_buff xdp_buff;
struct sk_buff *skb;
- int tmp_orig_i, err;
u32 bd_status;
+ int err;
rxbd = enetc_rxbd(rx_ring, i);
bd_status = le32_to_cpu(rxbd->r.lstatus);
@@ -1547,7 +1569,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
switch (xdp_act) {
default:
- bpf_warn_invalid_xdp_action(xdp_act);
+ bpf_warn_invalid_xdp_action(rx_ring->ndev, prog, xdp_act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->ndev, prog, xdp_act);
@@ -1595,32 +1617,16 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
}
break;
case XDP_REDIRECT:
- /* xdp_return_frame does not support S/G in the sense
- * that it leaks the fragments (__xdp_return should not
- * call page_frag_free only for the initial buffer).
- * Until XDP_REDIRECT gains support for S/G let's keep
- * the code structure in place, but dead. We drop the
- * S/G frames ourselves to avoid memory leaks which
- * would otherwise leave the kernel OOM.
- */
- if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) {
- enetc_xdp_drop(rx_ring, orig_i, i);
- rx_ring->stats.xdp_redirect_sg++;
- break;
- }
-
- tmp_orig_i = orig_i;
-
- while (orig_i != i) {
- enetc_flip_rx_buff(rx_ring,
- &rx_ring->rx_swbd[orig_i]);
- enetc_bdr_idx_inc(rx_ring, &orig_i);
- }
-
err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
if (unlikely(err)) {
- enetc_xdp_free(rx_ring, tmp_orig_i, i);
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_redirect_failures++;
} else {
+ while (orig_i != i) {
+ enetc_flip_rx_buff(rx_ring,
+ &rx_ring->rx_swbd[orig_i]);
+ enetc_bdr_idx_inc(rx_ring, &orig_i);
+ }
xdp_redirect_frm_cnt++;
rx_ring->stats.xdp_redirect++;
}
@@ -1726,204 +1732,263 @@ void enetc_get_si_caps(struct enetc_si *si)
if (val & ENETC_SIPCAPR0_QBV)
si->hw_features |= ENETC_SI_F_QBV;
+ if (val & ENETC_SIPCAPR0_QBU)
+ si->hw_features |= ENETC_SI_F_QBU;
+
if (val & ENETC_SIPCAPR0_PSFP)
si->hw_features |= ENETC_SI_F_PSFP;
}
+EXPORT_SYMBOL_GPL(enetc_get_si_caps);
-static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
+static int enetc_dma_alloc_bdr(struct enetc_bdr_resource *res)
{
- r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
- &r->bd_dma_base, GFP_KERNEL);
- if (!r->bd_base)
+ size_t bd_base_size = res->bd_count * res->bd_size;
+
+ res->bd_base = dma_alloc_coherent(res->dev, bd_base_size,
+ &res->bd_dma_base, GFP_KERNEL);
+ if (!res->bd_base)
return -ENOMEM;
/* h/w requires 128B alignment */
- if (!IS_ALIGNED(r->bd_dma_base, 128)) {
- dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
- r->bd_dma_base);
+ if (!IS_ALIGNED(res->bd_dma_base, 128)) {
+ dma_free_coherent(res->dev, bd_base_size, res->bd_base,
+ res->bd_dma_base);
return -EINVAL;
}
return 0;
}
-static int enetc_alloc_txbdr(struct enetc_bdr *txr)
+static void enetc_dma_free_bdr(const struct enetc_bdr_resource *res)
+{
+ size_t bd_base_size = res->bd_count * res->bd_size;
+
+ dma_free_coherent(res->dev, bd_base_size, res->bd_base,
+ res->bd_dma_base);
+}
+
+static int enetc_alloc_tx_resource(struct enetc_bdr_resource *res,
+ struct device *dev, size_t bd_count)
{
int err;
- txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
- if (!txr->tx_swbd)
+ res->dev = dev;
+ res->bd_count = bd_count;
+ res->bd_size = sizeof(union enetc_tx_bd);
+
+ res->tx_swbd = vzalloc(bd_count * sizeof(*res->tx_swbd));
+ if (!res->tx_swbd)
return -ENOMEM;
- err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
+ err = enetc_dma_alloc_bdr(res);
if (err)
goto err_alloc_bdr;
- txr->tso_headers = dma_alloc_coherent(txr->dev,
- txr->bd_count * TSO_HEADER_SIZE,
- &txr->tso_headers_dma,
+ res->tso_headers = dma_alloc_coherent(dev, bd_count * TSO_HEADER_SIZE,
+ &res->tso_headers_dma,
GFP_KERNEL);
- if (!txr->tso_headers) {
+ if (!res->tso_headers) {
err = -ENOMEM;
goto err_alloc_tso;
}
- txr->next_to_clean = 0;
- txr->next_to_use = 0;
-
return 0;
err_alloc_tso:
- dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd),
- txr->bd_base, txr->bd_dma_base);
- txr->bd_base = NULL;
+ enetc_dma_free_bdr(res);
err_alloc_bdr:
- vfree(txr->tx_swbd);
- txr->tx_swbd = NULL;
+ vfree(res->tx_swbd);
+ res->tx_swbd = NULL;
return err;
}
-static void enetc_free_txbdr(struct enetc_bdr *txr)
+static void enetc_free_tx_resource(const struct enetc_bdr_resource *res)
{
- int size, i;
-
- for (i = 0; i < txr->bd_count; i++)
- enetc_free_tx_frame(txr, &txr->tx_swbd[i]);
-
- size = txr->bd_count * sizeof(union enetc_tx_bd);
-
- dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE,
- txr->tso_headers, txr->tso_headers_dma);
- txr->tso_headers = NULL;
-
- dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
- txr->bd_base = NULL;
-
- vfree(txr->tx_swbd);
- txr->tx_swbd = NULL;
+ dma_free_coherent(res->dev, res->bd_count * TSO_HEADER_SIZE,
+ res->tso_headers, res->tso_headers_dma);
+ enetc_dma_free_bdr(res);
+ vfree(res->tx_swbd);
}
-static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
+static struct enetc_bdr_resource *
+enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
{
+ struct enetc_bdr_resource *tx_res;
int i, err;
+ tx_res = kcalloc(priv->num_tx_rings, sizeof(*tx_res), GFP_KERNEL);
+ if (!tx_res)
+ return ERR_PTR(-ENOMEM);
+
for (i = 0; i < priv->num_tx_rings; i++) {
- err = enetc_alloc_txbdr(priv->tx_ring[i]);
+ struct enetc_bdr *tx_ring = priv->tx_ring[i];
+ err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev,
+ tx_ring->bd_count);
if (err)
goto fail;
}
- return 0;
+ return tx_res;
fail:
while (i-- > 0)
- enetc_free_txbdr(priv->tx_ring[i]);
+ enetc_free_tx_resource(&tx_res[i]);
- return err;
+ kfree(tx_res);
+
+ return ERR_PTR(err);
}
-static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
+static void enetc_free_tx_resources(const struct enetc_bdr_resource *tx_res,
+ size_t num_resources)
{
- int i;
+ size_t i;
- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_free_txbdr(priv->tx_ring[i]);
+ for (i = 0; i < num_resources; i++)
+ enetc_free_tx_resource(&tx_res[i]);
+
+ kfree(tx_res);
}
-static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
+static int enetc_alloc_rx_resource(struct enetc_bdr_resource *res,
+ struct device *dev, size_t bd_count,
+ bool extended)
{
- size_t size = sizeof(union enetc_rx_bd);
int err;
- rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
- if (!rxr->rx_swbd)
- return -ENOMEM;
-
+ res->dev = dev;
+ res->bd_count = bd_count;
+ res->bd_size = sizeof(union enetc_rx_bd);
if (extended)
- size *= 2;
+ res->bd_size *= 2;
+
+ res->rx_swbd = vzalloc(bd_count * sizeof(struct enetc_rx_swbd));
+ if (!res->rx_swbd)
+ return -ENOMEM;
- err = enetc_dma_alloc_bdr(rxr, size);
+ err = enetc_dma_alloc_bdr(res);
if (err) {
- vfree(rxr->rx_swbd);
+ vfree(res->rx_swbd);
return err;
}
- rxr->next_to_clean = 0;
- rxr->next_to_use = 0;
- rxr->next_to_alloc = 0;
- rxr->ext_en = extended;
-
return 0;
}
-static void enetc_free_rxbdr(struct enetc_bdr *rxr)
+static void enetc_free_rx_resource(const struct enetc_bdr_resource *res)
{
- int size;
-
- size = rxr->bd_count * sizeof(union enetc_rx_bd);
-
- dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
- rxr->bd_base = NULL;
-
- vfree(rxr->rx_swbd);
- rxr->rx_swbd = NULL;
+ enetc_dma_free_bdr(res);
+ vfree(res->rx_swbd);
}
-static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
+static struct enetc_bdr_resource *
+enetc_alloc_rx_resources(struct enetc_ndev_priv *priv, bool extended)
{
- bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
+ struct enetc_bdr_resource *rx_res;
int i, err;
+ rx_res = kcalloc(priv->num_rx_rings, sizeof(*rx_res), GFP_KERNEL);
+ if (!rx_res)
+ return ERR_PTR(-ENOMEM);
+
for (i = 0; i < priv->num_rx_rings; i++) {
- err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
+ struct enetc_bdr *rx_ring = priv->rx_ring[i];
+ err = enetc_alloc_rx_resource(&rx_res[i], rx_ring->dev,
+ rx_ring->bd_count, extended);
if (err)
goto fail;
}
- return 0;
+ return rx_res;
fail:
while (i-- > 0)
- enetc_free_rxbdr(priv->rx_ring[i]);
+ enetc_free_rx_resource(&rx_res[i]);
- return err;
+ kfree(rx_res);
+
+ return ERR_PTR(err);
+}
+
+static void enetc_free_rx_resources(const struct enetc_bdr_resource *rx_res,
+ size_t num_resources)
+{
+ size_t i;
+
+ for (i = 0; i < num_resources; i++)
+ enetc_free_rx_resource(&rx_res[i]);
+
+ kfree(rx_res);
}
-static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
+static void enetc_assign_tx_resource(struct enetc_bdr *tx_ring,
+ const struct enetc_bdr_resource *res)
+{
+ tx_ring->bd_base = res ? res->bd_base : NULL;
+ tx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
+ tx_ring->tx_swbd = res ? res->tx_swbd : NULL;
+ tx_ring->tso_headers = res ? res->tso_headers : NULL;
+ tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0;
+}
+
+static void enetc_assign_rx_resource(struct enetc_bdr *rx_ring,
+ const struct enetc_bdr_resource *res)
+{
+ rx_ring->bd_base = res ? res->bd_base : NULL;
+ rx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
+ rx_ring->rx_swbd = res ? res->rx_swbd : NULL;
+}
+
+static void enetc_assign_tx_resources(struct enetc_ndev_priv *priv,
+ const struct enetc_bdr_resource *res)
{
int i;
- for (i = 0; i < priv->num_rx_rings; i++)
- enetc_free_rxbdr(priv->rx_ring[i]);
+ if (priv->tx_res)
+ enetc_free_tx_resources(priv->tx_res, priv->num_tx_rings);
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ enetc_assign_tx_resource(priv->tx_ring[i],
+ res ? &res[i] : NULL);
+ }
+
+ priv->tx_res = res;
}
-static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
+static void enetc_assign_rx_resources(struct enetc_ndev_priv *priv,
+ const struct enetc_bdr_resource *res)
{
int i;
- if (!tx_ring->tx_swbd)
- return;
+ if (priv->rx_res)
+ enetc_free_rx_resources(priv->rx_res, priv->num_rx_rings);
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ enetc_assign_rx_resource(priv->rx_ring[i],
+ res ? &res[i] : NULL);
+ }
+
+ priv->rx_res = res;
+}
+
+static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
+{
+ int i;
for (i = 0; i < tx_ring->bd_count; i++) {
struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
enetc_free_tx_frame(tx_ring, tx_swbd);
}
-
- tx_ring->next_to_clean = 0;
- tx_ring->next_to_use = 0;
}
static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
{
int i;
- if (!rx_ring->rx_swbd)
- return;
-
for (i = 0; i < rx_ring->bd_count; i++) {
struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
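
The block above replaces the old per-ring alloc/free helpers with a two-phase model: enetc_alloc_tx_resources()/enetc_alloc_rx_resources() build a parallel array of struct enetc_bdr_resource without touching the live rings, and enetc_assign_*_resources() later commits the array to the rings, freeing whatever was previously assigned. The payoff is that an allocation failure can no longer leave a running interface half torn down. A minimal sketch of the sequence, assuming it runs under RTNL like the callers in this patch:

/* Sketch: allocate new ring resources first, commit only on full success. */
static int example_replace_ring_resources(struct enetc_ndev_priv *priv,
                                          bool extended)
{
        struct enetc_bdr_resource *tx_res, *rx_res;

        tx_res = enetc_alloc_tx_resources(priv);
        if (IS_ERR(tx_res))
                return PTR_ERR(tx_res);

        rx_res = enetc_alloc_rx_resources(priv, extended);
        if (IS_ERR(rx_res)) {
                enetc_free_tx_resources(tx_res, priv->num_tx_rings);
                return PTR_ERR(rx_res);
        }

        /* Commit: also frees any previously assigned resource arrays */
        enetc_assign_tx_resources(priv, tx_res);
        enetc_assign_rx_resources(priv, rx_res);

        return 0;
}
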
@@ -1935,10 +2000,6 @@ static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
__free_page(rx_swbd->page);
rx_swbd->page = NULL;
}
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
- rx_ring->next_to_alloc = 0;
}
static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
@@ -1993,6 +2054,7 @@ int enetc_configure_si(struct enetc_ndev_priv *priv)
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_configure_si);
void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
{
@@ -2012,6 +2074,7 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
priv->tx_ictt = ENETC_TXIC_TIMETHR;
}
+EXPORT_SYMBOL_GPL(enetc_init_si_rings_params);
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
{
@@ -2024,11 +2087,13 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_alloc_si_resources);
void enetc_free_si_resources(struct enetc_ndev_priv *priv)
{
kfree(priv->cls_rules);
}
+EXPORT_SYMBOL_GPL(enetc_free_si_resources);
static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
@@ -2052,7 +2117,7 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
/* enable Tx ints by setting pkt thr to 1 */
enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
- tbmr = ENETC_TBMR_EN;
+ tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio);
if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
tbmr |= ENETC_TBMR_VIH;
@@ -2064,10 +2129,11 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
tx_ring->idr = hw->reg + ENETC_SITXIDR;
}
-static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
+ bool extended)
{
int idx = rx_ring->index;
- u32 rbmr;
+ u32 rbmr = 0;
enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
lower_32_bits(rx_ring->bd_dma_base));
@@ -2084,13 +2150,17 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
else
enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
+ /* Also prepare the consumer index in case page allocation never
+ * succeeds. In that case, hardware will never advance producer index
+ * to match consumer index, and will drop all frames.
+ */
enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
+ enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1);
/* enable Rx ints by setting pkt thr to 1 */
enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
- rbmr = ENETC_RBMR_EN;
-
+ rx_ring->ext_en = extended;
if (rx_ring->ext_en)
rbmr |= ENETC_RBMR_BDS;
@@ -2100,26 +2170,62 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
rx_ring->idr = hw->reg + ENETC_SIRXIDR;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ rx_ring->next_to_alloc = 0;
+
enetc_lock_mdio();
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
enetc_unlock_mdio();
- /* enable ring */
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}
-static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
+static void enetc_setup_bdrs(struct enetc_ndev_priv *priv, bool extended)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_setup_txbdr(hw, priv->tx_ring[i]);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_setup_rxbdr(hw, priv->rx_ring[i], extended);
+}
+
+static void enetc_enable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+ int idx = tx_ring->index;
+ u32 tbmr;
+
+ tbmr = enetc_txbdr_rd(hw, idx, ENETC_TBMR);
+ tbmr |= ENETC_TBMR_EN;
+ enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
+}
+
+static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+{
+ int idx = rx_ring->index;
+ u32 rbmr;
+
+ rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
+ rbmr |= ENETC_RBMR_EN;
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
+}
+
+static void enetc_enable_bdrs(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
+ enetc_enable_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+ enetc_enable_rxbdr(hw, priv->rx_ring[i]);
}
-static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
int idx = rx_ring->index;
@@ -2127,13 +2233,30 @@ static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
}
-static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
- int delay = 8, timeout = 100;
- int idx = tx_ring->index;
+ int idx = rx_ring->index;
/* disable EN bit on ring */
enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
+}
+
+static void enetc_disable_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_disable_txbdr(hw, priv->tx_ring[i]);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_disable_rxbdr(hw, priv->rx_ring[i]);
+}
+
+static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+ int delay = 8, timeout = 100;
+ int idx = tx_ring->index;
/* wait for busy to clear */
while (delay < timeout &&
@@ -2147,29 +2270,25 @@ static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
idx);
}
-static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
+static void enetc_wait_bdrs(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
-
- for (i = 0; i < priv->num_rx_rings; i++)
- enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
-
- udelay(1);
+ enetc_wait_txbdr(hw, priv->tx_ring[i]);
}
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ struct enetc_hw *hw = &priv->si->hw;
int i, j, err;
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
struct enetc_int_vector *v = priv->int_vector[i];
int entry = ENETC_BDR_INT_BASE_IDX + i;
- struct enetc_hw *hw = &priv->si->hw;
snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
priv->ndev->name, i);
@@ -2257,13 +2376,14 @@ static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
+ enetc_txbdr_wr(hw, i, ENETC_TBIER, 0);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
+ enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0);
}
static int enetc_phylink_connect(struct net_device *ndev)
@@ -2272,8 +2392,11 @@ static int enetc_phylink_connect(struct net_device *ndev)
struct ethtool_eee edata;
int err;
- if (!priv->phylink)
- return 0; /* phy-less mode */
+ if (!priv->phylink) {
+ /* phy-less mode */
+ netif_carrier_on(ndev);
+ return 0;
+ }
err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
if (err) {
@@ -2285,6 +2408,8 @@ static int enetc_phylink_connect(struct net_device *ndev)
memset(&edata, 0, sizeof(struct ethtool_eee));
phylink_ethtool_set_eee(priv->phylink, &edata);
+ phylink_start(priv->phylink);
+
return 0;
}
@@ -2295,14 +2420,14 @@ static void enetc_tx_onestep_tstamp(struct work_struct *work)
priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
- netif_tx_lock(priv->ndev);
+ netif_tx_lock_bh(priv->ndev);
clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
skb = skb_dequeue(&priv->tx_skbs);
if (skb)
enetc_start_xmit(skb, priv->ndev);
- netif_tx_unlock(priv->ndev);
+ netif_tx_unlock_bh(priv->ndev);
}
static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
@@ -2326,20 +2451,21 @@ void enetc_start(struct net_device *ndev)
enable_irq(irq);
}
- if (priv->phylink)
- phylink_start(priv->phylink);
- else
- netif_carrier_on(ndev);
+ enetc_enable_bdrs(priv);
netif_tx_start_all_queues(ndev);
}
+EXPORT_SYMBOL_GPL(enetc_start);
int enetc_open(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int num_stack_tx_queues;
+ struct enetc_bdr_resource *tx_res, *rx_res;
+ bool extended;
int err;
+ extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
+
err = enetc_setup_irqs(priv);
if (err)
return err;
@@ -2348,34 +2474,28 @@ int enetc_open(struct net_device *ndev)
if (err)
goto err_phy_connect;
- err = enetc_alloc_tx_resources(priv);
- if (err)
+ tx_res = enetc_alloc_tx_resources(priv);
+ if (IS_ERR(tx_res)) {
+ err = PTR_ERR(tx_res);
goto err_alloc_tx;
+ }
- err = enetc_alloc_rx_resources(priv);
- if (err)
+ rx_res = enetc_alloc_rx_resources(priv, extended);
+ if (IS_ERR(rx_res)) {
+ err = PTR_ERR(rx_res);
goto err_alloc_rx;
-
- num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
-
- err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
- if (err)
- goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
- if (err)
- goto err_set_queues;
+ }
enetc_tx_onestep_tstamp_init(priv);
- enetc_setup_bdrs(priv);
+ enetc_assign_tx_resources(priv, tx_res);
+ enetc_assign_rx_resources(priv, rx_res);
+ enetc_setup_bdrs(priv, extended);
enetc_start(ndev);
return 0;
-err_set_queues:
- enetc_free_rx_resources(priv);
err_alloc_rx:
- enetc_free_tx_resources(priv);
+ enetc_free_tx_resources(tx_res, priv->num_tx_rings);
err_alloc_tx:
if (priv->phylink)
phylink_disconnect_phy(priv->phylink);
@@ -2384,6 +2504,7 @@ err_phy_connect:
return err;
}
+EXPORT_SYMBOL_GPL(enetc_open);
void enetc_stop(struct net_device *ndev)
{
@@ -2392,6 +2513,8 @@ void enetc_stop(struct net_device *ndev)
netif_tx_stop_all_queues(ndev);
+ enetc_disable_bdrs(priv);
+
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(priv->si->pdev,
ENETC_BDR_INT_BASE_IDX + i);
@@ -2401,120 +2524,210 @@ void enetc_stop(struct net_device *ndev)
napi_disable(&priv->int_vector[i]->napi);
}
- if (priv->phylink)
- phylink_stop(priv->phylink);
- else
- netif_carrier_off(ndev);
+ enetc_wait_bdrs(priv);
enetc_clear_interrupts(priv);
}
+EXPORT_SYMBOL_GPL(enetc_stop);
int enetc_close(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
enetc_stop(ndev);
- enetc_clear_bdrs(priv);
- if (priv->phylink)
+ if (priv->phylink) {
+ phylink_stop(priv->phylink);
phylink_disconnect_phy(priv->phylink);
+ } else {
+ netif_carrier_off(ndev);
+ }
+
enetc_free_rxtx_rings(priv);
- enetc_free_rx_resources(priv);
- enetc_free_tx_resources(priv);
+
+ /* Avoids dangling pointers and also frees old resources */
+ enetc_assign_rx_resources(priv, NULL);
+ enetc_assign_tx_resources(priv, NULL);
+
enetc_free_irqs(priv);
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_close);
+
+static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended,
+ int (*cb)(struct enetc_ndev_priv *priv, void *ctx),
+ void *ctx)
+{
+ struct enetc_bdr_resource *tx_res, *rx_res;
+ int err;
+
+ ASSERT_RTNL();
+
+ /* If the interface is down, run the callback right away,
+ * without reconfiguration.
+ */
+ if (!netif_running(priv->ndev)) {
+ if (cb) {
+ err = cb(priv, ctx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+ }
+
+ tx_res = enetc_alloc_tx_resources(priv);
+ if (IS_ERR(tx_res)) {
+ err = PTR_ERR(tx_res);
+ goto out;
+ }
+
+ rx_res = enetc_alloc_rx_resources(priv, extended);
+ if (IS_ERR(rx_res)) {
+ err = PTR_ERR(rx_res);
+ goto out_free_tx_res;
+ }
+
+ enetc_stop(priv->ndev);
+ enetc_free_rxtx_rings(priv);
+
+ /* Interface is down, run optional callback now */
+ if (cb) {
+ err = cb(priv, ctx);
+ if (err)
+ goto out_restart;
+ }
+
+ enetc_assign_tx_resources(priv, tx_res);
+ enetc_assign_rx_resources(priv, rx_res);
+ enetc_setup_bdrs(priv, extended);
+ enetc_start(priv->ndev);
+
+ return 0;
+
+out_restart:
+ enetc_setup_bdrs(priv, extended);
+ enetc_start(priv->ndev);
+ enetc_free_rx_resources(rx_res, priv->num_rx_rings);
+out_free_tx_res:
+ enetc_free_tx_resources(tx_res, priv->num_tx_rings);
+out:
+ return err;
+}
+
+static void enetc_debug_tx_ring_prios(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ netdev_dbg(priv->ndev, "TX ring %d prio %d\n", i,
+ priv->tx_ring[i]->prio);
+}
-static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+static void enetc_reset_tc_mqprio(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct tc_mqprio_qopt *mqprio = type_data;
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_bdr *tx_ring;
int num_stack_tx_queues;
- u8 num_tc;
int i;
num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
- mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- num_tc = mqprio->num_tc;
- if (!num_tc) {
- netdev_reset_tc(ndev);
- netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ netdev_reset_tc(ndev);
+ netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ priv->min_num_stack_tx_queues = num_possible_cpus();
- /* Reset all ring priorities to 0 */
- for (i = 0; i < priv->num_tx_rings; i++) {
- tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
- }
+ /* Reset all ring priorities to 0 */
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ tx_ring->prio = 0;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
+
+ enetc_debug_tx_ring_prios(priv);
+
+ enetc_change_preemptible_tcs(priv, 0);
+}
+
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_mqprio_qopt *qopt = &mqprio->qopt;
+ struct enetc_hw *hw = &priv->si->hw;
+ int num_stack_tx_queues = 0;
+ struct enetc_bdr *tx_ring;
+ u8 num_tc = qopt->num_tc;
+ int offset, count;
+ int err, tc, q;
+ if (!num_tc) {
+ enetc_reset_tc_mqprio(ndev);
return 0;
}
- /* Check if we have enough BD rings available to accommodate all TCs */
- if (num_tc > num_stack_tx_queues) {
- netdev_err(ndev, "Max %d traffic classes supported\n",
- priv->num_tx_rings);
- return -EINVAL;
- }
+ err = netdev_set_num_tc(ndev, num_tc);
+ if (err)
+ return err;
- /* For the moment, we use only one BD ring per TC.
- *
- * Configure num_tc BD rings with increasing priorities.
- */
- for (i = 0; i < num_tc; i++) {
- tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
+ for (tc = 0; tc < num_tc; tc++) {
+ offset = qopt->offset[tc];
+ count = qopt->count[tc];
+ num_stack_tx_queues += count;
+
+ err = netdev_set_tc_queue(ndev, tc, count, offset);
+ if (err)
+ goto err_reset_tc;
+
+ for (q = offset; q < offset + count; q++) {
+ tx_ring = priv->tx_ring[q];
+ /* The prio_tc_map is skb_tx_hash()'s way of selecting
+ * between TX queues based on skb->priority. As such,
+ * there's nothing to offload based on it.
+ * Make the mqprio "traffic class" be the priority of
+ * this ring group, and leave the Tx IPV to traffic
+ * class mapping as its default mapping value of 1:1.
+ */
+ tx_ring->prio = tc;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
}
- /* Reset the number of netdev queues based on the TC count */
- netif_set_real_num_tx_queues(ndev, num_tc);
+ err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ if (err)
+ goto err_reset_tc;
- netdev_set_num_tc(ndev, num_tc);
+ priv->min_num_stack_tx_queues = num_stack_tx_queues;
- /* Each TC is associated with one netdev queue */
- for (i = 0; i < num_tc; i++)
- netdev_set_tc_queue(ndev, i, 1, i);
+ enetc_debug_tx_ring_prios(priv);
+
+ enetc_change_preemptible_tcs(priv, mqprio->preemptible_tcs);
return 0;
-}
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data)
-{
- switch (type) {
- case TC_SETUP_QDISC_MQPRIO:
- return enetc_setup_tc_mqprio(ndev, type_data);
- case TC_SETUP_QDISC_TAPRIO:
- return enetc_setup_tc_taprio(ndev, type_data);
- case TC_SETUP_QDISC_CBS:
- return enetc_setup_tc_cbs(ndev, type_data);
- case TC_SETUP_QDISC_ETF:
- return enetc_setup_tc_txtime(ndev, type_data);
- case TC_SETUP_BLOCK:
- return enetc_setup_tc_psfp(ndev, type_data);
- default:
- return -EOPNOTSUPP;
- }
+err_reset_tc:
+ enetc_reset_tc_mqprio(ndev);
+ return err;
}
+EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio);
-static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
- struct netlink_ext_ack *extack)
+static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx)
{
- struct enetc_ndev_priv *priv = netdev_priv(dev);
- struct bpf_prog *old_prog;
- bool is_up;
- int i;
-
- /* The buffer layout is changing, so we need to drain the old
- * RX buffers and seed new ones.
- */
- is_up = netif_running(dev);
- if (is_up)
- dev_close(dev);
+ struct bpf_prog *old_prog, *prog = ctx;
+ int num_stack_tx_queues;
+ int err, i;
old_prog = xchg(&priv->xdp_prog, prog);
+
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+ err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
+ if (err) {
+ xchg(&priv->xdp_prog, old_prog);
+ return err;
+ }
+
if (old_prog)
bpf_prog_put(old_prog);
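
enetc_reconfigure() packages the "allocate new resources, quiesce, swap, restart" sequence so that features which change the ring layout no longer need a full dev_close()/dev_open() cycle. Callers pass an optional callback that flips driver state while the rings are stopped; a non-zero return from it restarts the interface on the old resources. A hedged sketch of how a feature toggle might be routed through it (the example_* names are hypothetical; the real users in this patch are the XDP program swap and the Rx timestamp switch):

/* Sketch: a driver-private flag changed under enetc_reconfigure(). */
static int example_apply_flag_cb(struct enetc_ndev_priv *priv, void *ctx)
{
        bool *enable = ctx;

        if (*enable)
                priv->active_offloads |= ENETC_F_QBV;
        else
                priv->active_offloads &= ~ENETC_F_QBV;

        return 0;
}

static int example_set_flag(struct enetc_ndev_priv *priv, bool enable)
{
        bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);

        return enetc_reconfigure(priv, extended, example_apply_flag_cb,
                                 &enable);
}
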
@@ -2529,29 +2742,53 @@ static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
rx_ring->buffer_offset = ENETC_RXB_PAD;
}
- if (is_up)
- return dev_open(dev, extack);
-
return 0;
}
-int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp)
+static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ int num_xdp_tx_queues = prog ? num_possible_cpus() : 0;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ bool extended;
+
+ if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
+ priv->num_tx_rings) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Reserving %d XDP TXQs does not leave a minimum of %d TXQs for network stack (total %d available)",
+ num_xdp_tx_queues,
+ priv->min_num_stack_tx_queues,
+ priv->num_tx_rings);
+ return -EBUSY;
+ }
+
+ extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
+
+ /* The buffer layout is changing, so we need to drain the old
+ * RX buffers and seed new ones.
+ */
+ return enetc_reconfigure(priv, extended, enetc_reconfigure_xdp_cb, prog);
+}
+
+int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
- switch (xdp->command) {
+ switch (bpf->command) {
case XDP_SETUP_PROG:
- return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack);
+ return enetc_setup_xdp_prog(ndev, bpf->prog, bpf->extack);
default:
return -EINVAL;
}
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_setup_bpf);
struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
unsigned long packets = 0, bytes = 0;
+ unsigned long tx_dropped = 0;
int i;
for (i = 0; i < priv->num_rx_rings; i++) {
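
Attaching an XDP program now reserves one TX ring per possible CPU up front, and the new min_num_stack_tx_queues bookkeeping (raised by mqprio to the sum of its queue counts, otherwise reset to num_possible_cpus()) guarantees the stack keeps enough queues; the -EBUSY path in the hunk above fires when both budgets cannot fit into priv->num_tx_rings. The check, condensed into a small sketch:

/* Sketch: the TXQ budget test behind the extack message above. */
static bool example_xdp_txqs_fit(const struct enetc_ndev_priv *priv,
                                 bool has_prog)
{
        int num_xdp_tx_queues = has_prog ? num_possible_cpus() : 0;

        return priv->min_num_stack_tx_queues + num_xdp_tx_queues <=
               priv->num_tx_rings;
}
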
@@ -2567,13 +2804,16 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev)
for (i = 0; i < priv->num_tx_rings; i++) {
packets += priv->tx_ring[i]->stats.packets;
bytes += priv->tx_ring[i]->stats.bytes;
+ tx_dropped += priv->tx_ring[i]->stats.win_drop;
}
stats->tx_packets = packets;
stats->tx_bytes = bytes;
+ stats->tx_dropped = tx_dropped;
return stats;
}
+EXPORT_SYMBOL_GPL(enetc_get_stats);
static int enetc_set_rss(struct net_device *ndev, int en)
{
@@ -2591,52 +2831,29 @@ static int enetc_set_rss(struct net_device *ndev, int en)
return 0;
}
-static int enetc_set_psfp(struct net_device *ndev, int en)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int err;
-
- if (en) {
- err = enetc_psfp_enable(priv);
- if (err)
- return err;
-
- priv->active_offloads |= ENETC_F_QCI;
- return 0;
- }
-
- err = enetc_psfp_disable(priv);
- if (err)
- return err;
-
- priv->active_offloads &= ~ENETC_F_QCI;
-
- return 0;
-}
-
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
+ enetc_bdr_enable_rxvlan(hw, i, en);
}
static void enetc_enable_txvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
+ enetc_bdr_enable_txvlan(hw, i, en);
}
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features)
+void enetc_set_features(struct net_device *ndev, netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
- int err = 0;
if (changed & NETIF_F_RXHASH)
enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
@@ -2648,54 +2865,54 @@ int enetc_set_features(struct net_device *ndev,
if (changed & NETIF_F_HW_VLAN_CTAG_TX)
enetc_enable_txvlan(ndev,
!!(features & NETIF_F_HW_VLAN_CTAG_TX));
-
- if (changed & NETIF_F_HW_TC)
- err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
-
- return err;
}
+EXPORT_SYMBOL_GPL(enetc_set_features);
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err, new_offloads = priv->active_offloads;
struct hwtstamp_config config;
- int ao;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
break;
case HWTSTAMP_TX_ON:
- priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
- priv->active_offloads |= ENETC_F_TX_TSTAMP;
+ new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ new_offloads |= ENETC_F_TX_TSTAMP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
- priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
+ new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
break;
default:
return -ERANGE;
}
- ao = priv->active_offloads;
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
+ new_offloads &= ~ENETC_F_RX_TSTAMP;
break;
default:
- priv->active_offloads |= ENETC_F_RX_TSTAMP;
+ new_offloads |= ENETC_F_RX_TSTAMP;
config.rx_filter = HWTSTAMP_FILTER_ALL;
}
- if (netif_running(ndev) && ao != priv->active_offloads) {
- enetc_close(ndev);
- enetc_open(ndev);
+ if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) {
+ bool extended = !!(new_offloads & ENETC_F_RX_TSTAMP);
+
+ err = enetc_reconfigure(priv, extended, NULL, NULL);
+ if (err)
+ return err;
}
+ priv->active_offloads = new_offloads;
+
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
@@ -2737,10 +2954,12 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
return phylink_mii_ioctl(priv->phylink, rq, cmd);
}
+EXPORT_SYMBOL_GPL(enetc_ioctl);
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ int num_stack_tx_queues;
int first_xdp_tx_ring;
int i, n, err, nvec;
int v_tx_rings;
@@ -2799,8 +3018,7 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
v->rx_dim_en = true;
}
INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
- netif_napi_add(priv->ndev, &v->napi, enetc_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->ndev, &v->napi, enetc_poll);
v->count_tx_rings = v_tx_rings;
for (j = 0; j < v_tx_rings; j++) {
@@ -2818,6 +3036,17 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
}
}
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+
+ err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
+ if (err)
+ goto fail;
+
+ err = netif_set_real_num_rx_queues(priv->ndev, priv->num_rx_rings);
+ if (err)
+ goto fail;
+
+ priv->min_num_stack_tx_queues = num_possible_cpus();
first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
@@ -2839,6 +3068,7 @@ fail:
return err;
}
+EXPORT_SYMBOL_GPL(enetc_alloc_msix);
void enetc_free_msix(struct enetc_ndev_priv *priv)
{
@@ -2868,6 +3098,7 @@ void enetc_free_msix(struct enetc_ndev_priv *priv)
/* disable all MSIX for this device */
pci_free_irq_vectors(priv->si->pdev);
}
+EXPORT_SYMBOL_GPL(enetc_free_msix);
static void enetc_kfree_si(struct enetc_si *si)
{
@@ -2897,12 +3128,8 @@ int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, name);
@@ -2961,6 +3188,7 @@ err_dma:
return err;
}
+EXPORT_SYMBOL_GPL(enetc_pci_probe);
void enetc_pci_remove(struct pci_dev *pdev)
{
@@ -2972,3 +3200,6 @@ void enetc_pci_remove(struct pci_dev *pdev)
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
}
+EXPORT_SYMBOL_GPL(enetc_pci_remove);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index fb39e406b7fc..c97a8e3d7a7f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -18,6 +18,8 @@
#define ENETC_MAX_MTU (ENETC_MAC_MAXFRM_SIZE - \
(ETH_FCS_LEN + ETH_HLEN + VLAN_HLEN))
+#define ENETC_CBD_DATA_MEM_ALIGN 64
+
struct enetc_tx_swbd {
union {
struct sk_buff *skb;
@@ -34,6 +36,7 @@ struct enetc_tx_swbd {
u8 is_eof:1;
u8 is_xdp_tx:1;
u8 is_xdp_redirect:1;
+ u8 qbv_en:1;
};
#define ENETC_RX_MAXFRM_SIZE ENETC_MAC_MAXFRM_SIZE
@@ -67,9 +70,9 @@ struct enetc_ring_stats {
unsigned int xdp_tx_drops;
unsigned int xdp_redirect;
unsigned int xdp_redirect_failures;
- unsigned int xdp_redirect_sg;
unsigned int recycles;
unsigned int recycle_failures;
+ unsigned int win_drop;
};
struct enetc_xdp_data {
@@ -82,6 +85,23 @@ struct enetc_xdp_data {
#define ENETC_TX_RING_DEFAULT_SIZE 2048
#define ENETC_DEFAULT_TX_WORK (ENETC_TX_RING_DEFAULT_SIZE / 2)
+struct enetc_bdr_resource {
+ /* Input arguments saved for teardown */
+ struct device *dev; /* for DMA mapping */
+ size_t bd_count;
+ size_t bd_size;
+
+ /* Resource proper */
+ void *bd_base; /* points to Rx or Tx BD ring */
+ dma_addr_t bd_dma_base;
+ union {
+ struct enetc_tx_swbd *tx_swbd;
+ struct enetc_rx_swbd *rx_swbd;
+ };
+ char *tso_headers;
+ dma_addr_t tso_headers_dma;
+};
+
struct enetc_bdr {
struct device *dev; /* for DMA mapping */
struct net_device *ndev;
@@ -91,6 +111,7 @@ struct enetc_bdr {
void __iomem *rcir;
};
u16 index;
+ u16 prio;
int bd_count; /* # of BDs */
int next_to_use;
int next_to_clean;
@@ -208,8 +229,9 @@ enum enetc_errata {
ENETC_ERR_UCMCSWP = BIT(1),
};
-#define ENETC_SI_F_QBV BIT(0)
-#define ENETC_SI_F_PSFP BIT(1)
+#define ENETC_SI_F_PSFP BIT(0)
+#define ENETC_SI_F_QBV BIT(1)
+#define ENETC_SI_F_QBU BIT(2)
/* PCI IEP device data */
struct enetc_si {
@@ -292,7 +314,6 @@ struct psfp_cap {
};
#define ENETC_F_TX_TSTAMP_MASK 0xff
-/* TODO: more hardware offloads */
enum enetc_active_offloads {
/* 8 bits reserved for TX timestamp types (hwtstamp_tx_types) */
ENETC_F_TX_TSTAMP = BIT(0),
@@ -301,6 +322,7 @@ enum enetc_active_offloads {
ENETC_F_RX_TSTAMP = BIT(8),
ENETC_F_QBV = BIT(9),
ENETC_F_QCI = BIT(10),
+ ENETC_F_QBU = BIT(11),
};
enum enetc_flags_bit {
@@ -333,6 +355,9 @@ struct enetc_ndev_priv {
u16 rx_bd_count, tx_bd_count;
u16 msg_enable;
+
+ u8 preemptible_tcs;
+
enum enetc_active_offloads active_offloads;
u32 speed; /* store speed for compare update pspeed */
@@ -340,11 +365,16 @@ struct enetc_ndev_priv {
struct enetc_bdr **xdp_tx_ring;
struct enetc_bdr *tx_ring[16];
struct enetc_bdr *rx_ring[16];
+ const struct enetc_bdr_resource *tx_res;
+ const struct enetc_bdr_resource *rx_res;
struct enetc_cls_rule *cls_rules;
struct psfp_cap psfp_cap;
+ /* Minimum number of TX queues required by the network stack */
+ unsigned int min_num_stack_tx_queues;
+
struct phylink *phylink;
int ic_mode;
u32 tx_ictt;
@@ -355,6 +385,11 @@ struct enetc_ndev_priv {
struct work_struct tx_onestep_tstamp;
struct sk_buff_head tx_skbs;
+
+ /* Serialize access to MAC Merge state between ethtool requests
+ * and link state updates
+ */
+ struct mutex mm_lock;
};
/* Messaging */
@@ -373,6 +408,8 @@ struct enetc_msg_cmd_set_primary_mac {
extern int enetc_phc_index;
/* SI common */
+u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg);
+void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val);
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
void enetc_pci_remove(struct pci_dev *pdev);
int enetc_alloc_msix(struct enetc_ndev_priv *priv);
@@ -389,17 +426,17 @@ void enetc_start(struct net_device *ndev);
void enetc_stop(struct net_device *ndev);
netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
struct net_device_stats *enetc_get_stats(struct net_device *ndev);
-int enetc_set_features(struct net_device *ndev,
- netdev_features_t features);
+void enetc_set_features(struct net_device *ndev, netdev_features_t features);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
-int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
- void *type_data);
-int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
+int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
/* ethtool */
void enetc_set_ethtool_ops(struct net_device *ndev);
+void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link);
+void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv);
/* control buffer descriptor ring (CBDR) */
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
@@ -415,7 +452,47 @@ int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count);
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count);
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
+static inline void *enetc_cbd_alloc_data_mem(struct enetc_si *si,
+ struct enetc_cbd *cbd,
+ int size, dma_addr_t *dma,
+ void **data_align)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+ dma_addr_t dma_align;
+ void *data;
+
+ data = dma_alloc_coherent(ring->dma_dev,
+ size + ENETC_CBD_DATA_MEM_ALIGN,
+ dma, GFP_KERNEL);
+ if (!data) {
+ dev_err(ring->dma_dev, "CBD alloc data memory failed!\n");
+ return NULL;
+ }
+
+ dma_align = ALIGN(*dma, ENETC_CBD_DATA_MEM_ALIGN);
+ *data_align = PTR_ALIGN(data, ENETC_CBD_DATA_MEM_ALIGN);
+
+ cbd->addr[0] = cpu_to_le32(lower_32_bits(dma_align));
+ cbd->addr[1] = cpu_to_le32(upper_32_bits(dma_align));
+ cbd->length = cpu_to_le16(size);
+
+ return data;
+}
+
+static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
+ void *data, dma_addr_t *dma)
+{
+ struct enetc_cbdr *ring = &si->cbd_ring;
+
+ dma_free_coherent(ring->dma_dev, size + ENETC_CBD_DATA_MEM_ALIGN,
+ data, *dma);
+}
+
+void enetc_reset_ptcmsdur(struct enetc_hw *hw);
+void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *queue_max_sdu);
+
#ifdef CONFIG_FSL_ENETC_QOS
+int enetc_qos_query_caps(struct net_device *ndev, void *type_data);
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
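
enetc_cbd_alloc_data_mem() and enetc_cbd_free_data_mem() centralise the 64-byte-aligned coherent buffer handling that every control BD command with external data used to open-code; the enetc_cbdr.c hunks below switch the RFS and RSS paths over to them. A condensed sketch of a typical caller, with the payload size left as an assumption:

/* Sketch: send one control BD command whose data lives in a coherent,
 * 64-byte aligned buffer managed by the new helpers.
 */
static int example_send_cbd_with_data(struct enetc_si *si,
                                      const void *payload, int size)
{
        struct enetc_cbd cbd = {.cmd = 0};
        void *tmp, *tmp_align;
        dma_addr_t dma;
        int err;

        tmp = enetc_cbd_alloc_data_mem(si, &cbd, size, &dma, &tmp_align);
        if (!tmp)
                return -ENOMEM;

        memcpy(tmp_align, payload, size);

        err = enetc_send_cmd(si, &cbd);

        enetc_cbd_free_data_mem(si, size, tmp, &dma);

        return err;
}
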
@@ -425,22 +502,24 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data);
int enetc_psfp_init(struct enetc_ndev_priv *priv);
int enetc_psfp_clean(struct enetc_ndev_priv *priv);
+int enetc_set_psfp(struct net_device *ndev, bool en);
static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
u32 reg;
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSIDCAPR);
priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
/* Port stream filter capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSFCAPR);
priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
/* Port stream gate capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSGCAPR);
priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
/* Port flow meter capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR);
+ reg = enetc_port_rd(hw, ENETC_PFMCAPR);
priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
}
@@ -481,6 +560,7 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
}
#else
+#define enetc_qos_query_caps(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
#define enetc_sched_speed_set(priv, speed) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
@@ -500,4 +580,9 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
{
return 0;
}
+
+static inline int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ return 0;
+}
#endif
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index 073e56dcca4e..20bfdf7fb4b4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -44,6 +44,7 @@ int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_setup_cbdr);
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
{
@@ -57,6 +58,7 @@ void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
cbdr->bd_base = NULL;
cbdr->dma_dev = NULL;
}
+EXPORT_SYMBOL_GPL(enetc_teardown_cbdr);
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
@@ -127,6 +129,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_send_cmd);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
{
@@ -140,6 +143,7 @@ int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
return enetc_send_cmd(si, &cbd);
}
+EXPORT_SYMBOL_GPL(enetc_clear_mac_flt_entry);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
char *mac_addr, int si_map)
@@ -165,71 +169,58 @@ int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
return enetc_send_cmd(si, &cbd);
}
+EXPORT_SYMBOL_GPL(enetc_set_mac_flt_entry);
-#define RFSE_ALIGN 64
/* Set entry in RFS table */
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
int index)
{
struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
- dma_addr_t dma, dma_align;
void *tmp, *tmp_align;
+ dma_addr_t dma;
int err;
/* fill up the "set" descriptor */
cbd.cmd = 0;
cbd.cls = 4;
cbd.index = cpu_to_le16(index);
- cbd.length = cpu_to_le16(sizeof(*rfse));
cbd.opt[3] = cpu_to_le32(0); /* SI */
- tmp = dma_alloc_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
- &dma, GFP_KERNEL);
- if (!tmp) {
- dev_err(ring->dma_dev, "DMA mapping of RFS entry failed!\n");
+ tmp = enetc_cbd_alloc_data_mem(si, &cbd, sizeof(*rfse),
+ &dma, &tmp_align);
+ if (!tmp)
return -ENOMEM;
- }
- dma_align = ALIGN(dma, RFSE_ALIGN);
- tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN);
memcpy(tmp_align, rfse, sizeof(*rfse));
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
-
err = enetc_send_cmd(si, &cbd);
if (err)
dev_err(ring->dma_dev, "FS entry add failed (%d)!", err);
- dma_free_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
- tmp, dma);
+ enetc_cbd_free_data_mem(si, sizeof(*rfse), tmp, &dma);
return err;
}
+EXPORT_SYMBOL_GPL(enetc_set_fs_entry);
-#define RSSE_ALIGN 64
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
bool read)
{
struct enetc_cbdr *ring = &si->cbd_ring;
struct enetc_cbd cbd = {.cmd = 0};
- dma_addr_t dma, dma_align;
u8 *tmp, *tmp_align;
+ dma_addr_t dma;
int err, i;
- if (count < RSSE_ALIGN)
+ if (count < ENETC_CBD_DATA_MEM_ALIGN)
/* HW only takes in a full 64 entry table */
return -EINVAL;
- tmp = dma_alloc_coherent(ring->dma_dev, count + RSSE_ALIGN,
- &dma, GFP_KERNEL);
- if (!tmp) {
- dev_err(ring->dma_dev, "DMA mapping of RSS table failed!\n");
+ tmp = enetc_cbd_alloc_data_mem(si, &cbd, count,
+ &dma, (void *)&tmp_align);
+ if (!tmp)
return -ENOMEM;
- }
- dma_align = ALIGN(dma, RSSE_ALIGN);
- tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN);
if (!read)
for (i = 0; i < count; i++)
@@ -238,10 +229,6 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
/* fill up the descriptor */
cbd.cmd = read ? 2 : 1;
cbd.cls = 3;
- cbd.length = cpu_to_le16(count);
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));
err = enetc_send_cmd(si, &cbd);
if (err)
@@ -251,7 +238,7 @@ static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
for (i = 0; i < count; i++)
table[i] = tmp_align[i];
- dma_free_coherent(ring->dma_dev, count + RSSE_ALIGN, tmp, dma);
+ enetc_cbd_free_data_mem(si, count, tmp, &dma);
return err;
}
@@ -261,9 +248,11 @@ int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
return enetc_cmd_rss_table(si, table, count, true);
}
+EXPORT_SYMBOL_GPL(enetc_get_rss_table);
/* Set RSS table */
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
+EXPORT_SYMBOL_GPL(enetc_set_rss_table);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 910b9f722504..e993ed04ab57 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
+#include <linux/ethtool_netlink.h>
#include <linux/net_tstamp.h>
#include <linux/module.h>
#include "enetc.h"
@@ -31,6 +32,12 @@ static const u32 enetc_port_regs[] = {
ENETC_PM0_CMD_CFG, ENETC_PM0_MAXFRM, ENETC_PM0_IF_MODE
};
+static const u32 enetc_port_mm_regs[] = {
+ ENETC_MMCSR, ENETC_PFPMR, ENETC_PTCFPR(0), ENETC_PTCFPR(1),
+ ENETC_PTCFPR(2), ENETC_PTCFPR(3), ENETC_PTCFPR(4), ENETC_PTCFPR(5),
+ ENETC_PTCFPR(6), ENETC_PTCFPR(7),
+};
+
static int enetc_get_reglen(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -44,6 +51,9 @@ static int enetc_get_reglen(struct net_device *ndev)
if (hw->port)
len += ARRAY_SIZE(enetc_port_regs);
+ if (hw->port && !!(priv->si->hw_features & ENETC_SI_F_QBU))
+ len += ARRAY_SIZE(enetc_port_mm_regs);
+
len *= sizeof(u32) * 2; /* store 2 entries per reg: addr and value */
return len;
@@ -89,6 +99,14 @@ static void enetc_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
*buf++ = addr;
*buf++ = enetc_rd(hw, addr);
}
+
+ if (priv->si->hw_features & ENETC_SI_F_QBU) {
+ for (i = 0; i < ARRAY_SIZE(enetc_port_mm_regs); i++) {
+ addr = ENETC_PORT_BASE + enetc_port_mm_regs[i];
+ *buf++ = addr;
+ *buf++ = enetc_rd(hw, addr);
+ }
+ }
}
static const struct {
@@ -125,68 +143,68 @@ static const struct {
int reg;
char name[ETH_GSTRING_LEN];
} enetc_port_counters[] = {
- { ENETC_PM0_REOCT, "MAC rx ethernet octets" },
- { ENETC_PM0_RALN, "MAC rx alignment errors" },
- { ENETC_PM0_RXPF, "MAC rx valid pause frames" },
- { ENETC_PM0_RFRM, "MAC rx valid frames" },
- { ENETC_PM0_RFCS, "MAC rx fcs errors" },
- { ENETC_PM0_RVLAN, "MAC rx VLAN frames" },
- { ENETC_PM0_RERR, "MAC rx frame errors" },
- { ENETC_PM0_RUCA, "MAC rx unicast frames" },
- { ENETC_PM0_RMCA, "MAC rx multicast frames" },
- { ENETC_PM0_RBCA, "MAC rx broadcast frames" },
- { ENETC_PM0_RDRP, "MAC rx dropped packets" },
- { ENETC_PM0_RPKT, "MAC rx packets" },
- { ENETC_PM0_RUND, "MAC rx undersized packets" },
- { ENETC_PM0_R64, "MAC rx 64 byte packets" },
- { ENETC_PM0_R127, "MAC rx 65-127 byte packets" },
- { ENETC_PM0_R255, "MAC rx 128-255 byte packets" },
- { ENETC_PM0_R511, "MAC rx 256-511 byte packets" },
- { ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" },
- { ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" },
- { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" },
- { ENETC_PM0_ROVR, "MAC rx oversized packets" },
- { ENETC_PM0_RJBR, "MAC rx jabber packets" },
- { ENETC_PM0_RFRG, "MAC rx fragment packets" },
- { ENETC_PM0_RCNP, "MAC rx control packets" },
- { ENETC_PM0_RDRNTP, "MAC rx fifo drop" },
- { ENETC_PM0_TEOCT, "MAC tx ethernet octets" },
- { ENETC_PM0_TOCT, "MAC tx octets" },
- { ENETC_PM0_TCRSE, "MAC tx carrier sense errors" },
- { ENETC_PM0_TXPF, "MAC tx valid pause frames" },
- { ENETC_PM0_TFRM, "MAC tx frames" },
- { ENETC_PM0_TFCS, "MAC tx fcs errors" },
- { ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
- { ENETC_PM0_TERR, "MAC tx frame errors" },
- { ENETC_PM0_TUCA, "MAC tx unicast frames" },
- { ENETC_PM0_TMCA, "MAC tx multicast frames" },
- { ENETC_PM0_TBCA, "MAC tx broadcast frames" },
- { ENETC_PM0_TPKT, "MAC tx packets" },
- { ENETC_PM0_TUND, "MAC tx undersized packets" },
- { ENETC_PM0_T64, "MAC tx 64 byte packets" },
- { ENETC_PM0_T127, "MAC tx 65-127 byte packets" },
- { ENETC_PM0_T255, "MAC tx 128-255 byte packets" },
- { ENETC_PM0_T511, "MAC tx 256-511 byte packets" },
- { ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" },
- { ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" },
- { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" },
- { ENETC_PM0_TCNP, "MAC tx control packets" },
- { ENETC_PM0_TDFR, "MAC tx deferred packets" },
- { ENETC_PM0_TMCOL, "MAC tx multiple collisions" },
- { ENETC_PM0_TSCOL, "MAC tx single collisions" },
- { ENETC_PM0_TLCOL, "MAC tx late collisions" },
- { ENETC_PM0_TECOL, "MAC tx excessive collisions" },
- { ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
- { ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
- { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
- { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
- { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
- { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
- { ENETC_PFDMSAPR, "SI pruning discarded frames" },
- { ENETC_PICDR(0), "ICM DR0 discarded frames" },
- { ENETC_PICDR(1), "ICM DR1 discarded frames" },
- { ENETC_PICDR(2), "ICM DR2 discarded frames" },
- { ENETC_PICDR(3), "ICM DR3 discarded frames" },
+ { ENETC_PM_REOCT(0), "MAC rx ethernet octets" },
+ { ENETC_PM_RALN(0), "MAC rx alignment errors" },
+ { ENETC_PM_RXPF(0), "MAC rx valid pause frames" },
+ { ENETC_PM_RFRM(0), "MAC rx valid frames" },
+ { ENETC_PM_RFCS(0), "MAC rx fcs errors" },
+ { ENETC_PM_RVLAN(0), "MAC rx VLAN frames" },
+ { ENETC_PM_RERR(0), "MAC rx frame errors" },
+ { ENETC_PM_RUCA(0), "MAC rx unicast frames" },
+ { ENETC_PM_RMCA(0), "MAC rx multicast frames" },
+ { ENETC_PM_RBCA(0), "MAC rx broadcast frames" },
+ { ENETC_PM_RDRP(0), "MAC rx dropped packets" },
+ { ENETC_PM_RPKT(0), "MAC rx packets" },
+ { ENETC_PM_RUND(0), "MAC rx undersized packets" },
+ { ENETC_PM_R64(0), "MAC rx 64 byte packets" },
+ { ENETC_PM_R127(0), "MAC rx 65-127 byte packets" },
+ { ENETC_PM_R255(0), "MAC rx 128-255 byte packets" },
+ { ENETC_PM_R511(0), "MAC rx 256-511 byte packets" },
+ { ENETC_PM_R1023(0), "MAC rx 512-1023 byte packets" },
+ { ENETC_PM_R1522(0), "MAC rx 1024-1522 byte packets" },
+ { ENETC_PM_R1523X(0), "MAC rx 1523 to max-octet packets" },
+ { ENETC_PM_ROVR(0), "MAC rx oversized packets" },
+ { ENETC_PM_RJBR(0), "MAC rx jabber packets" },
+ { ENETC_PM_RFRG(0), "MAC rx fragment packets" },
+ { ENETC_PM_RCNP(0), "MAC rx control packets" },
+ { ENETC_PM_RDRNTP(0), "MAC rx fifo drop" },
+ { ENETC_PM_TEOCT(0), "MAC tx ethernet octets" },
+ { ENETC_PM_TOCT(0), "MAC tx octets" },
+ { ENETC_PM_TCRSE(0), "MAC tx carrier sense errors" },
+ { ENETC_PM_TXPF(0), "MAC tx valid pause frames" },
+ { ENETC_PM_TFRM(0), "MAC tx frames" },
+ { ENETC_PM_TFCS(0), "MAC tx fcs errors" },
+ { ENETC_PM_TVLAN(0), "MAC tx VLAN frames" },
+ { ENETC_PM_TERR(0), "MAC tx frame errors" },
+ { ENETC_PM_TUCA(0), "MAC tx unicast frames" },
+ { ENETC_PM_TMCA(0), "MAC tx multicast frames" },
+ { ENETC_PM_TBCA(0), "MAC tx broadcast frames" },
+ { ENETC_PM_TPKT(0), "MAC tx packets" },
+ { ENETC_PM_TUND(0), "MAC tx undersized packets" },
+ { ENETC_PM_T64(0), "MAC tx 64 byte packets" },
+ { ENETC_PM_T127(0), "MAC tx 65-127 byte packets" },
+ { ENETC_PM_T255(0), "MAC tx 128-255 byte packets" },
+ { ENETC_PM_T511(0), "MAC tx 256-511 byte packets" },
+ { ENETC_PM_T1023(0), "MAC tx 512-1023 byte packets" },
+ { ENETC_PM_T1522(0), "MAC tx 1024-1522 byte packets" },
+ { ENETC_PM_T1523X(0), "MAC tx 1523 to max-octet packets" },
+ { ENETC_PM_TCNP(0), "MAC tx control packets" },
+ { ENETC_PM_TDFR(0), "MAC tx deferred packets" },
+ { ENETC_PM_TMCOL(0), "MAC tx multiple collisions" },
+ { ENETC_PM_TSCOL(0), "MAC tx single collisions" },
+ { ENETC_PM_TLCOL(0), "MAC tx late collisions" },
+ { ENETC_PM_TECOL(0), "MAC tx excessive collisions" },
+ { ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
+ { ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
+ { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
+ { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
+ { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
+ { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
+ { ENETC_PFDMSAPR, "SI pruning discarded frames" },
+ { ENETC_PICDR(0), "ICM DR0 discarded frames" },
+ { ENETC_PICDR(1), "ICM DR1 discarded frames" },
+ { ENETC_PICDR(2), "ICM DR2 discarded frames" },
+ { ENETC_PICDR(3), "ICM DR3 discarded frames" },
};
static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
@@ -197,13 +215,13 @@ static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
"Rx ring %2d recycle failures",
"Rx ring %2d redirects",
"Rx ring %2d redirect failures",
- "Rx ring %2d redirect S/G",
};
static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
"Tx ring %2d frames",
"Tx ring %2d XDP frames",
"Tx ring %2d XDP drops",
+ "Tx window drop %2d frames",
};
static int enetc_get_sset_count(struct net_device *ndev, int sset)
@@ -235,7 +253,7 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) {
- strlcpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
+ strscpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < priv->num_tx_rings; i++) {
@@ -257,7 +275,7 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
break;
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) {
- strlcpy(p, enetc_port_counters[i].name,
+ strscpy(p, enetc_port_counters[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
@@ -279,6 +297,7 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
data[o++] = priv->tx_ring[i]->stats.packets;
data[o++] = priv->tx_ring[i]->stats.xdp_tx;
data[o++] = priv->tx_ring[i]->stats.xdp_tx_drops;
+ data[o++] = priv->tx_ring[i]->stats.win_drop;
}
for (i = 0; i < priv->num_rx_rings; i++) {
@@ -289,7 +308,6 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
data[o++] = priv->rx_ring[i]->stats.recycle_failures;
data[o++] = priv->rx_ring[i]->stats.xdp_redirect;
data[o++] = priv->rx_ring[i]->stats.xdp_redirect_failures;
- data[o++] = priv->rx_ring[i]->stats.xdp_redirect_sg;
}
if (!enetc_si_is_pf(priv->si))
@@ -299,6 +317,166 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
}
+static void enetc_pause_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_pause_stats *pause_stats)
+{
+ pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(mac));
+ pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(mac));
+}
+
+static void enetc_get_pause_stats(struct net_device *ndev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+
+ switch (pause_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_pause_stats(hw, 0, pause_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_pause_stats(hw, 1, pause_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_pause_stats(ndev, pause_stats);
+ break;
+ }
+}
+
+static void enetc_mac_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_eth_mac_stats *s)
+{
+ s->FramesTransmittedOK = enetc_port_rd(hw, ENETC_PM_TFRM(mac));
+ s->SingleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TSCOL(mac));
+ s->MultipleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TMCOL(mac));
+ s->FramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RFRM(mac));
+ s->FrameCheckSequenceErrors = enetc_port_rd(hw, ENETC_PM_RFCS(mac));
+ s->AlignmentErrors = enetc_port_rd(hw, ENETC_PM_RALN(mac));
+ s->OctetsTransmittedOK = enetc_port_rd(hw, ENETC_PM_TEOCT(mac));
+ s->FramesWithDeferredXmissions = enetc_port_rd(hw, ENETC_PM_TDFR(mac));
+ s->LateCollisions = enetc_port_rd(hw, ENETC_PM_TLCOL(mac));
+ s->FramesAbortedDueToXSColls = enetc_port_rd(hw, ENETC_PM_TECOL(mac));
+ s->FramesLostDueToIntMACXmitError = enetc_port_rd(hw, ENETC_PM_TERR(mac));
+ s->CarrierSenseErrors = enetc_port_rd(hw, ENETC_PM_TCRSE(mac));
+ s->OctetsReceivedOK = enetc_port_rd(hw, ENETC_PM_REOCT(mac));
+ s->FramesLostDueToIntMACRcvError = enetc_port_rd(hw, ENETC_PM_RDRNTP(mac));
+ s->MulticastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TMCA(mac));
+ s->BroadcastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TBCA(mac));
+ s->MulticastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RMCA(mac));
+ s->BroadcastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RBCA(mac));
+}
+
+static void enetc_ctrl_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ s->MACControlFramesTransmitted = enetc_port_rd(hw, ENETC_PM_TCNP(mac));
+ s->MACControlFramesReceived = enetc_port_rd(hw, ENETC_PM_RCNP(mac));
+}
+
+static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1522 },
+ { 1523, ENETC_MAC_MAXFRM_SIZE },
+ {},
+};
+
+static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_rmon_stats *s)
+{
+ s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac));
+ s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac));
+ s->fragments = enetc_port_rd(hw, ENETC_PM_RFRG(mac));
+ s->jabbers = enetc_port_rd(hw, ENETC_PM_RJBR(mac));
+
+ s->hist[0] = enetc_port_rd(hw, ENETC_PM_R64(mac));
+ s->hist[1] = enetc_port_rd(hw, ENETC_PM_R127(mac));
+ s->hist[2] = enetc_port_rd(hw, ENETC_PM_R255(mac));
+ s->hist[3] = enetc_port_rd(hw, ENETC_PM_R511(mac));
+ s->hist[4] = enetc_port_rd(hw, ENETC_PM_R1023(mac));
+ s->hist[5] = enetc_port_rd(hw, ENETC_PM_R1522(mac));
+ s->hist[6] = enetc_port_rd(hw, ENETC_PM_R1523X(mac));
+
+ s->hist_tx[0] = enetc_port_rd(hw, ENETC_PM_T64(mac));
+ s->hist_tx[1] = enetc_port_rd(hw, ENETC_PM_T127(mac));
+ s->hist_tx[2] = enetc_port_rd(hw, ENETC_PM_T255(mac));
+ s->hist_tx[3] = enetc_port_rd(hw, ENETC_PM_T511(mac));
+ s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac));
+ s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac));
+ s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac));
+}
+
+static void enetc_get_eth_mac_stats(struct net_device *ndev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+
+ switch (mac_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_mac_stats(hw, 0, mac_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_mac_stats(hw, 1, mac_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_mac_stats(ndev, mac_stats);
+ break;
+ }
+}
+
+static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+
+ switch (ctrl_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_ctrl_stats(hw, 0, ctrl_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_ctrl_stats(hw, 1, ctrl_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_ctrl_stats(ndev, ctrl_stats);
+ break;
+ }
+}
+
+static void enetc_get_rmon_stats(struct net_device *ndev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+
+ *ranges = enetc_rmon_ranges;
+
+ switch (rmon_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_rmon_stats(hw, 0, rmon_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_rmon_stats(hw, 1, rmon_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_rmon_stats(ndev, rmon_stats);
+ break;
+ }
+}
+
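The three cases above mirror the ethtool statistics source UAPI: EMAC reads the express MAC counters, PMAC reads the preemptible MAC counters (only when the port advertises 802.1Qbu support), and AGGREGATE defers to the generic ethtool helpers, which roughly sum the two by calling back into the driver for each MAC. A minimal sketch of what the aggregate amounts to for one counter, assuming the enetc_port_rd()/ENETC_PM_* definitions from this patch (the helper name is hypothetical, for illustration only):

/* Illustrative sketch only: conceptually, the AGGREGATE source is the
 * sum of the eMAC (mac = 0) and pMAC (mac = 1) counters.
 */
static void enetc_rmon_undersize_aggregate_sketch(struct enetc_hw *hw,
						   struct ethtool_rmon_stats *s)
{
	s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(0)) +
			    enetc_port_rd(hw, ENETC_PM_RUND(1));
	/* ...and likewise for the remaining counters and histogram buckets */
}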
#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
RXH_IP_DST)
#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
@@ -542,6 +720,7 @@ void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]);
}
+EXPORT_SYMBOL_GPL(enetc_set_rss_key);
static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
const u8 *key, const u8 hfunc)
@@ -562,7 +741,9 @@ static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
}
static void enetc_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
@@ -672,7 +853,10 @@ static int enetc_get_ts_info(struct net_device *ndev,
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_RAW_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON) |
@@ -750,6 +934,249 @@ static int enetc_set_link_ksettings(struct net_device *dev,
return phylink_ethtool_ksettings_set(priv->phylink, cmd);
}
+static void enetc_get_mm_stats(struct net_device *ndev,
+ struct ethtool_mm_stats *s)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+
+ if (!(si->hw_features & ENETC_SI_F_QBU))
+ return;
+
+ s->MACMergeFrameAssErrorCount = enetc_port_rd(hw, ENETC_MMFAECR);
+ s->MACMergeFrameSmdErrorCount = enetc_port_rd(hw, ENETC_MMFSECR);
+ s->MACMergeFrameAssOkCount = enetc_port_rd(hw, ENETC_MMFAOCR);
+ s->MACMergeFragCountRx = enetc_port_rd(hw, ENETC_MMFCRXR);
+ s->MACMergeFragCountTx = enetc_port_rd(hw, ENETC_MMFCTXR);
+ s->MACMergeHoldCount = enetc_port_rd(hw, ENETC_MMHCR);
+}
+
+static int enetc_get_mm(struct net_device *ndev, struct ethtool_mm_state *state)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &si->hw;
+ u32 lafs, rafs, val;
+
+ if (!(si->hw_features & ENETC_SI_F_QBU))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->mm_lock);
+
+ val = enetc_port_rd(hw, ENETC_PFPMR);
+ state->pmac_enabled = !!(val & ENETC_PFPMR_PMACE);
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+
+ switch (ENETC_MMCSR_GET_VSTS(val)) {
+ case 0:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+ break;
+ case 2:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
+ break;
+ case 3:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
+ break;
+ case 4:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
+ break;
+ case 5:
+ default:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_UNKNOWN;
+ break;
+ }
+
+ rafs = ENETC_MMCSR_GET_RAFS(val);
+ state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(rafs);
+ lafs = ENETC_MMCSR_GET_LAFS(val);
+ state->rx_min_frag_size = ethtool_mm_frag_size_add_to_min(lafs);
+ state->tx_enabled = !!(val & ENETC_MMCSR_LPE); /* mirror of MMCSR_ME */
+ state->tx_active = state->tx_enabled &&
+ (state->verify_status == ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED ||
+ state->verify_status == ETHTOOL_MM_VERIFY_STATUS_DISABLED);
+ state->verify_enabled = !(val & ENETC_MMCSR_VDIS);
+ state->verify_time = ENETC_MMCSR_GET_VT(val);
+ /* A verifyTime of 128 ms would exceed the 7-bit width
+ * of the ENETC_MMCSR_VT field
+ */
+ state->max_verify_time = 127;
+
+ mutex_unlock(&priv->mm_lock);
+
+ return 0;
+}
+
+static int enetc_mm_wait_tx_active(struct enetc_hw *hw, int verify_time)
+{
+ int timeout = verify_time * USEC_PER_MSEC * ENETC_MM_VERIFY_RETRIES;
+ u32 val;
+
+ /* This will time out after the standard value of 3 verification
+ * attempts. To avoid sleeping forever, it relies on a non-zero verify_time,
+ * a guarantee provided by the ethtool netlink attribute policy.
+ */
+ return read_poll_timeout(enetc_port_rd, val,
+ ENETC_MMCSR_GET_VSTS(val) == 3,
+ ENETC_MM_VERIFY_SLEEP_US, timeout,
+ true, hw, ENETC_MMCSR);
+}
+
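For readers unfamiliar with the &lt;linux/iopoll.h&gt; helper used above: read_poll_timeout(op, val, cond, sleep_us, timeout_us, sleep_before_read, args...) repeatedly evaluates op(args...) into val until cond holds or timeout_us elapses, returning 0 on success and -ETIMEDOUT otherwise. A simplified open-coded equivalent of the call above, assuming only the definitions introduced in this patch (the function name is hypothetical and the timekeeping is approximate):

/* Simplified, approximate expansion of the read_poll_timeout() call above;
 * illustrative only.
 */
static int enetc_mm_wait_tx_active_sketch(struct enetc_hw *hw, int verify_time)
{
	u64 timeout_us = verify_time * USEC_PER_MSEC * ENETC_MM_VERIFY_RETRIES;
	u64 elapsed_us = 0;
	u32 val;

	for (;;) {
		/* sleep_before_read == true: sleep first, then sample MMCSR */
		usleep_range(ENETC_MM_VERIFY_SLEEP_US,
			     ENETC_MM_VERIFY_SLEEP_US + 100);
		val = enetc_port_rd(hw, ENETC_MMCSR);
		if (ENETC_MMCSR_GET_VSTS(val) == 3)	/* verification succeeded */
			return 0;

		elapsed_us += ENETC_MM_VERIFY_SLEEP_US;
		if (elapsed_us >= timeout_us)
			return -ETIMEDOUT;
	}
}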
+static void enetc_set_ptcfpr(struct enetc_hw *hw, u8 preemptible_tcs)
+{
+ u32 val;
+ int tc;
+
+ for (tc = 0; tc < 8; tc++) {
+ val = enetc_port_rd(hw, ENETC_PTCFPR(tc));
+
+ if (preemptible_tcs & BIT(tc))
+ val |= ENETC_PTCFPR_FPE;
+ else
+ val &= ~ENETC_PTCFPR_FPE;
+
+ enetc_port_wr(hw, ENETC_PTCFPR(tc), val);
+ }
+}
+
+/* ENETC does not have an IRQ to notify changes to the MAC Merge TX status
+ * (active/inactive), but the preemptible traffic classes should only be
+ * committed to hardware once TX is active. Resort to polling.
+ */
+void enetc_mm_commit_preemptible_tcs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ u8 preemptible_tcs = 0;
+ u32 val;
+ int err;
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+ if (!(val & ENETC_MMCSR_ME))
+ goto out;
+
+ if (!(val & ENETC_MMCSR_VDIS)) {
+ err = enetc_mm_wait_tx_active(hw, ENETC_MMCSR_GET_VT(val));
+ if (err)
+ goto out;
+ }
+
+ preemptible_tcs = priv->preemptible_tcs;
+out:
+ enetc_set_ptcfpr(hw, preemptible_tcs);
+}
+
+/* FIXME: Workaround for the link partner's verification failing if ENETC
+ * previously received too much express traffic. The documentation does not
+ * suggest this is needed.
+ */
+static void enetc_restart_emac_rx(struct enetc_si *si)
+{
+ u32 val = enetc_port_rd(&si->hw, ENETC_PM0_CMD_CFG);
+
+ enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val & ~ENETC_PM0_RX_EN);
+
+ if (val & ENETC_PM0_RX_EN)
+ enetc_port_wr(&si->hw, ENETC_PM0_CMD_CFG, val);
+}
+
+static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+ u32 val, add_frag_size;
+ int err;
+
+ if (!(si->hw_features & ENETC_SI_F_QBU))
+ return -EOPNOTSUPP;
+
+ err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
+ &add_frag_size, extack);
+ if (err)
+ return err;
+
+ mutex_lock(&priv->mm_lock);
+
+ val = enetc_port_rd(hw, ENETC_PFPMR);
+ if (cfg->pmac_enabled)
+ val |= ENETC_PFPMR_PMACE;
+ else
+ val &= ~ENETC_PFPMR_PMACE;
+ enetc_port_wr(hw, ENETC_PFPMR, val);
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+
+ if (cfg->verify_enabled)
+ val &= ~ENETC_MMCSR_VDIS;
+ else
+ val |= ENETC_MMCSR_VDIS;
+
+ if (cfg->tx_enabled)
+ priv->active_offloads |= ENETC_F_QBU;
+ else
+ priv->active_offloads &= ~ENETC_F_QBU;
+
+ /* If link is up, enable/disable MAC Merge right away */
+ if (!(val & ENETC_MMCSR_LINK_FAIL)) {
+ if (!!(priv->active_offloads & ENETC_F_QBU))
+ val |= ENETC_MMCSR_ME;
+ else
+ val &= ~ENETC_MMCSR_ME;
+ }
+
+ val &= ~ENETC_MMCSR_VT_MASK;
+ val |= ENETC_MMCSR_VT(cfg->verify_time);
+
+ val &= ~ENETC_MMCSR_RAFS_MASK;
+ val |= ENETC_MMCSR_RAFS(add_frag_size);
+
+ enetc_port_wr(hw, ENETC_MMCSR, val);
+
+ enetc_restart_emac_rx(priv->si);
+
+ enetc_mm_commit_preemptible_tcs(priv);
+
+ mutex_unlock(&priv->mm_lock);
+
+ return 0;
+}
+
+/* When the link is lost, the verification state machine goes to the FAILED
+ * state and does not restart on its own after a new link-up event.
+ * According to IEEE 802.3 Figure 99-8 (Verify state diagram), the LINK_FAIL
+ * bit should have been sufficient to re-trigger verification, but on ENETC
+ * it is not. As a workaround, we need to toggle the Merge Enable bit to
+ * re-trigger verification when the link comes up.
+ */
+void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 val;
+
+ mutex_lock(&priv->mm_lock);
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+
+ if (link) {
+ val &= ~ENETC_MMCSR_LINK_FAIL;
+ if (priv->active_offloads & ENETC_F_QBU)
+ val |= ENETC_MMCSR_ME;
+ } else {
+ val |= ENETC_MMCSR_LINK_FAIL;
+ if (priv->active_offloads & ENETC_F_QBU)
+ val &= ~ENETC_MMCSR_ME;
+ }
+
+ enetc_port_wr(hw, ENETC_MMCSR, val);
+
+ enetc_mm_commit_preemptible_tcs(priv);
+
+ mutex_unlock(&priv->mm_lock);
+}
+EXPORT_SYMBOL_GPL(enetc_mm_link_state_update);
+
static const struct ethtool_ops enetc_pf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -759,6 +1186,10 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_sset_count = enetc_get_sset_count,
.get_strings = enetc_get_strings,
.get_ethtool_stats = enetc_get_ethtool_stats,
+ .get_pause_stats = enetc_get_pause_stats,
+ .get_rmon_stats = enetc_get_rmon_stats,
+ .get_eth_ctrl_stats = enetc_get_eth_ctrl_stats,
+ .get_eth_mac_stats = enetc_get_eth_mac_stats,
.get_rxnfc = enetc_get_rxnfc,
.set_rxnfc = enetc_set_rxnfc,
.get_rxfh_key_size = enetc_get_rxfh_key_size,
@@ -776,6 +1207,9 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.set_wol = enetc_set_wol,
.get_pauseparam = enetc_get_pauseparam,
.set_pauseparam = enetc_set_pauseparam,
+ .get_mm = enetc_get_mm,
+ .set_mm = enetc_set_mm,
+ .get_mm_stats = enetc_get_mm_stats,
};
static const struct ethtool_ops enetc_vf_ethtool_ops = {
@@ -808,3 +1242,4 @@ void enetc_set_ethtool_ops(struct net_device *ndev)
else
ndev->ethtool_ops = &enetc_vf_ethtool_ops;
}
+EXPORT_SYMBOL_GPL(enetc_set_ethtool_ops);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 1514e6a4a3ff..1619943fb263 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -3,6 +3,9 @@
#include <linux/bitops.h>
+#define ENETC_MM_VERIFY_SLEEP_US USEC_PER_MSEC
+#define ENETC_MM_VERIFY_RETRIES 3
+
/* ENETC device IDs */
#define ENETC_DEV_ID_PF 0xe100
#define ENETC_DEV_ID_VF 0xef00
@@ -18,9 +21,10 @@
#define ENETC_SICTR0 0x18
#define ENETC_SICTR1 0x1c
#define ENETC_SIPCAPR0 0x20
-#define ENETC_SIPCAPR0_QBV BIT(4)
#define ENETC_SIPCAPR0_PSFP BIT(9)
#define ENETC_SIPCAPR0_RSS BIT(8)
+#define ENETC_SIPCAPR0_QBV BIT(4)
+#define ENETC_SIPCAPR0_QBU BIT(3)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
#define ENETC_SIRBGCR 0x38
@@ -213,7 +217,6 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PSIRFSCFGR(n) (0x1814 + (n) * 4) /* n = SI index */
#define ENETC_PFPMR 0x1900
#define ENETC_PFPMR_PMACE BIT(1)
-#define ENETC_PFPMR_MWLM BIT(0)
#define ENETC_EMDIO_BASE 0x1c00
#define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10)
#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10)
@@ -222,11 +225,35 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PSIVHFR0(n) (0x1e00 + (n) * 8) /* n = SI index */
#define ENETC_PSIVHFR1(n) (0x1e04 + (n) * 8) /* n = SI index */
#define ENETC_MMCSR 0x1f00
-#define ENETC_MMCSR_ME BIT(16)
+#define ENETC_MMCSR_LINK_FAIL BIT(31)
+#define ENETC_MMCSR_VT_MASK GENMASK(29, 23) /* Verify Time */
+#define ENETC_MMCSR_VT(x) (((x) << 23) & ENETC_MMCSR_VT_MASK)
+#define ENETC_MMCSR_GET_VT(x) (((x) & ENETC_MMCSR_VT_MASK) >> 23)
+#define ENETC_MMCSR_TXSTS_MASK GENMASK(22, 21) /* Merge Status */
+#define ENETC_MMCSR_GET_TXSTS(x) (((x) & ENETC_MMCSR_TXSTS_MASK) >> 21)
+#define ENETC_MMCSR_VSTS_MASK GENMASK(20, 18) /* Verify Status */
+#define ENETC_MMCSR_GET_VSTS(x) (((x) & ENETC_MMCSR_VSTS_MASK) >> 18)
+#define ENETC_MMCSR_VDIS BIT(17) /* Verify Disabled */
+#define ENETC_MMCSR_ME BIT(16) /* Merge Enabled */
+#define ENETC_MMCSR_RAFS_MASK GENMASK(9, 8) /* Remote Additional Fragment Size */
+#define ENETC_MMCSR_RAFS(x) (((x) << 8) & ENETC_MMCSR_RAFS_MASK)
+#define ENETC_MMCSR_GET_RAFS(x) (((x) & ENETC_MMCSR_RAFS_MASK) >> 8)
+#define ENETC_MMCSR_LAFS_MASK GENMASK(4, 3) /* Local Additional Fragment Size */
+#define ENETC_MMCSR_GET_LAFS(x) (((x) & ENETC_MMCSR_LAFS_MASK) >> 3)
+#define ENETC_MMCSR_LPA BIT(2) /* Local Preemption Active */
+#define ENETC_MMCSR_LPE BIT(1) /* Local Preemption Enabled */
+#define ENETC_MMCSR_LPS BIT(0) /* Local Preemption Supported */
+#define ENETC_MMFAECR 0x1f08
+#define ENETC_MMFSECR 0x1f0c
+#define ENETC_MMFAOCR 0x1f10
+#define ENETC_MMFCRXR 0x1f14
+#define ENETC_MMFCTXR 0x1f18
+#define ENETC_MMHCR 0x1f1c
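The MMCSR field accessors above are hand-rolled equivalents of FIELD_PREP()/FIELD_GET() over the masks they pair with. A standalone illustration of the encode/decode round trip for the Verify Time field (plain userspace C; the mask is written out because GENMASK() is a kernel macro):

/* Standalone illustration of the ENETC_MMCSR_VT()/GET_VT() arithmetic. */
#include <stdint.h>
#include <stdio.h>

#define MMCSR_VT_MASK	(0x7fUL << 23)			/* GENMASK(29, 23) */
#define MMCSR_VT(x)	(((uint32_t)(x) << 23) & MMCSR_VT_MASK)
#define MMCSR_GET_VT(x)	(((x) & MMCSR_VT_MASK) >> 23)

int main(void)
{
	uint32_t val = MMCSR_VT(10);	/* verifyTime of 10 ms */

	/* prints "encoded 0x05000000, decoded 10 ms" */
	printf("encoded 0x%08x, decoded %u ms\n", (unsigned int)val,
	       (unsigned int)MMCSR_GET_VT(val));
	return 0;
}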
#define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) /* n = TC index [0..7] */
+#define ENETC_PMAC_OFFSET 0x1000
+
#define ENETC_PM0_CMD_CFG 0x8008
-#define ENETC_PM1_CMD_CFG 0x9008
#define ENETC_PM0_TX_EN BIT(0)
#define ENETC_PM0_RX_EN BIT(1)
#define ENETC_PM0_PROMISC BIT(4)
@@ -245,11 +272,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_PAUSE_QUANTA 0x8054
#define ENETC_PM0_PAUSE_THRESH 0x8064
-#define ENETC_PM1_PAUSE_QUANTA 0x9054
-#define ENETC_PM1_PAUSE_THRESH 0x9064
#define ENETC_PM0_SINGLE_STEP 0x80c0
-#define ENETC_PM1_SINGLE_STEP 0x90c0
#define ENETC_PM0_SINGLE_STEP_CH BIT(7)
#define ENETC_PM0_SINGLE_STEP_EN BIT(31)
#define ENETC_SET_SINGLE_STEP_OFFSET(v) (((v) & 0xff) << 8)
@@ -276,58 +300,60 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PFMCAPR 0x1b38
#define ENETC_PFMCAPR_MSK GENMASK(15, 0)
-/* MAC counters */
-#define ENETC_PM0_REOCT 0x8100
-#define ENETC_PM0_RALN 0x8110
-#define ENETC_PM0_RXPF 0x8118
-#define ENETC_PM0_RFRM 0x8120
-#define ENETC_PM0_RFCS 0x8128
-#define ENETC_PM0_RVLAN 0x8130
-#define ENETC_PM0_RERR 0x8138
-#define ENETC_PM0_RUCA 0x8140
-#define ENETC_PM0_RMCA 0x8148
-#define ENETC_PM0_RBCA 0x8150
-#define ENETC_PM0_RDRP 0x8158
-#define ENETC_PM0_RPKT 0x8160
-#define ENETC_PM0_RUND 0x8168
-#define ENETC_PM0_R64 0x8170
-#define ENETC_PM0_R127 0x8178
-#define ENETC_PM0_R255 0x8180
-#define ENETC_PM0_R511 0x8188
-#define ENETC_PM0_R1023 0x8190
-#define ENETC_PM0_R1522 0x8198
-#define ENETC_PM0_R1523X 0x81A0
-#define ENETC_PM0_ROVR 0x81A8
-#define ENETC_PM0_RJBR 0x81B0
-#define ENETC_PM0_RFRG 0x81B8
-#define ENETC_PM0_RCNP 0x81C0
-#define ENETC_PM0_RDRNTP 0x81C8
-#define ENETC_PM0_TEOCT 0x8200
-#define ENETC_PM0_TOCT 0x8208
-#define ENETC_PM0_TCRSE 0x8210
-#define ENETC_PM0_TXPF 0x8218
-#define ENETC_PM0_TFRM 0x8220
-#define ENETC_PM0_TFCS 0x8228
-#define ENETC_PM0_TVLAN 0x8230
-#define ENETC_PM0_TERR 0x8238
-#define ENETC_PM0_TUCA 0x8240
-#define ENETC_PM0_TMCA 0x8248
-#define ENETC_PM0_TBCA 0x8250
-#define ENETC_PM0_TPKT 0x8260
-#define ENETC_PM0_TUND 0x8268
-#define ENETC_PM0_T64 0x8270
-#define ENETC_PM0_T127 0x8278
-#define ENETC_PM0_T255 0x8280
-#define ENETC_PM0_T511 0x8288
-#define ENETC_PM0_T1023 0x8290
-#define ENETC_PM0_T1522 0x8298
-#define ENETC_PM0_T1523X 0x82A0
-#define ENETC_PM0_TCNP 0x82C0
-#define ENETC_PM0_TDFR 0x82D0
-#define ENETC_PM0_TMCOL 0x82D8
-#define ENETC_PM0_TSCOL 0x82E0
-#define ENETC_PM0_TLCOL 0x82E8
-#define ENETC_PM0_TECOL 0x82F0
+/* Port MAC counters: Port MAC 0 corresponds to the eMAC and
+ * Port MAC 1 to the pMAC.
+ */
+#define ENETC_PM_REOCT(mac) (0x8100 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RALN(mac) (0x8110 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RXPF(mac) (0x8118 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RFRM(mac) (0x8120 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RFCS(mac) (0x8128 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RVLAN(mac) (0x8130 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RERR(mac) (0x8138 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RUCA(mac) (0x8140 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RMCA(mac) (0x8148 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RBCA(mac) (0x8150 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RDRP(mac) (0x8158 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RPKT(mac) (0x8160 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RUND(mac) (0x8168 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R64(mac) (0x8170 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R127(mac) (0x8178 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R255(mac) (0x8180 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R511(mac) (0x8188 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R1023(mac) (0x8190 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R1522(mac) (0x8198 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R1523X(mac) (0x81A0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_ROVR(mac) (0x81A8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RJBR(mac) (0x81B0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RFRG(mac) (0x81B8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RCNP(mac) (0x81C0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RDRNTP(mac) (0x81C8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TEOCT(mac) (0x8200 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TOCT(mac) (0x8208 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TCRSE(mac) (0x8210 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TXPF(mac) (0x8218 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TFRM(mac) (0x8220 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TFCS(mac) (0x8228 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TVLAN(mac) (0x8230 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TERR(mac) (0x8238 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TUCA(mac) (0x8240 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TMCA(mac) (0x8248 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TBCA(mac) (0x8250 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TPKT(mac) (0x8260 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TUND(mac) (0x8268 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T64(mac) (0x8270 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T127(mac) (0x8278 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T255(mac) (0x8280 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T511(mac) (0x8288 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T1023(mac) (0x8290 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T1522(mac) (0x8298 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T1523X(mac) (0x82A0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TCNP(mac) (0x82C0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TDFR(mac) (0x82D0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TMCOL(mac) (0x82D8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TSCOL(mac) (0x82E0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TLCOL(mac) (0x82E8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TECOL(mac) (0x82F0 + ENETC_PMAC_OFFSET * (mac))
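These parameterized macros replace the former PM0_*/PM1_* register pairs: mac = 0 selects the eMAC block and mac = 1 the pMAC block, which sits ENETC_PMAC_OFFSET (0x1000) above it. A standalone check of how one of them resolves, with offsets taken from the definitions above:

/* Standalone illustration of the port MAC counter macro parameterization. */
#include <stdio.h>

#define PMAC_OFFSET	0x1000
#define PM_TFRM(mac)	(0x8220 + PMAC_OFFSET * (mac))	/* mirrors ENETC_PM_TFRM() */

int main(void)
{
	/* prints "eMAC TFRM at 0x8220, pMAC TFRM at 0x9220" */
	printf("eMAC TFRM at 0x%x, pMAC TFRM at 0x%x\n", PM_TFRM(0), PM_TFRM(1));
	return 0;
}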
/* Port counters */
#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */
@@ -543,6 +569,7 @@ enum enetc_txbd_flags {
ENETC_TXBD_FLAGS_EX = BIT(6),
ENETC_TXBD_FLAGS_F = BIT(7)
};
+#define ENETC_TXBD_STATS_WIN BIT(7)
#define ENETC_TXBD_TXSTART_MASK GENMASK(24, 0)
#define ENETC_TXBD_FLAGS_OFFSET 24
@@ -881,7 +908,7 @@ struct sgcl_data {
u32 bth;
u32 ct;
u32 cte;
- struct sgce sgcl[0];
+ struct sgce sgcl[];
};
#define ENETC_CBDR_FMI_MR BIT(0)
@@ -941,14 +968,18 @@ static inline u32 enetc_usecs_to_cycles(u32 usecs)
return (u32)div_u64(usecs * ENETC_CLK, 1000000ULL);
}
+/* Port traffic class frame preemption register */
+#define ENETC_PTCFPR(n) (0x1910 + (n) * 4) /* n = [0..7] */
+#define ENETC_PTCFPR_FPE BIT(31)
+
/* port time gating control register */
-#define ENETC_QBV_PTGCR_OFFSET 0x11a00
-#define ENETC_QBV_TGE BIT(31)
-#define ENETC_QBV_TGPE BIT(30)
+#define ENETC_PTGCR 0x11a00
+#define ENETC_PTGCR_TGE BIT(31)
+#define ENETC_PTGCR_TGPE BIT(30)
/* Port time gating capability register */
-#define ENETC_QBV_PTGCAPR_OFFSET 0x11a08
-#define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0)
+#define ENETC_PTGCAPR 0x11a08
+#define ENETC_PTGCAPR_MAX_GCL_LEN_MASK GENMASK(15, 0)
/* Port time specific departure */
#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n))
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
index 91f02c505028..b307bef4dc29 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
@@ -127,11 +127,6 @@ static int enetc_ierb_probe(struct platform_device *pdev)
return 0;
}
-static int enetc_ierb_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static const struct of_device_id enetc_ierb_match[] = {
{ .compatible = "fsl,ls1028a-enetc-ierb", },
{},
@@ -144,7 +139,6 @@ static struct platform_driver enetc_ierb_driver = {
.of_match_table = enetc_ierb_match,
},
.probe = enetc_ierb_probe,
- .remove = enetc_ierb_remove,
};
module_platform_driver(enetc_ierb_driver);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 70e6d97b380f..998aaa394e9c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -55,7 +55,8 @@ static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv)
is_busy, !is_busy, 10, 10 * 1000);
}
-int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value)
{
struct enetc_mdio_priv *mdio_priv = bus->priv;
u32 mdio_ctl, mdio_cfg;
@@ -63,14 +64,39 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
int ret;
mdio_cfg = ENETC_EMDIO_CFG;
- if (regnum & MII_ADDR_C45) {
- dev_addr = (regnum >> 16) & 0x1f;
- mdio_cfg |= MDIO_CFG_ENC45;
- } else {
- /* clause 22 (ie 1G) */
- dev_addr = regnum & 0x1f;
- mdio_cfg &= ~MDIO_CFG_ENC45;
- }
+ dev_addr = regnum & 0x1f;
+ mdio_cfg &= ~MDIO_CFG_ENC45;
+
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ /* set port and dev addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
+
+ /* write the value */
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_mdio_write_c22);
+
+int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum, u16 value)
+{
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ u32 mdio_ctl, mdio_cfg;
+ int ret;
+
+ mdio_cfg = ENETC_EMDIO_CFG;
+ mdio_cfg |= MDIO_CFG_ENC45;
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
@@ -83,13 +109,11 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
- if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(mdio_priv);
- if (ret)
- return ret;
- }
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
/* write the value */
enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value);
@@ -100,9 +124,9 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
return 0;
}
-EXPORT_SYMBOL_GPL(enetc_mdio_write);
+EXPORT_SYMBOL_GPL(enetc_mdio_write_c45);
-int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
{
struct enetc_mdio_priv *mdio_priv = bus->priv;
u32 mdio_ctl, mdio_cfg;
@@ -110,14 +134,51 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
int ret;
mdio_cfg = ENETC_EMDIO_CFG;
- if (regnum & MII_ADDR_C45) {
- dev_addr = (regnum >> 16) & 0x1f;
- mdio_cfg |= MDIO_CFG_ENC45;
- } else {
- dev_addr = regnum & 0x1f;
- mdio_cfg &= ~MDIO_CFG_ENC45;
+ dev_addr = regnum & 0x1f;
+ mdio_cfg &= ~MDIO_CFG_ENC45;
+
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ /* set port and device addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
+
+ /* initiate the read */
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ /* return all Fs if nothing was there */
+ if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
+ dev_dbg(&bus->dev,
+ "Error while reading PHY%d reg at %d.%d\n",
+ phy_id, dev_addr, regnum);
+ return 0xffff;
}
+ value = enetc_mdio_rd(mdio_priv, ENETC_MDIO_DATA) & 0xffff;
+
+ return value;
+}
+EXPORT_SYMBOL_GPL(enetc_mdio_read_c22);
+
+int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum)
+{
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ u32 mdio_ctl, mdio_cfg;
+ u16 value;
+ int ret;
+
+ mdio_cfg = ENETC_EMDIO_CFG;
+ mdio_cfg |= MDIO_CFG_ENC45;
+
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
ret = enetc_mdio_wait_complete(mdio_priv);
@@ -129,13 +190,11 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
- if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(mdio_priv);
- if (ret)
- return ret;
- }
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
/* initiate the read */
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
@@ -147,7 +206,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
/* return all Fs if nothing was there */
if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
- "Error while reading PHY%d reg at %d.%hhu\n",
+ "Error while reading PHY%d reg at %d.%d\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
@@ -156,7 +215,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return value;
}
-EXPORT_SYMBOL_GPL(enetc_mdio_read);
+EXPORT_SYMBOL_GPL(enetc_mdio_read_c45);
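With the split into clause-22 and clause-45 callbacks, the device address no longer has to be packed into regnum: the removed branches above decoded it as (regnum >> 16) & 0x1f when MII_ADDR_C45 was set, whereas enetc_mdio_read_c45()/enetc_mdio_write_c45() receive dev_addr as an explicit parameter. A standalone illustration of the legacy packed encoding that this change retires (MII_ADDR_C45 is bit 30 in the kernel's mdio.h; the address values are arbitrary):

/* Standalone illustration of the legacy MII_ADDR_C45 regnum packing. */
#include <stdio.h>

#define MII_ADDR_C45	(1 << 30)

int main(void)
{
	int dev_addr = 0x1e, reg = 0x0100;
	int regnum = MII_ADDR_C45 | (dev_addr << 16) | reg;	/* old encoding */

	/* prints "dev_addr 0x1e, reg 0x100" */
	printf("dev_addr 0x%x, reg 0x%x\n",
	       (regnum >> 16) & 0x1f, regnum & 0xffff);
	return 0;
}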
struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
{
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
index 15f37c5b8dc1..a1b595bd7993 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -39,8 +39,10 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
}
bus->name = ENETC_MDIO_BUS_NAME;
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = hw;
@@ -69,7 +71,7 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
return 0;
err_mdiobus_reg:
- pci_release_mem_regions(pdev);
+ pci_release_region(pdev, 0);
err_pci_mem_reg:
pci_disable_device(pdev);
err_pci_enable:
@@ -88,7 +90,7 @@ static void enetc_pci_mdio_remove(struct pci_dev *pdev)
mdiobus_unregister(bus);
mdio_priv = bus->priv;
iounmap(mdio_priv->hw->port);
- pci_release_mem_regions(pdev);
+ pci_release_region(pdev, 0);
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 0e87c7043b77..7cd22d370caa 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -8,6 +8,7 @@
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/pcs-lynx.h>
#include "enetc_ierb.h"
#include "enetc_pf.h"
@@ -318,24 +319,23 @@ static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
static void enetc_set_loopback(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
u32 reg;
- reg = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ reg = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE);
if (reg & ENETC_PM0_IFM_RG) {
/* RGMII mode */
reg = (reg & ~ENETC_PM0_IFM_RLP) |
(en ? ENETC_PM0_IFM_RLP : 0);
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, reg);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, reg);
} else {
/* assume SGMII mode */
- reg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+ reg = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG);
reg = (reg & ~ENETC_PM0_CMD_XGLP) |
(en ? ENETC_PM0_CMD_XGLP : 0);
reg = (reg & ~ENETC_PM0_CMD_PHY_TX_EN) |
(en ? ENETC_PM0_CMD_PHY_TX_EN : 0);
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, reg);
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, reg);
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, reg);
}
}
@@ -515,68 +515,72 @@ static void enetc_port_si_configure(struct enetc_si *si)
enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
}
-static void enetc_configure_port_mac(struct enetc_hw *hw)
+void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *max_sdu)
{
int tc;
- enetc_port_wr(hw, ENETC_PM0_MAXFRM,
- ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+ for (tc = 0; tc < 8; tc++) {
+ u32 val = ENETC_MAC_MAXFRM_SIZE;
+
+ if (max_sdu[tc])
+ val = max_sdu[tc] + VLAN_ETH_HLEN;
+
+ enetc_port_wr(hw, ENETC_PTCMSDUR(tc), val);
+ }
+}
+
+void enetc_reset_ptcmsdur(struct enetc_hw *hw)
+{
+ int tc;
for (tc = 0; tc < 8; tc++)
enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
+}
+
+static void enetc_configure_port_mac(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
- ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
+ enetc_port_mac_wr(si, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
- ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
+ enetc_reset_ptcmsdur(hw);
+
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
+ ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
/* On LS1028A, the MAC RX FIFO defaults to 2, which is too high
* and may lead to RX lock-up under traffic. Set it to 1 instead,
* as recommended by the hardware team.
*/
- enetc_port_wr(hw, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
+ enetc_port_mac_wr(si, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
}
-static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
+static void enetc_mac_config(struct enetc_si *si, phy_interface_t phy_mode)
{
u32 val;
if (phy_interface_mode_is_rgmii(phy_mode)) {
- val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ val = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE);
val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);
val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val);
}
if (phy_mode == PHY_INTERFACE_MODE_USXGMII) {
val = ENETC_PM0_IFM_FULL_DPX | ENETC_PM0_IFM_IFMODE_XGMII;
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val);
}
}
-static void enetc_mac_enable(struct enetc_hw *hw, bool en)
+static void enetc_mac_enable(struct enetc_si *si, bool en)
{
- u32 val = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+ u32 val = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG);
val &= ~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
val |= en ? (ENETC_PM0_TX_EN | ENETC_PM0_RX_EN) : 0;
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, val);
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, val);
-}
-
-static void enetc_configure_port_pmac(struct enetc_hw *hw)
-{
- u32 temp;
-
- /* Set pMAC step lock */
- temp = enetc_port_rd(hw, ENETC_PFPMR);
- enetc_port_wr(hw, ENETC_PFPMR,
- temp | ENETC_PFPMR_PMACE | ENETC_PFPMR_MWLM);
-
- temp = enetc_port_rd(hw, ENETC_MMCSR);
- enetc_port_wr(hw, ENETC_MMCSR, temp | ENETC_MMCSR_ME);
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, val);
}
static void enetc_configure_port(struct enetc_pf *pf)
@@ -584,9 +588,7 @@ static void enetc_configure_port(struct enetc_pf *pf)
u8 hash_key[ENETC_RSSHASH_KEY_SIZE];
struct enetc_hw *hw = &pf->si->hw;
- enetc_configure_port_pmac(hw);
-
- enetc_configure_port_mac(hw);
+ enetc_configure_port_mac(pf->si);
enetc_port_si_configure(pf->si);
@@ -708,6 +710,13 @@ static int enetc_pf_set_features(struct net_device *ndev,
{
netdev_features_t changed = ndev->features ^ features;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (changed & NETIF_F_HW_TC) {
+ err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
+ if (err)
+ return err;
+ }
if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
struct enetc_pf *pf = enetc_si_priv(priv->si);
@@ -721,7 +730,30 @@ static int enetc_pf_set_features(struct net_device *ndev,
if (changed & NETIF_F_LOOPBACK)
enetc_set_loopback(ndev, !!(features & NETIF_F_LOOPBACK));
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return enetc_qos_query_caps(ndev, type_data);
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return enetc_setup_tc_taprio(ndev, type_data);
+ case TC_SETUP_QDISC_CBS:
+ return enetc_setup_tc_cbs(ndev, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return enetc_setup_tc_txtime(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return enetc_setup_tc_psfp(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
static const struct net_device_ops enetc_ndev_ops = {
@@ -738,7 +770,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk,
.ndo_set_features = enetc_pf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
- .ndo_setup_tc = enetc_setup_tc,
+ .ndo_setup_tc = enetc_pf_setup_tc,
.ndo_bpf = enetc_setup_bpf,
.ndo_xdp_xmit = enetc_xdp_xmit,
};
@@ -775,9 +807,9 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features |= NETIF_F_RXHASH;
ndev->priv_flags |= IFF_UNICAST_FLT;
-
- if (si->hw_features & ENETC_SI_F_QBV)
- priv->active_offloads |= ENETC_F_QBV;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) {
priv->active_offloads |= ENETC_F_QCI;
@@ -801,8 +833,10 @@ static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
return -ENOMEM;
bus->name = "Freescale ENETC MDIO Bus";
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = &pf->si->hw;
@@ -828,8 +862,8 @@ static int enetc_imdio_create(struct enetc_pf *pf)
{
struct device *dev = &pf->si->pdev->dev;
struct enetc_mdio_priv *mdio_priv;
- struct lynx_pcs *pcs_lynx;
- struct mdio_device *pcs;
+ struct phylink_pcs *phylink_pcs;
+ struct mdio_device *mdio_device;
struct mii_bus *bus;
int err;
@@ -838,8 +872,10 @@ static int enetc_imdio_create(struct enetc_pf *pf)
return -ENOMEM;
bus->name = "Freescale ENETC internal MDIO Bus";
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
bus->parent = dev;
bus->phy_mask = ~0;
mdio_priv = bus->priv;
@@ -853,23 +889,23 @@ static int enetc_imdio_create(struct enetc_pf *pf)
goto free_mdio_bus;
}
- pcs = mdio_device_create(bus, 0);
- if (IS_ERR(pcs)) {
- err = PTR_ERR(pcs);
- dev_err(dev, "cannot create pcs (%d)\n", err);
+ mdio_device = mdio_device_create(bus, 0);
+ if (IS_ERR(mdio_device)) {
+ err = PTR_ERR(mdio_device);
+ dev_err(dev, "cannot create mdio device (%d)\n", err);
goto unregister_mdiobus;
}
- pcs_lynx = lynx_pcs_create(pcs);
- if (!pcs_lynx) {
- mdio_device_free(pcs);
+ phylink_pcs = lynx_pcs_create(mdio_device);
+ if (!phylink_pcs) {
+ mdio_device_free(mdio_device);
err = -ENOMEM;
dev_err(dev, "cannot create lynx pcs (%d)\n", err);
goto unregister_mdiobus;
}
pf->imdio = bus;
- pf->pcs = pcs_lynx;
+ pf->pcs = phylink_pcs;
return 0;
@@ -882,8 +918,11 @@ free_mdio_bus:
static void enetc_imdio_remove(struct enetc_pf *pf)
{
+ struct mdio_device *mdio_device;
+
if (pf->pcs) {
- mdio_device_free(pf->pcs->mdio);
+ mdio_device = lynx_get_mdio_device(pf->pcs);
+ mdio_device_free(mdio_device);
lynx_pcs_destroy(pf->pcs);
}
if (pf->imdio) {
@@ -930,43 +969,12 @@ static void enetc_mdiobus_destroy(struct enetc_pf *pf)
enetc_imdio_remove(pf);
}
-static void enetc_pl_mac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+static struct phylink_pcs *
+enetc_pl_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_INTERNAL &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- state->interface != PHY_INTERFACE_MODE_2500BASEX &&
- state->interface != PHY_INTERFACE_MODE_USXGMII &&
- !phy_interface_mode_is_rgmii(state->interface)) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseT_Full);
-
- if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX ||
- state->interface == PHY_INTERFACE_MODE_USXGMII) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ return pf->pcs;
}
static void enetc_pl_mac_config(struct phylink_config *config,
@@ -974,20 +982,15 @@ static void enetc_pl_mac_config(struct phylink_config *config,
const struct phylink_link_state *state)
{
struct enetc_pf *pf = phylink_to_enetc_pf(config);
- struct enetc_ndev_priv *priv;
- enetc_mac_config(&pf->si->hw, state->interface);
-
- priv = netdev_priv(pf->si->ndev);
- if (pf->pcs)
- phylink_set_pcs(priv->phylink, &pf->pcs->pcs);
+ enetc_mac_config(pf->si, state->interface);
}
-static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex)
+static void enetc_force_rgmii_mac(struct enetc_si *si, int speed, int duplex)
{
u32 old_val, val;
- old_val = val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ old_val = val = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE);
if (speed == SPEED_1000) {
val &= ~ENETC_PM0_IFM_SSP_MASK;
@@ -1008,7 +1011,7 @@ static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex)
if (val == old_val)
return;
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val);
}
static void enetc_pl_mac_link_up(struct phylink_config *config,
@@ -1020,17 +1023,19 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
u32 pause_off_thresh = 0, pause_on_thresh = 0;
u32 init_quanta = 0, refresh_quanta = 0;
struct enetc_hw *hw = &pf->si->hw;
+ struct enetc_si *si = pf->si;
struct enetc_ndev_priv *priv;
u32 rbmr, cmd_cfg;
int idx;
priv = netdev_priv(pf->si->ndev);
- if (priv->active_offloads & ENETC_F_QBV)
+
+ if (pf->si->hw_features & ENETC_SI_F_QBV)
enetc_sched_speed_set(priv, speed);
if (!phylink_autoneg_inband(mode) &&
phy_interface_mode_is_rgmii(interface))
- enetc_force_rgmii_mac(hw, speed, duplex);
+ enetc_force_rgmii_mac(si, speed, duplex);
/* Flow control */
for (idx = 0; idx < priv->num_rx_rings; idx++) {
@@ -1066,24 +1071,24 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
}
- enetc_port_wr(hw, ENETC_PM0_PAUSE_QUANTA, init_quanta);
- enetc_port_wr(hw, ENETC_PM1_PAUSE_QUANTA, init_quanta);
- enetc_port_wr(hw, ENETC_PM0_PAUSE_THRESH, refresh_quanta);
- enetc_port_wr(hw, ENETC_PM1_PAUSE_THRESH, refresh_quanta);
+ enetc_port_mac_wr(si, ENETC_PM0_PAUSE_QUANTA, init_quanta);
+ enetc_port_mac_wr(si, ENETC_PM0_PAUSE_THRESH, refresh_quanta);
enetc_port_wr(hw, ENETC_PPAUONTR, pause_on_thresh);
enetc_port_wr(hw, ENETC_PPAUOFFTR, pause_off_thresh);
- cmd_cfg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+ cmd_cfg = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG);
if (rx_pause)
cmd_cfg &= ~ENETC_PM0_PAUSE_IGN;
else
cmd_cfg |= ENETC_PM0_PAUSE_IGN;
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, cmd_cfg);
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, cmd_cfg);
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, cmd_cfg);
+
+ enetc_mac_enable(si, true);
- enetc_mac_enable(hw, true);
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_mm_link_state_update(priv, true);
}
static void enetc_pl_mac_link_down(struct phylink_config *config,
@@ -1091,12 +1096,19 @@ static void enetc_pl_mac_link_down(struct phylink_config *config,
phy_interface_t interface)
{
struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ struct enetc_si *si = pf->si;
+ struct enetc_ndev_priv *priv;
+
+ priv = netdev_priv(si->ndev);
- enetc_mac_enable(&pf->si->hw, false);
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_mm_link_state_update(priv, false);
+
+ enetc_mac_enable(si, false);
}
static const struct phylink_mac_ops enetc_mac_phylink_ops = {
- .validate = enetc_pl_mac_validate,
+ .mac_select_pcs = enetc_pl_mac_select_pcs,
.mac_config = enetc_pl_mac_config,
.mac_link_up = enetc_pl_mac_link_up,
.mac_link_down = enetc_pl_mac_link_down,
@@ -1111,6 +1123,18 @@ static int enetc_phylink_create(struct enetc_ndev_priv *priv,
pf->phylink_config.dev = &priv->ndev->dev;
pf->phylink_config.type = PHYLINK_NETDEV;
+ pf->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ pf->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_USXGMII,
+ pf->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(pf->phylink_config.supported_interfaces);
phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(node),
pf->if_mode, &enetc_mac_phylink_ops);
@@ -1126,8 +1150,7 @@ static int enetc_phylink_create(struct enetc_ndev_priv *priv,
static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
{
- if (priv->phylink)
- phylink_destroy(priv->phylink);
+ phylink_destroy(priv->phylink);
}
/* Initialize the entire shared memory for the flow steering entries
@@ -1274,6 +1297,8 @@ static int enetc_pf_probe(struct pci_dev *pdev,
priv = netdev_priv(ndev);
+ mutex_init(&priv->mm_lock);
+
enetc_init_si_rings_params(priv);
err = enetc_alloc_si_resources(priv);
@@ -1294,16 +1319,20 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_alloc_msix;
}
- if (!of_get_phy_mode(node, &pf->if_mode)) {
- err = enetc_mdiobus_create(pf, node);
- if (err)
- goto err_mdiobus_create;
-
- err = enetc_phylink_create(priv, node);
- if (err)
- goto err_phylink_create;
+ err = of_get_phy_mode(node, &pf->if_mode);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to read PHY mode\n");
+ goto err_phy_mode;
}
+ err = enetc_mdiobus_create(pf, node);
+ if (err)
+ goto err_mdiobus_create;
+
+ err = enetc_phylink_create(priv, node);
+ if (err)
+ goto err_phylink_create;
+
err = register_netdev(ndev);
if (err)
goto err_reg_netdev;
@@ -1315,6 +1344,7 @@ err_reg_netdev:
err_phylink_create:
enetc_mdiobus_destroy(pf);
err_mdiobus_create:
+err_phy_mode:
enetc_free_msix(priv);
err_config_si:
err_alloc_msix:
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
index 263946c51e37..c26bd66e4597 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -2,7 +2,7 @@
/* Copyright 2017-2019 NXP */
#include "enetc.h"
-#include <linux/pcs-lynx.h>
+#include <linux/phylink.h>
#define ENETC_PF_NUM_RINGS 8
@@ -46,7 +46,7 @@ struct enetc_pf {
struct mii_bus *mdio; /* saved for cleanup */
struct mii_bus *imdio;
- struct lynx_pcs *pcs;
+ struct phylink_pcs *pcs;
phy_interface_t if_mode;
struct phylink_config phylink_config;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index 36b4f51dd297..17c097cef7d4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -42,15 +42,10 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
if (err)
return dev_err_probe(&pdev->dev, err, "device enable failed\n");
- /* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, KBUILD_MODNAME);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 0536d2c76fbc..83c27bbbc6ed 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -11,14 +11,14 @@
static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
{
- return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
- & ENETC_QBV_MAX_GCL_LEN_MASK;
+ return enetc_rd(hw, ENETC_PTGCAPR) & ENETC_PTGCAPR_MAX_GCL_LEN_MASK;
}
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
{
+ struct enetc_hw *hw = &priv->si->hw;
u32 old_speed = priv->speed;
- u32 pspeed;
+ u32 pspeed, tmp;
if (speed == old_speed)
return;
@@ -39,36 +39,38 @@ void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
}
priv->speed = speed;
- enetc_port_wr(&priv->si->hw, ENETC_PMR,
- (enetc_port_rd(&priv->si->hw, ENETC_PMR)
- & (~ENETC_PMR_PSPEED_MASK))
- | pspeed);
+ tmp = enetc_port_rd(hw, ENETC_PMR);
+ enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed);
}
static int enetc_setup_taprio(struct net_device *ndev,
struct tc_taprio_qopt_offload *admin_conf)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_cbd cbd = {.cmd = 0};
struct tgs_gcl_conf *gcl_config;
struct tgs_gcl_data *gcl_data;
- struct gce *gce;
dma_addr_t dma;
+ struct gce *gce;
u16 data_size;
u16 gcl_len;
+ void *tmp;
u32 tge;
int err;
int i;
- if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw))
+ if (admin_conf->num_entries > enetc_get_max_gcl_len(hw))
return -EINVAL;
gcl_len = admin_conf->num_entries;
- tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
+ tge = enetc_rd(hw, ENETC_PTGCR);
if (!admin_conf->enable) {
- enetc_wr(&priv->si->hw,
- ENETC_QBV_PTGCR_OFFSET,
- tge & (~ENETC_QBV_TGE));
+ enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
+ enetc_reset_ptcmsdur(hw);
+
+ priv->active_offloads &= ~ENETC_F_QBV;
+
return 0;
}
@@ -82,8 +84,9 @@ static int enetc_setup_taprio(struct net_device *ndev,
gcl_config = &cbd.gcl_conf;
data_size = struct_size(gcl_data, entry, gcl_len);
- gcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!gcl_data)
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&gcl_data);
+ if (!tmp)
return -ENOMEM;
gce = (struct gce *)(gcl_data + 1);
@@ -107,61 +110,48 @@ static int enetc_setup_taprio(struct net_device *ndev,
temp_gce->period = cpu_to_le32(temp_entry->interval);
}
- cbd.length = cpu_to_le16(data_size);
cbd.status_flags = 0;
- dma = dma_map_single(&priv->si->pdev->dev, gcl_data,
- data_size, DMA_TO_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- kfree(gcl_data);
- return -ENOMEM;
- }
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
cbd.cls = BDCR_CMD_PORT_GCL;
cbd.status_flags = 0;
- enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
- tge | ENETC_QBV_TGE);
+ enetc_wr(hw, ENETC_PTGCR, tge | ENETC_PTGCR_TGE);
err = enetc_send_cmd(priv->si, &cbd);
if (err)
- enetc_wr(&priv->si->hw,
- ENETC_QBV_PTGCR_OFFSET,
- tge & (~ENETC_QBV_TGE));
+ enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
- dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_TO_DEVICE);
- kfree(gcl_data);
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
- return err;
+ if (err)
+ return err;
+
+ enetc_set_ptcmsdur(hw, admin_conf->max_sdu);
+ priv->active_offloads |= ENETC_F_QBV;
+
+ return 0;
}
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
{
struct tc_taprio_qopt_offload *taprio = type_data;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int err;
- int i;
+ int err, i;
/* TSD and Qbv are mutually exclusive in hardware */
for (i = 0; i < priv->num_tx_rings; i++)
if (priv->tx_ring[i]->tsd_enable)
return -EBUSY;
- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(&priv->si->hw,
- priv->tx_ring[i]->index,
- taprio->enable ? i : 0);
+ err = enetc_setup_tc_mqprio(ndev, &taprio->mqprio);
+ if (err)
+ return err;
err = enetc_setup_taprio(ndev, taprio);
-
- if (err)
- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(&priv->si->hw,
- priv->tx_ring[i]->index,
- taprio->enable ? 0 : i);
+ if (err) {
+ taprio->mqprio.qopt.num_tc = 0;
+ enetc_setup_tc_mqprio(ndev, &taprio->mqprio);
+ }
return err;
}
@@ -182,7 +172,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
struct tc_cbs_qopt_offload *cbs = type_data;
u32 port_transmit_rate = priv->speed;
u8 tc_nums = netdev_get_num_tc(ndev);
- struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &priv->si->hw;
u32 hi_credit_bit, hi_credit_reg;
u32 max_interference_size;
u32 port_frame_max_size;
@@ -203,15 +193,15 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
* lower than this TC have been disabled.
*/
if (tc == prio_top &&
- enetc_get_cbs_enable(&si->hw, prio_next)) {
+ enetc_get_cbs_enable(hw, prio_next)) {
dev_err(&ndev->dev,
"Disable TC%d before disable TC%d\n",
prio_next, tc);
return -EINVAL;
}
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
- enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0);
return 0;
}
@@ -228,13 +218,13 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
* higher than this TC have been enabled.
*/
if (tc == prio_next) {
- if (!enetc_get_cbs_enable(&si->hw, prio_top)) {
+ if (!enetc_get_cbs_enable(hw, prio_top)) {
dev_err(&ndev->dev,
"Enable TC%d first before enable TC%d\n",
prio_top, prio_next);
return -EINVAL;
}
- bw_sum += enetc_get_cbs_bw(&si->hw, prio_top);
+ bw_sum += enetc_get_cbs_bw(hw, prio_top);
}
if (bw_sum + bw >= 100) {
@@ -243,7 +233,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
return -EINVAL;
}
- enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
+ enetc_port_rd(hw, ENETC_PTCMSDUR(tc));
/* For top prio TC, the max_interfrence_size is maxSizedFrame.
*
@@ -263,8 +253,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
u32 m0, ma, r0, ra;
m0 = port_frame_max_size * 8;
- ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8;
- ra = enetc_get_cbs_bw(&si->hw, prio_top) *
+ ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8;
+ ra = enetc_get_cbs_bw(hw, prio_top) *
port_transmit_rate * 10000ULL;
r0 = port_transmit_rate * 1000000ULL;
max_interference_size = m0 + ma +
@@ -284,10 +274,10 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
port_transmit_rate * 1000000ULL);
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
/* Set bw register and enable this traffic class */
- enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
return 0;
}
@@ -297,6 +287,7 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_etf_qopt_offload *qopt = type_data;
u8 tc_nums = netdev_get_num_tc(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int tc;
if (!tc_nums)
@@ -307,17 +298,12 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
if (tc < 0 || tc >= priv->num_tx_rings)
return -EINVAL;
- /* Do not support TXSTART and TX CSUM offload simutaniously */
- if (ndev->features & NETIF_F_CSUM_MASK)
- return -EBUSY;
-
/* TSD and Qbv are mutually exclusive in hardware */
- if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
+ if (enetc_rd(hw, ENETC_PTGCR) & ENETC_PTGCR_TGE)
return -EBUSY;
priv->tx_ring[tc]->tsd_enable = qopt->enable;
- enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc),
- qopt->enable ? ENETC_TSDE : 0);
+ enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0);
return 0;
}
@@ -450,6 +436,7 @@ static struct actions_fwd enetc_act_fwd[] = {
};
static struct enetc_psfp epsfp = {
+ .dev_bitmap = 0,
.psfp_sfi_bitmap = NULL,
};
@@ -463,8 +450,9 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
struct enetc_cbd cbd = {.cmd = 0};
struct streamid_data *si_data;
struct streamid_conf *si_conf;
- u16 data_size;
dma_addr_t dma;
+ u16 data_size;
+ void *tmp;
int port;
int err;
@@ -485,21 +473,11 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
cbd.status_flags = 0;
data_size = sizeof(struct streamid_data);
- si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!si_data)
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&si_data);
+ if (!tmp)
return -ENOMEM;
- cbd.length = cpu_to_le16(data_size);
- dma = dma_map_single(&priv->si->pdev->dev, si_data,
- data_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- err = -ENOMEM;
- goto out;
- }
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
eth_broadcast_addr(si_data->dmac);
si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
+ ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
@@ -520,11 +498,6 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
goto out;
/* Enable the entry overwrite again incase space flushed by hardware */
- memset(&cbd, 0, sizeof(cbd));
-
- cbd.index = cpu_to_le16((u16)sid->index);
- cbd.cmd = 0;
- cbd.cls = BDCR_CMD_STREAM_IDENTIFY;
cbd.status_flags = 0;
si_conf->en = 0x80;
@@ -537,11 +510,6 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
memset(si_data, 0, data_size);
- cbd.length = cpu_to_le16(data_size);
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
-
/* VIDM default to be 1.
* VID Match. If set (b1) then the VID must match, otherwise
* any VID is considered a match. VIDM setting is only used
@@ -561,10 +529,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
err = enetc_send_cmd(priv->si, &cbd);
out:
- if (!dma_mapping_error(&priv->si->pdev->dev, dma))
- dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE);
-
- kfree(si_data);
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
return err;
}
@@ -635,6 +600,7 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
struct sfi_counter_data *data_buf;
dma_addr_t dma;
u16 data_size;
+ void *tmp;
int err;
cbd.index = cpu_to_le16((u16)index);
@@ -643,21 +609,11 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
cbd.status_flags = 0;
data_size = sizeof(struct sfi_counter_data);
- data_buf = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!data_buf)
- return -ENOMEM;
- dma = dma_map_single(&priv->si->pdev->dev, data_buf,
- data_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- err = -ENOMEM;
- goto exit;
- }
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
-
- cbd.length = cpu_to_le16(data_size);
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&data_buf);
+ if (!tmp)
+ return -ENOMEM;
err = enetc_send_cmd(priv->si, &cbd);
if (err)
@@ -684,7 +640,8 @@ static int enetc_streamcounter_hw_get(struct enetc_ndev_priv *priv,
data_buf->flow_meter_dropl;
exit:
- kfree(data_buf);
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
+
return err;
}
@@ -726,6 +683,7 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
dma_addr_t dma;
u16 data_size;
int err, i;
+ void *tmp;
u64 now;
cbd.index = cpu_to_le16(sgi->index);
@@ -772,25 +730,11 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
sgcl_config->acl_len = (sgi->num_entries - 1) & 0x3;
data_size = struct_size(sgcl_data, sgcl, sgi->num_entries);
-
- sgcl_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
- if (!sgcl_data)
+ tmp = enetc_cbd_alloc_data_mem(priv->si, &cbd, data_size,
+ &dma, (void *)&sgcl_data);
+ if (!tmp)
return -ENOMEM;
- cbd.length = cpu_to_le16(data_size);
-
- dma = dma_map_single(&priv->si->pdev->dev,
- sgcl_data, data_size,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
- netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- kfree(sgcl_data);
- return -ENOMEM;
- }
-
- cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
- cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
-
sgce = &sgcl_data->sgcl[0];
sgcl_config->agtst = 0x80;
@@ -844,8 +788,7 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv,
err = enetc_send_cmd(priv->si, &cbd);
exit:
- kfree(sgcl_data);
-
+ enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
return err;
}
@@ -1074,6 +1017,46 @@ static struct actions_fwd *enetc_check_flow_actions(u64 acts,
return NULL;
}
+static int enetc_psfp_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_pkt_ps) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "QoS offload not support packets per second");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
struct flow_cls_offload *f)
{
@@ -1182,7 +1165,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
}
/* parsing gate action */
- if (entryg->gate.index >= priv->psfp_cap.max_psfp_gate) {
+ if (entryg->hw_index >= priv->psfp_cap.max_psfp_gate) {
NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
err = -ENOSPC;
goto free_filter;
@@ -1202,7 +1185,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
}
refcount_set(&sgi->refcount, 1);
- sgi->index = entryg->gate.index;
+ sgi->index = entryg->hw_index;
sgi->init_ipv = entryg->gate.prio;
sgi->basetime = entryg->gate.basetime;
sgi->cycletime = entryg->gate.cycletime;
@@ -1230,11 +1213,10 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
/* Flow meter and max frame size */
if (entryp) {
- if (entryp->police.rate_pkt_ps) {
- NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
- err = -EOPNOTSUPP;
+ err = enetc_psfp_policer_validate(&rule->action, entryp, extack);
+ if (err)
goto free_sfi;
- }
+
if (entryp->police.burst) {
fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
if (!fmi) {
@@ -1244,7 +1226,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
refcount_set(&fmi->refcount, 1);
fmi->cir = entryp->police.rate_bytes_ps;
fmi->cbs = entryp->police.burst;
- fmi->index = entryp->police.index;
+ fmi->index = entryp->hw_index;
filter->flags |= ENETC_PSFP_FLAGS_FMI;
filter->fmi_index = fmi->index;
sfi->meter_id = fmi->index;
@@ -1265,7 +1247,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
int index;
index = enetc_get_free_index(priv);
- if (sfi->handle < 0) {
+ if (index < 0) {
NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
err = -ENOSPC;
goto free_fmi;
@@ -1529,6 +1511,29 @@ int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
+int enetc_set_psfp(struct net_device *ndev, bool en)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (en) {
+ err = enetc_psfp_enable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads |= ENETC_F_QCI;
+ return 0;
+ }
+
+ err = enetc_psfp_disable(priv);
+ if (err)
+ return err;
+
+ priv->active_offloads &= ~ENETC_F_QCI;
+
+ return 0;
+}
+
int enetc_psfp_init(struct enetc_ndev_priv *priv)
{
if (epsfp.psfp_sfi_bitmap)
@@ -1590,3 +1595,30 @@ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
return 0;
}
+
+int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_query_caps_base *base = type_data;
+ struct enetc_si *si = priv->si;
+
+ switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (si->hw_features & ENETC_SI_F_QBV)
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
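Throughout enetc_qos.c the open-coded kzalloc()/dma_map_single() sequences around control BD (CBD) data buffers are replaced by a shared allocate/free helper pair, which also fills in the CBD length and address words that each call site used to set by hand. The helpers themselves live outside this excerpt; the sketch below is only a plausible shape inferred from the call sites above and must not be read as the real implementation.

/* Hypothetical sketch of the allocation helper, inferred from calls such as
 * enetc_cbd_alloc_data_mem(si, &cbd, size, &dma, (void *)&data_buf):
 * allocate DMA-able memory, record its length and bus address in the CBD,
 * and hand back both the raw buffer (kept for the matching free) and the
 * data pointer the caller will fill in.
 */
static void *cbd_alloc_data_mem_sketch(struct enetc_si *si, struct enetc_cbd *cbd,
				       int size, dma_addr_t *dma, void **data)
{
	void *buf;

	buf = dma_alloc_coherent(&si->pdev->dev, size, dma, GFP_KERNEL);
	if (!buf)
		return NULL;

	cbd->length = cpu_to_le16(size);
	cbd->addr[0] = cpu_to_le32(lower_32_bits(*dma));
	cbd->addr[1] = cpu_to_le32(upper_32_bits(*dma));
	*data = buf;

	return buf;
}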
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 17924305afa2..dfcaac302e24 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -88,7 +88,20 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
static int enetc_vf_set_features(struct net_device *ndev,
netdev_features_t features)
{
- return enetc_set_features(ndev, features);
+ enetc_set_features(ndev, features);
+
+ return 0;
+}
+
+static int enetc_vf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return enetc_setup_tc_mqprio(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/* Probing/ Init */
@@ -100,7 +113,7 @@ static const struct net_device_ops enetc_ndev_ops = {
.ndo_set_mac_address = enetc_vf_set_mac_addr,
.ndo_set_features = enetc_vf_set_features,
.ndo_eth_ioctl = enetc_ioctl,
- .ndo_setup_tc = enetc_setup_tc,
+ .ndo_setup_tc = enetc_vf_setup_tc,
};
static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ed7301b69169..9939ccafb556 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -16,8 +16,12 @@
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
+#include <linux/pm_qos.h>
+#include <linux/bpf.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/firmware/imx/sci.h>
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
@@ -343,8 +347,10 @@ struct bufdesc_ex {
* the skbuffer directly.
*/
+#define FEC_ENET_XDP_HEADROOM (XDP_PACKET_HEADROOM)
#define FEC_ENET_RX_PAGES 256
-#define FEC_ENET_RX_FRSIZE 2048
+#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_ENET_XDP_HEADROOM \
+ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE 2048
@@ -498,6 +504,14 @@ struct bufdesc_ex {
/* i.MX8MQ SoC integration mix wakeup interrupt signal into "int2" interrupt line. */
#define FEC_QUIRK_WAKEUP_FROM_INT2 (1 << 22)
+/* i.MX6Q adds pm_qos support */
+#define FEC_QUIRK_HAS_PMQOS BIT(23)
+
+/* Not all FEC hardware block MDIOs support accesses in C45 mode.
+ * Older blocks in the ColdFire parts do not support it.
+ */
+#define FEC_QUIRK_HAS_MDIO_C45 BIT(24)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
@@ -511,6 +525,25 @@ struct bufdesc_prop {
unsigned char dsize_log2;
};
+struct fec_enet_priv_txrx_info {
+ int offset;
+ struct page *page;
+ struct sk_buff *skb;
+};
+
+enum {
+ RX_XDP_REDIRECT = 0,
+ RX_XDP_PASS,
+ RX_XDP_DROP,
+ RX_XDP_TX,
+ RX_XDP_TX_ERRORS,
+ TX_XDP_XMIT,
+ TX_XDP_XMIT_ERRORS,
+
+ /* The following must be the last one */
+ XDP_STATS_TOTAL,
+};
+
struct fec_enet_priv_tx_q {
struct bufdesc_prop bd;
unsigned char *tx_bounce[TX_RING_SIZE];
@@ -526,7 +559,15 @@ struct fec_enet_priv_tx_q {
struct fec_enet_priv_rx_q {
struct bufdesc_prop bd;
- struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ struct fec_enet_priv_txrx_info rx_skb_info[RX_RING_SIZE];
+
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_rxq;
+ u32 stats[XDP_STATS_TOTAL];
+
+ /* rx queue number, in the range 0-7 */
+ u8 id;
};
struct fec_stop_mode_gpr {
@@ -579,6 +620,7 @@ struct fec_enet_private {
struct device_node *phy_node;
bool rgmii_txc_dly;
bool rgmii_rxc_dly;
+ bool rpm_active;
int link;
int full_duplex;
int speed;
@@ -608,6 +650,7 @@ struct fec_enet_private {
struct delayed_work time_keep;
struct regulator *reg_phy;
struct fec_stop_mode_gpr stop_gpr;
+ struct pm_qos_request pm_qos_req;
unsigned int tx_align;
unsigned int rx_align;
@@ -633,6 +676,13 @@ struct fec_enet_private {
unsigned int reload_period;
int pps_enable;
unsigned int next_counter;
+ struct hrtimer perout_timer;
+ u64 perout_stime;
+
+ struct imx_sc_ipc *ipc_handle;
+
+ /* XDP BPF Program */
+ struct bpf_prog *xdp_prog;
u64 ethtool_stats[];
};
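The replacement FEC_ENET_RX_FRSIZE encodes the usual one-frame-per-page layout of page_pool/XDP drivers: XDP headroom at the front of the page, the frame itself, and room at the tail for struct skb_shared_info so build_skb() can reuse the same buffer. A small layout sketch, assuming 4 KiB pages and a typical 64-bit configuration (the shared-info size varies with the kernel config):

/* Layout of one RX page under the new definitions (one frame per page):
 *
 *   [ XDP_PACKET_HEADROOM ][ frame data, up to FEC_ENET_RX_FRSIZE ][ shinfo ]
 *
 * where shinfo = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).  With 4 KiB
 * pages, 256 bytes of headroom and roughly 320 bytes of shared info on a
 * common 64-bit build, that leaves about 3.5 KiB of frame data per page,
 * which is why the old fixed 2048-byte FRSIZE could be dropped.
 */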
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 1b1f7f2a6130..42ec6ca3bf03 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -56,23 +56,25 @@
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/gpio/consumer.h>
#include <linux/prefetch.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <soc/imx/cpuidle.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
#include <asm/cacheflush.h>
#include "fec.h"
static void set_multicast_list(struct net_device *ndev);
-static void fec_enet_itr_coal_init(struct net_device *ndev);
+static void fec_enet_itr_coal_set(struct net_device *ndev);
#define DRIVER_NAME "fec"
@@ -87,35 +89,43 @@ static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
#define FEC_ENET_OPD_V 0xFFF0
#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
+#define FEC_ENET_XDP_PASS 0
+#define FEC_ENET_XDP_CONSUMED BIT(0)
+#define FEC_ENET_XDP_TX BIT(1)
+#define FEC_ENET_XDP_REDIR BIT(2)
+
struct fec_devinfo {
u32 quirks;
};
static const struct fec_devinfo fec_imx25_info = {
.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
- FEC_QUIRK_HAS_FRREG,
+ FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx27_info = {
- .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+ .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx28_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
- FEC_QUIRK_NO_HARD_RESET,
+ FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx6q_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_mvf600_info = {
- .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx6x_info = {
@@ -124,7 +134,8 @@ static const struct fec_devinfo fec_imx6x_info = {
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
- FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES,
+ FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx6ul_info = {
@@ -132,7 +143,8 @@ static const struct fec_devinfo fec_imx6ul_info = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx8mq_info = {
@@ -142,7 +154,8 @@ static const struct fec_devinfo fec_imx8mq_info = {
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
- FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2,
+ FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static const struct fec_devinfo fec_imx8qm_info = {
@@ -152,7 +165,15 @@ static const struct fec_devinfo fec_imx8qm_info = {
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
- FEC_QUIRK_DELAYED_CLKS_SUPPORT,
+ FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
+};
+
+static const struct fec_devinfo fec_s32v234_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_MDIO_C45,
};
static struct platform_device_id fec_devtype[] = {
@@ -188,6 +209,9 @@ static struct platform_device_id fec_devtype[] = {
.name = "imx8qm-fec",
.driver_data = (kernel_ulong_t)&fec_imx8qm_info,
}, {
+ .name = "s32v234-fec",
+ .driver_data = (kernel_ulong_t)&fec_s32v234_info,
+ }, {
/* sentinel */
}
};
@@ -203,6 +227,7 @@ enum imx_fec_type {
IMX6UL_FEC,
IMX8MQ_FEC,
IMX8QM_FEC,
+ S32V234_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
@@ -215,6 +240,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
{ .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
{ .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
+ { .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -350,16 +376,6 @@ static void swap_buffer(void *bufaddr, int len)
swab32s(buf);
}
-static void swap_buffer2(void *dst_buf, void *src_buf, int len)
-{
- int i;
- unsigned int *src = src_buf;
- unsigned int *dst = dst_buf;
-
- for (i = 0; i < len; i += 4, src++, dst++)
- *dst = swab32p(src);
-}
-
static void fec_dump(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -409,6 +425,49 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
return 0;
}
+static int
+fec_enet_create_page_pool(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq, int size)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = dev_to_node(&fep->pdev->dev),
+ .dev = &fep->pdev->dev,
+ .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+ .offset = FEC_ENET_XDP_HEADROOM,
+ .max_len = FEC_ENET_RX_FRSIZE,
+ };
+ int err;
+
+ rxq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rxq->page_pool)) {
+ err = PTR_ERR(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+ }
+
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rxq->page_pool);
+ if (err)
+ goto err_unregister_rxq;
+
+ return 0;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+err_free_pp:
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+}
+
static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
struct sk_buff *skb,
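fec_enet_create_page_pool() follows the standard page_pool bring-up sequence: create the pool, register the rxq info, then attach the MEM_TYPE_PAGE_POOL memory model. The matching teardown appears further down in this diff (in fec_enet_free_buffers()) and reverses that order, because the registered memory model holds a reference on the pool:

	/* Teardown mirror of the error path above (also used on ring free) */
	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
		xdp_rxq_info_unreg(&rxq->xdp_rxq);
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;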
@@ -656,7 +715,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
bdp->cbd_datlen = cpu_to_fec16(size);
@@ -691,7 +750,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
struct bufdesc *bdp, int index)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int hdr_len = skb_tcp_all_headers(skb);
struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
void *bufaddr;
unsigned long dmabuf;
@@ -718,7 +777,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
}
@@ -1163,8 +1222,36 @@ fec_restart(struct net_device *ndev)
writel(0, fep->hwp + FEC_IMASK);
/* Init the interrupt coalescing */
- fec_enet_itr_coal_init(ndev);
+ if (fep->quirks & FEC_QUIRK_HAS_COALESCE)
+ fec_enet_itr_coal_set(ndev);
+}
+
+static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
+{
+ if (!(of_machine_is_compatible("fsl,imx8qm") ||
+ of_machine_is_compatible("fsl,imx8qxp") ||
+ of_machine_is_compatible("fsl,imx8dxl")))
+ return 0;
+
+ return imx_scu_get_handle(&fep->ipc_handle);
+}
+static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
+{
+ struct device_node *np = fep->pdev->dev.of_node;
+ u32 rsrc_id, val;
+ int idx;
+
+ if (!np || !fep->ipc_handle)
+ return;
+
+ idx = of_alias_get_id(np, "ethernet");
+ if (idx < 0)
+ idx = 0;
+ rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;
+
+ val = enabled ? 1 : 0;
+ imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
}
static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
@@ -1182,9 +1269,26 @@ static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
BIT(stop_gpr->bit), 0);
} else if (pdata && pdata->sleep_mode_enable) {
pdata->sleep_mode_enable(enabled);
+ } else {
+ fec_enet_ipg_stop_set(fep, enabled);
}
}
+static void fec_irqs_disable(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(0, fep->hwp + FEC_IMASK);
+}
+
+static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ writel(0, fep->hwp + FEC_IMASK);
+ writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
+}
+
static void
fec_stop(struct net_device *ndev)
{
@@ -1211,15 +1315,13 @@ fec_stop(struct net_device *ndev)
writel(1, fep->hwp + FEC_ECNTRL);
udelay(10);
}
- writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
} else {
- writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
val = readl(fep->hwp + FEC_ECNTRL);
val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
writel(val, fep->hwp + FEC_ECNTRL);
- fec_enet_stop_mode(fep, true);
}
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
/* We have to keep ENET enabled to have MII interrupt stay working */
if (fep->quirks & FEC_QUIRK_ENET_MAC &&
@@ -1394,50 +1496,75 @@ static void fec_enet_tx(struct net_device *ndev)
fec_enet_tx_queue(ndev, i);
}
-static int
-fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
+static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+ struct bufdesc *bdp, int index)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
- int off;
+ struct page *new_page;
+ dma_addr_t phys_addr;
- off = ((unsigned long)skb->data) & fep->rx_align;
- if (off)
- skb_reserve(skb, fep->rx_align + 1 - off);
+ new_page = page_pool_dev_alloc_pages(rxq->page_pool);
+ WARN_ON(!new_page);
+ rxq->rx_skb_info[index].page = new_page;
- bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
- if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
- if (net_ratelimit())
- netdev_err(ndev, "Rx DMA memory map failed\n");
- return -ENOMEM;
- }
-
- return 0;
+ rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
+ phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
}
-static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
- struct bufdesc *bdp, u32 length, bool swap)
+static u32
+fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
+ struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index)
{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct sk_buff *new_skb;
-
- if (length > fep->rx_copybreak)
- return false;
-
- new_skb = netdev_alloc_skb(ndev, length);
- if (!new_skb)
- return false;
-
- dma_sync_single_for_cpu(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- if (!swap)
- memcpy(new_skb->data, (*skb)->data, length);
- else
- swap_buffer2(new_skb->data, (*skb)->data, length);
- *skb = new_skb;
+ unsigned int sync, len = xdp->data_end - xdp->data;
+ u32 ret = FEC_ENET_XDP_PASS;
+ struct page *page;
+ int err;
+ u32 act;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ /* Because of xdp_adjust_tail, the DMA sync for device must cover the maximum length the CPU may have touched */
+ sync = xdp->data_end - xdp->data_hard_start - FEC_ENET_XDP_HEADROOM;
+ sync = max(sync, len);
+
+ switch (act) {
+ case XDP_PASS:
+ rxq->stats[RX_XDP_PASS]++;
+ ret = FEC_ENET_XDP_PASS;
+ break;
+
+ case XDP_REDIRECT:
+ rxq->stats[RX_XDP_REDIRECT]++;
+ err = xdp_do_redirect(fep->netdev, xdp, prog);
+ if (!err) {
+ ret = FEC_ENET_XDP_REDIR;
+ } else {
+ ret = FEC_ENET_XDP_CONSUMED;
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
+ }
+ break;
+
+ default:
+ bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
+ fallthrough;
+
+ case XDP_TX:
+ bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
+ fallthrough;
+
+ case XDP_ABORTED:
+ fallthrough; /* handle aborts by dropping packet */
+
+ case XDP_DROP:
+ rxq->stats[RX_XDP_DROP]++;
+ ret = FEC_ENET_XDP_CONSUMED;
+ page = virt_to_head_page(xdp->data);
+ page_pool_put_page(rxq->page_pool, page, sync, true);
+ break;
+ }
- return true;
+ return ret;
}
/* During a receive, the bd_rx.cur points to the current incoming buffer.
@@ -1452,7 +1579,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
struct fec_enet_priv_rx_q *rxq;
struct bufdesc *bdp;
unsigned short status;
- struct sk_buff *skb_new = NULL;
struct sk_buff *skb;
ushort pkt_len;
__u8 *data;
@@ -1461,8 +1587,23 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
bool vlan_packet_rcvd = false;
u16 vlan_tag;
int index = 0;
- bool is_copybreak;
bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+ struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
+ u32 ret, xdp_result = FEC_ENET_XDP_PASS;
+ u32 data_start = FEC_ENET_XDP_HEADROOM;
+ struct xdp_buff xdp;
+ struct page *page;
+ u32 sub_len = 4;
+
+#if !defined(CONFIG_M5272)
+ /* If the FEC_QUIRK_HAS_RACC quirk is present, the FEC_RACC_SHIFT16 bit is
+ * set by default in the probe function.
+ */
+ if (fep->quirks & FEC_QUIRK_HAS_RACC) {
+ data_start += 2;
+ sub_len += 2;
+ }
+#endif
#ifdef CONFIG_M532x
flush_cache_all();
@@ -1473,6 +1614,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
* These get messed up if we get called due to a busy condition.
*/
bdp = rxq->bd.cur;
+ xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
@@ -1514,37 +1656,47 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ndev->stats.rx_bytes += pkt_len;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
- skb = rxq->rx_skbuff[index];
+ page = rxq->rx_skb_info[index].page;
+ dma_sync_single_for_cpu(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ pkt_len,
+ DMA_FROM_DEVICE);
+ prefetch(page_address(page));
+ fec_enet_update_cbd(rxq, bdp, index);
+
+ if (xdp_prog) {
+ xdp_buff_clear_frags_flag(&xdp);
+ /* subtract 16bit shift and FCS */
+ xdp_prepare_buff(&xdp, page_address(page),
+ data_start, pkt_len - sub_len, false);
+ ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index);
+ xdp_result |= ret;
+ if (ret != FEC_ENET_XDP_PASS)
+ goto rx_processing_done;
+ }
/* The packet length includes FCS, but we don't want to
* include that when passing upstream as it messes up
* bridging applications.
*/
- is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
- need_swap);
- if (!is_copybreak) {
- skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
- if (unlikely(!skb_new)) {
- ndev->stats.rx_dropped++;
- goto rx_processing_done;
- }
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
+ skb = build_skb(page_address(page), PAGE_SIZE);
+ if (unlikely(!skb)) {
+ page_pool_recycle_direct(rxq->page_pool, page);
+ ndev->stats.rx_dropped++;
+
+ netdev_err_once(ndev, "build_skb failed!\n");
+ goto rx_processing_done;
}
- prefetch(skb->data - NET_IP_ALIGN);
- skb_put(skb, pkt_len - 4);
- data = skb->data;
+ skb_reserve(skb, data_start);
+ skb_put(skb, pkt_len - sub_len);
+ skb_mark_for_recycle(skb);
- if (!is_copybreak && need_swap)
+ if (unlikely(need_swap)) {
+ data = page_address(page) + FEC_ENET_XDP_HEADROOM;
swap_buffer(data, pkt_len);
-
-#if !defined(CONFIG_M5272)
- if (fep->quirks & FEC_QUIRK_HAS_RACC)
- data = skb_pull_inline(skb, 2);
-#endif
+ }
+ data = skb->data;
/* Extract the enhanced buffer descriptor */
ebdp = NULL;
@@ -1593,16 +1745,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
skb_record_rx_queue(skb, queue_id);
napi_gro_receive(&fep->napi, skb);
- if (is_copybreak) {
- dma_sync_single_for_device(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- } else {
- rxq->rx_skbuff[index] = skb_new;
- fec_enet_new_rxbdp(ndev, bdp, skb_new);
- }
-
rx_processing_done:
/* Clear the status flags for this buffer */
status &= ~BD_ENET_RX_STATS;
@@ -1633,6 +1775,10 @@ rx_processing_done:
writel(0, rxq->bd.reg_desc_active);
}
rxq->bd.cur = bdp;
+
+ if (xdp_result & FEC_ENET_XDP_REDIR)
+ xdp_do_flush_map();
+
return pkt_received;
}
@@ -1847,47 +1993,74 @@ static int fec_enet_mdio_wait(struct fec_enet_private *fep)
return ret;
}
-static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
{
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
int ret = 0, frame_start, frame_addr, frame_op;
- bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
- if (is_c45) {
- frame_start = FEC_MMFR_ST_C45;
+ /* C22 read */
+ frame_op = FEC_MMFR_OP_READ;
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
- /* write address */
- frame_addr = (regnum >> 16);
- writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA | (regnum & 0xFFFF),
- fep->hwp + FEC_MII_DATA);
+ /* start a read op */
+ writel(frame_start | frame_op |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
- /* wait for end of transfer */
- ret = fec_enet_mdio_wait(fep);
- if (ret) {
- netdev_err(fep->netdev, "MDIO address write timeout\n");
- goto out;
- }
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
+ netdev_err(fep->netdev, "MDIO read timeout\n");
+ goto out;
+ }
- frame_op = FEC_MMFR_OP_READ_C45;
+ ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
- } else {
- /* C22 read */
- frame_op = FEC_MMFR_OP_READ;
- frame_start = FEC_MMFR_ST;
- frame_addr = regnum;
+out:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id,
+ int devad, int regnum)
+{
+ struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
+ int ret = 0, frame_start, frame_op;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ frame_start = FEC_MMFR_ST_C45;
+
+ /* write address */
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ goto out;
}
+ frame_op = FEC_MMFR_OP_READ_C45;
+
/* start a read op */
writel(frame_start | frame_op |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
ret = fec_enet_mdio_wait(fep);
@@ -1905,45 +2078,69 @@ out:
return ret;
}
-static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 value)
+static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
{
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
int ret, frame_start, frame_addr;
- bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
- if (is_c45) {
- frame_start = FEC_MMFR_ST_C45;
+ /* C22 write */
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
- /* write address */
- frame_addr = (regnum >> 16);
- writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA | (regnum & 0xFFFF),
- fep->hwp + FEC_MII_DATA);
+ /* start a write op */
+ writel(frame_start | FEC_MMFR_OP_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA | FEC_MMFR_DATA(value),
+ fep->hwp + FEC_MII_DATA);
- /* wait for end of transfer */
- ret = fec_enet_mdio_wait(fep);
- if (ret) {
- netdev_err(fep->netdev, "MDIO address write timeout\n");
- goto out;
- }
- } else {
- /* C22 write */
- frame_start = FEC_MMFR_ST;
- frame_addr = regnum;
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret)
+ netdev_err(fep->netdev, "MDIO write timeout\n");
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id,
+ int devad, int regnum, u16 value)
+{
+ struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
+ int ret, frame_start;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ frame_start = FEC_MMFR_ST_C45;
+
+ /* write address */
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ goto out;
}
/* start a write op */
writel(frame_start | FEC_MMFR_OP_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA | FEC_MMFR_DATA(value),
- fep->hwp + FEC_MII_DATA);
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA | FEC_MMFR_DATA(value),
+ fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
ret = fec_enet_mdio_wait(fep);
@@ -2091,13 +2288,13 @@ static int fec_enet_mii_probe(struct net_device *ndev)
continue;
if (dev_id--)
continue;
- strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
+ strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
break;
}
if (phy_id >= PHY_MAX_ADDR) {
netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
- strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
+ strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
phy_id = 0;
}
@@ -2127,7 +2324,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
fep->link = 0;
fep->full_duplex = 0;
- phy_dev->mac_managed_pm = 1;
+ phy_dev->mac_managed_pm = true;
phy_attached_info(phy_dev);
@@ -2241,8 +2438,12 @@ static int fec_enet_mii_init(struct platform_device *pdev)
}
fep->mii_bus->name = "fec_enet_mii_bus";
- fep->mii_bus->read = fec_enet_mdio_read;
- fep->mii_bus->write = fec_enet_mdio_write;
+ fep->mii_bus->read = fec_enet_mdio_read_c22;
+ fep->mii_bus->write = fec_enet_mdio_write_c22;
+ if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) {
+ fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
+ fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
+ }
snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
pdev->name, fep->dev_id + 1);
fep->mii_bus->priv = fep;
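The MDIO rework splits the single read/write pair, which had to decode Clause 45 accesses out of an encoded regnum, into dedicated C22 and C45 callbacks that take the device address explicitly; the C45 hooks are only installed when the quirk says the block can generate C45 frames, so such accesses are rejected by the MDIO core on the older ColdFire blocks instead of being mis-framed. A short sketch of the two calling conventions, with the old encoding shown for reference only:

/* Old scheme (removed): one callback per direction, Clause 45 folded into
 * regnum and decoded by every driver:
 *
 *	regnum = MII_ADDR_C45 | (devad << 16) | (reg & 0xffff);
 *	val = bus->read(bus, phy_addr, regnum);
 *
 * New scheme (as registered above): explicit hooks with a separate devad
 * argument, present only when the hardware supports Clause 45 framing:
 *
 *	val = bus->read_c45(bus, phy_addr, devad, reg);
 *	err = bus->write_c45(bus, phy_addr, devad, reg, val);
 */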
@@ -2281,9 +2482,9 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
{
struct fec_enet_private *fep = netdev_priv(ndev);
- strlcpy(info->driver, fep->pdev->dev.driver->name,
+ strscpy(info->driver, fep->pdev->dev.driver->name,
sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}
static int fec_enet_get_regs_len(struct net_device *ndev)
@@ -2333,6 +2534,31 @@ static u32 fec_enet_register_offset[] = {
IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
IEEE_R_FDXFC, IEEE_R_OCTETS_OK
};
+/* for i.MX6ul */
+static u32 fec_enet_register_offset_6ul[] = {
+ FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
+ FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
+ FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
+ FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
+ FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
+ FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
+ FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
+ RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
+ RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
+ RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
+ RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
+ RMON_T_P_GTE2048, RMON_T_OCTETS,
+ IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
+ IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
+ IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
+ RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
+ RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
+ RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
+ RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
+ RMON_R_P_GTE2048, RMON_R_OCTETS,
+ IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
+ IEEE_R_FDXFC, IEEE_R_OCTETS_OK
+};
#else
static __u32 fec_enet_register_version = 1;
static u32 fec_enet_register_offset[] = {
@@ -2357,7 +2583,24 @@ static void fec_enet_get_regs(struct net_device *ndev,
u32 *buf = (u32 *)regbuf;
u32 i, off;
int ret;
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+ defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+ u32 *reg_list;
+ u32 reg_cnt;
+ if (!of_machine_is_compatible("fsl,imx6ul")) {
+ reg_list = fec_enet_register_offset;
+ reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
+ } else {
+ reg_list = fec_enet_register_offset_6ul;
+ reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
+ }
+#else
+ /* coldfire */
+ static u32 *reg_list = fec_enet_register_offset;
+ static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
+#endif
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return;
@@ -2366,8 +2609,8 @@ static void fec_enet_get_regs(struct net_device *ndev,
memset(buf, 0, regs->len);
- for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
- off = fec_enet_register_offset[i];
+ for (i = 0; i < reg_cnt; i++) {
+ off = reg_list[i];
if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
!(fep->quirks & FEC_QUIRK_HAS_FRREG))
@@ -2530,6 +2773,16 @@ static const struct fec_stat {
#define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))
+static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
+ "rx_xdp_redirect", /* RX_XDP_REDIRECT = 0, */
+ "rx_xdp_pass", /* RX_XDP_PASS, */
+ "rx_xdp_drop", /* RX_XDP_DROP, */
+ "rx_xdp_tx", /* RX_XDP_TX, */
+ "rx_xdp_tx_errors", /* RX_XDP_TX_ERRORS, */
+ "tx_xdp_xmit", /* TX_XDP_XMIT, */
+ "tx_xdp_xmit_errors", /* TX_XDP_XMIT_ERRORS, */
+};
+
static void fec_enet_update_ethtool_stats(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
@@ -2539,6 +2792,40 @@ static void fec_enet_update_ethtool_stats(struct net_device *dev)
fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
}
+static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
+{
+ u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };
+ struct fec_enet_priv_rx_q *rxq;
+ int i, j;
+
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ rxq = fep->rx_queue[i];
+
+ for (j = 0; j < XDP_STATS_TOTAL; j++)
+ xdp_stats[j] += rxq->stats[j];
+ }
+
+ memcpy(data, xdp_stats, sizeof(xdp_stats));
+}
+
+static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ struct fec_enet_priv_rx_q *rxq;
+ int i;
+
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ rxq = fep->rx_queue[i];
+
+ if (!rxq->page_pool)
+ continue;
+
+ page_pool_get_stats(rxq->page_pool, &stats);
+ }
+
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
static void fec_enet_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -2548,6 +2835,12 @@ static void fec_enet_get_ethtool_stats(struct net_device *dev,
fec_enet_update_ethtool_stats(dev);
memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
+ data += FEC_STATS_SIZE / sizeof(u64);
+
+ fec_enet_get_xdp_stats(fep, data);
+ data += XDP_STATS_TOTAL;
+
+ fec_enet_page_pool_stats(fep, data);
}
static void fec_enet_get_strings(struct net_device *netdev,
@@ -2556,9 +2849,16 @@ static void fec_enet_get_strings(struct net_device *netdev,
int i;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- fec_stats[i].name, ETH_GSTRING_LEN);
+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
+ memcpy(data, fec_stats[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
+ strncpy(data, fec_xdp_stat_strs[i], ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ page_pool_ethtool_stats_get_strings(data);
+
break;
case ETH_SS_TEST:
net_selftest_get_strings(data);
@@ -2568,9 +2868,14 @@ static void fec_enet_get_strings(struct net_device *netdev,
static int fec_enet_get_sset_count(struct net_device *dev, int sset)
{
+ int count;
+
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(fec_stats);
+ count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL;
+ count += page_pool_ethtool_stats_get_count();
+ return count;
+
case ETH_SS_TEST:
return net_selftest_get_count();
default:
@@ -2581,7 +2886,8 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
static void fec_enet_clear_ethtool_stats(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
- int i;
+ struct fec_enet_priv_rx_q *rxq;
+ int i, j;
/* Disable MIB statistics counters */
writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
@@ -2589,6 +2895,12 @@ static void fec_enet_clear_ethtool_stats(struct net_device *dev)
for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
writel(0, fep->hwp + fec_stats[i].offset);
+ for (i = fep->num_rx_queues - 1; i >= 0; i--) {
+ rxq = fep->rx_queue[i];
+ for (j = 0; j < XDP_STATS_TOTAL; j++)
+ rxq->stats[j] = 0;
+ }
+
/* Don't disable MIB statistics counters */
writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
}
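The ethtool statistics now come in three groups, and the layout must stay identical across fec_enet_get_sset_count(), fec_enet_get_strings() and fec_enet_get_ethtool_stats(), since ethtool matches names to values purely by position:

/* ethtool stats layout after this change (same order in all three hooks):
 *
 *	[0, ARRAY_SIZE(fec_stats))				hardware MIB counters
 *	[..., ... + XDP_STATS_TOTAL)				software XDP counters
 *	[..., ... + page_pool_ethtool_stats_get_count())	page_pool counters
 */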
@@ -2715,19 +3027,6 @@ static int fec_enet_set_coalesce(struct net_device *ndev,
return 0;
}
-static void fec_enet_itr_coal_init(struct net_device *ndev)
-{
- struct ethtool_coalesce ec;
-
- ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
- ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
-
- ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
- ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
-
- fec_enet_set_coalesce(ndev, &ec, NULL, NULL);
-}
-
static int fec_enet_get_tunable(struct net_device *netdev,
const struct ethtool_tunable *tuna,
void *data)
@@ -2784,7 +3083,7 @@ static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
int ret = 0;
if (enable) {
- ret = phy_init_eee(ndev->phydev, 0);
+ ret = phy_init_eee(ndev->phydev, false);
if (ret)
return ret;
@@ -2877,15 +3176,10 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return -EINVAL;
device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
- if (device_may_wakeup(&ndev->dev)) {
+ if (device_may_wakeup(&ndev->dev))
fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
- if (fep->wake_irq > 0)
- enable_irq_wake(fep->wake_irq);
- } else {
+ else
fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
- if (fep->wake_irq > 0)
- disable_irq_wake(fep->wake_irq);
- }
return 0;
}
@@ -2951,26 +3245,22 @@ static void fec_enet_free_buffers(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int i;
struct sk_buff *skb;
- struct bufdesc *bdp;
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
unsigned int q;
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
- bdp = rxq->bd.base;
- for (i = 0; i < rxq->bd.ring_size; i++) {
- skb = rxq->rx_skbuff[i];
- rxq->rx_skbuff[i] = NULL;
- if (skb) {
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
- }
- bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
- }
+ for (i = 0; i < rxq->bd.ring_size; i++)
+ page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
+
+ for (i = 0; i < XDP_STATS_TOTAL; i++)
+ rxq->stats[i] = 0;
+
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
}
for (q = 0; q < fep->num_tx_queues; q++) {
@@ -3060,24 +3350,31 @@ static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- unsigned int i;
- struct sk_buff *skb;
- struct bufdesc *bdp;
struct fec_enet_priv_rx_q *rxq;
+ dma_addr_t phys_addr;
+ struct bufdesc *bdp;
+ struct page *page;
+ int i, err;
rxq = fep->rx_queue[queue];
bdp = rxq->bd.base;
+
+ err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
+ if (err < 0) {
+ netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
+ return err;
+ }
+
for (i = 0; i < rxq->bd.ring_size; i++) {
- skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
- if (!skb)
+ page = page_pool_dev_alloc_pages(rxq->page_pool);
+ if (!page)
goto err_alloc;
- if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
- dev_kfree_skb(skb);
- goto err_alloc;
- }
+ phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
- rxq->rx_skbuff[i] = skb;
+ rxq->rx_skb_info[i].page = page;
+ rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
if (fep->bufdesc_ex) {
@@ -3202,6 +3499,9 @@ fec_enet_open(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used();
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
+
napi_enable(&fep->napi);
phy_start(ndev->phydev);
netif_tx_start_all_queues(ndev);
@@ -3243,6 +3543,9 @@ fec_enet_close(struct net_device *ndev)
fec_enet_update_ethtool_stats(ndev);
fec_enet_clk_enable(ndev, false);
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_remove_request(&fep->pm_qos_req);
+
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
pm_runtime_mark_last_busy(&fep->pdev->dev);
pm_runtime_put_autosuspend(&fep->pdev->dev);
@@ -3433,6 +3736,155 @@ static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
}
+static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ bool is_run = netif_running(dev);
+ struct bpf_prog *old_prog;
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ /* No need to support SoCs that require the frame swap:
+ * performance there would not be better than the skb mode.
+ */
+ if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
+ return -EOPNOTSUPP;
+
+ if (is_run) {
+ napi_disable(&fep->napi);
+ netif_tx_disable(dev);
+ }
+
+ old_prog = xchg(&fep->xdp_prog, bpf->prog);
+ fec_restart(dev);
+
+ if (is_run) {
+ napi_enable(&fep->napi);
+ netif_tx_start_all_queues(dev);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+
+ case XDP_SETUP_XSK_POOL:
+ return -EOPNOTSUPP;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
+{
+ if (unlikely(index < 0))
+ return 0;
+
+ return (index % fep->num_tx_queues);
+}
+
+static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
+ struct fec_enet_priv_tx_q *txq,
+ struct xdp_frame *frame)
+{
+ unsigned int index, status, estatus;
+ struct bufdesc *bdp, *last_bdp;
+ dma_addr_t dma_addr;
+ int entries_free;
+
+ entries_free = fec_enet_get_free_txdesc_num(txq);
+ if (entries_free < MAX_SKB_FRAGS + 1) {
+ netdev_err(fep->netdev, "NOT enough BD for SG!\n");
+ xdp_return_frame(frame);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Fill in a Tx ring entry */
+ bdp = txq->bd.cur;
+ last_bdp = bdp;
+ status = fec16_to_cpu(bdp->cbd_sc);
+ status &= ~BD_ENET_TX_STATS;
+
+ index = fec_enet_get_bd_index(bdp, &txq->bd);
+
+ dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
+ frame->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, dma_addr))
+ return FEC_ENET_XDP_CONSUMED;
+
+ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+ if (fep->bufdesc_ex)
+ estatus = BD_ENET_TX_INT;
+
+ bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
+ bdp->cbd_datlen = cpu_to_fec16(frame->len);
+
+ if (fep->bufdesc_ex) {
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+ if (fep->quirks & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = cpu_to_fec32(estatus);
+ }
+
+ index = fec_enet_get_bd_index(last_bdp, &txq->bd);
+ txq->tx_skbuff[index] = NULL;
+
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+ status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
+ bdp->cbd_sc = cpu_to_fec16(status);
+
+ /* If this was the last BD in the ring, start at the beginning again. */
+ bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
+
+ txq->bd.cur = bdp;
+
+ return 0;
+}
+
+static int fec_enet_xdp_xmit(struct net_device *dev,
+ int num_frames,
+ struct xdp_frame **frames,
+ u32 flags)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_priv_tx_q *txq;
+ int cpu = smp_processor_id();
+ unsigned int sent_frames = 0;
+ struct netdev_queue *nq;
+ unsigned int queue;
+ int i;
+
+ queue = fec_enet_xdp_get_tx_queue(fep, cpu);
+ txq = fep->tx_queue[queue];
+ nq = netdev_get_tx_queue(fep->netdev, queue);
+
+ __netif_tx_lock(nq, cpu);
+
+ for (i = 0; i < num_frames; i++) {
+ if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) != 0)
+ break;
+ sent_frames++;
+ }
+
+ /* Make sure the updates to bdp and tx_skbuff are performed. */
+ wmb();
+
+ /* Trigger transmission start */
+ writel(0, txq->bd.reg_desc_active);
+
+ __netif_tx_unlock(nq);
+
+ return sent_frames;
+}
+
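fec_enet_xdp_xmit() maps the sending CPU onto a Tx queue and serialises on that queue's netdev_queue lock, so redirected frames from different CPUs land on different rings where possible. A minimal sketch of the mapping, assuming for illustration a SoC variant with three Tx queues:

/* Illustrative only: mirrors fec_enet_xdp_get_tx_queue() above. */
static int example_xdp_queue(int cpu, int num_tx_queues)
{
	if (cpu < 0)
		return 0;

	/* with 3 queues: CPU 0 -> q0, 1 -> q1, 2 -> q2, 3 -> q0, ... */
	return cpu % num_tx_queues;
}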
static const struct net_device_ops fec_netdev_ops = {
.ndo_open = fec_enet_open,
.ndo_stop = fec_enet_close,
@@ -3447,6 +3899,8 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_poll_controller = fec_poll_controller,
#endif
.ndo_set_features = fec_set_features,
+ .ndo_bpf = fec_enet_bpf,
+ .ndo_xdp_xmit = fec_enet_xdp_xmit,
};
static const unsigned short offset_des_active_rxq[] = {
@@ -3481,6 +3935,10 @@ static int fec_enet_init(struct net_device *ndev)
fep->rx_align = 0x3;
fep->tx_align = 0x3;
#endif
+ fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
+ fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
+ fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT;
+ fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT;
/* Check mask of the streaming and coherent API */
ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
@@ -3551,14 +4009,14 @@ static int fec_enet_init(struct net_device *ndev)
ndev->ethtool_ops = &fec_enet_ethtool_ops;
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
- netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);
if (fep->quirks & FEC_QUIRK_HAS_VLAN)
/* enable hw VLAN support */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
- ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
+ netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS);
/* enable hw accelerator */
ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
@@ -3590,10 +4048,10 @@ free_queue_mem:
#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
- int err, phy_reset;
- bool active_high = false;
+ struct gpio_desc *phy_reset;
int msec = 1, phy_post_delay = 0;
struct device_node *np = pdev->dev.of_node;
+ int err;
if (!np)
return 0;
@@ -3603,33 +4061,26 @@ static int fec_reset_phy(struct platform_device *pdev)
if (!err && msec > 1000)
msec = 1;
- phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
- if (phy_reset == -EPROBE_DEFER)
- return phy_reset;
- else if (!gpio_is_valid(phy_reset))
- return 0;
-
err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
/* valid reset duration should be less than 1s */
if (!err && phy_post_delay > 1000)
return -EINVAL;
- active_high = of_property_read_bool(np, "phy-reset-active-high");
+ phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(phy_reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset),
+ "failed to get phy-reset-gpios\n");
- err = devm_gpio_request_one(&pdev->dev, phy_reset,
- active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- "phy-reset");
- if (err) {
- dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
- return err;
- }
+ if (!phy_reset)
+ return 0;
if (msec > 20)
msleep(msec);
else
usleep_range(msec * 1000, msec * 1000 + 1000);
- gpio_set_value_cansleep(phy_reset, !active_high);
+ gpiod_set_value_cansleep(phy_reset, 0);
if (!phy_post_delay)
return 0;
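The gpiod conversion above drops the explicit "phy-reset-active-high" handling because the descriptor API takes the signal polarity from the GPIO flags in the device tree; GPIOD_OUT_HIGH requests the line already asserted, and writing 0 later deasserts it. A minimal sketch of the same pattern, with the "reset" consumer name and the delay chosen only for illustration:

/* Illustrative reset-GPIO pattern; names and timings are assumptions. */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_reset(struct device *dev)
{
	struct gpio_desc *reset;

	/* Requested asserted; active-low vs active-high comes from DT flags */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);
	if (!reset)		/* property absent: nothing to do */
		return 0;

	msleep(10);
	gpiod_set_value_cansleep(reset, 0);	/* deassert */
	return 0;
}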
@@ -3723,7 +4174,7 @@ static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
ARRAY_SIZE(out_val));
if (ret) {
dev_dbg(&fep->pdev->dev, "no stop mode property\n");
- return ret;
+ goto out;
}
fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
@@ -3809,7 +4260,11 @@ fec_probe(struct platform_device *pdev)
!of_property_read_bool(np, "fsl,err006687-workaround-present"))
fep->quirks |= FEC_QUIRK_ERR006687;
- if (of_get_property(np, "fsl,magic-packet", NULL))
+ ret = fec_enet_ipc_handle_init(fep);
+ if (ret)
+ goto failed_ipc_init;
+
+ if (of_property_read_bool(np, "fsl,magic-packet"))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
ret = fec_enet_init_stop_mode(fep, np);
@@ -3858,17 +4313,21 @@ fec_probe(struct platform_device *pdev)
fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
/* enet_out is optional, depends on board */
- fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
- if (IS_ERR(fep->clk_enet_out))
- fep->clk_enet_out = NULL;
+ fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
+ if (IS_ERR(fep->clk_enet_out)) {
+ ret = PTR_ERR(fep->clk_enet_out);
+ goto failed_clk;
+ }
fep->ptp_clk_on = false;
mutex_init(&fep->ptp_clk_mutex);
/* clk_ref is optional, depends on board */
- fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
- if (IS_ERR(fep->clk_ref))
- fep->clk_ref = NULL;
+ fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
+ if (IS_ERR(fep->clk_ref)) {
+ ret = PTR_ERR(fep->clk_ref);
+ goto failed_clk;
+ }
fep->clk_ref_rate = clk_get_rate(fep->clk_ref);
/* clk_2x_txclk is optional, depends on board */
@@ -4002,6 +4461,7 @@ failed_rgmii_delay:
of_phy_deregister_fixed_link(np);
of_node_put(phy_node);
failed_stop_mode:
+failed_ipc_init:
failed_phy:
dev_id--;
failed_ioremap:
@@ -4046,6 +4506,7 @@ static int __maybe_unused fec_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
rtnl_lock();
if (netif_running(ndev)) {
@@ -4057,9 +4518,28 @@ static int __maybe_unused fec_suspend(struct device *dev)
netif_device_detach(ndev);
netif_tx_unlock_bh(ndev);
fec_stop(ndev);
- fec_enet_clk_enable(ndev, false);
- if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
+ if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
+ fec_irqs_disable(ndev);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ } else {
+ fec_irqs_disable_except_wakeup(ndev);
+ if (fep->wake_irq > 0) {
+ disable_irq(fep->wake_irq);
+ enable_irq_wake(fep->wake_irq);
+ }
+ fec_enet_stop_mode(fep, true);
+ }
+ /* It's safe to disable clocks since interrupts are masked */
+ fec_enet_clk_enable(ndev, false);
+
+ fep->rpm_active = !pm_runtime_status_suspended(dev);
+ if (fep->rpm_active) {
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0) {
+ rtnl_unlock();
+ return ret;
+ }
+ }
}
rtnl_unlock();
@@ -4090,6 +4570,9 @@ static int __maybe_unused fec_resume(struct device *dev)
rtnl_lock();
if (netif_running(ndev)) {
+ if (fep->rpm_active)
+ pm_runtime_force_resume(dev);
+
ret = fec_enet_clk_enable(ndev, true);
if (ret) {
rtnl_unlock();
@@ -4097,6 +4580,10 @@ static int __maybe_unused fec_resume(struct device *dev)
}
if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
fec_enet_stop_mode(fep, false);
+ if (fep->wake_irq) {
+ disable_irq_wake(fep->wake_irq);
+ enable_irq(fep->wake_irq);
+ }
val = readl(fep->hwp + FEC_ECNTRL);
val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
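The suspend/resume hunks above pair enable_irq_wake() with disable_irq_wake() around the sleep so that only the dedicated wake interrupt can bring the system back while Wake-on-LAN is armed. A generic sketch of that pairing, with the structure and field names invented for illustration:

/* Illustrative wake-IRQ pairing; struct and field names are assumptions. */
#include <linux/device.h>
#include <linux/interrupt.h>

struct example_priv {
	int wake_irq;
};

static int example_suspend(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	disable_irq(priv->wake_irq);		/* stop normal handling */
	enable_irq_wake(priv->wake_irq);	/* arm as a wakeup source */
	return 0;
}

static int example_resume(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	disable_irq_wake(priv->wake_irq);	/* disarm the wakeup source */
	enable_irq(priv->wake_irq);		/* resume normal handling */
	return 0;
}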
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index bbbde9f701c2..b88816b71ddf 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -29,7 +29,9 @@
#include <linux/crc32.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
@@ -99,13 +101,13 @@ static void mpc52xx_fec_tx_timeout(struct net_device *dev, unsigned int txqueue)
netif_wake_queue(dev);
}
-static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
+static void mpc52xx_fec_set_paddr(struct net_device *dev, const u8 *mac)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
- out_be32(&fec->paddr1, *(u32 *)(&mac[0]));
- out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
+ out_be32(&fec->paddr1, *(const u32 *)(&mac[0]));
+ out_be32(&fec->paddr2, (*(const u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
}
static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
@@ -893,13 +895,15 @@ static int mpc52xx_fec_probe(struct platform_device *op)
rv = of_get_ethdev_address(np, ndev);
if (rv) {
struct mpc52xx_fec __iomem *fec = priv->fec;
+ u8 addr[ETH_ALEN] __aligned(4);
/*
* If the MAC address is not provided via DT then read
* it back from the controller regs
*/
- *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
- *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
+ *(u32 *)(&addr[0]) = in_be32(&fec->paddr1);
+ *(u16 *)(&addr[4]) = in_be32(&fec->paddr2) >> 16;
+ eth_hw_addr_set(ndev, addr);
}
/*
@@ -920,7 +924,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
/* Start with safe defaults for link connection */
priv->speed = 100;
priv->duplex = DUPLEX_HALF;
- priv->mdio_speed = ((mpc5xxx_get_bus_frequency(np) >> 20) / 5) << 1;
+ priv->mdio_speed = ((mpc5xxx_get_bus_frequency(&op->dev) >> 20) / 5) << 1;
/* The current speed preconfigures the speed of the MII link */
prop = of_get_property(np, "current-speed", &prop_size);
@@ -933,7 +937,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* the 7-wire property means don't use MII mode */
- if (of_find_property(np, "fsl,7-wire-mode", NULL)) {
+ if (of_property_read_bool(np, "fsl,7-wire-mode")) {
priv->seven_wire_mode = 1;
dev_info(&ndev->dev, "using 7-wire PHY mode\n");
}
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index b5497e308302..95f778cce98c 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -15,6 +15,7 @@
#include <linux/phy.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
+#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <asm/io.h>
#include <asm/mpc52xx.h>
@@ -99,8 +100,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
dev_set_drvdata(dev, bus);
/* set MII speed */
- out_be32(&priv->regs->mii_speed,
- ((mpc5xxx_get_bus_frequency(of->dev.of_node) >> 20) / 5) << 1);
+ out_be32(&priv->regs->mii_speed, ((mpc5xxx_get_bus_frequency(dev) >> 20) / 5) << 1);
err = of_mdiobus_register(bus, np);
if (err)
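Both mii_speed computations above derive an MDIO clock divisor from the bus clock now obtained via mpc5xxx_get_bus_frequency(dev). As a rough reading, assuming the usual FEC requirement that MDC stay at or below 2.5 MHz and that the divisor field sits at bits [6:1] of the register (hence the final shift), the arithmetic works out as in this sketch:

/* Illustrative arithmetic only; the 2.5 MHz limit and field position are
 * assumptions about the FEC MII_SPEED register, not taken from this diff.
 */
static unsigned int example_mii_speed(unsigned int bus_freq_hz)
{
	unsigned int mhz = bus_freq_hz >> 20;	/* cheap divide by ~1e6 */
	unsigned int div = mhz / 5;		/* bus / (2 * 2.5 MHz)  */

	return div << 1;	/* e.g. 132 MHz bus: 125 / 5 = 25 -> 50 */
}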
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index d71eac7e1924..ab86bb8562ef 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -88,6 +88,9 @@
#define FEC_CHANNLE_0 0
#define DEFAULT_PPS_CHANNEL FEC_CHANNLE_0
+#define FEC_PTP_MAX_NSEC_PERIOD 4000000000ULL
+#define FEC_PTP_MAX_NSEC_COUNTER 0x80000000ULL
+
/**
* fec_ptp_enable_pps
* @fep: the fec_enet_private structure handle
@@ -101,7 +104,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
u32 val, tempval;
struct timespec64 ts;
u64 ns;
- val = 0;
if (fep->pps_enable == enable)
return 0;
@@ -136,11 +138,7 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
* NSEC_PER_SEC - ts.tv_nsec. Add the remaining nanoseconds
* to current timer would be next second.
*/
- tempval = readl(fep->hwp + FEC_ATIME_CTRL);
- tempval |= FEC_T_CTRL_CAPTURE;
- writel(tempval, fep->hwp + FEC_ATIME_CTRL);
-
- tempval = readl(fep->hwp + FEC_ATIME);
+ tempval = fep->cc.read(&fep->cc);
/* Convert the ptp local counter to 1588 timestamp */
ns = timecounter_cyc2time(&fep->tc, tempval);
ts = ns_to_timespec64(ns);
@@ -203,6 +201,78 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
return 0;
}
+static int fec_ptp_pps_perout(struct fec_enet_private *fep)
+{
+ u32 compare_val, ptp_hc, temp_val;
+ u64 curr_time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ /* Update time counter */
+ timecounter_read(&fep->tc);
+
+ /* Get the current ptp hardware time counter */
+ temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
+ temp_val |= FEC_T_CTRL_CAPTURE;
+ writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
+ if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+ udelay(1);
+
+ ptp_hc = readl(fep->hwp + FEC_ATIME);
+
+ /* Convert the ptp local counter to 1588 timestamp */
+ curr_time = timecounter_cyc2time(&fep->tc, ptp_hc);
+
+ /* If the pps start time is less than the current time plus 100ms,
+ * just return, because the software might not be able to set the
+ * comparison time into the FEC_TCCR register in time and would miss
+ * the start time.
+ */
+ if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
+ dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ return -1;
+ }
+
+ compare_val = fep->perout_stime - curr_time + ptp_hc;
+ compare_val &= fep->cc.mask;
+
+ writel(compare_val, fep->hwp + FEC_TCCR(fep->pps_channel));
+ fep->next_counter = (compare_val + fep->reload_period) & fep->cc.mask;
+
+ /* Enable compare event when overflow */
+ temp_val = readl(fep->hwp + FEC_ATIME_CTRL);
+ temp_val |= FEC_T_CTRL_PINPER;
+ writel(temp_val, fep->hwp + FEC_ATIME_CTRL);
+
+ /* Compare channel setting. */
+ temp_val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+ temp_val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
+ temp_val &= ~(1 << FEC_T_TDRE_OFFSET);
+ temp_val &= ~(FEC_T_TMODE_MASK);
+ temp_val |= (FEC_TMODE_TOGGLE << FEC_T_TMODE_OFFSET);
+ writel(temp_val, fep->hwp + FEC_TCSR(fep->pps_channel));
+
+ /* Write the second compare event timestamp and calculate
+ * the third timestamp. Refer to the TCCR register details in the spec.
+ */
+ writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
+ fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
+
+static enum hrtimer_restart fec_ptp_pps_perout_handler(struct hrtimer *timer)
+{
+ struct fec_enet_private *fep = container_of(timer,
+ struct fec_enet_private, perout_timer);
+
+ fec_ptp_pps_perout(fep);
+
+ return HRTIMER_NORESTART;
+}
+
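fec_ptp_pps_perout() programs FEC_TCCR with the distance from the current 1588 time to the requested start, folded back into the hardware counter domain. A worked sketch of that compare-value arithmetic, using 0x7fffffff as the counter mask purely as an assumption for illustration:

/* Illustrative only: mirrors the compare_val computation above. */
#include <linux/types.h>

static u32 example_perout_compare(u64 start_ns, u64 now_ns, u32 counter)
{
	u32 mask = 0x7fffffff;	/* assumed 31-bit counter mask */

	/* e.g. with the start 0.5 s ahead and the counter near wrap, the
	 * sum exceeds 31 bits and is folded back modulo 2^31.
	 */
	return (u32)((start_ns - now_ns + counter) & mask);
}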
/**
* fec_ptp_read - read raw cycle counter (to be used by time counter)
* @cc: the cyclecounter structure
@@ -268,18 +338,21 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
}
/**
- * fec_ptp_adjfreq - adjust ptp cycle frequency
+ * fec_ptp_adjfine - adjust ptp cycle frequency
* @ptp: the ptp clock structure
- * @ppb: parts per billion adjustment from base
+ * @scaled_ppm: scaled parts per million adjustment from base
*
* Adjust the frequency of the ptp cycle counter by the
- * indicated ppb from the base frequency.
+ * indicated amount from the base frequency.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*
* Because ENET hardware frequency adjust is complex,
* using software method to do that.
*/
-static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int fec_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
+ s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
unsigned long flags;
int neg_adj = 0;
u32 i, tmp;
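For reference, the scaled_ppm value passed to the new fec_ptp_adjfine() carries ppm with a 16-bit binary fraction, so 1 ppm corresponds to 65536 and ppb is roughly scaled_ppm * 1000 / 2^16. A sketch of that conversion (the in-kernel scaled_ppm_to_ppb() helper is essentially this multiply-and-shift):

/* Illustrative conversion; see scaled_ppm_to_ppb() for the real helper. */
static long example_scaled_ppm_to_ppb(long scaled_ppm)
{
	long ppb = scaled_ppm * 125;	/* 1000 / 2^16 == 125 / 2^13 */

	return ppb >> 13;		/* e.g. 65536 (1 ppm) -> 1000 ppb */
}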
@@ -430,6 +503,17 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
return 0;
}
+static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ writel(0, fep->hwp + FEC_TCSR(channel));
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
+
/**
* fec_ptp_enable
* @ptp: the ptp clock structure
@@ -442,14 +526,84 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
{
struct fec_enet_private *fep =
container_of(ptp, struct fec_enet_private, ptp_caps);
+ ktime_t timeout;
+ struct timespec64 start_time, period;
+ u64 curr_time, delta, period_ns;
+ unsigned long flags;
int ret = 0;
if (rq->type == PTP_CLK_REQ_PPS) {
ret = fec_ptp_enable_pps(fep, on);
return ret;
+ } else if (rq->type == PTP_CLK_REQ_PEROUT) {
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
+ if (rq->perout.index != DEFAULT_PPS_CHANNEL)
+ return -EOPNOTSUPP;
+
+ fep->pps_channel = DEFAULT_PPS_CHANNEL;
+ period.tv_sec = rq->perout.period.sec;
+ period.tv_nsec = rq->perout.period.nsec;
+ period_ns = timespec64_to_ns(&period);
+
+ /* The FEC PTP timer only has 31 bits, so a period exceeding
+ * 4s is not supported.
+ */
+ if (period_ns > FEC_PTP_MAX_NSEC_PERIOD) {
+ dev_err(&fep->pdev->dev, "The period must equal to or less than 4s!\n");
+ return -EOPNOTSUPP;
+ }
+
+ fep->reload_period = div_u64(period_ns, 2);
+ if (on && fep->reload_period) {
+ /* Convert 1588 timestamp to ns */
+ start_time.tv_sec = rq->perout.start.sec;
+ start_time.tv_nsec = rq->perout.start.nsec;
+ fep->perout_stime = timespec64_to_ns(&start_time);
+
+ mutex_lock(&fep->ptp_clk_mutex);
+ if (!fep->ptp_clk_on) {
+ dev_err(&fep->pdev->dev, "Error: PTP clock is closed!\n");
+ mutex_unlock(&fep->ptp_clk_mutex);
+ return -EOPNOTSUPP;
+ }
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+ /* Read current timestamp */
+ curr_time = timecounter_read(&fep->tc);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ mutex_unlock(&fep->ptp_clk_mutex);
+
+ /* Calculate time difference */
+ delta = fep->perout_stime - curr_time;
+
+ if (fep->perout_stime <= curr_time) {
+ dev_err(&fep->pdev->dev, "Start time must larger than current time!\n");
+ return -EINVAL;
+ }
+
+ /* Because the FEC timer counter is only 31 bits wide, only the low
+ * 31 bits of the time comparison register FEC_TCCR can be set. If the
+ * start time of the pps signal exceeds the current time by more than
+ * 0x80000000 ns, a software timer is used; it expires about 1 second
+ * before the start time so that FEC_TCCR can still be set in time.
+ */
+ if (delta > FEC_PTP_MAX_NSEC_COUNTER) {
+ timeout = ns_to_ktime(delta - NSEC_PER_SEC);
+ hrtimer_start(&fep->perout_timer, timeout, HRTIMER_MODE_REL);
+ } else {
+ return fec_ptp_pps_perout(fep);
+ }
+ } else {
+ fec_ptp_pps_disable(fep, fep->pps_channel);
+ }
+
+ return 0;
+ } else {
+ return -EOPNOTSUPP;
}
- return -EOPNOTSUPP;
}
/**
@@ -473,10 +627,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
fep->hwts_tx_en = 0;
@@ -587,15 +737,15 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
int ret;
fep->ptp_caps.owner = THIS_MODULE;
- strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
+ strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
fep->ptp_caps.max_adj = 250000000;
fep->ptp_caps.n_alarm = 0;
fep->ptp_caps.n_ext_ts = 0;
- fep->ptp_caps.n_per_out = 0;
+ fep->ptp_caps.n_per_out = 1;
fep->ptp_caps.n_pins = 0;
fep->ptp_caps.pps = 1;
- fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
+ fep->ptp_caps.adjfine = fec_ptp_adjfine;
fep->ptp_caps.adjtime = fec_ptp_adjtime;
fep->ptp_caps.gettime64 = fec_ptp_gettime;
fep->ptp_caps.settime64 = fec_ptp_settime;
@@ -614,6 +764,9 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
+ hrtimer_init(&fep->perout_timer, CLOCK_REALTIME, HRTIMER_MODE_REL);
+ fep->perout_timer.function = fec_ptp_pps_perout_handler;
+
irq = platform_get_irq_byname_optional(pdev, "pps");
if (irq < 0)
irq = platform_get_irq_optional(pdev, irq_idx);
@@ -643,6 +796,7 @@ void fec_ptp_stop(struct platform_device *pdev)
struct fec_enet_private *fep = netdev_priv(ndev);
cancel_delayed_work_sync(&fep->time_keep);
+ hrtimer_cancel(&fep->perout_timer);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
}
diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
index 48bf8088795d..a55542c1ad65 100644
--- a/drivers/net/ethernet/freescale/fman/Kconfig
+++ b/drivers/net/ethernet/freescale/fman/Kconfig
@@ -3,7 +3,8 @@ config FSL_FMAN
tristate "FMan support"
depends on FSL_SOC || ARCH_LAYERSCAPE || COMPILE_TEST
select GENERIC_ALLOCATOR
- select PHYLIB
+ select PHYLINK
+ select PCS_LYNX
select CRC32
default n
help
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index ce0a121580f6..9d85fb136e34 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -2727,7 +2700,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fman = kzalloc(sizeof(*fman), GFP_KERNEL);
if (!fman)
- return NULL;
+ return ERR_PTR(-ENOMEM);
fm_node = of_node_get(of_dev->dev.of_node);
@@ -2740,26 +2713,21 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fman->dts_params.id = (u8)val;
/* Get the FM interrupt */
- res = platform_get_resource(of_dev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(&of_dev->dev, "%s: Can't get FMan IRQ resource\n",
- __func__);
+ err = platform_get_irq(of_dev, 0);
+ if (err < 0)
goto fman_node_put;
- }
- irq = res->start;
+ irq = err;
/* Get the FM error interrupt */
- res = platform_get_resource(of_dev, IORESOURCE_IRQ, 1);
- if (!res) {
- dev_err(&of_dev->dev, "%s: Can't get FMan Error IRQ resource\n",
- __func__);
+ err = platform_get_irq(of_dev, 1);
+ if (err < 0)
goto fman_node_put;
- }
- fman->dts_params.err_irq = res->start;
+ fman->dts_params.err_irq = err;
/* Get the FM address */
res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
if (!res) {
+ err = -EINVAL;
dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
__func__);
goto fman_node_put;
@@ -2770,6 +2738,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
clk = of_clk_get(fm_node, 0);
if (IS_ERR(clk)) {
+ err = PTR_ERR(clk);
dev_err(&of_dev->dev, "%s: Failed to get FM%d clock structure\n",
__func__, fman->dts_params.id);
goto fman_node_put;
@@ -2777,6 +2746,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
clk_rate = clk_get_rate(clk);
if (!clk_rate) {
+ err = -EINVAL;
dev_err(&of_dev->dev, "%s: Failed to determine FM%d clock rate\n",
__func__, fman->dts_params.id);
goto fman_node_put;
@@ -2797,6 +2767,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
/* Get the MURAM base address and size */
muram_node = of_find_matching_node(fm_node, fman_muram_match);
if (!muram_node) {
+ err = -EINVAL;
dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
__func__);
goto fman_free;
@@ -2836,6 +2807,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
devm_request_mem_region(&of_dev->dev, phys_base_addr,
mem_size, "fman");
if (!fman->dts_params.res) {
+ err = -EBUSY;
dev_err(&of_dev->dev, "%s: request_mem_region() failed\n",
__func__);
goto fman_free;
@@ -2844,6 +2816,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
fman->dts_params.base_addr =
devm_ioremap(&of_dev->dev, phys_base_addr, mem_size);
if (!fman->dts_params.base_addr) {
+ err = -ENOMEM;
dev_err(&of_dev->dev, "%s: devm_ioremap() failed\n", __func__);
goto fman_free;
}
@@ -2868,7 +2841,7 @@ fman_node_put:
of_node_put(fm_node);
fman_free:
kfree(fman);
- return NULL;
+ return ERR_PTR(err);
}
static int fman_probe(struct platform_device *of_dev)
@@ -2880,8 +2853,8 @@ static int fman_probe(struct platform_device *of_dev)
dev = &of_dev->dev;
fman = read_dts_node(of_dev);
- if (!fman)
- return -EIO;
+ if (IS_ERR(fman))
+ return PTR_ERR(fman);
err = fman_config(fman);
if (err) {
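read_dts_node() now reports failures with the standard ERR_PTR idiom instead of a bare NULL, so fman_probe() can forward the exact errno (note the err assignments added on each failure path above). A generic sketch of the idiom, with the "foo" names invented for illustration:

/* Illustrative ERR_PTR pattern; "foo" is not a real driver structure. */
#include <linux/err.h>
#include <linux/slab.h>

struct foo {
	int dummy;
};

static struct foo *foo_create(void)
{
	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return ERR_PTR(-ENOMEM);	/* encode errno in the pointer */
	return p;
}

static int foo_probe(void)
{
	struct foo *p = foo_create();

	if (IS_ERR(p))
		return PTR_ERR(p);	/* propagate -ENOMEM, -EINVAL, ... */
	return 0;
}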
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index f2ede1360f03..2ea575a46675 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FM_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 1950a8936bc0..d528ca681b6f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1,39 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_dtsec.h"
#include "fman.h"
+#include "mac.h"
#include <linux/slab.h>
#include <linux/bitrev.h>
@@ -43,6 +17,7 @@
#include <linux/crc32.h>
#include <linux/of_mdio.h>
#include <linux/mii.h>
+#include <linux/netdevice.h>
/* TBI register addresses */
#define MII_TBICON 0x11
@@ -55,9 +30,6 @@
#define TBICON_CLK_SELECT 0x0020 /* Clock select */
#define TBICON_MI_MODE 0x0010 /* GMII mode (TBI if not set) */
-#define TBIANA_SGMII 0x4001
-#define TBIANA_1000X 0x01a0
-
/* Interrupt Mask Register (IMASK) */
#define DTSEC_IMASK_BREN 0x80000000
#define DTSEC_IMASK_RXCEN 0x40000000
@@ -118,9 +90,10 @@
#define DTSEC_ECNTRL_GMIIM 0x00000040
#define DTSEC_ECNTRL_TBIM 0x00000020
-#define DTSEC_ECNTRL_SGMIIM 0x00000002
#define DTSEC_ECNTRL_RPM 0x00000010
#define DTSEC_ECNTRL_R100M 0x00000008
+#define DTSEC_ECNTRL_RMM 0x00000004
+#define DTSEC_ECNTRL_SGMIIM 0x00000002
#define DTSEC_ECNTRL_QSGMIIM 0x00000001
#define TCTRL_TTSE 0x00000040
@@ -327,7 +300,7 @@ struct fman_mac {
/* Ethernet physical interface */
phy_interface_t phy_if;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* Number of individual addresses in registers for this station */
@@ -344,7 +317,8 @@ struct fman_mac {
void *fm;
struct fman_rev_info fm_rev_info;
bool basex_if;
- struct phy_device *tbiphy;
+ struct mdio_device *tbidev;
+ struct phylink_pcs pcs;
};
static void set_dflts(struct dtsec_cfg *cfg)
@@ -382,56 +356,14 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
phy_interface_t iface, u16 iface_speed, u64 addr,
u32 exception_mask, u8 tbi_addr)
{
- bool is_rgmii, is_sgmii, is_qsgmii;
enet_addr_t eth_addr;
- u32 tmp;
+ u32 tmp = 0;
int i;
/* Soft reset */
iowrite32be(MACCFG1_SOFT_RESET, &regs->maccfg1);
iowrite32be(0, &regs->maccfg1);
- /* dtsec_id2 */
- tmp = ioread32be(&regs->tsec_id2);
-
- /* check RGMII support */
- if (iface == PHY_INTERFACE_MODE_RGMII ||
- iface == PHY_INTERFACE_MODE_RGMII_ID ||
- iface == PHY_INTERFACE_MODE_RGMII_RXID ||
- iface == PHY_INTERFACE_MODE_RGMII_TXID ||
- iface == PHY_INTERFACE_MODE_RMII)
- if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
- return -EINVAL;
-
- if (iface == PHY_INTERFACE_MODE_SGMII ||
- iface == PHY_INTERFACE_MODE_MII)
- if (tmp & DTSEC_ID2_INT_REDUCED_OFF)
- return -EINVAL;
-
- is_rgmii = iface == PHY_INTERFACE_MODE_RGMII ||
- iface == PHY_INTERFACE_MODE_RGMII_ID ||
- iface == PHY_INTERFACE_MODE_RGMII_RXID ||
- iface == PHY_INTERFACE_MODE_RGMII_TXID;
- is_sgmii = iface == PHY_INTERFACE_MODE_SGMII;
- is_qsgmii = iface == PHY_INTERFACE_MODE_QSGMII;
-
- tmp = 0;
- if (is_rgmii || iface == PHY_INTERFACE_MODE_GMII)
- tmp |= DTSEC_ECNTRL_GMIIM;
- if (is_sgmii)
- tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM);
- if (is_qsgmii)
- tmp |= (DTSEC_ECNTRL_SGMIIM | DTSEC_ECNTRL_TBIM |
- DTSEC_ECNTRL_QSGMIIM);
- if (is_rgmii)
- tmp |= DTSEC_ECNTRL_RPM;
- if (iface_speed == SPEED_100)
- tmp |= DTSEC_ECNTRL_R100M;
-
- iowrite32be(tmp, &regs->ecntrl);
-
- tmp = 0;
-
if (cfg->tx_pause_time)
tmp |= cfg->tx_pause_time;
if (cfg->tx_pause_time_extd)
@@ -472,17 +404,10 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
tmp = 0;
- if (iface_speed < SPEED_1000)
- tmp |= MACCFG2_NIBBLE_MODE;
- else if (iface_speed == SPEED_1000)
- tmp |= MACCFG2_BYTE_MODE;
-
tmp |= (cfg->preamble_len << MACCFG2_PREAMBLE_LENGTH_SHIFT) &
MACCFG2_PREAMBLE_LENGTH_MASK;
if (cfg->tx_pad_crc)
tmp |= MACCFG2_PAD_CRC_EN;
- /* Full Duplex */
- tmp |= MACCFG2_FULL_DUPLEX;
iowrite32be(tmp, &regs->maccfg2);
tmp = (((cfg->non_back_to_back_ipg1 <<
@@ -551,10 +476,6 @@ static void set_bucket(struct dtsec_regs __iomem *regs, int bucket,
static int check_init_parameters(struct fman_mac *dtsec)
{
- if (dtsec->max_speed >= SPEED_10000) {
- pr_err("1G MAC driver supports 1G or lower speeds\n");
- return -EINVAL;
- }
if ((dtsec->dtsec_drv_param)->rx_prepend >
MAX_PACKET_ALIGNMENT) {
pr_err("packetAlignmentPadding can't be > than %d\n",
@@ -656,22 +577,10 @@ static int get_exception_flag(enum fman_mac_exceptions exception)
return bit_mask;
}
-static bool is_init_done(struct dtsec_cfg *dtsec_drv_params)
-{
- /* Checks if dTSEC driver parameters were initialized */
- if (!dtsec_drv_params)
- return true;
-
- return false;
-}
-
static u16 dtsec_get_max_frame_length(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- if (is_init_done(dtsec->dtsec_drv_param))
- return 0;
-
return (u16)ioread32be(&regs->maxfrm);
}
@@ -708,6 +617,7 @@ static void dtsec_isr(void *handle)
dtsec->exception_cb(dtsec->dev_id, FM_MAC_EX_1G_COL_RET_LMT);
if (event & DTSEC_IMASK_XFUNEN) {
/* FM_TX_LOCKUP_ERRATA_DTSEC6 Errata workaround */
+ /* FIXME: This races with the rest of the driver! */
if (dtsec->fm_rev_info.major == 2) {
u32 tpkt1, tmp_reg1, tpkt2, tmp_reg2, i;
/* a. Write 0x00E0_0C00 to DTSEC_ID
@@ -840,135 +750,98 @@ static void free_init_resources(struct fman_mac *dtsec)
dtsec->unicast_addr_hash = NULL;
}
-int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
+static struct fman_mac *pcs_to_dtsec(struct phylink_pcs *pcs)
{
- if (is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
+ return container_of(pcs, struct fman_mac, pcs);
+}
- dtsec->dtsec_drv_param->maximum_frame = new_val;
+static void dtsec_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct fman_mac *dtsec = pcs_to_dtsec(pcs);
- return 0;
+ phylink_mii_c22_pcs_get_state(dtsec->tbidev, state);
}
-int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
+static int dtsec_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- if (is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
+ struct fman_mac *dtsec = pcs_to_dtsec(pcs);
- dtsec->dtsec_drv_param->tx_pad_crc = new_val;
+ return phylink_mii_c22_pcs_config(dtsec->tbidev, mode, interface,
+ advertising);
+}
- return 0;
+static void dtsec_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct fman_mac *dtsec = pcs_to_dtsec(pcs);
+
+ phylink_mii_c22_pcs_an_restart(dtsec->tbidev);
}
-static void graceful_start(struct fman_mac *dtsec, enum comm_mode mode)
+static const struct phylink_pcs_ops dtsec_pcs_ops = {
+ .pcs_get_state = dtsec_pcs_get_state,
+ .pcs_config = dtsec_pcs_config,
+ .pcs_an_restart = dtsec_pcs_an_restart,
+};
+
+static void graceful_start(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- if (mode & COMM_MODE_TX)
- iowrite32be(ioread32be(&regs->tctrl) &
- ~TCTRL_GTS, &regs->tctrl);
- if (mode & COMM_MODE_RX)
- iowrite32be(ioread32be(&regs->rctrl) &
- ~RCTRL_GRS, &regs->rctrl);
+ iowrite32be(ioread32be(&regs->tctrl) & ~TCTRL_GTS, &regs->tctrl);
+ iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS, &regs->rctrl);
}
-static void graceful_stop(struct fman_mac *dtsec, enum comm_mode mode)
+static void graceful_stop(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
/* Graceful stop - Assert the graceful Rx stop bit */
- if (mode & COMM_MODE_RX) {
- tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
- iowrite32be(tmp, &regs->rctrl);
+ tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
+ iowrite32be(tmp, &regs->rctrl);
- if (dtsec->fm_rev_info.major == 2) {
- /* Workaround for dTSEC Errata A002 */
- usleep_range(100, 200);
- } else {
- /* Workaround for dTSEC Errata A004839 */
- usleep_range(10, 50);
- }
+ if (dtsec->fm_rev_info.major == 2) {
+ /* Workaround for dTSEC Errata A002 */
+ usleep_range(100, 200);
+ } else {
+ /* Workaround for dTSEC Errata A004839 */
+ usleep_range(10, 50);
}
/* Graceful stop - Assert the graceful Tx stop bit */
- if (mode & COMM_MODE_TX) {
- if (dtsec->fm_rev_info.major == 2) {
- /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
- pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
- } else {
- tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
- iowrite32be(tmp, &regs->tctrl);
+ if (dtsec->fm_rev_info.major == 2) {
+ /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
+ pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
+ } else {
+ tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
+ iowrite32be(tmp, &regs->tctrl);
- /* Workaround for dTSEC Errata A0012, A0014 */
- usleep_range(10, 50);
- }
+ /* Workaround for dTSEC Errata A0012, A0014 */
+ usleep_range(10, 50);
}
}
-int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
+static int dtsec_enable(struct fman_mac *dtsec)
{
- struct dtsec_regs __iomem *regs = dtsec->regs;
- u32 tmp;
-
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- /* Enable */
- tmp = ioread32be(&regs->maccfg1);
- if (mode & COMM_MODE_RX)
- tmp |= MACCFG1_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= MACCFG1_TX_EN;
-
- iowrite32be(tmp, &regs->maccfg1);
-
- /* Graceful start - clear the graceful Rx/Tx stop bit */
- graceful_start(dtsec, mode);
-
return 0;
}
-int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
+static void dtsec_disable(struct fman_mac *dtsec)
{
- struct dtsec_regs __iomem *regs = dtsec->regs;
- u32 tmp;
-
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- /* Graceful stop - Assert the graceful Rx/Tx stop bit */
- graceful_stop(dtsec, mode);
-
- tmp = ioread32be(&regs->maccfg1);
- if (mode & COMM_MODE_RX)
- tmp &= ~MACCFG1_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~MACCFG1_TX_EN;
-
- iowrite32be(tmp, &regs->maccfg1);
-
- return 0;
}
-int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
- u8 __maybe_unused priority,
- u16 pause_time, u16 __maybe_unused thresh_time)
+static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
+ u8 __maybe_unused priority,
+ u16 pause_time,
+ u16 __maybe_unused thresh_time)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 ptv = 0;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
-
if (pause_time) {
/* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) {
@@ -989,27 +862,14 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
&regs->maccfg1);
- graceful_start(dtsec, mode);
-
return 0;
}
-int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
+static int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 tmp;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
-
tmp = ioread32be(&regs->maccfg1);
if (en)
tmp |= MACCFG1_RX_FLOW;
@@ -1017,25 +877,125 @@ int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
tmp &= ~MACCFG1_RX_FLOW;
iowrite32be(tmp, &regs->maccfg1);
- graceful_start(dtsec, mode);
-
return 0;
}
-int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr)
+static struct phylink_pcs *dtsec_select_pcs(struct phylink_config *config,
+ phy_interface_t iface)
{
+ struct fman_mac *dtsec = fman_config_to_mac(config)->fman_mac;
+
+ switch (iface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ return &dtsec->pcs;
+ default:
+ return NULL;
+ }
+}
+
+static void dtsec_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mac_device *mac_dev = fman_config_to_mac(config);
+ struct dtsec_regs __iomem *regs = mac_dev->fman_mac->regs;
+ u32 tmp;
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ tmp = DTSEC_ECNTRL_RMM;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ tmp = DTSEC_ECNTRL_GMIIM | DTSEC_ECNTRL_RPM;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ tmp = DTSEC_ECNTRL_TBIM | DTSEC_ECNTRL_SGMIIM;
+ break;
+ default:
+ dev_warn(mac_dev->dev, "cannot configure dTSEC for %s\n",
+ phy_modes(state->interface));
+ return;
+ }
+
+ iowrite32be(tmp, &regs->ecntrl);
+}
+
+static void dtsec_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct mac_device *mac_dev = fman_config_to_mac(config);
+ struct fman_mac *dtsec = mac_dev->fman_mac;
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
+ u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE :
+ FSL_FM_PAUSE_TIME_DISABLE;
+ u32 tmp;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
+ dtsec_set_tx_pause_frames(dtsec, 0, pause_time, 0);
+ dtsec_accept_rx_pause_frames(dtsec, rx_pause);
+
+ tmp = ioread32be(&regs->ecntrl);
+ if (speed == SPEED_100)
+ tmp |= DTSEC_ECNTRL_R100M;
+ else
+ tmp &= ~DTSEC_ECNTRL_R100M;
+ iowrite32be(tmp, &regs->ecntrl);
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
+ tmp = ioread32be(&regs->maccfg2);
+ tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE | MACCFG2_FULL_DUPLEX);
+ if (speed >= SPEED_1000)
+ tmp |= MACCFG2_BYTE_MODE;
+ else
+ tmp |= MACCFG2_NIBBLE_MODE;
+
+ if (duplex == DUPLEX_FULL)
+ tmp |= MACCFG2_FULL_DUPLEX;
- graceful_stop(dtsec, mode);
+ iowrite32be(tmp, &regs->maccfg2);
+
+ mac_dev->update_speed(mac_dev, speed);
+
+ /* Enable */
+ tmp = ioread32be(&regs->maccfg1);
+ tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN;
+ iowrite32be(tmp, &regs->maccfg1);
+
+ /* Graceful start - clear the graceful Rx/Tx stop bit */
+ graceful_start(dtsec);
+}
+
+static void dtsec_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct fman_mac *dtsec = fman_config_to_mac(config)->fman_mac;
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+ u32 tmp;
+
+ /* Graceful stop - Assert the graceful Rx/Tx stop bit */
+ graceful_stop(dtsec);
+
+ tmp = ioread32be(&regs->maccfg1);
+ tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
+ iowrite32be(tmp, &regs->maccfg1);
+}
+
+static const struct phylink_mac_ops dtsec_mac_ops = {
+ .mac_select_pcs = dtsec_select_pcs,
+ .mac_config = dtsec_mac_config,
+ .mac_link_up = dtsec_link_up,
+ .mac_link_down = dtsec_link_down,
+};
+
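These ops replace the old adjust_link flow: phylink now decides when to call .mac_config(), .mac_link_up() and .mac_link_down(), and picks the TBI PCS through .mac_select_pcs(). The phylink instance itself is created by the DPAA mac layer rather than in this file, so the hand-off below is only a rough sketch with simplified error handling:

/* Illustrative only; assumes struct mac_device from mac.h as used above. */
#include <linux/err.h>
#include <linux/phylink.h>
#include "mac.h"

static int example_phylink_setup(struct mac_device *mac_dev,
				 struct fwnode_handle *fwnode)
{
	struct phylink *pl;

	pl = phylink_create(&mac_dev->phylink_config, fwnode,
			    mac_dev->phy_if, &dtsec_mac_ops);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* From here phylink drives .mac_select_pcs(), .mac_config(),
	 * .mac_link_up() and .mac_link_down() as the link changes.
	 */
	return 0;
}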
+static int dtsec_modify_mac_address(struct fman_mac *dtsec,
+ const enet_addr_t *enet_addr)
+{
+ graceful_stop(dtsec);
/* Initialize MAC Station Address registers (1 & 2)
* Station address have to be swapped (big endian to little endian
@@ -1043,12 +1003,13 @@ int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_add
dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
set_mac_address(dtsec->regs, (const u8 *)(*enet_addr));
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+static int dtsec_add_hash_mac_address(struct fman_mac *dtsec,
+ enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct eth_hash_entry *hash_entry;
@@ -1057,9 +1018,6 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
u32 crc = 0xFFFFFFFF;
bool mcast, ghtx;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
addr = ENET_ADDR_TO_UINT64(*eth_addr);
ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
@@ -1114,14 +1072,11 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
return 0;
}
-int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
+static int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
{
u32 tmp;
struct dtsec_regs __iomem *regs = dtsec->regs;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
tmp = ioread32be(&regs->rctrl);
if (enable)
tmp |= RCTRL_MPROM;
@@ -1133,14 +1088,11 @@ int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
return 0;
}
-int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
+static int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 rctrl, tctrl;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
rctrl = ioread32be(&regs->rctrl);
tctrl = ioread32be(&regs->tctrl);
@@ -1158,7 +1110,8 @@ int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
return 0;
}
-int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+static int dtsec_del_hash_mac_address(struct fman_mac *dtsec,
+ enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct list_head *pos;
@@ -1168,9 +1121,6 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
u32 crc = 0xFFFFFFFF;
bool mcast, ghtx;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
addr = ENET_ADDR_TO_UINT64(*eth_addr);
ghtx = (bool)((ioread32be(&regs->rctrl) & RCTRL_GHTX) ? true : false);
@@ -1229,14 +1179,11 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
return 0;
}
-int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
+static int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
/* Set unicast promiscuous */
tmp = ioread32be(&regs->rctrl);
if (new_val)
@@ -1258,85 +1205,12 @@ int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
return 0;
}
-int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
-{
- struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
- u32 tmp;
-
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
-
- tmp = ioread32be(&regs->maccfg2);
-
- /* Full Duplex */
- tmp |= MACCFG2_FULL_DUPLEX;
-
- tmp &= ~(MACCFG2_NIBBLE_MODE | MACCFG2_BYTE_MODE);
- if (speed < SPEED_1000)
- tmp |= MACCFG2_NIBBLE_MODE;
- else if (speed == SPEED_1000)
- tmp |= MACCFG2_BYTE_MODE;
- iowrite32be(tmp, &regs->maccfg2);
-
- tmp = ioread32be(&regs->ecntrl);
- if (speed == SPEED_100)
- tmp |= DTSEC_ECNTRL_R100M;
- else
- tmp &= ~DTSEC_ECNTRL_R100M;
- iowrite32be(tmp, &regs->ecntrl);
-
- graceful_start(dtsec, mode);
-
- return 0;
-}
-
-int dtsec_restart_autoneg(struct fman_mac *dtsec)
-{
- u16 tmp_reg16;
-
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- tmp_reg16 = phy_read(dtsec->tbiphy, MII_BMCR);
-
- tmp_reg16 &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
- tmp_reg16 |= (BMCR_ANENABLE | BMCR_ANRESTART |
- BMCR_FULLDPLX | BMCR_SPEED1000);
-
- phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
-
- return 0;
-}
-
-int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
-{
- struct dtsec_regs __iomem *regs = dtsec->regs;
-
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- *mac_version = ioread32be(&regs->tsec_id);
-
- return 0;
-}
-
-int dtsec_set_exception(struct fman_mac *dtsec,
- enum fman_mac_exceptions exception, bool enable)
+static int dtsec_set_exception(struct fman_mac *dtsec,
+ enum fman_mac_exceptions exception, bool enable)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 bit_mask = 0;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
if (exception != FM_MAC_EX_1G_1588_TS_RX_ERR) {
bit_mask = get_exception_flag(exception);
if (bit_mask) {
@@ -1382,16 +1256,13 @@ int dtsec_set_exception(struct fman_mac *dtsec,
return 0;
}
-int dtsec_init(struct fman_mac *dtsec)
+static int dtsec_init(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct dtsec_cfg *dtsec_drv_param;
- u16 max_frm_ln;
+ u16 max_frm_ln, tbicon;
int err;
- if (is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
if (DEFAULT_RESET_ON_INIT &&
(fman_reset_mac(dtsec->fm, dtsec->mac_id) != 0)) {
pr_err("Can't reset MAC!\n");
@@ -1406,38 +1277,19 @@ int dtsec_init(struct fman_mac *dtsec)
err = init(dtsec->regs, dtsec_drv_param, dtsec->phy_if,
dtsec->max_speed, dtsec->addr, dtsec->exceptions,
- dtsec->tbiphy->mdio.addr);
+ dtsec->tbidev->addr);
if (err) {
free_init_resources(dtsec);
pr_err("DTSEC version doesn't support this i/f mode\n");
return err;
}
- if (dtsec->phy_if == PHY_INTERFACE_MODE_SGMII) {
- u16 tmp_reg16;
-
- /* Configure the TBI PHY Control Register */
- tmp_reg16 = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
- phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
+ /* Configure the TBI PHY Control Register */
+ tbicon = TBICON_CLK_SELECT | TBICON_SOFT_RESET;
+ mdiodev_write(dtsec->tbidev, MII_TBICON, tbicon);
- tmp_reg16 = TBICON_CLK_SELECT;
- phy_write(dtsec->tbiphy, MII_TBICON, tmp_reg16);
-
- tmp_reg16 = (BMCR_RESET | BMCR_ANENABLE |
- BMCR_FULLDPLX | BMCR_SPEED1000);
- phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
-
- if (dtsec->basex_if)
- tmp_reg16 = TBIANA_1000X;
- else
- tmp_reg16 = TBIANA_SGMII;
- phy_write(dtsec->tbiphy, MII_ADVERTISE, tmp_reg16);
-
- tmp_reg16 = (BMCR_ANENABLE | BMCR_ANRESTART |
- BMCR_FULLDPLX | BMCR_SPEED1000);
-
- phy_write(dtsec->tbiphy, MII_BMCR, tmp_reg16);
- }
+ tbicon = TBICON_CLK_SELECT;
+ mdiodev_write(dtsec->tbidev, MII_TBICON, tbicon);
/* Max Frame Length */
max_frm_ln = (u16)ioread32be(&regs->maxfrm);
@@ -1476,24 +1328,24 @@ int dtsec_init(struct fman_mac *dtsec)
return 0;
}
-int dtsec_free(struct fman_mac *dtsec)
+static int dtsec_free(struct fman_mac *dtsec)
{
free_init_resources(dtsec);
kfree(dtsec->dtsec_drv_param);
dtsec->dtsec_drv_param = NULL;
+ if (!IS_ERR_OR_NULL(dtsec->tbidev))
+ put_device(&dtsec->tbidev->dev);
kfree(dtsec);
return 0;
}
-struct fman_mac *dtsec_config(struct fman_mac_params *params)
+static struct fman_mac *dtsec_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *dtsec;
struct dtsec_cfg *dtsec_drv_param;
- void __iomem *base_addr;
-
- base_addr = params->base_addr;
/* allocate memory for the UCC GETH data structure. */
dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
@@ -1510,10 +1362,9 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)
set_dflts(dtsec_drv_param);
- dtsec->regs = base_addr;
- dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
- dtsec->max_speed = params->max_speed;
- dtsec->phy_if = params->phy_if;
+ dtsec->regs = mac_dev->vaddr;
+ dtsec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
+ dtsec->phy_if = mac_dev->phy_if;
dtsec->mac_id = params->mac_id;
dtsec->exceptions = (DTSEC_IMASK_BREN |
DTSEC_IMASK_RXCEN |
@@ -1530,34 +1381,123 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)
DTSEC_IMASK_RDPEEN);
dtsec->exception_cb = params->exception_cb;
dtsec->event_cb = params->event_cb;
- dtsec->dev_id = params->dev_id;
+ dtsec->dev_id = mac_dev;
dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
dtsec->fm = params->fm;
- dtsec->basex_if = params->basex_if;
-
- if (!params->internal_phy_node) {
- pr_err("TBI PHY node is not available\n");
- goto err_dtsec_drv_param;
- }
-
- dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
- if (!dtsec->tbiphy) {
- pr_err("of_phy_find_device (TBI PHY) failed\n");
- goto err_dtsec_drv_param;
- }
-
- put_device(&dtsec->tbiphy->mdio.dev);
/* Save FMan revision */
fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
return dtsec;
-err_dtsec_drv_param:
- kfree(dtsec_drv_param);
err_dtsec:
kfree(dtsec);
return NULL;
}
+
+int dtsec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct fman_mac *dtsec;
+ struct device_node *phy_node;
+ unsigned long capabilities;
+ unsigned long *supported;
+
+ mac_dev->phylink_ops = &dtsec_mac_ops;
+ mac_dev->set_promisc = dtsec_set_promiscuous;
+ mac_dev->change_addr = dtsec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
+ mac_dev->set_exception = dtsec_set_exception;
+ mac_dev->set_allmulti = dtsec_set_allmulti;
+ mac_dev->set_tstamp = dtsec_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->enable = dtsec_enable;
+ mac_dev->disable = dtsec_disable;
+
+ mac_dev->fman_mac = dtsec_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ dtsec = mac_dev->fman_mac;
+ dtsec->dtsec_drv_param->maximum_frame = fman_get_max_frm();
+ dtsec->dtsec_drv_param->tx_pad_crc = true;
+
+ phy_node = of_parse_phandle(mac_node, "tbi-handle", 0);
+ if (!phy_node || !of_device_is_available(phy_node)) {
+ of_node_put(phy_node);
+ err = -EINVAL;
+ dev_err_probe(mac_dev->dev, err,
+ "TBI PCS node is not available\n");
+ goto _return_fm_mac_free;
+ }
+
+ dtsec->tbidev = of_mdio_find_device(phy_node);
+ of_node_put(phy_node);
+ if (!dtsec->tbidev) {
+ err = -EPROBE_DEFER;
+ dev_err_probe(mac_dev->dev, err,
+ "could not find mdiodev for PCS\n");
+ goto _return_fm_mac_free;
+ }
+ dtsec->pcs.ops = &dtsec_pcs_ops;
+ dtsec->pcs.poll = true;
+
+ supported = mac_dev->phylink_config.supported_interfaces;
+
+ /* FIXME: Can we use DTSEC_ID2_INT_FULL_OFF to determine if these are
+ * supported? If not, we can determine support via the phy if SerDes
+ * support is added.
+ */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII ||
+ mac_dev->phy_if == PHY_INTERFACE_MODE_1000BASEX) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ } else if (mac_dev->phy_if == PHY_INTERFACE_MODE_2500BASEX) {
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+ }
+
+ if (!(ioread32be(&dtsec->regs->tsec_id2) & DTSEC_ID2_INT_REDUCED_OFF)) {
+ phy_interface_set_rgmii(supported);
+
+ /* DTSEC_ID2_INT_REDUCED_OFF indicates that the dTSEC supports
+ * RMII and RGMII. However, the only SoCs which support RMII
+ * are the P1017 and P1023. Avoid advertising this mode on
+ * other SoCs. This is a bit of a moot point, since there's no
+ * in-tree support for ethernet on these platforms...
+ */
+ if (of_machine_is_compatible("fsl,P1023") ||
+ of_machine_is_compatible("fsl,P1023RDB"))
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ }
+
+ capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
+ capabilities |= MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD;
+ mac_dev->phylink_config.mac_capabilities = capabilities;
+
+ err = dtsec_init(dtsec);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ /* For 1G MAC, disable by default the MIB counters overflow interrupt */
+ err = dtsec_set_exception(dtsec, FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ dev_info(mac_dev->dev, "FMan dTSEC version: 0x%08x\n",
+ ioread32be(&dtsec->regs->tsec_id));
+
+ goto _return;
+
+_return_fm_mac_free:
+ dtsec_free(dtsec);
+
+_return:
+ return err;
+}
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
index 68512c3bd6e5..8c72d280c51a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __DTSEC_H
@@ -35,27 +8,10 @@
#include "fman_mac.h"
-struct fman_mac *dtsec_config(struct fman_mac_params *params);
-int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
-int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr);
-int dtsec_adjust_link(struct fman_mac *dtsec,
- u16 speed);
-int dtsec_restart_autoneg(struct fman_mac *dtsec);
-int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val);
-int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val);
-int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode);
-int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode);
-int dtsec_init(struct fman_mac *dtsec);
-int dtsec_free(struct fman_mac *dtsec);
-int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en);
-int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u8 priority,
- u16 pause_time, u16 thresh_time);
-int dtsec_set_exception(struct fman_mac *dtsec,
- enum fman_mac_exceptions exception, bool enable);
-int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
-int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
-int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
-int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable);
-int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable);
+struct mac_device;
+
+int dtsec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __DTSEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.c b/drivers/net/ethernet/freescale/fman/fman_keygen.c
index e1bdfed16134..e73f6ef3c6ee 100644
--- a/drivers/net/ethernet/freescale/fman/fman_keygen.c
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of NXP nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.h b/drivers/net/ethernet/freescale/fman/fman_keygen.h
index c4640de3f4cb..2cb0df453074 100644
--- a/drivers/net/ethernet/freescale/fman/fman_keygen.h
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of NXP nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __KEYGEN_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index 19f327efdaff..e5d6cddea731 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -41,6 +41,7 @@
#include <linux/if_ether.h>
struct fman_mac;
+struct mac_device;
/* Ethernet Address */
typedef u8 enet_addr_t[ETH_ALEN];
@@ -75,16 +76,6 @@ typedef u8 enet_addr_t[ETH_ALEN];
#define ETH_HASH_ENTRY_OBJ(ptr) \
hlist_entry_safe(ptr, struct eth_hash_entry, node)
-/* Enumeration (bit flags) of communication modes (Transmit,
- * receive or both).
- */
-enum comm_mode {
- COMM_MODE_NONE = 0, /* No transmit/receive communication */
- COMM_MODE_RX = 1, /* Only receive communication */
- COMM_MODE_TX = 2, /* Only transmit communication */
- COMM_MODE_RX_AND_TX = 3 /* Both transmit and receive communication */
-};
-
/* FM MAC Exceptions */
enum fman_mac_exceptions {
FM_MAC_EX_10G_MDIO_SCAN_EVENT = 0
@@ -168,40 +159,21 @@ struct eth_hash_entry {
struct list_head node;
};
-typedef void (fman_mac_exception_cb)(void *dev_id,
- enum fman_mac_exceptions exceptions);
+typedef void (fman_mac_exception_cb)(struct mac_device *dev_id,
+ enum fman_mac_exceptions exceptions);
/* FMan MAC config input */
struct fman_mac_params {
- /* Base of memory mapped FM MAC registers */
- void __iomem *base_addr;
- /* MAC address of device; First octet is sent first */
- enet_addr_t addr;
/* MAC ID; numbering of dTSEC and 1G-mEMAC:
* 0 - FM_MAX_NUM_OF_1G_MACS;
* numbering of 10G-MAC (TGEC) and 10G-mEMAC:
* 0 - FM_MAX_NUM_OF_10G_MACS
*/
u8 mac_id;
- /* PHY interface */
- phy_interface_t phy_if;
- /* Note that the speed should indicate the maximum rate that
- * this MAC should support rather than the actual speed;
- */
- u16 max_speed;
/* A handle to the FM object this port related to */
void *fm;
- void *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
- /* SGMII/QSGII interface with 1000BaseX auto-negotiation between MAC
- * and phy or backplane; Note: 1000BaseX auto-negotiation relates only
- * to interface between MAC and phy/backplane, SGMII phy can still
- * synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
- */
- bool basex_if;
- /* Pointer to TBI/PCS PHY node, used for TBI/PCS PHY access */
- struct device_node *internal_phy_node;
};
struct eth_hash_t {
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 2216b7f51d26..625c79d5636f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1,78 +1,22 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_memac.h"
#include "fman.h"
+#include "mac.h"
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/pcs-lynx.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
+#include <linux/phy/phy.h>
#include <linux/of_mdio.h>
-/* PCS registers */
-#define MDIO_SGMII_CR 0x00
-#define MDIO_SGMII_DEV_ABIL_SGMII 0x04
-#define MDIO_SGMII_LINK_TMR_L 0x12
-#define MDIO_SGMII_LINK_TMR_H 0x13
-#define MDIO_SGMII_IF_MODE 0x14
-
-/* SGMII Control defines */
-#define SGMII_CR_AN_EN 0x1000
-#define SGMII_CR_RESTART_AN 0x0200
-#define SGMII_CR_FD 0x0100
-#define SGMII_CR_SPEED_SEL1_1G 0x0040
-#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \
- SGMII_CR_SPEED_SEL1_1G)
-
-/* SGMII Device Ability for SGMII defines */
-#define MDIO_SGMII_DEV_ABIL_SGMII_MODE 0x4001
-#define MDIO_SGMII_DEV_ABIL_BASEX_MODE 0x01A0
-
-/* Link timer define */
-#define LINK_TMR_L 0xa120
-#define LINK_TMR_H 0x0007
-#define LINK_TMR_L_BASEX 0xaf08
-#define LINK_TMR_H_BASEX 0x002f
-
-/* SGMII IF Mode defines */
-#define IF_MODE_USE_SGMII_AN 0x0002
-#define IF_MODE_SGMII_EN 0x0001
-#define IF_MODE_SGMII_SPEED_100M 0x0004
-#define IF_MODE_SGMII_SPEED_1G 0x0008
-#define IF_MODE_SGMII_DUPLEX_HALF 0x0010
-
/* Num of additional exact match MAC adr regs */
#define MEMAC_NUM_OF_PADDRS 7
@@ -334,10 +278,7 @@ struct fman_mac {
struct memac_regs __iomem *regs;
/* MAC address of device */
u64 addr;
- /* Ethernet physical interface */
- phy_interface_t phy_if;
- u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* Pointer to driver's global address hash table */
@@ -349,9 +290,12 @@ struct fman_mac {
struct memac_cfg *memac_drv_param;
void *fm;
struct fman_rev_info fm_rev_info;
- bool basex_if;
- struct phy_device *pcsphy;
+ struct phy *serdes;
+ struct phylink_pcs *sgmii_pcs;
+ struct phylink_pcs *qsgmii_pcs;
+ struct phylink_pcs *xfi_pcs;
bool allmulti_enabled;
+ bool rgmii_no_half_duplex;
};
static void add_addr_in_paddr(struct memac_regs __iomem *regs, const u8 *adr,
@@ -409,7 +353,6 @@ static void set_exception(struct memac_regs __iomem *regs, u32 val,
}
static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
- phy_interface_t phy_if, u16 speed, bool slow_10g_if,
u32 exceptions)
{
u32 tmp;
@@ -437,41 +380,6 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
iowrite32be((u32)cfg->pause_quanta, &regs->pause_quanta[0]);
iowrite32be((u32)0, &regs->pause_thresh[0]);
- /* IF_MODE */
- tmp = 0;
- switch (phy_if) {
- case PHY_INTERFACE_MODE_XGMII:
- tmp |= IF_MODE_10G;
- break;
- case PHY_INTERFACE_MODE_MII:
- tmp |= IF_MODE_MII;
- break;
- default:
- tmp |= IF_MODE_GMII;
- if (phy_if == PHY_INTERFACE_MODE_RGMII ||
- phy_if == PHY_INTERFACE_MODE_RGMII_ID ||
- phy_if == PHY_INTERFACE_MODE_RGMII_RXID ||
- phy_if == PHY_INTERFACE_MODE_RGMII_TXID)
- tmp |= IF_MODE_RGMII | IF_MODE_RGMII_AUTO;
- }
- iowrite32be(tmp, &regs->if_mode);
-
- /* TX_FIFO_SECTIONS */
- tmp = 0;
- if (phy_if == PHY_INTERFACE_MODE_XGMII) {
- if (slow_10g_if) {
- tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G |
- TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
- } else {
- tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_10G |
- TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G);
- }
- } else {
- tmp |= (TX_FIFO_SECTIONS_TX_AVAIL_1G |
- TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G);
- }
- iowrite32be(tmp, &regs->tx_fifo_sections);
-
/* clear all pending events and set-up interrupts */
iowrite32be(0xffffffff, &regs->ievent);
set_exception(regs, exceptions, true);
@@ -511,93 +419,6 @@ static u32 get_mac_addr_hash_code(u64 eth_addr)
return xor_val;
}
-static void setup_sgmii_internal_phy(struct fman_mac *memac,
- struct fixed_phy_status *fixed_link)
-{
- u16 tmp_reg16;
-
- if (WARN_ON(!memac->pcsphy))
- return;
-
- /* SGMII mode */
- tmp_reg16 = IF_MODE_SGMII_EN;
- if (!fixed_link)
- /* AN enable */
- tmp_reg16 |= IF_MODE_USE_SGMII_AN;
- else {
- switch (fixed_link->speed) {
- case 10:
- /* For 10M: IF_MODE[SPEED_10M] = 0 */
- break;
- case 100:
- tmp_reg16 |= IF_MODE_SGMII_SPEED_100M;
- break;
- case 1000:
- default:
- tmp_reg16 |= IF_MODE_SGMII_SPEED_1G;
- break;
- }
- if (!fixed_link->duplex)
- tmp_reg16 |= IF_MODE_SGMII_DUPLEX_HALF;
- }
- phy_write(memac->pcsphy, MDIO_SGMII_IF_MODE, tmp_reg16);
-
- /* Device ability according to SGMII specification */
- tmp_reg16 = MDIO_SGMII_DEV_ABIL_SGMII_MODE;
- phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
-
- /* Adjust link timer for SGMII -
- * According to Cisco SGMII specification the timer should be 1.6 ms.
- * The link_timer register is configured in units of the clock.
- * - When running as 1G SGMII, Serdes clock is 125 MHz, so
- * unit = 1 / (125*10^6 Hz) = 8 ns.
- * 1.6 ms in units of 8 ns = 1.6ms / 8ns = 2*10^5 = 0x30d40
- * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
- * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
- * 1.6 ms in units of 3.2 ns = 1.6ms / 3.2ns = 5*10^5 = 0x7a120.
- * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
- * we always set up here a value of 2.5 SGMII.
- */
- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H);
- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L);
-
- if (!fixed_link)
- /* Restart AN */
- tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
- else
- /* AN disabled */
- tmp_reg16 = SGMII_CR_DEF_VAL & ~SGMII_CR_AN_EN;
- phy_write(memac->pcsphy, 0x0, tmp_reg16);
-}
-
-static void setup_sgmii_internal_phy_base_x(struct fman_mac *memac)
-{
- u16 tmp_reg16;
-
- /* AN Device capability */
- tmp_reg16 = MDIO_SGMII_DEV_ABIL_BASEX_MODE;
- phy_write(memac->pcsphy, MDIO_SGMII_DEV_ABIL_SGMII, tmp_reg16);
-
- /* Adjust link timer for SGMII -
- * For Serdes 1000BaseX auto-negotiation the timer should be 10 ms.
- * The link_timer register is configured in units of the clock.
- * - When running as 1G SGMII, Serdes clock is 125 MHz, so
- * unit = 1 / (125*10^6 Hz) = 8 ns.
- * 10 ms in units of 8 ns = 10ms / 8ns = 1250000 = 0x1312d0
- * - When running as 2.5G SGMII, Serdes clock is 312.5 MHz, so
- * unit = 1 / (312.5*10^6 Hz) = 3.2 ns.
- * 10 ms in units of 3.2 ns = 10ms / 3.2ns = 3125000 = 0x2faf08.
- * Since link_timer value of 1G SGMII will be too short for 2.5 SGMII,
- * we always set up here a value of 2.5 SGMII.
- */
- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_H, LINK_TMR_H_BASEX);
- phy_write(memac->pcsphy, MDIO_SGMII_LINK_TMR_L, LINK_TMR_L_BASEX);
-
- /* Restart AN */
- tmp_reg16 = SGMII_CR_DEF_VAL | SGMII_CR_RESTART_AN;
- phy_write(memac->pcsphy, 0x0, tmp_reg16);
-}
-
static int check_init_parameters(struct fman_mac *memac)
{
if (!memac->exception_cb) {
@@ -703,220 +524,282 @@ static void free_init_resources(struct fman_mac *memac)
memac->unicast_addr_hash = NULL;
}
-static bool is_init_done(struct memac_cfg *memac_drv_params)
+static int memac_enable(struct fman_mac *memac)
{
- /* Checks if mEMAC driver parameters were initialized */
- if (!memac_drv_params)
- return true;
+ int ret;
- return false;
+ ret = phy_init(memac->serdes);
+ if (ret) {
+ dev_err(memac->dev_id->dev,
+ "could not initialize serdes: %pe\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = phy_power_on(memac->serdes);
+ if (ret) {
+ dev_err(memac->dev_id->dev,
+ "could not power on serdes: %pe\n", ERR_PTR(ret));
+ phy_exit(memac->serdes);
+ }
+
+ return ret;
}
-int memac_enable(struct fman_mac *memac, enum comm_mode mode)
+static void memac_disable(struct fman_mac *memac)
+{
+ phy_power_off(memac->serdes);
+ phy_exit(memac->serdes);
+}
+
+static int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp |= CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= CMD_CFG_TX_EN;
+ if (new_val)
+ tmp |= CMD_CFG_PROMIS_EN;
+ else
+ tmp &= ~CMD_CFG_PROMIS_EN;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int memac_disable(struct fman_mac *memac, enum comm_mode mode)
+static int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
+ u16 pause_time, u16 thresh_time)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
+ tmp = ioread32be(&regs->tx_fifo_sections);
+
+ GET_TX_EMPTY_DEFAULT_VALUE(tmp);
+ iowrite32be(tmp, &regs->tx_fifo_sections);
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp &= ~CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~CMD_CFG_TX_EN;
+ tmp &= ~CMD_CFG_PFC_MODE;
iowrite32be(tmp, &regs->command_config);
+ tmp = ioread32be(&regs->pause_quanta[priority / 2]);
+ if (priority % 2)
+ tmp &= CLXY_PAUSE_QUANTA_CLX_PQNT;
+ else
+ tmp &= CLXY_PAUSE_QUANTA_CLY_PQNT;
+ tmp |= ((u32)pause_time << (16 * (priority % 2)));
+ iowrite32be(tmp, &regs->pause_quanta[priority / 2]);
+
+ tmp = ioread32be(&regs->pause_thresh[priority / 2]);
+ if (priority % 2)
+ tmp &= CLXY_PAUSE_THRESH_CLX_QTH;
+ else
+ tmp &= CLXY_PAUSE_THRESH_CLY_QTH;
+ tmp |= ((u32)thresh_time << (16 * (priority % 2)));
+ iowrite32be(tmp, &regs->pause_thresh[priority / 2]);
+
return 0;
}
-int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
+static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
- if (new_val)
- tmp |= CMD_CFG_PROMIS_EN;
+ if (en)
+ tmp &= ~CMD_CFG_PAUSE_IGNORE;
else
- tmp &= ~CMD_CFG_PROMIS_EN;
+ tmp |= CMD_CFG_PAUSE_IGNORE;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int memac_adjust_link(struct fman_mac *memac, u16 speed)
+static void memac_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
{
- struct memac_regs __iomem *regs = memac->regs;
- u32 tmp;
+ struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
+ unsigned long caps = config->mac_capabilities;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
+ if (phy_interface_mode_is_rgmii(state->interface) &&
+ memac->rgmii_no_half_duplex)
+ caps &= ~(MAC_10HD | MAC_100HD);
- tmp = ioread32be(&regs->if_mode);
-
- /* Set full duplex */
- tmp &= ~IF_MODE_HD;
-
- if (phy_interface_mode_is_rgmii(memac->phy_if)) {
- /* Configure RGMII in manual mode */
- tmp &= ~IF_MODE_RGMII_AUTO;
- tmp &= ~IF_MODE_RGMII_SP_MASK;
- /* Full duplex */
- tmp |= IF_MODE_RGMII_FD;
+ phylink_validate_mask_caps(supported, state, caps);
+}
- switch (speed) {
- case SPEED_1000:
- tmp |= IF_MODE_RGMII_1000;
- break;
- case SPEED_100:
- tmp |= IF_MODE_RGMII_100;
- break;
- case SPEED_10:
- tmp |= IF_MODE_RGMII_10;
- break;
- default:
- break;
- }
+/**
+ * memac_if_mode() - Convert an interface mode into an IF_MODE config
+ * @interface: A phy interface mode
+ *
+ * Return: A configuration word, suitable for programming into the lower bits
+ * of %IF_MODE.
+ */
+static u32 memac_if_mode(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_MII:
+ return IF_MODE_MII;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return IF_MODE_GMII | IF_MODE_RGMII;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_QSGMII:
+ return IF_MODE_GMII;
+ case PHY_INTERFACE_MODE_10GBASER:
+ return IF_MODE_10G;
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
}
-
- iowrite32be(tmp, &regs->if_mode);
-
- return 0;
}
-int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val)
+static struct phylink_pcs *memac_select_pcs(struct phylink_config *config,
+ phy_interface_t iface)
{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- memac->memac_drv_param->max_frame_length = new_val;
-
- return 0;
+ struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
+
+ switch (iface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ return memac->sgmii_pcs;
+ case PHY_INTERFACE_MODE_QSGMII:
+ return memac->qsgmii_pcs;
+ case PHY_INTERFACE_MODE_10GBASER:
+ return memac->xfi_pcs;
+ default:
+ return NULL;
+ }
}
-int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable)
+static int memac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- memac->memac_drv_param->reset_on_init = enable;
-
- return 0;
+ struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
+
+ switch (iface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_10GBASER:
+ return phy_set_mode_ext(memac->serdes, PHY_MODE_ETHERNET,
+ iface);
+ default:
+ return 0;
+ }
}
-int memac_cfg_fixed_link(struct fman_mac *memac,
- struct fixed_phy_status *fixed_link)
+static void memac_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- memac->memac_drv_param->fixed_link = fixed_link;
-
- return 0;
+ struct mac_device *mac_dev = fman_config_to_mac(config);
+ struct memac_regs __iomem *regs = mac_dev->fman_mac->regs;
+ u32 tmp = ioread32be(&regs->if_mode);
+
+ tmp &= ~(IF_MODE_MASK | IF_MODE_RGMII);
+ tmp |= memac_if_mode(state->interface);
+ if (phylink_autoneg_inband(mode))
+ tmp |= IF_MODE_RGMII_AUTO;
+ iowrite32be(tmp, &regs->if_mode);
}
-int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
- u16 pause_time, u16 thresh_time)
+static void memac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
{
+ struct mac_device *mac_dev = fman_config_to_mac(config);
+ struct fman_mac *memac = mac_dev->fman_mac;
struct memac_regs __iomem *regs = memac->regs;
- u32 tmp;
-
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
+ u32 tmp = memac_if_mode(interface);
+ u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE :
+ FSL_FM_PAUSE_TIME_DISABLE;
- tmp = ioread32be(&regs->tx_fifo_sections);
+ memac_set_tx_pause_frames(memac, 0, pause_time, 0);
+ memac_accept_rx_pause_frames(memac, rx_pause);
- GET_TX_EMPTY_DEFAULT_VALUE(tmp);
- iowrite32be(tmp, &regs->tx_fifo_sections);
+ if (duplex == DUPLEX_HALF)
+ tmp |= IF_MODE_HD;
- tmp = ioread32be(&regs->command_config);
- tmp &= ~CMD_CFG_PFC_MODE;
+ switch (speed) {
+ case SPEED_1000:
+ tmp |= IF_MODE_RGMII_1000;
+ break;
+ case SPEED_100:
+ tmp |= IF_MODE_RGMII_100;
+ break;
+ case SPEED_10:
+ tmp |= IF_MODE_RGMII_10;
+ break;
+ }
+ iowrite32be(tmp, &regs->if_mode);
- iowrite32be(tmp, &regs->command_config);
+ /* TODO: EEE? */
- tmp = ioread32be(&regs->pause_quanta[priority / 2]);
- if (priority % 2)
- tmp &= CLXY_PAUSE_QUANTA_CLX_PQNT;
- else
- tmp &= CLXY_PAUSE_QUANTA_CLY_PQNT;
- tmp |= ((u32)pause_time << (16 * (priority % 2)));
- iowrite32be(tmp, &regs->pause_quanta[priority / 2]);
+ if (speed == SPEED_10000) {
+ if (memac->fm_rev_info.major == 6 &&
+ memac->fm_rev_info.minor == 4)
+ tmp = TX_FIFO_SECTIONS_TX_AVAIL_SLOW_10G;
+ else
+ tmp = TX_FIFO_SECTIONS_TX_AVAIL_10G;
+ tmp |= TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_10G;
+ } else {
+ tmp = TX_FIFO_SECTIONS_TX_AVAIL_1G |
+ TX_FIFO_SECTIONS_TX_EMPTY_DEFAULT_1G;
+ }
+ iowrite32be(tmp, &regs->tx_fifo_sections);
- tmp = ioread32be(&regs->pause_thresh[priority / 2]);
- if (priority % 2)
- tmp &= CLXY_PAUSE_THRESH_CLX_QTH;
- else
- tmp &= CLXY_PAUSE_THRESH_CLY_QTH;
- tmp |= ((u32)thresh_time << (16 * (priority % 2)));
- iowrite32be(tmp, &regs->pause_thresh[priority / 2]);
+ mac_dev->update_speed(mac_dev, speed);
- return 0;
+ tmp = ioread32be(&regs->command_config);
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
+ iowrite32be(tmp, &regs->command_config);
}
-int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
+static void memac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
{
+ struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
+ /* TODO: graceful */
tmp = ioread32be(&regs->command_config);
- if (en)
- tmp &= ~CMD_CFG_PAUSE_IGNORE;
- else
- tmp |= CMD_CFG_PAUSE_IGNORE;
-
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
iowrite32be(tmp, &regs->command_config);
-
- return 0;
}
-int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr)
-{
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
+static const struct phylink_mac_ops memac_mac_ops = {
+ .validate = memac_validate,
+ .mac_select_pcs = memac_select_pcs,
+ .mac_prepare = memac_prepare,
+ .mac_config = memac_mac_config,
+ .mac_link_up = memac_link_up,
+ .mac_link_down = memac_link_down,
+};
+static int memac_modify_mac_address(struct fman_mac *memac,
+ const enet_addr_t *enet_addr)
+{
add_addr_in_paddr(memac->regs, (const u8 *)(*enet_addr), 0);
return 0;
}
-int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+static int memac_add_hash_mac_address(struct fman_mac *memac,
+ enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
struct eth_hash_entry *hash_entry;
u32 hash;
u64 addr;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
addr = ENET_ADDR_TO_UINT64(*eth_addr);
if (!(addr & GROUP_ADDRESS)) {
@@ -940,14 +823,11 @@ int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
return 0;
}
-int memac_set_allmulti(struct fman_mac *memac, bool enable)
+static int memac_set_allmulti(struct fman_mac *memac, bool enable)
{
u32 entry;
struct memac_regs __iomem *regs = memac->regs;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
if (enable) {
for (entry = 0; entry < HASH_TABLE_SIZE; entry++)
iowrite32be(entry | HASH_CTRL_MCAST_EN,
@@ -963,12 +843,13 @@ int memac_set_allmulti(struct fman_mac *memac, bool enable)
return 0;
}
-int memac_set_tstamp(struct fman_mac *memac, bool enable)
+static int memac_set_tstamp(struct fman_mac *memac, bool enable)
{
return 0; /* Always enabled. */
}
-int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+static int memac_del_hash_mac_address(struct fman_mac *memac,
+ enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
struct eth_hash_entry *hash_entry = NULL;
@@ -976,9 +857,6 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
u32 hash;
u64 addr;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
addr = ENET_ADDR_TO_UINT64(*eth_addr);
hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
@@ -1001,14 +879,11 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
return 0;
}
-int memac_set_exception(struct fman_mac *memac,
- enum fman_mac_exceptions exception, bool enable)
+static int memac_set_exception(struct fman_mac *memac,
+ enum fman_mac_exceptions exception, bool enable)
{
u32 bit_mask = 0;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
bit_mask = get_exception_flag(exception);
if (bit_mask) {
if (enable)
@@ -1024,28 +899,19 @@ int memac_set_exception(struct fman_mac *memac,
return 0;
}
-int memac_init(struct fman_mac *memac)
+static int memac_init(struct fman_mac *memac)
{
struct memac_cfg *memac_drv_param;
- u8 i;
enet_addr_t eth_addr;
- bool slow_10g_if = false;
- struct fixed_phy_status *fixed_link;
int err;
u32 reg32 = 0;
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
err = check_init_parameters(memac);
if (err)
return err;
memac_drv_param = memac->memac_drv_param;
- if (memac->fm_rev_info.major == 6 && memac->fm_rev_info.minor == 4)
- slow_10g_if = true;
-
/* First, reset the MAC if desired. */
if (memac_drv_param->reset_on_init) {
err = reset(memac->regs);
@@ -1061,10 +927,7 @@ int memac_init(struct fman_mac *memac)
add_addr_in_paddr(memac->regs, (const u8 *)eth_addr, 0);
}
- fixed_link = memac_drv_param->fixed_link;
-
- init(memac->regs, memac->memac_drv_param, memac->phy_if,
- memac->max_speed, slow_10g_if, memac->exceptions);
+ init(memac->regs, memac->memac_drv_param, memac->exceptions);
/* FM_RX_FIFO_CORRUPT_ERRATA_10GMAC_A006320 errata workaround
* Exists only in FMan 6.0 and 6.3.
@@ -1080,33 +943,6 @@ int memac_init(struct fman_mac *memac)
iowrite32be(reg32, &memac->regs->command_config);
}
- if (memac->phy_if == PHY_INTERFACE_MODE_SGMII) {
- /* Configure internal SGMII PHY */
- if (memac->basex_if)
- setup_sgmii_internal_phy_base_x(memac);
- else
- setup_sgmii_internal_phy(memac, fixed_link);
- } else if (memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
- /* Configure 4 internal SGMII PHYs */
- for (i = 0; i < 4; i++) {
- u8 qsmgii_phy_addr, phy_addr;
- /* QSGMII PHY address occupies 3 upper bits of 5-bit
- * phy_address; the lower 2 bits are used to extend
- * register address space and access each one of 4
- * ports inside QSGMII.
- */
- phy_addr = memac->pcsphy->mdio.addr;
- qsmgii_phy_addr = (u8)((phy_addr << 2) | i);
- memac->pcsphy->mdio.addr = qsmgii_phy_addr;
- if (memac->basex_if)
- setup_sgmii_internal_phy_base_x(memac);
- else
- setup_sgmii_internal_phy(memac, fixed_link);
-
- memac->pcsphy->mdio.addr = phy_addr;
- }
- }
-
/* Max Frame Length */
err = fman_set_mac_max_frame(memac->fm, memac->mac_id,
memac_drv_param->max_frame_length);
@@ -1135,32 +971,40 @@ int memac_init(struct fman_mac *memac)
fman_register_intr(memac->fm, FMAN_MOD_MAC, memac->mac_id,
FMAN_INTR_TYPE_NORMAL, memac_exception, memac);
- kfree(memac_drv_param);
- memac->memac_drv_param = NULL;
-
return 0;
}
-int memac_free(struct fman_mac *memac)
+static void pcs_put(struct phylink_pcs *pcs)
{
- free_init_resources(memac);
+ struct mdio_device *mdiodev;
- if (memac->pcsphy)
- put_device(&memac->pcsphy->mdio.dev);
+ if (IS_ERR_OR_NULL(pcs))
+ return;
+
+ mdiodev = lynx_get_mdio_device(pcs);
+ lynx_pcs_destroy(pcs);
+ mdio_device_free(mdiodev);
+}
+static int memac_free(struct fman_mac *memac)
+{
+ free_init_resources(memac);
+
+ pcs_put(memac->sgmii_pcs);
+ pcs_put(memac->qsgmii_pcs);
+ pcs_put(memac->xfi_pcs);
kfree(memac->memac_drv_param);
kfree(memac);
return 0;
}
-struct fman_mac *memac_config(struct fman_mac_params *params)
+static struct fman_mac *memac_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *memac;
struct memac_cfg *memac_drv_param;
- void __iomem *base_addr;
- base_addr = params->base_addr;
/* allocate memory for the m_emac data structure */
memac = kzalloc(sizeof(*memac), GFP_KERNEL);
if (!memac)
@@ -1178,38 +1022,240 @@ struct fman_mac *memac_config(struct fman_mac_params *params)
set_dflts(memac_drv_param);
- memac->addr = ENET_ADDR_TO_UINT64(params->addr);
+ memac->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
- memac->regs = base_addr;
- memac->max_speed = params->max_speed;
- memac->phy_if = params->phy_if;
+ memac->regs = mac_dev->vaddr;
memac->mac_id = params->mac_id;
memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
memac->exception_cb = params->exception_cb;
memac->event_cb = params->event_cb;
- memac->dev_id = params->dev_id;
+ memac->dev_id = mac_dev;
memac->fm = params->fm;
- memac->basex_if = params->basex_if;
/* Save FMan revision */
fman_get_revision(memac->fm, &memac->fm_rev_info);
- if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
- memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
- if (!params->internal_phy_node) {
- pr_err("PCS PHY node is not available\n");
- memac_free(memac);
- return NULL;
+ return memac;
+}
+
+static struct phylink_pcs *memac_pcs_create(struct device_node *mac_node,
+ int index)
+{
+ struct device_node *node;
+ struct mdio_device *mdiodev = NULL;
+ struct phylink_pcs *pcs;
+
+ node = of_parse_phandle(mac_node, "pcsphy-handle", index);
+ if (node && of_device_is_available(node))
+ mdiodev = of_mdio_find_device(node);
+ of_node_put(node);
+
+ if (!mdiodev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ pcs = lynx_pcs_create(mdiodev);
+ if (!pcs)
+ mdio_device_free(mdiodev);
+
+ return pcs;
+}
+
+static bool memac_supports(struct mac_device *mac_dev, phy_interface_t iface)
+{
+ /* If there's no serdes device, assume that it's been configured for
+ * whatever the default interface mode is.
+ */
+ if (!mac_dev->fman_mac->serdes)
+ return mac_dev->phy_if == iface;
+ /* Otherwise, ask the serdes */
+ return !phy_validate(mac_dev->fman_mac->serdes, PHY_MODE_ETHERNET,
+ iface, NULL);
+}
+
+int memac_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct device_node *fixed;
+ struct phylink_pcs *pcs;
+ struct fman_mac *memac;
+ unsigned long capabilities;
+ unsigned long *supported;
+
+ mac_dev->phylink_ops = &memac_mac_ops;
+ mac_dev->set_promisc = memac_set_promiscuous;
+ mac_dev->change_addr = memac_modify_mac_address;
+ mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
+ mac_dev->set_exception = memac_set_exception;
+ mac_dev->set_allmulti = memac_set_allmulti;
+ mac_dev->set_tstamp = memac_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->enable = memac_enable;
+ mac_dev->disable = memac_disable;
+
+ mac_dev->fman_mac = memac_config(mac_dev, params);
+ if (!mac_dev->fman_mac)
+ return -EINVAL;
+
+ memac = mac_dev->fman_mac;
+ memac->memac_drv_param->max_frame_length = fman_get_max_frm();
+ memac->memac_drv_param->reset_on_init = true;
+
+ err = of_property_match_string(mac_node, "pcs-handle-names", "xfi");
+ if (err >= 0) {
+ memac->xfi_pcs = memac_pcs_create(mac_node, err);
+ if (IS_ERR(memac->xfi_pcs)) {
+ err = PTR_ERR(memac->xfi_pcs);
+ dev_err_probe(mac_dev->dev, err, "missing xfi pcs\n");
+ goto _return_fm_mac_free;
}
+ } else if (err != -EINVAL && err != -ENODATA) {
+ goto _return_fm_mac_free;
+ }
- memac->pcsphy = of_phy_find_device(params->internal_phy_node);
- if (!memac->pcsphy) {
- pr_err("of_phy_find_device (PCS PHY) failed\n");
- memac_free(memac);
- return NULL;
+ err = of_property_match_string(mac_node, "pcs-handle-names", "qsgmii");
+ if (err >= 0) {
+ memac->qsgmii_pcs = memac_pcs_create(mac_node, err);
+ if (IS_ERR(memac->qsgmii_pcs)) {
+ err = PTR_ERR(memac->qsgmii_pcs);
+ dev_err_probe(mac_dev->dev, err,
+ "missing qsgmii pcs\n");
+ goto _return_fm_mac_free;
}
+ } else if (err != -EINVAL && err != -ENODATA) {
+ goto _return_fm_mac_free;
}
- return memac;
+ /* For compatibility, if pcs-handle-names is missing, we assume this
+ * phy is the first one in pcsphy-handle
+ */
+ err = of_property_match_string(mac_node, "pcs-handle-names", "sgmii");
+ if (err == -EINVAL || err == -ENODATA)
+ pcs = memac_pcs_create(mac_node, 0);
+ else if (err < 0)
+ goto _return_fm_mac_free;
+ else
+ pcs = memac_pcs_create(mac_node, err);
+
+ if (IS_ERR(pcs)) {
+ err = PTR_ERR(pcs);
+ dev_err_probe(mac_dev->dev, err, "missing pcs\n");
+ goto _return_fm_mac_free;
+ }
+
+ /* If err is set here, it means that pcs-handle-names was missing above
+ * (and therefore that xfi_pcs cannot be set). If we are defaulting to
+ * XGMII, assume this is for XFI. Otherwise, assume it is for SGMII.
+ */
+ if (err && mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ memac->xfi_pcs = pcs;
+ else
+ memac->sgmii_pcs = pcs;
+
+ memac->serdes = devm_of_phy_optional_get(mac_dev->dev, mac_node,
+ "serdes");
+ if (!memac->serdes) {
+ dev_dbg(mac_dev->dev, "could not get (optional) serdes\n");
+ } else if (IS_ERR(memac->serdes)) {
+ err = PTR_ERR(memac->serdes);
+ goto _return_fm_mac_free;
+ }
+
+ /* The internal connection to the serdes is XGMII, but this isn't
+ * really correct for the phy mode (which is the external connection).
+ * However, this is how all older device trees say that they want
+ * 10GBASE-R (aka XFI), so just convert it for them.
+ */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_10GBASER;
+
+ /* TODO: The following interface modes are supported by (some) hardware
+ * but not by this driver:
+ * - 1000BASE-KX
+ * - 10GBASE-KR
+ * - XAUI/HiGig
+ */
+ supported = mac_dev->phylink_config.supported_interfaces;
+
+ /* Note that half duplex is only supported on 10/100M interfaces. */
+
+ if (memac->sgmii_pcs &&
+ (memac_supports(mac_dev, PHY_INTERFACE_MODE_SGMII) ||
+ memac_supports(mac_dev, PHY_INTERFACE_MODE_1000BASEX))) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX, supported);
+ }
+
+ if (memac->sgmii_pcs &&
+ memac_supports(mac_dev, PHY_INTERFACE_MODE_2500BASEX))
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX, supported);
+
+ if (memac->qsgmii_pcs &&
+ memac_supports(mac_dev, PHY_INTERFACE_MODE_QSGMII))
+ __set_bit(PHY_INTERFACE_MODE_QSGMII, supported);
+ else if (mac_dev->phy_if == PHY_INTERFACE_MODE_QSGMII)
+ dev_warn(mac_dev->dev, "no QSGMII pcs specified\n");
+
+ if (memac->xfi_pcs &&
+ memac_supports(mac_dev, PHY_INTERFACE_MODE_10GBASER)) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER, supported);
+ } else {
+ /* From what I can tell, no 10g macs support RGMII. */
+ phy_interface_set_rgmii(supported);
+ __set_bit(PHY_INTERFACE_MODE_MII, supported);
+ }
+
+ capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 | MAC_100;
+ capabilities |= MAC_1000FD | MAC_2500FD | MAC_10000FD;
+
+ /* These SoCs don't support half duplex at all; there's no different
+ * FMan version or compatible, so we just have to check the machine
+ * compatible instead
+ */
+ if (of_machine_is_compatible("fsl,ls1043a") ||
+ of_machine_is_compatible("fsl,ls1046a") ||
+ of_machine_is_compatible("fsl,B4QDS"))
+ capabilities &= ~(MAC_10HD | MAC_100HD);
+
+ mac_dev->phylink_config.mac_capabilities = capabilities;
+
+ /* The T2080 and T4240 don't support half duplex RGMII. There is no
+ * other way to identify these SoCs, so just use the machine
+ * compatible.
+ */
+ if (of_machine_is_compatible("fsl,T2080QDS") ||
+ of_machine_is_compatible("fsl,T2080RDB") ||
+ of_machine_is_compatible("fsl,T2081QDS") ||
+ of_machine_is_compatible("fsl,T4240QDS") ||
+ of_machine_is_compatible("fsl,T4240RDB"))
+ memac->rgmii_no_half_duplex = true;
+
+ /* Most boards should use MLO_AN_INBAND, but existing boards don't have
+ * a managed property. Default to MLO_AN_INBAND if nothing else is
+ * specified. We need to be careful and not enable this if we have a
+ * fixed link or if we are using MII or RGMII, since those
+ * configurations don't use in-band autonegotiation.
+ */
+ fixed = of_get_child_by_name(mac_node, "fixed-link");
+ if (!fixed && !of_property_read_bool(mac_node, "fixed-link") &&
+ !of_property_read_bool(mac_node, "managed") &&
+ mac_dev->phy_if != PHY_INTERFACE_MODE_MII &&
+ !phy_interface_mode_is_rgmii(mac_dev->phy_if))
+ mac_dev->phylink_config.ovr_an_inband = true;
+ of_node_put(fixed);
+
+ err = memac_init(mac_dev->fman_mac);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ dev_info(mac_dev->dev, "FMan MEMAC\n");
+
+ return 0;
+
+_return_fm_mac_free:
+ memac_free(mac_dev->fman_mac);
+ return err;
}
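
For reference on the pause handling kept in memac_set_tx_pause_frames() above: the pause_quanta and pause_thresh registers pack two priorities per 32-bit word, with the even priority in the low half-word and the odd priority in the high half-word. The standalone C sketch below is an editor's illustration of that index/shift arithmetic with sample values; it is not part of this patch.

/* Illustrative only: how a PFC priority maps onto its pause_quanta word,
 * mirroring the priority / 2 index and 16 * (priority % 2) shift used in
 * memac_set_tx_pause_frames() above.  Compiles standalone.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pause_quanta[4] = { 0 };       /* 8 priorities, two per word */
        unsigned int priority = 3;              /* sample priority */
        uint16_t pause_time = 0xf000;           /* sample quanta value */

        unsigned int idx = priority / 2;                /* register word index */
        unsigned int shift = 16 * (priority % 2);       /* low or high half-word */

        pause_quanta[idx] &= ~(0xffffu << shift);       /* clear the old field */
        pause_quanta[idx] |= (uint32_t)pause_time << shift;

        /* priority 3 lands in the high half of word 1: 0xf0000000 */
        printf("pause_quanta[%u] = 0x%08x\n", idx, pause_quanta[idx]);
        return 0;
}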
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index 3820f7a22983..5a3a14f9684f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __MEMAC_H
@@ -38,26 +11,10 @@
#include <linux/netdevice.h>
#include <linux/phy_fixed.h>
-struct fman_mac *memac_config(struct fman_mac_params *params);
-int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
-int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr);
-int memac_adjust_link(struct fman_mac *memac, u16 speed);
-int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
-int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
-int memac_cfg_fixed_link(struct fman_mac *memac,
- struct fixed_phy_status *fixed_link);
-int memac_enable(struct fman_mac *memac, enum comm_mode mode);
-int memac_disable(struct fman_mac *memac, enum comm_mode mode);
-int memac_init(struct fman_mac *memac);
-int memac_free(struct fman_mac *memac);
-int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en);
-int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
- u16 pause_time, u16 thresh_time);
-int memac_set_exception(struct fman_mac *memac,
- enum fman_mac_exceptions exception, bool enable);
-int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
-int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
-int memac_set_allmulti(struct fman_mac *memac, bool enable);
-int memac_set_tstamp(struct fman_mac *memac, bool enable);
+struct mac_device;
+
+int memac_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __MEMAC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 7ad317e622bc..f557d68e5b76 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#include "fman_muram.h"
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index 453bf849eee1..3643af61bae2 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -1,34 +1,8 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
+
#ifndef __FM_MURAM_EXT
#define __FM_MURAM_EXT
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index d9baac0dbc7d..ab90fe2bee5e 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -1805,7 +1778,7 @@ static int fman_port_probe(struct platform_device *of_dev)
fman = dev_get_drvdata(&fm_pdev->dev);
if (!fman) {
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
err = of_property_read_u32(port_node, "cell-index", &val);
@@ -1813,7 +1786,7 @@ static int fman_port_probe(struct platform_device *of_dev)
dev_err(port->dev, "%s: reading cell-index for %pOF failed\n",
__func__, port_node);
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
port_id = (u8)val;
port->dts_params.id = port_id;
@@ -1847,7 +1820,7 @@ static int fman_port_probe(struct platform_device *of_dev)
} else {
dev_err(port->dev, "%s: Illegal port type\n", __func__);
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
port->dts_params.type = port_type;
@@ -1861,7 +1834,7 @@ static int fman_port_probe(struct platform_device *of_dev)
dev_err(port->dev, "%s: incorrect qman-channel-id\n",
__func__);
err = -EINVAL;
- goto return_err;
+ goto put_device;
}
port->dts_params.qman_channel_id = qman_channel_id;
}
@@ -1871,7 +1844,7 @@ static int fman_port_probe(struct platform_device *of_dev)
dev_err(port->dev, "%s: of_address_to_resource() failed\n",
__func__);
err = -ENOMEM;
- goto return_err;
+ goto put_device;
}
port->dts_params.fman = fman;
@@ -1896,6 +1869,8 @@ static int fman_port_probe(struct platform_device *of_dev)
return 0;
+put_device:
+ put_device(&fm_pdev->dev);
return_err:
of_node_put(port_node);
free_port:
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
index 82f12661a46d..4917fe8f0617 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FMAN_PORT_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c
index 248f5bcca468..0fac60aa5283 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.c
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "fman_sp.h"
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.h b/drivers/net/ethernet/freescale/fman/fman_sp.h
index 820b7f63088f..a62dd21c81f1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.h
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.h
@@ -1,32 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FM_SP_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 311c1906e044..c2261d26db5b 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -1,44 +1,19 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_tgec.h"
#include "fman.h"
+#include "mac.h"
#include <linux/slab.h>
#include <linux/bitrev.h>
#include <linux/io.h>
#include <linux/crc32.h>
+#include <linux/netdevice.h>
/* Transmit Inter-Packet Gap Length Register (TX_IPG_LENGTH) */
#define TGEC_TX_IPG_LENGTH_MASK 0x000003ff
@@ -206,7 +181,7 @@ struct fman_mac {
/* MAC address of device; */
u64 addr;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* pointer to driver's global address hash table */
@@ -269,10 +244,6 @@ static int init(struct tgec_regs __iomem *regs, struct tgec_cfg *cfg,
static int check_init_parameters(struct fman_mac *tgec)
{
- if (tgec->max_speed < SPEED_10000) {
- pr_err("10G MAC driver only support 10G speed\n");
- return -EINVAL;
- }
if (!tgec->exception_cb) {
pr_err("uninitialized exception_cb\n");
return -EINVAL;
@@ -410,131 +381,116 @@ static void free_init_resources(struct fman_mac *tgec)
tgec->unicast_addr_hash = NULL;
}
-static bool is_init_done(struct tgec_cfg *cfg)
+static int tgec_enable(struct fman_mac *tgec)
{
- /* Checks if tGEC driver parameters were initialized */
- if (!cfg)
- return true;
+ return 0;
+}
- return false;
+static void tgec_disable(struct fman_mac *tgec)
+{
}
-int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
+static int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp |= CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= CMD_CFG_TX_EN;
+ if (new_val)
+ tmp |= CMD_CFG_PROMIS_EN;
+ else
+ tmp &= ~CMD_CFG_PROMIS_EN;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int tgec_disable(struct fman_mac *tgec, enum comm_mode mode)
+static int tgec_set_tx_pause_frames(struct fman_mac *tgec,
+ u8 __maybe_unused priority, u16 pause_time,
+ u16 __maybe_unused thresh_time)
{
struct tgec_regs __iomem *regs = tgec->regs;
- u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
- tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp &= ~CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~CMD_CFG_TX_EN;
- iowrite32be(tmp, &regs->command_config);
+ iowrite32be((u32)pause_time, &regs->pause_quant);
return 0;
}
-int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
+static int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
- if (new_val)
- tmp |= CMD_CFG_PROMIS_EN;
+ if (!en)
+ tmp |= CMD_CFG_PAUSE_IGNORE;
else
- tmp &= ~CMD_CFG_PROMIS_EN;
+ tmp &= ~CMD_CFG_PAUSE_IGNORE;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val)
+static void tgec_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
{
- if (is_init_done(tgec->cfg))
- return -EINVAL;
-
- tgec->cfg->max_frame_length = new_val;
-
- return 0;
}
-int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
- u16 pause_time, u16 __maybe_unused thresh_time)
+static void tgec_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
{
+ struct mac_device *mac_dev = fman_config_to_mac(config);
+ struct fman_mac *tgec = mac_dev->fman_mac;
struct tgec_regs __iomem *regs = tgec->regs;
+ u16 pause_time = tx_pause ? FSL_FM_PAUSE_TIME_ENABLE :
+ FSL_FM_PAUSE_TIME_DISABLE;
+ u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
+ tgec_set_tx_pause_frames(tgec, 0, pause_time, 0);
+ tgec_accept_rx_pause_frames(tgec, rx_pause);
+ mac_dev->update_speed(mac_dev, speed);
- iowrite32be((u32)pause_time, &regs->pause_quant);
-
- return 0;
+ tmp = ioread32be(&regs->command_config);
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
+ iowrite32be(tmp, &regs->command_config);
}
-int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
+static void tgec_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
{
+ struct fman_mac *tgec = fman_config_to_mac(config)->fman_mac;
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
- if (!en)
- tmp |= CMD_CFG_PAUSE_IGNORE;
- else
- tmp &= ~CMD_CFG_PAUSE_IGNORE;
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
iowrite32be(tmp, &regs->command_config);
-
- return 0;
}
-int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_addr)
-{
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
+static const struct phylink_mac_ops tgec_mac_ops = {
+ .mac_config = tgec_mac_config,
+ .mac_link_up = tgec_link_up,
+ .mac_link_down = tgec_link_down,
+};
+static int tgec_modify_mac_address(struct fman_mac *tgec,
+ const enet_addr_t *p_enet_addr)
+{
tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr);
set_mac_address(tgec->regs, (const u8 *)(*p_enet_addr));
return 0;
}
-int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+static int tgec_add_hash_mac_address(struct fman_mac *tgec,
+ enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry;
u32 crc = 0xFFFFFFFF, hash;
u64 addr;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
addr = ENET_ADDR_TO_UINT64(*eth_addr);
if (!(addr & GROUP_ADDRESS)) {
@@ -562,14 +518,11 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
return 0;
}
-int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
+static int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
{
u32 entry;
struct tgec_regs __iomem *regs = tgec->regs;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
if (enable) {
for (entry = 0; entry < TGEC_HASH_TABLE_SIZE; entry++)
iowrite32be(entry | TGEC_HASH_MCAST_EN,
@@ -585,14 +538,11 @@ int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
return 0;
}
-int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
+static int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
tmp = ioread32be(&regs->command_config);
if (enable)
@@ -605,7 +555,8 @@ int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
return 0;
}
-int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+static int tgec_del_hash_mac_address(struct fman_mac *tgec,
+ enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry = NULL;
@@ -613,9 +564,6 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
u32 crc = 0xFFFFFFFF, hash;
u64 addr;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
addr = ((*(u64 *)eth_addr) >> 16);
/* CRC calculation */
@@ -642,27 +590,12 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
return 0;
}
-int tgec_get_version(struct fman_mac *tgec, u32 *mac_version)
-{
- struct tgec_regs __iomem *regs = tgec->regs;
-
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
- *mac_version = ioread32be(&regs->tgec_id);
-
- return 0;
-}
-
-int tgec_set_exception(struct fman_mac *tgec,
- enum fman_mac_exceptions exception, bool enable)
+static int tgec_set_exception(struct fman_mac *tgec,
+ enum fman_mac_exceptions exception, bool enable)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 bit_mask = 0;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
bit_mask = get_exception_flag(exception);
if (bit_mask) {
if (enable)
@@ -681,15 +614,12 @@ int tgec_set_exception(struct fman_mac *tgec,
return 0;
}
-int tgec_init(struct fman_mac *tgec)
+static int tgec_init(struct fman_mac *tgec)
{
struct tgec_cfg *cfg;
enet_addr_t eth_addr;
int err;
- if (is_init_done(tgec->cfg))
- return -EINVAL;
-
if (DEFAULT_RESET_ON_INIT &&
(fman_reset_mac(tgec->fm, tgec->mac_id) != 0)) {
pr_err("Can't reset MAC!\n");
@@ -764,7 +694,7 @@ int tgec_init(struct fman_mac *tgec)
return 0;
}
-int tgec_free(struct fman_mac *tgec)
+static int tgec_free(struct fman_mac *tgec)
{
free_init_resources(tgec);
@@ -774,13 +704,12 @@ int tgec_free(struct fman_mac *tgec)
return 0;
}
-struct fman_mac *tgec_config(struct fman_mac_params *params)
+static struct fman_mac *tgec_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *tgec;
struct tgec_cfg *cfg;
- void __iomem *base_addr;
- base_addr = params->base_addr;
/* allocate memory for the tGEC data structure. */
tgec = kzalloc(sizeof(*tgec), GFP_KERNEL);
if (!tgec)
@@ -798,9 +727,8 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
set_dflts(cfg);
- tgec->regs = base_addr;
- tgec->addr = ENET_ADDR_TO_UINT64(params->addr);
- tgec->max_speed = params->max_speed;
+ tgec->regs = mac_dev->vaddr;
+ tgec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
tgec->mac_id = params->mac_id;
tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT |
TGEC_IMASK_REM_FAULT |
@@ -819,7 +747,7 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
TGEC_IMASK_RX_ALIGN_ER);
tgec->exception_cb = params->exception_cb;
tgec->event_cb = params->event_cb;
- tgec->dev_id = params->dev_id;
+ tgec->dev_id = mac_dev;
tgec->fm = params->fm;
/* Save FMan revision */
@@ -827,3 +755,63 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
return tgec;
}
+
+int tgec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct fman_mac *tgec;
+
+ mac_dev->phylink_ops = &tgec_mac_ops;
+ mac_dev->set_promisc = tgec_set_promiscuous;
+ mac_dev->change_addr = tgec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
+ mac_dev->set_exception = tgec_set_exception;
+ mac_dev->set_allmulti = tgec_set_allmulti;
+ mac_dev->set_tstamp = tgec_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->enable = tgec_enable;
+ mac_dev->disable = tgec_disable;
+
+ mac_dev->fman_mac = tgec_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ /* The internal connection to the serdes is XGMII, but this isn't
+ * really correct for the phy mode (which is the external connection).
+ * However, this is how all older device trees say that they want
+ * XAUI, so just convert it for them.
+ */
+ if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_XAUI;
+
+ __set_bit(PHY_INTERFACE_MODE_XAUI,
+ mac_dev->phylink_config.supported_interfaces);
+ mac_dev->phylink_config.mac_capabilities =
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10000FD;
+
+ tgec = mac_dev->fman_mac;
+ tgec->cfg->max_frame_length = fman_get_max_frm();
+ err = tgec_init(tgec);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ /* For 10G MAC, disable Tx ECC exception */
+ err = tgec_set_exception(tgec, FM_MAC_EX_10G_TX_ECC_ER, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ pr_info("FMan XGEC version: 0x%08x\n",
+ ioread32be(&tgec->regs->tgec_id));
+ goto _return;
+
+_return_fm_mac_free:
+ tgec_free(mac_dev->fman_mac);
+
+_return:
+ return err;
+}
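The tgec_link_up() callback added above folds the old pause bookkeeping into phylink's tx_pause/rx_pause arguments: it programs pause_quant for TX pause, lets tgec_accept_rx_pause_frames() set or clear CMD_CFG_PAUSE_IGNORE for RX pause, and only then re-enables RX/TX. The standalone sketch below is not part of the patch; it only models that mapping, and the register constants and function names are illustrative stand-ins rather than the driver's real values.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define CMD_CFG_PAUSE_IGNORE_STUB	0x00000100	/* stand-in bit */
#define PAUSE_TIME_ENABLE_STUB		0xf000		/* stand-in for FSL_FM_PAUSE_TIME_ENABLE */
#define PAUSE_TIME_DISABLE_STUB		0x0000		/* stand-in for FSL_FM_PAUSE_TIME_DISABLE */

/* model of what tgec_link_up() derives from phylink's pause booleans */
static void link_up_stub(bool tx_pause, bool rx_pause,
			 uint32_t *pause_quant, uint32_t *command_config)
{
	/* TX pause: program a non-zero pause quanta, otherwise zero it */
	*pause_quant = tx_pause ? PAUSE_TIME_ENABLE_STUB : PAUSE_TIME_DISABLE_STUB;

	/* RX pause: honour incoming pause frames only when requested */
	if (rx_pause)
		*command_config &= ~CMD_CFG_PAUSE_IGNORE_STUB;
	else
		*command_config |= CMD_CFG_PAUSE_IGNORE_STUB;
}

int main(void)
{
	uint32_t quant = 0, cmd_cfg = 0;

	link_up_stub(true, false, &quant, &cmd_cfg);
	printf("pause_quant=0x%04x command_config=0x%08x\n", quant, cmd_cfg);
	return 0;
}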
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
index b28b20b26148..768b8d165e05 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __TGEC_H
@@ -35,23 +8,10 @@
#include "fman_mac.h"
-struct fman_mac *tgec_config(struct fman_mac_params *params);
-int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
-int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *enet_addr);
-int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
-int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
-int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
-int tgec_init(struct fman_mac *tgec);
-int tgec_free(struct fman_mac *tgec);
-int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en);
-int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 priority,
- u16 pause_time, u16 thresh_time);
-int tgec_set_exception(struct fman_mac *tgec,
- enum fman_mac_exceptions exception, bool enable);
-int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
-int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
-int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
-int tgec_set_allmulti(struct fman_mac *tgec, bool enable);
-int tgec_set_tstamp(struct fman_mac *tgec, bool enable);
+struct mac_device;
+
+int tgec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __TGEC_H */
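With the header trimmed to tgec_initialization(), the individual tgec_* helpers become static and are reached only through the function pointers that tgec_initialization() installs in struct mac_device. The following user-space sketch is illustrative only, not part of the patch; the *_stub types and names are hypothetical stand-ins for the kernel structures.

#include <stdio.h>
#include <stdbool.h>

struct fman_mac_stub { bool promisc; };

struct mac_device_stub {
	struct fman_mac_stub hw;
	int  (*enable)(struct fman_mac_stub *hw);
	void (*disable)(struct fman_mac_stub *hw);
	int  (*set_promisc)(struct fman_mac_stub *hw, bool enable);
};

static int tgec_enable_stub(struct fman_mac_stub *hw)
{
	(void)hw;	/* mirrors tgec_enable(), which is now a no-op */
	return 0;
}

static void tgec_disable_stub(struct fman_mac_stub *hw) { (void)hw; }

static int tgec_set_promisc_stub(struct fman_mac_stub *hw, bool enable)
{
	hw->promisc = enable;
	return 0;
}

/* models the callback binding done by tgec_initialization() */
static void tgec_bind_stub(struct mac_device_stub *mac)
{
	mac->enable = tgec_enable_stub;
	mac->disable = tgec_disable_stub;
	mac->set_promisc = tgec_set_promisc_stub;
}

int main(void)
{
	struct mac_device_stub mac = { { false }, 0, 0, 0 };

	tgec_bind_stub(&mac);
	mac.enable(&mac.hw);
	mac.set_promisc(&mac.hw, true);
	printf("promisc: %d\n", mac.hw.promisc);
	mac.disable(&mac.hw);
	return 0;
}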
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index d9fc5c456bf3..43665806c590 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -1,32 +1,6 @@
-/* Copyright 2008-2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -41,6 +15,7 @@
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/phy_fixed.h>
+#include <linux/phylink.h>
#include <linux/etherdevice.h>
#include <linux/libfdt_env.h>
@@ -54,20 +29,12 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL FMan MAC API based driver");
struct mac_priv_s {
- struct device *dev;
- void __iomem *vaddr;
u8 cell_index;
struct fman *fman;
- struct device_node *internal_phy_node;
/* List of multicast addresses */
struct list_head mc_addr_list;
struct platform_device *eth_dev;
- struct fixed_phy_status *fixed_link;
u16 speed;
- u16 max_speed;
-
- int (*enable)(struct fman_mac *mac_dev, enum comm_mode mode);
- int (*disable)(struct fman_mac *mac_dev, enum comm_mode mode);
};
struct mac_address {
@@ -75,211 +42,21 @@ struct mac_address {
struct list_head list;
};
-static void mac_exception(void *handle, enum fman_mac_exceptions ex)
+static void mac_exception(struct mac_device *mac_dev,
+ enum fman_mac_exceptions ex)
{
- struct mac_device *mac_dev;
- struct mac_priv_s *priv;
-
- mac_dev = handle;
- priv = mac_dev->priv;
-
if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
/* don't flag RX FIFO after the first */
mac_dev->set_exception(mac_dev->fman_mac,
FM_MAC_EX_10G_RX_FIFO_OVFL, false);
- dev_err(priv->dev, "10G MAC got RX FIFO Error = %x\n", ex);
+ dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n", ex);
}
- dev_dbg(priv->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
+ dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
__func__, ex);
}
-static void set_fman_mac_params(struct mac_device *mac_dev,
- struct fman_mac_params *params)
-{
- struct mac_priv_s *priv = mac_dev->priv;
-
- params->base_addr = (typeof(params->base_addr))
- devm_ioremap(priv->dev, mac_dev->res->start,
- resource_size(mac_dev->res));
- memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
- params->max_speed = priv->max_speed;
- params->phy_if = mac_dev->phy_if;
- params->basex_if = false;
- params->mac_id = priv->cell_index;
- params->fm = (void *)priv->fman;
- params->exception_cb = mac_exception;
- params->event_cb = mac_exception;
- params->dev_id = mac_dev;
- params->internal_phy_node = priv->internal_phy_node;
-}
-
-static int tgec_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
- u32 version;
-
- priv = mac_dev->priv;
-
- set_fman_mac_params(mac_dev, &params);
-
- mac_dev->fman_mac = tgec_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = tgec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = tgec_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- /* For 10G MAC, disable Tx ECC exception */
- err = mac_dev->set_exception(mac_dev->fman_mac,
- FM_MAC_EX_10G_TX_ECC_ER, false);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = tgec_get_version(mac_dev->fman_mac, &version);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan XGEC version: 0x%08x\n", version);
-
- goto _return;
-
-_return_fm_mac_free:
- tgec_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int dtsec_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
- u32 version;
-
- priv = mac_dev->priv;
-
- set_fman_mac_params(mac_dev, &params);
-
- mac_dev->fman_mac = dtsec_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = dtsec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_cfg_pad_and_crc(mac_dev->fman_mac, true);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- /* For 1G MAC, disable by default the MIB counters overflow interrupt */
- err = mac_dev->set_exception(mac_dev->fman_mac,
- FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_get_version(mac_dev->fman_mac, &version);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan dTSEC version: 0x%08x\n", version);
-
- goto _return;
-
-_return_fm_mac_free:
- dtsec_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int memac_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
-
- priv = mac_dev->priv;
-
- set_fman_mac_params(mac_dev, &params);
-
- if (priv->max_speed == SPEED_10000)
- params.phy_if = PHY_INTERFACE_MODE_XGMII;
-
- mac_dev->fman_mac = memac_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = memac_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_cfg_reset_on_init(mac_dev->fman_mac, true);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_cfg_fixed_link(mac_dev->fman_mac, priv->fixed_link);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan MEMAC\n");
-
- goto _return;
-
-_return_fm_mac_free:
- memac_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int start(struct mac_device *mac_dev)
-{
- int err;
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct mac_priv_s *priv = mac_dev->priv;
-
- err = priv->enable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
- if (!err && phy_dev)
- phy_start(phy_dev);
-
- return err;
-}
-
-static int stop(struct mac_device *mac_dev)
-{
- struct mac_priv_s *priv = mac_dev->priv;
-
- if (mac_dev->phy_dev)
- phy_stop(mac_dev->phy_dev);
-
- return priv->disable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
-}
-
-static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
+int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
{
struct mac_priv_s *priv;
struct mac_address *old_addr, *tmp;
@@ -317,233 +94,8 @@ static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
return 0;
}
-/**
- * fman_set_mac_active_pause
- * @mac_dev: A pointer to the MAC device
- * @rx: Pause frame setting for RX
- * @tx: Pause frame setting for TX
- *
- * Set the MAC RX/TX PAUSE frames settings
- *
- * Avoid redundant calls to FMD, if the MAC driver already contains the desired
- * active PAUSE settings. Otherwise, the new active settings should be reflected
- * in FMan.
- *
- * Return: 0 on success; Error code otherwise.
- */
-int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
-{
- struct fman_mac *fman_mac = mac_dev->fman_mac;
- int err = 0;
-
- if (rx != mac_dev->rx_pause_active) {
- err = mac_dev->set_rx_pause(fman_mac, rx);
- if (likely(err == 0))
- mac_dev->rx_pause_active = rx;
- }
-
- if (tx != mac_dev->tx_pause_active) {
- u16 pause_time = (tx ? FSL_FM_PAUSE_TIME_ENABLE :
- FSL_FM_PAUSE_TIME_DISABLE);
-
- err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0);
-
- if (likely(err == 0))
- mac_dev->tx_pause_active = tx;
- }
-
- return err;
-}
-EXPORT_SYMBOL(fman_set_mac_active_pause);
-
-/**
- * fman_get_pause_cfg
- * @mac_dev: A pointer to the MAC device
- * @rx_pause: Return value for RX setting
- * @tx_pause: Return value for TX setting
- *
- * Determine the MAC RX/TX PAUSE frames settings based on PHY
- * autonegotiation or values set by ethtool.
- *
- * Return: Pointer to FMan device.
- */
-void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
- bool *tx_pause)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- u16 lcl_adv, rmt_adv;
- u8 flowctrl;
-
- *rx_pause = *tx_pause = false;
-
- if (!phy_dev->duplex)
- return;
-
- /* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
- * are those set by ethtool.
- */
- if (!mac_dev->autoneg_pause) {
- *rx_pause = mac_dev->rx_pause_req;
- *tx_pause = mac_dev->tx_pause_req;
- return;
- }
-
- /* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
- * settings depend on the result of the link negotiation.
- */
-
- /* get local capabilities */
- lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising);
-
- /* get link partner capabilities */
- rmt_adv = 0;
- if (phy_dev->pause)
- rmt_adv |= LPA_PAUSE_CAP;
- if (phy_dev->asym_pause)
- rmt_adv |= LPA_PAUSE_ASYM;
-
- /* Calculate TX/RX settings based on local and peer advertised
- * symmetric/asymmetric PAUSE capabilities.
- */
- flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
- if (flowctrl & FLOW_CTRL_RX)
- *rx_pause = true;
- if (flowctrl & FLOW_CTRL_TX)
- *tx_pause = true;
-}
-EXPORT_SYMBOL(fman_get_pause_cfg);
-
-static void adjust_link_void(struct mac_device *mac_dev)
-{
-}
-
-static void adjust_link_dtsec(struct mac_device *mac_dev)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
- int err;
-
- fman_mac = mac_dev->fman_mac;
- if (!phy_dev->link) {
- dtsec_restart_autoneg(fman_mac);
-
- return;
- }
-
- dtsec_adjust_link(fman_mac, phy_dev->speed);
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
- err);
-}
-
-static void adjust_link_memac(struct mac_device *mac_dev)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
- int err;
-
- fman_mac = mac_dev->fman_mac;
- memac_adjust_link(fman_mac, phy_dev->speed);
-
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
- err);
-}
-
-static void setup_dtsec(struct mac_device *mac_dev)
-{
- mac_dev->init = dtsec_initialization;
- mac_dev->set_promisc = dtsec_set_promiscuous;
- mac_dev->change_addr = dtsec_modify_mac_address;
- mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
- mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
- mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
- mac_dev->set_exception = dtsec_set_exception;
- mac_dev->set_allmulti = dtsec_set_allmulti;
- mac_dev->set_tstamp = dtsec_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_dtsec;
- mac_dev->priv->enable = dtsec_enable;
- mac_dev->priv->disable = dtsec_disable;
-}
-
-static void setup_tgec(struct mac_device *mac_dev)
-{
- mac_dev->init = tgec_initialization;
- mac_dev->set_promisc = tgec_set_promiscuous;
- mac_dev->change_addr = tgec_modify_mac_address;
- mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
- mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
- mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
- mac_dev->set_exception = tgec_set_exception;
- mac_dev->set_allmulti = tgec_set_allmulti;
- mac_dev->set_tstamp = tgec_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_void;
- mac_dev->priv->enable = tgec_enable;
- mac_dev->priv->disable = tgec_disable;
-}
-
-static void setup_memac(struct mac_device *mac_dev)
-{
- mac_dev->init = memac_initialization;
- mac_dev->set_promisc = memac_set_promiscuous;
- mac_dev->change_addr = memac_modify_mac_address;
- mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
- mac_dev->set_tx_pause = memac_set_tx_pause_frames;
- mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
- mac_dev->set_exception = memac_set_exception;
- mac_dev->set_allmulti = memac_set_allmulti;
- mac_dev->set_tstamp = memac_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_memac;
- mac_dev->priv->enable = memac_enable;
- mac_dev->priv->disable = memac_disable;
-}
-
-#define DTSEC_SUPPORTED \
- (SUPPORTED_10baseT_Half \
- | SUPPORTED_10baseT_Full \
- | SUPPORTED_100baseT_Half \
- | SUPPORTED_100baseT_Full \
- | SUPPORTED_Autoneg \
- | SUPPORTED_Pause \
- | SUPPORTED_Asym_Pause \
- | SUPPORTED_FIBRE \
- | SUPPORTED_MII)
-
static DEFINE_MUTEX(eth_lock);
-static const u16 phy2speed[] = {
- [PHY_INTERFACE_MODE_MII] = SPEED_100,
- [PHY_INTERFACE_MODE_GMII] = SPEED_1000,
- [PHY_INTERFACE_MODE_SGMII] = SPEED_1000,
- [PHY_INTERFACE_MODE_TBI] = SPEED_1000,
- [PHY_INTERFACE_MODE_RMII] = SPEED_100,
- [PHY_INTERFACE_MODE_RGMII] = SPEED_1000,
- [PHY_INTERFACE_MODE_RGMII_ID] = SPEED_1000,
- [PHY_INTERFACE_MODE_RGMII_RXID] = SPEED_1000,
- [PHY_INTERFACE_MODE_RGMII_TXID] = SPEED_1000,
- [PHY_INTERFACE_MODE_RTBI] = SPEED_1000,
- [PHY_INTERFACE_MODE_QSGMII] = SPEED_1000,
- [PHY_INTERFACE_MODE_XGMII] = SPEED_10000
-};
-
static struct platform_device *dpaa_eth_add_device(int fman_id,
struct mac_device *mac_dev)
{
@@ -566,7 +118,7 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
goto no_mem;
}
- pdev->dev.parent = priv->dev;
+ pdev->dev.parent = mac_dev->dev;
ret = platform_device_add_data(pdev, &data, sizeof(data));
if (ret)
@@ -590,9 +142,9 @@ no_mem:
}
static const struct of_device_id mac_match[] = {
- { .compatible = "fsl,fman-dtsec" },
- { .compatible = "fsl,fman-xgec" },
- { .compatible = "fsl,fman-memac" },
+ { .compatible = "fsl,fman-dtsec", .data = dtsec_initialization },
+ { .compatible = "fsl,fman-xgec", .data = tgec_initialization },
+ { .compatible = "fsl,fman-memac", .data = memac_initialization },
{}
};
MODULE_DEVICE_TABLE(of, mac_match);
@@ -600,50 +152,33 @@ MODULE_DEVICE_TABLE(of, mac_match);
static int mac_probe(struct platform_device *_of_dev)
{
int err, i, nph;
+ int (*init)(struct mac_device *mac_dev, struct device_node *mac_node,
+ struct fman_mac_params *params);
struct device *dev;
struct device_node *mac_node, *dev_node;
struct mac_device *mac_dev;
struct platform_device *of_dev;
- struct resource res;
struct mac_priv_s *priv;
+ struct fman_mac_params params;
u32 val;
u8 fman_id;
phy_interface_t phy_if;
dev = &_of_dev->dev;
mac_node = dev->of_node;
+ init = of_device_get_match_data(dev);
mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
- if (!mac_dev) {
- err = -ENOMEM;
- goto _return;
- }
+ if (!mac_dev)
+ return -ENOMEM;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- err = -ENOMEM;
- goto _return;
- }
+ if (!priv)
+ return -ENOMEM;
+ platform_set_drvdata(_of_dev, mac_dev);
/* Save private information */
mac_dev->priv = priv;
- priv->dev = dev;
-
- if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
- setup_dtsec(mac_dev);
- priv->internal_phy_node = of_parse_phandle(mac_node,
- "tbi-handle", 0);
- } else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
- setup_tgec(mac_dev);
- } else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
- setup_memac(mac_dev);
- priv->internal_phy_node = of_parse_phandle(mac_node,
- "pcsphy-handle", 0);
- } else {
- dev_err(dev, "MAC node (%pOF) contains unsupported MAC\n",
- mac_node);
- err = -EINVAL;
- goto _return;
- }
+ mac_dev->dev = dev;
INIT_LIST_HEAD(&priv->mc_addr_list);
@@ -652,8 +187,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (!dev_node) {
dev_err(dev, "of_get_parent(%pOF) failed\n",
mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
of_dev = of_find_device_by_node(dev_node);
@@ -683,42 +217,34 @@ static int mac_probe(struct platform_device *_of_dev)
of_node_put(dev_node);
/* Get the address of the memory mapped registers */
- err = of_address_to_resource(mac_node, 0, &res);
- if (err < 0) {
- dev_err(dev, "of_address_to_resource(%pOF) = %d\n",
- mac_node, err);
- goto _return_of_get_parent;
+ mac_dev->res = platform_get_mem_or_io(_of_dev, 0);
+ if (!mac_dev->res) {
+ dev_err(dev, "could not get registers\n");
+ return -EINVAL;
}
- mac_dev->res = __devm_request_region(dev,
- fman_get_mem_region(priv->fman),
- res.start, resource_size(&res),
- "mac");
- if (!mac_dev->res) {
- dev_err(dev, "__devm_request_mem_region(mac) failed\n");
- err = -EBUSY;
- goto _return_of_get_parent;
+ err = devm_request_resource(dev, fman_get_mem_region(priv->fman),
+ mac_dev->res);
+ if (err) {
+ dev_err_probe(dev, err, "could not request resource\n");
+ return err;
}
- priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
- resource_size(mac_dev->res));
- if (!priv->vaddr) {
+ mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
+ resource_size(mac_dev->res));
+ if (!mac_dev->vaddr) {
dev_err(dev, "devm_ioremap() failed\n");
- err = -EIO;
- goto _return_of_get_parent;
+ return -EIO;
}
- if (!of_device_is_available(mac_node)) {
- err = -ENODEV;
- goto _return_of_get_parent;
- }
+ if (!of_device_is_available(mac_node))
+ return -ENODEV;
/* Get the cell-index */
err = of_property_read_u32(mac_node, "cell-index", &val);
if (err) {
dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
priv->cell_index = (u8)val;
@@ -732,15 +258,13 @@ static int mac_probe(struct platform_device *_of_dev)
if (unlikely(nph < 0)) {
dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
mac_node);
- err = nph;
- goto _return_of_get_parent;
+ return nph;
}
if (nph != ARRAY_SIZE(mac_dev->port)) {
dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n",
mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
@@ -749,8 +273,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (!dev_node) {
dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n",
mac_node);
- err = -EINVAL;
- goto _return_of_node_put;
+ return -EINVAL;
}
of_dev = of_find_device_by_node(dev_node);
@@ -781,102 +304,48 @@ static int mac_probe(struct platform_device *_of_dev)
}
mac_dev->phy_if = phy_if;
- priv->speed = phy2speed[mac_dev->phy_if];
- priv->max_speed = priv->speed;
- mac_dev->if_support = DTSEC_SUPPORTED;
- /* We don't support half-duplex in SGMII mode */
- if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII)
- mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
- SUPPORTED_100baseT_Half);
-
- /* Gigabit support (no half-duplex) */
- if (priv->max_speed == 1000)
- mac_dev->if_support |= SUPPORTED_1000baseT_Full;
-
- /* The 10G interface only supports one mode */
- if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
- mac_dev->if_support = SUPPORTED_10000baseT_Full;
-
- /* Get the rest of the PHY information */
- mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
- if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) {
- struct phy_device *phy;
-
- err = of_phy_register_fixed_link(mac_node);
- if (err)
- goto _return_of_get_parent;
-
- priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
- GFP_KERNEL);
- if (!priv->fixed_link) {
- err = -ENOMEM;
- goto _return_of_get_parent;
- }
-
- mac_dev->phy_node = of_node_get(mac_node);
- phy = of_phy_find_device(mac_dev->phy_node);
- if (!phy) {
- err = -EINVAL;
- of_node_put(mac_dev->phy_node);
- goto _return_of_get_parent;
- }
-
- priv->fixed_link->link = phy->link;
- priv->fixed_link->speed = phy->speed;
- priv->fixed_link->duplex = phy->duplex;
- priv->fixed_link->pause = phy->pause;
- priv->fixed_link->asym_pause = phy->asym_pause;
+ params.mac_id = priv->cell_index;
+ params.fm = (void *)priv->fman;
+ params.exception_cb = mac_exception;
+ params.event_cb = mac_exception;
- put_device(&phy->mdio.dev);
- }
-
- err = mac_dev->init(mac_dev);
- if (err < 0) {
- dev_err(dev, "mac_dev->init() = %d\n", err);
- of_node_put(mac_dev->phy_node);
- goto _return_of_get_parent;
- }
-
- /* pause frame autonegotiation enabled */
- mac_dev->autoneg_pause = true;
-
- * By initializing the values to false, force FMD to enable PAUSE frames
- * on RX and TX
- */
- mac_dev->rx_pause_req = true;
- mac_dev->tx_pause_req = true;
- mac_dev->rx_pause_active = false;
- mac_dev->tx_pause_active = false;
- err = fman_set_mac_active_pause(mac_dev, true, true);
+ err = init(mac_dev, mac_node, &params);
if (err < 0)
- dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);
+ return err;
if (!is_zero_ether_addr(mac_dev->addr))
dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev);
if (IS_ERR(priv->eth_dev)) {
+ err = PTR_ERR(priv->eth_dev);
dev_err(dev, "failed to add Ethernet platform device for MAC %d\n",
priv->cell_index);
priv->eth_dev = NULL;
}
- goto _return;
+ return err;
_return_of_node_put:
of_node_put(dev_node);
-_return_of_get_parent:
- kfree(priv->fixed_link);
-_return:
return err;
}
+static int mac_remove(struct platform_device *pdev)
+{
+ struct mac_device *mac_dev = platform_get_drvdata(pdev);
+
+ platform_device_unregister(mac_dev->priv->eth_dev);
+ return 0;
+}
+
static struct platform_driver mac_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = mac_match,
},
.probe = mac_probe,
+ .remove = mac_remove,
};
builtin_platform_driver(mac_driver);
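mac_probe() now selects the per-MAC init routine through the .data field of mac_match[] via of_device_get_match_data() instead of an of_device_is_compatible() ladder. The sketch below, not part of the patch, models that table-driven dispatch in plain user-space C; get_match_data_stub() is only a stand-in for the real OF helper and the compatible strings are reused purely as lookup keys.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct mac_stub { const char *compatible; };

typedef int (*mac_init_fn)(struct mac_stub *mac);

static int dtsec_init_stub(struct mac_stub *mac) { printf("dTSEC init for %s\n", mac->compatible); return 0; }
static int tgec_init_stub(struct mac_stub *mac)  { printf("tGEC init for %s\n", mac->compatible);  return 0; }
static int memac_init_stub(struct mac_stub *mac) { printf("mEMAC init for %s\n", mac->compatible); return 0; }

/* model of the of_device_id table: compatible string -> init callback */
static const struct { const char *compatible; mac_init_fn init; } match_stub[] = {
	{ "fsl,fman-dtsec", dtsec_init_stub },
	{ "fsl,fman-xgec",  tgec_init_stub  },
	{ "fsl,fman-memac", memac_init_stub },
};

/* stand-in for of_device_get_match_data(): return the matching .data */
static mac_init_fn get_match_data_stub(const char *compatible)
{
	size_t i;

	for (i = 0; i < sizeof(match_stub) / sizeof(match_stub[0]); i++)
		if (!strcmp(match_stub[i].compatible, compatible))
			return match_stub[i].init;
	return NULL;
}

int main(void)
{
	struct mac_stub mac = { "fsl,fman-xgec" };
	mac_init_fn init = get_match_data_stub(mac.compatible);

	return init ? init(&mac) : -1;
}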
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index daa285a9b8b2..ad06f8d7924b 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -1,32 +1,6 @@
-/* Copyright 2008-2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __MAC_H
@@ -35,6 +9,7 @@
#include <linux/device.h>
#include <linux/if_ether.h>
#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/list.h>
#include "fman_port.h"
@@ -45,35 +20,27 @@ struct fman_mac;
struct mac_priv_s;
struct mac_device {
+ void __iomem *vaddr;
+ struct device *dev;
struct resource *res;
u8 addr[ETH_ALEN];
struct fman_port *port[2];
- u32 if_support;
- struct phy_device *phy_dev;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
phy_interface_t phy_if;
- struct device_node *phy_node;
- bool autoneg_pause;
- bool rx_pause_req;
- bool tx_pause_req;
- bool rx_pause_active;
- bool tx_pause_active;
bool promisc;
bool allmulti;
- int (*init)(struct mac_device *mac_dev);
- int (*start)(struct mac_device *mac_dev);
- int (*stop)(struct mac_device *mac_dev);
- void (*adjust_link)(struct mac_device *mac_dev);
+ const struct phylink_mac_ops *phylink_ops;
+ int (*enable)(struct fman_mac *mac_dev);
+ void (*disable)(struct fman_mac *mac_dev);
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr);
int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
int (*set_multi)(struct net_device *net_dev,
struct mac_device *mac_dev);
- int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
- int (*set_tx_pause)(struct fman_mac *mac_dev, u8 priority,
- u16 pause_time, u16 thresh_time);
int (*set_exception)(struct fman_mac *mac_dev,
enum fman_mac_exceptions exception, bool enable);
int (*add_hash_mac_addr)(struct fman_mac *mac_dev,
@@ -81,10 +48,18 @@ struct mac_device {
int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
enet_addr_t *eth_addr);
+ void (*update_speed)(struct mac_device *mac_dev, int speed);
+
struct fman_mac *fman_mac;
struct mac_priv_s *priv;
};
+static inline struct mac_device
+*fman_config_to_mac(struct phylink_config *config)
+{
+ return container_of(config, struct mac_device, phylink_config);
+}
+
struct dpaa_eth_data {
struct mac_device *mac_dev;
int mac_hw_id;
@@ -97,5 +72,6 @@ int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
bool *tx_pause);
+int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev);
#endif /* __MAC_H */
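fman_config_to_mac() works because struct phylink_config is embedded inside struct mac_device, so container_of() can walk back from a pointer to the member to the enclosing structure that phylink callbacks never receive directly. The standalone sketch below shows the same pattern with simplified stand-in types; it is illustrative only and not the kernel definitions.

#include <stdio.h>
#include <stddef.h>

/* user-space equivalent of the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct phylink_config_stub { int dummy; };

struct mac_device_stub {
	const char *name;
	struct phylink_config_stub phylink_config;	/* embedded member */
};

static struct mac_device_stub *
config_to_mac_stub(struct phylink_config_stub *config)
{
	return container_of(config, struct mac_device_stub, phylink_config);
}

/* a callback that, like tgec_link_up(), only gets the config pointer */
static void link_up_stub(struct phylink_config_stub *config)
{
	struct mac_device_stub *mac = config_to_mac_stub(config);

	printf("link up on %s\n", mac->name);
}

int main(void)
{
	struct mac_device_stub mac = { "tgec0", { 0 } };

	link_up_stub(&mac.phylink_config);
	return 0;
}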
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index bacf25318f87..8844a9a04fcf 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -791,7 +791,7 @@ static int fs_enet_close(struct net_device *dev)
static void fs_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int fs_get_regs_len(struct net_device *dev)
@@ -883,9 +883,6 @@ static const struct ethtool_ops fs_ethtool_ops = {
.set_tunable = fs_set_tunable,
};
-extern int fs_mii_connect(struct net_device *dev);
-extern void fs_mii_disconnect(struct net_device *dev);
-
/**************************************************************************************/
#ifdef CONFIG_FS_ENET_HAS_FEC
@@ -1020,7 +1017,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
ndev->netdev_ops = &fs_enet_netdev_ops;
ndev->watchdog_timeo = 2 * HZ;
INIT_WORK(&fep->timeout_work, fs_timeout_work);
- netif_napi_add(ndev, &fep->napi, fs_enet_napi, fpi->napi_weight);
+ netif_napi_add_weight(ndev, &fep->napi, fs_enet_napi,
+ fpi->napi_weight);
ndev->ethtool_ops = &fs_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 5ff2634bee2f..cb419aef8d1b 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -201,7 +201,7 @@ void fs_enet_platform_cleanup(void);
/* access macros */
#if defined(CONFIG_CPM1)
-/* for a a CPM1 __raw_xxx's are sufficient */
+/* for a CPM1 __raw_xxx's are sufficient */
#define __cbd_out32(addr, x) __raw_writel(x, addr)
#define __cbd_out16(addr, x) __raw_writew(x, addr)
#define __cbd_in32(addr) __raw_readl(addr)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 99fe2c210d0f..61f4b6e50d29 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -98,7 +98,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
return -EINVAL;
fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
- if (!fep->fcc.fccp)
+ if (!fep->fec.fecp)
return -EINVAL;
return 0;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 152f4d83765a..d37d7a19a759 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -102,7 +102,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
struct resource res;
struct mii_bus *new_bus;
struct fec_info *fec;
- int (*get_bus_freq)(struct device_node *);
+ int (*get_bus_freq)(struct device *);
int ret = -ENOMEM, clock, speed;
match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev);
@@ -136,7 +136,7 @@ static int fs_enet_mdio_probe(struct platform_device *ofdev)
}
if (get_bus_freq) {
- clock = get_bus_freq(ofdev->dev.of_node);
+ clock = get_bus_freq(&ofdev->dev);
if (!clock) {
/* Use maximum divider if clock is unknown */
dev_warn(&ofdev->dev, "could not determine IPS clock\n");
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index acab58fd3db3..38d5013c6fed 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -787,10 +787,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
else
priv->interface = gfar_get_interface(dev);
- if (of_find_property(np, "fsl,magic-packet", NULL))
+ if (of_property_read_bool(np, "fsl,magic-packet"))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;
- if (of_get_property(np, "fsl,wake-on-filer", NULL))
+ if (of_property_read_bool(np, "fsl,wake-on-filer"))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -1944,6 +1944,7 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
}
+ skb_tx_timestamp(skb);
netdev_tx_sent_queue(txq, bytes_sent);
gfar_wmb();
@@ -2076,10 +2077,6 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
priv->hwts_tx_en = 0;
@@ -3236,9 +3233,9 @@ static int gfar_probe(struct platform_device *ofdev)
/* Register for napi ...We are registering NAPI for each grp */
for (i = 0; i < priv->num_grps; i++) {
netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
- gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
- netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
- gfar_poll_tx_sq, 2);
+ gfar_poll_rx_sq);
+ netif_napi_add_tx_weight(dev, &priv->gfargrp[i].napi_tx,
+ gfar_poll_tx_sq, 2);
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index ca5e14f908fe..68b59d3202e3 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -52,9 +52,6 @@ struct ethtool_rx_list {
unsigned int count;
};
-/* The maximum number of packets to be handled in one call of gfar_poll */
-#define GFAR_DEV_WEIGHT 64
-
/* Length for FCB */
#define GMAC_FCB_LEN 8
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 7b32ed29bf4c..b2b0d3c26fcc 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -163,7 +163,7 @@ static int gfar_sset_count(struct net_device *dev, int sset)
static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
}
/* Return the length of the register structure */
@@ -372,7 +372,9 @@ static int gfar_scoalesce(struct net_device *dev,
* rx, rx_mini, and rx_jumbo rings are the same size, as mini and
* jumbo are ignored by the driver */
static void gfar_gringparam(struct net_device *dev,
- struct ethtool_ringparam *rvals)
+ struct ethtool_ringparam *rvals,
+ struct kernel_ethtool_ringparam *kernel_rvals,
+ struct netlink_ext_ack *extack)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -399,7 +401,9 @@ static void gfar_gringparam(struct net_device *dev,
* necessary so that we don't mess things up while we're in motion.
*/
static int gfar_sringparam(struct net_device *dev,
- struct ethtool_ringparam *rvals)
+ struct ethtool_ringparam *rvals,
+ struct kernel_ethtool_ringparam *kernel_rvals,
+ struct netlink_ext_ack *extack)
{
struct gfar_private *priv = netdev_priv(dev);
int err = 0, i;
@@ -1453,6 +1457,7 @@ static int gfar_get_ts_info(struct net_device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
return 0;
}
@@ -1460,6 +1465,7 @@ static int gfar_get_ts_info(struct net_device *dev,
ptp_node = of_find_compatible_node(NULL, NULL, "fsl,etsec-ptp");
if (ptp_node) {
ptp_dev = of_find_device_by_node(ptp_node);
+ of_node_put(ptp_node);
if (ptp_dev)
ptp = platform_get_drvdata(ptp_dev);
}
@@ -1469,7 +1475,10 @@ static int gfar_get_ts_info(struct net_device *dev,
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_RAW_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
(1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
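Besides the strlcpy()-to-strscpy() swap, the gianfar_ethtool.c hunks pick up the extended ethtool ring ops signature (the extra struct kernel_ethtool_ringparam and struct netlink_ext_ack arguments), plug an OF node refcount leak by calling of_node_put() once of_find_device_by_node() has been used, and advertise software timestamping alongside the hardware capabilities in get_ts_info. A minimal sketch of the extended callback shape for a hypothetical driver with fixed ring sizes (my_get_ringparam and MY_RING_SIZE are placeholders, not gianfar code); drivers with nothing to report through the new arguments can simply leave them untouched:

	static void my_get_ringparam(struct net_device *dev,
				     struct ethtool_ringparam *ring,
				     struct kernel_ethtool_ringparam *kring,
				     struct netlink_ext_ack *extack)
	{
		ring->rx_max_pending = MY_RING_SIZE;
		ring->tx_max_pending = MY_RING_SIZE;
		ring->rx_pending = MY_RING_SIZE;
		ring->tx_pending = MY_RING_SIZE;
	}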
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 823221c912ab..7a4cb4f07c32 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3712,7 +3712,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev->netdev_ops = &ucc_geth_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
- netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
+ netif_napi_add(dev, &ugeth->napi, ucc_geth_poll);
dev->mtu = 1500;
dev->max_mtu = 1518;
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 14c08a868190..601beb93d3b3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -207,7 +207,9 @@ uec_get_regs(struct net_device *netdev,
static void
uec_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct ucc_geth_info *ug_info = ugeth->ug_info;
@@ -226,7 +228,9 @@ uec_get_ringparam(struct net_device *netdev,
static int
uec_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct ucc_geth_info *ug_info = ugeth->ug_info;
@@ -333,8 +337,8 @@ static void
uec_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
}
#ifdef CONFIG_PM
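ucc_geth follows the same API updates: netif_napi_add() loses its weight argument, uec_get_ringparam()/uec_set_ringparam() gain the kernel_ethtool_ringparam/extack parameters, and the drvinfo strings move from strlcpy() to strscpy(). The practical difference of the string change is the return value: strscpy() guarantees NUL termination and returns -E2BIG on truncation instead of the would-be source length, so truncation can be detected without re-measuring the source. A small sketch, not taken from either driver (name and some_long_driver_name are illustrative):

	char name[8];
	ssize_t n = strscpy(name, some_long_driver_name, sizeof(name));

	if (n == -E2BIG)
		pr_debug("driver name was truncated\n");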
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 0b68852379da..a13b4ba4d6e1 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -14,6 +14,7 @@
#include <linux/acpi.h>
#include <linux/acpi_mdio.h>
+#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mdio.h>
@@ -36,9 +37,10 @@ struct tgec_mdio_controller {
} __packed;
#define MDIO_STAT_ENC BIT(6)
-#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
+#define MDIO_STAT_CLKDIV(x) (((x) & 0x1ff) << 7)
#define MDIO_STAT_BSY BIT(0)
#define MDIO_STAT_RD_ER BIT(1)
+#define MDIO_STAT_PRE_DIS BIT(5)
#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
#define MDIO_CTL_PRE_DIS BIT(10)
@@ -47,11 +49,13 @@ struct tgec_mdio_controller {
#define MDIO_CTL_READ BIT(15)
#define MDIO_DATA(x) (x & 0xffff)
-#define MDIO_DATA_BSY BIT(31)
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;
+ struct clk *enet_clk;
+ u32 mdc_freq;
bool is_little_endian;
+ bool has_a009885;
bool has_a011043;
};
@@ -124,30 +128,49 @@ static int xgmac_wait_until_done(struct device *dev,
return 0;
}
-/*
- * Write value to the PHY for this device to the register at regnum,waiting
- * until the write is done before it returns. All PHY configuration has to be
- * done through the TSEC1 MIIM regs.
- */
-static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+static int xgmac_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value)
{
struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
- uint16_t dev_addr;
+ bool endian = priv->is_little_endian;
+ u16 dev_addr = regnum & 0x1f;
u32 mdio_ctl, mdio_stat;
int ret;
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
+ mdio_stat &= ~MDIO_STAT_ENC;
+ xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ /* Set the port and dev addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
+
+ /* Write the value to the register */
+ xgmac_write32(MDIO_DATA(value), &regs->mdio_data, endian);
+
+ ret = xgmac_wait_until_done(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int xgmac_mdio_write_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum, u16 value)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
bool endian = priv->is_little_endian;
+ u32 mdio_ctl, mdio_stat;
+ int ret;
mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
- if (regnum & MII_ADDR_C45) {
- /* Clause 45 (ie 10G) */
- dev_addr = (regnum >> 16) & 0x1f;
- mdio_stat |= MDIO_STAT_ENC;
- } else {
- /* Clause 22 (ie 1G) */
- dev_addr = regnum & 0x1f;
- mdio_stat &= ~MDIO_STAT_ENC;
- }
+ mdio_stat |= MDIO_STAT_ENC;
xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
@@ -160,13 +183,11 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
/* Set the register address */
- if (regnum & MII_ADDR_C45) {
- xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
+ xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
- ret = xgmac_wait_until_free(&bus->dev, regs, endian);
- if (ret)
- return ret;
- }
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
/* Write the value to the register */
xgmac_write32(MDIO_DATA(value), &regs->mdio_data, endian);
@@ -178,31 +199,82 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
return 0;
}
-/*
- * Reads from register regnum in the PHY for device dev, returning the value.
+/* Reads from register regnum in the PHY for device dev, returning the value.
* Clears miimcom first. All PHY configuration has to be done through the
* TSEC1 MIIM regs.
*/
-static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+static int xgmac_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
{
struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
- uint16_t dev_addr;
+ bool endian = priv->is_little_endian;
+ u16 dev_addr = regnum & 0x1f;
+ unsigned long flags;
uint32_t mdio_stat;
uint32_t mdio_ctl;
- uint16_t value;
int ret;
- bool endian = priv->is_little_endian;
mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
- if (regnum & MII_ADDR_C45) {
- dev_addr = (regnum >> 16) & 0x1f;
- mdio_stat |= MDIO_STAT_ENC;
+ mdio_stat &= ~MDIO_STAT_ENC;
+ xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ /* Set the Port and Device Addrs */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
+
+ if (priv->has_a009885)
+ /* Once the operation completes, i.e. MDIO_STAT_BSY clears, we
+ * must read back the data register within 16 MDC cycles.
+ */
+ local_irq_save(flags);
+
+ /* Initiate the read */
+ xgmac_write32(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl, endian);
+
+ ret = xgmac_wait_until_done(&bus->dev, regs, endian);
+ if (ret)
+ goto irq_restore;
+
+ /* Return all Fs if nothing was there */
+ if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
+ !priv->has_a011043) {
+ dev_dbg(&bus->dev,
+ "Error while reading PHY%d reg at %d.%d\n",
+ phy_id, dev_addr, regnum);
+ ret = 0xffff;
} else {
- dev_addr = regnum & 0x1f;
- mdio_stat &= ~MDIO_STAT_ENC;
+ ret = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
+ dev_dbg(&bus->dev, "read %04x\n", ret);
}
+irq_restore:
+ if (priv->has_a009885)
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+/* Reads from register regnum in the PHY for device dev, returning the value.
+ * Clears miimcom first. All PHY configuration has to be done through the
+ * TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_read_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ bool endian = priv->is_little_endian;
+ u32 mdio_stat, mdio_ctl;
+ unsigned long flags;
+ int ret;
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
+ mdio_stat |= MDIO_STAT_ENC;
+
xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
ret = xgmac_wait_until_free(&bus->dev, regs, endian);
@@ -214,34 +286,86 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
/* Set the register address */
- if (regnum & MII_ADDR_C45) {
- xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
+ xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
- ret = xgmac_wait_until_free(&bus->dev, regs, endian);
- if (ret)
- return ret;
- }
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ if (priv->has_a009885)
+ /* Once the operation completes, i.e. MDIO_STAT_BSY clears, we
+ * must read back the data register within 16 MDC cycles.
+ */
+ local_irq_save(flags);
/* Initiate the read */
xgmac_write32(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl, endian);
ret = xgmac_wait_until_done(&bus->dev, regs, endian);
if (ret)
- return ret;
+ goto irq_restore;
/* Return all Fs if nothing was there */
if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
!priv->has_a011043) {
dev_dbg(&bus->dev,
- "Error while reading PHY%d reg at %d.%hhu\n",
+ "Error while reading PHY%d reg at %d.%d\n",
phy_id, dev_addr, regnum);
- return 0xffff;
+ ret = 0xffff;
+ } else {
+ ret = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
+ dev_dbg(&bus->dev, "read %04x\n", ret);
+ }
+
+irq_restore:
+ if (priv->has_a009885)
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static int xgmac_mdio_set_mdc_freq(struct mii_bus *bus)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ struct device *dev = bus->parent;
+ u32 mdio_stat, div;
+
+ if (device_property_read_u32(dev, "clock-frequency", &priv->mdc_freq))
+ return 0;
+
+ priv->enet_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->enet_clk)) {
+ dev_err(dev, "Input clock unknown, not changing MDC frequency");
+ return PTR_ERR(priv->enet_clk);
}
- value = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
- dev_dbg(&bus->dev, "read %04x\n", value);
+ div = ((clk_get_rate(priv->enet_clk) / priv->mdc_freq) - 1) / 2;
+ if (div < 5 || div > 0x1ff) {
+ dev_err(dev, "Requested MDC frequency is out of range, ignoring");
+ return -EINVAL;
+ }
- return value;
+ mdio_stat = xgmac_read32(&regs->mdio_stat, priv->is_little_endian);
+ mdio_stat &= ~MDIO_STAT_CLKDIV(0x1ff);
+ mdio_stat |= MDIO_STAT_CLKDIV(div);
+ xgmac_write32(mdio_stat, &regs->mdio_stat, priv->is_little_endian);
+ return 0;
+}
+
+static void xgmac_mdio_set_suppress_preamble(struct mii_bus *bus)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ struct device *dev = bus->parent;
+ u32 mdio_stat;
+
+ if (!device_property_read_bool(dev, "suppress-preamble"))
+ return;
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, priv->is_little_endian);
+ mdio_stat |= MDIO_STAT_PRE_DIS;
+ xgmac_write32(mdio_stat, &regs->mdio_stat, priv->is_little_endian);
}
static int xgmac_mdio_probe(struct platform_device *pdev)
@@ -263,24 +387,23 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
return -EINVAL;
}
- bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv));
+ bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(struct mdio_fsl_priv));
if (!bus)
return -ENOMEM;
bus->name = "Freescale XGMAC MDIO Bus";
- bus->read = xgmac_mdio_read;
- bus->write = xgmac_mdio_write;
+ bus->read = xgmac_mdio_read_c22;
+ bus->write = xgmac_mdio_write_c22;
+ bus->read_c45 = xgmac_mdio_read_c45;
+ bus->write_c45 = xgmac_mdio_write_c45;
bus->parent = &pdev->dev;
- bus->probe_capabilities = MDIOBUS_C22_C45;
snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res->start);
- /* Set the PHY base address */
priv = bus->priv;
- priv->mdio_base = ioremap(res->start, resource_size(res));
- if (!priv->mdio_base) {
- ret = -ENOMEM;
- goto err_ioremap;
- }
+ priv->mdio_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!priv->mdio_base)
+ return -ENOMEM;
/* For both ACPI and DT cases, endianness of MDIO controller
* needs to be specified using "little-endian" property.
@@ -288,10 +411,18 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
priv->is_little_endian = device_property_read_bool(&pdev->dev,
"little-endian");
+ priv->has_a009885 = device_property_read_bool(&pdev->dev,
+ "fsl,erratum-a009885");
priv->has_a011043 = device_property_read_bool(&pdev->dev,
"fsl,erratum-a011043");
- fwnode = pdev->dev.fwnode;
+ xgmac_mdio_set_suppress_preamble(bus);
+
+ ret = xgmac_mdio_set_mdc_freq(bus);
+ if (ret)
+ return ret;
+
+ fwnode = dev_fwnode(&pdev->dev);
if (is_of_node(fwnode))
ret = of_mdiobus_register(bus, to_of_node(fwnode));
else if (is_acpi_node(fwnode))
@@ -300,31 +431,12 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
ret = -EINVAL;
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");
- goto err_registration;
+ return ret;
}
platform_set_drvdata(pdev, bus);
return 0;
-
-err_registration:
- iounmap(priv->mdio_base);
-
-err_ioremap:
- mdiobus_free(bus);
-
- return ret;
-}
-
-static int xgmac_mdio_remove(struct platform_device *pdev)
-{
- struct mii_bus *bus = platform_get_drvdata(pdev);
-
- mdiobus_unregister(bus);
- iounmap(bus->priv);
- mdiobus_free(bus);
-
- return 0;
}
static const struct of_device_id xgmac_mdio_match[] = {
@@ -351,7 +463,6 @@ static struct platform_driver xgmac_mdio_driver = {
.acpi_match_table = xgmac_acpi_match,
},
.probe = xgmac_mdio_probe,
- .remove = xgmac_mdio_remove,
};
module_platform_driver(xgmac_mdio_driver);
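The xgmac_mdio rework splits the old regnum & MII_ADDR_C45 dispatch into dedicated Clause 22 and Clause 45 read/write methods registered through bus->read_c45/write_c45 (which is also why the probe_capabilities hint goes away), converts the probe path to devm_* helpers so the remove callback can be deleted, and adds two hardware-driven features: an erratum A009885 path that keeps local interrupts disabled so the data register is read back within 16 MDC cycles of MDIO_STAT_BSY clearing, and optional MDC frequency and preamble-suppression setup driven by the "clock-frequency" and "suppress-preamble" properties. As a worked example of the divider arithmetic, with illustrative numbers only: a 500 MHz enet clock and a requested 2.5 MHz MDC give

	div = ((500000000 / 2500000) - 1) / 2;	/* = 99 */

which lies inside the accepted 5..0x1ff window and is then masked into the widened 9-bit CLKDIV field by MDIO_STAT_CLKDIV(div).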
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index b0d733e9a7c6..4859493471db 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -1046,8 +1046,8 @@ static void fjn_rx(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/ethernet/fungible/Kconfig b/drivers/net/ethernet/fungible/Kconfig
new file mode 100644
index 000000000000..1ecedecc0f6c
--- /dev/null
+++ b/drivers/net/ethernet/fungible/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Fungible network driver configuration
+#
+
+config NET_VENDOR_FUNGIBLE
+ bool "Fungible devices"
+ default y
+ help
+ If you have a Fungible network device, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Fungible cards. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_FUNGIBLE
+
+config FUN_CORE
+ tristate
+ select SBITMAP
+ help
+ A service module offering basic common services to Fungible
+ device drivers.
+
+source "drivers/net/ethernet/fungible/funeth/Kconfig"
+
+endif # NET_VENDOR_FUNGIBLE
diff --git a/drivers/net/ethernet/fungible/Makefile b/drivers/net/ethernet/fungible/Makefile
new file mode 100644
index 000000000000..df759f1585a1
--- /dev/null
+++ b/drivers/net/ethernet/fungible/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+#
+# Makefile for the Fungible network device drivers.
+#
+
+obj-$(CONFIG_FUN_CORE) += funcore/
+obj-$(CONFIG_FUN_ETH) += funeth/
diff --git a/drivers/net/ethernet/fungible/funcore/Makefile b/drivers/net/ethernet/fungible/funcore/Makefile
new file mode 100644
index 000000000000..bc16b264b53e
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+obj-$(CONFIG_FUN_CORE) += funcore.o
+
+funcore-y := fun_dev.o fun_queue.o
diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.c b/drivers/net/ethernet/fungible/funcore/fun_dev.c
new file mode 100644
index 000000000000..a7fbd4cd560a
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_dev.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/nvme.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/sched/signal.h>
+
+#include "fun_queue.h"
+#include "fun_dev.h"
+
+#define FUN_ADMIN_CMD_TO_MS 3000
+
+enum {
+ AQA_ASQS_SHIFT = 0,
+ AQA_ACQS_SHIFT = 16,
+ AQA_MIN_QUEUE_SIZE = 2,
+ AQA_MAX_QUEUE_SIZE = 4096
+};
+
+/* context for admin commands */
+struct fun_cmd_ctx {
+ fun_admin_callback_t cb; /* callback to invoke on completion */
+ void *cb_data; /* user data provided to callback */
+ int cpu; /* CPU where the cmd's tag was allocated */
+};
+
+/* Context for synchronous admin commands. */
+struct fun_sync_cmd_ctx {
+ struct completion compl;
+ u8 *rsp_buf; /* caller provided response buffer */
+ unsigned int rsp_len; /* response buffer size */
+ u8 rsp_status; /* command response status */
+};
+
+/* Wait for the CSTS.RDY bit to match @enabled. */
+static int fun_wait_ready(struct fun_dev *fdev, bool enabled)
+{
+ unsigned int cap_to = NVME_CAP_TIMEOUT(fdev->cap_reg);
+ u32 bit = enabled ? NVME_CSTS_RDY : 0;
+ unsigned long deadline;
+
+ deadline = ((cap_to + 1) * HZ / 2) + jiffies; /* CAP.TO is in 500ms */
+
+ for (;;) {
+ u32 csts = readl(fdev->bar + NVME_REG_CSTS);
+
+ if (csts == ~0) {
+ dev_err(fdev->dev, "CSTS register read %#x\n", csts);
+ return -EIO;
+ }
+
+ if ((csts & NVME_CSTS_RDY) == bit)
+ return 0;
+
+ if (time_is_before_jiffies(deadline))
+ break;
+
+ msleep(100);
+ }
+
+ dev_err(fdev->dev,
+ "Timed out waiting for device to indicate RDY %u; aborting %s\n",
+ enabled, enabled ? "initialization" : "reset");
+ return -ETIMEDOUT;
+}
+
+/* Check CSTS and return an error if it is unreadable or has unexpected
+ * RDY value.
+ */
+static int fun_check_csts_rdy(struct fun_dev *fdev, unsigned int expected_rdy)
+{
+ u32 csts = readl(fdev->bar + NVME_REG_CSTS);
+ u32 actual_rdy = csts & NVME_CSTS_RDY;
+
+ if (csts == ~0) {
+ dev_err(fdev->dev, "CSTS register read %#x\n", csts);
+ return -EIO;
+ }
+ if (actual_rdy != expected_rdy) {
+ dev_err(fdev->dev, "Unexpected CSTS RDY %u\n", actual_rdy);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Check that CSTS RDY has the expected value. Then write a new value to the CC
+ * register and wait for CSTS RDY to match the new CC ENABLE state.
+ */
+static int fun_update_cc_enable(struct fun_dev *fdev, unsigned int initial_rdy)
+{
+ int rc = fun_check_csts_rdy(fdev, initial_rdy);
+
+ if (rc)
+ return rc;
+ writel(fdev->cc_reg, fdev->bar + NVME_REG_CC);
+ return fun_wait_ready(fdev, !!(fdev->cc_reg & NVME_CC_ENABLE));
+}
+
+static int fun_disable_ctrl(struct fun_dev *fdev)
+{
+ fdev->cc_reg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);
+ return fun_update_cc_enable(fdev, 1);
+}
+
+static int fun_enable_ctrl(struct fun_dev *fdev, u32 admin_cqesz_log2,
+ u32 admin_sqesz_log2)
+{
+ fdev->cc_reg = (admin_cqesz_log2 << NVME_CC_IOCQES_SHIFT) |
+ (admin_sqesz_log2 << NVME_CC_IOSQES_SHIFT) |
+ ((PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT) |
+ NVME_CC_ENABLE;
+
+ return fun_update_cc_enable(fdev, 0);
+}
+
+static int fun_map_bars(struct fun_dev *fdev, const char *name)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+ int err;
+
+ err = pci_request_mem_regions(pdev, name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Couldn't get PCI memory resources, err %d\n", err);
+ return err;
+ }
+
+ fdev->bar = pci_ioremap_bar(pdev, 0);
+ if (!fdev->bar) {
+ dev_err(&pdev->dev, "Couldn't map BAR 0\n");
+ pci_release_mem_regions(pdev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void fun_unmap_bars(struct fun_dev *fdev)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+
+ if (fdev->bar) {
+ iounmap(fdev->bar);
+ fdev->bar = NULL;
+ pci_release_mem_regions(pdev);
+ }
+}
+
+static int fun_set_dma_masks(struct device *dev)
+{
+ int err;
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (err)
+ dev_err(dev, "DMA mask configuration failed, err %d\n", err);
+ return err;
+}
+
+static irqreturn_t fun_admin_irq(int irq, void *data)
+{
+ struct fun_queue *funq = data;
+
+ return fun_process_cq(funq, 0) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static void fun_complete_admin_cmd(struct fun_queue *funq, void *data,
+ void *entry, const struct fun_cqe_info *info)
+{
+ const struct fun_admin_rsp_common *rsp_common = entry;
+ struct fun_dev *fdev = funq->fdev;
+ struct fun_cmd_ctx *cmd_ctx;
+ int cpu;
+ u16 cid;
+
+ if (info->sqhd == cpu_to_be16(0xffff)) {
+ dev_dbg(fdev->dev, "adminq event");
+ if (fdev->adminq_cb)
+ fdev->adminq_cb(fdev, entry);
+ return;
+ }
+
+ cid = be16_to_cpu(rsp_common->cid);
+ dev_dbg(fdev->dev, "admin CQE cid %u, op %u, ret %u\n", cid,
+ rsp_common->op, rsp_common->ret);
+
+ cmd_ctx = &fdev->cmd_ctx[cid];
+ if (cmd_ctx->cpu < 0) {
+ dev_err(fdev->dev,
+ "admin CQE with CID=%u, op=%u does not match a pending command\n",
+ cid, rsp_common->op);
+ return;
+ }
+
+ if (cmd_ctx->cb)
+ cmd_ctx->cb(fdev, entry, xchg(&cmd_ctx->cb_data, NULL));
+
+ cpu = cmd_ctx->cpu;
+ cmd_ctx->cpu = -1;
+ sbitmap_queue_clear(&fdev->admin_sbq, cid, cpu);
+}
+
+static int fun_init_cmd_ctx(struct fun_dev *fdev, unsigned int ntags)
+{
+ unsigned int i;
+
+ fdev->cmd_ctx = kvcalloc(ntags, sizeof(*fdev->cmd_ctx), GFP_KERNEL);
+ if (!fdev->cmd_ctx)
+ return -ENOMEM;
+
+ for (i = 0; i < ntags; i++)
+ fdev->cmd_ctx[i].cpu = -1;
+
+ return 0;
+}
+
+/* Allocate and enable an admin queue and assign it the first IRQ vector. */
+static int fun_enable_admin_queue(struct fun_dev *fdev,
+ const struct fun_dev_params *areq)
+{
+ struct fun_queue_alloc_req qreq = {
+ .cqe_size_log2 = areq->cqe_size_log2,
+ .sqe_size_log2 = areq->sqe_size_log2,
+ .cq_depth = areq->cq_depth,
+ .sq_depth = areq->sq_depth,
+ .rq_depth = areq->rq_depth,
+ };
+ unsigned int ntags = areq->sq_depth - 1;
+ struct fun_queue *funq;
+ int rc;
+
+ if (fdev->admin_q)
+ return -EEXIST;
+
+ if (areq->sq_depth < AQA_MIN_QUEUE_SIZE ||
+ areq->sq_depth > AQA_MAX_QUEUE_SIZE ||
+ areq->cq_depth < AQA_MIN_QUEUE_SIZE ||
+ areq->cq_depth > AQA_MAX_QUEUE_SIZE)
+ return -EINVAL;
+
+ fdev->admin_q = fun_alloc_queue(fdev, 0, &qreq);
+ if (!fdev->admin_q)
+ return -ENOMEM;
+
+ rc = fun_init_cmd_ctx(fdev, ntags);
+ if (rc)
+ goto free_q;
+
+ rc = sbitmap_queue_init_node(&fdev->admin_sbq, ntags, -1, false,
+ GFP_KERNEL, dev_to_node(fdev->dev));
+ if (rc)
+ goto free_cmd_ctx;
+
+ funq = fdev->admin_q;
+ funq->cq_vector = 0;
+ rc = fun_request_irq(funq, dev_name(fdev->dev), fun_admin_irq, funq);
+ if (rc)
+ goto free_sbq;
+
+ fun_set_cq_callback(funq, fun_complete_admin_cmd, NULL);
+ fdev->adminq_cb = areq->event_cb;
+
+ writel((funq->sq_depth - 1) << AQA_ASQS_SHIFT |
+ (funq->cq_depth - 1) << AQA_ACQS_SHIFT,
+ fdev->bar + NVME_REG_AQA);
+
+ writeq(funq->sq_dma_addr, fdev->bar + NVME_REG_ASQ);
+ writeq(funq->cq_dma_addr, fdev->bar + NVME_REG_ACQ);
+
+ rc = fun_enable_ctrl(fdev, areq->cqe_size_log2, areq->sqe_size_log2);
+ if (rc)
+ goto free_irq;
+
+ if (areq->rq_depth) {
+ rc = fun_create_rq(funq);
+ if (rc)
+ goto disable_ctrl;
+
+ funq_rq_post(funq);
+ }
+
+ return 0;
+
+disable_ctrl:
+ fun_disable_ctrl(fdev);
+free_irq:
+ fun_free_irq(funq);
+free_sbq:
+ sbitmap_queue_free(&fdev->admin_sbq);
+free_cmd_ctx:
+ kvfree(fdev->cmd_ctx);
+ fdev->cmd_ctx = NULL;
+free_q:
+ fun_free_queue(fdev->admin_q);
+ fdev->admin_q = NULL;
+ return rc;
+}
+
+static void fun_disable_admin_queue(struct fun_dev *fdev)
+{
+ struct fun_queue *admq = fdev->admin_q;
+
+ if (!admq)
+ return;
+
+ fun_disable_ctrl(fdev);
+
+ fun_free_irq(admq);
+ __fun_process_cq(admq, 0);
+
+ sbitmap_queue_free(&fdev->admin_sbq);
+
+ kvfree(fdev->cmd_ctx);
+ fdev->cmd_ctx = NULL;
+
+ fun_free_queue(admq);
+ fdev->admin_q = NULL;
+}
+
+/* Return %true if the admin queue has stopped servicing commands as can be
+ * detected through registers. This isn't exhaustive and may provide false
+ * negatives.
+ */
+static bool fun_adminq_stopped(struct fun_dev *fdev)
+{
+ u32 csts = readl(fdev->bar + NVME_REG_CSTS);
+
+ return (csts & (NVME_CSTS_CFS | NVME_CSTS_RDY)) != NVME_CSTS_RDY;
+}
+
+static int fun_wait_for_tag(struct fun_dev *fdev, int *cpup)
+{
+ struct sbitmap_queue *sbq = &fdev->admin_sbq;
+ struct sbq_wait_state *ws = &sbq->ws[0];
+ DEFINE_SBQ_WAIT(wait);
+ int tag;
+
+ for (;;) {
+ sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
+ if (fdev->suppress_cmds) {
+ tag = -ESHUTDOWN;
+ break;
+ }
+ tag = sbitmap_queue_get(sbq, cpup);
+ if (tag >= 0)
+ break;
+ schedule();
+ }
+
+ sbitmap_finish_wait(sbq, ws, &wait);
+ return tag;
+}
+
+/* Submit an asynchronous admin command. Caller is responsible for implementing
+ * any waiting or timeout. Upon command completion the callback @cb is called.
+ */
+int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
+ fun_admin_callback_t cb, void *cb_data, bool wait_ok)
+{
+ struct fun_queue *funq = fdev->admin_q;
+ unsigned int cmdsize = cmd->len8 * 8;
+ struct fun_cmd_ctx *cmd_ctx;
+ int tag, cpu, rc = 0;
+
+ if (WARN_ON(cmdsize > (1 << funq->sqe_size_log2)))
+ return -EMSGSIZE;
+
+ tag = sbitmap_queue_get(&fdev->admin_sbq, &cpu);
+ if (tag < 0) {
+ if (!wait_ok)
+ return -EAGAIN;
+ tag = fun_wait_for_tag(fdev, &cpu);
+ if (tag < 0)
+ return tag;
+ }
+
+ cmd->cid = cpu_to_be16(tag);
+
+ cmd_ctx = &fdev->cmd_ctx[tag];
+ cmd_ctx->cb = cb;
+ cmd_ctx->cb_data = cb_data;
+
+ spin_lock(&funq->sq_lock);
+
+ if (unlikely(fdev->suppress_cmds)) {
+ rc = -ESHUTDOWN;
+ sbitmap_queue_clear(&fdev->admin_sbq, tag, cpu);
+ } else {
+ cmd_ctx->cpu = cpu;
+ memcpy(fun_sqe_at(funq, funq->sq_tail), cmd, cmdsize);
+
+ dev_dbg(fdev->dev, "admin cmd @ %u: %8ph\n", funq->sq_tail,
+ cmd);
+
+ if (++funq->sq_tail == funq->sq_depth)
+ funq->sq_tail = 0;
+ writel(funq->sq_tail, funq->sq_db);
+ }
+ spin_unlock(&funq->sq_lock);
+ return rc;
+}
+
+/* Abandon a pending admin command by clearing the issuer's callback data.
+ * Failure indicates that the command either has already completed or its
+ * completion is racing with this call.
+ */
+static bool fun_abandon_admin_cmd(struct fun_dev *fd,
+ const struct fun_admin_req_common *cmd,
+ void *cb_data)
+{
+ u16 cid = be16_to_cpu(cmd->cid);
+ struct fun_cmd_ctx *cmd_ctx = &fd->cmd_ctx[cid];
+
+ return cmpxchg(&cmd_ctx->cb_data, cb_data, NULL) == cb_data;
+}
+
+/* Stop submission of new admin commands and wake up any processes waiting for
+ * tags. Already submitted commands are left to complete or time out.
+ */
+static void fun_admin_stop(struct fun_dev *fdev)
+{
+ spin_lock(&fdev->admin_q->sq_lock);
+ fdev->suppress_cmds = true;
+ spin_unlock(&fdev->admin_q->sq_lock);
+ sbitmap_queue_wake_all(&fdev->admin_sbq);
+}
+
+/* The callback for synchronous execution of admin commands. It copies the
+ * command response to the caller's buffer and signals completion.
+ */
+static void fun_admin_cmd_sync_cb(struct fun_dev *fd, void *rsp, void *cb_data)
+{
+ const struct fun_admin_rsp_common *rsp_common = rsp;
+ struct fun_sync_cmd_ctx *ctx = cb_data;
+
+ if (!ctx)
+ return; /* command issuer timed out and left */
+ if (ctx->rsp_buf) {
+ unsigned int rsp_len = rsp_common->len8 * 8;
+
+ if (unlikely(rsp_len > ctx->rsp_len)) {
+ dev_err(fd->dev,
+ "response for op %u is %uB > response buffer %uB\n",
+ rsp_common->op, rsp_len, ctx->rsp_len);
+ rsp_len = ctx->rsp_len;
+ }
+ memcpy(ctx->rsp_buf, rsp, rsp_len);
+ }
+ ctx->rsp_status = rsp_common->ret;
+ complete(&ctx->compl);
+}
+
+/* Submit a synchronous admin command. */
+int fun_submit_admin_sync_cmd(struct fun_dev *fdev,
+ struct fun_admin_req_common *cmd, void *rsp,
+ size_t rspsize, unsigned int timeout)
+{
+ struct fun_sync_cmd_ctx ctx = {
+ .compl = COMPLETION_INITIALIZER_ONSTACK(ctx.compl),
+ .rsp_buf = rsp,
+ .rsp_len = rspsize,
+ };
+ unsigned int cmdlen = cmd->len8 * 8;
+ unsigned long jiffies_left;
+ int ret;
+
+ ret = fun_submit_admin_cmd(fdev, cmd, fun_admin_cmd_sync_cb, &ctx,
+ true);
+ if (ret)
+ return ret;
+
+ if (!timeout)
+ timeout = FUN_ADMIN_CMD_TO_MS;
+
+ jiffies_left = wait_for_completion_timeout(&ctx.compl,
+ msecs_to_jiffies(timeout));
+ if (!jiffies_left) {
+ /* The command timed out. Attempt to cancel it so we can return.
+ * But if the command is in the process of completing we'll
+ * wait for it.
+ */
+ if (fun_abandon_admin_cmd(fdev, cmd, &ctx)) {
+ dev_err(fdev->dev, "admin command timed out: %*ph\n",
+ cmdlen, cmd);
+ fun_admin_stop(fdev);
+ /* see if the timeout was due to a queue failure */
+ if (fun_adminq_stopped(fdev))
+ dev_err(fdev->dev,
+ "device does not accept admin commands\n");
+
+ return -ETIMEDOUT;
+ }
+ wait_for_completion(&ctx.compl);
+ }
+
+ if (ctx.rsp_status) {
+ dev_err(fdev->dev, "admin command failed, err %d: %*ph\n",
+ ctx.rsp_status, cmdlen, cmd);
+ }
+
+ return -ctx.rsp_status;
+}
+EXPORT_SYMBOL_GPL(fun_submit_admin_sync_cmd);
+
+/* Return the number of device resources of the requested type. */
+int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res)
+{
+ union {
+ struct fun_admin_res_count_req req;
+ struct fun_admin_res_count_rsp rsp;
+ } cmd;
+ int rc;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(cmd.req));
+ cmd.req.count = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_RES_COUNT,
+ 0, 0);
+
+ rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd), 0);
+ return rc ? rc : be32_to_cpu(cmd.rsp.count.data);
+}
+EXPORT_SYMBOL_GPL(fun_get_res_count);
+
+/* Request that the instance of resource @res with the given id be deleted. */
+int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res,
+ unsigned int flags, u32 id)
+{
+ struct fun_admin_generic_destroy_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(res, sizeof(req)),
+ .destroy = FUN_ADMIN_SIMPLE_SUBOP_INIT(FUN_ADMIN_SUBOP_DESTROY,
+ flags, id)
+ };
+
+ return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0);
+}
+EXPORT_SYMBOL_GPL(fun_res_destroy);
+
+/* Bind two entities of the given types and IDs. */
+int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
+ unsigned int id0, enum fun_admin_bind_type type1,
+ unsigned int id1)
+{
+ struct {
+ struct fun_admin_bind_req req;
+ struct fun_admin_bind_entry entry[2];
+ } cmd = {
+ .req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_BIND,
+ sizeof(cmd)),
+ .entry[0] = FUN_ADMIN_BIND_ENTRY_INIT(type0, id0),
+ .entry[1] = FUN_ADMIN_BIND_ENTRY_INIT(type1, id1),
+ };
+
+ return fun_submit_admin_sync_cmd(fdev, &cmd.req.common, NULL, 0, 0);
+}
+EXPORT_SYMBOL_GPL(fun_bind);
+
+static int fun_get_dev_limits(struct fun_dev *fdev)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+ unsigned int cq_count, sq_count, num_dbs;
+ int rc;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPCQ);
+ if (rc < 0)
+ return rc;
+ cq_count = rc;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPSQ);
+ if (rc < 0)
+ return rc;
+ sq_count = rc;
+
+ /* The admin queue consumes 1 CQ and at least 1 SQ. To be usable the
+ * device must provide additional queues.
+ */
+ if (cq_count < 2 || sq_count < 2 + !!fdev->admin_q->rq_depth)
+ return -EINVAL;
+
+ /* Calculate the max QID based on SQ/CQ/doorbell counts.
+ * SQ/CQ doorbells alternate.
+ */
+ num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) >>
+ (2 + NVME_CAP_STRIDE(fdev->cap_reg));
+ fdev->max_qid = min3(cq_count, sq_count, num_dbs / 2) - 1;
+ fdev->kern_end_qid = fdev->max_qid + 1;
+ return 0;
+}
+
+/* Allocate all MSI-X vectors available on a function and at least @min_vecs. */
+static int fun_alloc_irqs(struct pci_dev *pdev, unsigned int min_vecs)
+{
+ int vecs, num_msix = pci_msix_vec_count(pdev);
+
+ if (num_msix < 0)
+ return num_msix;
+ if (min_vecs > num_msix)
+ return -ERANGE;
+
+ vecs = pci_alloc_irq_vectors(pdev, min_vecs, num_msix, PCI_IRQ_MSIX);
+ if (vecs > 0) {
+ dev_info(&pdev->dev,
+ "Allocated %d IRQ vectors of %d requested\n",
+ vecs, num_msix);
+ } else {
+ dev_err(&pdev->dev,
+ "Unable to allocate at least %u IRQ vectors\n",
+ min_vecs);
+ }
+ return vecs;
+}
+
+/* Allocate and initialize the IRQ manager state. */
+static int fun_alloc_irq_mgr(struct fun_dev *fdev)
+{
+ fdev->irq_map = bitmap_zalloc(fdev->num_irqs, GFP_KERNEL);
+ if (!fdev->irq_map)
+ return -ENOMEM;
+
+ spin_lock_init(&fdev->irqmgr_lock);
+ /* mark IRQ 0 allocated, it is used by the admin queue */
+ __set_bit(0, fdev->irq_map);
+ fdev->irqs_avail = fdev->num_irqs - 1;
+ return 0;
+}
+
+/* Reserve @nirqs of the currently available IRQs and return their indices. */
+int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs, u16 *irq_indices)
+{
+ unsigned int b, n = 0;
+ int err = -ENOSPC;
+
+ if (!nirqs)
+ return 0;
+
+ spin_lock(&fdev->irqmgr_lock);
+ if (nirqs > fdev->irqs_avail)
+ goto unlock;
+
+ for_each_clear_bit(b, fdev->irq_map, fdev->num_irqs) {
+ __set_bit(b, fdev->irq_map);
+ irq_indices[n++] = b;
+ if (n >= nirqs)
+ break;
+ }
+
+ WARN_ON(n < nirqs);
+ fdev->irqs_avail -= n;
+ err = n;
+unlock:
+ spin_unlock(&fdev->irqmgr_lock);
+ return err;
+}
+EXPORT_SYMBOL(fun_reserve_irqs);
+
+/* Release @nirqs previously allocated IRQs with the supplied indices. */
+void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs,
+ u16 *irq_indices)
+{
+ unsigned int i;
+
+ spin_lock(&fdev->irqmgr_lock);
+ for (i = 0; i < nirqs; i++)
+ __clear_bit(irq_indices[i], fdev->irq_map);
+ fdev->irqs_avail += nirqs;
+ spin_unlock(&fdev->irqmgr_lock);
+}
+EXPORT_SYMBOL(fun_release_irqs);
+
+static void fun_serv_handler(struct work_struct *work)
+{
+ struct fun_dev *fd = container_of(work, struct fun_dev, service_task);
+
+ if (test_bit(FUN_SERV_DISABLED, &fd->service_flags))
+ return;
+ if (fd->serv_cb)
+ fd->serv_cb(fd);
+}
+
+void fun_serv_stop(struct fun_dev *fd)
+{
+ set_bit(FUN_SERV_DISABLED, &fd->service_flags);
+ cancel_work_sync(&fd->service_task);
+}
+EXPORT_SYMBOL_GPL(fun_serv_stop);
+
+void fun_serv_restart(struct fun_dev *fd)
+{
+ clear_bit(FUN_SERV_DISABLED, &fd->service_flags);
+ if (fd->service_flags)
+ schedule_work(&fd->service_task);
+}
+EXPORT_SYMBOL_GPL(fun_serv_restart);
+
+void fun_serv_sched(struct fun_dev *fd)
+{
+ if (!test_bit(FUN_SERV_DISABLED, &fd->service_flags))
+ schedule_work(&fd->service_task);
+}
+EXPORT_SYMBOL_GPL(fun_serv_sched);
+
+/* Check and try to get the device into a proper state for initialization,
+ * i.e., CSTS.RDY = CC.EN = 0.
+ */
+static int sanitize_dev(struct fun_dev *fdev)
+{
+ int rc;
+
+ fdev->cap_reg = readq(fdev->bar + NVME_REG_CAP);
+ fdev->cc_reg = readl(fdev->bar + NVME_REG_CC);
+
+ /* First get RDY to agree with the current EN. Give RDY the opportunity
+ * to complete a potential recent EN change.
+ */
+ rc = fun_wait_ready(fdev, fdev->cc_reg & NVME_CC_ENABLE);
+ if (rc)
+ return rc;
+
+ /* Next, reset the device if EN is currently 1. */
+ if (fdev->cc_reg & NVME_CC_ENABLE)
+ rc = fun_disable_ctrl(fdev);
+
+ return rc;
+}
+
+/* Undo the device initialization of fun_dev_enable(). */
+void fun_dev_disable(struct fun_dev *fdev)
+{
+ struct pci_dev *pdev = to_pci_dev(fdev->dev);
+
+ pci_set_drvdata(pdev, NULL);
+
+ if (fdev->fw_handle != FUN_HCI_ID_INVALID) {
+ fun_res_destroy(fdev, FUN_ADMIN_OP_SWUPGRADE, 0,
+ fdev->fw_handle);
+ fdev->fw_handle = FUN_HCI_ID_INVALID;
+ }
+
+ fun_disable_admin_queue(fdev);
+
+ bitmap_free(fdev->irq_map);
+ pci_free_irq_vectors(pdev);
+
+ pci_disable_device(pdev);
+
+ fun_unmap_bars(fdev);
+}
+EXPORT_SYMBOL(fun_dev_disable);
+
+/* Perform basic initialization of a device, including
+ * - PCI config space setup and BAR0 mapping
+ * - interrupt management initialization
+ * - 1 admin queue setup
+ * - determination of some device limits, such as number of queues.
+ */
+int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
+ const struct fun_dev_params *areq, const char *name)
+{
+ int rc;
+
+ fdev->dev = &pdev->dev;
+ rc = fun_map_bars(fdev, name);
+ if (rc)
+ return rc;
+
+ rc = fun_set_dma_masks(fdev->dev);
+ if (rc)
+ goto unmap;
+
+ rc = pci_enable_device_mem(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Couldn't enable device, err %d\n", rc);
+ goto unmap;
+ }
+
+ rc = sanitize_dev(fdev);
+ if (rc)
+ goto disable_dev;
+
+ fdev->fw_handle = FUN_HCI_ID_INVALID;
+ fdev->q_depth = NVME_CAP_MQES(fdev->cap_reg) + 1;
+ fdev->db_stride = 1 << NVME_CAP_STRIDE(fdev->cap_reg);
+ fdev->dbs = fdev->bar + NVME_REG_DBS;
+
+ INIT_WORK(&fdev->service_task, fun_serv_handler);
+ fdev->service_flags = FUN_SERV_DISABLED;
+ fdev->serv_cb = areq->serv_cb;
+
+ rc = fun_alloc_irqs(pdev, areq->min_msix + 1); /* +1 for admin CQ */
+ if (rc < 0)
+ goto disable_dev;
+ fdev->num_irqs = rc;
+
+ rc = fun_alloc_irq_mgr(fdev);
+ if (rc)
+ goto free_irqs;
+
+ pci_set_master(pdev);
+ rc = fun_enable_admin_queue(fdev, areq);
+ if (rc)
+ goto free_irq_mgr;
+
+ rc = fun_get_dev_limits(fdev);
+ if (rc < 0)
+ goto disable_admin;
+
+ pci_save_state(pdev);
+ pci_set_drvdata(pdev, fdev);
+ pcie_print_link_status(pdev);
+ dev_dbg(fdev->dev, "q_depth %u, db_stride %u, max qid %d kern_end_qid %d\n",
+ fdev->q_depth, fdev->db_stride, fdev->max_qid,
+ fdev->kern_end_qid);
+ return 0;
+
+disable_admin:
+ fun_disable_admin_queue(fdev);
+free_irq_mgr:
+ bitmap_free(fdev->irq_map);
+free_irqs:
+ pci_free_irq_vectors(pdev);
+disable_dev:
+ pci_disable_device(pdev);
+unmap:
+ fun_unmap_bars(fdev);
+ return rc;
+}
+EXPORT_SYMBOL(fun_dev_enable);
+
+MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>");
+MODULE_DESCRIPTION("Core services driver for Fungible devices");
+MODULE_LICENSE("Dual BSD/GPL");
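fun_dev.c is the new Fungible core library: it maps BAR0, takes the NVMe-style controller registers (CAP/CC/CSTS/AQA/ASQ/ACQ) through reset and enable, runs a single admin queue whose command tags come from an sbitmap, offers both asynchronous and synchronous admin submission (the sync path waits on a completion and can abandon a timed-out command), and keeps a small bitmap-based IRQ allocator plus a service work item for process-context work. A rough sketch of how a client driver might use the exported API at probe time; the functions and struct fields come from fun_dev.h below, but the queue sizes and the "my_fun_drv" name are made up:

	struct fun_dev_params aqreq = {
		.cqe_size_log2 = 5,		/* 32 B admin CQEs (illustrative) */
		.sqe_size_log2 = 6,		/* 64 B admin SQEs (illustrative) */
		.cq_depth = 64,
		.sq_depth = 64,
		.min_msix = 2,
	};
	int rc;

	rc = fun_dev_enable(fdev, pdev, &aqreq, "my_fun_drv");
	if (rc)
		return rc;

	rc = fun_get_res_count(fdev, FUN_ADMIN_OP_EPCQ);	/* device CQ budget */
	if (rc < 0) {
		fun_dev_disable(fdev);
		return rc;
	}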
diff --git a/drivers/net/ethernet/fungible/funcore/fun_dev.h b/drivers/net/ethernet/fungible/funcore/fun_dev.h
new file mode 100644
index 000000000000..9e8c17ce8887
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_dev.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUNDEV_H
+#define _FUNDEV_H
+
+#include <linux/sbitmap.h>
+#include <linux/spinlock_types.h>
+#include <linux/workqueue.h>
+#include "fun_hci.h"
+
+struct pci_dev;
+struct fun_dev;
+struct fun_queue;
+struct fun_cmd_ctx;
+struct fun_queue_alloc_req;
+
+/* doorbell fields */
+enum {
+ FUN_DB_QIDX_S = 0,
+ FUN_DB_INTCOAL_ENTRIES_S = 16,
+ FUN_DB_INTCOAL_ENTRIES_M = 0x7f,
+ FUN_DB_INTCOAL_USEC_S = 23,
+ FUN_DB_INTCOAL_USEC_M = 0x7f,
+ FUN_DB_IRQ_S = 30,
+ FUN_DB_IRQ_F = 1 << FUN_DB_IRQ_S,
+ FUN_DB_IRQ_ARM_S = 31,
+ FUN_DB_IRQ_ARM_F = 1U << FUN_DB_IRQ_ARM_S
+};
+
+/* Callback for asynchronous admin commands.
+ * Invoked on reception of command response.
+ */
+typedef void (*fun_admin_callback_t)(struct fun_dev *fdev, void *rsp,
+ void *cb_data);
+
+/* Callback for events/notifications received by an admin queue. */
+typedef void (*fun_admin_event_cb)(struct fun_dev *fdev, void *cqe);
+
+/* Callback for pending work handled by the service task. */
+typedef void (*fun_serv_cb)(struct fun_dev *fd);
+
+/* service task flags */
+enum {
+ FUN_SERV_DISABLED, /* service task is disabled */
+ FUN_SERV_FIRST_AVAIL
+};
+
+/* Driver state associated with a PCI function. */
+struct fun_dev {
+ struct device *dev;
+
+ void __iomem *bar; /* start of BAR0 mapping */
+ u32 __iomem *dbs; /* start of doorbells in BAR0 mapping */
+
+ /* admin queue */
+ struct fun_queue *admin_q;
+ struct sbitmap_queue admin_sbq;
+ struct fun_cmd_ctx *cmd_ctx;
+ fun_admin_event_cb adminq_cb;
+ bool suppress_cmds; /* if set don't write commands to SQ */
+
+ /* address increment between consecutive doorbells, in 4B units */
+ unsigned int db_stride;
+
+ /* SW versions of device registers */
+ u32 cc_reg; /* CC register */
+ u64 cap_reg; /* CAPability register */
+
+ unsigned int q_depth; /* max queue depth supported by device */
+ unsigned int max_qid; /* = #queues - 1, separately for SQs and CQs */
+ unsigned int kern_end_qid; /* last qid in the kernel range + 1 */
+
+ unsigned int fw_handle;
+
+ /* IRQ manager */
+ unsigned int num_irqs;
+ unsigned int irqs_avail;
+ spinlock_t irqmgr_lock;
+ unsigned long *irq_map;
+
+ /* The service task handles work that needs a process context */
+ struct work_struct service_task;
+ unsigned long service_flags;
+ fun_serv_cb serv_cb;
+};
+
+struct fun_dev_params {
+ u8 cqe_size_log2; /* admin q CQE size */
+ u8 sqe_size_log2; /* admin q SQE size */
+
+ /* admin q depths */
+ u16 cq_depth;
+ u16 sq_depth;
+ u16 rq_depth;
+
+ u16 min_msix; /* min vectors needed by requesting driver */
+
+ fun_admin_event_cb event_cb;
+ fun_serv_cb serv_cb;
+};
+
+/* Return the BAR address of a doorbell. */
+static inline u32 __iomem *fun_db_addr(const struct fun_dev *fdev,
+ unsigned int db_index)
+{
+ return &fdev->dbs[db_index * fdev->db_stride];
+}
+
+/* Return the BAR address of an SQ doorbell. SQ and CQ DBs alternate,
+ * SQs have even DB indices.
+ */
+static inline u32 __iomem *fun_sq_db_addr(const struct fun_dev *fdev,
+ unsigned int sqid)
+{
+ return fun_db_addr(fdev, sqid * 2);
+}
+
+static inline u32 __iomem *fun_cq_db_addr(const struct fun_dev *fdev,
+ unsigned int cqid)
+{
+ return fun_db_addr(fdev, cqid * 2 + 1);
+}
+
+int fun_get_res_count(struct fun_dev *fdev, enum fun_admin_op res);
+int fun_res_destroy(struct fun_dev *fdev, enum fun_admin_op res,
+ unsigned int flags, u32 id);
+int fun_bind(struct fun_dev *fdev, enum fun_admin_bind_type type0,
+ unsigned int id0, enum fun_admin_bind_type type1,
+ unsigned int id1);
+
+int fun_submit_admin_cmd(struct fun_dev *fdev, struct fun_admin_req_common *cmd,
+ fun_admin_callback_t cb, void *cb_data, bool wait_ok);
+int fun_submit_admin_sync_cmd(struct fun_dev *fdev,
+ struct fun_admin_req_common *cmd, void *rsp,
+ size_t rspsize, unsigned int timeout);
+
+int fun_dev_enable(struct fun_dev *fdev, struct pci_dev *pdev,
+ const struct fun_dev_params *areq, const char *name);
+void fun_dev_disable(struct fun_dev *fdev);
+
+int fun_reserve_irqs(struct fun_dev *fdev, unsigned int nirqs,
+ u16 *irq_indices);
+void fun_release_irqs(struct fun_dev *fdev, unsigned int nirqs,
+ u16 *irq_indices);
+
+void fun_serv_stop(struct fun_dev *fd);
+void fun_serv_restart(struct fun_dev *fd);
+void fun_serv_sched(struct fun_dev *fd);
+
+#endif /* _FUNDEV_H */
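fun_dev.h mirrors the NVMe doorbell layout: dbs points at BAR0 + NVME_REG_DBS, db_stride is the CAP.DSTRD-derived spacing in 4-byte units, and SQ/CQ doorbells interleave so queue N uses indices 2N and 2N+1. A quick worked example of fun_cq_db_addr(), assuming the common db_stride == 1 case (CAP.DSTRD == 0):

	/* CQ 3 -> doorbell index 2*3 + 1 = 7, i.e. &fdev->dbs[7],
	 * which is 7 * 4 = 28 bytes past NVME_REG_DBS in BAR0.
	 */
	u32 __iomem *db = fun_cq_db_addr(fdev, 3);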
diff --git a/drivers/net/ethernet/fungible/funcore/fun_hci.h b/drivers/net/ethernet/fungible/funcore/fun_hci.h
new file mode 100644
index 000000000000..f21819670106
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_hci.h
@@ -0,0 +1,1242 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef __FUN_HCI_H
+#define __FUN_HCI_H
+
+enum {
+ FUN_HCI_ID_INVALID = 0xffffffff,
+};
+
+enum fun_admin_op {
+ FUN_ADMIN_OP_BIND = 0x1,
+ FUN_ADMIN_OP_EPCQ = 0x11,
+ FUN_ADMIN_OP_EPSQ = 0x12,
+ FUN_ADMIN_OP_PORT = 0x13,
+ FUN_ADMIN_OP_ETH = 0x14,
+ FUN_ADMIN_OP_VI = 0x15,
+ FUN_ADMIN_OP_SWUPGRADE = 0x1f,
+ FUN_ADMIN_OP_RSS = 0x21,
+ FUN_ADMIN_OP_ADI = 0x25,
+ FUN_ADMIN_OP_KTLS = 0x26,
+};
+
+enum {
+ FUN_REQ_COMMON_FLAG_RSP = 0x1,
+ FUN_REQ_COMMON_FLAG_HEAD_WB = 0x2,
+ FUN_REQ_COMMON_FLAG_INT = 0x4,
+ FUN_REQ_COMMON_FLAG_CQE_IN_RQBUF = 0x8,
+};
+
+struct fun_admin_req_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 rsvd0;
+ __be16 cid;
+};
+
+#define FUN_ADMIN_REQ_COMMON_INIT(_op, _len8, _flags, _suboff8, _cid) \
+ (struct fun_admin_req_common) { \
+ .op = (_op), .len8 = (_len8), .flags = cpu_to_be16(_flags), \
+ .suboff8 = (_suboff8), .cid = cpu_to_be16(_cid), \
+ }
+
+#define FUN_ADMIN_REQ_COMMON_INIT2(_op, _len) \
+ (struct fun_admin_req_common) { \
+ .op = (_op), .len8 = (_len) / 8, \
+ }
+
+struct fun_admin_rsp_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 ret;
+ __be16 cid;
+};
+
+struct fun_admin_write48_req {
+ __be64 key_to_data;
+};
+
+#define FUN_ADMIN_WRITE48_REQ_KEY_S 56U
+#define FUN_ADMIN_WRITE48_REQ_KEY_M 0xff
+#define FUN_ADMIN_WRITE48_REQ_KEY_P_NOSWAP(x) \
+ (((__u64)x) << FUN_ADMIN_WRITE48_REQ_KEY_S)
+
+#define FUN_ADMIN_WRITE48_REQ_DATA_S 0U
+#define FUN_ADMIN_WRITE48_REQ_DATA_M 0xffffffffffff
+#define FUN_ADMIN_WRITE48_REQ_DATA_P_NOSWAP(x) \
+ (((__u64)x) << FUN_ADMIN_WRITE48_REQ_DATA_S)
+
+#define FUN_ADMIN_WRITE48_REQ_INIT(key, data) \
+ (struct fun_admin_write48_req) { \
+ .key_to_data = cpu_to_be64( \
+ FUN_ADMIN_WRITE48_REQ_KEY_P_NOSWAP(key) | \
+ FUN_ADMIN_WRITE48_REQ_DATA_P_NOSWAP(data)), \
+ }
+
+struct fun_admin_write48_rsp {
+ __be64 key_to_data;
+};
+
+struct fun_admin_read48_req {
+ __be64 key_pack;
+};
+
+#define FUN_ADMIN_READ48_REQ_KEY_S 56U
+#define FUN_ADMIN_READ48_REQ_KEY_M 0xff
+#define FUN_ADMIN_READ48_REQ_KEY_P_NOSWAP(x) \
+ (((__u64)x) << FUN_ADMIN_READ48_REQ_KEY_S)
+
+#define FUN_ADMIN_READ48_REQ_INIT(key) \
+ (struct fun_admin_read48_req) { \
+ .key_pack = \
+ cpu_to_be64(FUN_ADMIN_READ48_REQ_KEY_P_NOSWAP(key)), \
+ }
+
+struct fun_admin_read48_rsp {
+ __be64 key_to_data;
+};
+
+#define FUN_ADMIN_READ48_RSP_KEY_S 56U
+#define FUN_ADMIN_READ48_RSP_KEY_M 0xff
+#define FUN_ADMIN_READ48_RSP_KEY_G(x) \
+ ((be64_to_cpu(x) >> FUN_ADMIN_READ48_RSP_KEY_S) & \
+ FUN_ADMIN_READ48_RSP_KEY_M)
+
+#define FUN_ADMIN_READ48_RSP_RET_S 48U
+#define FUN_ADMIN_READ48_RSP_RET_M 0xff
+#define FUN_ADMIN_READ48_RSP_RET_G(x) \
+ ((be64_to_cpu(x) >> FUN_ADMIN_READ48_RSP_RET_S) & \
+ FUN_ADMIN_READ48_RSP_RET_M)
+
+#define FUN_ADMIN_READ48_RSP_DATA_S 0U
+#define FUN_ADMIN_READ48_RSP_DATA_M 0xffffffffffff
+#define FUN_ADMIN_READ48_RSP_DATA_G(x) \
+ ((be64_to_cpu(x) >> FUN_ADMIN_READ48_RSP_DATA_S) & \
+ FUN_ADMIN_READ48_RSP_DATA_M)
+
+enum fun_admin_bind_type {
+ FUN_ADMIN_BIND_TYPE_EPCQ = 0x1,
+ FUN_ADMIN_BIND_TYPE_EPSQ = 0x2,
+ FUN_ADMIN_BIND_TYPE_PORT = 0x3,
+ FUN_ADMIN_BIND_TYPE_RSS = 0x4,
+ FUN_ADMIN_BIND_TYPE_VI = 0x5,
+ FUN_ADMIN_BIND_TYPE_ETH = 0x6,
+};
+
+struct fun_admin_bind_entry {
+ __u8 type;
+ __u8 rsvd0[3];
+ __be32 id;
+};
+
+#define FUN_ADMIN_BIND_ENTRY_INIT(_type, _id) \
+ (struct fun_admin_bind_entry) { \
+ .type = (_type), .id = cpu_to_be32(_id), \
+ }
+
+struct fun_admin_bind_req {
+ struct fun_admin_req_common common;
+ struct fun_admin_bind_entry entry[];
+};
+
+struct fun_admin_bind_rsp {
+ struct fun_admin_rsp_common bind_rsp_common;
+};
+
+struct fun_admin_simple_subop {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 data;
+};
+
+#define FUN_ADMIN_SIMPLE_SUBOP_INIT(_subop, _flags, _data) \
+ (struct fun_admin_simple_subop) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .data = cpu_to_be32(_data), \
+ }
+
+enum fun_admin_subop {
+ FUN_ADMIN_SUBOP_CREATE = 0x10,
+ FUN_ADMIN_SUBOP_DESTROY = 0x11,
+ FUN_ADMIN_SUBOP_MODIFY = 0x12,
+ FUN_ADMIN_SUBOP_RES_COUNT = 0x14,
+ FUN_ADMIN_SUBOP_READ = 0x15,
+ FUN_ADMIN_SUBOP_WRITE = 0x16,
+ FUN_ADMIN_SUBOP_NOTIFY = 0x17,
+};
+
+enum {
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR = 0x1,
+};
+
+struct fun_admin_generic_destroy_req {
+ struct fun_admin_req_common common;
+ struct fun_admin_simple_subop destroy;
+};
+
+struct fun_admin_generic_create_rsp {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+};
+
+struct fun_admin_res_count_req {
+ struct fun_admin_req_common common;
+ struct fun_admin_simple_subop count;
+};
+
+struct fun_admin_res_count_rsp {
+ struct fun_admin_rsp_common common;
+ struct fun_admin_simple_subop count;
+};
+
+enum {
+ FUN_ADMIN_EPCQ_CREATE_FLAG_INT_EPCQ = 0x2,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_ENTRY_WR_TPH = 0x4,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_SL_WR_TPH = 0x8,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_RQ = 0x80,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_INT_IQ = 0x100,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_INT_NOARM = 0x200,
+ FUN_ADMIN_EPCQ_CREATE_FLAG_DROP_ON_OVERFLOW = 0x400,
+};
+
+struct fun_admin_epcq_req {
+ struct fun_admin_req_common common;
+ union epcq_req_subop {
+ struct fun_admin_epcq_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 epsqid;
+ __u8 rsvd1;
+ __u8 entry_size_log2;
+ __be16 nentries;
+
+ __be64 address;
+
+ __be16 tailroom; /* per packet tailroom in bytes */
+ __u8 headroom; /* per packet headroom in 2B units */
+ __u8 intcoal_kbytes;
+ __u8 intcoal_holdoff_nentries;
+ __u8 intcoal_holdoff_usecs;
+ __be16 intid;
+
+ __be32 scan_start_id;
+ __be32 scan_end_id;
+
+ __be16 tph_cpuid;
+ __u8 rsvd3[6];
+ } create;
+
+ struct fun_admin_epcq_modify_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be16 headroom; /* headroom in bytes */
+ __u8 rsvd1[6];
+ } modify;
+ } u;
+};
+
+#define FUN_ADMIN_EPCQ_CREATE_REQ_INIT( \
+ _subop, _flags, _id, _epsqid, _entry_size_log2, _nentries, _address, \
+ _tailroom, _headroom, _intcoal_kbytes, _intcoal_holdoff_nentries, \
+ _intcoal_holdoff_usecs, _intid, _scan_start_id, _scan_end_id, \
+ _tph_cpuid) \
+ (struct fun_admin_epcq_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .epsqid = cpu_to_be32(_epsqid), \
+ .entry_size_log2 = _entry_size_log2, \
+ .nentries = cpu_to_be16(_nentries), \
+ .address = cpu_to_be64(_address), \
+ .tailroom = cpu_to_be16(_tailroom), .headroom = _headroom, \
+ .intcoal_kbytes = _intcoal_kbytes, \
+ .intcoal_holdoff_nentries = _intcoal_holdoff_nentries, \
+ .intcoal_holdoff_usecs = _intcoal_holdoff_usecs, \
+ .intid = cpu_to_be16(_intid), \
+ .scan_start_id = cpu_to_be32(_scan_start_id), \
+ .scan_end_id = cpu_to_be32(_scan_end_id), \
+ .tph_cpuid = cpu_to_be16(_tph_cpuid), \
+ }
+
+#define FUN_ADMIN_EPCQ_MODIFY_REQ_INIT(_subop, _flags, _id, _headroom) \
+ (struct fun_admin_epcq_modify_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .headroom = cpu_to_be16(_headroom), \
+ }
+
+enum {
+ FUN_ADMIN_EPSQ_CREATE_FLAG_INT_EPSQ = 0x2,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_ENTRY_RD_TPH = 0x4,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_GL_RD_TPH = 0x8,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS = 0x10,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS_TPH = 0x20,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_EPCQ = 0x40,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_RQ = 0x80,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_INT_IQ = 0x100,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_NO_CMPL = 0x200,
+};
+
+struct fun_admin_epsq_req {
+ struct fun_admin_req_common common;
+
+ union epsq_req_subop {
+ struct fun_admin_epsq_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 epcqid;
+ __u8 rsvd1;
+ __u8 entry_size_log2;
+ __be16 nentries;
+
+ __be64 address; /* DMA address of epsq */
+
+ __u8 rsvd2[3];
+ __u8 intcoal_kbytes;
+ __u8 intcoal_holdoff_nentries;
+ __u8 intcoal_holdoff_usecs;
+ __be16 intid;
+
+ __be32 scan_start_id;
+ __be32 scan_end_id;
+
+ __u8 rsvd3[4];
+ __be16 tph_cpuid;
+ __u8 buf_size_log2; /* log2 of RQ buffer size */
+ __u8 head_wb_size_log2; /* log2 of head write back size */
+
+ __be64 head_wb_address; /* DMA address for head writeback */
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_EPSQ_CREATE_REQ_INIT( \
+ _subop, _flags, _id, _epcqid, _entry_size_log2, _nentries, _address, \
+ _intcoal_kbytes, _intcoal_holdoff_nentries, _intcoal_holdoff_usecs, \
+ _intid, _scan_start_id, _scan_end_id, _tph_cpuid, _buf_size_log2, \
+ _head_wb_size_log2, _head_wb_address) \
+ (struct fun_admin_epsq_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .epcqid = cpu_to_be32(_epcqid), \
+ .entry_size_log2 = _entry_size_log2, \
+ .nentries = cpu_to_be16(_nentries), \
+ .address = cpu_to_be64(_address), \
+ .intcoal_kbytes = _intcoal_kbytes, \
+ .intcoal_holdoff_nentries = _intcoal_holdoff_nentries, \
+ .intcoal_holdoff_usecs = _intcoal_holdoff_usecs, \
+ .intid = cpu_to_be16(_intid), \
+ .scan_start_id = cpu_to_be32(_scan_start_id), \
+ .scan_end_id = cpu_to_be32(_scan_end_id), \
+ .tph_cpuid = cpu_to_be16(_tph_cpuid), \
+ .buf_size_log2 = _buf_size_log2, \
+ .head_wb_size_log2 = _head_wb_size_log2, \
+ .head_wb_address = cpu_to_be64(_head_wb_address), \
+ }
+
+enum {
+ FUN_PORT_CAP_OFFLOADS = 0x1,
+ FUN_PORT_CAP_STATS = 0x2,
+ FUN_PORT_CAP_LOOPBACK = 0x4,
+ FUN_PORT_CAP_VPORT = 0x8,
+ FUN_PORT_CAP_TX_PAUSE = 0x10,
+ FUN_PORT_CAP_RX_PAUSE = 0x20,
+ FUN_PORT_CAP_AUTONEG = 0x40,
+ FUN_PORT_CAP_RSS = 0x80,
+ FUN_PORT_CAP_VLAN_OFFLOADS = 0x100,
+ FUN_PORT_CAP_ENCAP_OFFLOADS = 0x200,
+ FUN_PORT_CAP_1000_X = 0x1000,
+ FUN_PORT_CAP_10G_R = 0x2000,
+ FUN_PORT_CAP_40G_R4 = 0x4000,
+ FUN_PORT_CAP_25G_R = 0x8000,
+ FUN_PORT_CAP_50G_R2 = 0x10000,
+ FUN_PORT_CAP_50G_R = 0x20000,
+ FUN_PORT_CAP_100G_R4 = 0x40000,
+ FUN_PORT_CAP_100G_R2 = 0x80000,
+ FUN_PORT_CAP_200G_R4 = 0x100000,
+ FUN_PORT_CAP_FEC_NONE = 0x10000000,
+ FUN_PORT_CAP_FEC_FC = 0x20000000,
+ FUN_PORT_CAP_FEC_RS = 0x40000000,
+};
+
+enum fun_port_brkout_mode {
+ FUN_PORT_BRKMODE_NA = 0x0,
+ FUN_PORT_BRKMODE_NONE = 0x1,
+ FUN_PORT_BRKMODE_2X = 0x2,
+ FUN_PORT_BRKMODE_4X = 0x3,
+};
+
+enum {
+ FUN_PORT_SPEED_AUTO = 0x0,
+ FUN_PORT_SPEED_10M = 0x1,
+ FUN_PORT_SPEED_100M = 0x2,
+ FUN_PORT_SPEED_1G = 0x4,
+ FUN_PORT_SPEED_10G = 0x8,
+ FUN_PORT_SPEED_25G = 0x10,
+ FUN_PORT_SPEED_40G = 0x20,
+ FUN_PORT_SPEED_50G = 0x40,
+ FUN_PORT_SPEED_100G = 0x80,
+ FUN_PORT_SPEED_200G = 0x100,
+};
+
+enum fun_port_duplex_mode {
+ FUN_PORT_FULL_DUPLEX = 0x0,
+ FUN_PORT_HALF_DUPLEX = 0x1,
+};
+
+enum {
+ FUN_PORT_FEC_NA = 0x0,
+ FUN_PORT_FEC_OFF = 0x1,
+ FUN_PORT_FEC_RS = 0x2,
+ FUN_PORT_FEC_FC = 0x4,
+ FUN_PORT_FEC_AUTO = 0x8,
+};
+
+enum fun_port_link_status {
+ FUN_PORT_LINK_UP = 0x0,
+ FUN_PORT_LINK_UP_WITH_ERR = 0x1,
+ FUN_PORT_LINK_DOWN = 0x2,
+};
+
+enum fun_port_led_type {
+ FUN_PORT_LED_OFF = 0x0,
+ FUN_PORT_LED_AMBER = 0x1,
+ FUN_PORT_LED_GREEN = 0x2,
+ FUN_PORT_LED_BEACON_ON = 0x3,
+ FUN_PORT_LED_BEACON_OFF = 0x4,
+};
+
+enum {
+ FUN_PORT_FLAG_MAC_DOWN = 0x1,
+ FUN_PORT_FLAG_MAC_UP = 0x2,
+ FUN_PORT_FLAG_NH_DOWN = 0x4,
+ FUN_PORT_FLAG_NH_UP = 0x8,
+};
+
+enum {
+ FUN_PORT_FLAG_ENABLE_NOTIFY = 0x1,
+};
+
+enum fun_port_lane_attr {
+ FUN_PORT_LANE_1 = 0x1,
+ FUN_PORT_LANE_2 = 0x2,
+ FUN_PORT_LANE_4 = 0x4,
+ FUN_PORT_LANE_SPEED_10G = 0x100,
+ FUN_PORT_LANE_SPEED_25G = 0x200,
+ FUN_PORT_LANE_SPEED_50G = 0x400,
+ FUN_PORT_LANE_SPLIT = 0x8000,
+};
+
+enum fun_admin_port_subop {
+ FUN_ADMIN_PORT_SUBOP_XCVR_READ = 0x23,
+ FUN_ADMIN_PORT_SUBOP_INETADDR_EVENT = 0x24,
+};
+
+enum fun_admin_port_key {
+ FUN_ADMIN_PORT_KEY_ILLEGAL = 0x0,
+ FUN_ADMIN_PORT_KEY_MTU = 0x1,
+ FUN_ADMIN_PORT_KEY_FEC = 0x2,
+ FUN_ADMIN_PORT_KEY_SPEED = 0x3,
+ FUN_ADMIN_PORT_KEY_DEBOUNCE = 0x4,
+ FUN_ADMIN_PORT_KEY_DUPLEX = 0x5,
+ FUN_ADMIN_PORT_KEY_MACADDR = 0x6,
+ FUN_ADMIN_PORT_KEY_LINKMODE = 0x7,
+ FUN_ADMIN_PORT_KEY_BREAKOUT = 0x8,
+ FUN_ADMIN_PORT_KEY_ENABLE = 0x9,
+ FUN_ADMIN_PORT_KEY_DISABLE = 0xa,
+ FUN_ADMIN_PORT_KEY_ERR_DISABLE = 0xb,
+ FUN_ADMIN_PORT_KEY_CAPABILITIES = 0xc,
+ FUN_ADMIN_PORT_KEY_LP_CAPABILITIES = 0xd,
+ FUN_ADMIN_PORT_KEY_STATS_DMA_LOW = 0xe,
+ FUN_ADMIN_PORT_KEY_STATS_DMA_HIGH = 0xf,
+ FUN_ADMIN_PORT_KEY_LANE_ATTRS = 0x10,
+ FUN_ADMIN_PORT_KEY_LED = 0x11,
+ FUN_ADMIN_PORT_KEY_ADVERT = 0x12,
+};
+
+struct fun_subop_imm {
+ __u8 subop; /* see fun_data_subop enum */
+ __u8 flags;
+ __u8 nsgl;
+ __u8 rsvd0;
+ __be32 len;
+
+ __u8 data[];
+};
+
+enum fun_subop_sgl_flags {
+ FUN_SUBOP_SGL_USE_OFF8 = 0x1,
+ FUN_SUBOP_FLAG_FREE_BUF = 0x2,
+ FUN_SUBOP_FLAG_IS_REFBUF = 0x4,
+ FUN_SUBOP_SGL_FLAG_LOCAL = 0x8,
+};
+
+enum fun_data_op {
+ FUN_DATAOP_INVALID = 0x0,
+ FUN_DATAOP_SL = 0x1, /* scatter */
+ FUN_DATAOP_GL = 0x2, /* gather */
+ FUN_DATAOP_SGL = 0x3, /* scatter-gather */
+ FUN_DATAOP_IMM = 0x4, /* immediate data */
+ FUN_DATAOP_RQBUF = 0x8, /* rq buffer */
+};
+
+struct fun_dataop_gl {
+ __u8 subop;
+ __u8 flags;
+ __be16 sgl_off;
+ __be32 sgl_len;
+
+ __be64 sgl_data;
+};
+
+static inline void fun_dataop_gl_init(struct fun_dataop_gl *s, u8 flags,
+ u16 sgl_off, u32 sgl_len, u64 sgl_data)
+{
+ s->subop = FUN_DATAOP_GL;
+ s->flags = flags;
+ s->sgl_off = cpu_to_be16(sgl_off);
+ s->sgl_len = cpu_to_be32(sgl_len);
+ s->sgl_data = cpu_to_be64(sgl_data);
+}
+
+struct fun_dataop_imm {
+ __u8 subop;
+ __u8 flags;
+ __be16 rsvd0;
+ __be32 sgl_len;
+};
+
+struct fun_subop_sgl {
+ __u8 subop;
+ __u8 flags;
+ __u8 nsgl;
+ __u8 rsvd0;
+ __be32 sgl_len;
+
+ __be64 sgl_data;
+};
+
+#define FUN_SUBOP_SGL_INIT(_subop, _flags, _nsgl, _sgl_len, _sgl_data) \
+ (struct fun_subop_sgl) { \
+ .subop = (_subop), .flags = (_flags), .nsgl = (_nsgl), \
+ .sgl_len = cpu_to_be32(_sgl_len), \
+ .sgl_data = cpu_to_be64(_sgl_data), \
+ }
+
+struct fun_dataop_rqbuf {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 cid;
+ __be32 bufoff;
+};
+
+struct fun_dataop_hdr {
+ __u8 nsgl;
+ __u8 flags;
+ __u8 ngather;
+ __u8 nscatter;
+ __be32 total_len;
+
+ struct fun_dataop_imm imm[];
+};
+
+#define FUN_DATAOP_HDR_INIT(_nsgl, _flags, _ngather, _nscatter, _total_len) \
+ (struct fun_dataop_hdr) { \
+ .nsgl = _nsgl, .flags = _flags, .ngather = _ngather, \
+ .nscatter = _nscatter, .total_len = cpu_to_be32(_total_len), \
+ }
+
+enum fun_port_inetaddr_event_type {
+ FUN_PORT_INETADDR_ADD = 0x1,
+ FUN_PORT_INETADDR_DEL = 0x2,
+};
+
+enum fun_port_inetaddr_addr_family {
+ FUN_PORT_INETADDR_IPV4 = 0x1,
+ FUN_PORT_INETADDR_IPV6 = 0x2,
+};
+
+struct fun_admin_port_req {
+ struct fun_admin_req_common common;
+
+ union port_req_subop {
+ struct fun_admin_port_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+ } create;
+ struct fun_admin_port_write_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id; /* portid */
+
+ struct fun_admin_write48_req write48[];
+ } write;
+ struct fun_admin_port_read_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id; /* portid */
+
+ struct fun_admin_read48_req read48[];
+ } read;
+ struct fun_admin_port_xcvr_read_req {
+ u8 subop;
+ u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ u8 bank;
+ u8 page;
+ u8 offset;
+ u8 length;
+ u8 dev_addr;
+ u8 rsvd1[3];
+ } xcvr_read;
+ struct fun_admin_port_inetaddr_event_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __u8 event_type;
+ __u8 addr_family;
+ __be32 id;
+
+ __u8 addr[];
+ } inetaddr_event;
+ } u;
+};
+
+#define FUN_ADMIN_PORT_CREATE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_port_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#define FUN_ADMIN_PORT_WRITE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_port_write_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#define FUN_ADMIN_PORT_READ_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_port_read_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#define FUN_ADMIN_PORT_XCVR_READ_REQ_INIT(_flags, _id, _bank, _page, \
+ _offset, _length, _dev_addr) \
+ ((struct fun_admin_port_xcvr_read_req) { \
+ .subop = FUN_ADMIN_PORT_SUBOP_XCVR_READ, \
+ .flags = cpu_to_be16(_flags), .id = cpu_to_be32(_id), \
+ .bank = (_bank), .page = (_page), .offset = (_offset), \
+ .length = (_length), .dev_addr = (_dev_addr), \
+ })
+
+struct fun_admin_port_rsp {
+ struct fun_admin_rsp_common common;
+
+ union port_rsp_subop {
+ struct fun_admin_port_create_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+
+ __be16 lport;
+ __u8 rsvd1[6];
+ } create;
+ struct fun_admin_port_write_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id; /* portid */
+
+ struct fun_admin_write48_rsp write48[];
+ } write;
+ struct fun_admin_port_read_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id; /* portid */
+
+ struct fun_admin_read48_rsp read48[];
+ } read;
+ struct fun_admin_port_inetaddr_event_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id; /* portid */
+ } inetaddr_event;
+ } u;
+};
+
+struct fun_admin_port_xcvr_read_rsp {
+ struct fun_admin_rsp_common common;
+
+ u8 subop;
+ u8 rsvd0[3];
+ __be32 id;
+
+ u8 bank;
+ u8 page;
+ u8 offset;
+ u8 length;
+ u8 dev_addr;
+ u8 rsvd1[3];
+
+ u8 data[128];
+};
+
+enum fun_xcvr_type {
+ FUN_XCVR_BASET = 0x0,
+ FUN_XCVR_CU = 0x1,
+ FUN_XCVR_SMF = 0x2,
+ FUN_XCVR_MMF = 0x3,
+ FUN_XCVR_AOC = 0x4,
+ FUN_XCVR_SFPP = 0x10, /* SFP+ or later */
+ FUN_XCVR_QSFPP = 0x11, /* QSFP+ or later */
+ FUN_XCVR_QSFPDD = 0x12, /* QSFP-DD */
+};
+
+struct fun_admin_port_notif {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 id;
+ __be32 speed; /* in 10 Mbps units */
+
+ __u8 link_state;
+ __u8 missed_events;
+ __u8 link_down_reason;
+ __u8 xcvr_type;
+ __u8 flow_ctrl;
+ __u8 fec;
+ __u8 active_lanes;
+ __u8 rsvd1;
+
+ __be64 advertising;
+
+ __be64 lp_advertising;
+};
+
+enum fun_eth_rss_const {
+ FUN_ETH_RSS_MAX_KEY_SIZE = 0x28,
+ FUN_ETH_RSS_MAX_INDIR_ENT = 0x40,
+};
+
+enum fun_eth_hash_alg {
+ FUN_ETH_RSS_ALG_INVALID = 0x0,
+ FUN_ETH_RSS_ALG_TOEPLITZ = 0x1,
+ FUN_ETH_RSS_ALG_CRC32 = 0x2,
+};
+
+struct fun_admin_rss_req {
+ struct fun_admin_req_common common;
+
+ union rss_req_subop {
+ struct fun_admin_rss_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 rsvd1;
+ __be32 viid; /* VI flow id */
+
+ __be64 metadata[1];
+
+ __u8 alg;
+ __u8 keylen;
+ __u8 indir_nent;
+ __u8 rsvd2;
+ __be16 key_off;
+ __be16 indir_off;
+
+ struct fun_dataop_hdr dataop;
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_RSS_CREATE_REQ_INIT(_subop, _flags, _id, _viid, _alg, \
+ _keylen, _indir_nent, _key_off, \
+ _indir_off) \
+ (struct fun_admin_rss_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .viid = cpu_to_be32(_viid), \
+ .alg = _alg, .keylen = _keylen, .indir_nent = _indir_nent, \
+ .key_off = cpu_to_be16(_key_off), \
+ .indir_off = cpu_to_be16(_indir_off), \
+ }
+
+struct fun_admin_vi_req {
+ struct fun_admin_req_common common;
+
+ union vi_req_subop {
+ struct fun_admin_vi_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 rsvd1;
+ __be32 portid; /* port flow id */
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_VI_CREATE_REQ_INIT(_subop, _flags, _id, _portid) \
+ (struct fun_admin_vi_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .portid = cpu_to_be32(_portid), \
+ }
+
+struct fun_admin_eth_req {
+ struct fun_admin_req_common common;
+
+ union eth_req_subop {
+ struct fun_admin_eth_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 rsvd1;
+ __be32 portid; /* port flow id */
+ } create;
+ } u;
+};
+
+#define FUN_ADMIN_ETH_CREATE_REQ_INIT(_subop, _flags, _id, _portid) \
+ (struct fun_admin_eth_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .portid = cpu_to_be32(_portid), \
+ }
+
+enum {
+ FUN_ADMIN_SWU_UPGRADE_FLAG_INIT = 0x10,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_COMPLETE = 0x20,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_DOWNGRADE = 0x40,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_ACTIVE_IMAGE = 0x80,
+ FUN_ADMIN_SWU_UPGRADE_FLAG_ASYNC = 0x1,
+};
+
+enum fun_admin_swu_subop {
+ FUN_ADMIN_SWU_SUBOP_GET_VERSION = 0x20,
+ FUN_ADMIN_SWU_SUBOP_UPGRADE = 0x21,
+ FUN_ADMIN_SWU_SUBOP_UPGRADE_DATA = 0x22,
+ FUN_ADMIN_SWU_SUBOP_GET_ALL_VERSIONS = 0x23,
+};
+
+struct fun_admin_swu_req {
+ struct fun_admin_req_common common;
+
+ union swu_req_subop {
+ struct fun_admin_swu_create_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+ } create;
+ struct fun_admin_swu_upgrade_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 fourcc;
+ __be32 rsvd1;
+
+ __be64 image_size; /* upgrade image length */
+ } upgrade;
+ struct fun_admin_swu_upgrade_data_req {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 offset; /* offset of data in this command */
+ __be32 size; /* total size of data in this command */
+ } upgrade_data;
+ } u;
+
+ struct fun_subop_sgl sgl[]; /* in, out buffers through sgl */
+};
+
+#define FUN_ADMIN_SWU_CREATE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_swu_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#define FUN_ADMIN_SWU_UPGRADE_REQ_INIT(_subop, _flags, _id, _fourcc, \
+ _image_size) \
+ (struct fun_admin_swu_upgrade_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .fourcc = cpu_to_be32(_fourcc), \
+ .image_size = cpu_to_be64(_image_size), \
+ }
+
+#define FUN_ADMIN_SWU_UPGRADE_DATA_REQ_INIT(_subop, _flags, _id, _offset, \
+ _size) \
+ (struct fun_admin_swu_upgrade_data_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .offset = cpu_to_be32(_offset), \
+ .size = cpu_to_be32(_size), \
+ }
+
+struct fun_admin_swu_rsp {
+ struct fun_admin_rsp_common common;
+
+ union swu_rsp_subop {
+ struct fun_admin_swu_create_rsp {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+ } create;
+ struct fun_admin_swu_upgrade_rsp {
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+
+ __be32 fourcc;
+ __be32 status;
+
+ __be32 progress;
+ __be32 unused;
+ } upgrade;
+ struct fun_admin_swu_upgrade_data_rsp {
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be32 offset;
+ __be32 size;
+ } upgrade_data;
+ } u;
+};
+
+enum fun_ktls_version {
+ FUN_KTLS_TLSV2 = 0x20,
+ FUN_KTLS_TLSV3 = 0x30,
+};
+
+enum fun_ktls_cipher {
+ FUN_KTLS_CIPHER_AES_GCM_128 = 0x33,
+ FUN_KTLS_CIPHER_AES_GCM_256 = 0x34,
+ FUN_KTLS_CIPHER_AES_CCM_128 = 0x35,
+ FUN_KTLS_CIPHER_CHACHA20_POLY1305 = 0x36,
+};
+
+enum fun_ktls_modify_flags {
+ FUN_KTLS_MODIFY_REMOVE = 0x1,
+};
+
+struct fun_admin_ktls_create_req {
+ struct fun_admin_req_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+};
+
+#define FUN_ADMIN_KTLS_CREATE_REQ_INIT(_subop, _flags, _id) \
+ (struct fun_admin_ktls_create_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), \
+ }
+
+struct fun_admin_ktls_create_rsp {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+};
+
+struct fun_admin_ktls_modify_req {
+ struct fun_admin_req_common common;
+
+ __u8 subop;
+ __u8 rsvd0;
+ __be16 flags;
+ __be32 id;
+
+ __be64 tlsid;
+
+ __be32 tcp_seq;
+ __u8 version;
+ __u8 cipher;
+ __u8 rsvd1[2];
+
+ __u8 record_seq[8];
+
+ __u8 key[32];
+
+ __u8 iv[16];
+
+ __u8 salt[8];
+};
+
+#define FUN_ADMIN_KTLS_MODIFY_REQ_INIT(_subop, _flags, _id, _tlsid, _tcp_seq, \
+ _version, _cipher) \
+ (struct fun_admin_ktls_modify_req) { \
+ .subop = (_subop), .flags = cpu_to_be16(_flags), \
+ .id = cpu_to_be32(_id), .tlsid = cpu_to_be64(_tlsid), \
+ .tcp_seq = cpu_to_be32(_tcp_seq), .version = _version, \
+ .cipher = _cipher, \
+ }
+
+struct fun_admin_ktls_modify_rsp {
+ struct fun_admin_rsp_common common;
+
+ __u8 subop;
+ __u8 rsvd0[3];
+ __be32 id;
+
+ __be64 tlsid;
+};
+
+struct fun_req_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 rsvd0;
+ __be16 cid;
+};
+
+struct fun_rsp_common {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 ret;
+ __be16 cid;
+};
+
+struct fun_cqe_info {
+ __be16 sqhd;
+ __be16 sqid;
+ __be16 cid;
+ __be16 sf_p;
+};
+
+enum fun_eprq_def {
+ FUN_EPRQ_PKT_ALIGN = 0x80,
+};
+
+struct fun_eprq_rqbuf {
+ __be64 bufaddr;
+};
+
+#define FUN_EPRQ_RQBUF_INIT(_bufaddr) \
+ (struct fun_eprq_rqbuf) { \
+ .bufaddr = cpu_to_be64(_bufaddr), \
+ }
+
+enum fun_eth_op {
+ FUN_ETH_OP_TX = 0x1,
+ FUN_ETH_OP_RX = 0x2,
+};
+
+enum {
+ FUN_ETH_OFFLOAD_EN = 0x8000,
+ FUN_ETH_OUTER_EN = 0x4000,
+ FUN_ETH_INNER_LSO = 0x2000,
+ FUN_ETH_INNER_TSO = 0x1000,
+ FUN_ETH_OUTER_IPV6 = 0x800,
+ FUN_ETH_OUTER_UDP = 0x400,
+ FUN_ETH_INNER_IPV6 = 0x200,
+ FUN_ETH_INNER_UDP = 0x100,
+ FUN_ETH_UPDATE_OUTER_L3_LEN = 0x80,
+ FUN_ETH_UPDATE_OUTER_L3_CKSUM = 0x40,
+ FUN_ETH_UPDATE_OUTER_L4_LEN = 0x20,
+ FUN_ETH_UPDATE_OUTER_L4_CKSUM = 0x10,
+ FUN_ETH_UPDATE_INNER_L3_LEN = 0x8,
+ FUN_ETH_UPDATE_INNER_L3_CKSUM = 0x4,
+ FUN_ETH_UPDATE_INNER_L4_LEN = 0x2,
+ FUN_ETH_UPDATE_INNER_L4_CKSUM = 0x1,
+};
+
+struct fun_eth_offload {
+ __be16 flags; /* combination of above flags */
+ __be16 mss; /* TSO max seg size */
+ __be16 tcp_doff_flags; /* TCP data offset + flags 16b word */
+ __be16 vlan;
+
+ __be16 inner_l3_off; /* Inner L3 header offset */
+ __be16 inner_l4_off; /* Inner L4 header offset */
+ __be16 outer_l3_off; /* Outer L3 header offset */
+ __be16 outer_l4_off; /* Outer L4 header offset */
+};
+
+static inline void fun_eth_offload_init(struct fun_eth_offload *s, u16 flags,
+ u16 mss, __be16 tcp_doff_flags,
+ __be16 vlan, u16 inner_l3_off,
+ u16 inner_l4_off, u16 outer_l3_off,
+ u16 outer_l4_off)
+{
+ s->flags = cpu_to_be16(flags);
+ s->mss = cpu_to_be16(mss);
+ s->tcp_doff_flags = tcp_doff_flags;
+ s->vlan = vlan;
+ s->inner_l3_off = cpu_to_be16(inner_l3_off);
+ s->inner_l4_off = cpu_to_be16(inner_l4_off);
+ s->outer_l3_off = cpu_to_be16(outer_l3_off);
+ s->outer_l4_off = cpu_to_be16(outer_l4_off);
+}
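+
+/* Illustrative sketch (not part of the interface): a TX path requesting
+ * inner TCP checksum offload on a plain, non-encapsulated packet might fill
+ * the offload block roughly as below. The flag combination and the header
+ * offsets are hypothetical; a real caller derives them from the skb:
+ *
+ *	struct fun_eth_offload ofl;
+ *
+ *	fun_eth_offload_init(&ofl,
+ *			     FUN_ETH_OFFLOAD_EN | FUN_ETH_UPDATE_INNER_L4_CKSUM,
+ *			     0, 0, 0, ETH_HLEN,
+ *			     ETH_HLEN + sizeof(struct iphdr), 0, 0);
+ */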
+
+struct fun_eth_tls {
+ __be64 tlsid;
+};
+
+enum {
+ FUN_ETH_TX_TLS = 0x8000,
+};
+
+struct fun_eth_tx_req {
+ __u8 op;
+ __u8 len8;
+ __be16 flags;
+ __u8 suboff8;
+ __u8 repr_idn;
+ __be16 encap_proto;
+
+ struct fun_eth_offload offload;
+
+ struct fun_dataop_hdr dataop;
+};
+
+struct fun_eth_rx_cv {
+ __be16 il4_prot_to_l2_type;
+};
+
+#define FUN_ETH_RX_CV_IL4_PROT_S 13U
+#define FUN_ETH_RX_CV_IL4_PROT_M 0x3
+
+#define FUN_ETH_RX_CV_IL3_PROT_S 11U
+#define FUN_ETH_RX_CV_IL3_PROT_M 0x3
+
+#define FUN_ETH_RX_CV_OL4_PROT_S 8U
+#define FUN_ETH_RX_CV_OL4_PROT_M 0x7
+
+#define FUN_ETH_RX_CV_ENCAP_TYPE_S 6U
+#define FUN_ETH_RX_CV_ENCAP_TYPE_M 0x3
+
+#define FUN_ETH_RX_CV_OL3_PROT_S 4U
+#define FUN_ETH_RX_CV_OL3_PROT_M 0x3
+
+#define FUN_ETH_RX_CV_VLAN_TYPE_S 3U
+#define FUN_ETH_RX_CV_VLAN_TYPE_M 0x1
+
+#define FUN_ETH_RX_CV_L2_TYPE_S 2U
+#define FUN_ETH_RX_CV_L2_TYPE_M 0x1
+
+enum fun_rx_cv {
+ FUN_RX_CV_NONE = 0x0,
+ FUN_RX_CV_IP = 0x2,
+ FUN_RX_CV_IP6 = 0x3,
+ FUN_RX_CV_TCP = 0x2,
+ FUN_RX_CV_UDP = 0x3,
+ FUN_RX_CV_VXLAN = 0x2,
+ FUN_RX_CV_MPLS = 0x3,
+};
+
+struct fun_eth_cqe {
+ __u8 op;
+ __u8 len8;
+ __u8 nsgl;
+ __u8 repr_idn;
+ __be32 pkt_len;
+
+ __be64 timestamp;
+
+ __be16 pkt_cv;
+ __be16 rsvd0;
+ __be32 hash;
+
+ __be16 encap_proto;
+ __be16 vlan;
+ __be32 rsvd1;
+
+ __be32 buf_offset;
+ __be16 headroom;
+ __be16 csum;
+};
+
+enum fun_admin_adi_attr {
+ FUN_ADMIN_ADI_ATTR_MACADDR = 0x1,
+ FUN_ADMIN_ADI_ATTR_VLAN = 0x2,
+ FUN_ADMIN_ADI_ATTR_RATE = 0x3,
+};
+
+struct fun_adi_param {
+ union adi_param {
+ struct fun_adi_mac {
+ __be64 addr;
+ } mac;
+ struct fun_adi_vlan {
+ __be32 rsvd;
+ __be16 eth_type;
+ __be16 tci;
+ } vlan;
+ struct fun_adi_rate {
+ __be32 rsvd;
+ __be32 tx_mbps;
+ } rate;
+ } u;
+};
+
+#define FUN_ADI_MAC_INIT(_addr) \
+ (struct fun_adi_mac) { \
+ .addr = cpu_to_be64(_addr), \
+ }
+
+#define FUN_ADI_VLAN_INIT(_eth_type, _tci) \
+ (struct fun_adi_vlan) { \
+ .eth_type = cpu_to_be16(_eth_type), .tci = cpu_to_be16(_tci), \
+ }
+
+#define FUN_ADI_RATE_INIT(_tx_mbps) \
+ (struct fun_adi_rate) { \
+ .tx_mbps = cpu_to_be32(_tx_mbps), \
+ }
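+
+/* Illustrative sketch: assigning a default VLAN to an ADI could build the
+ * parameter with the initializer above (the VLAN ID 100 is hypothetical):
+ *
+ *	struct fun_adi_param param = {
+ *		.u.vlan = FUN_ADI_VLAN_INIT(ETH_P_8021Q, 100),
+ *	};
+ */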
+
+struct fun_admin_adi_req {
+ struct fun_admin_req_common common;
+
+ union adi_req_subop {
+ struct fun_admin_adi_write_req {
+ __u8 subop;
+ __u8 attribute;
+ __be16 rsvd;
+ __be32 id;
+
+ struct fun_adi_param param;
+ } write;
+ } u;
+};
+
+#define FUN_ADMIN_ADI_WRITE_REQ_INIT(_subop, _attribute, _id) \
+ (struct fun_admin_adi_write_req) { \
+ .subop = (_subop), .attribute = (_attribute), \
+ .id = cpu_to_be32(_id), \
+ }
+
+#endif /* __FUN_HCI_H */
diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.c b/drivers/net/ethernet/fungible/funcore/fun_queue.c
new file mode 100644
index 000000000000..8ab9f68434f5
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_queue.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/log2.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "fun_dev.h"
+#include "fun_queue.h"
+
+/* Allocate memory for a queue. This includes the memory for the HW descriptor
+ * ring, an optional 64b HW write-back area, and an optional SW state ring.
+ * Returns the virtual and DMA addresses of the HW ring, the VA of the SW ring,
+ * and the VA of the write-back area.
+ */
+void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
+ size_t hw_desc_sz, size_t sw_desc_sz, bool wb,
+ int numa_node, dma_addr_t *dma_addr, void **sw_va,
+ volatile __be64 **wb_va)
+{
+ int dev_node = dev_to_node(dma_dev);
+ size_t dma_sz;
+ void *va;
+
+ if (numa_node == NUMA_NO_NODE)
+ numa_node = dev_node;
+
+ /* Place optional write-back area at end of descriptor ring. */
+ dma_sz = hw_desc_sz * depth;
+ if (wb)
+ dma_sz += sizeof(u64);
+
+ set_dev_node(dma_dev, numa_node);
+ va = dma_alloc_coherent(dma_dev, dma_sz, dma_addr, GFP_KERNEL);
+ set_dev_node(dma_dev, dev_node);
+ if (!va)
+ return NULL;
+
+ if (sw_desc_sz) {
+ *sw_va = kvzalloc_node(sw_desc_sz * depth, GFP_KERNEL,
+ numa_node);
+ if (!*sw_va) {
+ dma_free_coherent(dma_dev, dma_sz, va, *dma_addr);
+ return NULL;
+ }
+ }
+
+ if (wb)
+ *wb_va = va + dma_sz - sizeof(u64);
+ return va;
+}
+EXPORT_SYMBOL_GPL(fun_alloc_ring_mem);
+
+void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
+ bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va)
+{
+ if (hw_va) {
+ size_t sz = depth * hw_desc_sz;
+
+ if (wb)
+ sz += sizeof(u64);
+ dma_free_coherent(dma_dev, sz, hw_va, dma_addr);
+ }
+ kvfree(sw_va);
+}
+EXPORT_SYMBOL_GPL(fun_free_ring_mem);
+
+/* Prepare and issue an admin command to create an SQ on the device with the
+ * provided parameters. If the queue ID is auto-allocated by the device it is
+ * returned in *sqidp.
+ */
+int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
+ u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
+ u8 coal_nentries, u8 coal_usec, u32 irq_num,
+ u32 scan_start_id, u32 scan_end_id,
+ u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp)
+{
+ union {
+ struct fun_admin_epsq_req req;
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ dma_addr_t wb_addr;
+ u32 hw_qid;
+ int rc;
+
+ if (sq_depth > fdev->q_depth)
+ return -EINVAL;
+ if (flags & FUN_ADMIN_EPSQ_CREATE_FLAG_RQ)
+ sqe_size_log2 = ilog2(sizeof(struct fun_eprq_rqbuf));
+
+ wb_addr = dma_addr + (sq_depth << sqe_size_log2);
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPSQ,
+ sizeof(cmd.req));
+ cmd.req.u.create =
+ FUN_ADMIN_EPSQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags,
+ sqid, cqid, sqe_size_log2,
+ sq_depth - 1, dma_addr, 0,
+ coal_nentries, coal_usec,
+ irq_num, scan_start_id,
+ scan_end_id, 0,
+ rq_buf_size_log2,
+ ilog2(sizeof(u64)), wb_addr);
+
+ rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common,
+ &cmd.rsp, sizeof(cmd.rsp), 0);
+ if (rc)
+ return rc;
+
+ hw_qid = be32_to_cpu(cmd.rsp.id);
+ *dbp = fun_sq_db_addr(fdev, hw_qid);
+ if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR)
+ *sqidp = hw_qid;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(fun_sq_create);
+
+/* Prepare and issue an admin command to create a CQ on the device with the
+ * provided parameters. If the queue ID is auto-allocated by the device it is
+ * returned in *cqidp.
+ */
+int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
+ u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
+ u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
+ u32 irq_num, u32 scan_start_id, u32 scan_end_id, u32 *cqidp,
+ u32 __iomem **dbp)
+{
+ union {
+ struct fun_admin_epcq_req req;
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ u32 hw_qid;
+ int rc;
+
+ if (cq_depth > fdev->q_depth)
+ return -EINVAL;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ,
+ sizeof(cmd.req));
+ cmd.req.u.create =
+ FUN_ADMIN_EPCQ_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, flags,
+ cqid, rqid, cqe_size_log2,
+ cq_depth - 1, dma_addr, tailroom,
+ headroom / 2, 0, coal_nentries,
+ coal_usec, irq_num,
+ scan_start_id, scan_end_id, 0);
+
+ rc = fun_submit_admin_sync_cmd(fdev, &cmd.req.common,
+ &cmd.rsp, sizeof(cmd.rsp), 0);
+ if (rc)
+ return rc;
+
+ hw_qid = be32_to_cpu(cmd.rsp.id);
+ *dbp = fun_cq_db_addr(fdev, hw_qid);
+ if (flags & FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR)
+ *cqidp = hw_qid;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(fun_cq_create);
+
+static bool fun_sq_is_head_wb(const struct fun_queue *funq)
+{
+ return funq->sq_flags & FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS;
+}
+
+static void fun_clean_rq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ struct fun_rq_info *rqinfo;
+ unsigned int i;
+
+ for (i = 0; i < funq->rq_depth; i++) {
+ rqinfo = &funq->rq_info[i];
+ if (rqinfo->page) {
+ dma_unmap_page(fdev->dev, rqinfo->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ put_page(rqinfo->page);
+ rqinfo->page = NULL;
+ }
+ }
+}
+
+static int fun_fill_rq(struct fun_queue *funq)
+{
+ struct device *dev = funq->fdev->dev;
+ int i, node = dev_to_node(dev);
+ struct fun_rq_info *rqinfo;
+
+ for (i = 0; i < funq->rq_depth; i++) {
+ rqinfo = &funq->rq_info[i];
+ rqinfo->page = alloc_pages_node(node, GFP_KERNEL, 0);
+ if (unlikely(!rqinfo->page))
+ return -ENOMEM;
+
+ rqinfo->dma = dma_map_page(dev, rqinfo->page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, rqinfo->dma))) {
+ put_page(rqinfo->page);
+ rqinfo->page = NULL;
+ return -ENOMEM;
+ }
+
+ funq->rqes[i] = FUN_EPRQ_RQBUF_INIT(rqinfo->dma);
+ }
+
+ funq->rq_tail = funq->rq_depth - 1;
+ return 0;
+}
+
+static void fun_rq_update_pos(struct fun_queue *funq, int buf_offset)
+{
+ if (buf_offset <= funq->rq_buf_offset) {
+ struct fun_rq_info *rqinfo = &funq->rq_info[funq->rq_buf_idx];
+ struct device *dev = funq->fdev->dev;
+
+ dma_sync_single_for_device(dev, rqinfo->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ funq->num_rqe_to_fill++;
+ if (++funq->rq_buf_idx == funq->rq_depth)
+ funq->rq_buf_idx = 0;
+ }
+ funq->rq_buf_offset = buf_offset;
+}
+
+/* Given a command response with data scattered across >= 1 RQ buffers, return
+ * a pointer to a contiguous buffer containing all the data. If the data is in
+ * one RQ buffer the start address within that buffer is returned, otherwise a
+ * new buffer is allocated and the data is gathered into it.
+ */
+static void *fun_data_from_rq(struct fun_queue *funq,
+ const struct fun_rsp_common *rsp, bool *need_free)
+{
+ u32 bufoff, total_len, remaining, fragsize, dataoff;
+ struct device *dma_dev = funq->fdev->dev;
+ const struct fun_dataop_rqbuf *databuf;
+ const struct fun_dataop_hdr *dataop;
+ const struct fun_rq_info *rqinfo;
+ void *data;
+
+ dataop = (void *)rsp + rsp->suboff8 * 8;
+ total_len = be32_to_cpu(dataop->total_len);
+
+ if (likely(dataop->nsgl == 1)) {
+ databuf = (struct fun_dataop_rqbuf *)dataop->imm;
+ bufoff = be32_to_cpu(databuf->bufoff);
+ fun_rq_update_pos(funq, bufoff);
+ rqinfo = &funq->rq_info[funq->rq_buf_idx];
+ dma_sync_single_for_cpu(dma_dev, rqinfo->dma + bufoff,
+ total_len, DMA_FROM_DEVICE);
+ *need_free = false;
+ return page_address(rqinfo->page) + bufoff;
+ }
+
+ /* For scattered completions gather the fragments into one buffer. */
+
+ data = kmalloc(total_len, GFP_ATOMIC);
+ /* NULL is OK here. In case of failure we still need to consume the data
+ * for proper buffer accounting but indicate an error in the response.
+ */
+ if (likely(data))
+ *need_free = true;
+
+ dataoff = 0;
+ for (remaining = total_len; remaining; remaining -= fragsize) {
+ fun_rq_update_pos(funq, 0);
+ fragsize = min_t(unsigned int, PAGE_SIZE, remaining);
+ if (data) {
+ rqinfo = &funq->rq_info[funq->rq_buf_idx];
+ dma_sync_single_for_cpu(dma_dev, rqinfo->dma, fragsize,
+ DMA_FROM_DEVICE);
+ memcpy(data + dataoff, page_address(rqinfo->page),
+ fragsize);
+ dataoff += fragsize;
+ }
+ }
+ return data;
+}
+
+unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max)
+{
+ const struct fun_cqe_info *info;
+ struct fun_rsp_common *rsp;
+ unsigned int new_cqes;
+ u16 sf_p, flags;
+ bool need_free;
+ void *cqe;
+
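+
+/* Illustrative sketch: an admin command gathering @len bytes from one DMA
+ * buffer (names hypothetical) could initialize its SGL entry as:
+ *
+ *	req->sgl[0] = FUN_SUBOP_SGL_INIT(FUN_DATAOP_GL, 0, 1, len, dma_addr);
+ */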
+ if (!max)
+ max = funq->cq_depth - 1;
+
+ for (new_cqes = 0; new_cqes < max; new_cqes++) {
+ cqe = funq->cqes + (funq->cq_head << funq->cqe_size_log2);
+ info = funq_cqe_info(funq, cqe);
+ sf_p = be16_to_cpu(info->sf_p);
+
+ if ((sf_p & 1) != funq->cq_phase)
+ break;
+
+ /* ensure the phase tag is read before other CQE fields */
+ dma_rmb();
+
+ if (++funq->cq_head == funq->cq_depth) {
+ funq->cq_head = 0;
+ funq->cq_phase = !funq->cq_phase;
+ }
+
+ rsp = cqe;
+ flags = be16_to_cpu(rsp->flags);
+
+ need_free = false;
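+
+/* Illustrative sketch: a request carrying one gather descriptor for a
+ * @len-byte buffer (hypothetical) would typically pair the header with a
+ * following fun_dataop_gl entry:
+ *
+ *	req->dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);
+ */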
+ if (unlikely(flags & FUN_REQ_COMMON_FLAG_CQE_IN_RQBUF)) {
+ rsp = fun_data_from_rq(funq, rsp, &need_free);
+ if (!rsp) {
+ rsp = cqe;
+ rsp->len8 = 1;
+ if (rsp->ret == 0)
+ rsp->ret = ENOMEM;
+ }
+ }
+
+ if (funq->cq_cb)
+ funq->cq_cb(funq, funq->cb_data, rsp, info);
+ if (need_free)
+ kfree(rsp);
+ }
+
+ dev_dbg(funq->fdev->dev, "CQ %u, new CQEs %u/%u, head %u, phase %u\n",
+ funq->cqid, new_cqes, max, funq->cq_head, funq->cq_phase);
+ return new_cqes;
+}
+
+unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max)
+{
+ unsigned int processed;
+ u32 db;
+
+ processed = __fun_process_cq(funq, max);
+
+ if (funq->num_rqe_to_fill) {
+ funq->rq_tail = (funq->rq_tail + funq->num_rqe_to_fill) %
+ funq->rq_depth;
+ funq->num_rqe_to_fill = 0;
+ writel(funq->rq_tail, funq->rq_db);
+ }
+
+ db = funq->cq_head | FUN_DB_IRQ_ARM_F;
+ writel(db, funq->cq_db);
+ return processed;
+}
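+
+/* Illustrative sketch: an IRQ handler or poll loop would typically drain up
+ * to a budget of CQEs and re-arm the interrupt via fun_process_cq(); the
+ * callback, context, and budget below are the caller's choice (hypothetical):
+ *
+ *	fun_set_cq_callback(funq, my_cqe_handler, my_ctx);
+ *	...
+ *	unsigned int done = fun_process_cq(funq, budget);
+ */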
+
+static int fun_alloc_sqes(struct fun_queue *funq)
+{
+ funq->sq_cmds = fun_alloc_ring_mem(funq->fdev->dev, funq->sq_depth,
+ 1 << funq->sqe_size_log2, 0,
+ fun_sq_is_head_wb(funq),
+ NUMA_NO_NODE, &funq->sq_dma_addr,
+ NULL, &funq->sq_head);
+ return funq->sq_cmds ? 0 : -ENOMEM;
+}
+
+static int fun_alloc_cqes(struct fun_queue *funq)
+{
+ funq->cqes = fun_alloc_ring_mem(funq->fdev->dev, funq->cq_depth,
+ 1 << funq->cqe_size_log2, 0, false,
+ NUMA_NO_NODE, &funq->cq_dma_addr, NULL,
+ NULL);
+ return funq->cqes ? 0 : -ENOMEM;
+}
+
+static int fun_alloc_rqes(struct fun_queue *funq)
+	/* Link settings change asynchronously; take a consistent snapshot. */
+ funq->rqes = fun_alloc_ring_mem(funq->fdev->dev, funq->rq_depth,
+ sizeof(*funq->rqes),
+ sizeof(*funq->rq_info), false,
+ NUMA_NO_NODE, &funq->rq_dma_addr,
+ (void **)&funq->rq_info, NULL);
+ return funq->rqes ? 0 : -ENOMEM;
+}
+
+/* Free a queue's structures. */
+void fun_free_queue(struct fun_queue *funq)
+{
+ struct device *dev = funq->fdev->dev;
+
+ fun_free_ring_mem(dev, funq->cq_depth, 1 << funq->cqe_size_log2, false,
+ funq->cqes, funq->cq_dma_addr, NULL);
+ fun_free_ring_mem(dev, funq->sq_depth, 1 << funq->sqe_size_log2,
+ fun_sq_is_head_wb(funq), funq->sq_cmds,
+ funq->sq_dma_addr, NULL);
+
+ if (funq->rqes) {
+ fun_clean_rq(funq);
+ fun_free_ring_mem(dev, funq->rq_depth, sizeof(*funq->rqes),
+ false, funq->rqes, funq->rq_dma_addr,
+ funq->rq_info);
+ }
+
+ kfree(funq);
+}
+
+/* Allocate and initialize a funq's structures. */
+struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
+ const struct fun_queue_alloc_req *req)
+{
+ struct fun_queue *funq = kzalloc(sizeof(*funq), GFP_KERNEL);
+
+ if (!funq)
+ return NULL;
+
+ funq->fdev = fdev;
+ spin_lock_init(&funq->sq_lock);
+
+ funq->qid = qid;
+
+ /* Initial CQ/SQ/RQ ids */
+ if (req->rq_depth) {
+ funq->cqid = 2 * qid;
+ if (funq->qid) {
+ /* I/O Q: use rqid = cqid, sqid = +1 */
+ funq->rqid = funq->cqid;
+ funq->sqid = funq->rqid + 1;
+ } else {
+ /* Admin Q: sqid is always 0, use ID 1 for RQ */
+ funq->sqid = 0;
+ funq->rqid = 1;
+ }
+ } else {
+ funq->cqid = qid;
+ funq->sqid = qid;
+ }
+
+ funq->cq_flags = req->cq_flags;
+ funq->sq_flags = req->sq_flags;
+
+ funq->cqe_size_log2 = req->cqe_size_log2;
+ funq->sqe_size_log2 = req->sqe_size_log2;
+
+ funq->cq_depth = req->cq_depth;
+ funq->sq_depth = req->sq_depth;
+
+ funq->cq_intcoal_nentries = req->cq_intcoal_nentries;
+ funq->cq_intcoal_usec = req->cq_intcoal_usec;
+
+ funq->sq_intcoal_nentries = req->sq_intcoal_nentries;
+ funq->sq_intcoal_usec = req->sq_intcoal_usec;
+
+ if (fun_alloc_cqes(funq))
+ goto free_funq;
+
+ funq->cq_phase = 1;
+
+ if (fun_alloc_sqes(funq))
+ goto free_funq;
+
+ if (req->rq_depth) {
+ funq->rq_flags = req->rq_flags | FUN_ADMIN_EPSQ_CREATE_FLAG_RQ;
+ funq->rq_depth = req->rq_depth;
+ funq->rq_buf_offset = -1;
+
+ if (fun_alloc_rqes(funq) || fun_fill_rq(funq))
+ goto free_funq;
+ }
+
+ funq->cq_vector = -1;
+ funq->cqe_info_offset = (1 << funq->cqe_size_log2) - sizeof(struct fun_cqe_info);
+
+	/* SQ/CQ 0 are implicitly created; assign their doorbells now.
+ * Other queues are assigned doorbells at their explicit creation.
+ */
+ if (funq->sqid == 0)
+ funq->sq_db = fun_sq_db_addr(fdev, 0);
+ if (funq->cqid == 0)
+ funq->cq_db = fun_cq_db_addr(fdev, 0);
+
+ return funq;
+
+free_funq:
+ fun_free_queue(funq);
+ return NULL;
+}
+
+/* Create a funq's CQ on the device. */
+static int fun_create_cq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ unsigned int rqid;
+ int rc;
+
+ rqid = funq->cq_flags & FUN_ADMIN_EPCQ_CREATE_FLAG_RQ ?
+ funq->rqid : FUN_HCI_ID_INVALID;
+ rc = fun_cq_create(fdev, funq->cq_flags, funq->cqid, rqid,
+ funq->cqe_size_log2, funq->cq_depth,
+ funq->cq_dma_addr, 0, 0, funq->cq_intcoal_nentries,
+ funq->cq_intcoal_usec, funq->cq_vector, 0, 0,
+ &funq->cqid, &funq->cq_db);
+ if (!rc)
+ dev_dbg(fdev->dev, "created CQ %u\n", funq->cqid);
+
+ return rc;
+}
+
+/* Create a funq's SQ on the device. */
+static int fun_create_sq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ int rc;
+
+ rc = fun_sq_create(fdev, funq->sq_flags, funq->sqid, funq->cqid,
+ funq->sqe_size_log2, funq->sq_depth,
+ funq->sq_dma_addr, funq->sq_intcoal_nentries,
+ funq->sq_intcoal_usec, funq->cq_vector, 0, 0,
+ 0, &funq->sqid, &funq->sq_db);
+ if (!rc)
+ dev_dbg(fdev->dev, "created SQ %u\n", funq->sqid);
+
+ return rc;
+}
+
+/* Create a funq's RQ on the device. */
+int fun_create_rq(struct fun_queue *funq)
+{
+ struct fun_dev *fdev = funq->fdev;
+ int rc;
+
+ rc = fun_sq_create(fdev, funq->rq_flags, funq->rqid, funq->cqid, 0,
+ funq->rq_depth, funq->rq_dma_addr, 0, 0,
+ funq->cq_vector, 0, 0, PAGE_SHIFT, &funq->rqid,
+ &funq->rq_db);
+ if (!rc)
+ dev_dbg(fdev->dev, "created RQ %u\n", funq->rqid);
+
+ return rc;
+}
+
+static unsigned int funq_irq(struct fun_queue *funq)
+{
+ return pci_irq_vector(to_pci_dev(funq->fdev->dev), funq->cq_vector);
+}
+
+int fun_request_irq(struct fun_queue *funq, const char *devname,
+ irq_handler_t handler, void *data)
+{
+ int rc;
+
+ if (funq->cq_vector < 0)
+ return -EINVAL;
+
+ funq->irq_handler = handler;
+ funq->irq_data = data;
+
+ snprintf(funq->irqname, sizeof(funq->irqname),
+ funq->qid ? "%s-q[%d]" : "%s-adminq", devname, funq->qid);
+
+ rc = request_irq(funq_irq(funq), handler, 0, funq->irqname, data);
+ if (rc)
+ funq->irq_handler = NULL;
+
+ return rc;
+}
+
+/* Create all component queues of a funq on the device. */
+int fun_create_queue(struct fun_queue *funq)
+{
+ int rc;
+
+ rc = fun_create_cq(funq);
+ if (rc)
+ return rc;
+
+ if (funq->rq_depth) {
+ rc = fun_create_rq(funq);
+ if (rc)
+ goto release_cq;
+ }
+
+ rc = fun_create_sq(funq);
+ if (rc)
+ goto release_rq;
+
+ return 0;
+
+release_rq:
+ fun_destroy_sq(funq->fdev, funq->rqid);
+release_cq:
+ fun_destroy_cq(funq->fdev, funq->cqid);
+ return rc;
+}
+
+void fun_free_irq(struct fun_queue *funq)
+{
+ if (funq->irq_handler) {
+ unsigned int vector = funq_irq(funq);
+
+ free_irq(vector, funq->irq_data);
+ funq->irq_handler = NULL;
+ funq->irq_data = NULL;
+ }
+}
diff --git a/drivers/net/ethernet/fungible/funcore/fun_queue.h b/drivers/net/ethernet/fungible/funcore/fun_queue.h
new file mode 100644
index 000000000000..7fb53d0ae8b0
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funcore/fun_queue.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUN_QUEUE_H
+#define _FUN_QUEUE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+struct device;
+struct fun_dev;
+struct fun_queue;
+struct fun_cqe_info;
+struct fun_rsp_common;
+
+typedef void (*cq_callback_t)(struct fun_queue *funq, void *data, void *msg,
+ const struct fun_cqe_info *info);
+
+struct fun_rq_info {
+ dma_addr_t dma;
+ struct page *page;
+};
+
+/* A queue group consisting of an SQ, a CQ, and an optional RQ. */
+struct fun_queue {
+ struct fun_dev *fdev;
+ spinlock_t sq_lock;
+
+ dma_addr_t cq_dma_addr;
+ dma_addr_t sq_dma_addr;
+ dma_addr_t rq_dma_addr;
+
+ u32 __iomem *cq_db;
+ u32 __iomem *sq_db;
+ u32 __iomem *rq_db;
+
+ void *cqes;
+ void *sq_cmds;
+ struct fun_eprq_rqbuf *rqes;
+ struct fun_rq_info *rq_info;
+
+ u32 cqid;
+ u32 sqid;
+ u32 rqid;
+
+ u32 cq_depth;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ u16 cq_head;
+ u16 sq_tail;
+ u16 rq_tail;
+
+ u8 cqe_size_log2;
+ u8 sqe_size_log2;
+
+ u16 cqe_info_offset;
+
+ u16 rq_buf_idx;
+ int rq_buf_offset;
+ u16 num_rqe_to_fill;
+
+ u8 cq_intcoal_usec;
+ u8 cq_intcoal_nentries;
+ u8 sq_intcoal_usec;
+ u8 sq_intcoal_nentries;
+
+ u16 cq_flags;
+ u16 sq_flags;
+ u16 rq_flags;
+
+ /* SQ head writeback */
+ u16 sq_comp;
+
+ volatile __be64 *sq_head;
+
+ cq_callback_t cq_cb;
+ void *cb_data;
+
+ irq_handler_t irq_handler;
+ void *irq_data;
+ s16 cq_vector;
+ u8 cq_phase;
+
+ /* I/O q index */
+ u16 qid;
+
+ char irqname[24];
+};
+
+static inline void *fun_sqe_at(const struct fun_queue *funq, unsigned int pos)
+{
+ return funq->sq_cmds + (pos << funq->sqe_size_log2);
+}
+
+static inline void funq_sq_post_tail(struct fun_queue *funq, u16 tail)
+{
+ if (++tail == funq->sq_depth)
+ tail = 0;
+ funq->sq_tail = tail;
+ writel(tail, funq->sq_db);
+}
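+
+/* Illustrative sketch: a caller holding funq->sq_lock could post a command
+ * of 1 << sqe_size_log2 bytes (cmd is hypothetical) like this:
+ *
+ *	memcpy(fun_sqe_at(funq, funq->sq_tail), cmd, 1 << funq->sqe_size_log2);
+ *	funq_sq_post_tail(funq, funq->sq_tail);
+ */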
+
+static inline struct fun_cqe_info *funq_cqe_info(const struct fun_queue *funq,
+ void *cqe)
+{
+ return cqe + funq->cqe_info_offset;
+}
+
+static inline void funq_rq_post(struct fun_queue *funq)
+{
+ writel(funq->rq_tail, funq->rq_db);
+}
+
+struct fun_queue_alloc_req {
+ u8 cqe_size_log2;
+ u8 sqe_size_log2;
+
+ u16 cq_flags;
+ u16 sq_flags;
+ u16 rq_flags;
+
+ u32 cq_depth;
+ u32 sq_depth;
+ u32 rq_depth;
+
+ u8 cq_intcoal_usec;
+ u8 cq_intcoal_nentries;
+ u8 sq_intcoal_usec;
+ u8 sq_intcoal_nentries;
+};
+
+int fun_sq_create(struct fun_dev *fdev, u16 flags, u32 sqid, u32 cqid,
+ u8 sqe_size_log2, u32 sq_depth, dma_addr_t dma_addr,
+ u8 coal_nentries, u8 coal_usec, u32 irq_num,
+ u32 scan_start_id, u32 scan_end_id,
+ u32 rq_buf_size_log2, u32 *sqidp, u32 __iomem **dbp);
+int fun_cq_create(struct fun_dev *fdev, u16 flags, u32 cqid, u32 rqid,
+ u8 cqe_size_log2, u32 cq_depth, dma_addr_t dma_addr,
+ u16 headroom, u16 tailroom, u8 coal_nentries, u8 coal_usec,
+ u32 irq_num, u32 scan_start_id, u32 scan_end_id,
+ u32 *cqidp, u32 __iomem **dbp);
+void *fun_alloc_ring_mem(struct device *dma_dev, size_t depth,
+ size_t hw_desc_sz, size_t sw_desc_size, bool wb,
+ int numa_node, dma_addr_t *dma_addr, void **sw_va,
+ volatile __be64 **wb_va);
+void fun_free_ring_mem(struct device *dma_dev, size_t depth, size_t hw_desc_sz,
+ bool wb, void *hw_va, dma_addr_t dma_addr, void *sw_va);
+
+#define fun_destroy_sq(fdev, sqid) \
+ fun_res_destroy((fdev), FUN_ADMIN_OP_EPSQ, 0, (sqid))
+#define fun_destroy_cq(fdev, cqid) \
+ fun_res_destroy((fdev), FUN_ADMIN_OP_EPCQ, 0, (cqid))
+
+struct fun_queue *fun_alloc_queue(struct fun_dev *fdev, int qid,
+ const struct fun_queue_alloc_req *req);
+void fun_free_queue(struct fun_queue *funq);
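+
+/* Illustrative sketch: a typical caller allocates the host-side structures,
+ * then creates the queues on the device; the depths and entry sizes below
+ * are hypothetical example values:
+ *
+ *	struct fun_queue_alloc_req req = {
+ *		.cqe_size_log2 = ilog2(64),
+ *		.sqe_size_log2 = ilog2(64),
+ *		.cq_depth = 256,
+ *		.sq_depth = 256,
+ *	};
+ *	struct fun_queue *funq = fun_alloc_queue(fdev, qid, &req);
+ *
+ *	if (funq && fun_create_queue(funq))
+ *		fun_free_queue(funq);
+ */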
+
+static inline void fun_set_cq_callback(struct fun_queue *funq, cq_callback_t cb,
+ void *cb_data)
+{
+ funq->cq_cb = cb;
+ funq->cb_data = cb_data;
+}
+
+int fun_create_rq(struct fun_queue *funq);
+int fun_create_queue(struct fun_queue *funq);
+
+void fun_free_irq(struct fun_queue *funq);
+int fun_request_irq(struct fun_queue *funq, const char *devname,
+ irq_handler_t handler, void *data);
+
+unsigned int __fun_process_cq(struct fun_queue *funq, unsigned int max);
+unsigned int fun_process_cq(struct fun_queue *funq, unsigned int max);
+
+#endif /* _FUN_QUEUE_H */
diff --git a/drivers/net/ethernet/fungible/funeth/Kconfig b/drivers/net/ethernet/fungible/funeth/Kconfig
new file mode 100644
index 000000000000..e742e7663449
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Fungible Ethernet driver configuration
+#
+
+config FUN_ETH
+ tristate "Fungible Ethernet device driver"
+ depends on PCI_MSI
+ depends on TLS && TLS_DEVICE || TLS_DEVICE=n
+ select NET_DEVLINK
+ select FUN_CORE
+ help
+ This driver supports the Ethernet functionality of Fungible adapters.
+ It works with both physical and virtual functions.
+
+ To compile this driver as a module, choose M here. The module
+ will be called funeth.
diff --git a/drivers/net/ethernet/fungible/funeth/Makefile b/drivers/net/ethernet/fungible/funeth/Makefile
new file mode 100644
index 000000000000..646d69595b4f
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+ccflags-y += -I$(srctree)/$(src)/../funcore -I$(srctree)/$(src)
+
+obj-$(CONFIG_FUN_ETH) += funeth.o
+
+funeth-y := funeth_main.o funeth_rx.o funeth_tx.o funeth_devlink.o \
+ funeth_ethtool.o
+
+funeth-$(CONFIG_TLS_DEVICE) += funeth_ktls.o
diff --git a/drivers/net/ethernet/fungible/funeth/fun_port.h b/drivers/net/ethernet/fungible/funeth/fun_port.h
new file mode 100644
index 000000000000..0f9da44e3786
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/fun_port.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUN_PORT_H
+#define _FUN_PORT_H
+
+enum port_mac_rx_stats {
+ PORT_MAC_RX_etherStatsOctets = 0x0,
+ PORT_MAC_RX_OctetsReceivedOK = 0x1,
+ PORT_MAC_RX_aAlignmentErrors = 0x2,
+ PORT_MAC_RX_aPAUSEMACCtrlFramesReceived = 0x3,
+ PORT_MAC_RX_aFrameTooLongErrors = 0x4,
+ PORT_MAC_RX_aInRangeLengthErrors = 0x5,
+ PORT_MAC_RX_aFramesReceivedOK = 0x6,
+ PORT_MAC_RX_aFrameCheckSequenceErrors = 0x7,
+ PORT_MAC_RX_VLANReceivedOK = 0x8,
+ PORT_MAC_RX_ifInErrors = 0x9,
+ PORT_MAC_RX_ifInUcastPkts = 0xa,
+ PORT_MAC_RX_ifInMulticastPkts = 0xb,
+ PORT_MAC_RX_ifInBroadcastPkts = 0xc,
+ PORT_MAC_RX_etherStatsDropEvents = 0xd,
+ PORT_MAC_RX_etherStatsPkts = 0xe,
+ PORT_MAC_RX_etherStatsUndersizePkts = 0xf,
+ PORT_MAC_RX_etherStatsPkts64Octets = 0x10,
+ PORT_MAC_RX_etherStatsPkts65to127Octets = 0x11,
+ PORT_MAC_RX_etherStatsPkts128to255Octets = 0x12,
+ PORT_MAC_RX_etherStatsPkts256to511Octets = 0x13,
+ PORT_MAC_RX_etherStatsPkts512to1023Octets = 0x14,
+ PORT_MAC_RX_etherStatsPkts1024to1518Octets = 0x15,
+ PORT_MAC_RX_etherStatsPkts1519toMaxOctets = 0x16,
+ PORT_MAC_RX_etherStatsOversizePkts = 0x17,
+ PORT_MAC_RX_etherStatsJabbers = 0x18,
+ PORT_MAC_RX_etherStatsFragments = 0x19,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_0 = 0x1a,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_1 = 0x1b,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_2 = 0x1c,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_3 = 0x1d,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_4 = 0x1e,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_5 = 0x1f,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_6 = 0x20,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_7 = 0x21,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_8 = 0x22,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_9 = 0x23,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_10 = 0x24,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_11 = 0x25,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_12 = 0x26,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_13 = 0x27,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_14 = 0x28,
+ PORT_MAC_RX_CBFCPAUSEFramesReceived_15 = 0x29,
+ PORT_MAC_RX_MACControlFramesReceived = 0x2a,
+ PORT_MAC_RX_STATS_MAX = 0x2b,
+};
+
+enum port_mac_tx_stats {
+ PORT_MAC_TX_etherStatsOctets = 0x0,
+ PORT_MAC_TX_OctetsTransmittedOK = 0x1,
+ PORT_MAC_TX_aPAUSEMACCtrlFramesTransmitted = 0x2,
+ PORT_MAC_TX_aFramesTransmittedOK = 0x3,
+ PORT_MAC_TX_VLANTransmittedOK = 0x4,
+ PORT_MAC_TX_ifOutErrors = 0x5,
+ PORT_MAC_TX_ifOutUcastPkts = 0x6,
+ PORT_MAC_TX_ifOutMulticastPkts = 0x7,
+ PORT_MAC_TX_ifOutBroadcastPkts = 0x8,
+ PORT_MAC_TX_etherStatsPkts64Octets = 0x9,
+ PORT_MAC_TX_etherStatsPkts65to127Octets = 0xa,
+ PORT_MAC_TX_etherStatsPkts128to255Octets = 0xb,
+ PORT_MAC_TX_etherStatsPkts256to511Octets = 0xc,
+ PORT_MAC_TX_etherStatsPkts512to1023Octets = 0xd,
+ PORT_MAC_TX_etherStatsPkts1024to1518Octets = 0xe,
+ PORT_MAC_TX_etherStatsPkts1519toMaxOctets = 0xf,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_0 = 0x10,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_1 = 0x11,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_2 = 0x12,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_3 = 0x13,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_4 = 0x14,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_5 = 0x15,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_6 = 0x16,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_7 = 0x17,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_8 = 0x18,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_9 = 0x19,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_10 = 0x1a,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_11 = 0x1b,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_12 = 0x1c,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_13 = 0x1d,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_14 = 0x1e,
+ PORT_MAC_TX_CBFCPAUSEFramesTransmitted_15 = 0x1f,
+ PORT_MAC_TX_MACControlFramesTransmitted = 0x20,
+ PORT_MAC_TX_etherStatsPkts = 0x21,
+ PORT_MAC_TX_STATS_MAX = 0x22,
+};
+
+enum port_mac_fec_stats {
+ PORT_MAC_FEC_Correctable = 0x0,
+ PORT_MAC_FEC_Uncorrectable = 0x1,
+ PORT_MAC_FEC_STATS_MAX = 0x2,
+};
+
+#endif /* _FUN_PORT_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth.h b/drivers/net/ethernet/fungible/funeth/funeth.h
new file mode 100644
index 000000000000..1250e10d21db
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUNETH_H
+#define _FUNETH_H
+
+#include <uapi/linux/if_ether.h>
+#include <uapi/linux/net_tstamp.h>
+#include <linux/mutex.h>
+#include <linux/seqlock.h>
+#include <linux/xarray.h>
+#include <net/devlink.h>
+#include "fun_dev.h"
+
+#define ADMIN_SQE_SIZE SZ_128
+#define ADMIN_CQE_SIZE SZ_64
+#define ADMIN_RSP_MAX_LEN (ADMIN_CQE_SIZE - sizeof(struct fun_cqe_info))
+
+#define FUN_MAX_MTU 9024
+
+#define SQ_DEPTH 512U
+#define CQ_DEPTH 1024U
+#define RQ_DEPTH (512U / (PAGE_SIZE / 4096))
+
+#define CQ_INTCOAL_USEC 10
+#define CQ_INTCOAL_NPKT 16
+#define SQ_INTCOAL_USEC 10
+#define SQ_INTCOAL_NPKT 16
+
+#define INVALID_LPORT 0xffff
+
+#define FUN_PORT_CAP_PAUSE_MASK (FUN_PORT_CAP_TX_PAUSE | FUN_PORT_CAP_RX_PAUSE)
+
+struct fun_vport_info {
+ u8 mac[ETH_ALEN];
+ u16 vlan;
+ __be16 vlan_proto;
+ u8 qos;
+ u8 spoofchk:1;
+ u8 trusted:1;
+ unsigned int max_rate;
+};
+
+/* "subclass" of fun_dev for Ethernet functions */
+struct fun_ethdev {
+ struct fun_dev fdev;
+
+ /* the function's network ports */
+ struct net_device **netdevs;
+ unsigned int num_ports;
+
+ /* configuration for the function's virtual ports */
+ unsigned int num_vports;
+ struct fun_vport_info *vport_info;
+
+ struct mutex state_mutex; /* nests inside RTNL if both taken */
+
+ unsigned int nsqs_per_port;
+};
+
+static inline struct fun_ethdev *to_fun_ethdev(struct fun_dev *p)
+{
+ return container_of(p, struct fun_ethdev, fdev);
+}
+
+struct fun_qset {
+ struct funeth_rxq **rxqs;
+ struct funeth_txq **txqs;
+ struct funeth_txq **xdpqs;
+ unsigned int nrxqs;
+ unsigned int ntxqs;
+ unsigned int nxdpqs;
+ unsigned int rxq_start;
+ unsigned int txq_start;
+ unsigned int xdpq_start;
+ unsigned int cq_depth;
+ unsigned int rq_depth;
+ unsigned int sq_depth;
+ int state;
+};
+
+/* Per netdevice driver state, i.e., netdev_priv. */
+struct funeth_priv {
+ struct fun_dev *fdev;
+ struct pci_dev *pdev;
+ struct net_device *netdev;
+
+ struct funeth_rxq * __rcu *rxqs;
+ struct funeth_txq **txqs;
+ struct funeth_txq * __rcu *xdpqs;
+
+ struct xarray irqs;
+ unsigned int num_tx_irqs;
+ unsigned int num_rx_irqs;
+ unsigned int rx_irq_ofst;
+
+ unsigned int lane_attrs;
+ u16 lport;
+
+ /* link settings */
+ u64 port_caps;
+ u64 advertising;
+ u64 lp_advertising;
+ unsigned int link_speed;
+ u8 xcvr_type;
+ u8 active_fc;
+ u8 active_fec;
+ u8 link_down_reason;
+ seqcount_t link_seq;
+
+ u32 msg_enable;
+
+ unsigned int num_xdpqs;
+
+ /* ethtool, etc. config parameters */
+ unsigned int sq_depth;
+ unsigned int rq_depth;
+ unsigned int cq_depth;
+ unsigned int cq_irq_db;
+ u8 tx_coal_usec;
+ u8 tx_coal_count;
+ u8 rx_coal_usec;
+ u8 rx_coal_count;
+
+ struct hwtstamp_config hwtstamp_cfg;
+
+ /* cumulative queue stats from earlier queue instances */
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+
+ /* RSS */
+ unsigned int rss_hw_id;
+ enum fun_eth_hash_alg hash_algo;
+ u8 rss_key[FUN_ETH_RSS_MAX_KEY_SIZE];
+ unsigned int indir_table_nentries;
+ u32 indir_table[FUN_ETH_RSS_MAX_INDIR_ENT];
+ dma_addr_t rss_dma_addr;
+ void *rss_cfg;
+
+ /* DMA area for port stats */
+ dma_addr_t stats_dma_addr;
+ __be64 *stats;
+
+ struct bpf_prog *xdp_prog;
+
+ struct devlink_port dl_port;
+
+ /* kTLS state */
+ unsigned int ktls_id;
+ atomic64_t tx_tls_add;
+ atomic64_t tx_tls_del;
+ atomic64_t tx_tls_resync;
+};
+
+void fun_set_ethtool_ops(struct net_device *netdev);
+int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data);
+int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data);
+int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid);
+int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs,
+ struct netlink_ext_ack *extack);
+int fun_change_num_queues(struct net_device *dev, unsigned int ntx,
+ unsigned int nrx);
+void fun_set_ring_count(struct net_device *netdev, unsigned int ntx,
+ unsigned int nrx);
+int fun_config_rss(struct net_device *dev, int algo, const u8 *key,
+ const u32 *qtable, u8 op);
+
+#endif /* _FUNETH_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_devlink.c b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c
new file mode 100644
index 000000000000..4fbeb3fd71a8
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_devlink.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include "funeth.h"
+#include "funeth_devlink.h"
+
+static const struct devlink_ops fun_dl_ops = {
+};
+
+struct devlink *fun_devlink_alloc(struct device *dev)
+{
+ return devlink_alloc(&fun_dl_ops, sizeof(struct fun_ethdev), dev);
+}
+
+void fun_devlink_free(struct devlink *devlink)
+{
+ devlink_free(devlink);
+}
+
+void fun_devlink_register(struct devlink *devlink)
+{
+ devlink_register(devlink);
+}
+
+void fun_devlink_unregister(struct devlink *devlink)
+{
+ devlink_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_devlink.h b/drivers/net/ethernet/fungible/funeth/funeth_devlink.h
new file mode 100644
index 000000000000..e40464d57ff4
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_devlink.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef __FUNETH_DEVLINK_H
+#define __FUNETH_DEVLINK_H
+
+#include <net/devlink.h>
+
+struct devlink *fun_devlink_alloc(struct device *dev);
+void fun_devlink_free(struct devlink *devlink);
+void fun_devlink_register(struct devlink *devlink);
+void fun_devlink_unregister(struct devlink *devlink);
+
+#endif /* __FUNETH_DEVLINK_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
new file mode 100644
index 000000000000..31aa185f4d17
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -0,0 +1,1198 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/ethtool.h>
+#include <linux/linkmode.h>
+#include <linux/netdevice.h>
+#include <linux/nvme.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include "funeth.h"
+#include "fun_port.h"
+#include "funeth_txrx.h"
+
+/* Min queue depth. The smallest power-of-2 supporting jumbo frames with 4K
+ * pages is 8. Require it for all types of queues though some could work with
+ * fewer entries.
+ */
+#define FUNETH_MIN_QDEPTH 8
+
+static const char mac_tx_stat_names[][ETH_GSTRING_LEN] = {
+ "mac_tx_octets_total",
+ "mac_tx_frames_total",
+ "mac_tx_vlan_frames_ok",
+ "mac_tx_unicast_frames",
+ "mac_tx_multicast_frames",
+ "mac_tx_broadcast_frames",
+ "mac_tx_errors",
+ "mac_tx_CBFCPAUSE0",
+ "mac_tx_CBFCPAUSE1",
+ "mac_tx_CBFCPAUSE2",
+ "mac_tx_CBFCPAUSE3",
+ "mac_tx_CBFCPAUSE4",
+ "mac_tx_CBFCPAUSE5",
+ "mac_tx_CBFCPAUSE6",
+ "mac_tx_CBFCPAUSE7",
+ "mac_tx_CBFCPAUSE8",
+ "mac_tx_CBFCPAUSE9",
+ "mac_tx_CBFCPAUSE10",
+ "mac_tx_CBFCPAUSE11",
+ "mac_tx_CBFCPAUSE12",
+ "mac_tx_CBFCPAUSE13",
+ "mac_tx_CBFCPAUSE14",
+ "mac_tx_CBFCPAUSE15",
+};
+
+static const char mac_rx_stat_names[][ETH_GSTRING_LEN] = {
+ "mac_rx_octets_total",
+ "mac_rx_frames_total",
+ "mac_rx_VLAN_frames_ok",
+ "mac_rx_unicast_frames",
+ "mac_rx_multicast_frames",
+ "mac_rx_broadcast_frames",
+ "mac_rx_drop_events",
+ "mac_rx_errors",
+ "mac_rx_alignment_errors",
+ "mac_rx_CBFCPAUSE0",
+ "mac_rx_CBFCPAUSE1",
+ "mac_rx_CBFCPAUSE2",
+ "mac_rx_CBFCPAUSE3",
+ "mac_rx_CBFCPAUSE4",
+ "mac_rx_CBFCPAUSE5",
+ "mac_rx_CBFCPAUSE6",
+ "mac_rx_CBFCPAUSE7",
+ "mac_rx_CBFCPAUSE8",
+ "mac_rx_CBFCPAUSE9",
+ "mac_rx_CBFCPAUSE10",
+ "mac_rx_CBFCPAUSE11",
+ "mac_rx_CBFCPAUSE12",
+ "mac_rx_CBFCPAUSE13",
+ "mac_rx_CBFCPAUSE14",
+ "mac_rx_CBFCPAUSE15",
+};
+
+static const char * const txq_stat_names[] = {
+ "tx_pkts",
+ "tx_bytes",
+ "tx_cso",
+ "tx_tso",
+ "tx_encapsulated_tso",
+ "tx_uso",
+ "tx_more",
+ "tx_queue_stops",
+ "tx_queue_restarts",
+ "tx_mapping_errors",
+ "tx_tls_encrypted_packets",
+ "tx_tls_encrypted_bytes",
+ "tx_tls_ooo",
+ "tx_tls_drop_no_sync_data",
+};
+
+static const char * const xdpq_stat_names[] = {
+ "tx_xdp_pkts",
+ "tx_xdp_bytes",
+ "tx_xdp_full",
+ "tx_xdp_mapping_errors",
+};
+
+static const char * const rxq_stat_names[] = {
+ "rx_pkts",
+ "rx_bytes",
+ "rx_cso",
+ "gro_pkts",
+ "gro_merged",
+ "rx_xdp_tx",
+ "rx_xdp_redir",
+ "rx_xdp_drops",
+ "rx_buffers",
+ "rx_page_allocs",
+ "rx_drops",
+ "rx_budget_exhausted",
+ "rx_mapping_errors",
+};
+
+static const char * const tls_stat_names[] = {
+ "tx_tls_ctx",
+ "tx_tls_del",
+ "tx_tls_resync",
+};
+
+static void fun_link_modes_to_ethtool(u64 modes,
+ unsigned long *ethtool_modes_map)
+{
+#define ADD_LINK_MODE(mode) \
+ __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, ethtool_modes_map)
+
+ if (modes & FUN_PORT_CAP_AUTONEG)
+ ADD_LINK_MODE(Autoneg);
+ if (modes & FUN_PORT_CAP_1000_X)
+ ADD_LINK_MODE(1000baseX_Full);
+ if (modes & FUN_PORT_CAP_10G_R) {
+ ADD_LINK_MODE(10000baseCR_Full);
+ ADD_LINK_MODE(10000baseSR_Full);
+ ADD_LINK_MODE(10000baseLR_Full);
+ ADD_LINK_MODE(10000baseER_Full);
+ }
+ if (modes & FUN_PORT_CAP_25G_R) {
+ ADD_LINK_MODE(25000baseCR_Full);
+ ADD_LINK_MODE(25000baseSR_Full);
+ }
+ if (modes & FUN_PORT_CAP_40G_R4) {
+ ADD_LINK_MODE(40000baseCR4_Full);
+ ADD_LINK_MODE(40000baseSR4_Full);
+ ADD_LINK_MODE(40000baseLR4_Full);
+ }
+ if (modes & FUN_PORT_CAP_50G_R2) {
+ ADD_LINK_MODE(50000baseCR2_Full);
+ ADD_LINK_MODE(50000baseSR2_Full);
+ }
+ if (modes & FUN_PORT_CAP_50G_R) {
+ ADD_LINK_MODE(50000baseCR_Full);
+ ADD_LINK_MODE(50000baseSR_Full);
+ ADD_LINK_MODE(50000baseLR_ER_FR_Full);
+ }
+ if (modes & FUN_PORT_CAP_100G_R4) {
+ ADD_LINK_MODE(100000baseCR4_Full);
+ ADD_LINK_MODE(100000baseSR4_Full);
+ ADD_LINK_MODE(100000baseLR4_ER4_Full);
+ }
+ if (modes & FUN_PORT_CAP_100G_R2) {
+ ADD_LINK_MODE(100000baseCR2_Full);
+ ADD_LINK_MODE(100000baseSR2_Full);
+ ADD_LINK_MODE(100000baseLR2_ER2_FR2_Full);
+ }
+ if (modes & FUN_PORT_CAP_FEC_NONE)
+ ADD_LINK_MODE(FEC_NONE);
+ if (modes & FUN_PORT_CAP_FEC_FC)
+ ADD_LINK_MODE(FEC_BASER);
+ if (modes & FUN_PORT_CAP_FEC_RS)
+ ADD_LINK_MODE(FEC_RS);
+ if (modes & FUN_PORT_CAP_RX_PAUSE)
+ ADD_LINK_MODE(Pause);
+
+#undef ADD_LINK_MODE
+}
+
+static void set_asym_pause(u64 advertising, struct ethtool_link_ksettings *ks)
+{
+ bool rx_pause, tx_pause;
+
+ rx_pause = advertising & FUN_PORT_CAP_RX_PAUSE;
+ tx_pause = advertising & FUN_PORT_CAP_TX_PAUSE;
+ if (tx_pause ^ rx_pause)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ Asym_Pause);
+}
+
+static unsigned int fun_port_type(unsigned int xcvr)
+{
+ if (!xcvr)
+ return PORT_NONE;
+
+ switch (xcvr & 7) {
+ case FUN_XCVR_BASET:
+ return PORT_TP;
+ case FUN_XCVR_CU:
+ return PORT_DA;
+ default:
+ return PORT_FIBRE;
+ }
+}
+
+static int fun_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int seq, speed, xcvr;
+ u64 lp_advertising;
+ bool link_up;
+
+ ethtool_link_ksettings_zero_link_mode(ks, supported);
+ ethtool_link_ksettings_zero_link_mode(ks, advertising);
+ ethtool_link_ksettings_zero_link_mode(ks, lp_advertising);
+
+ /* Link settings change asynchronously, take a consistent snapshot */
+ do {
+ seq = read_seqcount_begin(&fp->link_seq);
+ link_up = netif_carrier_ok(netdev);
+ speed = fp->link_speed;
+ xcvr = fp->xcvr_type;
+ lp_advertising = fp->lp_advertising;
+ } while (read_seqcount_retry(&fp->link_seq, seq));
+
+ if (link_up) {
+ ks->base.speed = speed;
+ ks->base.duplex = DUPLEX_FULL;
+ fun_link_modes_to_ethtool(lp_advertising,
+ ks->link_modes.lp_advertising);
+ } else {
+ ks->base.speed = SPEED_UNKNOWN;
+ ks->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ ks->base.autoneg = (fp->advertising & FUN_PORT_CAP_AUTONEG) ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE;
+ ks->base.port = fun_port_type(xcvr);
+
+ fun_link_modes_to_ethtool(fp->port_caps, ks->link_modes.supported);
+ if (fp->port_caps & (FUN_PORT_CAP_RX_PAUSE | FUN_PORT_CAP_TX_PAUSE))
+ ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
+
+ fun_link_modes_to_ethtool(fp->advertising, ks->link_modes.advertising);
+ set_asym_pause(fp->advertising, ks);
+ return 0;
+}
+
+static u64 fun_advert_modes(const struct ethtool_link_ksettings *ks)
+{
+ u64 modes = 0;
+
+#define HAS_MODE(mode) \
+ ethtool_link_ksettings_test_link_mode(ks, advertising, mode)
+
+ if (HAS_MODE(1000baseX_Full))
+ modes |= FUN_PORT_CAP_1000_X;
+ if (HAS_MODE(10000baseCR_Full) || HAS_MODE(10000baseSR_Full) ||
+ HAS_MODE(10000baseLR_Full) || HAS_MODE(10000baseER_Full))
+ modes |= FUN_PORT_CAP_10G_R;
+ if (HAS_MODE(25000baseCR_Full) || HAS_MODE(25000baseSR_Full))
+ modes |= FUN_PORT_CAP_25G_R;
+ if (HAS_MODE(40000baseCR4_Full) || HAS_MODE(40000baseSR4_Full) ||
+ HAS_MODE(40000baseLR4_Full))
+ modes |= FUN_PORT_CAP_40G_R4;
+ if (HAS_MODE(50000baseCR2_Full) || HAS_MODE(50000baseSR2_Full))
+ modes |= FUN_PORT_CAP_50G_R2;
+ if (HAS_MODE(50000baseCR_Full) || HAS_MODE(50000baseSR_Full) ||
+ HAS_MODE(50000baseLR_ER_FR_Full))
+ modes |= FUN_PORT_CAP_50G_R;
+ if (HAS_MODE(100000baseCR4_Full) || HAS_MODE(100000baseSR4_Full) ||
+ HAS_MODE(100000baseLR4_ER4_Full))
+ modes |= FUN_PORT_CAP_100G_R4;
+ if (HAS_MODE(100000baseCR2_Full) || HAS_MODE(100000baseSR2_Full) ||
+ HAS_MODE(100000baseLR2_ER2_FR2_Full))
+ modes |= FUN_PORT_CAP_100G_R2;
+
+ return modes;
+#undef HAS_MODE
+}
+
+static u64 fun_speed_to_link_mode(unsigned int speed)
+{
+ switch (speed) {
+ case SPEED_100000:
+ return FUN_PORT_CAP_100G_R4 | FUN_PORT_CAP_100G_R2;
+ case SPEED_50000:
+ return FUN_PORT_CAP_50G_R | FUN_PORT_CAP_50G_R2;
+ case SPEED_40000:
+ return FUN_PORT_CAP_40G_R4;
+ case SPEED_25000:
+ return FUN_PORT_CAP_25G_R;
+ case SPEED_10000:
+ return FUN_PORT_CAP_10G_R;
+ case SPEED_1000:
+ return FUN_PORT_CAP_1000_X;
+ default:
+ return 0;
+ }
+}
+
+static int fun_change_advert(struct funeth_priv *fp, u64 new_advert)
+{
+ int err;
+
+ if (new_advert == fp->advertising)
+ return 0;
+
+ err = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT, new_advert);
+ if (!err)
+ fp->advertising = new_advert;
+ return err;
+}
+
+#define FUN_PORT_CAP_FEC_MASK \
+ (FUN_PORT_CAP_FEC_NONE | FUN_PORT_CAP_FEC_FC | FUN_PORT_CAP_FEC_RS)
+
+static int fun_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ks)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 new_advert;
+
+ /* eswitch ports don't support mode changes */
+ if (fp->port_caps & FUN_PORT_CAP_VPORT)
+ return -EOPNOTSUPP;
+
+ if (ks->base.duplex == DUPLEX_HALF)
+ return -EINVAL;
+ if (ks->base.autoneg == AUTONEG_ENABLE &&
+ !(fp->port_caps & FUN_PORT_CAP_AUTONEG))
+ return -EINVAL;
+
+ if (ks->base.autoneg == AUTONEG_ENABLE) {
+ if (linkmode_empty(ks->link_modes.advertising))
+ return -EINVAL;
+
+ fun_link_modes_to_ethtool(fp->port_caps, supported);
+ if (!linkmode_subset(ks->link_modes.advertising, supported))
+ return -EINVAL;
+
+ new_advert = fun_advert_modes(ks) | FUN_PORT_CAP_AUTONEG;
+ } else {
+ new_advert = fun_speed_to_link_mode(ks->base.speed);
+ new_advert &= fp->port_caps;
+ if (!new_advert)
+ return -EINVAL;
+ }
+ new_advert |= fp->advertising &
+ (FUN_PORT_CAP_PAUSE_MASK | FUN_PORT_CAP_FEC_MASK);
+
+ return fun_change_advert(fp, new_advert);
+}
+
+static void fun_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ u8 active_pause = fp->active_fc;
+
+ pause->rx_pause = !!(active_pause & FUN_PORT_CAP_RX_PAUSE);
+ pause->tx_pause = !!(active_pause & FUN_PORT_CAP_TX_PAUSE);
+ pause->autoneg = !!(fp->advertising & FUN_PORT_CAP_AUTONEG);
+}
+
+static int fun_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 new_advert;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT)
+ return -EOPNOTSUPP;
+ /* Forcing PAUSE settings with AN enabled is unsupported. */
+ if (!pause->autoneg && (fp->advertising & FUN_PORT_CAP_AUTONEG))
+ return -EOPNOTSUPP;
+ if (pause->autoneg && !(fp->advertising & FUN_PORT_CAP_AUTONEG))
+ return -EINVAL;
+ if (pause->tx_pause && !(fp->port_caps & FUN_PORT_CAP_TX_PAUSE))
+ return -EINVAL;
+ if (pause->rx_pause && !(fp->port_caps & FUN_PORT_CAP_RX_PAUSE))
+ return -EINVAL;
+
+ new_advert = fp->advertising & ~FUN_PORT_CAP_PAUSE_MASK;
+ if (pause->tx_pause)
+ new_advert |= FUN_PORT_CAP_TX_PAUSE;
+ if (pause->rx_pause)
+ new_advert |= FUN_PORT_CAP_RX_PAUSE;
+
+ return fun_change_advert(fp, new_advert);
+}
+
+static int fun_restart_an(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->advertising & FUN_PORT_CAP_AUTONEG))
+ return -EOPNOTSUPP;
+
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_ADVERT,
+ FUN_PORT_CAP_AUTONEG);
+}
+
+static int fun_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int beacon;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT)
+ return -EOPNOTSUPP;
+ if (state != ETHTOOL_ID_ACTIVE && state != ETHTOOL_ID_INACTIVE)
+ return -EOPNOTSUPP;
+
+ beacon = state == ETHTOOL_ID_ACTIVE ? FUN_PORT_LED_BEACON_ON :
+ FUN_PORT_LED_BEACON_OFF;
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_LED, beacon);
+}
+
+static void fun_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(fp->pdev), sizeof(info->bus_info));
+}
+
+static u32 fun_get_msglevel(struct net_device *netdev)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ return fp->msg_enable;
+}
+
+static void fun_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+
+ fp->msg_enable = value;
+}
+
+static int fun_get_regs_len(struct net_device *dev)
+{
+ return NVME_REG_ACQ + sizeof(u64);
+}
+
+static void fun_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *buf)
+{
+ const struct funeth_priv *fp = netdev_priv(dev);
+ void __iomem *bar = fp->fdev->bar;
+
+ regs->version = 0;
+ *(u64 *)(buf + NVME_REG_CAP) = readq(bar + NVME_REG_CAP);
+ *(u32 *)(buf + NVME_REG_VS) = readl(bar + NVME_REG_VS);
+ *(u32 *)(buf + NVME_REG_INTMS) = readl(bar + NVME_REG_INTMS);
+ *(u32 *)(buf + NVME_REG_INTMC) = readl(bar + NVME_REG_INTMC);
+ *(u32 *)(buf + NVME_REG_CC) = readl(bar + NVME_REG_CC);
+ *(u32 *)(buf + NVME_REG_CSTS) = readl(bar + NVME_REG_CSTS);
+ *(u32 *)(buf + NVME_REG_AQA) = readl(bar + NVME_REG_AQA);
+ *(u64 *)(buf + NVME_REG_ASQ) = readq(bar + NVME_REG_ASQ);
+ *(u64 *)(buf + NVME_REG_ACQ) = readq(bar + NVME_REG_ACQ);
+}
+
+static int fun_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kcoal,
+ struct netlink_ext_ack *ext_ack)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ coal->rx_coalesce_usecs = fp->rx_coal_usec;
+ coal->rx_max_coalesced_frames = fp->rx_coal_count;
+ coal->use_adaptive_rx_coalesce = !fp->cq_irq_db;
+ coal->tx_coalesce_usecs = fp->tx_coal_usec;
+ coal->tx_max_coalesced_frames = fp->tx_coal_count;
+ return 0;
+}
+
+static int fun_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kcoal,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_rxq **rxqs;
+ unsigned int i, db_val;
+
+ if (coal->rx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M ||
+ coal->rx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M ||
+ (coal->rx_coalesce_usecs | coal->rx_max_coalesced_frames) == 0 ||
+ coal->tx_coalesce_usecs > FUN_DB_INTCOAL_USEC_M ||
+ coal->tx_max_coalesced_frames > FUN_DB_INTCOAL_ENTRIES_M ||
+ (coal->tx_coalesce_usecs | coal->tx_max_coalesced_frames) == 0)
+ return -EINVAL;
+
+ /* a timer is required if there's any coalescing */
+ if ((coal->rx_max_coalesced_frames > 1 && !coal->rx_coalesce_usecs) ||
+ (coal->tx_max_coalesced_frames > 1 && !coal->tx_coalesce_usecs))
+ return -EINVAL;
+
+ fp->rx_coal_usec = coal->rx_coalesce_usecs;
+ fp->rx_coal_count = coal->rx_max_coalesced_frames;
+ fp->tx_coal_usec = coal->tx_coalesce_usecs;
+ fp->tx_coal_count = coal->tx_max_coalesced_frames;
+
+ db_val = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count);
+ WRITE_ONCE(fp->cq_irq_db, db_val);
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ if (!rxqs)
+ return 0;
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++)
+ WRITE_ONCE(rxqs[i]->irq_db_val, db_val);
+
+ db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec, fp->tx_coal_count);
+ for (i = 0; i < netdev->real_num_tx_queues; i++)
+ WRITE_ONCE(fp->txqs[i]->irq_db_val, db_val);
+
+ return 0;
+}
+
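The checks in fun_set_coalesce() above amount to three rules per direction: each value must fit the hardware doorbell fields (FUN_DB_INTCOAL_USEC_M / FUN_DB_INTCOAL_ENTRIES_M), at least one of the two parameters must be non-zero, and batching more than one frame requires a timer. A minimal stand-alone sketch of the same acceptance rules follows; the numeric limits are placeholders, not the driver's real field masks.

#include <stdbool.h>
#include <stdio.h>

/* Placeholder limits; the driver's FUN_DB_INTCOAL_USEC_M and
 * FUN_DB_INTCOAL_ENTRIES_M are hardware field masks not shown here. */
#define USEC_MAX    255u
#define FRAMES_MAX  255u

/* Mirrors fun_set_coalesce()'s validation for one direction. */
static bool coal_params_ok(unsigned int usecs, unsigned int frames)
{
	if (usecs > USEC_MAX || frames > FRAMES_MAX)
		return false;           /* exceeds doorbell field width */
	if (usecs == 0 && frames == 0)
		return false;           /* must coalesce on something */
	if (frames > 1 && usecs == 0)
		return false;           /* batching frames needs a timer */
	return true;
}

int main(void)
{
	printf("%d\n", coal_params_ok(8, 16)); /* 1: valid */
	printf("%d\n", coal_params_ok(0, 4));  /* 0: frames > 1 without timer */
	return 0;
}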
+static void fun_get_channels(struct net_device *netdev,
+ struct ethtool_channels *chan)
+{
+ chan->max_rx = netdev->num_rx_queues;
+ chan->rx_count = netdev->real_num_rx_queues;
+
+ chan->max_tx = netdev->num_tx_queues;
+ chan->tx_count = netdev->real_num_tx_queues;
+}
+
+static int fun_set_channels(struct net_device *netdev,
+ struct ethtool_channels *chan)
+{
+ if (!chan->tx_count || !chan->rx_count)
+ return -EINVAL;
+
+ if (chan->tx_count == netdev->real_num_tx_queues &&
+ chan->rx_count == netdev->real_num_rx_queues)
+ return 0;
+
+ if (netif_running(netdev))
+ return fun_change_num_queues(netdev, chan->tx_count,
+ chan->rx_count);
+
+ fun_set_ring_count(netdev, chan->tx_count, chan->rx_count);
+ return 0;
+}
+
+static void fun_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *extack)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int max_depth = fp->fdev->q_depth;
+
+ /* We size CQs to be twice the RQ depth so max RQ depth is half the
+ * max queue depth.
+ */
+ ring->rx_max_pending = max_depth / 2;
+ ring->tx_max_pending = max_depth;
+
+ ring->rx_pending = fp->rq_depth;
+ ring->tx_pending = fp->sq_depth;
+
+ kring->rx_buf_len = PAGE_SIZE;
+ kring->cqe_size = FUNETH_CQE_SIZE;
+}
+
+static int fun_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *extack)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ /* queue depths must be powers-of-2 */
+ if (!is_power_of_2(ring->rx_pending) ||
+ !is_power_of_2(ring->tx_pending))
+ return -EINVAL;
+
+ if (ring->rx_pending < FUNETH_MIN_QDEPTH ||
+ ring->tx_pending < FUNETH_MIN_QDEPTH)
+ return -EINVAL;
+
+ if (fp->sq_depth == ring->tx_pending &&
+ fp->rq_depth == ring->rx_pending)
+ return 0;
+
+ if (netif_running(netdev)) {
+ struct fun_qset req = {
+ .cq_depth = 2 * ring->rx_pending,
+ .rq_depth = ring->rx_pending,
+ .sq_depth = ring->tx_pending
+ };
+
+ rc = fun_replace_queues(netdev, &req, extack);
+ if (rc)
+ return rc;
+ }
+
+ fp->sq_depth = ring->tx_pending;
+ fp->rq_depth = ring->rx_pending;
+ fp->cq_depth = 2 * fp->rq_depth;
+ return 0;
+}
+
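fun_set_ringparam() enforces that both depths are powers of two, at least FUNETH_MIN_QDEPTH, and that the Rx ring fits once the CQ is sized to twice the RQ. The sketch below replays those checks with assumed values for the minimum depth and the device's maximum queue depth, which the patch defines elsewhere.

#include <stdbool.h>
#include <stdio.h>

/* Placeholder values; FUNETH_MIN_QDEPTH and the device's q_depth come
 * from the driver and device and are not shown in this hunk. */
#define MIN_QDEPTH  64u
#define DEV_QDEPTH  4096u

static bool is_pow2(unsigned int v)
{
	return v && !(v & (v - 1));
}

/* Mirrors fun_set_ringparam()'s checks plus the derived CQ depth. */
static int pick_depths(unsigned int rx, unsigned int tx, unsigned int *cq)
{
	if (!is_pow2(rx) || !is_pow2(tx))
		return -1;
	if (rx < MIN_QDEPTH || tx < MIN_QDEPTH)
		return -1;
	if (rx > DEV_QDEPTH / 2 || tx > DEV_QDEPTH)
		return -1;              /* CQ is 2 * RQ, so RQ tops out at half */
	*cq = 2 * rx;
	return 0;
}

int main(void)
{
	unsigned int cq;

	if (!pick_depths(512, 1024, &cq))
		printf("cq_depth = %u\n", cq); /* 1024 */
	return 0;
}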
+static int fun_get_sset_count(struct net_device *dev, int sset)
+{
+ const struct funeth_priv *fp = netdev_priv(dev);
+ int n;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ n = (dev->real_num_tx_queues + 1) * ARRAY_SIZE(txq_stat_names) +
+ (dev->real_num_rx_queues + 1) * ARRAY_SIZE(rxq_stat_names) +
+ (fp->num_xdpqs + 1) * ARRAY_SIZE(xdpq_stat_names) +
+ ARRAY_SIZE(tls_stat_names);
+ if (fp->port_caps & FUN_PORT_CAP_STATS) {
+ n += ARRAY_SIZE(mac_tx_stat_names) +
+ ARRAY_SIZE(mac_rx_stat_names);
+ }
+ return n;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void fun_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int i, j;
+ u8 *p = data;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ if (fp->port_caps & FUN_PORT_CAP_STATS) {
+ memcpy(p, mac_tx_stat_names, sizeof(mac_tx_stat_names));
+ p += sizeof(mac_tx_stat_names);
+ memcpy(p, mac_rx_stat_names, sizeof(mac_rx_stat_names));
+ p += sizeof(mac_rx_stat_names);
+ }
+
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
+ ethtool_sprintf(&p, "%s[%u]", txq_stat_names[j],
+ i);
+ }
+ for (j = 0; j < ARRAY_SIZE(txq_stat_names); j++)
+ ethtool_sprintf(&p, txq_stat_names[j]);
+
+ for (i = 0; i < fp->num_xdpqs; i++) {
+ for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
+ ethtool_sprintf(&p, "%s[%u]",
+ xdpq_stat_names[j], i);
+ }
+ for (j = 0; j < ARRAY_SIZE(xdpq_stat_names); j++)
+ ethtool_sprintf(&p, xdpq_stat_names[j]);
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
+ ethtool_sprintf(&p, "%s[%u]", rxq_stat_names[j],
+ i);
+ }
+ for (j = 0; j < ARRAY_SIZE(rxq_stat_names); j++)
+ ethtool_sprintf(&p, rxq_stat_names[j]);
+
+ for (j = 0; j < ARRAY_SIZE(tls_stat_names); j++)
+ ethtool_sprintf(&p, tls_stat_names[j]);
+ break;
+ default:
+ break;
+ }
+}
+
+static u64 *get_mac_stats(const struct funeth_priv *fp, u64 *data)
+{
+#define TX_STAT(s) \
+ *data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s])
+
+ TX_STAT(etherStatsOctets);
+ TX_STAT(etherStatsPkts);
+ TX_STAT(VLANTransmittedOK);
+ TX_STAT(ifOutUcastPkts);
+ TX_STAT(ifOutMulticastPkts);
+ TX_STAT(ifOutBroadcastPkts);
+ TX_STAT(ifOutErrors);
+ TX_STAT(CBFCPAUSEFramesTransmitted_0);
+ TX_STAT(CBFCPAUSEFramesTransmitted_1);
+ TX_STAT(CBFCPAUSEFramesTransmitted_2);
+ TX_STAT(CBFCPAUSEFramesTransmitted_3);
+ TX_STAT(CBFCPAUSEFramesTransmitted_4);
+ TX_STAT(CBFCPAUSEFramesTransmitted_5);
+ TX_STAT(CBFCPAUSEFramesTransmitted_6);
+ TX_STAT(CBFCPAUSEFramesTransmitted_7);
+ TX_STAT(CBFCPAUSEFramesTransmitted_8);
+ TX_STAT(CBFCPAUSEFramesTransmitted_9);
+ TX_STAT(CBFCPAUSEFramesTransmitted_10);
+ TX_STAT(CBFCPAUSEFramesTransmitted_11);
+ TX_STAT(CBFCPAUSEFramesTransmitted_12);
+ TX_STAT(CBFCPAUSEFramesTransmitted_13);
+ TX_STAT(CBFCPAUSEFramesTransmitted_14);
+ TX_STAT(CBFCPAUSEFramesTransmitted_15);
+
+#define RX_STAT(s) *data++ = be64_to_cpu(fp->stats[PORT_MAC_RX_##s])
+
+ RX_STAT(etherStatsOctets);
+ RX_STAT(etherStatsPkts);
+ RX_STAT(VLANReceivedOK);
+ RX_STAT(ifInUcastPkts);
+ RX_STAT(ifInMulticastPkts);
+ RX_STAT(ifInBroadcastPkts);
+ RX_STAT(etherStatsDropEvents);
+ RX_STAT(ifInErrors);
+ RX_STAT(aAlignmentErrors);
+ RX_STAT(CBFCPAUSEFramesReceived_0);
+ RX_STAT(CBFCPAUSEFramesReceived_1);
+ RX_STAT(CBFCPAUSEFramesReceived_2);
+ RX_STAT(CBFCPAUSEFramesReceived_3);
+ RX_STAT(CBFCPAUSEFramesReceived_4);
+ RX_STAT(CBFCPAUSEFramesReceived_5);
+ RX_STAT(CBFCPAUSEFramesReceived_6);
+ RX_STAT(CBFCPAUSEFramesReceived_7);
+ RX_STAT(CBFCPAUSEFramesReceived_8);
+ RX_STAT(CBFCPAUSEFramesReceived_9);
+ RX_STAT(CBFCPAUSEFramesReceived_10);
+ RX_STAT(CBFCPAUSEFramesReceived_11);
+ RX_STAT(CBFCPAUSEFramesReceived_12);
+ RX_STAT(CBFCPAUSEFramesReceived_13);
+ RX_STAT(CBFCPAUSEFramesReceived_14);
+ RX_STAT(CBFCPAUSEFramesReceived_15);
+
+ return data;
+
+#undef TX_STAT
+#undef RX_STAT
+}
+
+static void fun_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_txq_stats txs;
+ struct funeth_rxq_stats rxs;
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ unsigned int i, start;
+ u64 *totals, *tot;
+
+ if (fp->port_caps & FUN_PORT_CAP_STATS)
+ data = get_mac_stats(fp, data);
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ if (!rxqs)
+ return;
+
+#define ADD_STAT(cnt) do { \
+ *data = (cnt); *tot++ += *data++; \
+} while (0)
+
+ /* Tx queues */
+ totals = data + netdev->real_num_tx_queues * ARRAY_SIZE(txq_stat_names);
+
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ tot = totals;
+
+ FUN_QSTAT_READ(fp->txqs[i], start, txs);
+
+ ADD_STAT(txs.tx_pkts);
+ ADD_STAT(txs.tx_bytes);
+ ADD_STAT(txs.tx_cso);
+ ADD_STAT(txs.tx_tso);
+ ADD_STAT(txs.tx_encap_tso);
+ ADD_STAT(txs.tx_uso);
+ ADD_STAT(txs.tx_more);
+ ADD_STAT(txs.tx_nstops);
+ ADD_STAT(txs.tx_nrestarts);
+ ADD_STAT(txs.tx_map_err);
+ ADD_STAT(txs.tx_tls_pkts);
+ ADD_STAT(txs.tx_tls_bytes);
+ ADD_STAT(txs.tx_tls_fallback);
+ ADD_STAT(txs.tx_tls_drops);
+ }
+ data += ARRAY_SIZE(txq_stat_names);
+
+ /* XDP Tx queues */
+ xdpqs = rtnl_dereference(fp->xdpqs);
+ totals = data + fp->num_xdpqs * ARRAY_SIZE(xdpq_stat_names);
+
+ for (i = 0; i < fp->num_xdpqs; i++) {
+ tot = totals;
+
+ FUN_QSTAT_READ(xdpqs[i], start, txs);
+
+ ADD_STAT(txs.tx_pkts);
+ ADD_STAT(txs.tx_bytes);
+ ADD_STAT(txs.tx_xdp_full);
+ ADD_STAT(txs.tx_map_err);
+ }
+ data += ARRAY_SIZE(xdpq_stat_names);
+
+ /* Rx queues */
+ totals = data + netdev->real_num_rx_queues * ARRAY_SIZE(rxq_stat_names);
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ tot = totals;
+
+ FUN_QSTAT_READ(rxqs[i], start, rxs);
+
+ ADD_STAT(rxs.rx_pkts);
+ ADD_STAT(rxs.rx_bytes);
+ ADD_STAT(rxs.rx_cso);
+ ADD_STAT(rxs.gro_pkts);
+ ADD_STAT(rxs.gro_merged);
+ ADD_STAT(rxs.xdp_tx);
+ ADD_STAT(rxs.xdp_redir);
+ ADD_STAT(rxs.xdp_drops);
+ ADD_STAT(rxs.rx_bufs);
+ ADD_STAT(rxs.rx_page_alloc);
+ ADD_STAT(rxs.rx_mem_drops + rxs.xdp_err);
+ ADD_STAT(rxs.rx_budget);
+ ADD_STAT(rxs.rx_map_err);
+ }
+ data += ARRAY_SIZE(rxq_stat_names);
+#undef ADD_STAT
+
+ *data++ = atomic64_read(&fp->tx_tls_add);
+ *data++ = atomic64_read(&fp->tx_tls_del);
+ *data++ = atomic64_read(&fp->tx_tls_resync);
+}
+
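The stats buffer filled above is laid out as one block of counters per queue followed by a synthesized totals block per queue type, which is why "totals" is pointed just past the per-queue region and ADD_STAT() writes the per-queue value while also accumulating it into the totals row. A small user-space illustration of that pattern (not the driver's macro) is below.

#include <stdio.h>

#define NQUEUES 3u
#define NSTATS  2u

/* Same pattern as ADD_STAT(): store the per-queue value, then add it
 * into the totals row that sits right after the per-queue blocks. */
static void fill_stats(unsigned long long *data,
		       const unsigned long long per_q[NQUEUES][NSTATS])
{
	unsigned long long *totals = data + NQUEUES * NSTATS;
	unsigned int q, s;

	for (q = 0; q < NQUEUES; q++)
		for (s = 0; s < NSTATS; s++) {
			data[q * NSTATS + s] = per_q[q][s];
			totals[s] += per_q[q][s];
		}
}

int main(void)
{
	unsigned long long per_q[NQUEUES][NSTATS] = {
		{ 10, 1 }, { 20, 2 }, { 30, 3 }
	};
	unsigned long long data[(NQUEUES + 1) * NSTATS] = { 0 };

	fill_stats(data, per_q);
	printf("totals: %llu pkts, %llu errs\n",
	       data[NQUEUES * NSTATS], data[NQUEUES * NSTATS + 1]);
	return 0;
}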
+#define RX_STAT(fp, s) be64_to_cpu((fp)->stats[PORT_MAC_RX_##s])
+#define TX_STAT(fp, s) \
+ be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_##s])
+#define FEC_STAT(fp, s) \
+ be64_to_cpu((fp)->stats[PORT_MAC_RX_STATS_MAX + \
+ PORT_MAC_TX_STATS_MAX + PORT_MAC_FEC_##s])
+
+static void fun_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->tx_pause_frames = TX_STAT(fp, aPAUSEMACCtrlFramesTransmitted);
+ stats->rx_pause_frames = RX_STAT(fp, aPAUSEMACCtrlFramesReceived);
+}
+
+static void fun_get_802_3_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->FramesTransmittedOK = TX_STAT(fp, aFramesTransmittedOK);
+ stats->FramesReceivedOK = RX_STAT(fp, aFramesReceivedOK);
+ stats->FrameCheckSequenceErrors = RX_STAT(fp, aFrameCheckSequenceErrors);
+ stats->OctetsTransmittedOK = TX_STAT(fp, OctetsTransmittedOK);
+ stats->OctetsReceivedOK = RX_STAT(fp, OctetsReceivedOK);
+ stats->InRangeLengthErrors = RX_STAT(fp, aInRangeLengthErrors);
+ stats->FrameTooLongErrors = RX_STAT(fp, aFrameTooLongErrors);
+}
+
+static void fun_get_802_3_ctrl_stats(struct net_device *netdev,
+ struct ethtool_eth_ctrl_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->MACControlFramesTransmitted = TX_STAT(fp, MACControlFramesTransmitted);
+ stats->MACControlFramesReceived = RX_STAT(fp, MACControlFramesReceived);
+}
+
+static void fun_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ static const struct ethtool_rmon_hist_range rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 32767 },
+ {}
+ };
+
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->undersize_pkts = RX_STAT(fp, etherStatsUndersizePkts);
+ stats->oversize_pkts = RX_STAT(fp, etherStatsOversizePkts);
+ stats->fragments = RX_STAT(fp, etherStatsFragments);
+ stats->jabbers = RX_STAT(fp, etherStatsJabbers);
+
+ stats->hist[0] = RX_STAT(fp, etherStatsPkts64Octets);
+ stats->hist[1] = RX_STAT(fp, etherStatsPkts65to127Octets);
+ stats->hist[2] = RX_STAT(fp, etherStatsPkts128to255Octets);
+ stats->hist[3] = RX_STAT(fp, etherStatsPkts256to511Octets);
+ stats->hist[4] = RX_STAT(fp, etherStatsPkts512to1023Octets);
+ stats->hist[5] = RX_STAT(fp, etherStatsPkts1024to1518Octets);
+ stats->hist[6] = RX_STAT(fp, etherStatsPkts1519toMaxOctets);
+
+ stats->hist_tx[0] = TX_STAT(fp, etherStatsPkts64Octets);
+ stats->hist_tx[1] = TX_STAT(fp, etherStatsPkts65to127Octets);
+ stats->hist_tx[2] = TX_STAT(fp, etherStatsPkts128to255Octets);
+ stats->hist_tx[3] = TX_STAT(fp, etherStatsPkts256to511Octets);
+ stats->hist_tx[4] = TX_STAT(fp, etherStatsPkts512to1023Octets);
+ stats->hist_tx[5] = TX_STAT(fp, etherStatsPkts1024to1518Octets);
+ stats->hist_tx[6] = TX_STAT(fp, etherStatsPkts1519toMaxOctets);
+
+ *ranges = rmon_ranges;
+}
+
+static void fun_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *stats)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return;
+
+ stats->corrected_blocks.total = FEC_STAT(fp, Correctable);
+ stats->uncorrectable_blocks.total = FEC_STAT(fp, Uncorrectable);
+}
+
+#undef RX_STAT
+#undef TX_STAT
+#undef FEC_STAT
+
+static int fun_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = netdev->real_num_rx_queues;
+ return 0;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int fun_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
+{
+ return 0;
+}
+
+static u32 fun_get_rxfh_indir_size(struct net_device *netdev)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ return fp->indir_table_nentries;
+}
+
+static u32 fun_get_rxfh_key_size(struct net_device *netdev)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ return sizeof(fp->rss_key);
+}
+
+static int fun_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ const struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (!fp->rss_cfg)
+ return -EOPNOTSUPP;
+
+ if (indir)
+ memcpy(indir, fp->indir_table,
+ sizeof(u32) * fp->indir_table_nentries);
+
+ if (key)
+ memcpy(key, fp->rss_key, sizeof(fp->rss_key));
+
+ if (hfunc)
+ *hfunc = fp->hash_algo == FUN_ETH_RSS_ALG_TOEPLITZ ?
+ ETH_RSS_HASH_TOP : ETH_RSS_HASH_CRC32;
+
+ return 0;
+}
+
+static int fun_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ const u32 *rss_indir = indir ? indir : fp->indir_table;
+ const u8 *rss_key = key ? key : fp->rss_key;
+ enum fun_eth_hash_alg algo;
+
+ if (!fp->rss_cfg)
+ return -EOPNOTSUPP;
+
+ if (hfunc == ETH_RSS_HASH_NO_CHANGE)
+ algo = fp->hash_algo;
+ else if (hfunc == ETH_RSS_HASH_CRC32)
+ algo = FUN_ETH_RSS_ALG_CRC32;
+ else if (hfunc == ETH_RSS_HASH_TOP)
+ algo = FUN_ETH_RSS_ALG_TOEPLITZ;
+ else
+ return -EINVAL;
+
+	/* If the port is up, try to reconfigure RSS and keep the new
+	 * settings if successful. If it is down, update the RSS settings
+	 * now and apply them the next time the port is brought up.
+	 */
+ if (netif_running(netdev)) {
+ int rc = fun_config_rss(netdev, algo, rss_key, rss_indir,
+ FUN_ADMIN_SUBOP_MODIFY);
+ if (rc)
+ return rc;
+ }
+
+ fp->hash_algo = algo;
+ if (key)
+ memcpy(fp->rss_key, key, sizeof(fp->rss_key));
+ if (indir)
+ memcpy(fp->indir_table, indir,
+ sizeof(u32) * fp->indir_table_nentries);
+ return 0;
+}
+
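For reference, RSS steering works by hashing each flow with the configured algorithm, indexing the indirection table with the hash modulo the table size, and reading the receive queue from that entry (fun_config_rss() later translates queue indices to hardware CQ IDs when it pushes the table to the device). A toy sketch of that lookup, with arbitrary table and queue counts, is shown here.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: table size and queue count are example values. */
#define INDIR_ENTRIES 64u
#define NUM_RXQS      8u

static unsigned int steer(uint32_t flow_hash, const uint32_t *indir)
{
	return indir[flow_hash % INDIR_ENTRIES];
}

int main(void)
{
	uint32_t indir[INDIR_ENTRIES];
	unsigned int i;

	for (i = 0; i < INDIR_ENTRIES; i++)
		indir[i] = i % NUM_RXQS;        /* default even spread */

	printf("hash 0x1234 -> rxq %u\n", steer(0x1234, indir));
	return 0;
}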
+static int fun_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = -1;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
+static unsigned int to_ethtool_fec(unsigned int fun_fec)
+{
+ unsigned int fec = 0;
+
+ if (fun_fec == FUN_PORT_FEC_NA)
+ fec |= ETHTOOL_FEC_NONE;
+ if (fun_fec & FUN_PORT_FEC_OFF)
+ fec |= ETHTOOL_FEC_OFF;
+ if (fun_fec & FUN_PORT_FEC_RS)
+ fec |= ETHTOOL_FEC_RS;
+ if (fun_fec & FUN_PORT_FEC_FC)
+ fec |= ETHTOOL_FEC_BASER;
+ if (fun_fec & FUN_PORT_FEC_AUTO)
+ fec |= ETHTOOL_FEC_AUTO;
+ return fec;
+}
+
+static int fun_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 fec_data;
+ int rc;
+
+ rc = fun_port_read_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, &fec_data);
+ if (rc)
+ return rc;
+
+ fec->active_fec = to_ethtool_fec(fec_data & 0xff);
+ fec->fec = to_ethtool_fec(fec_data >> 8);
+ return 0;
+}
+
+static int fun_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 fec_mode;
+
+ switch (fec->fec) {
+ case ETHTOOL_FEC_AUTO:
+ fec_mode = FUN_PORT_FEC_AUTO;
+ break;
+ case ETHTOOL_FEC_OFF:
+ if (!(fp->port_caps & FUN_PORT_CAP_FEC_NONE))
+ return -EINVAL;
+ fec_mode = FUN_PORT_FEC_OFF;
+ break;
+ case ETHTOOL_FEC_BASER:
+ if (!(fp->port_caps & FUN_PORT_CAP_FEC_FC))
+ return -EINVAL;
+ fec_mode = FUN_PORT_FEC_FC;
+ break;
+ case ETHTOOL_FEC_RS:
+ if (!(fp->port_caps & FUN_PORT_CAP_FEC_RS))
+ return -EINVAL;
+ fec_mode = FUN_PORT_FEC_RS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_FEC, fec_mode);
+}
+
+static int fun_get_port_module_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *req,
+ struct netlink_ext_ack *extack)
+{
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_xcvr_read_rsp rsp;
+ } cmd;
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Specified port is virtual, only physical ports have modules");
+ return -EOPNOTSUPP;
+ }
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ sizeof(cmd.req));
+ cmd.req.u.xcvr_read =
+ FUN_ADMIN_PORT_XCVR_READ_REQ_INIT(0, netdev->dev_port,
+ req->bank, req->page,
+ req->offset, req->length,
+ req->i2c_address);
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd.rsp), 0);
+ if (rc)
+ return rc;
+
+ memcpy(req->data, cmd.rsp.data, req->length);
+ return req->length;
+}
+
+static const struct ethtool_ops fun_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .get_link_ksettings = fun_get_link_ksettings,
+ .set_link_ksettings = fun_set_link_ksettings,
+ .set_phys_id = fun_set_phys_id,
+ .get_drvinfo = fun_get_drvinfo,
+ .get_msglevel = fun_get_msglevel,
+ .set_msglevel = fun_set_msglevel,
+ .get_regs_len = fun_get_regs_len,
+ .get_regs = fun_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = fun_get_coalesce,
+ .set_coalesce = fun_set_coalesce,
+ .get_ts_info = fun_get_ts_info,
+ .get_ringparam = fun_get_ringparam,
+ .set_ringparam = fun_set_ringparam,
+ .get_sset_count = fun_get_sset_count,
+ .get_strings = fun_get_strings,
+ .get_ethtool_stats = fun_get_ethtool_stats,
+ .get_rxnfc = fun_get_rxnfc,
+ .set_rxnfc = fun_set_rxnfc,
+ .get_rxfh_indir_size = fun_get_rxfh_indir_size,
+ .get_rxfh_key_size = fun_get_rxfh_key_size,
+ .get_rxfh = fun_get_rxfh,
+ .set_rxfh = fun_set_rxfh,
+ .get_channels = fun_get_channels,
+ .set_channels = fun_set_channels,
+ .get_fecparam = fun_get_fecparam,
+ .set_fecparam = fun_set_fecparam,
+ .get_pauseparam = fun_get_pauseparam,
+ .set_pauseparam = fun_set_pauseparam,
+ .nway_reset = fun_restart_an,
+ .get_pause_stats = fun_get_pause_stats,
+ .get_fec_stats = fun_get_fec_stats,
+ .get_eth_mac_stats = fun_get_802_3_stats,
+ .get_eth_ctrl_stats = fun_get_802_3_ctrl_stats,
+ .get_rmon_stats = fun_get_rmon_stats,
+ .get_module_eeprom_by_page = fun_get_port_module_page,
+};
+
+void fun_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &fun_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ktls.c b/drivers/net/ethernet/fungible/funeth/funeth_ktls.c
new file mode 100644
index 000000000000..f871def70d70
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ktls.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include "funeth.h"
+#include "funeth_ktls.h"
+
+static int fun_admin_ktls_create(struct funeth_priv *fp, unsigned int id)
+{
+ struct fun_admin_ktls_create_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ sizeof(req)),
+ .subop = FUN_ADMIN_SUBOP_CREATE,
+ .id = cpu_to_be32(id),
+ };
+
+ return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+}
+
+static int fun_ktls_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_admin_ktls_modify_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ sizeof(req)),
+ .subop = FUN_ADMIN_SUBOP_MODIFY,
+ .id = cpu_to_be32(fp->ktls_id),
+ .tcp_seq = cpu_to_be32(start_offload_tcp_sn),
+ };
+ struct fun_admin_ktls_modify_rsp rsp;
+ struct fun_ktls_tx_ctx *tx_ctx;
+ int rc;
+
+ if (direction != TLS_OFFLOAD_CTX_DIR_TX)
+ return -EOPNOTSUPP;
+
+ if (crypto_info->version == TLS_1_2_VERSION)
+ req.version = FUN_KTLS_TLSV2;
+ else
+ return -EOPNOTSUPP;
+
+ switch (crypto_info->cipher_type) {
+ case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *c = (void *)crypto_info;
+
+ req.cipher = FUN_KTLS_CIPHER_AES_GCM_128;
+ memcpy(req.key, c->key, sizeof(c->key));
+ memcpy(req.iv, c->iv, sizeof(c->iv));
+ memcpy(req.salt, c->salt, sizeof(c->salt));
+ memcpy(req.record_seq, c->rec_seq, sizeof(c->rec_seq));
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, &rsp,
+ sizeof(rsp), 0);
+ memzero_explicit(&req, sizeof(req));
+ if (rc)
+ return rc;
+
+ tx_ctx = tls_driver_ctx(sk, direction);
+ tx_ctx->tlsid = rsp.tlsid;
+ tx_ctx->next_seq = start_offload_tcp_sn;
+ atomic64_inc(&fp->tx_tls_add);
+ return 0;
+}
+
+static void fun_ktls_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_admin_ktls_modify_req req;
+ struct fun_ktls_tx_ctx *tx_ctx;
+
+ if (direction != TLS_OFFLOAD_CTX_DIR_TX)
+ return;
+
+ tx_ctx = __tls_driver_ctx(tls_ctx, direction);
+
+ req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ offsetof(struct fun_admin_ktls_modify_req, tcp_seq));
+ req.subop = FUN_ADMIN_SUBOP_MODIFY;
+ req.flags = cpu_to_be16(FUN_KTLS_MODIFY_REMOVE);
+ req.id = cpu_to_be32(fp->ktls_id);
+ req.tlsid = tx_ctx->tlsid;
+
+ fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+ atomic64_inc(&fp->tx_tls_del);
+}
+
+static int fun_ktls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
+ u8 *rcd_sn, enum tls_offload_ctx_dir direction)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_admin_ktls_modify_req req;
+ struct fun_ktls_tx_ctx *tx_ctx;
+ int rc;
+
+ if (direction != TLS_OFFLOAD_CTX_DIR_TX)
+ return -EOPNOTSUPP;
+
+ tx_ctx = tls_driver_ctx(sk, direction);
+
+ req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
+ offsetof(struct fun_admin_ktls_modify_req, key));
+ req.subop = FUN_ADMIN_SUBOP_MODIFY;
+ req.flags = 0;
+ req.id = cpu_to_be32(fp->ktls_id);
+ req.tlsid = tx_ctx->tlsid;
+ req.tcp_seq = cpu_to_be32(seq);
+ req.version = 0;
+ req.cipher = 0;
+ memcpy(req.record_seq, rcd_sn, sizeof(req.record_seq));
+
+ atomic64_inc(&fp->tx_tls_resync);
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+ if (!rc)
+ tx_ctx->next_seq = seq;
+ return rc;
+}
+
+static const struct tlsdev_ops fun_ktls_ops = {
+ .tls_dev_add = fun_ktls_add,
+ .tls_dev_del = fun_ktls_del,
+ .tls_dev_resync = fun_ktls_resync,
+};
+
+int fun_ktls_init(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ rc = fun_admin_ktls_create(fp, netdev->dev_port);
+ if (rc)
+ return rc;
+
+ fp->ktls_id = netdev->dev_port;
+ netdev->tlsdev_ops = &fun_ktls_ops;
+ netdev->hw_features |= NETIF_F_HW_TLS_TX;
+ netdev->features |= NETIF_F_HW_TLS_TX;
+ return 0;
+}
+
+void fun_ktls_cleanup(struct funeth_priv *fp)
+{
+ if (fp->ktls_id == FUN_HCI_ID_INVALID)
+ return;
+
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_KTLS, 0, fp->ktls_id);
+ fp->ktls_id = FUN_HCI_ID_INVALID;
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ktls.h b/drivers/net/ethernet/fungible/funeth/funeth_ktls.h
new file mode 100644
index 000000000000..9d6f2141a959
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ktls.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUN_KTLS_H
+#define _FUN_KTLS_H
+
+#include <net/tls.h>
+
+struct funeth_priv;
+
+struct fun_ktls_tx_ctx {
+ __be64 tlsid;
+ u32 next_seq;
+};
+
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+int fun_ktls_init(struct net_device *netdev);
+void fun_ktls_cleanup(struct funeth_priv *fp);
+
+#else
+
+static inline int fun_ktls_init(struct net_device *netdev)
+{
+	return 0;
+}
+
+static inline void fun_ktls_cleanup(struct funeth_priv *fp)
+{
+}
+#endif
+
+#endif /* _FUN_KTLS_H */
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
new file mode 100644
index 000000000000..df86770731ad
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -0,0 +1,2081 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/bpf.h>
+#include <linux/crash_dump.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/filter.h>
+#include <linux/idr.h>
+#include <linux/if_vlan.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/inetdevice.h>
+
+#include "funeth.h"
+#include "funeth_devlink.h"
+#include "funeth_ktls.h"
+#include "fun_port.h"
+#include "fun_queue.h"
+#include "funeth_txrx.h"
+
+#define ADMIN_SQ_DEPTH 32
+#define ADMIN_CQ_DEPTH 64
+#define ADMIN_RQ_DEPTH 16
+
+/* Default number of Tx/Rx queues. */
+#define FUN_DFLT_QUEUES 16U
+
+enum {
+ FUN_SERV_RES_CHANGE = FUN_SERV_FIRST_AVAIL,
+ FUN_SERV_DEL_PORTS,
+};
+
+static const struct pci_device_id funeth_id_table[] = {
+ { PCI_VDEVICE(FUNGIBLE, 0x0101) },
+ { PCI_VDEVICE(FUNGIBLE, 0x0181) },
+ { 0, }
+};
+
+/* Issue a port write admin command with @n key/value pairs. */
+static int fun_port_write_cmds(struct funeth_priv *fp, unsigned int n,
+ const int *keys, const u64 *data)
+{
+ unsigned int cmd_size, i;
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_rsp rsp;
+ u8 v[ADMIN_SQE_SIZE];
+ } cmd;
+
+ cmd_size = offsetof(struct fun_admin_port_req, u.write.write48) +
+ n * sizeof(struct fun_admin_write48_req);
+ if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN)
+ return -EINVAL;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ cmd_size);
+ cmd.req.u.write =
+ FUN_ADMIN_PORT_WRITE_REQ_INIT(FUN_ADMIN_SUBOP_WRITE, 0,
+ fp->netdev->dev_port);
+ for (i = 0; i < n; i++)
+ cmd.req.u.write.write48[i] =
+ FUN_ADMIN_WRITE48_REQ_INIT(keys[i], data[i]);
+
+ return fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
+ &cmd.rsp, cmd_size, 0);
+}
+
+int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data)
+{
+ return fun_port_write_cmds(fp, 1, &key, &data);
+}
+
+/* Issue a port read admin command with @n key/value pairs. */
+static int fun_port_read_cmds(struct funeth_priv *fp, unsigned int n,
+ const int *keys, u64 *data)
+{
+ const struct fun_admin_read48_rsp *r48rsp;
+ unsigned int cmd_size, i;
+ int rc;
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_rsp rsp;
+ u8 v[ADMIN_SQE_SIZE];
+ } cmd;
+
+ cmd_size = offsetof(struct fun_admin_port_req, u.read.read48) +
+ n * sizeof(struct fun_admin_read48_req);
+ if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN)
+ return -EINVAL;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ cmd_size);
+ cmd.req.u.read =
+ FUN_ADMIN_PORT_READ_REQ_INIT(FUN_ADMIN_SUBOP_READ, 0,
+ fp->netdev->dev_port);
+ for (i = 0; i < n; i++)
+ cmd.req.u.read.read48[i] = FUN_ADMIN_READ48_REQ_INIT(keys[i]);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
+ &cmd.rsp, cmd_size, 0);
+ if (rc)
+ return rc;
+
+ for (r48rsp = cmd.rsp.u.read.read48, i = 0; i < n; i++, r48rsp++) {
+ data[i] = FUN_ADMIN_READ48_RSP_DATA_G(r48rsp->key_to_data);
+ dev_dbg(fp->fdev->dev,
+ "port_read_rsp lport=%u (key_to_data=0x%llx) key=%d data:%lld retval:%lld",
+ fp->lport, r48rsp->key_to_data, keys[i], data[i],
+ FUN_ADMIN_READ48_RSP_RET_G(r48rsp->key_to_data));
+ }
+ return 0;
+}
+
+int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data)
+{
+ return fun_port_read_cmds(fp, 1, &key, data);
+}
+
+static void fun_report_link(struct net_device *netdev)
+{
+ if (netif_carrier_ok(netdev)) {
+ const struct funeth_priv *fp = netdev_priv(netdev);
+ const char *fec = "", *pause = "";
+ int speed = fp->link_speed;
+ char unit = 'M';
+
+ if (fp->link_speed >= SPEED_1000) {
+ speed /= 1000;
+ unit = 'G';
+ }
+
+ if (fp->active_fec & FUN_PORT_FEC_RS)
+ fec = ", RS-FEC";
+ else if (fp->active_fec & FUN_PORT_FEC_FC)
+ fec = ", BASER-FEC";
+
+ if ((fp->active_fc & FUN_PORT_CAP_PAUSE_MASK) == FUN_PORT_CAP_PAUSE_MASK)
+ pause = ", Tx/Rx PAUSE";
+ else if (fp->active_fc & FUN_PORT_CAP_RX_PAUSE)
+ pause = ", Rx PAUSE";
+ else if (fp->active_fc & FUN_PORT_CAP_TX_PAUSE)
+ pause = ", Tx PAUSE";
+
+ netdev_info(netdev, "Link up at %d %cb/s full-duplex%s%s\n",
+ speed, unit, pause, fec);
+ } else {
+ netdev_info(netdev, "Link down\n");
+ }
+}
+
+static int fun_adi_write(struct fun_dev *fdev, enum fun_admin_adi_attr attr,
+ unsigned int adi_id, const struct fun_adi_param *param)
+{
+ struct fun_admin_adi_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ADI,
+ sizeof(req)),
+ .u.write.subop = FUN_ADMIN_SUBOP_WRITE,
+ .u.write.attribute = attr,
+ .u.write.id = cpu_to_be32(adi_id),
+ .u.write.param = *param
+ };
+
+ return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0);
+}
+
+/* Configure RSS for the given port. @op determines whether a new RSS context
+ * is to be created or whether an existing one should be reconfigured. The
+ * remaining parameters specify the hashing algorithm, key, and indirection
+ * table.
+ *
+ * This initiates packet delivery to the Rx queues set in the indirection
+ * table.
+ */
+int fun_config_rss(struct net_device *dev, int algo, const u8 *key,
+ const u32 *qtable, u8 op)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int table_len = fp->indir_table_nentries;
+ unsigned int len = FUN_ETH_RSS_MAX_KEY_SIZE + sizeof(u32) * table_len;
+ struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs);
+ union {
+ struct {
+ struct fun_admin_rss_req req;
+ struct fun_dataop_gl gl;
+ };
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ __be32 *indir_tab;
+ u16 flags;
+ int rc;
+
+ if (op != FUN_ADMIN_SUBOP_CREATE && fp->rss_hw_id == FUN_HCI_ID_INVALID)
+ return -EINVAL;
+
+ flags = op == FUN_ADMIN_SUBOP_CREATE ?
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR : 0;
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_RSS,
+ sizeof(cmd));
+ cmd.req.u.create =
+ FUN_ADMIN_RSS_CREATE_REQ_INIT(op, flags, fp->rss_hw_id,
+ dev->dev_port, algo,
+ FUN_ETH_RSS_MAX_KEY_SIZE,
+ table_len, 0,
+ FUN_ETH_RSS_MAX_KEY_SIZE);
+ cmd.req.u.create.dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);
+ fun_dataop_gl_init(&cmd.gl, 0, 0, len, fp->rss_dma_addr);
+
+ /* write the key and indirection table into the RSS DMA area */
+ memcpy(fp->rss_cfg, key, FUN_ETH_RSS_MAX_KEY_SIZE);
+ indir_tab = fp->rss_cfg + FUN_ETH_RSS_MAX_KEY_SIZE;
+ for (rc = 0; rc < table_len; rc++)
+ *indir_tab++ = cpu_to_be32(rxqs[*qtable++]->hw_cqid);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
+ &cmd.rsp, sizeof(cmd.rsp), 0);
+ if (!rc && op == FUN_ADMIN_SUBOP_CREATE)
+ fp->rss_hw_id = be32_to_cpu(cmd.rsp.id);
+ return rc;
+}
+
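fun_config_rss() writes the RSS configuration into one contiguous DMA area: the hash key (FUN_ETH_RSS_MAX_KEY_SIZE bytes) followed by the indirection table as 32-bit big-endian hardware CQ IDs. Below is a hedged user-space sketch of that packing; the key size and table length are example numbers, not the driver's constants.

#include <arpa/inet.h>   /* htonl() */
#include <stdint.h>
#include <string.h>

/* Example layout only; sizes are illustrative. */
#define KEY_SIZE      40u
#define TABLE_ENTRIES 64u

static void pack_rss_cfg(uint8_t *dst, const uint8_t *key,
			 const uint32_t *cqids)
{
	unsigned int i;

	memcpy(dst, key, KEY_SIZE);             /* key first */
	for (i = 0; i < TABLE_ENTRIES; i++) {
		uint32_t be = htonl(cqids[i]);  /* device expects big endian */
		memcpy(dst + KEY_SIZE + i * sizeof(be), &be, sizeof(be));
	}
}

int main(void)
{
	uint8_t key[KEY_SIZE] = { 0 };
	uint32_t cqids[TABLE_ENTRIES] = { 0 };
	uint8_t cfg[KEY_SIZE + TABLE_ENTRIES * sizeof(uint32_t)];

	pack_rss_cfg(cfg, key, cqids);
	return 0;
}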
+/* Destroy the HW RSS context associated with the given port. This also stops
+ * all packet delivery to our Rx queues.
+ */
+static void fun_destroy_rss(struct funeth_priv *fp)
+{
+ if (fp->rss_hw_id != FUN_HCI_ID_INVALID) {
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_RSS, 0, fp->rss_hw_id);
+ fp->rss_hw_id = FUN_HCI_ID_INVALID;
+ }
+}
+
+static void fun_irq_aff_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct fun_irq *p = container_of(notify, struct fun_irq, aff_notify);
+
+ cpumask_copy(&p->affinity_mask, mask);
+}
+
+static void fun_irq_aff_release(struct kref __always_unused *ref)
+{
+}
+
+/* Allocate an IRQ structure, assign an MSI-X index and initial affinity to it,
+ * and add it to the IRQ XArray.
+ */
+static struct fun_irq *fun_alloc_qirq(struct funeth_priv *fp, unsigned int idx,
+ int node, unsigned int xa_idx_offset)
+{
+ struct fun_irq *irq;
+ int cpu, res;
+
+ cpu = cpumask_local_spread(idx, node);
+ node = cpu_to_mem(cpu);
+
+ irq = kzalloc_node(sizeof(*irq), GFP_KERNEL, node);
+ if (!irq)
+ return ERR_PTR(-ENOMEM);
+
+ res = fun_reserve_irqs(fp->fdev, 1, &irq->irq_idx);
+ if (res != 1)
+ goto free_irq;
+
+ res = xa_insert(&fp->irqs, idx + xa_idx_offset, irq, GFP_KERNEL);
+ if (res)
+ goto release_irq;
+
+ irq->irq = pci_irq_vector(fp->pdev, irq->irq_idx);
+ cpumask_set_cpu(cpu, &irq->affinity_mask);
+ irq->aff_notify.notify = fun_irq_aff_notify;
+ irq->aff_notify.release = fun_irq_aff_release;
+ irq->state = FUN_IRQ_INIT;
+ return irq;
+
+release_irq:
+ fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
+free_irq:
+ kfree(irq);
+ return ERR_PTR(res);
+}
+
+static void fun_free_qirq(struct funeth_priv *fp, struct fun_irq *irq)
+{
+ netif_napi_del(&irq->napi);
+ fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
+ kfree(irq);
+}
+
+/* Release the IRQs reserved for Tx/Rx queues that aren't being used. */
+static void fun_prune_queue_irqs(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int nreleased = 0;
+ struct fun_irq *irq;
+ unsigned long idx;
+
+ xa_for_each(&fp->irqs, idx, irq) {
+ if (irq->txq || irq->rxq) /* skip those in use */
+ continue;
+
+ xa_erase(&fp->irqs, idx);
+ fun_free_qirq(fp, irq);
+ nreleased++;
+ if (idx < fp->rx_irq_ofst)
+ fp->num_tx_irqs--;
+ else
+ fp->num_rx_irqs--;
+ }
+ netif_info(fp, intr, dev, "Released %u queue IRQs\n", nreleased);
+}
+
+/* Reserve IRQs, one per queue, to accommodate the requested queue numbers @ntx
+ * and @nrx. IRQs are added incrementally to those we already have.
+ * We hold on to allocated IRQs until garbage collection of unused IRQs is
+ * separately requested.
+ */
+static int fun_alloc_queue_irqs(struct net_device *dev, unsigned int ntx,
+ unsigned int nrx)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ int node = dev_to_node(&fp->pdev->dev);
+ struct fun_irq *irq;
+ unsigned int i;
+
+ for (i = fp->num_tx_irqs; i < ntx; i++) {
+ irq = fun_alloc_qirq(fp, i, node, 0);
+ if (IS_ERR(irq))
+ return PTR_ERR(irq);
+
+ fp->num_tx_irqs++;
+ netif_napi_add_tx(dev, &irq->napi, fun_txq_napi_poll);
+ }
+
+ for (i = fp->num_rx_irqs; i < nrx; i++) {
+ irq = fun_alloc_qirq(fp, i, node, fp->rx_irq_ofst);
+ if (IS_ERR(irq))
+ return PTR_ERR(irq);
+
+ fp->num_rx_irqs++;
+ netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll);
+ }
+
+ netif_info(fp, intr, dev, "Reserved %u/%u IRQs for Tx/Rx queues\n",
+ ntx, nrx);
+ return 0;
+}
+
+static void free_txqs(struct funeth_txq **txqs, unsigned int nqs,
+ unsigned int start, int state)
+{
+ unsigned int i;
+
+ for (i = start; i < nqs && txqs[i]; i++)
+ txqs[i] = funeth_txq_free(txqs[i], state);
+}
+
+static int alloc_txqs(struct net_device *dev, struct funeth_txq **txqs,
+ unsigned int nqs, unsigned int depth, unsigned int start,
+ int state)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int i;
+ int err;
+
+ for (i = start; i < nqs; i++) {
+ err = funeth_txq_create(dev, i, depth, xa_load(&fp->irqs, i),
+ state, &txqs[i]);
+ if (err) {
+ free_txqs(txqs, nqs, start, FUN_QSTATE_DESTROYED);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void free_rxqs(struct funeth_rxq **rxqs, unsigned int nqs,
+ unsigned int start, int state)
+{
+ unsigned int i;
+
+ for (i = start; i < nqs && rxqs[i]; i++)
+ rxqs[i] = funeth_rxq_free(rxqs[i], state);
+}
+
+static int alloc_rxqs(struct net_device *dev, struct funeth_rxq **rxqs,
+ unsigned int nqs, unsigned int ncqe, unsigned int nrqe,
+ unsigned int start, int state)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int i;
+ int err;
+
+ for (i = start; i < nqs; i++) {
+ err = funeth_rxq_create(dev, i, ncqe, nrqe,
+ xa_load(&fp->irqs, i + fp->rx_irq_ofst),
+ state, &rxqs[i]);
+ if (err) {
+ free_rxqs(rxqs, nqs, start, FUN_QSTATE_DESTROYED);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void free_xdpqs(struct funeth_txq **xdpqs, unsigned int nqs,
+ unsigned int start, int state)
+{
+ unsigned int i;
+
+ for (i = start; i < nqs && xdpqs[i]; i++)
+ xdpqs[i] = funeth_txq_free(xdpqs[i], state);
+
+ if (state == FUN_QSTATE_DESTROYED)
+ kfree(xdpqs);
+}
+
+static struct funeth_txq **alloc_xdpqs(struct net_device *dev, unsigned int nqs,
+ unsigned int depth, unsigned int start,
+ int state)
+{
+ struct funeth_txq **xdpqs;
+ unsigned int i;
+ int err;
+
+ xdpqs = kcalloc(nqs, sizeof(*xdpqs), GFP_KERNEL);
+ if (!xdpqs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = start; i < nqs; i++) {
+ err = funeth_txq_create(dev, i, depth, NULL, state, &xdpqs[i]);
+ if (err) {
+ free_xdpqs(xdpqs, nqs, start, FUN_QSTATE_DESTROYED);
+ return ERR_PTR(err);
+ }
+ }
+ return xdpqs;
+}
+
+static void fun_free_rings(struct net_device *netdev, struct fun_qset *qset)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_txq **xdpqs = qset->xdpqs;
+ struct funeth_rxq **rxqs = qset->rxqs;
+
+ /* qset may not specify any queues to operate on. In that case the
+ * currently installed queues are implied.
+ */
+ if (!rxqs) {
+ rxqs = rtnl_dereference(fp->rxqs);
+ xdpqs = rtnl_dereference(fp->xdpqs);
+ qset->txqs = fp->txqs;
+ qset->nrxqs = netdev->real_num_rx_queues;
+ qset->ntxqs = netdev->real_num_tx_queues;
+ qset->nxdpqs = fp->num_xdpqs;
+ }
+ if (!rxqs)
+ return;
+
+ if (rxqs == rtnl_dereference(fp->rxqs)) {
+ rcu_assign_pointer(fp->rxqs, NULL);
+ rcu_assign_pointer(fp->xdpqs, NULL);
+ synchronize_net();
+ fp->txqs = NULL;
+ }
+
+ free_rxqs(rxqs, qset->nrxqs, qset->rxq_start, qset->state);
+ free_txqs(qset->txqs, qset->ntxqs, qset->txq_start, qset->state);
+ free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, qset->state);
+ if (qset->state == FUN_QSTATE_DESTROYED)
+ kfree(rxqs);
+
+ /* Tell the caller which queues were operated on. */
+ qset->rxqs = rxqs;
+ qset->xdpqs = xdpqs;
+}
+
+static int fun_alloc_rings(struct net_device *netdev, struct fun_qset *qset)
+{
+ struct funeth_txq **xdpqs = NULL, **txqs;
+ struct funeth_rxq **rxqs;
+ int err;
+
+ err = fun_alloc_queue_irqs(netdev, qset->ntxqs, qset->nrxqs);
+ if (err)
+ return err;
+
+ rxqs = kcalloc(qset->ntxqs + qset->nrxqs, sizeof(*rxqs), GFP_KERNEL);
+ if (!rxqs)
+ return -ENOMEM;
+
+ if (qset->nxdpqs) {
+ xdpqs = alloc_xdpqs(netdev, qset->nxdpqs, qset->sq_depth,
+ qset->xdpq_start, qset->state);
+ if (IS_ERR(xdpqs)) {
+ err = PTR_ERR(xdpqs);
+ goto free_qvec;
+ }
+ }
+
+ txqs = (struct funeth_txq **)&rxqs[qset->nrxqs];
+ err = alloc_txqs(netdev, txqs, qset->ntxqs, qset->sq_depth,
+ qset->txq_start, qset->state);
+ if (err)
+ goto free_xdpqs;
+
+ err = alloc_rxqs(netdev, rxqs, qset->nrxqs, qset->cq_depth,
+ qset->rq_depth, qset->rxq_start, qset->state);
+ if (err)
+ goto free_txqs;
+
+ qset->rxqs = rxqs;
+ qset->txqs = txqs;
+ qset->xdpqs = xdpqs;
+ return 0;
+
+free_txqs:
+ free_txqs(txqs, qset->ntxqs, qset->txq_start, FUN_QSTATE_DESTROYED);
+free_xdpqs:
+ free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, FUN_QSTATE_DESTROYED);
+free_qvec:
+ kfree(rxqs);
+ return err;
+}
+
+/* Advance the queues to their next state. Presently this means creating them
+ * on the device.
+ */
+static int fun_advance_ring_state(struct net_device *dev, struct fun_qset *qset)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ int i, err;
+
+ for (i = 0; i < qset->nrxqs; i++) {
+ err = fun_rxq_create_dev(qset->rxqs[i],
+ xa_load(&fp->irqs,
+ i + fp->rx_irq_ofst));
+ if (err)
+ goto out;
+ }
+
+ for (i = 0; i < qset->ntxqs; i++) {
+ err = fun_txq_create_dev(qset->txqs[i], xa_load(&fp->irqs, i));
+ if (err)
+ goto out;
+ }
+
+ for (i = 0; i < qset->nxdpqs; i++) {
+ err = fun_txq_create_dev(qset->xdpqs[i], NULL);
+ if (err)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ fun_free_rings(dev, qset);
+ return err;
+}
+
+static int fun_port_create(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ union {
+ struct fun_admin_port_req req;
+ struct fun_admin_port_rsp rsp;
+ } cmd;
+ int rc;
+
+ if (fp->lport != INVALID_LPORT)
+ return 0;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
+ sizeof(cmd.req));
+ cmd.req.u.create =
+ FUN_ADMIN_PORT_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, 0,
+ netdev->dev_port);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd.rsp), 0);
+
+ if (!rc)
+ fp->lport = be16_to_cpu(cmd.rsp.u.create.lport);
+ return rc;
+}
+
+static int fun_port_destroy(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+
+ if (fp->lport == INVALID_LPORT)
+ return 0;
+
+ fp->lport = INVALID_LPORT;
+ return fun_res_destroy(fp->fdev, FUN_ADMIN_OP_PORT, 0,
+ netdev->dev_port);
+}
+
+static int fun_eth_create(struct funeth_priv *fp)
+{
+ union {
+ struct fun_admin_eth_req req;
+ struct fun_admin_generic_create_rsp rsp;
+ } cmd;
+ int rc;
+
+ cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ETH,
+ sizeof(cmd.req));
+ cmd.req.u.create = FUN_ADMIN_ETH_CREATE_REQ_INIT(
+ FUN_ADMIN_SUBOP_CREATE,
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR,
+ 0, fp->netdev->dev_port);
+
+ rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
+ sizeof(cmd.rsp), 0);
+ return rc ? rc : be32_to_cpu(cmd.rsp.id);
+}
+
+static int fun_vi_create(struct funeth_priv *fp)
+{
+ struct fun_admin_vi_req req = {
+ .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_VI,
+ sizeof(req)),
+ .u.create = FUN_ADMIN_VI_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE,
+ 0,
+ fp->netdev->dev_port,
+ fp->netdev->dev_port)
+ };
+
+ return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
+}
+
+/* Helper to create an ETH flow and bind an SQ to it.
+ * Returns the ETH id (>= 0) on success or a negative error.
+ */
+int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid)
+{
+ int rc, ethid;
+
+ ethid = fun_eth_create(fp);
+ if (ethid >= 0) {
+ rc = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_EPSQ, sqid,
+ FUN_ADMIN_BIND_TYPE_ETH, ethid);
+ if (rc) {
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, ethid);
+ ethid = rc;
+ }
+ }
+ return ethid;
+}
+
+static irqreturn_t fun_queue_irq_handler(int irq, void *data)
+{
+ struct fun_irq *p = data;
+
+ if (p->rxq) {
+ prefetch(p->rxq->next_cqe_info);
+ p->rxq->irq_cnt++;
+ }
+ napi_schedule_irqoff(&p->napi);
+ return IRQ_HANDLED;
+}
+
+static int fun_enable_irqs(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned long idx, last;
+ unsigned int qidx;
+ struct fun_irq *p;
+ const char *qtype;
+ int err;
+
+ xa_for_each(&fp->irqs, idx, p) {
+ if (p->txq) {
+ qtype = "tx";
+ qidx = p->txq->qidx;
+ } else if (p->rxq) {
+ qtype = "rx";
+ qidx = p->rxq->qidx;
+ } else {
+ continue;
+ }
+
+ if (p->state != FUN_IRQ_INIT)
+ continue;
+
+ snprintf(p->name, sizeof(p->name) - 1, "%s-%s-%u", dev->name,
+ qtype, qidx);
+ err = request_irq(p->irq, fun_queue_irq_handler, 0, p->name, p);
+ if (err) {
+ netdev_err(dev, "Failed to allocate IRQ %u, err %d\n",
+ p->irq, err);
+ goto unroll;
+ }
+ p->state = FUN_IRQ_REQUESTED;
+ }
+
+ xa_for_each(&fp->irqs, idx, p) {
+ if (p->state != FUN_IRQ_REQUESTED)
+ continue;
+ irq_set_affinity_notifier(p->irq, &p->aff_notify);
+ irq_set_affinity_and_hint(p->irq, &p->affinity_mask);
+ napi_enable(&p->napi);
+ p->state = FUN_IRQ_ENABLED;
+ }
+
+ return 0;
+
+unroll:
+ last = idx - 1;
+ xa_for_each_range(&fp->irqs, idx, p, 0, last)
+ if (p->state == FUN_IRQ_REQUESTED) {
+ free_irq(p->irq, p);
+ p->state = FUN_IRQ_INIT;
+ }
+
+ return err;
+}
+
+static void fun_disable_one_irq(struct fun_irq *irq)
+{
+ napi_disable(&irq->napi);
+ irq_set_affinity_notifier(irq->irq, NULL);
+ irq_update_affinity_hint(irq->irq, NULL);
+ free_irq(irq->irq, irq);
+ irq->state = FUN_IRQ_INIT;
+}
+
+static void fun_disable_irqs(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_irq *p;
+ unsigned long idx;
+
+ xa_for_each(&fp->irqs, idx, p)
+ if (p->state == FUN_IRQ_ENABLED)
+ fun_disable_one_irq(p);
+}
+
+static void fun_down(struct net_device *dev, struct fun_qset *qset)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+
+ /* If we don't have queues the data path is already down.
+ * Note netif_running(dev) may be true.
+ */
+ if (!rcu_access_pointer(fp->rxqs))
+ return;
+
+ /* It is also down if the queues aren't on the device. */
+ if (fp->txqs[0]->init_state >= FUN_QSTATE_INIT_FULL) {
+ netif_info(fp, ifdown, dev,
+ "Tearing down data path on device\n");
+ fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_DISABLE, 0);
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+
+ fun_destroy_rss(fp);
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port);
+ fun_disable_irqs(dev);
+ }
+
+ fun_free_rings(dev, qset);
+}
+
+static int fun_up(struct net_device *dev, struct fun_qset *qset)
+{
+ static const int port_keys[] = {
+ FUN_ADMIN_PORT_KEY_STATS_DMA_LOW,
+ FUN_ADMIN_PORT_KEY_STATS_DMA_HIGH,
+ FUN_ADMIN_PORT_KEY_ENABLE
+ };
+
+ struct funeth_priv *fp = netdev_priv(dev);
+ u64 vals[] = {
+ lower_32_bits(fp->stats_dma_addr),
+ upper_32_bits(fp->stats_dma_addr),
+ FUN_PORT_FLAG_ENABLE_NOTIFY
+ };
+ int err;
+
+ netif_info(fp, ifup, dev, "Setting up data path on device\n");
+
+ if (qset->rxqs[0]->init_state < FUN_QSTATE_INIT_FULL) {
+ err = fun_advance_ring_state(dev, qset);
+ if (err)
+ return err;
+ }
+
+ err = fun_vi_create(fp);
+ if (err)
+ goto free_queues;
+
+ fp->txqs = qset->txqs;
+ rcu_assign_pointer(fp->rxqs, qset->rxqs);
+ rcu_assign_pointer(fp->xdpqs, qset->xdpqs);
+
+ err = fun_enable_irqs(dev);
+ if (err)
+ goto destroy_vi;
+
+ if (fp->rss_cfg) {
+ err = fun_config_rss(dev, fp->hash_algo, fp->rss_key,
+ fp->indir_table, FUN_ADMIN_SUBOP_CREATE);
+ } else {
+ /* The non-RSS case has only 1 queue. */
+ err = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_VI, dev->dev_port,
+ FUN_ADMIN_BIND_TYPE_EPCQ,
+ qset->rxqs[0]->hw_cqid);
+ }
+ if (err)
+ goto disable_irqs;
+
+ err = fun_port_write_cmds(fp, 3, port_keys, vals);
+ if (err)
+ goto free_rss;
+
+ netif_tx_start_all_queues(dev);
+ return 0;
+
+free_rss:
+ fun_destroy_rss(fp);
+disable_irqs:
+ fun_disable_irqs(dev);
+destroy_vi:
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port);
+free_queues:
+ fun_free_rings(dev, qset);
+ return err;
+}
+
+static int funeth_open(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct fun_qset qset = {
+ .nrxqs = netdev->real_num_rx_queues,
+ .ntxqs = netdev->real_num_tx_queues,
+ .nxdpqs = fp->num_xdpqs,
+ .cq_depth = fp->cq_depth,
+ .rq_depth = fp->rq_depth,
+ .sq_depth = fp->sq_depth,
+ .state = FUN_QSTATE_INIT_FULL,
+ };
+ int rc;
+
+ rc = fun_alloc_rings(netdev, &qset);
+ if (rc)
+ return rc;
+
+ rc = fun_up(netdev, &qset);
+ if (rc) {
+ qset.state = FUN_QSTATE_DESTROYED;
+ fun_free_rings(netdev, &qset);
+ }
+
+ return rc;
+}
+
+static int funeth_close(struct net_device *netdev)
+{
+ struct fun_qset qset = { .state = FUN_QSTATE_DESTROYED };
+
+ fun_down(netdev, &qset);
+ return 0;
+}
+
+static void fun_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ unsigned int i, start;
+
+ stats->tx_packets = fp->tx_packets;
+ stats->tx_bytes = fp->tx_bytes;
+ stats->tx_dropped = fp->tx_dropped;
+
+ stats->rx_packets = fp->rx_packets;
+ stats->rx_bytes = fp->rx_bytes;
+ stats->rx_dropped = fp->rx_dropped;
+
+ rcu_read_lock();
+ rxqs = rcu_dereference(fp->rxqs);
+ if (!rxqs)
+ goto unlock;
+
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ struct funeth_txq_stats txs;
+
+ FUN_QSTAT_READ(fp->txqs[i], start, txs);
+ stats->tx_packets += txs.tx_pkts;
+ stats->tx_bytes += txs.tx_bytes;
+ stats->tx_dropped += txs.tx_map_err;
+ }
+
+ for (i = 0; i < netdev->real_num_rx_queues; i++) {
+ struct funeth_rxq_stats rxs;
+
+ FUN_QSTAT_READ(rxqs[i], start, rxs);
+ stats->rx_packets += rxs.rx_pkts;
+ stats->rx_bytes += rxs.rx_bytes;
+ stats->rx_dropped += rxs.rx_map_err + rxs.rx_mem_drops;
+ }
+
+ xdpqs = rcu_dereference(fp->xdpqs);
+ if (!xdpqs)
+ goto unlock;
+
+ for (i = 0; i < fp->num_xdpqs; i++) {
+ struct funeth_txq_stats txs;
+
+ FUN_QSTAT_READ(xdpqs[i], start, txs);
+ stats->tx_packets += txs.tx_pkts;
+ stats->tx_bytes += txs.tx_bytes;
+ }
+unlock:
+ rcu_read_unlock();
+}
+
+static int fun_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ int rc;
+
+ rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MTU, new_mtu);
+ if (!rc)
+ netdev->mtu = new_mtu;
+ return rc;
+}
+
+static int fun_set_macaddr(struct net_device *netdev, void *addr)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+ int rc;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, saddr->sa_data))
+ return 0;
+
+ rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR,
+ ether_addr_to_u64(saddr->sa_data));
+ if (!rc)
+ eth_hw_addr_set(netdev, saddr->sa_data);
+ return rc;
+}
+
+static int fun_get_port_attributes(struct net_device *netdev)
+{
+ static const int keys[] = {
+ FUN_ADMIN_PORT_KEY_MACADDR, FUN_ADMIN_PORT_KEY_CAPABILITIES,
+ FUN_ADMIN_PORT_KEY_ADVERT, FUN_ADMIN_PORT_KEY_MTU
+ };
+ static const int phys_keys[] = {
+ FUN_ADMIN_PORT_KEY_LANE_ATTRS,
+ };
+
+ struct funeth_priv *fp = netdev_priv(netdev);
+ u64 data[ARRAY_SIZE(keys)];
+ u8 mac[ETH_ALEN];
+ int i, rc;
+
+ rc = fun_port_read_cmds(fp, ARRAY_SIZE(keys), keys, data);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < ARRAY_SIZE(keys); i++) {
+ switch (keys[i]) {
+ case FUN_ADMIN_PORT_KEY_MACADDR:
+ u64_to_ether_addr(data[i], mac);
+ if (is_zero_ether_addr(mac)) {
+ eth_hw_addr_random(netdev);
+ } else if (is_valid_ether_addr(mac)) {
+ eth_hw_addr_set(netdev, mac);
+ } else {
+ netdev_err(netdev,
+ "device provided a bad MAC address %pM\n",
+ mac);
+ return -EINVAL;
+ }
+ break;
+
+ case FUN_ADMIN_PORT_KEY_CAPABILITIES:
+ fp->port_caps = data[i];
+ break;
+
+ case FUN_ADMIN_PORT_KEY_ADVERT:
+ fp->advertising = data[i];
+ break;
+
+ case FUN_ADMIN_PORT_KEY_MTU:
+ netdev->mtu = data[i];
+ break;
+ }
+ }
+
+ if (!(fp->port_caps & FUN_PORT_CAP_VPORT)) {
+ rc = fun_port_read_cmds(fp, ARRAY_SIZE(phys_keys), phys_keys,
+ data);
+ if (rc)
+ return rc;
+
+ fp->lane_attrs = data[0];
+ }
+
+ if (netdev->addr_assign_type == NET_ADDR_RANDOM)
+ return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR,
+ ether_addr_to_u64(netdev->dev_addr));
+ return 0;
+}
+
+static int fun_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ const struct funeth_priv *fp = netdev_priv(dev);
+
+ return copy_to_user(ifr->ifr_data, &fp->hwtstamp_cfg,
+ sizeof(fp->hwtstamp_cfg)) ? -EFAULT : 0;
+}
+
+static int fun_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct hwtstamp_config cfg;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* no TX HW timestamps */
+ cfg.tx_type = HWTSTAMP_TX_OFF;
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ fp->hwtstamp_cfg = cfg;
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int fun_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return fun_hwtstamp_set(dev, ifr);
+ case SIOCGHWTSTAMP:
+ return fun_hwtstamp_get(dev, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Prepare the queues for XDP. */
+static int fun_enter_xdp(struct net_device *dev, struct bpf_prog *prog)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ unsigned int i, nqs = num_online_cpus();
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ int err;
+
+ xdpqs = alloc_xdpqs(dev, nqs, fp->sq_depth, 0, FUN_QSTATE_INIT_FULL);
+ if (IS_ERR(xdpqs))
+ return PTR_ERR(xdpqs);
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ for (i = 0; i < dev->real_num_rx_queues; i++) {
+ err = fun_rxq_set_bpf(rxqs[i], prog);
+ if (err)
+ goto out;
+ }
+
+ fp->num_xdpqs = nqs;
+ rcu_assign_pointer(fp->xdpqs, xdpqs);
+ return 0;
+out:
+ while (i--)
+ fun_rxq_set_bpf(rxqs[i], NULL);
+
+ free_xdpqs(xdpqs, nqs, 0, FUN_QSTATE_DESTROYED);
+ return err;
+}
+
+/* Set the queues for non-XDP operation. */
+static void fun_end_xdp(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_txq **xdpqs;
+ struct funeth_rxq **rxqs;
+ unsigned int i;
+
+ xdpqs = rtnl_dereference(fp->xdpqs);
+ rcu_assign_pointer(fp->xdpqs, NULL);
+ synchronize_net();
+ /* at this point both Rx and Tx XDP processing has ended */
+
+ free_xdpqs(xdpqs, fp->num_xdpqs, 0, FUN_QSTATE_DESTROYED);
+ fp->num_xdpqs = 0;
+
+ rxqs = rtnl_dereference(fp->rxqs);
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ fun_rxq_set_bpf(rxqs[i], NULL);
+}
+
+#define XDP_MAX_MTU \
+ (PAGE_SIZE - FUN_XDP_HEADROOM - VLAN_ETH_HLEN - FUN_RX_TAILROOM)
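+
+/* An XDP packet must fit in a single page together with the XDP headroom, a
+ * VLAN-tagged Ethernet header, and the Rx tailroom; that budget is where the
+ * MTU cap above comes from, and fun_xdp_setup() rejects programs while the
+ * current MTU exceeds it.
+ */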
+
+static int fun_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct bpf_prog *old_prog, *prog = xdp->prog;
+ struct funeth_priv *fp = netdev_priv(dev);
+ int i, err;
+
+ /* XDP uses at most one buffer */
+ if (prog && dev->mtu > XDP_MAX_MTU) {
+ netdev_err(dev, "device MTU %u too large for XDP\n", dev->mtu);
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Device MTU too large for XDP");
+ return -EINVAL;
+ }
+
+ if (!netif_running(dev)) {
+ fp->num_xdpqs = prog ? num_online_cpus() : 0;
+ } else if (prog && !fp->xdp_prog) {
+ err = fun_enter_xdp(dev, prog);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Failed to set queues for XDP.");
+ return err;
+ }
+ } else if (!prog && fp->xdp_prog) {
+ fun_end_xdp(dev);
+ } else {
+ struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs);
+
+ for (i = 0; i < dev->real_num_rx_queues; i++)
+ WRITE_ONCE(rxqs[i]->xdp_prog, prog);
+ }
+
+ if (prog)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+
+ dev->max_mtu = prog ? XDP_MAX_MTU : FUN_MAX_MTU;
+ old_prog = xchg(&fp->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
+
+static int fun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return fun_xdp_setup(dev, xdp);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int fun_init_vports(struct fun_ethdev *ed, unsigned int n)
+{
+ if (ed->num_vports)
+ return -EINVAL;
+
+ ed->vport_info = kvcalloc(n, sizeof(*ed->vport_info), GFP_KERNEL);
+ if (!ed->vport_info)
+ return -ENOMEM;
+ ed->num_vports = n;
+ return 0;
+}
+
+static void fun_free_vports(struct fun_ethdev *ed)
+{
+ kvfree(ed->vport_info);
+ ed->vport_info = NULL;
+ ed->num_vports = 0;
+}
+
+static struct fun_vport_info *fun_get_vport(struct fun_ethdev *ed,
+ unsigned int vport)
+{
+ if (!ed->vport_info || vport >= ed->num_vports)
+ return NULL;
+
+ return ed->vport_info + vport;
+}
+
+static int fun_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_adi_param mac_param = {};
+ struct fun_dev *fdev = fp->fdev;
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ struct fun_vport_info *vi;
+ int rc = -EINVAL;
+
+ if (is_multicast_ether_addr(mac))
+ return -EINVAL;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ mac_param.u.mac = FUN_ADI_MAC_INIT(ether_addr_to_u64(mac));
+ rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_MACADDR, vf + 1,
+ &mac_param);
+ if (!rc)
+ ether_addr_copy(vi->mac, mac);
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return rc;
+}
+
+static int fun_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_adi_param vlan_param = {};
+ struct fun_dev *fdev = fp->fdev;
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ struct fun_vport_info *vi;
+ int rc = -EINVAL;
+
+ if (vlan > 4095 || qos > 7)
+ return -EINVAL;
+ if (vlan_proto && vlan_proto != htons(ETH_P_8021Q) &&
+ vlan_proto != htons(ETH_P_8021AD))
+ return -EINVAL;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ vlan_param.u.vlan = FUN_ADI_VLAN_INIT(be16_to_cpu(vlan_proto),
+ ((u16)qos << VLAN_PRIO_SHIFT) | vlan);
+ rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_VLAN, vf + 1, &vlan_param);
+ if (!rc) {
+ vi->vlan = vlan;
+ vi->qos = qos;
+ vi->vlan_proto = vlan_proto;
+ }
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return rc;
+}
+
+static int fun_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_adi_param rate_param = {};
+ struct fun_dev *fdev = fp->fdev;
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ struct fun_vport_info *vi;
+ int rc = -EINVAL;
+
+ if (min_tx_rate)
+ return -EINVAL;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ rate_param.u.rate = FUN_ADI_RATE_INIT(max_tx_rate);
+ rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_RATE, vf + 1, &rate_param);
+ if (!rc)
+ vi->max_rate = max_tx_rate;
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return rc;
+}
+
+static int fun_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_ethdev *ed = to_fun_ethdev(fp->fdev);
+ const struct fun_vport_info *vi;
+
+ mutex_lock(&ed->state_mutex);
+ vi = fun_get_vport(ed, vf);
+ if (!vi)
+ goto unlock;
+
+ memset(ivi, 0, sizeof(*ivi));
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, vi->mac);
+ ivi->vlan = vi->vlan;
+ ivi->qos = vi->qos;
+ ivi->vlan_proto = vi->vlan_proto;
+ ivi->max_tx_rate = vi->max_rate;
+ ivi->spoofchk = vi->spoofchk;
+unlock:
+ mutex_unlock(&ed->state_mutex);
+ return vi ? 0 : -EINVAL;
+}
+
+static void fun_uninit(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+
+ fun_prune_queue_irqs(dev);
+ xa_destroy(&fp->irqs);
+}
+
+static const struct net_device_ops fun_netdev_ops = {
+ .ndo_open = funeth_open,
+ .ndo_stop = funeth_close,
+ .ndo_start_xmit = fun_start_xmit,
+ .ndo_get_stats64 = fun_get_stats64,
+ .ndo_change_mtu = fun_change_mtu,
+ .ndo_set_mac_address = fun_set_macaddr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = fun_ioctl,
+ .ndo_uninit = fun_uninit,
+ .ndo_bpf = fun_xdp,
+ .ndo_xdp_xmit = fun_xdp_xmit_frames,
+ .ndo_set_vf_mac = fun_set_vf_mac,
+ .ndo_set_vf_vlan = fun_set_vf_vlan,
+ .ndo_set_vf_rate = fun_set_vf_rate,
+ .ndo_get_vf_config = fun_get_vf_config,
+};
+
+#define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
+ NETIF_F_GSO_UDP_L4)
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_HW_CSUM | TSO_FLAGS | \
+ GSO_ENCAP_FLAGS | NETIF_F_HIGHDMA)
+
+static void fun_dflt_rss_indir(struct funeth_priv *fp, unsigned int nrx)
+{
+ unsigned int i;
+
+ for (i = 0; i < fp->indir_table_nentries; i++)
+ fp->indir_table[i] = ethtool_rxfh_indir_default(i, nrx);
+}
+
+/* Reset the RSS indirection table to equal distribution across the current
+ * number of Rx queues. Called at init time and whenever the number of Rx
+ * queues changes subsequently. Note that this may also resize the indirection
+ * table.
+ */
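+/* Worked example (using a hypothetical table maximum of 64 entries): with
+ * nrx = 6 the table is resized to rounddown(64, 6) = 60 entries and entry i
+ * becomes ethtool_rxfh_indir_default(i, 6) = i % 6, so each of the 6 Rx
+ * queues occupies exactly 10 slots.
+ */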
+static void fun_reset_rss_indir(struct net_device *dev, unsigned int nrx)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+
+ if (!fp->rss_cfg)
+ return;
+
+ /* Set the table size to the max possible that allows an equal number
+ * of occurrences of each CQ.
+ */
+ fp->indir_table_nentries = rounddown(FUN_ETH_RSS_MAX_INDIR_ENT, nrx);
+ fun_dflt_rss_indir(fp, nrx);
+}
+
+/* Update the RSS LUT to contain only queues in [0, nrx). Normally this will
+ * update the LUT to an equal distribution among nrx queues. If @only_if_needed
+ * is set the LUT is left unchanged if it already does not reference any queues
+ * >= nrx.
+ */
+static int fun_rss_set_qnum(struct net_device *dev, unsigned int nrx,
+ bool only_if_needed)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ u32 old_lut[FUN_ETH_RSS_MAX_INDIR_ENT];
+ unsigned int i, oldsz;
+ int err;
+
+ if (!fp->rss_cfg)
+ return 0;
+
+ if (only_if_needed) {
+ for (i = 0; i < fp->indir_table_nentries; i++)
+ if (fp->indir_table[i] >= nrx)
+ break;
+
+ if (i >= fp->indir_table_nentries)
+ return 0;
+ }
+
+ memcpy(old_lut, fp->indir_table, sizeof(old_lut));
+ oldsz = fp->indir_table_nentries;
+ fun_reset_rss_indir(dev, nrx);
+
+ err = fun_config_rss(dev, fp->hash_algo, fp->rss_key,
+ fp->indir_table, FUN_ADMIN_SUBOP_MODIFY);
+ if (!err)
+ return 0;
+
+ memcpy(fp->indir_table, old_lut, sizeof(old_lut));
+ fp->indir_table_nentries = oldsz;
+ return err;
+}
+
+/* Allocate the DMA area for the RSS configuration commands to the device, and
+ * initialize the hash, hash key, indirection table size and its entries to
+ * their defaults. The indirection table defaults to equal distribution across
+ * the Rx queues.
+ */
+static int fun_init_rss(struct net_device *dev)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ size_t size = sizeof(fp->rss_key) + sizeof(fp->indir_table);
+
+ fp->rss_hw_id = FUN_HCI_ID_INVALID;
+ if (!(fp->port_caps & FUN_PORT_CAP_OFFLOADS))
+ return 0;
+
+ fp->rss_cfg = dma_alloc_coherent(&fp->pdev->dev, size,
+ &fp->rss_dma_addr, GFP_KERNEL);
+ if (!fp->rss_cfg)
+ return -ENOMEM;
+
+ fp->hash_algo = FUN_ETH_RSS_ALG_TOEPLITZ;
+ netdev_rss_key_fill(fp->rss_key, sizeof(fp->rss_key));
+ fun_reset_rss_indir(dev, dev->real_num_rx_queues);
+ return 0;
+}
+
+static void fun_free_rss(struct funeth_priv *fp)
+{
+ if (fp->rss_cfg) {
+ dma_free_coherent(&fp->pdev->dev,
+ sizeof(fp->rss_key) + sizeof(fp->indir_table),
+ fp->rss_cfg, fp->rss_dma_addr);
+ fp->rss_cfg = NULL;
+ }
+}
+
+void fun_set_ring_count(struct net_device *netdev, unsigned int ntx,
+ unsigned int nrx)
+{
+ netif_set_real_num_tx_queues(netdev, ntx);
+ if (nrx != netdev->real_num_rx_queues) {
+ netif_set_real_num_rx_queues(netdev, nrx);
+ fun_reset_rss_indir(netdev, nrx);
+ }
+}
+
+static int fun_init_stats_area(struct funeth_priv *fp)
+{
+ unsigned int nstats;
+
+ if (!(fp->port_caps & FUN_PORT_CAP_STATS))
+ return 0;
+
+ nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX +
+ PORT_MAC_FEC_STATS_MAX;
+
+ fp->stats = dma_alloc_coherent(&fp->pdev->dev, nstats * sizeof(u64),
+ &fp->stats_dma_addr, GFP_KERNEL);
+ if (!fp->stats)
+ return -ENOMEM;
+ return 0;
+}
+
+static void fun_free_stats_area(struct funeth_priv *fp)
+{
+ unsigned int nstats;
+
+ if (fp->stats) {
+ nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX +
+ PORT_MAC_FEC_STATS_MAX;
+ dma_free_coherent(&fp->pdev->dev, nstats * sizeof(u64),
+ fp->stats, fp->stats_dma_addr);
+ fp->stats = NULL;
+ }
+}
+
+static int fun_dl_port_register(struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ struct devlink *dl = priv_to_devlink(fp->fdev);
+ struct devlink_port_attrs attrs = {};
+ unsigned int idx;
+
+ if (fp->port_caps & FUN_PORT_CAP_VPORT) {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
+ idx = fp->lport;
+ } else {
+ idx = netdev->dev_port;
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.lanes = fp->lane_attrs & 7;
+ if (fp->lane_attrs & FUN_PORT_LANE_SPLIT) {
+ attrs.split = 1;
+ attrs.phys.port_number = fp->lport & ~3;
+ attrs.phys.split_subport_number = fp->lport & 3;
+ } else {
+ attrs.phys.port_number = fp->lport;
+ }
+ }
+
+ devlink_port_attrs_set(&fp->dl_port, &attrs);
+
+ return devlink_port_register(dl, &fp->dl_port, idx);
+}
+
+/* Determine the max Tx/Rx queues for a port. */
+static int fun_max_qs(struct fun_ethdev *ed, unsigned int *ntx,
+ unsigned int *nrx)
+{
+ int neth;
+
+ if (ed->num_ports > 1 || is_kdump_kernel()) {
+ *ntx = 1;
+ *nrx = 1;
+ return 0;
+ }
+
+ neth = fun_get_res_count(&ed->fdev, FUN_ADMIN_OP_ETH);
+ if (neth < 0)
+ return neth;
+
+ /* We determine the max number of queues based on the CPU
+ * cores, device interrupts and queues, RSS size, and device Tx flows.
+ *
+ * - At least 1 Rx and 1 Tx queue.
+ * - At most 1 Rx/Tx queue per core.
+ * - Each Rx/Tx queue needs 1 SQ.
+ */
+ *ntx = min(ed->nsqs_per_port - 1, num_online_cpus());
+ *nrx = *ntx;
+ if (*ntx > neth)
+ *ntx = neth;
+ if (*nrx > FUN_ETH_RSS_MAX_INDIR_ENT)
+ *nrx = FUN_ETH_RSS_MAX_INDIR_ENT;
+ return 0;
+}
+
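+/* Example of the split below: if ntx and nrx both start out capped at 8 and
+ * nsqs = 9, then ntx <= nrx, so ntx = min(8, 9 / 2) = 4 and
+ * nrx = min(8, 9 - 4) = 5, keeping ntx + nrx within the available SQs.
+ */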
+static void fun_queue_defaults(struct net_device *dev, unsigned int nsqs)
+{
+ unsigned int ntx, nrx;
+
+ ntx = min(dev->num_tx_queues, FUN_DFLT_QUEUES);
+ nrx = min(dev->num_rx_queues, FUN_DFLT_QUEUES);
+ if (ntx <= nrx) {
+ ntx = min(ntx, nsqs / 2);
+ nrx = min(nrx, nsqs - ntx);
+ } else {
+ nrx = min(nrx, nsqs / 2);
+ ntx = min(ntx, nsqs - nrx);
+ }
+
+ netif_set_real_num_tx_queues(dev, ntx);
+ netif_set_real_num_rx_queues(dev, nrx);
+}
+
+/* Replace the existing Rx/Tx/XDP queues with an equal number of queues with
+ * different settings, e.g. depth. This is a disruptive replacement that
+ * temporarily shuts down the data path and should be limited to changes that
+ * can't be applied to live queues. The old queues are always discarded.
+ */
+int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs,
+ struct netlink_ext_ack *extack)
+{
+ struct fun_qset oldqs = { .state = FUN_QSTATE_DESTROYED };
+ struct funeth_priv *fp = netdev_priv(dev);
+ int err;
+
+ newqs->nrxqs = dev->real_num_rx_queues;
+ newqs->ntxqs = dev->real_num_tx_queues;
+ newqs->nxdpqs = fp->num_xdpqs;
+ newqs->state = FUN_QSTATE_INIT_SW;
+ err = fun_alloc_rings(dev, newqs);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unable to allocate memory for new queues, keeping current settings");
+ return err;
+ }
+
+ fun_down(dev, &oldqs);
+
+ err = fun_up(dev, newqs);
+ if (!err)
+ return 0;
+
+ /* The new queues couldn't be installed. We do not retry the old queues
+ * as they look the same to the device as the new queues and would
+ * similarly fail.
+ */
+ newqs->state = FUN_QSTATE_DESTROYED;
+ fun_free_rings(dev, newqs);
+ NL_SET_ERR_MSG_MOD(extack, "Unable to restore the data path with the new queues.");
+ return err;
+}
+
+/* Change the number of Rx/Tx queues of a device while it is up. This is done
+ * by incrementally adding/removing queues to meet the new requirements while
+ * handling ongoing traffic.
+ */
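+/* Rough order of operations below: allocate and IRQ-enable the new queue
+ * set, carry over the queues common to both sets, shrink the RSS table and
+ * the real queue counts before publishing the new set when queues are being
+ * removed, publish it with an RCU pointer swap, grow the counts afterwards
+ * when queues are being added, and finally free the old set and prune any
+ * IRQs that are no longer used.
+ */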
+int fun_change_num_queues(struct net_device *dev, unsigned int ntx,
+ unsigned int nrx)
+{
+ unsigned int keep_tx = min(dev->real_num_tx_queues, ntx);
+ unsigned int keep_rx = min(dev->real_num_rx_queues, nrx);
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct fun_qset oldqs = {
+ .rxqs = rtnl_dereference(fp->rxqs),
+ .txqs = fp->txqs,
+ .nrxqs = dev->real_num_rx_queues,
+ .ntxqs = dev->real_num_tx_queues,
+ .rxq_start = keep_rx,
+ .txq_start = keep_tx,
+ .state = FUN_QSTATE_DESTROYED
+ };
+ struct fun_qset newqs = {
+ .nrxqs = nrx,
+ .ntxqs = ntx,
+ .rxq_start = keep_rx,
+ .txq_start = keep_tx,
+ .cq_depth = fp->cq_depth,
+ .rq_depth = fp->rq_depth,
+ .sq_depth = fp->sq_depth,
+ .state = FUN_QSTATE_INIT_FULL
+ };
+ int i, err;
+
+ err = fun_alloc_rings(dev, &newqs);
+ if (err)
+ goto free_irqs;
+
+ err = fun_enable_irqs(dev); /* of any newly added queues */
+ if (err)
+ goto free_rings;
+
+ /* copy the queues we are keeping to the new set */
+ memcpy(newqs.rxqs, oldqs.rxqs, keep_rx * sizeof(*oldqs.rxqs));
+ memcpy(newqs.txqs, fp->txqs, keep_tx * sizeof(*fp->txqs));
+
+ if (nrx < dev->real_num_rx_queues) {
+ err = fun_rss_set_qnum(dev, nrx, true);
+ if (err)
+ goto disable_tx_irqs;
+
+ for (i = nrx; i < dev->real_num_rx_queues; i++)
+ fun_disable_one_irq(container_of(oldqs.rxqs[i]->napi,
+ struct fun_irq, napi));
+
+ netif_set_real_num_rx_queues(dev, nrx);
+ }
+
+ if (ntx < dev->real_num_tx_queues)
+ netif_set_real_num_tx_queues(dev, ntx);
+
+ rcu_assign_pointer(fp->rxqs, newqs.rxqs);
+ fp->txqs = newqs.txqs;
+ synchronize_net();
+
+ if (ntx > dev->real_num_tx_queues)
+ netif_set_real_num_tx_queues(dev, ntx);
+
+ if (nrx > dev->real_num_rx_queues) {
+ netif_set_real_num_rx_queues(dev, nrx);
+ fun_rss_set_qnum(dev, nrx, false);
+ }
+
+ /* disable interrupts of any excess Tx queues */
+ for (i = keep_tx; i < oldqs.ntxqs; i++)
+ fun_disable_one_irq(oldqs.txqs[i]->irq);
+
+ fun_free_rings(dev, &oldqs);
+ fun_prune_queue_irqs(dev);
+ return 0;
+
+disable_tx_irqs:
+ for (i = oldqs.ntxqs; i < ntx; i++)
+ fun_disable_one_irq(newqs.txqs[i]->irq);
+free_rings:
+ newqs.state = FUN_QSTATE_DESTROYED;
+ fun_free_rings(dev, &newqs);
+free_irqs:
+ fun_prune_queue_irqs(dev);
+ return err;
+}
+
+static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid)
+{
+ struct fun_dev *fdev = &ed->fdev;
+ struct net_device *netdev;
+ struct funeth_priv *fp;
+ unsigned int ntx, nrx;
+ int rc;
+
+ rc = fun_max_qs(ed, &ntx, &nrx);
+ if (rc)
+ return rc;
+
+ netdev = alloc_etherdev_mqs(sizeof(*fp), ntx, nrx);
+ if (!netdev) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ netdev->dev_port = portid;
+ fun_queue_defaults(netdev, ed->nsqs_per_port);
+
+ fp = netdev_priv(netdev);
+ fp->fdev = fdev;
+ fp->pdev = to_pci_dev(fdev->dev);
+ fp->netdev = netdev;
+ xa_init(&fp->irqs);
+ fp->rx_irq_ofst = ntx;
+ seqcount_init(&fp->link_seq);
+
+ fp->lport = INVALID_LPORT;
+ rc = fun_port_create(netdev);
+ if (rc)
+ goto free_netdev;
+
+ /* bind port to admin CQ for async events */
+ rc = fun_bind(fdev, FUN_ADMIN_BIND_TYPE_PORT, portid,
+ FUN_ADMIN_BIND_TYPE_EPCQ, 0);
+ if (rc)
+ goto destroy_port;
+
+ rc = fun_get_port_attributes(netdev);
+ if (rc)
+ goto destroy_port;
+
+ rc = fun_init_rss(netdev);
+ if (rc)
+ goto destroy_port;
+
+ rc = fun_init_stats_area(fp);
+ if (rc)
+ goto free_rss;
+
+ SET_NETDEV_DEV(netdev, fdev->dev);
+ SET_NETDEV_DEVLINK_PORT(netdev, &fp->dl_port);
+ netdev->netdev_ops = &fun_netdev_ops;
+
+ netdev->hw_features = NETIF_F_SG | NETIF_F_RXHASH | NETIF_F_RXCSUM;
+ if (fp->port_caps & FUN_PORT_CAP_OFFLOADS)
+ netdev->hw_features |= NETIF_F_HW_CSUM | TSO_FLAGS;
+ if (fp->port_caps & FUN_PORT_CAP_ENCAP_OFFLOADS)
+ netdev->hw_features |= GSO_ENCAP_FLAGS;
+
+ netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;
+ netdev->vlan_features = netdev->features & VLAN_FEAT;
+ netdev->mpls_features = netdev->vlan_features;
+ netdev->hw_enc_features = netdev->hw_features;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = FUN_MAX_MTU;
+
+ fun_set_ethtool_ops(netdev);
+
+ /* configurable parameters */
+ fp->sq_depth = min(SQ_DEPTH, fdev->q_depth);
+ fp->cq_depth = min(CQ_DEPTH, fdev->q_depth);
+ fp->rq_depth = min_t(unsigned int, RQ_DEPTH, fdev->q_depth);
+ fp->rx_coal_usec = CQ_INTCOAL_USEC;
+ fp->rx_coal_count = CQ_INTCOAL_NPKT;
+ fp->tx_coal_usec = SQ_INTCOAL_USEC;
+ fp->tx_coal_count = SQ_INTCOAL_NPKT;
+ fp->cq_irq_db = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count);
+
+ rc = fun_dl_port_register(netdev);
+ if (rc)
+ goto free_stats;
+
+ fp->ktls_id = FUN_HCI_ID_INVALID;
+ fun_ktls_init(netdev); /* optional, failure OK */
+
+ netif_carrier_off(netdev);
+ ed->netdevs[portid] = netdev;
+ rc = register_netdev(netdev);
+ if (rc)
+ goto unreg_devlink;
+ return 0;
+
+unreg_devlink:
+ ed->netdevs[portid] = NULL;
+ fun_ktls_cleanup(fp);
+ devlink_port_unregister(&fp->dl_port);
+free_stats:
+ fun_free_stats_area(fp);
+free_rss:
+ fun_free_rss(fp);
+destroy_port:
+ fun_port_destroy(netdev);
+free_netdev:
+ free_netdev(netdev);
+done:
+ dev_err(fdev->dev, "couldn't allocate port %u, error %d", portid, rc);
+ return rc;
+}
+
+static void fun_destroy_netdev(struct net_device *netdev)
+{
+ struct funeth_priv *fp;
+
+ fp = netdev_priv(netdev);
+ unregister_netdev(netdev);
+ devlink_port_unregister(&fp->dl_port);
+ fun_ktls_cleanup(fp);
+ fun_free_stats_area(fp);
+ fun_free_rss(fp);
+ fun_port_destroy(netdev);
+ free_netdev(netdev);
+}
+
+static int fun_create_ports(struct fun_ethdev *ed, unsigned int nports)
+{
+ struct fun_dev *fd = &ed->fdev;
+ int i, rc;
+
+ /* The admin queue takes 1 IRQ and 2 SQs. */
+ ed->nsqs_per_port = min(fd->num_irqs - 1,
+ fd->kern_end_qid - 2) / nports;
+ if (ed->nsqs_per_port < 2) {
+ dev_err(fd->dev, "Too few SQs for %u ports", nports);
+ return -EINVAL;
+ }
+
+ ed->netdevs = kcalloc(nports, sizeof(*ed->netdevs), GFP_KERNEL);
+ if (!ed->netdevs)
+ return -ENOMEM;
+
+ ed->num_ports = nports;
+ for (i = 0; i < nports; i++) {
+ rc = fun_create_netdev(ed, i);
+ if (rc)
+ goto free_netdevs;
+ }
+
+ return 0;
+
+free_netdevs:
+ while (i)
+ fun_destroy_netdev(ed->netdevs[--i]);
+ kfree(ed->netdevs);
+ ed->netdevs = NULL;
+ ed->num_ports = 0;
+ return rc;
+}
+
+static void fun_destroy_ports(struct fun_ethdev *ed)
+{
+ unsigned int i;
+
+ for (i = 0; i < ed->num_ports; i++)
+ fun_destroy_netdev(ed->netdevs[i]);
+
+ kfree(ed->netdevs);
+ ed->netdevs = NULL;
+ ed->num_ports = 0;
+}
+
+static void fun_update_link_state(const struct fun_ethdev *ed,
+ const struct fun_admin_port_notif *notif)
+{
+ unsigned int port_idx = be16_to_cpu(notif->id);
+ struct net_device *netdev;
+ struct funeth_priv *fp;
+
+ if (port_idx >= ed->num_ports)
+ return;
+
+ netdev = ed->netdevs[port_idx];
+ fp = netdev_priv(netdev);
+
+ write_seqcount_begin(&fp->link_seq);
+ fp->link_speed = be32_to_cpu(notif->speed) * 10; /* units of 10 Mbps -> Mbps */
+ fp->active_fc = notif->flow_ctrl;
+ fp->active_fec = notif->fec;
+ fp->xcvr_type = notif->xcvr_type;
+ fp->link_down_reason = notif->link_down_reason;
+ fp->lp_advertising = be64_to_cpu(notif->lp_advertising);
+
+ if ((notif->link_state | notif->missed_events) & FUN_PORT_FLAG_MAC_DOWN)
+ netif_carrier_off(netdev);
+ if (notif->link_state & FUN_PORT_FLAG_MAC_UP)
+ netif_carrier_on(netdev);
+
+ write_seqcount_end(&fp->link_seq);
+ fun_report_link(netdev);
+}
+
+/* handler for async events delivered through the admin CQ */
+static void fun_event_cb(struct fun_dev *fdev, void *entry)
+{
+ u8 op = ((struct fun_admin_rsp_common *)entry)->op;
+
+ if (op == FUN_ADMIN_OP_PORT) {
+ const struct fun_admin_port_notif *rsp = entry;
+
+ if (rsp->subop == FUN_ADMIN_SUBOP_NOTIFY) {
+ fun_update_link_state(to_fun_ethdev(fdev), rsp);
+ } else if (rsp->subop == FUN_ADMIN_SUBOP_RES_COUNT) {
+ const struct fun_admin_res_count_rsp *r = entry;
+
+ if (r->count.data)
+ set_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags);
+ else
+ set_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags);
+ fun_serv_sched(fdev);
+ } else {
+ dev_info(fdev->dev, "adminq event unexpected op %u subop %u",
+ op, rsp->subop);
+ }
+ } else {
+ dev_info(fdev->dev, "adminq event unexpected op %u", op);
+ }
+}
+
+/* handler for pending work managed by the service task */
+static void fun_service_cb(struct fun_dev *fdev)
+{
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ int rc;
+
+ if (test_and_clear_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags))
+ fun_destroy_ports(ed);
+
+ if (!test_and_clear_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags))
+ return;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT);
+ if (rc < 0 || rc == ed->num_ports)
+ return;
+
+ if (ed->num_ports)
+ fun_destroy_ports(ed);
+ if (rc)
+ fun_create_ports(ed, rc);
+}
+
+static int funeth_sriov_configure(struct pci_dev *pdev, int nvfs)
+{
+ struct fun_dev *fdev = pci_get_drvdata(pdev);
+ struct fun_ethdev *ed = to_fun_ethdev(fdev);
+ int rc;
+
+ if (nvfs == 0) {
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev,
+ "Cannot disable SR-IOV while VFs are assigned\n");
+ return -EPERM;
+ }
+
+ mutex_lock(&ed->state_mutex);
+ fun_free_vports(ed);
+ mutex_unlock(&ed->state_mutex);
+ pci_disable_sriov(pdev);
+ return 0;
+ }
+
+ rc = pci_enable_sriov(pdev, nvfs);
+ if (rc)
+ return rc;
+
+ mutex_lock(&ed->state_mutex);
+ rc = fun_init_vports(ed, nvfs);
+ mutex_unlock(&ed->state_mutex);
+ if (rc) {
+ pci_disable_sriov(pdev);
+ return rc;
+ }
+
+ return nvfs;
+}
+
+static int funeth_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct fun_dev_params aqreq = {
+ .cqe_size_log2 = ilog2(ADMIN_CQE_SIZE),
+ .sqe_size_log2 = ilog2(ADMIN_SQE_SIZE),
+ .cq_depth = ADMIN_CQ_DEPTH,
+ .sq_depth = ADMIN_SQ_DEPTH,
+ .rq_depth = ADMIN_RQ_DEPTH,
+ .min_msix = 2, /* 1 Rx + 1 Tx */
+ .event_cb = fun_event_cb,
+ .serv_cb = fun_service_cb,
+ };
+ struct devlink *devlink;
+ struct fun_ethdev *ed;
+ struct fun_dev *fdev;
+ int rc;
+
+ devlink = fun_devlink_alloc(&pdev->dev);
+ if (!devlink) {
+ dev_err(&pdev->dev, "devlink alloc failed\n");
+ return -ENOMEM;
+ }
+
+ ed = devlink_priv(devlink);
+ mutex_init(&ed->state_mutex);
+
+ fdev = &ed->fdev;
+ rc = fun_dev_enable(fdev, pdev, &aqreq, KBUILD_MODNAME);
+ if (rc)
+ goto free_devlink;
+
+ rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT);
+ if (rc > 0)
+ rc = fun_create_ports(ed, rc);
+ if (rc < 0)
+ goto disable_dev;
+
+ fun_serv_restart(fdev);
+ fun_devlink_register(devlink);
+ return 0;
+
+disable_dev:
+ fun_dev_disable(fdev);
+free_devlink:
+ mutex_destroy(&ed->state_mutex);
+ fun_devlink_free(devlink);
+ return rc;
+}
+
+static void funeth_remove(struct pci_dev *pdev)
+{
+ struct fun_dev *fdev = pci_get_drvdata(pdev);
+ struct devlink *devlink;
+ struct fun_ethdev *ed;
+
+ ed = to_fun_ethdev(fdev);
+ devlink = priv_to_devlink(ed);
+ fun_devlink_unregister(devlink);
+
+#ifdef CONFIG_PCI_IOV
+ funeth_sriov_configure(pdev, 0);
+#endif
+
+ fun_serv_stop(fdev);
+ fun_destroy_ports(ed);
+ fun_dev_disable(fdev);
+ mutex_destroy(&ed->state_mutex);
+
+ fun_devlink_free(devlink);
+}
+
+static struct pci_driver funeth_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = funeth_id_table,
+ .probe = funeth_probe,
+ .remove = funeth_remove,
+ .shutdown = funeth_remove,
+ .sriov_configure = funeth_sriov_configure,
+};
+
+module_pci_driver(funeth_driver);
+
+MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>");
+MODULE_DESCRIPTION("Fungible Ethernet Network Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DEVICE_TABLE(pci, funeth_id_table);
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_rx.c b/drivers/net/ethernet/fungible/funeth/funeth_rx.c
new file mode 100644
index 000000000000..29a6c2ede43a
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_rx.c
@@ -0,0 +1,829 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/bpf_trace.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/filter.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include "funeth_txrx.h"
+#include "funeth.h"
+#include "fun_queue.h"
+
+#define CREATE_TRACE_POINTS
+#include "funeth_trace.h"
+
+/* Given the device's max supported MTU and pages of at least 4KB a packet can
+ * be scattered into at most 4 buffers.
+ */
+#define RX_MAX_FRAGS 4
+
+/* Per packet headroom in non-XDP mode. Present only for 1-frag packets. */
+#define FUN_RX_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+
+/* We try to reuse pages for our buffers. To avoid frequent page ref writes we
+ * take EXTRA_PAGE_REFS references at once and then hand them out one per packet
+ * occupying the buffer.
+ */
+#define EXTRA_PAGE_REFS 1000000
+#define MIN_PAGE_REFS 1000
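+
+/* Ref accounting summary: funeth_alloc_page() starts a buffer at pg_refs = 1
+ * and immediately tops it up via refresh_refs(), fun_gather_pkt() spends one
+ * pg_ref of each buffer a packet lands in, and a page is considered
+ * reclaimable once page_ref_count() equals the pg_refs we still hold, i.e.
+ * no references remain outside the driver.
+ */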
+
+enum {
+ FUN_XDP_FLUSH_REDIR = 1,
+ FUN_XDP_FLUSH_TX = 2,
+};
+
+/* See if a page is running low on refs we are holding and if so take more. */
+static void refresh_refs(struct funeth_rxbuf *buf)
+{
+ if (unlikely(buf->pg_refs < MIN_PAGE_REFS)) {
+ buf->pg_refs += EXTRA_PAGE_REFS;
+ page_ref_add(buf->page, EXTRA_PAGE_REFS);
+ }
+}
+
+/* Offer a buffer to the Rx buffer cache. The cache will hold the buffer if its
+ * page is worth retaining and there's room for it. Otherwise the page is
+ * unmapped and our references released.
+ */
+static void cache_offer(struct funeth_rxq *q, const struct funeth_rxbuf *buf)
+{
+ struct funeth_rx_cache *c = &q->cache;
+
+ if (c->prod_cnt - c->cons_cnt <= c->mask && buf->node == numa_mem_id()) {
+ c->bufs[c->prod_cnt & c->mask] = *buf;
+ c->prod_cnt++;
+ } else {
+ dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_cache_drain(buf->page, buf->pg_refs);
+ }
+}
+
+/* Get a page from the Rx buffer cache. We only consider the next available
+ * page and return it if we own all its references.
+ */
+static bool cache_get(struct funeth_rxq *q, struct funeth_rxbuf *rb)
+{
+ struct funeth_rx_cache *c = &q->cache;
+ struct funeth_rxbuf *buf;
+
+ if (c->prod_cnt == c->cons_cnt)
+ return false; /* empty cache */
+
+ buf = &c->bufs[c->cons_cnt & c->mask];
+ if (page_ref_count(buf->page) == buf->pg_refs) {
+ dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ *rb = *buf;
+ buf->page = NULL;
+ refresh_refs(rb);
+ c->cons_cnt++;
+ return true;
+ }
+
+ /* Page can't be reused. If the cache is full drop this page. */
+ if (c->prod_cnt - c->cons_cnt > c->mask) {
+ dma_unmap_page_attrs(q->dma_dev, buf->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ __page_frag_cache_drain(buf->page, buf->pg_refs);
+ buf->page = NULL;
+ c->cons_cnt++;
+ }
+ return false;
+}
+
+/* Allocate and DMA-map a page for receive. */
+static int funeth_alloc_page(struct funeth_rxq *q, struct funeth_rxbuf *rb,
+ int node, gfp_t gfp)
+{
+ struct page *p;
+
+ if (cache_get(q, rb))
+ return 0;
+
+ p = __alloc_pages_node(node, gfp | __GFP_NOWARN, 0);
+ if (unlikely(!p))
+ return -ENOMEM;
+
+ rb->dma_addr = dma_map_page(q->dma_dev, p, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(q->dma_dev, rb->dma_addr))) {
+ FUN_QSTAT_INC(q, rx_map_err);
+ __free_page(p);
+ return -ENOMEM;
+ }
+
+ FUN_QSTAT_INC(q, rx_page_alloc);
+
+ rb->page = p;
+ rb->pg_refs = 1;
+ refresh_refs(rb);
+ rb->node = page_is_pfmemalloc(p) ? -1 : page_to_nid(p);
+ return 0;
+}
+
+static void funeth_free_page(struct funeth_rxq *q, struct funeth_rxbuf *rb)
+{
+ if (rb->page) {
+ dma_unmap_page(q->dma_dev, rb->dma_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __page_frag_cache_drain(rb->page, rb->pg_refs);
+ rb->page = NULL;
+ }
+}
+
+/* Run the XDP program assigned to an Rx queue.
+ * Return %NULL if the buffer is consumed, or the virtual address of the packet
+ * to turn into an skb.
+ */
+static void *fun_run_xdp(struct funeth_rxq *q, skb_frag_t *frags, void *buf_va,
+ int ref_ok, struct funeth_txq *xdp_q)
+{
+ struct bpf_prog *xdp_prog;
+ struct xdp_frame *xdpf;
+ struct xdp_buff xdp;
+ u32 act;
+
+ /* VA includes the headroom, frag size includes headroom + tailroom */
+ xdp_init_buff(&xdp, ALIGN(skb_frag_size(frags), FUN_EPRQ_PKT_ALIGN),
+ &q->xdp_rxq);
+ xdp_prepare_buff(&xdp, buf_va, FUN_XDP_HEADROOM, skb_frag_size(frags) -
+ (FUN_RX_TAILROOM + FUN_XDP_HEADROOM), false);
+
+ xdp_prog = READ_ONCE(q->xdp_prog);
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ /* remove headroom, which may not be FUN_XDP_HEADROOM now */
+ skb_frag_size_set(frags, xdp.data_end - xdp.data);
+ skb_frag_off_add(frags, xdp.data - xdp.data_hard_start);
+ goto pass;
+ case XDP_TX:
+ if (unlikely(!ref_ok))
+ goto pass;
+
+ xdpf = xdp_convert_buff_to_frame(&xdp);
+ if (!xdpf || !fun_xdp_tx(xdp_q, xdpf))
+ goto xdp_error;
+ FUN_QSTAT_INC(q, xdp_tx);
+ q->xdp_flush |= FUN_XDP_FLUSH_TX;
+ break;
+ case XDP_REDIRECT:
+ if (unlikely(!ref_ok))
+ goto pass;
+ if (unlikely(xdp_do_redirect(q->netdev, &xdp, xdp_prog)))
+ goto xdp_error;
+ FUN_QSTAT_INC(q, xdp_redir);
+ q->xdp_flush |= FUN_XDP_FLUSH_REDIR;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(q->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(q->netdev, xdp_prog, act);
+xdp_error:
+ q->cur_buf->pg_refs++; /* return frags' page reference */
+ FUN_QSTAT_INC(q, xdp_err);
+ break;
+ case XDP_DROP:
+ q->cur_buf->pg_refs++;
+ FUN_QSTAT_INC(q, xdp_drops);
+ break;
+ }
+ return NULL;
+
+pass:
+ return xdp.data;
+}
+
+/* A CQE contains a fixed completion structure along with optional metadata and
+ * even packet data. Given the start address of a CQE return the start of the
+ * contained fixed structure, which lies at the end.
+ */
+static const void *cqe_to_info(const void *cqe)
+{
+ return cqe + FUNETH_CQE_INFO_OFFSET;
+}
+
+/* The inverse of cqe_to_info(). */
+static const void *info_to_cqe(const void *cqe_info)
+{
+ return cqe_info - FUNETH_CQE_INFO_OFFSET;
+}
+
+/* Return the type of hash provided by the device based on the L3 and L4
+ * protocols it parsed for the packet.
+ */
+static enum pkt_hash_types cqe_to_pkt_hash_type(u16 pkt_parse)
+{
+ static const enum pkt_hash_types htype_map[] = {
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3,
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L4,
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3,
+ PKT_HASH_TYPE_NONE, PKT_HASH_TYPE_L3
+ };
+ u16 key;
+
+ /* Build the key from the TCP/UDP and IP/IPv6 bits */
+ key = ((pkt_parse >> FUN_ETH_RX_CV_OL4_PROT_S) & 6) |
+ ((pkt_parse >> (FUN_ETH_RX_CV_OL3_PROT_S + 1)) & 1);
+
+ return htype_map[key];
+}
+
+/* Each received packet can be scattered across several Rx buffers or can
+ * share a buffer with previously received packets depending on the buffer
+ * and packet sizes and the room available in the most recently used buffer.
+ *
+ * The rules are:
+ * - If the buffer at the head of an RQ has not been used it gets (part of) the
+ * next incoming packet.
+ * - Otherwise, if the packet fully fits in the buffer's remaining space the
+ * packet is written there.
+ * - Otherwise, the packet goes into the next Rx buffer.
+ *
+ * This function returns the Rx buffer for a packet or fragment thereof of the
+ * given length. If it isn't @buf it either recycles or frees that buffer
+ * before advancing the queue to the next buffer.
+ *
+ * If called repeatedly with the remaining length of a packet it will walk
+ * through all the buffers containing the packet.
+ */
+static struct funeth_rxbuf *
+get_buf(struct funeth_rxq *q, struct funeth_rxbuf *buf, unsigned int len)
+{
+ if (q->buf_offset + len <= PAGE_SIZE || !q->buf_offset)
+ return buf; /* @buf holds (part of) the packet */
+
+ /* The packet occupies part of the next buffer. Move there after
+ * replenishing the current buffer slot either with the spare page or
+ * by reusing the slot's existing page. Note that if a spare page isn't
+ * available and the current packet occupies @buf it is a multi-frag
+ * packet that will be dropped leaving @buf available for reuse.
+ */
+ if ((page_ref_count(buf->page) == buf->pg_refs &&
+ buf->node == numa_mem_id()) || !q->spare_buf.page) {
+ dma_sync_single_for_device(q->dma_dev, buf->dma_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ refresh_refs(buf);
+ } else {
+ cache_offer(q, buf);
+ *buf = q->spare_buf;
+ q->spare_buf.page = NULL;
+ q->rqes[q->rq_cons & q->rq_mask] =
+ FUN_EPRQ_RQBUF_INIT(buf->dma_addr);
+ }
+ q->buf_offset = 0;
+ q->rq_cons++;
+ return &q->bufs[q->rq_cons & q->rq_mask];
+}
+
+/* Gather the page fragments making up the first Rx packet on @q. Its total
+ * length @tot_len includes optional head- and tail-rooms.
+ *
+ * Return 0 if the device retains ownership of at least some of the pages.
+ * In this case the caller may only copy the packet.
+ *
+ * A non-zero return value gives the caller permission to use references to the
+ * pages, e.g., attach them to skbs. Additionally, if the value is <0 at least
+ * one of the pages is PF_MEMALLOC.
+ *
+ * Regardless of outcome the caller is granted a reference to each of the pages.
+ */
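+/* Implementation note: the <0 case comes from funeth_alloc_page() recording
+ * buf->node as -1 for pfmemalloc pages; OR-ing that node into a positive
+ * ref_ok below turns the result negative.
+ */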
+static int fun_gather_pkt(struct funeth_rxq *q, unsigned int tot_len,
+ skb_frag_t *frags)
+{
+ struct funeth_rxbuf *buf = q->cur_buf;
+ unsigned int frag_len;
+ int ref_ok = 1;
+
+ for (;;) {
+ buf = get_buf(q, buf, tot_len);
+
+ /* We always keep the RQ full of buffers so before we can give
+ * one of our pages to the stack we require that we can obtain
+ * a replacement page. If we can't the packet will either be
+ * copied or dropped so we can retain ownership of the page and
+ * reuse it.
+ */
+ if (!q->spare_buf.page &&
+ funeth_alloc_page(q, &q->spare_buf, numa_mem_id(),
+ GFP_ATOMIC | __GFP_MEMALLOC))
+ ref_ok = 0;
+
+ frag_len = min_t(unsigned int, tot_len,
+ PAGE_SIZE - q->buf_offset);
+ dma_sync_single_for_cpu(q->dma_dev,
+ buf->dma_addr + q->buf_offset,
+ frag_len, DMA_FROM_DEVICE);
+ buf->pg_refs--;
+ if (ref_ok)
+ ref_ok |= buf->node;
+
+ __skb_frag_set_page(frags, buf->page);
+ skb_frag_off_set(frags, q->buf_offset);
+ skb_frag_size_set(frags++, frag_len);
+
+ tot_len -= frag_len;
+ if (!tot_len)
+ break;
+
+ q->buf_offset = PAGE_SIZE;
+ }
+ q->buf_offset = ALIGN(q->buf_offset + frag_len, FUN_EPRQ_PKT_ALIGN);
+ q->cur_buf = buf;
+ return ref_ok;
+}
+
+static bool rx_hwtstamp_enabled(const struct net_device *dev)
+{
+ const struct funeth_priv *d = netdev_priv(dev);
+
+ return d->hwtstamp_cfg.rx_filter == HWTSTAMP_FILTER_ALL;
+}
+
+/* Advance the CQ pointers and phase tag to the next CQE. */
+static void advance_cq(struct funeth_rxq *q)
+{
+ if (unlikely(q->cq_head == q->cq_mask)) {
+ q->cq_head = 0;
+ q->phase ^= 1;
+ q->next_cqe_info = cqe_to_info(q->cqes);
+ } else {
+ q->cq_head++;
+ q->next_cqe_info += FUNETH_CQE_SIZE;
+ }
+ prefetch(q->next_cqe_info);
+}
+
+/* Process the packet represented by the head CQE of @q. Gather the packet's
+ * fragments, run it through the optional XDP program, and if needed construct
+ * an skb and pass it to the stack.
+ */
+static void fun_handle_cqe_pkt(struct funeth_rxq *q, struct funeth_txq *xdp_q)
+{
+ const struct fun_eth_cqe *rxreq = info_to_cqe(q->next_cqe_info);
+ unsigned int i, tot_len, pkt_len = be32_to_cpu(rxreq->pkt_len);
+ struct net_device *ndev = q->netdev;
+ skb_frag_t frags[RX_MAX_FRAGS];
+ struct skb_shared_info *si;
+ unsigned int headroom;
+ gro_result_t gro_res;
+ struct sk_buff *skb;
+ int ref_ok;
+ void *va;
+ u16 cv;
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.rx_pkts++;
+ q->stats.rx_bytes += pkt_len;
+ u64_stats_update_end(&q->syncp);
+
+ advance_cq(q);
+
+ /* account for head- and tail-room, present only for 1-buffer packets */
+ tot_len = pkt_len;
+ headroom = be16_to_cpu(rxreq->headroom);
+ if (likely(headroom))
+ tot_len += FUN_RX_TAILROOM + headroom;
+
+ ref_ok = fun_gather_pkt(q, tot_len, frags);
+ va = skb_frag_address(frags);
+ if (xdp_q && headroom == FUN_XDP_HEADROOM) {
+ va = fun_run_xdp(q, frags, va, ref_ok, xdp_q);
+ if (!va)
+ return;
+ headroom = 0; /* XDP_PASS trims it */
+ }
+ if (unlikely(!ref_ok))
+ goto no_mem;
+
+ if (likely(headroom)) {
+ /* headroom is either FUN_RX_HEADROOM or FUN_XDP_HEADROOM */
+ prefetch(va + headroom);
+ skb = napi_build_skb(va, ALIGN(tot_len, FUN_EPRQ_PKT_ALIGN));
+ if (unlikely(!skb))
+ goto no_mem;
+
+ skb_reserve(skb, headroom);
+ __skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ } else {
+ prefetch(va);
+ skb = napi_get_frags(q->napi);
+ if (unlikely(!skb))
+ goto no_mem;
+
+ if (ref_ok < 0)
+ skb->pfmemalloc = 1;
+
+ si = skb_shinfo(skb);
+ si->nr_frags = rxreq->nsgl;
+ for (i = 0; i < si->nr_frags; i++)
+ si->frags[i] = frags[i];
+
+ skb->len = pkt_len;
+ skb->data_len = pkt_len;
+ skb->truesize += round_up(pkt_len, FUN_EPRQ_PKT_ALIGN);
+ }
+
+ skb_record_rx_queue(skb, q->qidx);
+ cv = be16_to_cpu(rxreq->pkt_cv);
+ if (likely((q->netdev->features & NETIF_F_RXHASH) && rxreq->hash))
+ skb_set_hash(skb, be32_to_cpu(rxreq->hash),
+ cqe_to_pkt_hash_type(cv));
+ if (likely((q->netdev->features & NETIF_F_RXCSUM) && rxreq->csum)) {
+ FUN_QSTAT_INC(q, rx_cso);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->csum_level = be16_to_cpu(rxreq->csum) - 1;
+ }
+ if (unlikely(rx_hwtstamp_enabled(q->netdev)))
+ skb_hwtstamps(skb)->hwtstamp = be64_to_cpu(rxreq->timestamp);
+
+ trace_funeth_rx(q, rxreq->nsgl, pkt_len, skb->hash, cv);
+
+ gro_res = skb->data_len ? napi_gro_frags(q->napi) :
+ napi_gro_receive(q->napi, skb);
+ if (gro_res == GRO_MERGED || gro_res == GRO_MERGED_FREE)
+ FUN_QSTAT_INC(q, gro_merged);
+ else if (gro_res == GRO_HELD)
+ FUN_QSTAT_INC(q, gro_pkts);
+ return;
+
+no_mem:
+ FUN_QSTAT_INC(q, rx_mem_drops);
+
+ /* Release the references we've been granted for the frag pages.
+ * We return the ref of the last frag and free the rest.
+ */
+ q->cur_buf->pg_refs++;
+ for (i = 0; i < rxreq->nsgl - 1; i++)
+ __free_page(skb_frag_page(frags + i));
+}
+
+/* Return 0 if the phase tag of the CQE at the CQ's head matches expectations
+ * indicating the CQE is new.
+ */
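+/* Phase convention: q->phase starts at 1 (set in fun_rxq_create_dev()) and
+ * advance_cq() toggles it each time the CQ head wraps, so a CQE written on a
+ * previous lap carries a stale tag bit and the XOR below is non-zero until
+ * the device overwrites the entry.
+ */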
+static u16 cqe_phase_mismatch(const struct fun_cqe_info *ci, u16 phase)
+{
+ u16 sf_p = be16_to_cpu(ci->sf_p);
+
+ return (sf_p & 1) ^ phase;
+}
+
+/* Walk through a CQ identifying and processing fresh CQEs up to the given
+ * budget. Return the remaining budget.
+ */
+static int fun_process_cqes(struct funeth_rxq *q, int budget)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ struct funeth_txq **xdpqs, *xdp_q = NULL;
+
+ xdpqs = rcu_dereference_bh(fp->xdpqs);
+ if (xdpqs)
+ xdp_q = xdpqs[smp_processor_id()];
+
+ while (budget && !cqe_phase_mismatch(q->next_cqe_info, q->phase)) {
+ /* access other descriptor fields after the phase check */
+ dma_rmb();
+
+ fun_handle_cqe_pkt(q, xdp_q);
+ budget--;
+ }
+
+ if (unlikely(q->xdp_flush)) {
+ if (q->xdp_flush & FUN_XDP_FLUSH_TX)
+ fun_txq_wr_db(xdp_q);
+ if (q->xdp_flush & FUN_XDP_FLUSH_REDIR)
+ xdp_do_flush();
+ q->xdp_flush = 0;
+ }
+
+ return budget;
+}
+
+/* NAPI handler for Rx queues. Calls the CQE processing loop and writes RQ/CQ
+ * doorbells as needed.
+ */
+int fun_rxq_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
+ struct funeth_rxq *q = irq->rxq;
+ int work_done = budget - fun_process_cqes(q, budget);
+ u32 cq_db_val = q->cq_head;
+
+ if (unlikely(work_done >= budget))
+ FUN_QSTAT_INC(q, rx_budget);
+ else if (napi_complete_done(napi, work_done))
+ cq_db_val |= q->irq_db_val;
+
+ /* check whether to post new Rx buffers */
+ if (q->rq_cons - q->rq_cons_db >= q->rq_db_thres) {
+ u64_stats_update_begin(&q->syncp);
+ q->stats.rx_bufs += q->rq_cons - q->rq_cons_db;
+ u64_stats_update_end(&q->syncp);
+ q->rq_cons_db = q->rq_cons;
+ writel((q->rq_cons - 1) & q->rq_mask, q->rq_db);
+ }
+
+ writel(cq_db_val, q->cq_db);
+ return work_done;
+}
+
+/* Free the Rx buffers of an Rx queue. */
+static void fun_rxq_free_bufs(struct funeth_rxq *q)
+{
+ struct funeth_rxbuf *b = q->bufs;
+ unsigned int i;
+
+ for (i = 0; i <= q->rq_mask; i++, b++)
+ funeth_free_page(q, b);
+
+ funeth_free_page(q, &q->spare_buf);
+ q->cur_buf = NULL;
+}
+
+/* Initially provision an Rx queue with Rx buffers. */
+static int fun_rxq_alloc_bufs(struct funeth_rxq *q, int node)
+{
+ struct funeth_rxbuf *b = q->bufs;
+ unsigned int i;
+
+ for (i = 0; i <= q->rq_mask; i++, b++) {
+ if (funeth_alloc_page(q, b, node, GFP_KERNEL)) {
+ fun_rxq_free_bufs(q);
+ return -ENOMEM;
+ }
+ q->rqes[i] = FUN_EPRQ_RQBUF_INIT(b->dma_addr);
+ }
+ q->cur_buf = q->bufs;
+ return 0;
+}
+
+/* Initialize a used-buffer cache of the given depth. */
+static int fun_rxq_init_cache(struct funeth_rx_cache *c, unsigned int depth,
+ int node)
+{
+ c->mask = depth - 1;
+ c->bufs = kvzalloc_node(depth * sizeof(*c->bufs), GFP_KERNEL, node);
+ return c->bufs ? 0 : -ENOMEM;
+}
+
+/* Deallocate an Rx queue's used-buffer cache and its contents. */
+static void fun_rxq_free_cache(struct funeth_rxq *q)
+{
+ struct funeth_rxbuf *b = q->cache.bufs;
+ unsigned int i;
+
+ for (i = 0; i <= q->cache.mask; i++, b++)
+ funeth_free_page(q, b);
+
+ kvfree(q->cache.bufs);
+ q->cache.bufs = NULL;
+}
+
+int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ struct fun_admin_epcq_req cmd;
+ u16 headroom;
+ int err;
+
+ headroom = prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;
+ if (headroom != q->headroom) {
+ cmd.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_EPCQ,
+ sizeof(cmd));
+ cmd.u.modify =
+ FUN_ADMIN_EPCQ_MODIFY_REQ_INIT(FUN_ADMIN_SUBOP_MODIFY,
+ 0, q->hw_cqid, headroom);
+ err = fun_submit_admin_sync_cmd(fp->fdev, &cmd.common, NULL, 0,
+ 0);
+ if (err)
+ return err;
+ q->headroom = headroom;
+ }
+
+ WRITE_ONCE(q->xdp_prog, prog);
+ return 0;
+}
+
+/* Create an Rx queue, allocating the host memory it needs. */
+static struct funeth_rxq *fun_rxq_create_sw(struct net_device *dev,
+ unsigned int qidx,
+ unsigned int ncqe,
+ unsigned int nrqe,
+ struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_rxq *q;
+ int err = -ENOMEM;
+ int numa_node;
+
+ numa_node = fun_irq_node(irq);
+ q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
+ if (!q)
+ goto err;
+
+ q->qidx = qidx;
+ q->netdev = dev;
+ q->cq_mask = ncqe - 1;
+ q->rq_mask = nrqe - 1;
+ q->numa_node = numa_node;
+ q->rq_db_thres = nrqe / 4;
+ u64_stats_init(&q->syncp);
+ q->dma_dev = &fp->pdev->dev;
+
+ q->rqes = fun_alloc_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes),
+ sizeof(*q->bufs), false, numa_node,
+ &q->rq_dma_addr, (void **)&q->bufs, NULL);
+ if (!q->rqes)
+ goto free_q;
+
+ q->cqes = fun_alloc_ring_mem(q->dma_dev, ncqe, FUNETH_CQE_SIZE, 0,
+ false, numa_node, &q->cq_dma_addr, NULL,
+ NULL);
+ if (!q->cqes)
+ goto free_rqes;
+
+ err = fun_rxq_init_cache(&q->cache, nrqe, numa_node);
+ if (err)
+ goto free_cqes;
+
+ err = fun_rxq_alloc_bufs(q, numa_node);
+ if (err)
+ goto free_cache;
+
+ q->stats.rx_bufs = q->rq_mask;
+ q->init_state = FUN_QSTATE_INIT_SW;
+ return q;
+
+free_cache:
+ fun_rxq_free_cache(q);
+free_cqes:
+ dma_free_coherent(q->dma_dev, ncqe * FUNETH_CQE_SIZE, q->cqes,
+ q->cq_dma_addr);
+free_rqes:
+ fun_free_ring_mem(q->dma_dev, nrqe, sizeof(*q->rqes), false, q->rqes,
+ q->rq_dma_addr, q->bufs);
+free_q:
+ kfree(q);
+err:
+ netdev_err(dev, "Unable to allocate memory for Rx queue %u\n", qidx);
+ return ERR_PTR(err);
+}
+
+static void fun_rxq_free_sw(struct funeth_rxq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+
+ fun_rxq_free_cache(q);
+ fun_rxq_free_bufs(q);
+ fun_free_ring_mem(q->dma_dev, q->rq_mask + 1, sizeof(*q->rqes), false,
+ q->rqes, q->rq_dma_addr, q->bufs);
+ dma_free_coherent(q->dma_dev, (q->cq_mask + 1) * FUNETH_CQE_SIZE,
+ q->cqes, q->cq_dma_addr);
+
+ /* Before freeing the queue transfer key counters to the device. */
+ fp->rx_packets += q->stats.rx_pkts;
+ fp->rx_bytes += q->stats.rx_bytes;
+ fp->rx_dropped += q->stats.rx_map_err + q->stats.rx_mem_drops;
+
+ kfree(q);
+}
+
+/* Create an Rx queue's resources on the device. */
+int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ unsigned int ncqe = q->cq_mask + 1;
+ unsigned int nrqe = q->rq_mask + 1;
+ int err;
+
+ err = xdp_rxq_info_reg(&q->xdp_rxq, q->netdev, q->qidx,
+ irq->napi.napi_id);
+ if (err)
+ goto out;
+
+ err = xdp_rxq_info_reg_mem_model(&q->xdp_rxq, MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+ goto xdp_unreg;
+
+ q->phase = 1;
+ q->irq_cnt = 0;
+ q->cq_head = 0;
+ q->rq_cons = 0;
+ q->rq_cons_db = 0;
+ q->buf_offset = 0;
+ q->napi = &irq->napi;
+ q->irq_db_val = fp->cq_irq_db;
+ q->next_cqe_info = cqe_to_info(q->cqes);
+
+ q->xdp_prog = fp->xdp_prog;
+ q->headroom = fp->xdp_prog ? FUN_XDP_HEADROOM : FUN_RX_HEADROOM;
+
+ err = fun_sq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR |
+ FUN_ADMIN_EPSQ_CREATE_FLAG_RQ, 0,
+ FUN_HCI_ID_INVALID, 0, nrqe, q->rq_dma_addr, 0, 0,
+ 0, 0, fp->fdev->kern_end_qid, PAGE_SHIFT,
+ &q->hw_sqid, &q->rq_db);
+ if (err)
+ goto xdp_unreg;
+
+ err = fun_cq_create(fp->fdev, FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR |
+ FUN_ADMIN_EPCQ_CREATE_FLAG_RQ, 0,
+ q->hw_sqid, ilog2(FUNETH_CQE_SIZE), ncqe,
+ q->cq_dma_addr, q->headroom, FUN_RX_TAILROOM, 0, 0,
+ irq->irq_idx, 0, fp->fdev->kern_end_qid,
+ &q->hw_cqid, &q->cq_db);
+ if (err)
+ goto free_rq;
+
+ irq->rxq = q;
+ writel(q->rq_mask, q->rq_db);
+ q->init_state = FUN_QSTATE_INIT_FULL;
+
+ netif_info(fp, ifup, q->netdev,
+ "Rx queue %u, depth %u/%u, HW qid %u/%u, IRQ idx %u, node %d, headroom %u\n",
+ q->qidx, ncqe, nrqe, q->hw_cqid, q->hw_sqid, irq->irq_idx,
+ q->numa_node, q->headroom);
+ return 0;
+
+free_rq:
+ fun_destroy_sq(fp->fdev, q->hw_sqid);
+xdp_unreg:
+ xdp_rxq_info_unreg(&q->xdp_rxq);
+out:
+ netdev_err(q->netdev,
+ "Failed to create Rx queue %u on device, error %d\n",
+ q->qidx, err);
+ return err;
+}
+
+static void fun_rxq_free_dev(struct funeth_rxq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ struct fun_irq *irq;
+
+ if (q->init_state < FUN_QSTATE_INIT_FULL)
+ return;
+
+ irq = container_of(q->napi, struct fun_irq, napi);
+ netif_info(fp, ifdown, q->netdev,
+ "Freeing Rx queue %u (id %u/%u), IRQ %u\n",
+ q->qidx, q->hw_cqid, q->hw_sqid, irq->irq_idx);
+
+ irq->rxq = NULL;
+ xdp_rxq_info_unreg(&q->xdp_rxq);
+ fun_destroy_sq(fp->fdev, q->hw_sqid);
+ fun_destroy_cq(fp->fdev, q->hw_cqid);
+ q->init_state = FUN_QSTATE_INIT_SW;
+}
+
+/* Create or advance an Rx queue, allocating all the host and device resources
+ * needed to reach the target state.
+ */
+int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq,
+ int state, struct funeth_rxq **qp)
+{
+ struct funeth_rxq *q = *qp;
+ int err;
+
+ if (!q) {
+ q = fun_rxq_create_sw(dev, qidx, ncqe, nrqe, irq);
+ if (IS_ERR(q))
+ return PTR_ERR(q);
+ }
+
+ if (q->init_state >= state)
+ goto out;
+
+ err = fun_rxq_create_dev(q, irq);
+ if (err) {
+ if (!*qp)
+ fun_rxq_free_sw(q);
+ return err;
+ }
+
+out:
+ *qp = q;
+ return 0;
+}
+
+/* Free Rx queue resources until it reaches the target state. */
+struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state)
+{
+ if (state < FUN_QSTATE_INIT_FULL)
+ fun_rxq_free_dev(q);
+
+ if (state == FUN_QSTATE_DESTROYED) {
+ fun_rxq_free_sw(q);
+ q = NULL;
+ }
+
+ return q;
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_trace.h b/drivers/net/ethernet/fungible/funeth/funeth_trace.h
new file mode 100644
index 000000000000..9e58dfec19d5
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_trace.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM funeth
+
+#if !defined(_TRACE_FUNETH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FUNETH_H
+
+#include <linux/tracepoint.h>
+
+#include "funeth_txrx.h"
+
+TRACE_EVENT(funeth_tx,
+
+ TP_PROTO(const struct funeth_txq *txq,
+ u32 len,
+ u32 sqe_idx,
+ u32 ngle),
+
+ TP_ARGS(txq, len, sqe_idx, ngle),
+
+ TP_STRUCT__entry(
+ __field(u32, qidx)
+ __field(u32, len)
+ __field(u32, sqe_idx)
+ __field(u32, ngle)
+ __string(devname, txq->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->qidx = txq->qidx;
+ __entry->len = len;
+ __entry->sqe_idx = sqe_idx;
+ __entry->ngle = ngle;
+ __assign_str(devname, txq->netdev->name);
+ ),
+
+ TP_printk("%s: Txq %u, SQE idx %u, len %u, num GLEs %u",
+ __get_str(devname), __entry->qidx, __entry->sqe_idx,
+ __entry->len, __entry->ngle)
+);
+
+TRACE_EVENT(funeth_tx_free,
+
+ TP_PROTO(const struct funeth_txq *txq,
+ u32 sqe_idx,
+ u32 num_sqes,
+ u32 hw_head),
+
+ TP_ARGS(txq, sqe_idx, num_sqes, hw_head),
+
+ TP_STRUCT__entry(
+ __field(u32, qidx)
+ __field(u32, sqe_idx)
+ __field(u32, num_sqes)
+ __field(u32, hw_head)
+ __string(devname, txq->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->qidx = txq->qidx;
+ __entry->sqe_idx = sqe_idx;
+ __entry->num_sqes = num_sqes;
+ __entry->hw_head = hw_head;
+ __assign_str(devname, txq->netdev->name);
+ ),
+
+ TP_printk("%s: Txq %u, SQE idx %u, SQEs %u, HW head %u",
+ __get_str(devname), __entry->qidx, __entry->sqe_idx,
+ __entry->num_sqes, __entry->hw_head)
+);
+
+TRACE_EVENT(funeth_rx,
+
+ TP_PROTO(const struct funeth_rxq *rxq,
+ u32 num_rqes,
+ u32 pkt_len,
+ u32 hash,
+ u32 cls_vec),
+
+ TP_ARGS(rxq, num_rqes, pkt_len, hash, cls_vec),
+
+ TP_STRUCT__entry(
+ __field(u32, qidx)
+ __field(u32, cq_head)
+ __field(u32, num_rqes)
+ __field(u32, len)
+ __field(u32, hash)
+ __field(u32, cls_vec)
+ __string(devname, rxq->netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->qidx = rxq->qidx;
+ __entry->cq_head = rxq->cq_head;
+ __entry->num_rqes = num_rqes;
+ __entry->len = pkt_len;
+ __entry->hash = hash;
+ __entry->cls_vec = cls_vec;
+ __assign_str(devname, rxq->netdev->name);
+ ),
+
+ TP_printk("%s: Rxq %u, CQ head %u, RQEs %u, len %u, hash %u, CV %#x",
+ __get_str(devname), __entry->qidx, __entry->cq_head,
+ __entry->num_rqes, __entry->len, __entry->hash,
+ __entry->cls_vec)
+);
+
+#endif /* _TRACE_FUNETH_H */
+
+/* Below must be outside protection. */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE funeth_trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_tx.c b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
new file mode 100644
index 000000000000..706d81e39a54
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_tx.c
@@ -0,0 +1,801 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+
+#include <linux/dma-mapping.h>
+#include <linux/ip.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <uapi/linux/udp.h>
+#include "funeth.h"
+#include "funeth_ktls.h"
+#include "funeth_txrx.h"
+#include "funeth_trace.h"
+#include "fun_queue.h"
+
+#define FUN_XDP_CLEAN_THRES 32
+#define FUN_XDP_CLEAN_BATCH 16
+
+/* DMA-map a packet and return the (length, DMA_address) pairs for its
+ * segments. If a mapping error occurs -ENOMEM is returned. The mapping
+ * covers the linear data (@data/@data_len) as the first pair, followed by
+ * one pair per fragment described in @si.
+ */
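+/* Callers size the addr/len arrays for the linear data plus every possible
+ * fragment, i.e. MAX_SKB_FRAGS + 1 entries (see write_pkt_desc() below).
+ */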
+static int fun_map_pkt(struct device *dev, const struct skb_shared_info *si,
+ void *data, unsigned int data_len,
+ dma_addr_t *addr, unsigned int *len)
+{
+ const skb_frag_t *fp, *end;
+
+ *len = data_len;
+ *addr = dma_map_single(dev, data, *len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ return -ENOMEM;
+
+ if (!si)
+ return 0;
+
+ for (fp = si->frags, end = fp + si->nr_frags; fp < end; fp++) {
+ *++len = skb_frag_size(fp);
+ *++addr = skb_frag_dma_map(dev, fp, 0, *len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, *addr))
+ goto unwind;
+ }
+ return 0;
+
+unwind:
+ while (fp-- > si->frags)
+ dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
+
+ dma_unmap_single(dev, addr[-1], data_len, DMA_TO_DEVICE);
+ return -ENOMEM;
+}
+
+/* Return the address just past the end of a Tx queue's descriptor ring.
+ * It exploits the fact that the HW writeback area is just after the end
+ * of the descriptor ring.
+ */
+static void *txq_end(const struct funeth_txq *q)
+{
+ return (void *)q->hw_wb;
+}
+
+/* Return the amount of space within a Tx ring from the given address to the
+ * end.
+ */
+static unsigned int txq_to_end(const struct funeth_txq *q, void *p)
+{
+ return txq_end(q) - p;
+}
+
+/* Return the number of Tx descriptors occupied by a Tx request. */
+static unsigned int tx_req_ndesc(const struct fun_eth_tx_req *req)
+{
+ return DIV_ROUND_UP(req->len8, FUNETH_SQE_SIZE / 8);
+}
+
+/* Write a gather list to the Tx descriptor at @req from @ngle address/length
+ * pairs.
+ */
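+/* The list may run past the end of the descriptor ring, in which case it
+ * continues at the start of the ring (q->desc). The returned pointer is the
+ * position just past the last element written, possibly after such a wrap.
+ */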
+static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q,
+ struct fun_eth_tx_req *req,
+ const dma_addr_t *addrs,
+ const unsigned int *lens,
+ unsigned int ngle)
+{
+ struct fun_dataop_gl *gle;
+ unsigned int i;
+
+ req->len8 = (sizeof(*req) + ngle * sizeof(*gle)) / 8;
+
+ for (i = 0, gle = (struct fun_dataop_gl *)req->dataop.imm;
+ i < ngle && txq_to_end(q, gle); i++, gle++)
+ fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
+
+ if (txq_to_end(q, gle) == 0) {
+ gle = (struct fun_dataop_gl *)q->desc;
+ for ( ; i < ngle; i++, gle++)
+ fun_dataop_gl_init(gle, 0, 0, lens[i], addrs[i]);
+ }
+
+ return gle;
+}
+
+static __be16 tcp_hdr_doff_flags(const struct tcphdr *th)
+{
+ return *(__be16 *)&tcp_flag_word(th);
+}
+
+static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q,
+ unsigned int *tls_len)
+{
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+ const struct fun_ktls_tx_ctx *tls_ctx;
+ u32 datalen, seq;
+
+ datalen = skb->len - skb_tcp_all_headers(skb);
+ if (!datalen)
+ return skb;
+
+ if (likely(!tls_offload_tx_resync_pending(skb->sk))) {
+ seq = ntohl(tcp_hdr(skb)->seq);
+ tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+
+ if (likely(tls_ctx->next_seq == seq)) {
+ *tls_len = datalen;
+ return skb;
+ }
+ if (seq - tls_ctx->next_seq < U32_MAX / 4) {
+ tls_offload_tx_resync_request(skb->sk, seq,
+ tls_ctx->next_seq);
+ }
+ }
+
+ FUN_QSTAT_INC(q, tx_tls_fallback);
+ skb = tls_encrypt_skb(skb);
+ if (!skb)
+ FUN_QSTAT_INC(q, tx_tls_drops);
+
+ return skb;
+#else
+ return NULL;
+#endif
+}
+
+/* Write as many descriptors as needed for the supplied skb starting at the
+ * current producer location. The caller has ensured that enough descriptors
+ * are available.
+ *
+ * Returns the number of descriptors written, 0 on error.
+ */
+static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q,
+ unsigned int tls_len)
+{
+ unsigned int extra_bytes = 0, extra_pkts = 0;
+ unsigned int idx = q->prod_cnt & q->mask;
+ const struct skb_shared_info *shinfo;
+ unsigned int lens[MAX_SKB_FRAGS + 1];
+ dma_addr_t addrs[MAX_SKB_FRAGS + 1];
+ struct fun_eth_tx_req *req;
+ struct fun_dataop_gl *gle;
+ const struct tcphdr *th;
+ unsigned int l4_hlen;
+ unsigned int ngle;
+ u16 flags;
+
+ shinfo = skb_shinfo(skb);
+ if (unlikely(fun_map_pkt(q->dma_dev, shinfo, skb->data,
+ skb_headlen(skb), addrs, lens))) {
+ FUN_QSTAT_INC(q, tx_map_err);
+ return 0;
+ }
+
+ req = fun_tx_desc_addr(q, idx);
+ req->op = FUN_ETH_OP_TX;
+ req->len8 = 0;
+ req->flags = 0;
+ req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
+ req->repr_idn = 0;
+ req->encap_proto = 0;
+
+ if (likely(shinfo->gso_size)) {
+ if (skb->encapsulation) {
+ u16 ol4_ofst;
+
+ flags = FUN_ETH_OUTER_EN | FUN_ETH_INNER_LSO |
+ FUN_ETH_UPDATE_INNER_L4_CKSUM |
+ FUN_ETH_UPDATE_OUTER_L3_LEN;
+ if (shinfo->gso_type & (SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM)) {
+ flags |= FUN_ETH_UPDATE_OUTER_L4_LEN |
+ FUN_ETH_OUTER_UDP;
+ if (shinfo->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)
+ flags |= FUN_ETH_UPDATE_OUTER_L4_CKSUM;
+ ol4_ofst = skb_transport_offset(skb);
+ } else {
+ ol4_ofst = skb_inner_network_offset(skb);
+ }
+
+ if (ip_hdr(skb)->version == 4)
+ flags |= FUN_ETH_UPDATE_OUTER_L3_CKSUM;
+ else
+ flags |= FUN_ETH_OUTER_IPV6;
+
+ if (skb->inner_network_header) {
+ if (inner_ip_hdr(skb)->version == 4)
+ flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+ else
+ flags |= FUN_ETH_INNER_IPV6 |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+ }
+ th = inner_tcp_hdr(skb);
+ l4_hlen = __tcp_hdrlen(th);
+ fun_eth_offload_init(&req->offload, flags,
+ shinfo->gso_size,
+ tcp_hdr_doff_flags(th), 0,
+ skb_inner_network_offset(skb),
+ skb_inner_transport_offset(skb),
+ skb_network_offset(skb), ol4_ofst);
+ FUN_QSTAT_INC(q, tx_encap_tso);
+ } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
+ flags = FUN_ETH_INNER_LSO | FUN_ETH_INNER_UDP |
+ FUN_ETH_UPDATE_INNER_L4_CKSUM |
+ FUN_ETH_UPDATE_INNER_L4_LEN |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+
+ if (ip_hdr(skb)->version == 4)
+ flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
+ else
+ flags |= FUN_ETH_INNER_IPV6;
+
+ l4_hlen = sizeof(struct udphdr);
+ fun_eth_offload_init(&req->offload, flags,
+ shinfo->gso_size,
+ cpu_to_be16(l4_hlen << 10), 0,
+ skb_network_offset(skb),
+ skb_transport_offset(skb), 0, 0);
+ FUN_QSTAT_INC(q, tx_uso);
+ } else {
+ /* HW considers one set of headers as inner */
+ flags = FUN_ETH_INNER_LSO |
+ FUN_ETH_UPDATE_INNER_L4_CKSUM |
+ FUN_ETH_UPDATE_INNER_L3_LEN;
+ if (shinfo->gso_type & SKB_GSO_TCPV6)
+ flags |= FUN_ETH_INNER_IPV6;
+ else
+ flags |= FUN_ETH_UPDATE_INNER_L3_CKSUM;
+ th = tcp_hdr(skb);
+ l4_hlen = __tcp_hdrlen(th);
+ fun_eth_offload_init(&req->offload, flags,
+ shinfo->gso_size,
+ tcp_hdr_doff_flags(th), 0,
+ skb_network_offset(skb),
+ skb_transport_offset(skb), 0, 0);
+ FUN_QSTAT_INC(q, tx_tso);
+ }
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_cso += shinfo->gso_segs;
+ u64_stats_update_end(&q->syncp);
+
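+ /* Account for the extra packets the device will generate from this
+ * super-packet and the header bytes replicated for each of them.
+ */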
+ extra_pkts = shinfo->gso_segs - 1;
+ extra_bytes = (be16_to_cpu(req->offload.inner_l4_off) +
+ l4_hlen) * extra_pkts;
+ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ flags = FUN_ETH_UPDATE_INNER_L4_CKSUM;
+ if (skb->csum_offset == offsetof(struct udphdr, check))
+ flags |= FUN_ETH_INNER_UDP;
+ fun_eth_offload_init(&req->offload, flags, 0, 0, 0, 0,
+ skb_checksum_start_offset(skb), 0, 0);
+ FUN_QSTAT_INC(q, tx_cso);
+ } else {
+ fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
+ }
+
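+ /* One gather entry per skb fragment plus one for the linear data. */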
+ ngle = shinfo->nr_frags + 1;
+ req->dataop = FUN_DATAOP_HDR_INIT(ngle, 0, ngle, 0, skb->len);
+
+ gle = fun_write_gl(q, req, addrs, lens, ngle);
+
+ if (IS_ENABLED(CONFIG_TLS_DEVICE) && unlikely(tls_len)) {
+ struct fun_eth_tls *tls = (struct fun_eth_tls *)gle;
+ struct fun_ktls_tx_ctx *tls_ctx;
+
+ req->len8 += FUNETH_TLS_SZ / 8;
+ req->flags = cpu_to_be16(FUN_ETH_TX_TLS);
+
+ tls_ctx = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
+ tls->tlsid = tls_ctx->tlsid;
+ tls_ctx->next_seq += tls_len;
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_tls_bytes += tls_len;
+ q->stats.tx_tls_pkts += 1 + extra_pkts;
+ u64_stats_update_end(&q->syncp);
+ }
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_bytes += skb->len + extra_bytes;
+ q->stats.tx_pkts += 1 + extra_pkts;
+ u64_stats_update_end(&q->syncp);
+
+ q->info[idx].skb = skb;
+
+ trace_funeth_tx(q, skb->len, idx, req->dataop.ngather);
+ return tx_req_ndesc(req);
+}
+
+/* Return the number of available descriptors of a Tx queue.
+ * HW assumes head==tail means the ring is empty so we need to keep one
+ * descriptor unused.
+ */
+static unsigned int fun_txq_avail(const struct funeth_txq *q)
+{
+ return q->mask - q->prod_cnt + q->cons_cnt;
+}
+
+/* Stop a queue if it can't handle another worst-case packet. */
+static void fun_tx_check_stop(struct funeth_txq *q)
+{
+ if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC))
+ return;
+
+ netif_tx_stop_queue(q->ndq);
+
+ /* NAPI reclaim is freeing packets in parallel with us and we may race.
+ * We have stopped the queue but check again after synchronizing with
+ * reclaim.
+ */
+ smp_mb();
+ if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC))
+ FUN_QSTAT_INC(q, tx_nstops);
+ else
+ netif_tx_start_queue(q->ndq);
+}
+
+/* Return true if a queue has enough space to restart. The current condition
+ * is that at least a quarter of the ring's descriptors must be free.
+ */
+static bool fun_txq_may_restart(struct funeth_txq *q)
+{
+ return fun_txq_avail(q) >= q->mask / 4;
+}
+
+netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct funeth_priv *fp = netdev_priv(netdev);
+ unsigned int qid = skb_get_queue_mapping(skb);
+ struct funeth_txq *q = fp->txqs[qid];
+ unsigned int tls_len = 0;
+ unsigned int ndesc;
+
+ if (IS_ENABLED(CONFIG_TLS_DEVICE) && skb->sk &&
+ tls_is_sk_tx_device_offloaded(skb->sk)) {
+ skb = fun_tls_tx(skb, q, &tls_len);
+ if (unlikely(!skb))
+ goto dropped;
+ }
+
+ ndesc = write_pkt_desc(skb, q, tls_len);
+ if (unlikely(!ndesc)) {
+ dev_kfree_skb_any(skb);
+ goto dropped;
+ }
+
+ q->prod_cnt += ndesc;
+ fun_tx_check_stop(q);
+
+ skb_tx_timestamp(skb);
+
+ if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more()))
+ fun_txq_wr_db(q);
+ else
+ FUN_QSTAT_INC(q, tx_more);
+
+ return NETDEV_TX_OK;
+
+dropped:
+ /* A dropped packet may be the last one in an xmit_more train;
+ * ring the doorbell just in case.
+ */
+ if (!netdev_xmit_more())
+ fun_txq_wr_db(q);
+ return NETDEV_TX_OK;
+}
+
+/* Return a Tx queue's HW head index written back to host memory. */
+static u16 txq_hw_head(const struct funeth_txq *q)
+{
+ return (u16)be64_to_cpu(*q->hw_wb);
+}
+
+/* Unmap the Tx packet starting at the given descriptor index and
+ * return the number of Tx descriptors it occupied.
+ */
+static unsigned int fun_unmap_pkt(const struct funeth_txq *q, unsigned int idx)
+{
+ const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx);
+ unsigned int ngle = req->dataop.ngather;
+ struct fun_dataop_gl *gle;
+
+ if (ngle) {
+ gle = (struct fun_dataop_gl *)req->dataop.imm;
+ dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data),
+ be32_to_cpu(gle->sgl_len), DMA_TO_DEVICE);
+
+ for (gle++; --ngle && txq_to_end(q, gle); gle++)
+ dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
+ be32_to_cpu(gle->sgl_len),
+ DMA_TO_DEVICE);
+
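+ /* Unmap any gather entries that wrapped to the ring start. */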
+ for (gle = (struct fun_dataop_gl *)q->desc; ngle; ngle--, gle++)
+ dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data),
+ be32_to_cpu(gle->sgl_len),
+ DMA_TO_DEVICE);
+ }
+
+ return tx_req_ndesc(req);
+}
+
+/* Reclaim completed Tx descriptors and free their packets. Restart a stopped
+ * queue if we freed enough descriptors.
+ *
+ * Return true if we exhausted the budget while there is more work to be done.
+ */
+static bool fun_txq_reclaim(struct funeth_txq *q, int budget)
+{
+ unsigned int npkts = 0, nbytes = 0, ndesc = 0;
+ unsigned int head, limit, reclaim_idx;
+
+ /* budget may be 0, e.g., netpoll */
+ limit = budget ? budget : UINT_MAX;
+
+ for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
+ head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) {
+ /* The HW head is continually updated, ensure we don't read
+ * descriptor state before the head tells us to reclaim it.
+ * On the enqueue side the doorbell is an implicit write
+ * barrier.
+ */
+ rmb();
+
+ do {
+ unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
+ struct sk_buff *skb = q->info[reclaim_idx].skb;
+
+ trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);
+
+ nbytes += skb->len;
+ napi_consume_skb(skb, budget);
+ ndesc += pkt_desc;
+ reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
+ npkts++;
+ } while (reclaim_idx != head && npkts < limit);
+ }
+
+ q->cons_cnt += ndesc;
+ netdev_tx_completed_queue(q->ndq, npkts, nbytes);
+ smp_mb(); /* pairs with the one in fun_tx_check_stop() */
+
+ if (unlikely(netif_tx_queue_stopped(q->ndq) &&
+ fun_txq_may_restart(q))) {
+ netif_tx_wake_queue(q->ndq);
+ FUN_QSTAT_INC(q, tx_nrestarts);
+ }
+
+ return reclaim_idx != head;
+}
+
+/* The NAPI handler for Tx queues. */
+int fun_txq_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct fun_irq *irq = container_of(napi, struct fun_irq, napi);
+ struct funeth_txq *q = irq->txq;
+ unsigned int db_val;
+
+ if (fun_txq_reclaim(q, budget))
+ return budget; /* exhausted budget */
+
+ napi_complete(napi); /* exhausted pending work */
+ db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask);
+ writel(db_val, q->db);
+ return 0;
+}
+
+/* Reclaim up to @budget completed Tx packets from an XDP Tx queue. */
+static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget)
+{
+ unsigned int npkts = 0, ndesc = 0, head, reclaim_idx;
+
+ for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask;
+ head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) {
+ /* The HW head is continually updated, ensure we don't read
+ * descriptor state before the head tells us to reclaim it.
+ * On the enqueue side the doorbell is an implicit write
+ * barrier.
+ */
+ rmb();
+
+ do {
+ unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx);
+
+ xdp_return_frame(q->info[reclaim_idx].xdpf);
+
+ trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head);
+
+ reclaim_idx = (reclaim_idx + pkt_desc) & q->mask;
+ ndesc += pkt_desc;
+ npkts++;
+ } while (reclaim_idx != head && npkts < budget);
+ }
+
+ q->cons_cnt += ndesc;
+ return npkts;
+}
+
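+/* Queue an XDP frame for transmission, reclaiming completed descriptors first
+ * if the queue is running low. Returns true on success, false if the queue is
+ * full or DMA mapping fails. The caller is responsible for ringing the
+ * doorbell.
+ */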
+bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf)
+{
+ unsigned int idx, nfrags = 1, ndesc = 1, tot_len = xdpf->len;
+ const struct skb_shared_info *si = NULL;
+ unsigned int lens[MAX_SKB_FRAGS + 1];
+ dma_addr_t dma[MAX_SKB_FRAGS + 1];
+ struct fun_eth_tx_req *req;
+
+ if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES)
+ fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH);
+
+ if (unlikely(xdp_frame_has_frags(xdpf))) {
+ si = xdp_get_shared_info_from_frame(xdpf);
+ tot_len = xdp_get_frame_len(xdpf);
+ nfrags += si->nr_frags;
+ ndesc = DIV_ROUND_UP((sizeof(*req) + nfrags *
+ sizeof(struct fun_dataop_gl)),
+ FUNETH_SQE_SIZE);
+ }
+
+ if (unlikely(fun_txq_avail(q) < ndesc)) {
+ FUN_QSTAT_INC(q, tx_xdp_full);
+ return false;
+ }
+
+ if (unlikely(fun_map_pkt(q->dma_dev, si, xdpf->data, xdpf->len, dma,
+ lens))) {
+ FUN_QSTAT_INC(q, tx_map_err);
+ return false;
+ }
+
+ idx = q->prod_cnt & q->mask;
+ req = fun_tx_desc_addr(q, idx);
+ req->op = FUN_ETH_OP_TX;
+ req->len8 = 0;
+ req->flags = 0;
+ req->suboff8 = offsetof(struct fun_eth_tx_req, dataop);
+ req->repr_idn = 0;
+ req->encap_proto = 0;
+ fun_eth_offload_init(&req->offload, 0, 0, 0, 0, 0, 0, 0, 0);
+ req->dataop = FUN_DATAOP_HDR_INIT(nfrags, 0, nfrags, 0, tot_len);
+
+ fun_write_gl(q, req, dma, lens, nfrags);
+
+ q->info[idx].xdpf = xdpf;
+
+ u64_stats_update_begin(&q->syncp);
+ q->stats.tx_bytes += tot_len;
+ q->stats.tx_pkts++;
+ u64_stats_update_end(&q->syncp);
+
+ trace_funeth_tx(q, tot_len, idx, nfrags);
+ q->prod_cnt += ndesc;
+
+ return true;
+}
+
+int fun_xdp_xmit_frames(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_txq *q, **xdpqs;
+ int i, q_idx;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ xdpqs = rcu_dereference_bh(fp->xdpqs);
+ if (unlikely(!xdpqs))
+ return -ENETDOWN;
+
+ q_idx = smp_processor_id();
+ if (unlikely(q_idx >= fp->num_xdpqs))
+ return -ENXIO;
+
+ for (q = xdpqs[q_idx], i = 0; i < n; i++)
+ if (!fun_xdp_tx(q, frames[i]))
+ break;
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ fun_txq_wr_db(q);
+ return i;
+}
+
+/* Purge a Tx queue of any queued packets. Should be called once HW access
+ * to the packets has been revoked, e.g., after the queue has been disabled.
+ */
+static void fun_txq_purge(struct funeth_txq *q)
+{
+ while (q->cons_cnt != q->prod_cnt) {
+ unsigned int idx = q->cons_cnt & q->mask;
+
+ q->cons_cnt += fun_unmap_pkt(q, idx);
+ dev_kfree_skb_any(q->info[idx].skb);
+ }
+ netdev_tx_reset_queue(q->ndq);
+}
+
+static void fun_xdpq_purge(struct funeth_txq *q)
+{
+ while (q->cons_cnt != q->prod_cnt) {
+ unsigned int idx = q->cons_cnt & q->mask;
+
+ q->cons_cnt += fun_unmap_pkt(q, idx);
+ xdp_return_frame(q->info[idx].xdpf);
+ }
+}
+
+/* Create a Tx queue, allocating all the host resources needed. */
+static struct funeth_txq *fun_txq_create_sw(struct net_device *dev,
+ unsigned int qidx,
+ unsigned int ndesc,
+ struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(dev);
+ struct funeth_txq *q;
+ int numa_node;
+
+ if (irq)
+ numa_node = fun_irq_node(irq); /* skb Tx queue */
+ else
+ numa_node = cpu_to_node(qidx); /* XDP Tx queue */
+
+ q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node);
+ if (!q)
+ goto err;
+
+ q->dma_dev = &fp->pdev->dev;
+ q->desc = fun_alloc_ring_mem(q->dma_dev, ndesc, FUNETH_SQE_SIZE,
+ sizeof(*q->info), true, numa_node,
+ &q->dma_addr, (void **)&q->info,
+ &q->hw_wb);
+ if (!q->desc)
+ goto free_q;
+
+ q->netdev = dev;
+ q->mask = ndesc - 1;
+ q->qidx = qidx;
+ q->numa_node = numa_node;
+ u64_stats_init(&q->syncp);
+ q->init_state = FUN_QSTATE_INIT_SW;
+ return q;
+
+free_q:
+ kfree(q);
+err:
+ netdev_err(dev, "Can't allocate memory for %s queue %u\n",
+ irq ? "Tx" : "XDP", qidx);
+ return NULL;
+}
+
+static void fun_txq_free_sw(struct funeth_txq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+
+ fun_free_ring_mem(q->dma_dev, q->mask + 1, FUNETH_SQE_SIZE, true,
+ q->desc, q->dma_addr, q->info);
+
+ fp->tx_packets += q->stats.tx_pkts;
+ fp->tx_bytes += q->stats.tx_bytes;
+ fp->tx_dropped += q->stats.tx_map_err;
+
+ kfree(q);
+}
+
+/* Allocate the device portion of a Tx queue. */
+int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+ unsigned int irq_idx, ndesc = q->mask + 1;
+ int err;
+
+ q->irq = irq;
+ *q->hw_wb = 0;
+ q->prod_cnt = 0;
+ q->cons_cnt = 0;
+ irq_idx = irq ? irq->irq_idx : 0;
+
+ err = fun_sq_create(fp->fdev,
+ FUN_ADMIN_EPSQ_CREATE_FLAG_HEAD_WB_ADDRESS |
+ FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR, 0,
+ FUN_HCI_ID_INVALID, ilog2(FUNETH_SQE_SIZE), ndesc,
+ q->dma_addr, fp->tx_coal_count, fp->tx_coal_usec,
+ irq_idx, 0, fp->fdev->kern_end_qid, 0,
+ &q->hw_qid, &q->db);
+ if (err)
+ goto out;
+
+ err = fun_create_and_bind_tx(fp, q->hw_qid);
+ if (err < 0)
+ goto free_devq;
+ q->ethid = err;
+
+ if (irq) {
+ irq->txq = q;
+ q->ndq = netdev_get_tx_queue(q->netdev, q->qidx);
+ q->irq_db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec,
+ fp->tx_coal_count);
+ writel(q->irq_db_val, q->db);
+ }
+
+ q->init_state = FUN_QSTATE_INIT_FULL;
+ netif_info(fp, ifup, q->netdev,
+ "%s queue %u, depth %u, HW qid %u, IRQ idx %u, eth id %u, node %d\n",
+ irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx,
+ q->ethid, q->numa_node);
+ return 0;
+
+free_devq:
+ fun_destroy_sq(fp->fdev, q->hw_qid);
+out:
+ netdev_err(q->netdev,
+ "Failed to create %s queue %u on device, error %d\n",
+ irq ? "Tx" : "XDP", q->qidx, err);
+ return err;
+}
+
+static void fun_txq_free_dev(struct funeth_txq *q)
+{
+ struct funeth_priv *fp = netdev_priv(q->netdev);
+
+ if (q->init_state < FUN_QSTATE_INIT_FULL)
+ return;
+
+ netif_info(fp, ifdown, q->netdev,
+ "Freeing %s queue %u (id %u), IRQ %u, ethid %u\n",
+ q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid,
+ q->irq ? q->irq->irq_idx : 0, q->ethid);
+
+ fun_destroy_sq(fp->fdev, q->hw_qid);
+ fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, q->ethid);
+
+ if (q->irq) {
+ q->irq->txq = NULL;
+ fun_txq_purge(q);
+ } else {
+ fun_xdpq_purge(q);
+ }
+
+ q->init_state = FUN_QSTATE_INIT_SW;
+}
+
+/* Create or advance a Tx queue, allocating all the host and device resources
+ * needed to reach the target state.
+ */
+int funeth_txq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ndesc, struct fun_irq *irq, int state,
+ struct funeth_txq **qp)
+{
+ struct funeth_txq *q = *qp;
+ int err;
+
+ if (!q)
+ q = fun_txq_create_sw(dev, qidx, ndesc, irq);
+ if (!q)
+ return -ENOMEM;
+
+ if (q->init_state >= state)
+ goto out;
+
+ err = fun_txq_create_dev(q, irq);
+ if (err) {
+ if (!*qp)
+ fun_txq_free_sw(q);
+ return err;
+ }
+
+out:
+ *qp = q;
+ return 0;
+}
+
+/* Free Tx queue resources until it reaches the target state.
+ * The queue must already be disconnected from the stack.
+ */
+struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state)
+{
+ if (state < FUN_QSTATE_INIT_FULL)
+ fun_txq_free_dev(q);
+
+ if (state == FUN_QSTATE_DESTROYED) {
+ fun_txq_free_sw(q);
+ q = NULL;
+ }
+
+ return q;
+}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
new file mode 100644
index 000000000000..53b7e95213a8
--- /dev/null
+++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
@@ -0,0 +1,265 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+
+#ifndef _FUNETH_TXRX_H
+#define _FUNETH_TXRX_H
+
+#include <linux/netdevice.h>
+#include <linux/u64_stats_sync.h>
+
+/* Tx descriptor size */
+#define FUNETH_SQE_SIZE 64U
+
+/* Size of device headers per Tx packet */
+#define FUNETH_FUNOS_HDR_SZ (sizeof(struct fun_eth_tx_req))
+
+/* Number of gather list entries per Tx descriptor */
+#define FUNETH_GLE_PER_DESC (FUNETH_SQE_SIZE / sizeof(struct fun_dataop_gl))
+
+/* Max gather list size in bytes for an sk_buff. */
+#define FUNETH_MAX_GL_SZ ((MAX_SKB_FRAGS + 1) * sizeof(struct fun_dataop_gl))
+
+#if IS_ENABLED(CONFIG_TLS_DEVICE)
+# define FUNETH_TLS_SZ sizeof(struct fun_eth_tls)
+#else
+# define FUNETH_TLS_SZ 0
+#endif
+
+/* Max number of Tx descriptors for an sk_buff using a gather list. */
+#define FUNETH_MAX_GL_DESC \
+ DIV_ROUND_UP((FUNETH_FUNOS_HDR_SZ + FUNETH_MAX_GL_SZ + FUNETH_TLS_SZ), \
+ FUNETH_SQE_SIZE)
+
+/* Max number of Tx descriptors for any packet. */
+#define FUNETH_MAX_PKT_DESC FUNETH_MAX_GL_DESC
+
+/* Rx CQ descriptor size. */
+#define FUNETH_CQE_SIZE 64U
+
+/* Offset of cqe_info within a CQE. */
+#define FUNETH_CQE_INFO_OFFSET (FUNETH_CQE_SIZE - sizeof(struct fun_cqe_info))
+
+/* Construct the IRQ portion of a CQ doorbell. The resulting value arms the
+ * interrupt with the supplied time delay and packet count moderation settings.
+ */
+#define FUN_IRQ_CQ_DB(usec, pkts) \
+ (FUN_DB_IRQ_ARM_F | ((usec) << FUN_DB_INTCOAL_USEC_S) | \
+ ((pkts) << FUN_DB_INTCOAL_ENTRIES_S))
+
+/* As above for SQ doorbells. */
+#define FUN_IRQ_SQ_DB(usec, pkts) \
+ (FUN_DB_IRQ_ARM_F | \
+ ((usec) << FUN_DB_INTCOAL_USEC_S) | \
+ ((pkts) << FUN_DB_INTCOAL_ENTRIES_S))
+
+/* Per packet tailroom. Present only for 1-frag packets. */
+#define FUN_RX_TAILROOM SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+/* Per packet headroom for XDP. Preferred over XDP_PACKET_HEADROOM to
+ * accommodate two packets per buffer for 4K pages and 1500B MTUs.
+ */
+#define FUN_XDP_HEADROOM 192
+
+/* Initialization state of a queue. */
+enum {
+ FUN_QSTATE_DESTROYED, /* what queue? */
+ FUN_QSTATE_INIT_SW, /* exists in SW, not on the device */
+ FUN_QSTATE_INIT_FULL, /* exists both in SW and on device */
+};
+
+/* Initialization state of an interrupt. */
+enum {
+ FUN_IRQ_INIT, /* initialized and in the XArray but inactive */
+ FUN_IRQ_REQUESTED, /* request_irq() done */
+ FUN_IRQ_ENABLED, /* processing enabled */
+ FUN_IRQ_DISABLED, /* processing disabled */
+};
+
+struct bpf_prog;
+
+struct funeth_txq_stats { /* per Tx queue SW counters */
+ u64 tx_pkts; /* # of Tx packets */
+ u64 tx_bytes; /* total bytes of Tx packets */
+ u64 tx_cso; /* # of packets with checksum offload */
+ u64 tx_tso; /* # of non-encapsulated TSO super-packets */
+ u64 tx_encap_tso; /* # of encapsulated TSO super-packets */
+ u64 tx_uso; /* # of non-encapsulated UDP LSO super-packets */
+ u64 tx_more; /* # of DBs elided due to xmit_more */
+ u64 tx_nstops; /* # of times the queue has stopped */
+ u64 tx_nrestarts; /* # of times the queue has restarted */
+ u64 tx_map_err; /* # of packets dropped due to DMA mapping errors */
+ u64 tx_xdp_full; /* # of XDP packets that could not be enqueued */
+ u64 tx_tls_pkts; /* # of Tx TLS packets offloaded to HW */
+ u64 tx_tls_bytes; /* Tx bytes of HW-handled TLS payload */
+ u64 tx_tls_fallback; /* attempted Tx TLS offloads punted to SW */
+ u64 tx_tls_drops; /* attempted Tx TLS offloads dropped */
+};
+
+struct funeth_tx_info { /* per Tx descriptor state */
+ union {
+ struct sk_buff *skb; /* associated packet (sk_buff path) */
+ struct xdp_frame *xdpf; /* associated XDP frame (XDP path) */
+ };
+};
+
+struct funeth_txq {
+ /* RO cacheline of frequently accessed data */
+ u32 mask; /* queue depth - 1 */
+ u32 hw_qid; /* device ID of the queue */
+ void *desc; /* base address of descriptor ring */
+ struct funeth_tx_info *info;
+ struct device *dma_dev; /* device for DMA mappings */
+ volatile __be64 *hw_wb; /* HW write-back location */
+ u32 __iomem *db; /* SQ doorbell register address */
+ struct netdev_queue *ndq;
+ dma_addr_t dma_addr; /* DMA address of descriptor ring */
+ /* producer R/W cacheline */
+ u16 qidx; /* queue index within net_device */
+ u16 ethid;
+ u32 prod_cnt; /* producer counter */
+ struct funeth_txq_stats stats;
+ /* shared R/W cacheline, primarily accessed by consumer */
+ u32 irq_db_val; /* value written to IRQ doorbell */
+ u32 cons_cnt; /* consumer (cleanup) counter */
+ struct net_device *netdev;
+ struct fun_irq *irq;
+ int numa_node;
+ u8 init_state; /* queue initialization state */
+ struct u64_stats_sync syncp;
+};
+
+struct funeth_rxq_stats { /* per Rx queue SW counters */
+ u64 rx_pkts; /* # of received packets, including SW drops */
+ u64 rx_bytes; /* total size of received packets */
+ u64 rx_cso; /* # of packets with checksum offload */
+ u64 rx_bufs; /* total # of Rx buffers provided to device */
+ u64 gro_pkts; /* # of GRO superpackets */
+ u64 gro_merged; /* # of pkts merged into existing GRO superpackets */
+ u64 rx_page_alloc; /* # of page allocations for Rx buffers */
+ u64 rx_budget; /* NAPI iterations that exhausted their budget */
+ u64 rx_mem_drops; /* # of packets dropped due to memory shortage */
+ u64 rx_map_err; /* # of page DMA mapping errors */
+ u64 xdp_drops; /* XDP_DROPped packets */
+ u64 xdp_tx; /* successful XDP transmits */
+ u64 xdp_redir; /* successful XDP redirects */
+ u64 xdp_err; /* packets dropped due to XDP errors */
+};
+
+struct funeth_rxbuf { /* per Rx buffer state */
+ struct page *page; /* associated page */
+ dma_addr_t dma_addr; /* DMA address of page start */
+ int pg_refs; /* page refs held by driver */
+ int node; /* page node, or -1 if it is PF_MEMALLOC */
+};
+
+struct funeth_rx_cache { /* cache of DMA-mapped previously used buffers */
+ struct funeth_rxbuf *bufs; /* base of Rx buffer state ring */
+ unsigned int prod_cnt; /* producer counter */
+ unsigned int cons_cnt; /* consumer counter */
+ unsigned int mask; /* depth - 1 */
+};
+
+/* An Rx queue consists of a CQ and an SQ used to provide Rx buffers. */
+struct funeth_rxq {
+ struct net_device *netdev;
+ struct napi_struct *napi;
+ struct device *dma_dev; /* device for DMA mappings */
+ void *cqes; /* base of CQ descriptor ring */
+ const void *next_cqe_info; /* fun_cqe_info of next CQE */
+ u32 __iomem *cq_db; /* CQ doorbell register address */
+ unsigned int cq_head; /* CQ head index */
+ unsigned int cq_mask; /* CQ depth - 1 */
+ u16 phase; /* CQ phase tag */
+ u16 qidx; /* queue index within net_device */
+ unsigned int irq_db_val; /* IRQ info for CQ doorbell */
+ struct fun_eprq_rqbuf *rqes; /* base of RQ descriptor ring */
+ struct funeth_rxbuf *bufs; /* base of Rx buffer state ring */
+ struct funeth_rxbuf *cur_buf; /* currently active buffer */
+ u32 __iomem *rq_db; /* RQ doorbell register address */
+ unsigned int rq_cons; /* RQ consumer counter */
+ unsigned int rq_mask; /* RQ depth - 1 */
+ unsigned int buf_offset; /* offset of next pkt in head buffer */
+ u8 xdp_flush; /* XDP flush types needed at NAPI end */
+ u8 init_state; /* queue initialization state */
+ u16 headroom; /* per packet headroom */
+ unsigned int rq_cons_db; /* value of rq_cons at last RQ db */
+ unsigned int rq_db_thres; /* # of new buffers needed to write RQ db */
+ struct funeth_rxbuf spare_buf; /* spare for next buffer replacement */
+ struct funeth_rx_cache cache; /* used buffer cache */
+ struct bpf_prog *xdp_prog; /* optional XDP BPF program */
+ struct funeth_rxq_stats stats;
+ dma_addr_t cq_dma_addr; /* DMA address of CQE ring */
+ dma_addr_t rq_dma_addr; /* DMA address of RQE ring */
+ u16 irq_cnt;
+ u32 hw_cqid; /* device ID of the queue's CQ */
+ u32 hw_sqid; /* device ID of the queue's SQ */
+ int numa_node;
+ struct u64_stats_sync syncp;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+#define FUN_QSTAT_INC(q, counter) \
+ do { \
+ u64_stats_update_begin(&(q)->syncp); \
+ (q)->stats.counter++; \
+ u64_stats_update_end(&(q)->syncp); \
+ } while (0)
+
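+/* Read a consistent snapshot of a queue's SW counters into @stats_copy. */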
+#define FUN_QSTAT_READ(q, seq, stats_copy) \
+ do { \
+ seq = u64_stats_fetch_begin(&(q)->syncp); \
+ stats_copy = (q)->stats; \
+ } while (u64_stats_fetch_retry(&(q)->syncp, (seq)))
+
+#define FUN_INT_NAME_LEN (IFNAMSIZ + 16)
+
+struct fun_irq {
+ struct napi_struct napi;
+ struct funeth_txq *txq;
+ struct funeth_rxq *rxq;
+ u8 state;
+ u16 irq_idx; /* index of MSI-X interrupt */
+ int irq; /* Linux IRQ vector */
+ cpumask_t affinity_mask; /* IRQ affinity */
+ struct irq_affinity_notify aff_notify;
+ char name[FUN_INT_NAME_LEN];
+} ____cacheline_internodealigned_in_smp;
+
+/* Return the start address of the idx-th Tx descriptor. */
+static inline void *fun_tx_desc_addr(const struct funeth_txq *q,
+ unsigned int idx)
+{
+ return q->desc + idx * FUNETH_SQE_SIZE;
+}
+
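+/* Ring a Tx queue's doorbell with its current producer index. */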
+static inline void fun_txq_wr_db(const struct funeth_txq *q)
+{
+ unsigned int tail = q->prod_cnt & q->mask;
+
+ writel(tail, q->db);
+}
+
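+/* Return the memory node of the first CPU in an IRQ's affinity mask. */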
+static inline int fun_irq_node(const struct fun_irq *p)
+{
+ return cpu_to_mem(cpumask_first(&p->affinity_mask));
+}
+
+int fun_rxq_napi_poll(struct napi_struct *napi, int budget);
+int fun_txq_napi_poll(struct napi_struct *napi, int budget);
+netdev_tx_t fun_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf);
+int fun_xdp_xmit_frames(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags);
+
+int funeth_txq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ndesc, struct fun_irq *irq, int state,
+ struct funeth_txq **qp);
+int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq);
+struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state);
+int funeth_rxq_create(struct net_device *dev, unsigned int qidx,
+ unsigned int ncqe, unsigned int nrqe, struct fun_irq *irq,
+ int state, struct funeth_rxq **qp);
+int fun_rxq_create_dev(struct funeth_rxq *q, struct fun_irq *irq);
+struct funeth_rxq *funeth_rxq_free(struct funeth_rxq *q, int state);
+int fun_rxq_set_bpf(struct funeth_rxq *q, struct bpf_prog *prog);
+
+#endif /* _FUNETH_TXRX_H */
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index b719f72281c4..98eb78d98e9f 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -47,6 +47,10 @@
#define GVE_RX_BUFFER_SIZE_DQO 2048
+#define GVE_XDP_ACTIONS 5
+
+#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
+
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -60,7 +64,8 @@ struct gve_rx_slot_page_info {
void *page_address;
u32 page_offset; /* offset to write to in page */
int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
- u8 can_flip;
+ u16 pad; /* adjustment for rx padding */
+ u8 can_flip; /* tracks if the networking stack is using the page */
};
/* A list of pages registered with the device during setup and used by a queue
@@ -149,10 +154,17 @@ struct gve_rx_ctx {
/* head and tail of skb chain for the current packet or NULL if none */
struct sk_buff *skb_head;
struct sk_buff *skb_tail;
- u16 total_expected_size;
- u8 expected_frag_cnt;
- u8 curr_frag_cnt;
- u8 reuse_frags;
+ u32 total_size;
+ u8 frag_cnt;
+ bool drop_pkt;
+};
+
+struct gve_rx_cnts {
+ u32 ok_pkt_bytes;
+ u16 ok_pkt_cnt;
+ u16 total_pkt_cnt;
+ u16 cont_pkt_cnt;
+ u16 desc_err_pkt_cnt;
};
/* Contains datapath state used to represent an RX queue. */
@@ -167,6 +179,10 @@ struct gve_rx_ring {
/* threshold for posting new buffs and descs */
u32 db_threshold;
u16 packet_buffer_size;
+
+ u32 qpl_copy_pool_mask;
+ u32 qpl_copy_pool_head;
+ struct gve_rx_slot_page_info *qpl_copy_pool;
};
/* DQO fields. */
@@ -216,7 +232,12 @@ struct gve_rx_ring {
u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
- u64 rx_frag_copy_cnt; /* free-running count of rx segments copied into skb linear portion */
+ u64 rx_frag_copy_cnt; /* free-running count of rx segments copied */
+ u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
+ u64 xdp_tx_errors;
+ u64 xdp_redirect_errors;
+ u64 xdp_alloc_fails;
+ u64 xdp_actions[GVE_XDP_ACTIONS];
u32 q_num; /* queue index */
u32 ntfy_id; /* notification block index */
struct gve_queue_resources *q_resources; /* head and tail pointer idx */
@@ -224,11 +245,18 @@ struct gve_rx_ring {
struct u64_stats_sync statss; /* sync stats for 32bit archs */
struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
+
+ /* XDP stuff */
+ struct xdp_rxq_info xdp_rxq;
+ struct xdp_rxq_info xsk_rxq;
+ struct xsk_buff_pool *xsk_pool;
+ struct page_frag_cache page_cache; /* Page cache to allocate XDP frames */
};
/* A TX desc ring entry */
union gve_tx_desc {
struct gve_tx_pkt_desc pkt; /* first desc for a packet */
+ struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};
@@ -243,7 +271,14 @@ struct gve_tx_iovec {
* ring entry but only used for a pkt_desc not a seg_desc
*/
struct gve_tx_buffer_state {
- struct sk_buff *skb; /* skb for this pkt */
+ union {
+ struct sk_buff *skb; /* skb for this pkt */
+ struct xdp_frame *xdp_frame; /* xdp_frame */
+ };
+ struct {
+ u16 size; /* size of xmitted xdp pkt */
+ u8 is_xsk; /* xsk buff */
+ } xdp;
union {
struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
struct {
@@ -358,6 +393,8 @@ struct gve_tx_ring {
struct {
/* Spinlock for when cleanup in progress */
spinlock_t clean_lock;
+ /* Spinlock for XDP tx traffic */
+ spinlock_t xdp_lock;
};
/* DQO fields. */
@@ -435,19 +472,25 @@ struct gve_tx_ring {
dma_addr_t q_resources_bus; /* dma address of the queue resources */
dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
struct u64_stats_sync statss; /* sync stats for 32bit archs */
+ struct xsk_buff_pool *xsk_pool;
+ u32 xdp_xsk_wakeup;
+ u32 xdp_xsk_done;
+ u64 xdp_xsk_sent;
+ u64 xdp_xmit;
+ u64 xdp_xmit_errors;
} ____cacheline_aligned;
/* Wraps the info for one irq including the napi struct and the queues
* associated with that irq.
*/
struct gve_notify_block {
- __be32 irq_db_index; /* idx into Bar2 - set by device, must be 1st */
+ __be32 *irq_db_index; /* pointer to idx into Bar2 */
char name[IFNAMSIZ + 16]; /* name registered with the kernel */
struct napi_struct napi; /* kernel napi struct for this block */
struct gve_priv *priv;
struct gve_tx_ring *tx; /* tx rings on this block */
struct gve_rx_ring *rx; /* rx rings on this block */
-} ____cacheline_aligned;
+};
/* Tracks allowed and current queue settings */
struct gve_queue_config {
@@ -466,6 +509,10 @@ struct gve_options_dqo_rda {
u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};
+struct gve_irq_db {
+ __be32 index;
+} ____cacheline_aligned;
+
struct gve_ptype {
u8 l3_type; /* `gve_l3_type` in gve_adminq.h */
u8 l4_type; /* `gve_l4_type` in gve_adminq.h */
@@ -492,7 +539,8 @@ struct gve_priv {
struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
struct gve_queue_page_list *qpls; /* array of num qpls */
struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
- dma_addr_t ntfy_block_bus;
+ struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
+ dma_addr_t irq_db_indices_bus;
struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
char mgmt_msix_name[IFNAMSIZ + 16];
u32 mgmt_msix_idx;
@@ -506,9 +554,11 @@ struct gve_priv {
u16 rx_data_slot_cnt; /* rx buffer length */
u64 max_registered_pages;
u64 num_registered_pages; /* num pages registered with NIC */
+ struct bpf_prog *xdp_prog; /* XDP BPF program */
u32 rx_copybreak; /* copy packets smaller than this */
u16 default_num_queues; /* default num queues to set up */
+ u16 num_xdp_queues;
struct gve_queue_config tx_cfg;
struct gve_queue_config rx_cfg;
struct gve_qpl_config qpl_cfg; /* map used QPL ids */
@@ -543,6 +593,7 @@ struct gve_priv {
u32 adminq_report_stats_cnt;
u32 adminq_report_link_speed_cnt;
u32 adminq_get_ptype_map_cnt;
+ u32 adminq_verify_driver_compatibility_cnt;
/* Global stats */
u32 interface_up_cnt; /* count of times interface turned up since last reset */
@@ -551,6 +602,8 @@ struct gve_priv {
u32 page_alloc_fail; /* count of page alloc fails */
u32 dma_mapping_error; /* count of dma mapping errors */
u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
+ u32 suspend_cnt; /* count of times suspended */
+ u32 resume_cnt; /* count of times resumed */
struct workqueue_struct *gve_wq;
struct work_struct service_task;
struct work_struct stats_report_task;
@@ -567,6 +620,7 @@ struct gve_priv {
/* Gvnic device link speed from hypervisor. */
u64 link_speed;
+ bool up_before_suspend; /* True if dev was up before suspend */
struct gve_options_dqo_rda options_dqo_rda;
struct gve_ptype_lut *ptype_lut_dqo;
@@ -575,6 +629,10 @@ struct gve_priv {
int data_buffer_size_dqo;
enum gve_queue_format queue_format;
+
+ /* Interrupt coalescing settings */
+ u32 tx_coalesce_usecs;
+ u32 rx_coalesce_usecs;
};
enum gve_service_task_flags_bit {
@@ -733,7 +791,7 @@ static inline void gve_clear_report_stats(struct gve_priv *priv)
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
struct gve_notify_block *block)
{
- return &priv->db_bar2[be32_to_cpu(block->irq_db_index)];
+ return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
}
/* Returns the index into ntfy_blocks of the given tx ring's block
@@ -757,7 +815,17 @@ static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
if (priv->queue_format != GVE_GQI_QPL_FORMAT)
return 0;
- return priv->tx_cfg.num_queues;
+ return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+}
+
+/* Returns the number of XDP tx queue page lists
+ */
+static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
+{
+ if (priv->queue_format != GVE_GQI_QPL_FORMAT)
+ return 0;
+
+ return priv->num_xdp_queues;
}
/* Returns the number of rx queue page lists
@@ -770,16 +838,35 @@ static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
return priv->rx_cfg.num_queues;
}
+static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
+{
+ return tx_qid;
+}
+
+static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
+{
+ return priv->tx_cfg.max_queues + rx_qid;
+}
+
+static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
+{
+ return gve_tx_qpl_id(priv, 0);
+}
+
+static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv)
+{
+ return gve_rx_qpl_id(priv, 0);
+}
+
/* Returns a pointer to the next available tx qpl in the list of qpls
*/
static inline
-struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
+struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid)
{
- int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
- priv->qpl_cfg.qpl_map_size);
+ int id = gve_tx_qpl_id(priv, tx_qid);
- /* we are out of tx qpls */
- if (id >= gve_num_tx_qpls(priv))
+ /* QPL already in use */
+ if (test_bit(id, priv->qpl_cfg.qpl_id_map))
return NULL;
set_bit(id, priv->qpl_cfg.qpl_id_map);
@@ -789,14 +876,12 @@ struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
/* Returns a pointer to the next available rx qpl in the list of qpls
*/
static inline
-struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
+struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid)
{
- int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
- priv->qpl_cfg.qpl_map_size,
- gve_num_tx_qpls(priv));
+ int id = gve_rx_qpl_id(priv, rx_qid);
- /* we are out of rx qpls */
- if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
+ /* QPL already in use */
+ if (test_bit(id, priv->qpl_cfg.qpl_id_map))
return NULL;
set_bit(id, priv->qpl_cfg.qpl_id_map);
@@ -815,7 +900,7 @@ static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
int id)
{
- if (id < gve_num_tx_qpls(priv))
+ if (id < gve_rx_start_qpl_id(priv))
return DMA_TO_DEVICE;
else
return DMA_FROM_DEVICE;
@@ -827,17 +912,38 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
priv->queue_format == GVE_GQI_QPL_FORMAT;
}
+static inline u32 gve_num_tx_queues(struct gve_priv *priv)
+{
+ return priv->tx_cfg.num_queues + priv->num_xdp_queues;
+}
+
+static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id)
+{
+ return priv->tx_cfg.num_queues + queue_id;
+}
+
+static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
+{
+ return gve_xdp_tx_queue_id(priv, 0);
+}
+
/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
- enum dma_data_direction);
+ enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
+int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
+ void *data, int len, void *frame_p);
+void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
-int gve_tx_alloc_rings(struct gve_priv *priv);
-void gve_tx_free_rings_gqi(struct gve_priv *priv);
+bool gve_xdp_poll(struct gve_notify_block *block, int budget);
+int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings);
+void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings);
u32 gve_tx_load_event_counter(struct gve_priv *priv,
struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 83ae56c310d3..252974202a3f 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -289,7 +289,7 @@ static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
return -ENOMEM;
case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
default:
dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
return -EINVAL;
@@ -301,7 +301,7 @@ static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
*/
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
- u32 tail, head;
+ int tail, head;
int i;
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
@@ -407,6 +407,9 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
case GVE_ADMINQ_GET_PTYPE_MAP:
priv->adminq_get_ptype_map_cnt++;
break;
+ case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
+ priv->adminq_verify_driver_compatibility_cnt++;
+ break;
default:
dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
}
@@ -462,7 +465,7 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
.num_counters = cpu_to_be32(num_counters),
.irq_db_addr = cpu_to_be64(db_array_bus_addr),
.num_irq_dbs = cpu_to_be32(num_ntfy_blks),
- .irq_db_stride = cpu_to_be32(sizeof(priv->ntfy_blocks[0])),
+ .irq_db_stride = cpu_to_be32(sizeof(*priv->irq_db_indices)),
.ntfy_blk_msix_base_idx =
cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
.queue_format = priv->queue_format,
@@ -513,12 +516,12 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
return gve_adminq_issue_cmd(priv, &cmd);
}
-int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
int err;
int i;
- for (i = 0; i < num_queues; i++) {
+ for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_create_tx_queue(priv, i);
if (err)
return err;
@@ -601,12 +604,12 @@ static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
return 0;
}
-int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues)
+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
int err;
int i;
- for (i = 0; i < num_queues; i++) {
+ for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_destroy_tx_queue(priv, i);
if (err)
return err;
@@ -738,10 +741,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
* is not set to GqiRda, choose the queue format in a priority order:
* DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.
*/
- if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
- dev_info(&priv->pdev->dev,
- "Driver is running with GQI RDA queue format.\n");
- } else if (dev_op_dqo_rda) {
+ if (dev_op_dqo_rda) {
priv->queue_format = GVE_DQO_RDA_FORMAT;
dev_info(&priv->pdev->dev,
"Driver is running with DQO RDA queue format.\n");
@@ -753,6 +753,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
"Driver is running with GQI RDA queue format.\n");
supported_features_mask =
be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
+ } else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
+ dev_info(&priv->pdev->dev,
+ "Driver is running with GQI RDA queue format.\n");
} else {
priv->queue_format = GVE_GQI_QPL_FORMAT;
if (dev_op_gqi_qpl)
@@ -878,6 +881,22 @@ int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
return gve_adminq_execute_cmd(priv, &cmd);
}
+int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
+ u64 driver_info_len,
+ dma_addr_t driver_info_addr)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY);
+ cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) {
+ .driver_info_len = cpu_to_be64(driver_info_len),
+ .driver_info_addr = cpu_to_be64(driver_info_addr),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
int gve_adminq_report_link_speed(struct gve_priv *priv)
{
union gve_adminq_command gvnic_cmd;
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 83c0b40cd2d9..f894beb3deaf 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -24,6 +24,7 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_REPORT_STATS = 0xC,
GVE_ADMINQ_REPORT_LINK_SPEED = 0xD,
GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
+ GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF,
};
/* Admin queue status codes */
@@ -146,6 +147,51 @@ enum gve_sup_feature_mask {
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
+#define GVE_VERSION_STR_LEN 128
+
+enum gve_driver_capbility {
+ gve_driver_capability_gqi_qpl = 0,
+ gve_driver_capability_gqi_rda = 1,
+ gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
+ gve_driver_capability_dqo_rda = 3,
+ gve_driver_capability_alt_miss_compl = 4,
+};
+
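+/* Each GVE_CAPn() maps a capability bit into the n-th 64-bit flags word. */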
+#define GVE_CAP1(a) BIT((int)a)
+#define GVE_CAP2(a) BIT(((int)a) - 64)
+#define GVE_CAP3(a) BIT(((int)a) - 128)
+#define GVE_CAP4(a) BIT(((int)a) - 192)
+
+#define GVE_DRIVER_CAPABILITY_FLAGS1 \
+ (GVE_CAP1(gve_driver_capability_gqi_qpl) | \
+ GVE_CAP1(gve_driver_capability_gqi_rda) | \
+ GVE_CAP1(gve_driver_capability_dqo_rda) | \
+ GVE_CAP1(gve_driver_capability_alt_miss_compl))
+
+#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
+#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
+#define GVE_DRIVER_CAPABILITY_FLAGS4 0x0
+
+struct gve_driver_info {
+ u8 os_type; /* 0x01 = Linux */
+ u8 driver_major;
+ u8 driver_minor;
+ u8 driver_sub;
+ __be32 os_version_major;
+ __be32 os_version_minor;
+ __be32 os_version_sub;
+ __be64 driver_capability_flags[4];
+ u8 os_version_str1[GVE_VERSION_STR_LEN];
+ u8 os_version_str2[GVE_VERSION_STR_LEN];
+};
+
+struct gve_adminq_verify_driver_compatibility {
+ __be64 driver_info_len;
+ __be64 driver_info_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_verify_driver_compatibility) == 16);
+
struct gve_adminq_configure_device_resources {
__be64 counter_array;
__be64 irq_db_addr;
@@ -345,6 +391,8 @@ union gve_adminq_command {
struct gve_adminq_report_stats report_stats;
struct gve_adminq_report_link_speed report_link_speed;
struct gve_adminq_get_ptype_map get_ptype_map;
+ struct gve_adminq_verify_driver_compatibility
+ verify_driver_compatibility;
};
};
u8 reserved[64];
@@ -362,8 +410,8 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
dma_addr_t db_array_bus_addr,
u32 num_ntfy_blks);
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
-int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues);
-int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 queue_id);
+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,
@@ -372,6 +420,9 @@ int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
dma_addr_t stats_report_addr, u64 interval);
+int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
+ u64 driver_info_len,
+ dma_addr_t driver_info_addr);
int gve_adminq_report_link_speed(struct gve_priv *priv);
struct gve_ptype_lut;
diff --git a/drivers/net/ethernet/google/gve/gve_desc.h b/drivers/net/ethernet/google/gve/gve_desc.h
index 4d225a18d8ce..f4ae9e19b844 100644
--- a/drivers/net/ethernet/google/gve/gve_desc.h
+++ b/drivers/net/ethernet/google/gve/gve_desc.h
@@ -33,6 +33,14 @@ struct gve_tx_pkt_desc {
__be64 seg_addr; /* Base address (see note) of this segment */
} __packed;
+struct gve_tx_mtd_desc {
+ u8 type_flags; /* type is lower 4 bits, subtype upper */
+ u8 path_state; /* state is lower 4 bits, hash type upper */
+ __be16 reserved0;
+ __be32 path_hash;
+ __be64 reserved1;
+} __packed;
+
struct gve_tx_seg_desc {
u8 type_flags; /* type is lower 4 bits, flags upper */
u8 l3_offset; /* TSO: 2 byte units to start of IPH */
@@ -46,6 +54,7 @@ struct gve_tx_seg_desc {
#define GVE_TXD_STD (0x0 << 4) /* Std with Host Address */
#define GVE_TXD_TSO (0x1 << 4) /* TSO with Host Address */
#define GVE_TXD_SEG (0x2 << 4) /* Seg with Host Address */
+#define GVE_TXD_MTD (0x3 << 4) /* Metadata */
/* GVE Transmit Descriptor Flags for Std Pkts */
#define GVE_TXF_L4CSUM BIT(0) /* Need csum offload */
@@ -54,6 +63,17 @@ struct gve_tx_seg_desc {
/* GVE Transmit Descriptor Flags for TSO Segs */
#define GVE_TXSF_IPV6 BIT(1) /* IPv6 TSO */
+/* GVE Transmit Descriptor Options for MTD Segs */
+#define GVE_MTD_SUBTYPE_PATH 0
+
+#define GVE_MTD_PATH_STATE_DEFAULT 0
+#define GVE_MTD_PATH_STATE_TIMEOUT 1
+#define GVE_MTD_PATH_STATE_CONGESTION 2
+#define GVE_MTD_PATH_STATE_RETRANSMIT 3
+
+#define GVE_MTD_PATH_HASH_NONE (0x0 << 4)
+#define GVE_MTD_PATH_HASH_L4 (0x1 << 4)
+
/* GVE Receive Packet Descriptor */
/* The start of an ethernet packet comes 2 bytes into the rx buffer.
* gVNIC adds this padding so that both the DMA and the L3/4 protocol header
diff --git a/drivers/net/ethernet/google/gve/gve_desc_dqo.h b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
index e8fe9adef7f2..f79cd0591110 100644
--- a/drivers/net/ethernet/google/gve/gve_desc_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
@@ -176,6 +176,11 @@ static_assert(sizeof(struct gve_tx_compl_desc) == 8);
#define GVE_COMPL_TYPE_DQO_MISS 0x1 /* Miss path completion */
#define GVE_COMPL_TYPE_DQO_REINJECTION 0x3 /* Re-injection completion */
+/* The most significant bit in the completion tag can change the completion
+ * type from packet completion to miss path completion.
+ */
+#define GVE_ALT_MISS_COMPL_BIT BIT(15)
+
/* Descriptor to post buffers to HW on buffer queue. */
struct gve_rx_desc_dqo {
__le16 buf_id; /* ID returned in Rx completion descriptor */
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index 836042364124..1eb4d5fd8561 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -18,6 +18,7 @@
#define GVE_TX_IRQ_RATELIMIT_US_DQO 50
#define GVE_RX_IRQ_RATELIMIT_US_DQO 20
+#define GVE_MAX_ITR_INTERVAL_DQO (GVE_ITR_INTERVAL_DQO_MASK * 2)
/* Timeout in seconds to wait for a reinjection completion after receiving
* its corresponding miss completion.
@@ -54,17 +55,17 @@ gve_tx_put_doorbell_dqo(const struct gve_priv *priv,
}
/* Builds register value to write to DQO IRQ doorbell to enable with specified
- * ratelimit.
+ * ITR interval.
*/
-static inline u32 gve_set_itr_ratelimit_dqo(u32 ratelimit_us)
+static inline u32 gve_setup_itr_interval_dqo(u32 interval_us)
{
u32 result = GVE_ITR_ENABLE_BIT_DQO;
/* Interval has 2us granularity. */
- ratelimit_us >>= 1;
+ interval_us >>= 1;
- ratelimit_us &= GVE_ITR_INTERVAL_DQO_MASK;
- result |= (ratelimit_us << GVE_ITR_INTERVAL_DQO_SHIFT);
+ interval_us &= GVE_ITR_INTERVAL_DQO_MASK;
+ result |= (interval_us << GVE_ITR_INTERVAL_DQO_SHIFT);
return result;
}
@@ -73,9 +74,20 @@ static inline void
gve_write_irq_doorbell_dqo(const struct gve_priv *priv,
const struct gve_notify_block *block, u32 val)
{
- u32 index = be32_to_cpu(block->irq_db_index);
+ u32 index = be32_to_cpu(*block->irq_db_index);
iowrite32(val, &priv->db_bar2[index]);
}
+/* Sets interrupt throttling interval and enables interrupt
+ * by writing to IRQ doorbell.
+ */
+static inline void
+gve_set_itr_coalesce_usecs_dqo(struct gve_priv *priv,
+ struct gve_notify_block *block,
+ u32 usecs)
+{
+ gve_write_irq_doorbell_dqo(priv, block,
+ gve_setup_itr_interval_dqo(usecs));
+}
#endif /* _GVE_DQO_H_ */
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index c8df47a97fa4..cfd4b8d284d1 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -8,6 +8,7 @@
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
+#include "gve_dqo.h"
static void gve_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@@ -33,6 +34,11 @@ static u32 gve_get_msglevel(struct net_device *netdev)
return priv->msg_enable;
}
+/* The order of the stats column string names below must match the order in
+ * which the corresponding values are filled in by the code. For xdp_aborted,
+ * xdp_drop, xdp_pass, xdp_tx and xdp_redirect it must also match the order
+ * declared in enum xdp_action in uapi/linux/bpf.h.
+ */
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
"rx_dropped", "tx_dropped", "tx_timeouts",
@@ -42,17 +48,22 @@ static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
};
static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
- "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]",
+ "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_consumed_desc[%u]", "rx_bytes[%u]",
"rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
+ "rx_frag_alloc_cnt[%u]",
"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
+ "rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]",
+ "rx_xdp_tx[%u]", "rx_xdp_redirect[%u]",
+ "rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]",
};
static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
- "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_bytes[%u]",
+ "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
- "tx_dma_mapping_error[%u]",
+ "tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]",
+ "tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
};
static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
@@ -79,8 +90,10 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct gve_priv *priv = netdev_priv(netdev);
char *s = (char *)data;
+ int num_tx_queues;
int i, j;
+ num_tx_queues = gve_num_tx_queues(priv);
switch (stringset) {
case ETH_SS_STATS:
memcpy(s, *gve_gstrings_main_stats,
@@ -95,7 +108,7 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ for (i = 0; i < num_tx_queues; i++) {
for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
snprintf(s, ETH_GSTRING_LEN,
gve_gstrings_tx_stats[j], i);
@@ -122,12 +135,14 @@ static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
static int gve_get_sset_count(struct net_device *netdev, int sset)
{
struct gve_priv *priv = netdev_priv(netdev);
+ int num_tx_queues;
+ num_tx_queues = gve_num_tx_queues(priv);
switch (sset) {
case ETH_SS_STATS:
return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
(priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
- (priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS);
+ (num_tx_queues * NUM_GVE_TX_CNTS);
case ETH_SS_PRIV_FLAGS:
return GVE_PRIV_FLAGS_STR_LEN;
default:
@@ -139,10 +154,11 @@ static void
gve_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
- u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
- tmp_rx_desc_err_dropped_pkt, tmp_tx_pkts, tmp_tx_bytes;
+ u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail,
+ tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt,
+ tmp_tx_pkts, tmp_tx_bytes;
u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
- rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes;
+ rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped;
int stats_idx, base_stats_idx, max_stats_idx;
struct stats *report_stats;
int *rx_qid_to_stats_idx;
@@ -150,18 +166,20 @@ gve_get_ethtool_stats(struct net_device *netdev,
struct gve_priv *priv;
bool skip_nic_stats;
unsigned int start;
+ int num_tx_queues;
int ring;
int i, j;
ASSERT_RTNL();
priv = netdev_priv(netdev);
+ num_tx_queues = gve_num_tx_queues(priv);
report_stats = priv->stats_report->stats;
rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
sizeof(int), GFP_KERNEL);
if (!rx_qid_to_stats_idx)
return;
- tx_qid_to_stats_idx = kmalloc_array(priv->tx_cfg.num_queues,
+ tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
sizeof(int), GFP_KERNEL);
if (!tx_qid_to_stats_idx) {
kfree(rx_qid_to_stats_idx);
@@ -191,8 +209,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
}
}
- for (tx_pkts = 0, tx_bytes = 0, ring = 0;
- ring < priv->tx_cfg.num_queues; ring++) {
+ for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0;
+ ring < num_tx_queues; ring++) {
if (priv->tx) {
do {
start =
@@ -203,6 +221,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
start));
tx_pkts += tmp_tx_pkts;
tx_bytes += tmp_tx_bytes;
+ tx_dropped += priv->tx[ring].dropped_pkt;
}
}
@@ -214,9 +233,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* total rx dropped packets */
data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
rx_desc_err_dropped_pkt;
- /* Skip tx_dropped */
- i++;
-
+ data[i++] = tx_dropped;
data[i++] = priv->tx_timeo_cnt;
data[i++] = rx_skb_alloc_fail;
data[i++] = rx_buf_alloc_fail;
@@ -230,7 +247,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
i = GVE_MAIN_STATS_LEN;
/* For rx cross-reporting stats, start from nic rx stats in report */
- base_stats_idx = GVE_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
+ base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
base_stats_idx;
@@ -255,6 +272,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->fill_cnt;
data[i++] = rx->cnt;
+ data[i++] = rx->fill_cnt - rx->cnt;
do {
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
@@ -269,6 +287,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->rx_cont_packet_cnt;
data[i++] = rx->rx_frag_flip_cnt;
data[i++] = rx->rx_frag_copy_cnt;
+ data[i++] = rx->rx_frag_alloc_cnt;
/* rx dropped packets */
data[i++] = tmp_rx_skb_alloc_fail +
tmp_rx_buf_alloc_fail +
@@ -279,14 +298,26 @@ gve_get_ethtool_stats(struct net_device *netdev,
if (skip_nic_stats) {
/* skip NIC rx stats */
i += NIC_RX_STATS_REPORT_NUM;
- continue;
- }
- for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
- u64 value =
- be64_to_cpu(report_stats[rx_qid_to_stats_idx[ring] + j].value);
+ } else {
+ stats_idx = rx_qid_to_stats_idx[ring];
+ for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
+ u64 value =
+ be64_to_cpu(report_stats[stats_idx + j].value);
- data[i++] = value;
+ data[i++] = value;
+ }
}
+ /* XDP rx counters */
+ do {
+ start = u64_stats_fetch_begin(&priv->rx[ring].statss);
+ for (j = 0; j < GVE_XDP_ACTIONS; j++)
+ data[i + j] = rx->xdp_actions[j];
+ data[i + j++] = rx->xdp_tx_errors;
+ data[i + j++] = rx->xdp_redirect_errors;
+ data[i + j++] = rx->xdp_alloc_fails;
+ } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ start));
+ i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */
}
} else {
i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
@@ -294,7 +325,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* For tx cross-reporting stats, start from nic tx stats in report */
base_stats_idx = max_stats_idx;
- max_stats_idx = NIC_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
+ max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues +
max_stats_idx;
/* Preprocess the stats report for tx, map queue id to start index */
skip_nic_stats = false;
@@ -312,18 +343,20 @@ gve_get_ethtool_stats(struct net_device *netdev,
}
/* walk TX rings */
if (priv->tx) {
- for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
+ for (ring = 0; ring < num_tx_queues; ring++) {
struct gve_tx_ring *tx = &priv->tx[ring];
if (gve_is_gqi(priv)) {
data[i++] = tx->req;
data[i++] = tx->done;
+ data[i++] = tx->req - tx->done;
} else {
/* DQO doesn't currently support
* posted/completed descriptor counts;
*/
data[i++] = 0;
data[i++] = 0;
+ data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head;
}
do {
start =
@@ -340,16 +373,28 @@ gve_get_ethtool_stats(struct net_device *netdev,
if (skip_nic_stats) {
/* skip NIC tx stats */
i += NIC_TX_STATS_REPORT_NUM;
- continue;
- }
- for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
- u64 value =
- be64_to_cpu(report_stats[tx_qid_to_stats_idx[ring] + j].value);
- data[i++] = value;
+ } else {
+ stats_idx = tx_qid_to_stats_idx[ring];
+ for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
+ u64 value =
+ be64_to_cpu(report_stats[stats_idx + j].value);
+ data[i++] = value;
+ }
}
+ /* XDP xsk counters */
+ data[i++] = tx->xdp_xsk_wakeup;
+ data[i++] = tx->xdp_xsk_done;
+ do {
+ start = u64_stats_fetch_begin(&priv->tx[ring].statss);
+ data[i] = tx->xdp_xsk_sent;
+ data[i + 1] = tx->xdp_xmit;
+ data[i + 2] = tx->xdp_xmit_errors;
+ } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ start));
+ i += 3; /* XDP tx counters */
}
} else {
- i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS;
+ i += num_tx_queues * NUM_GVE_TX_CNTS;
}
kfree(rx_qid_to_stats_idx);
@@ -406,6 +451,12 @@ static int gve_set_channels(struct net_device *netdev,
if (!new_rx || !new_tx)
return -EINVAL;
+ if (priv->num_xdp_queues &&
+ (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) {
+ dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
+ return -EINVAL;
+ }
+
if (!netif_carrier_ok(netdev)) {
priv->tx_cfg.num_queues = new_tx;
priv->rx_cfg.num_queues = new_rx;
@@ -419,7 +470,9 @@ static int gve_set_channels(struct net_device *netdev,
}
static void gve_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *cmd)
+ struct ethtool_ringparam *cmd,
+ struct kernel_ethtool_ringparam *kernel_cmd,
+ struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(netdev);
@@ -494,7 +547,9 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{
struct gve_priv *priv = netdev_priv(netdev);
u64 ori_flags, new_flags;
+ int num_tx_queues;
+ num_tx_queues = gve_num_tx_queues(priv);
ori_flags = READ_ONCE(priv->ethtool_flags);
new_flags = ori_flags;
@@ -514,7 +569,7 @@ static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
/* delete report stats timer. */
if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
- priv->tx_cfg.num_queues;
+ num_tx_queues;
int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
priv->rx_cfg.num_queues;
@@ -529,13 +584,74 @@ static int gve_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct gve_priv *priv = netdev_priv(netdev);
- int err = gve_adminq_report_link_speed(priv);
+ int err = 0;
+
+ if (priv->link_speed == 0)
+ err = gve_adminq_report_link_speed(priv);
cmd->base.speed = priv->link_speed;
return err;
}
+static int gve_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_ec,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ if (gve_is_gqi(priv))
+ return -EOPNOTSUPP;
+ ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
+ ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;
+
+ return 0;
+}
+
+static int gve_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_ec,
+ struct netlink_ext_ack *extack)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ u32 tx_usecs_orig = priv->tx_coalesce_usecs;
+ u32 rx_usecs_orig = priv->rx_coalesce_usecs;
+ int idx;
+
+ if (gve_is_gqi(priv))
+ return -EOPNOTSUPP;
+
+ if (ec->tx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO ||
+ ec->rx_coalesce_usecs > GVE_MAX_ITR_INTERVAL_DQO)
+ return -EINVAL;
+ priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
+ priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
+
+ if (tx_usecs_orig != priv->tx_coalesce_usecs) {
+ for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ gve_set_itr_coalesce_usecs_dqo(priv, block,
+ priv->tx_coalesce_usecs);
+ }
+ }
+
+ if (rx_usecs_orig != priv->rx_coalesce_usecs) {
+ for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
+ int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+ struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+
+ gve_set_itr_coalesce_usecs_dqo(priv, block,
+ priv->rx_coalesce_usecs);
+ }
+ }
+
+ return 0;
+}
+
const struct ethtool_ops gve_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = gve_get_drvinfo,
.get_strings = gve_get_strings,
.get_sset_count = gve_get_sset_count,
@@ -545,6 +661,8 @@ const struct ethtool_ops gve_ethtool_ops = {
.set_channels = gve_set_channels,
.get_channels = gve_get_channels,
.get_link = ethtool_op_get_link,
+ .get_coalesce = gve_get_coalesce,
+ .set_coalesce = gve_set_coalesce,
.get_ringparam = gve_get_ringparam,
.reset = gve_user_reset,
.get_tunable = gve_get_tunable,
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 59b66f679e46..caa00c72aeeb 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -4,15 +4,20 @@
* Copyright (C) 2015-2021 Google, Inc.
*/
+#include <linux/bpf.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
+#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
#include <net/sch_generic.h>
+#include <net/xdp_sock_drv.h>
#include "gve.h"
#include "gve_dqo.h"
#include "gve_adminq.h"
@@ -30,6 +35,49 @@
const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
+static int gve_verify_driver_compatibility(struct gve_priv *priv)
+{
+ int err;
+ struct gve_driver_info *driver_info;
+ dma_addr_t driver_info_bus;
+
+ driver_info = dma_alloc_coherent(&priv->pdev->dev,
+ sizeof(struct gve_driver_info),
+ &driver_info_bus, GFP_KERNEL);
+ if (!driver_info)
+ return -ENOMEM;
+
+ *driver_info = (struct gve_driver_info) {
+ .os_type = 1, /* Linux */
+ .os_version_major = cpu_to_be32(LINUX_VERSION_MAJOR),
+ .os_version_minor = cpu_to_be32(LINUX_VERSION_SUBLEVEL),
+ .os_version_sub = cpu_to_be32(LINUX_VERSION_PATCHLEVEL),
+ .driver_capability_flags = {
+ cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
+ cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
+ cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
+ cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
+ },
+ };
+ strscpy(driver_info->os_version_str1, utsname()->release,
+ sizeof(driver_info->os_version_str1));
+ strscpy(driver_info->os_version_str2, utsname()->version,
+ sizeof(driver_info->os_version_str2));
+
+ err = gve_adminq_verify_driver_compatibility(priv,
+ sizeof(struct gve_driver_info),
+ driver_info_bus);
+
+ /* It's ok if the device doesn't support this */
+ if (err == -EOPNOTSUPP)
+ err = 0;
+
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(struct gve_driver_info),
+ driver_info, driver_info_bus);
+ return err;
+}
+
static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct gve_priv *priv = netdev_priv(dev);
@@ -45,8 +93,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
struct gve_priv *priv = netdev_priv(dev);
unsigned int start;
u64 packets, bytes;
+ int num_tx_queues;
int ring;
+ num_tx_queues = gve_num_tx_queues(priv);
if (priv->rx) {
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
do {
@@ -61,7 +111,7 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
}
}
if (priv->tx) {
- for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
+ for (ring = 0; ring < num_tx_queues; ring++) {
do {
start =
u64_stats_fetch_begin(&priv->tx[ring].statss);
@@ -135,7 +185,7 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
int tx_stats_num, rx_stats_num;
tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
- priv->tx_cfg.num_queues;
+ gve_num_tx_queues(priv);
rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
priv->rx_cfg.num_queues;
priv->stats_report_len = struct_size(priv->stats_report, stats,
@@ -200,8 +250,13 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
block = container_of(napi, struct gve_notify_block, napi);
priv = block->priv;
- if (block->tx)
- reschedule |= gve_tx_poll(block, budget);
+ if (block->tx) {
+ if (block->tx->q_num < priv->tx_cfg.num_queues)
+ reschedule |= gve_tx_poll(block, budget);
+ else
+ reschedule |= gve_xdp_poll(block, budget);
+ }
+
if (block->rx) {
work_done = gve_rx_poll(block, budget);
reschedule |= work_done == budget;
@@ -239,19 +294,6 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
bool reschedule = false;
int work_done = 0;
- /* Clear PCI MSI-X Pending Bit Array (PBA)
- *
- * This bit is set if an interrupt event occurs while the vector is
- * masked. If this bit is set and we reenable the interrupt, it will
- * fire again. Since we're just about to poll the queue state, we don't
- * need it to fire again.
- *
- * Under high softirq load, it's possible that the interrupt condition
- * is triggered twice before we got the chance to process it.
- */
- gve_write_irq_doorbell_dqo(priv, block,
- GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);
-
if (block->tx)
reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
@@ -282,7 +324,6 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
int num_vecs_requested = priv->num_ntfy_blks + 1;
- char *name = priv->dev->name;
unsigned int active_cpus;
int vecs_enabled;
int i, j;
@@ -326,30 +367,38 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
/* Setup Management Vector - the last vector */
- snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
- name);
+ snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s",
+ pci_name(priv->pdev));
err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
if (err) {
dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
goto abort_with_msix_enabled;
}
- priv->ntfy_blocks =
+ priv->irq_db_indices =
dma_alloc_coherent(&priv->pdev->dev,
priv->num_ntfy_blks *
- sizeof(*priv->ntfy_blocks),
- &priv->ntfy_block_bus, GFP_KERNEL);
- if (!priv->ntfy_blocks) {
+ sizeof(*priv->irq_db_indices),
+ &priv->irq_db_indices_bus, GFP_KERNEL);
+ if (!priv->irq_db_indices) {
err = -ENOMEM;
goto abort_with_mgmt_vector;
}
+
+ priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks *
+ sizeof(*priv->ntfy_blocks), GFP_KERNEL);
+ if (!priv->ntfy_blocks) {
+ err = -ENOMEM;
+ goto abort_with_irq_db_indices;
+ }
+
/* Setup the other blocks - the first n-1 vectors */
for (i = 0; i < priv->num_ntfy_blks; i++) {
struct gve_notify_block *block = &priv->ntfy_blocks[i];
int msix_idx = i;
- snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
- name, i);
+ snprintf(block->name, sizeof(block->name), "gve-ntfy-blk%d@pci:%s",
+ i, pci_name(priv->pdev));
block->priv = priv;
err = request_irq(priv->msix_vectors[msix_idx].vector,
gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
@@ -361,6 +410,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
}
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
get_cpu_mask(i % active_cpus));
+ block->irq_db_index = &priv->irq_db_indices[i].index;
}
return 0;
abort_with_some_ntfy_blocks:
@@ -372,10 +422,13 @@ abort_with_some_ntfy_blocks:
NULL);
free_irq(priv->msix_vectors[msix_idx].vector, block);
}
- dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
- sizeof(*priv->ntfy_blocks),
- priv->ntfy_blocks, priv->ntfy_block_bus);
+ kvfree(priv->ntfy_blocks);
priv->ntfy_blocks = NULL;
+abort_with_irq_db_indices:
+ dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
+ sizeof(*priv->irq_db_indices),
+ priv->irq_db_indices, priv->irq_db_indices_bus);
+ priv->irq_db_indices = NULL;
abort_with_mgmt_vector:
free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
abort_with_msix_enabled:
@@ -403,10 +456,12 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
free_irq(priv->msix_vectors[msix_idx].vector, block);
}
free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
- dma_free_coherent(&priv->pdev->dev,
- priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
- priv->ntfy_blocks, priv->ntfy_block_bus);
+ kvfree(priv->ntfy_blocks);
priv->ntfy_blocks = NULL;
+ dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
+ sizeof(*priv->irq_db_indices),
+ priv->irq_db_indices, priv->irq_db_indices_bus);
+ priv->irq_db_indices = NULL;
pci_disable_msix(priv->pdev);
kvfree(priv->msix_vectors);
priv->msix_vectors = NULL;
@@ -428,7 +483,7 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = gve_adminq_configure_device_resources(priv,
priv->counter_array_bus,
priv->num_event_counters,
- priv->ntfy_block_bus,
+ priv->irq_db_indices_bus,
priv->num_ntfy_blks);
if (unlikely(err)) {
dev_err(&priv->pdev->dev,
@@ -512,8 +567,7 @@ static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_add(priv->dev, &block->napi, gve_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->dev, &block->napi, gve_poll);
}
static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
@@ -523,13 +577,50 @@ static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
netif_napi_del(&block->napi);
}
+static int gve_register_xdp_qpls(struct gve_priv *priv)
+{
+ int start_id;
+ int err;
+ int i;
+
+ start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
+ for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
+ err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to register queue page list %d\n",
+ priv->qpls[i].id);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
+ }
+ }
+ return 0;
+}
+
static int gve_register_qpls(struct gve_priv *priv)
{
- int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+ int start_id;
int err;
int i;
- for (i = 0; i < num_qpls; i++) {
+ start_id = gve_tx_start_qpl_id(priv);
+ for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
+ err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to register queue page list %d\n",
+ priv->qpls[i].id);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
+ }
+ }
+
+ start_id = gve_rx_start_qpl_id(priv);
+ for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -544,13 +635,34 @@ static int gve_register_qpls(struct gve_priv *priv)
return 0;
}
+static int gve_unregister_xdp_qpls(struct gve_priv *priv)
+{
+ int start_id;
+ int err;
+ int i;
+
+ start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
+ for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
+ err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
+ /* This failure will trigger a reset - no need to clean up */
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Failed to unregister queue page list %d\n",
+ priv->qpls[i].id);
+ return err;
+ }
+ }
+ return 0;
+}
+
static int gve_unregister_qpls(struct gve_priv *priv)
{
- int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+ int start_id;
int err;
int i;
- for (i = 0; i < num_qpls; i++) {
+ start_id = gve_tx_start_qpl_id(priv);
+ for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
/* This failure will trigger a reset - no need to clean up */
if (err) {
@@ -560,25 +672,59 @@ static int gve_unregister_qpls(struct gve_priv *priv)
return err;
}
}
+
+ start_id = gve_rx_start_qpl_id(priv);
+ for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
+ err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
+ /* This failure will trigger a reset - no need to clean up */
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "Failed to unregister queue page list %d\n",
+ priv->qpls[i].id);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int gve_create_xdp_rings(struct gve_priv *priv)
+{
+ int err;
+
+ err = gve_adminq_create_tx_queues(priv,
+ gve_xdp_tx_start_queue_id(priv),
+ priv->num_xdp_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n",
+ priv->num_xdp_queues);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
+ }
+ netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n",
+ priv->num_xdp_queues);
+
return 0;
}
static int gve_create_rings(struct gve_priv *priv)
{
+ int num_tx_queues = gve_num_tx_queues(priv);
int err;
int i;
- err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
+ err = gve_adminq_create_tx_queues(priv, 0, num_tx_queues);
if (err) {
netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
- priv->tx_cfg.num_queues);
+ num_tx_queues);
/* This failure will trigger a reset - no need to clean
* up
*/
return err;
}
netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
- priv->tx_cfg.num_queues);
+ num_tx_queues);
err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
if (err) {
@@ -611,6 +757,23 @@ static int gve_create_rings(struct gve_priv *priv)
return 0;
}
+static void add_napi_init_xdp_sync_stats(struct gve_priv *priv,
+ int (*napi_poll)(struct napi_struct *napi,
+ int budget))
+{
+ int start_id = gve_xdp_tx_start_queue_id(priv);
+ int i;
+
+ /* Add xdp tx napi & init sync stats */
+ for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
+ int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
+
+ u64_stats_init(&priv->tx[i].statss);
+ priv->tx[i].ntfy_id = ntfy_idx;
+ gve_add_napi(priv, ntfy_idx, napi_poll);
+ }
+}
+
static void add_napi_init_sync_stats(struct gve_priv *priv,
int (*napi_poll)(struct napi_struct *napi,
int budget))
@@ -618,7 +781,7 @@ static void add_napi_init_sync_stats(struct gve_priv *priv,
int i;
/* Add tx napi & init sync stats*/
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ for (i = 0; i < gve_num_tx_queues(priv); i++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
u64_stats_init(&priv->tx[i].statss);
@@ -635,34 +798,51 @@ static void add_napi_init_sync_stats(struct gve_priv *priv,
}
}
-static void gve_tx_free_rings(struct gve_priv *priv)
+static void gve_tx_free_rings(struct gve_priv *priv, int start_id, int num_rings)
{
if (gve_is_gqi(priv)) {
- gve_tx_free_rings_gqi(priv);
+ gve_tx_free_rings_gqi(priv, start_id, num_rings);
} else {
gve_tx_free_rings_dqo(priv);
}
}
+static int gve_alloc_xdp_rings(struct gve_priv *priv)
+{
+ int start_id;
+ int err = 0;
+
+ if (!priv->num_xdp_queues)
+ return 0;
+
+ start_id = gve_xdp_tx_start_queue_id(priv);
+ err = gve_tx_alloc_rings(priv, start_id, priv->num_xdp_queues);
+ if (err)
+ return err;
+ add_napi_init_xdp_sync_stats(priv, gve_napi_poll);
+
+ return 0;
+}
+
static int gve_alloc_rings(struct gve_priv *priv)
{
int err;
/* Setup tx rings */
- priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx),
+ priv->tx = kvcalloc(priv->tx_cfg.max_queues, sizeof(*priv->tx),
GFP_KERNEL);
if (!priv->tx)
return -ENOMEM;
if (gve_is_gqi(priv))
- err = gve_tx_alloc_rings(priv);
+ err = gve_tx_alloc_rings(priv, 0, gve_num_tx_queues(priv));
else
err = gve_tx_alloc_rings_dqo(priv);
if (err)
goto free_tx;
/* Setup rx rings */
- priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx),
+ priv->rx = kvcalloc(priv->rx_cfg.max_queues, sizeof(*priv->rx),
GFP_KERNEL);
if (!priv->rx) {
err = -ENOMEM;
@@ -687,18 +867,39 @@ free_rx:
kvfree(priv->rx);
priv->rx = NULL;
free_tx_queue:
- gve_tx_free_rings(priv);
+ gve_tx_free_rings(priv, 0, gve_num_tx_queues(priv));
free_tx:
kvfree(priv->tx);
priv->tx = NULL;
return err;
}
+static int gve_destroy_xdp_rings(struct gve_priv *priv)
+{
+ int start_id;
+ int err;
+
+ start_id = gve_xdp_tx_start_queue_id(priv);
+ err = gve_adminq_destroy_tx_queues(priv,
+ start_id,
+ priv->num_xdp_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to destroy XDP queues\n");
+ /* This failure will trigger a reset - no need to clean up */
+ return err;
+ }
+ netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n");
+
+ return 0;
+}
+
static int gve_destroy_rings(struct gve_priv *priv)
{
+ int num_tx_queues = gve_num_tx_queues(priv);
int err;
- err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
+ err = gve_adminq_destroy_tx_queues(priv, 0, num_tx_queues);
if (err) {
netif_err(priv, drv, priv->dev,
"failed to destroy tx queues\n");
@@ -725,17 +926,33 @@ static void gve_rx_free_rings(struct gve_priv *priv)
gve_rx_free_rings_dqo(priv);
}
+static void gve_free_xdp_rings(struct gve_priv *priv)
+{
+ int ntfy_idx, start_id;
+ int i;
+
+ start_id = gve_xdp_tx_start_queue_id(priv);
+ if (priv->tx) {
+ for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
+ ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
+ gve_remove_napi(priv, ntfy_idx);
+ }
+ gve_tx_free_rings(priv, start_id, priv->num_xdp_queues);
+ }
+}
+
static void gve_free_rings(struct gve_priv *priv)
{
+ int num_tx_queues = gve_num_tx_queues(priv);
int ntfy_idx;
int i;
if (priv->tx) {
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ for (i = 0; i < num_tx_queues; i++) {
ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
gve_remove_napi(priv, ntfy_idx);
}
- gve_tx_free_rings(priv);
+ gve_tx_free_rings(priv, 0, num_tx_queues);
kvfree(priv->tx);
priv->tx = NULL;
}
@@ -752,9 +969,9 @@ static void gve_free_rings(struct gve_priv *priv)
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
- enum dma_data_direction dir)
+ enum dma_data_direction dir, gfp_t gfp_flags)
{
- *page = alloc_page(GFP_KERNEL);
+ *page = alloc_page(gfp_flags);
if (!*page) {
priv->page_alloc_fail++;
return -ENOMEM;
@@ -797,7 +1014,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
for (i = 0; i < pages; i++) {
err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
&qpl->page_buses[i],
- gve_qpl_dma_dir(priv, id));
+ gve_qpl_dma_dir(priv, id), GFP_KERNEL);
/* caller handles clean up */
if (err)
return -ENOMEM;
@@ -817,8 +1034,7 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
put_page(page);
}
-static void gve_free_queue_page_list(struct gve_priv *priv,
- int id)
+static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
{
struct gve_queue_page_list *qpl = &priv->qpls[id];
int i;
@@ -833,41 +1049,68 @@ static void gve_free_queue_page_list(struct gve_priv *priv,
qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
kvfree(qpl->page_buses);
+ qpl->page_buses = NULL;
free_pages:
kvfree(qpl->pages);
+ qpl->pages = NULL;
priv->num_registered_pages -= qpl->num_entries;
}
+static int gve_alloc_xdp_qpls(struct gve_priv *priv)
+{
+ int start_id;
+ int i, j;
+ int err;
+
+ start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
+ for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
+ err = gve_alloc_queue_page_list(priv, i,
+ priv->tx_pages_per_qpl);
+ if (err)
+ goto free_qpls;
+ }
+
+ return 0;
+
+free_qpls:
+ for (j = start_id; j <= i; j++)
+ gve_free_queue_page_list(priv, j);
+ return err;
+}
+
static int gve_alloc_qpls(struct gve_priv *priv)
{
- int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+ int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
+ int start_id;
int i, j;
int err;
- /* Raw addressing means no QPLs */
- if (priv->queue_format == GVE_GQI_RDA_FORMAT)
+ if (priv->queue_format != GVE_GQI_QPL_FORMAT)
return 0;
- priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
+ priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL);
if (!priv->qpls)
return -ENOMEM;
- for (i = 0; i < gve_num_tx_qpls(priv); i++) {
+ start_id = gve_tx_start_qpl_id(priv);
+ for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) {
err = gve_alloc_queue_page_list(priv, i,
priv->tx_pages_per_qpl);
if (err)
goto free_qpls;
}
- for (; i < num_qpls; i++) {
+
+ start_id = gve_rx_start_qpl_id(priv);
+ for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) {
err = gve_alloc_queue_page_list(priv, i,
priv->rx_data_slot_cnt);
if (err)
goto free_qpls;
}
- priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
+ priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(max_queues) *
sizeof(unsigned long) * BITS_PER_BYTE;
- priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls),
+ priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
sizeof(unsigned long), GFP_KERNEL);
if (!priv->qpl_cfg.qpl_id_map) {
err = -ENOMEM;
@@ -880,24 +1123,36 @@ free_qpls:
for (j = 0; j <= i; j++)
gve_free_queue_page_list(priv, j);
kvfree(priv->qpls);
+ priv->qpls = NULL;
return err;
}
+static void gve_free_xdp_qpls(struct gve_priv *priv)
+{
+ int start_id;
+ int i;
+
+ start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
+ for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++)
+ gve_free_queue_page_list(priv, i);
+}
+
static void gve_free_qpls(struct gve_priv *priv)
{
- int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+ int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
int i;
- /* Raw addressing means no QPLs */
- if (priv->queue_format == GVE_GQI_RDA_FORMAT)
+ if (!priv->qpls)
return;
kvfree(priv->qpl_cfg.qpl_id_map);
+ priv->qpl_cfg.qpl_id_map = NULL;
- for (i = 0; i < num_qpls; i++)
+ for (i = 0; i < max_queues; i++)
gve_free_queue_page_list(priv, i);
kvfree(priv->qpls);
+ priv->qpls = NULL;
}
/* Use this to schedule a reset when the device is capable of continuing
@@ -915,11 +1170,109 @@ static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
static void gve_turndown(struct gve_priv *priv);
static void gve_turnup(struct gve_priv *priv);
+static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev)
+{
+ struct napi_struct *napi;
+ struct gve_rx_ring *rx;
+ int err = 0;
+ int i, j;
+ u32 tx_qid;
+
+ if (!priv->num_xdp_queues)
+ return 0;
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ rx = &priv->rx[i];
+ napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+
+ err = xdp_rxq_info_reg(&rx->xdp_rxq, dev, i,
+ napi->napi_id);
+ if (err)
+ goto err;
+ err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL);
+ if (err)
+ goto err;
+ rx->xsk_pool = xsk_get_pool_from_qid(dev, i);
+ if (rx->xsk_pool) {
+ err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, i,
+ napi->napi_id);
+ if (err)
+ goto err;
+ err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
+ MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (err)
+ goto err;
+ xsk_pool_set_rxq_info(rx->xsk_pool,
+ &rx->xsk_rxq);
+ }
+ }
+
+ for (i = 0; i < priv->num_xdp_queues; i++) {
+ tx_qid = gve_xdp_tx_queue_id(priv, i);
+ priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i);
+ }
+ return 0;
+
+err:
+ for (j = i; j >= 0; j--) {
+ rx = &priv->rx[j];
+ if (xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg(&rx->xdp_rxq);
+ if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
+ xdp_rxq_info_unreg(&rx->xsk_rxq);
+ }
+ return err;
+}
+
+static void gve_unreg_xdp_info(struct gve_priv *priv)
+{
+ int i, tx_qid;
+
+ if (!priv->num_xdp_queues)
+ return;
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ struct gve_rx_ring *rx = &priv->rx[i];
+
+ xdp_rxq_info_unreg(&rx->xdp_rxq);
+ if (rx->xsk_pool) {
+ xdp_rxq_info_unreg(&rx->xsk_rxq);
+ rx->xsk_pool = NULL;
+ }
+ }
+
+ for (i = 0; i < priv->num_xdp_queues; i++) {
+ tx_qid = gve_xdp_tx_queue_id(priv, i);
+ priv->tx[tx_qid].xsk_pool = NULL;
+ }
+}
+
+static void gve_drain_page_cache(struct gve_priv *priv)
+{
+ struct page_frag_cache *nc;
+ int i;
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ nc = &priv->rx[i].page_cache;
+ if (nc->va) {
+ __page_frag_cache_drain(virt_to_page(nc->va),
+ nc->pagecnt_bias);
+ nc->va = NULL;
+ }
+ }
+}
+
static int gve_open(struct net_device *dev)
{
struct gve_priv *priv = netdev_priv(dev);
int err;
+ if (priv->xdp_prog)
+ priv->num_xdp_queues = priv->rx_cfg.num_queues;
+ else
+ priv->num_xdp_queues = 0;
+
err = gve_alloc_qpls(priv);
if (err)
return err;
@@ -935,6 +1288,10 @@ static int gve_open(struct net_device *dev)
if (err)
goto free_rings;
+ err = gve_reg_xdp_info(priv, dev);
+ if (err)
+ goto free_rings;
+
err = gve_register_qpls(priv);
if (err)
goto reset;
@@ -989,6 +1346,7 @@ static int gve_close(struct net_device *dev)
netif_carrier_off(dev);
if (gve_get_device_rings_ok(priv)) {
gve_turndown(priv);
+ gve_drain_page_cache(priv);
err = gve_destroy_rings(priv);
if (err)
goto err;
@@ -999,6 +1357,7 @@ static int gve_close(struct net_device *dev)
}
del_timer_sync(&priv->stats_report_timer);
+ gve_unreg_xdp_info(priv);
gve_free_rings(priv);
gve_free_qpls(priv);
priv->interface_down_cnt++;
@@ -1015,6 +1374,306 @@ err:
return gve_reset_recovery(priv, false);
}
+static int gve_remove_xdp_queues(struct gve_priv *priv)
+{
+ int err;
+
+ err = gve_destroy_xdp_rings(priv);
+ if (err)
+ return err;
+
+ err = gve_unregister_xdp_qpls(priv);
+ if (err)
+ return err;
+
+ gve_unreg_xdp_info(priv);
+ gve_free_xdp_rings(priv);
+ gve_free_xdp_qpls(priv);
+ priv->num_xdp_queues = 0;
+ return 0;
+}
+
+static int gve_add_xdp_queues(struct gve_priv *priv)
+{
+ int err;
+
+ priv->num_xdp_queues = priv->tx_cfg.num_queues;
+
+ err = gve_alloc_xdp_qpls(priv);
+ if (err)
+ goto err;
+
+ err = gve_alloc_xdp_rings(priv);
+ if (err)
+ goto free_xdp_qpls;
+
+ err = gve_reg_xdp_info(priv, priv->dev);
+ if (err)
+ goto free_xdp_rings;
+
+ err = gve_register_xdp_qpls(priv);
+ if (err)
+ goto free_xdp_rings;
+
+ err = gve_create_xdp_rings(priv);
+ if (err)
+ goto free_xdp_rings;
+
+ return 0;
+
+free_xdp_rings:
+ gve_free_xdp_rings(priv);
+free_xdp_qpls:
+ gve_free_xdp_qpls(priv);
+err:
+ priv->num_xdp_queues = 0;
+ return err;
+}
+
+static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
+{
+ if (!gve_get_napi_enabled(priv))
+ return;
+
+ if (link_status == netif_carrier_ok(priv->dev))
+ return;
+
+ if (link_status) {
+ netdev_info(priv->dev, "Device link is up.\n");
+ netif_carrier_on(priv->dev);
+ } else {
+ netdev_info(priv->dev, "Device link is down.\n");
+ netif_carrier_off(priv->dev);
+ }
+}
+
+static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct bpf_prog *old_prog;
+ int err = 0;
+ u32 status;
+
+ old_prog = READ_ONCE(priv->xdp_prog);
+ if (!netif_carrier_ok(priv->dev)) {
+ WRITE_ONCE(priv->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ return 0;
+ }
+
+ gve_turndown(priv);
+ if (!old_prog && prog) {
+ // Allocate XDP TX queues if an XDP program is
+ // being installed
+ err = gve_add_xdp_queues(priv);
+ if (err)
+ goto out;
+ } else if (old_prog && !prog) {
+ // Remove XDP TX queues if an XDP program is
+ // being uninstalled
+ err = gve_remove_xdp_queues(priv);
+ if (err)
+ goto out;
+ }
+ WRITE_ONCE(priv->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+out:
+ gve_turnup(priv);
+ status = ioread32be(&priv->reg_bar0->device_status);
+ gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
+ return err;
+}
+
+static int gve_xsk_pool_enable(struct net_device *dev,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct napi_struct *napi;
+ struct gve_rx_ring *rx;
+ int tx_qid;
+ int err;
+
+ if (qid >= priv->rx_cfg.num_queues) {
+ dev_err(&priv->pdev->dev, "xsk pool invalid qid %d", qid);
+ return -EINVAL;
+ }
+ if (xsk_pool_get_rx_frame_size(pool) <
+ priv->dev->max_mtu + sizeof(struct ethhdr)) {
+ dev_err(&priv->pdev->dev, "xsk pool frame_len too small");
+ return -EINVAL;
+ }
+
+ err = xsk_pool_dma_map(pool, &priv->pdev->dev,
+ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ if (err)
+ return err;
+
+ /* If XDP prog is not installed, return */
+ if (!priv->xdp_prog)
+ return 0;
+
+ rx = &priv->rx[qid];
+ napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+ err = xdp_rxq_info_reg(&rx->xsk_rxq, dev, qid, napi->napi_id);
+ if (err)
+ goto err;
+
+ err = xdp_rxq_info_reg_mem_model(&rx->xsk_rxq,
+ MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (err)
+ goto err;
+
+ xsk_pool_set_rxq_info(pool, &rx->xsk_rxq);
+ rx->xsk_pool = pool;
+
+ tx_qid = gve_xdp_tx_queue_id(priv, qid);
+ priv->tx[tx_qid].xsk_pool = pool;
+
+ return 0;
+err:
+ if (xdp_rxq_info_is_reg(&rx->xsk_rxq))
+ xdp_rxq_info_unreg(&rx->xsk_rxq);
+
+ xsk_pool_dma_unmap(pool,
+ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ return err;
+}
+
+static int gve_xsk_pool_disable(struct net_device *dev,
+ u16 qid)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct napi_struct *napi_rx;
+ struct napi_struct *napi_tx;
+ struct xsk_buff_pool *pool;
+ int tx_qid;
+
+ pool = xsk_get_pool_from_qid(dev, qid);
+ if (!pool)
+ return -EINVAL;
+ if (qid >= priv->rx_cfg.num_queues)
+ return -EINVAL;
+
+ /* If XDP prog is not installed, unmap DMA and return */
+ if (!priv->xdp_prog)
+ goto done;
+
+ tx_qid = gve_xdp_tx_queue_id(priv, qid);
+ if (!netif_running(dev)) {
+ priv->rx[qid].xsk_pool = NULL;
+ xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
+ priv->tx[tx_qid].xsk_pool = NULL;
+ goto done;
+ }
+
+ napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
+ napi_disable(napi_rx); /* make sure current rx poll is done */
+
+ napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
+ napi_disable(napi_tx); /* make sure current tx poll is done */
+
+ priv->rx[qid].xsk_pool = NULL;
+ xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
+ priv->tx[tx_qid].xsk_pool = NULL;
+ smp_mb(); /* Make sure it is visible to the workers on datapath */
+
+ napi_enable(napi_rx);
+ if (gve_rx_work_pending(&priv->rx[qid]))
+ napi_schedule(napi_rx);
+
+ napi_enable(napi_tx);
+ if (gve_tx_clean_pending(priv, &priv->tx[tx_qid]))
+ napi_schedule(napi_tx);
+
+done:
+ xsk_pool_dma_unmap(pool,
+ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
+ return 0;
+}
+
+static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
+
+ if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
+ return -EINVAL;
+
+ if (flags & XDP_WAKEUP_TX) {
+ struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
+ struct napi_struct *napi =
+ &priv->ntfy_blocks[tx->ntfy_id].napi;
+
+ if (!napi_if_scheduled_mark_missed(napi)) {
+ /* Call local_bh_enable to trigger SoftIRQ processing */
+ local_bh_disable();
+ napi_schedule(napi);
+ local_bh_enable();
+ }
+
+ tx->xdp_xsk_wakeup++;
+ }
+
+ return 0;
+}
+
+static int verify_xdp_configuration(struct net_device *dev)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (dev->features & NETIF_F_LRO) {
+ netdev_warn(dev, "XDP is not supported when LRO is on.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->queue_format != GVE_GQI_QPL_FORMAT) {
+ netdev_warn(dev, "XDP is not supported in mode %d.\n",
+ priv->queue_format);
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->mtu > (PAGE_SIZE / 2) - sizeof(struct ethhdr) - GVE_RX_PAD) {
+ netdev_warn(dev, "XDP is not supported for mtu %d.\n",
+ dev->mtu);
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues ||
+ (2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) {
+ netdev_warn(dev, "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d",
+ priv->rx_cfg.num_queues,
+ priv->tx_cfg.num_queues,
+ priv->tx_cfg.max_queues);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = verify_xdp_configuration(dev);
+ if (err)
+ return err;
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return gve_set_xdp(priv, xdp->prog, xdp->extack);
+ case XDP_SETUP_XSK_POOL:
+ if (xdp->xsk.pool)
+ return gve_xsk_pool_enable(dev, xdp->xsk.pool, xdp->xsk.queue_id);
+ else
+ return gve_xsk_pool_disable(dev, xdp->xsk.queue_id);
+ default:
+ return -EINVAL;
+ }
+}
+
int gve_adjust_queues(struct gve_priv *priv,
struct gve_queue_config new_rx_config,
struct gve_queue_config new_tx_config)
@@ -1064,7 +1723,7 @@ static void gve_turndown(struct gve_priv *priv)
return;
/* Disable napi to prevent more work from coming in */
- for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+ for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
@@ -1092,7 +1751,7 @@ static void gve_turnup(struct gve_priv *priv)
netif_tx_start_all_queues(priv->dev);
/* Enable napi and unmask interrupts for all queues */
- for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+ for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
@@ -1100,9 +1759,8 @@ static void gve_turnup(struct gve_priv *priv)
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
} else {
- u32 val = gve_set_itr_ratelimit_dqo(GVE_TX_IRQ_RATELIMIT_US_DQO);
-
- gve_write_irq_doorbell_dqo(priv, block, val);
+ gve_set_itr_coalesce_usecs_dqo(priv, block,
+ priv->tx_coalesce_usecs);
}
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
@@ -1113,9 +1771,8 @@ static void gve_turnup(struct gve_priv *priv)
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
} else {
- u32 val = gve_set_itr_ratelimit_dqo(GVE_RX_IRQ_RATELIMIT_US_DQO);
-
- gve_write_irq_doorbell_dqo(priv, block, val);
+ gve_set_itr_coalesce_usecs_dqo(priv, block,
+ priv->rx_coalesce_usecs);
}
}
@@ -1211,6 +1868,9 @@ static const struct net_device_ops gve_netdev_ops = {
.ndo_get_stats64 = gve_get_stats,
.ndo_tx_timeout = gve_tx_timeout,
.ndo_set_features = gve_set_features,
+ .ndo_bpf = gve_xdp,
+ .ndo_xdp_xmit = gve_xdp_xmit,
+ .ndo_xsk_wakeup = gve_xsk_wakeup,
};
static void gve_handle_status(struct gve_priv *priv, u32 status)
@@ -1254,7 +1914,7 @@ void gve_handle_report_stats(struct gve_priv *priv)
be64_add_cpu(&priv->stats_report->written_count, 1);
/* tx stats */
if (priv->tx) {
- for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+ for (idx = 0; idx < gve_num_tx_queues(priv); idx++) {
u32 last_completion = 0;
u32 tx_frames = 0;
@@ -1317,23 +1977,6 @@ void gve_handle_report_stats(struct gve_priv *priv)
}
}
-static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
-{
- if (!gve_get_napi_enabled(priv))
- return;
-
- if (link_status == netif_carrier_ok(priv->dev))
- return;
-
- if (link_status) {
- netdev_info(priv->dev, "Device link is up.\n");
- netif_carrier_on(priv->dev);
- } else {
- netdev_info(priv->dev, "Device link is down.\n");
- netif_carrier_off(priv->dev);
- }
-}
-
/* Handle NIC status register changes, reset requests and report stats */
static void gve_service_task(struct work_struct *work)
{
@@ -1347,6 +1990,18 @@ static void gve_service_task(struct work_struct *work)
gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
}
+static void gve_set_netdev_xdp_features(struct gve_priv *priv)
+{
+ if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
+ priv->dev->xdp_features = NETDEV_XDP_ACT_BASIC;
+ priv->dev->xdp_features |= NETDEV_XDP_ACT_REDIRECT;
+ priv->dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
+ priv->dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
+ } else {
+ priv->dev->xdp_features = 0;
+ }
+}
+
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
int num_ntfy;
@@ -1360,6 +2015,13 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
return err;
}
+ err = gve_verify_driver_compatibility(priv);
+ if (err) {
+ dev_err(&priv->pdev->dev,
+ "Could not verify driver compatibility: err=%d\n", err);
+ goto err;
+ }
+
if (skip_describe_device)
goto setup_device;
@@ -1412,7 +2074,13 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
+ if (!gve_is_gqi(priv)) {
+ priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO;
+ priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
+ }
+
setup_device:
+ gve_set_netdev_xdp_features(priv);
err = gve_setup_device_resources(priv);
if (!err)
return 0;
@@ -1663,6 +2331,58 @@ static void gve_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static void gve_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct gve_priv *priv = netdev_priv(netdev);
+ bool was_up = netif_carrier_ok(priv->dev);
+
+ rtnl_lock();
+ if (was_up && gve_close(priv->dev)) {
+ /* If the dev was up, attempt to close, if close fails, reset */
+ gve_reset_and_teardown(priv, was_up);
+ } else {
+ /* If the dev wasn't up or close worked, finish tearing down */
+ gve_teardown_priv_resources(priv);
+ }
+ rtnl_unlock();
+}
+
+#ifdef CONFIG_PM
+static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct gve_priv *priv = netdev_priv(netdev);
+ bool was_up = netif_carrier_ok(priv->dev);
+
+ priv->suspend_cnt++;
+ rtnl_lock();
+ if (was_up && gve_close(priv->dev)) {
+ /* If the dev was up, attempt to close, if close fails, reset */
+ gve_reset_and_teardown(priv, was_up);
+ } else {
+ /* If the dev wasn't up or close worked, finish tearing down */
+ gve_teardown_priv_resources(priv);
+ }
+ priv->up_before_suspend = was_up;
+ rtnl_unlock();
+ return 0;
+}
+
+static int gve_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct gve_priv *priv = netdev_priv(netdev);
+ int err;
+
+ priv->resume_cnt++;
+ rtnl_lock();
+ err = gve_reset_recovery(priv, priv->up_before_suspend);
+ rtnl_unlock();
+ return err;
+}
+#endif /* CONFIG_PM */
+
static const struct pci_device_id gve_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
{ }
@@ -1673,6 +2393,11 @@ static struct pci_driver gvnic_driver = {
.id_table = gve_id_table,
.probe = gve_probe,
.remove = gve_remove,
+ .shutdown = gve_shutdown,
+#ifdef CONFIG_PM
+ .suspend = gve_suspend,
+ .resume = gve_resume,
+#endif
};
module_pci_driver(gvnic_driver);
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 3d04b5aff331..d1da7413dc4d 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -8,6 +8,9 @@
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/etherdevice.h>
+#include <linux/filter.h>
+#include <net/xdp.h>
+#include <net/xdp_sock_drv.h>
static void gve_rx_free_buffer(struct device *dev,
struct gve_rx_slot_page_info *page_info,
@@ -35,6 +38,12 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
rx->data.page_info[i].pagecnt_bias - 1);
gve_unassign_qpl(priv, rx->data.qpl->id);
rx->data.qpl = NULL;
+
+ for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
+ page_ref_sub(rx->qpl_copy_pool[i].page,
+ rx->qpl_copy_pool[i].pagecnt_bias - 1);
+ put_page(rx->qpl_copy_pool[i].page);
+ }
}
kvfree(rx->data.page_info);
rx->data.page_info = NULL;
@@ -63,6 +72,10 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx)
dma_free_coherent(dev, bytes, rx->data.data_ring,
rx->data.data_bus);
rx->data.data_ring = NULL;
+
+ kvfree(rx->qpl_copy_pool);
+ rx->qpl_copy_pool = NULL;
+
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
@@ -86,7 +99,8 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
dma_addr_t dma;
int err;
- err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE);
+ err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
+ GFP_ATOMIC);
if (err)
return err;
@@ -100,6 +114,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
u32 slots;
int err;
int i;
+ int j;
/* Allocate one page per Rx queue slot. Each page is split into two
* packet buffers, when possible we "page flip" between the two.
@@ -112,7 +127,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
return -ENOMEM;
if (!rx->data.raw_addressing) {
- rx->data.qpl = gve_assign_rx_qpl(priv);
+ rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num);
if (!rx->data.qpl) {
kvfree(rx->data.page_info);
rx->data.page_info = NULL;
@@ -134,7 +149,33 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
goto alloc_err;
}
+ if (!rx->data.raw_addressing) {
+ for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
+ struct page *page = alloc_page(GFP_KERNEL);
+
+ if (!page) {
+ err = -ENOMEM;
+ goto alloc_err_qpl;
+ }
+
+ rx->qpl_copy_pool[j].page = page;
+ rx->qpl_copy_pool[j].page_offset = 0;
+ rx->qpl_copy_pool[j].page_address = page_address(page);
+
+ /* The page already has 1 ref. */
+ page_ref_add(page, INT_MAX - 1);
+ rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX;
+ }
+ }
+
return slots;
+
+alloc_err_qpl:
+ while (j--) {
+ page_ref_sub(rx->qpl_copy_pool[j].page,
+ rx->qpl_copy_pool[j].pagecnt_bias - 1);
+ put_page(rx->qpl_copy_pool[j].page);
+ }
alloc_err:
while (i--)
gve_rx_free_buffer(&priv->pdev->dev,
@@ -145,12 +186,11 @@ alloc_err:
static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
{
- ctx->curr_frag_cnt = 0;
- ctx->total_expected_size = 0;
- ctx->expected_frag_cnt = 0;
ctx->skb_head = NULL;
ctx->skb_tail = NULL;
- ctx->reuse_frags = false;
+ ctx->total_size = 0;
+ ctx->frag_cnt = 0;
+ ctx->drop_pkt = false;
}
static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
@@ -180,10 +220,22 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
GFP_KERNEL);
if (!rx->data.data_ring)
return -ENOMEM;
+
+ rx->qpl_copy_pool_mask = min_t(u32, U32_MAX, slots * 2) - 1;
+ rx->qpl_copy_pool_head = 0;
+ rx->qpl_copy_pool = kvcalloc(rx->qpl_copy_pool_mask + 1,
+ sizeof(rx->qpl_copy_pool[0]),
+ GFP_KERNEL);
+
+ if (!rx->qpl_copy_pool) {
+ err = -ENOMEM;
+ goto abort_with_slots;
+ }
+
filled_pages = gve_prefill_rx_pages(rx);
if (filled_pages < 0) {
err = -ENOMEM;
- goto abort_with_slots;
+ goto abort_with_copy_pool;
}
rx->fill_cnt = filled_pages;
/* Ensure data ring slots (packet buffers) are visible. */
@@ -235,6 +287,9 @@ abort_with_q_resources:
rx->q_resources = NULL;
abort_filled:
gve_rx_unfill_pages(priv, rx);
+abort_with_copy_pool:
+ kvfree(rx->qpl_copy_pool);
+ rx->qpl_copy_pool = NULL;
abort_with_slots:
bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);
@@ -291,30 +346,47 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
return PKT_HASH_TYPE_L2;
}
-static u16 gve_rx_ctx_padding(struct gve_rx_ctx *ctx)
-{
- return (ctx->curr_frag_cnt == 0) ? GVE_RX_PAD : 0;
-}
-
static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info,
u16 packet_buffer_size, u16 len,
struct gve_rx_ctx *ctx)
{
- u32 offset = page_info->page_offset + gve_rx_ctx_padding(ctx);
- struct sk_buff *skb;
+ u32 offset = page_info->page_offset + page_info->pad;
+ struct sk_buff *skb = ctx->skb_tail;
+ int num_frags = 0;
- if (!ctx->skb_head)
- ctx->skb_head = napi_get_frags(napi);
+ if (!skb) {
+ skb = napi_get_frags(napi);
+ if (unlikely(!skb))
+ return NULL;
- if (unlikely(!ctx->skb_head))
- return NULL;
+ ctx->skb_head = skb;
+ ctx->skb_tail = skb;
+ } else {
+ num_frags = skb_shinfo(ctx->skb_tail)->nr_frags;
+ if (num_frags == MAX_SKB_FRAGS) {
+ skb = napi_alloc_skb(napi, 0);
+ if (!skb)
+ return NULL;
- skb = ctx->skb_head;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
+ // We will never chain more than two SKBs: 2 * 16 * 2k > 64k
+ // which is why we do not need to chain by using skb->next
+ skb_shinfo(ctx->skb_tail)->frag_list = skb;
+
+ ctx->skb_tail = skb;
+ num_frags = 0;
+ }
+ }
+
+ if (skb != ctx->skb_head) {
+ ctx->skb_head->len += len;
+ ctx->skb_head->data_len += len;
+ ctx->skb_head->truesize += packet_buffer_size;
+ }
+ skb_add_rx_frag(skb, num_frags, page_info->page,
offset, len, packet_buffer_size);
- return skb;
+ return ctx->skb_head;
}
static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
@@ -362,6 +434,92 @@ gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
return skb;
}
+static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
+ struct gve_rx_slot_page_info *page_info,
+ u16 len, struct napi_struct *napi)
+{
+ u32 pool_idx = rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask;
+ void *src = page_info->page_address + page_info->page_offset;
+ struct gve_rx_slot_page_info *copy_page_info;
+ struct gve_rx_ctx *ctx = &rx->ctx;
+ bool alloc_page = false;
+ struct sk_buff *skb;
+ void *dst;
+
+ copy_page_info = &rx->qpl_copy_pool[pool_idx];
+ if (!copy_page_info->can_flip) {
+ int recycle = gve_rx_can_recycle_buffer(copy_page_info);
+
+ if (unlikely(recycle < 0)) {
+ gve_schedule_reset(rx->gve);
+ return NULL;
+ }
+ alloc_page = !recycle;
+ }
+
+ if (alloc_page) {
+ struct gve_rx_slot_page_info alloc_page_info;
+ struct page *page;
+
+ /* The least recently used page turned out to be
+ * still in use by the kernel. Ignoring it and moving
+ * on alleviates head-of-line blocking.
+ */
+ rx->qpl_copy_pool_head++;
+
+ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ return NULL;
+
+ alloc_page_info.page = page;
+ alloc_page_info.page_offset = 0;
+ alloc_page_info.page_address = page_address(page);
+ alloc_page_info.pad = page_info->pad;
+
+ memcpy(alloc_page_info.page_address, src, page_info->pad + len);
+ skb = gve_rx_add_frags(napi, &alloc_page_info,
+ rx->packet_buffer_size,
+ len, ctx);
+
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_frag_copy_cnt++;
+ rx->rx_frag_alloc_cnt++;
+ u64_stats_update_end(&rx->statss);
+
+ return skb;
+ }
+
+ dst = copy_page_info->page_address + copy_page_info->page_offset;
+ memcpy(dst, src, page_info->pad + len);
+ copy_page_info->pad = page_info->pad;
+
+ skb = gve_rx_add_frags(napi, copy_page_info,
+ rx->packet_buffer_size, len, ctx);
+ if (unlikely(!skb))
+ return NULL;
+
+ gve_dec_pagecnt_bias(copy_page_info);
+ copy_page_info->page_offset += rx->packet_buffer_size;
+ copy_page_info->page_offset &= (PAGE_SIZE - 1);
+
+ if (copy_page_info->can_flip) {
+ /* We have used both halves of this copy page, it
+ * is time for it to go to the back of the queue.
+ */
+ copy_page_info->can_flip = false;
+ rx->qpl_copy_pool_head++;
+ prefetch(rx->qpl_copy_pool[rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask].page);
+ } else {
+ copy_page_info->can_flip = true;
+ }
+
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_frag_copy_cnt++;
+ u64_stats_update_end(&rx->statss);
+
+ return skb;
+}
+
static struct sk_buff *
gve_rx_qpl(struct device *dev, struct net_device *netdev,
struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info,
@@ -376,7 +534,7 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
* choice is to copy the data out of it so that we can return it to the
* device.
*/
- if (ctx->reuse_frags) {
+ if (page_info->can_flip) {
skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
/* No point in recycling if we didn't get the skb */
if (skb) {
@@ -385,116 +543,23 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
}
} else {
- const u16 padding = gve_rx_ctx_padding(ctx);
-
- skb = gve_rx_copy(netdev, napi, page_info, len, padding, ctx);
- if (skb) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_frag_copy_cnt++;
- u64_stats_update_end(&rx->statss);
- }
+ skb = gve_rx_copy_to_pool(rx, page_info, len, napi);
}
return skb;
}
-#define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
-static u16 gve_rx_get_fragment_size(struct gve_rx_ctx *ctx, struct gve_rx_desc *desc)
-{
- return be16_to_cpu(desc->len) - gve_rx_ctx_padding(ctx);
-}
-
-static bool gve_rx_ctx_init(struct gve_rx_ctx *ctx, struct gve_rx_ring *rx)
-{
- bool qpl_mode = !rx->data.raw_addressing, packet_size_error = false;
- bool buffer_error = false, desc_error = false, seqno_error = false;
- struct gve_rx_slot_page_info *page_info;
- struct gve_priv *priv = rx->gve;
- u32 idx = rx->cnt & rx->mask;
- bool reuse_frags, can_flip;
- struct gve_rx_desc *desc;
- u16 packet_size = 0;
- u16 n_frags = 0;
- int recycle;
-
- /** In QPL mode, we only flip buffers when all buffers containing the packet
- * can be flipped. RDA can_flip decisions will be made later, per frag.
- */
- can_flip = qpl_mode;
- reuse_frags = can_flip;
- do {
- u16 frag_size;
-
- n_frags++;
- desc = &rx->desc.desc_ring[idx];
- desc_error = unlikely(desc->flags_seq & GVE_RXF_ERR) || desc_error;
- if (GVE_SEQNO(desc->flags_seq) != rx->desc.seqno) {
- seqno_error = true;
- netdev_warn(priv->dev,
- "RX seqno error: want=%d, got=%d, dropping packet and scheduling reset.",
- rx->desc.seqno, GVE_SEQNO(desc->flags_seq));
- }
- frag_size = be16_to_cpu(desc->len);
- packet_size += frag_size;
- if (frag_size > rx->packet_buffer_size) {
- packet_size_error = true;
- netdev_warn(priv->dev,
- "RX fragment error: packet_buffer_size=%d, frag_size=%d, droping packet.",
- rx->packet_buffer_size, be16_to_cpu(desc->len));
- }
- page_info = &rx->data.page_info[idx];
- if (can_flip) {
- recycle = gve_rx_can_recycle_buffer(page_info);
- reuse_frags = reuse_frags && recycle > 0;
- buffer_error = buffer_error || unlikely(recycle < 0);
- }
- idx = (idx + 1) & rx->mask;
- rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
- } while (GVE_PKTCONT_BIT_IS_SET(desc->flags_seq));
-
- prefetch(rx->desc.desc_ring + idx);
-
- ctx->curr_frag_cnt = 0;
- ctx->total_expected_size = packet_size - GVE_RX_PAD;
- ctx->expected_frag_cnt = n_frags;
- ctx->skb_head = NULL;
- ctx->reuse_frags = reuse_frags;
-
- if (ctx->expected_frag_cnt > 1) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_cont_packet_cnt++;
- u64_stats_update_end(&rx->statss);
- }
- if (ctx->total_expected_size > priv->rx_copybreak && !ctx->reuse_frags && qpl_mode) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_copied_pkt++;
- u64_stats_update_end(&rx->statss);
- }
-
- if (unlikely(buffer_error || seqno_error || packet_size_error)) {
- gve_schedule_reset(priv);
- return false;
- }
-
- if (unlikely(desc_error)) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_desc_err_dropped_pkt++;
- u64_stats_update_end(&rx->statss);
- return false;
- }
- return true;
-}
-
static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_slot_page_info *page_info, struct napi_struct *napi,
- u16 len, union gve_rx_data_slot *data_slot)
+ u16 len, union gve_rx_data_slot *data_slot,
+ bool is_only_frag)
{
struct net_device *netdev = priv->dev;
struct gve_rx_ctx *ctx = &rx->ctx;
struct sk_buff *skb = NULL;
- if (len <= priv->rx_copybreak && ctx->expected_frag_cnt == 1) {
+ if (len <= priv->rx_copybreak && is_only_frag) {
/* Just copy small packets */
- skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD, ctx);
+ skb = gve_rx_copy(netdev, napi, page_info, len);
if (skb) {
u64_stats_update_begin(&rx->statss);
rx->rx_copied_pkt++;
@@ -503,29 +568,25 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
u64_stats_update_end(&rx->statss);
}
} else {
- if (rx->data.raw_addressing) {
- int recycle = gve_rx_can_recycle_buffer(page_info);
+ int recycle = gve_rx_can_recycle_buffer(page_info);
- if (unlikely(recycle < 0)) {
- gve_schedule_reset(priv);
- return NULL;
- }
- page_info->can_flip = recycle;
- if (page_info->can_flip) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_frag_flip_cnt++;
- u64_stats_update_end(&rx->statss);
- }
+ if (unlikely(recycle < 0)) {
+ gve_schedule_reset(priv);
+ return NULL;
+ }
+ page_info->can_flip = recycle;
+ if (page_info->can_flip) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_frag_flip_cnt++;
+ u64_stats_update_end(&rx->statss);
+ }
+
+ if (rx->data.raw_addressing) {
skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
page_info, len, napi,
data_slot,
rx->packet_buffer_size, ctx);
} else {
- if (ctx->reuse_frags) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_frag_flip_cnt++;
- u64_stats_update_end(&rx->statss);
- }
skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
page_info, len, napi, data_slot);
}
@@ -533,100 +594,243 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
return skb;
}
-static bool gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
- u64 *packet_size_bytes, u32 *work_done)
+static int gve_xsk_pool_redirect(struct net_device *dev,
+ struct gve_rx_ring *rx,
+ void *data, int len,
+ struct bpf_prog *xdp_prog)
+{
+ struct xdp_buff *xdp;
+ int err;
+
+ if (rx->xsk_pool->frame_len < len)
+ return -E2BIG;
+ xdp = xsk_buff_alloc(rx->xsk_pool);
+ if (!xdp) {
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_alloc_fails++;
+ u64_stats_update_end(&rx->statss);
+ return -ENOMEM;
+ }
+ xdp->data_end = xdp->data + len;
+ memcpy(xdp->data, data, len);
+ err = xdp_do_redirect(dev, xdp, xdp_prog);
+ if (err)
+ xsk_buff_free(xdp);
+ return err;
+}
+
+static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx,
+ struct xdp_buff *orig, struct bpf_prog *xdp_prog)
+{
+ int total_len, len = orig->data_end - orig->data;
+ int headroom = XDP_PACKET_HEADROOM;
+ struct xdp_buff new;
+ void *frame;
+ int err;
+
+ if (rx->xsk_pool)
+ return gve_xsk_pool_redirect(dev, rx, orig->data,
+ len, xdp_prog);
+
+ total_len = headroom + SKB_DATA_ALIGN(len) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC);
+ if (!frame) {
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_alloc_fails++;
+ u64_stats_update_end(&rx->statss);
+ return -ENOMEM;
+ }
+ xdp_init_buff(&new, total_len, &rx->xdp_rxq);
+ xdp_prepare_buff(&new, frame, headroom, len, false);
+ memcpy(new.data, orig->data, len);
+
+ err = xdp_do_redirect(dev, &new, xdp_prog);
+ if (err)
+ page_frag_free(frame);
+
+ return err;
+}
+
+static void gve_xdp_done(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct xdp_buff *xdp, struct bpf_prog *xprog,
+ int xdp_act)
+{
+ struct gve_tx_ring *tx;
+ int tx_qid;
+ int err;
+
+ switch (xdp_act) {
+ case XDP_ABORTED:
+ case XDP_DROP:
+ default:
+ break;
+ case XDP_TX:
+ tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
+ tx = &priv->tx[tx_qid];
+ spin_lock(&tx->xdp_lock);
+ err = gve_xdp_xmit_one(priv, tx, xdp->data,
+ xdp->data_end - xdp->data, NULL);
+ spin_unlock(&tx->xdp_lock);
+
+ if (unlikely(err)) {
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_tx_errors++;
+ u64_stats_update_end(&rx->statss);
+ }
+ break;
+ case XDP_REDIRECT:
+ err = gve_xdp_redirect(priv->dev, rx, xdp, xprog);
+
+ if (unlikely(err)) {
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_redirect_errors++;
+ u64_stats_update_end(&rx->statss);
+ }
+ break;
+ }
+ u64_stats_update_begin(&rx->statss);
+ if ((u32)xdp_act < GVE_XDP_ACTIONS)
+ rx->xdp_actions[xdp_act]++;
+ u64_stats_update_end(&rx->statss);
+}
+
+#define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
+static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
+ struct gve_rx_desc *desc, u32 idx,
+ struct gve_rx_cnts *cnts)
{
+ bool is_last_frag = !GVE_PKTCONT_BIT_IS_SET(desc->flags_seq);
struct gve_rx_slot_page_info *page_info;
+ u16 frag_size = be16_to_cpu(desc->len);
struct gve_rx_ctx *ctx = &rx->ctx;
union gve_rx_data_slot *data_slot;
struct gve_priv *priv = rx->gve;
- struct gve_rx_desc *first_desc;
struct sk_buff *skb = NULL;
- struct gve_rx_desc *desc;
- struct napi_struct *napi;
+ struct bpf_prog *xprog;
+ struct xdp_buff xdp;
dma_addr_t page_bus;
- u32 work_cnt = 0;
void *va;
- u32 idx;
- u16 len;
- idx = rx->cnt & rx->mask;
- first_desc = &rx->desc.desc_ring[idx];
- desc = first_desc;
- napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+ u16 len = frag_size;
+ struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+ bool is_first_frag = ctx->frag_cnt == 0;
- if (unlikely(!gve_rx_ctx_init(ctx, rx)))
- goto skb_alloc_fail;
+ bool is_only_frag = is_first_frag && is_last_frag;
- while (ctx->curr_frag_cnt < ctx->expected_frag_cnt) {
- /* Prefetch two packet buffers ahead, we will need it soon. */
- page_info = &rx->data.page_info[(idx + 2) & rx->mask];
- va = page_info->page_address + page_info->page_offset;
+ if (unlikely(ctx->drop_pkt))
+ goto finish_frag;
- prefetch(page_info->page); /* Kernel page struct. */
- prefetch(va); /* Packet header. */
- prefetch(va + 64); /* Next cacheline too. */
+ if (desc->flags_seq & GVE_RXF_ERR) {
+ ctx->drop_pkt = true;
+ cnts->desc_err_pkt_cnt++;
+ napi_free_frags(napi);
+ goto finish_frag;
+ }
- len = gve_rx_get_fragment_size(ctx, desc);
+ if (unlikely(frag_size > rx->packet_buffer_size)) {
+ netdev_warn(priv->dev, "Unexpected frag size %d, can't exceed %d, scheduling reset",
+ frag_size, rx->packet_buffer_size);
+ ctx->drop_pkt = true;
+ napi_free_frags(napi);
+ gve_schedule_reset(rx->gve);
+ goto finish_frag;
+ }
- page_info = &rx->data.page_info[idx];
- data_slot = &rx->data.data_ring[idx];
- page_bus = rx->data.raw_addressing ?
- be64_to_cpu(data_slot->addr) - page_info->page_offset :
- rx->data.qpl->page_buses[idx];
- dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, PAGE_SIZE, DMA_FROM_DEVICE);
-
- skb = gve_rx_skb(priv, rx, page_info, napi, len, data_slot);
- if (!skb) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_skb_alloc_fail++;
- u64_stats_update_end(&rx->statss);
- goto skb_alloc_fail;
+ /* Prefetch two packet buffers ahead, we will need it soon. */
+ page_info = &rx->data.page_info[(idx + 2) & rx->mask];
+ va = page_info->page_address + page_info->page_offset;
+ prefetch(page_info->page); /* Kernel page struct. */
+ prefetch(va); /* Packet header. */
+ prefetch(va + 64); /* Next cacheline too. */
+
+ page_info = &rx->data.page_info[idx];
+ data_slot = &rx->data.data_ring[idx];
+ page_bus = (rx->data.raw_addressing) ?
+ be64_to_cpu(data_slot->addr) - page_info->page_offset :
+ rx->data.qpl->page_buses[idx];
+ dma_sync_single_for_cpu(&priv->pdev->dev, page_bus,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ page_info->pad = is_first_frag ? GVE_RX_PAD : 0;
+ len -= page_info->pad;
+ frag_size -= page_info->pad;
+
+ xprog = READ_ONCE(priv->xdp_prog);
+ if (xprog && is_only_frag) {
+ void *old_data;
+ int xdp_act;
+
+ xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_info->page_address +
+ page_info->page_offset, GVE_RX_PAD,
+ len, false);
+ old_data = xdp.data;
+ xdp_act = bpf_prog_run_xdp(xprog, &xdp);
+ if (xdp_act != XDP_PASS) {
+ gve_xdp_done(priv, rx, &xdp, xprog, xdp_act);
+ ctx->total_size += frag_size;
+ goto finish_ok_pkt;
}
- ctx->curr_frag_cnt++;
- rx->cnt++;
- idx = rx->cnt & rx->mask;
- work_cnt++;
- desc = &rx->desc.desc_ring[idx];
- }
+ page_info->pad += xdp.data - old_data;
+ len = xdp.data_end - xdp.data;
- if (likely(feat & NETIF_F_RXCSUM)) {
- /* NIC passes up the partial sum */
- if (first_desc->csum)
- skb->ip_summed = CHECKSUM_COMPLETE;
- else
- skb->ip_summed = CHECKSUM_NONE;
- skb->csum = csum_unfold(first_desc->csum);
+ u64_stats_update_begin(&rx->statss);
+ rx->xdp_actions[XDP_PASS]++;
+ u64_stats_update_end(&rx->statss);
}
- /* parse flags & pass relevant info up */
- if (likely(feat & NETIF_F_RXHASH) &&
- gve_needs_rss(first_desc->flags_seq))
- skb_set_hash(skb, be32_to_cpu(first_desc->rss_hash),
- gve_rss_type(first_desc->flags_seq));
+ skb = gve_rx_skb(priv, rx, page_info, napi, len,
+ data_slot, is_only_frag);
+ if (!skb) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_skb_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
+
+ napi_free_frags(napi);
+ ctx->drop_pkt = true;
+ goto finish_frag;
+ }
+ ctx->total_size += frag_size;
+
+ if (is_first_frag) {
+ if (likely(feat & NETIF_F_RXCSUM)) {
+ /* NIC passes up the partial sum */
+ if (desc->csum)
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->csum = csum_unfold(desc->csum);
+ }
- *packet_size_bytes = skb->len + (skb->protocol ? ETH_HLEN : 0);
- *work_done = work_cnt;
- if (skb_is_nonlinear(skb))
- napi_gro_frags(napi);
- else
- napi_gro_receive(napi, skb);
+ /* parse flags & pass relevant info up */
+ if (likely(feat & NETIF_F_RXHASH) &&
+ gve_needs_rss(desc->flags_seq))
+ skb_set_hash(skb, be32_to_cpu(desc->rss_hash),
+ gve_rss_type(desc->flags_seq));
+ }
- gve_rx_ctx_clear(ctx);
- return true;
+ if (is_last_frag) {
+ skb_record_rx_queue(skb, rx->q_num);
+ if (skb_is_nonlinear(skb))
+ napi_gro_frags(napi);
+ else
+ napi_gro_receive(napi, skb);
+ goto finish_ok_pkt;
+ }
-skb_alloc_fail:
- if (napi->skb)
- napi_free_frags(napi);
- *packet_size_bytes = 0;
- *work_done = ctx->expected_frag_cnt;
- while (ctx->curr_frag_cnt < ctx->expected_frag_cnt) {
- rx->cnt++;
- ctx->curr_frag_cnt++;
+ goto finish_frag;
+
+finish_ok_pkt:
+ cnts->ok_pkt_bytes += ctx->total_size;
+ cnts->ok_pkt_cnt++;
+finish_frag:
+ ctx->frag_cnt++;
+ if (is_last_frag) {
+ cnts->total_pkt_cnt++;
+ cnts->cont_pkt_cnt += (ctx->frag_cnt > 1);
+ gve_rx_ctx_clear(ctx);
}
- gve_rx_ctx_clear(ctx);
- return false;
}
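
The reworked gve_rx() above is now invoked once per descriptor; it treats ctx->frag_cnt == 0 as the first fragment, the absence of the PKT_CONT flag as the last one, and only closes out the packet counters and clears the context on the last fragment. A standalone model of that per-fragment accounting for the success path only (the flag value, struct layout and names are assumptions, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PKT_CONT 0x8	/* assumed "more fragments follow" flag */

struct frag_ctx { unsigned int frag_cnt; uint64_t total_size; };
struct rx_cnts  { uint64_t pkts, bytes, cont_pkts; };

/* Account for one descriptor; returns true when a packet completed. */
static bool rx_one_frag(struct frag_ctx *ctx, struct rx_cnts *c,
			unsigned int flags, uint16_t frag_len)
{
	bool is_last = !(flags & PKT_CONT);

	ctx->total_size += frag_len;
	ctx->frag_cnt++;
	if (!is_last)
		return false;

	c->pkts++;
	c->bytes += ctx->total_size;
	c->cont_pkts += (ctx->frag_cnt > 1);	/* packet spanned several buffers */
	ctx->frag_cnt = 0;
	ctx->total_size = 0;
	return true;
}

int main(void)
{
	struct frag_ctx ctx = { 0, 0 };
	struct rx_cnts c = { 0, 0, 0 };

	/* one single-buffer packet, then one packet split over three buffers */
	rx_one_frag(&ctx, &c, 0, 1200);
	rx_one_frag(&ctx, &c, PKT_CONT, 2048);
	rx_one_frag(&ctx, &c, PKT_CONT, 2048);
	rx_one_frag(&ctx, &c, 0, 500);

	printf("pkts=%llu bytes=%llu cont=%llu\n",
	       (unsigned long long)c.pkts,
	       (unsigned long long)c.bytes,
	       (unsigned long long)c.cont_pkts);
	return 0;
}
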
bool gve_rx_work_pending(struct gve_rx_ring *rx)
@@ -639,8 +843,6 @@ bool gve_rx_work_pending(struct gve_rx_ring *rx)
desc = rx->desc.desc_ring + next_idx;
flags_seq = desc->flags_seq;
- /* Make sure we have synchronized the seq no with the device */
- smp_rmb();
return (GVE_SEQNO(flags_seq) == rx->desc.seqno);
}
@@ -704,36 +906,41 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
netdev_features_t feat)
{
- u32 work_done = 0, total_packet_cnt = 0, ok_packet_cnt = 0;
+ u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
+ u64 xdp_txs = rx->xdp_actions[XDP_TX];
+ struct gve_rx_ctx *ctx = &rx->ctx;
struct gve_priv *priv = rx->gve;
+ struct gve_rx_cnts cnts = {0};
+ struct gve_rx_desc *next_desc;
u32 idx = rx->cnt & rx->mask;
- struct gve_rx_desc *desc;
- u64 bytes = 0;
+ u32 work_done = 0;
- desc = &rx->desc.desc_ring[idx];
+ struct gve_rx_desc *desc = &rx->desc.desc_ring[idx];
+
+	// Exceed budget only if (and until) the in-flight packet is consumed.
while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
- work_done < budget) {
- u64 packet_size_bytes = 0;
- u32 work_cnt = 0;
- bool dropped;
-
- netif_info(priv, rx_status, priv->dev,
- "[%d] idx=%d desc=%p desc->flags_seq=0x%x\n",
- rx->q_num, idx, desc, desc->flags_seq);
- netif_info(priv, rx_status, priv->dev,
- "[%d] seqno=%d rx->desc.seqno=%d\n",
- rx->q_num, GVE_SEQNO(desc->flags_seq),
- rx->desc.seqno);
-
- dropped = !gve_rx(rx, feat, &packet_size_bytes, &work_cnt);
- if (!dropped) {
- bytes += packet_size_bytes;
- ok_packet_cnt++;
- }
- total_packet_cnt++;
+ (work_done < budget || ctx->frag_cnt)) {
+ next_desc = &rx->desc.desc_ring[(idx + 1) & rx->mask];
+ prefetch(next_desc);
+
+ gve_rx(rx, feat, desc, idx, &cnts);
+
+ rx->cnt++;
idx = rx->cnt & rx->mask;
desc = &rx->desc.desc_ring[idx];
- work_done += work_cnt;
+ rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
+ work_done++;
+ }
+
+ // The device will only send whole packets.
+ if (unlikely(ctx->frag_cnt)) {
+ struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+
+ napi_free_frags(napi);
+ gve_rx_ctx_clear(&rx->ctx);
+ netdev_warn(priv->dev, "Unexpected seq number %d with incomplete packet, expected %d, scheduling reset",
+ GVE_SEQNO(desc->flags_seq), rx->desc.seqno);
+ gve_schedule_reset(rx->gve);
}
if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold)
@@ -741,11 +948,19 @@ static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
if (work_done) {
u64_stats_update_begin(&rx->statss);
- rx->rpackets += ok_packet_cnt;
- rx->rbytes += bytes;
+ rx->rpackets += cnts.ok_pkt_cnt;
+ rx->rbytes += cnts.ok_pkt_bytes;
+ rx->rx_cont_packet_cnt += cnts.cont_pkt_cnt;
+ rx->rx_desc_err_dropped_pkt += cnts.desc_err_pkt_cnt;
u64_stats_update_end(&rx->statss);
}
+ if (xdp_txs != rx->xdp_actions[XDP_TX])
+ gve_xdp_tx_flush(priv, rx->q_num);
+
+ if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
+ xdp_do_flush();
+
/* restock ring slots */
if (!rx->data.raw_addressing) {
/* In QPL mode buffs are refilled as the desc are processed */
@@ -767,7 +982,7 @@ static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
}
gve_rx_write_doorbell(priv, rx);
- return total_packet_cnt;
+ return cnts.total_pkt_cnt;
}
int gve_rx_poll(struct gve_notify_block *block, int budget)
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index beb8bb079023..e57b73eb70f6 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv,
int err;
err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
- &buf_state->addr, DMA_FROM_DEVICE);
+ &buf_state->addr, DMA_FROM_DEVICE, GFP_ATOMIC);
if (err)
return err;
@@ -568,7 +568,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
if (eop && buf_len <= priv->rx_copybreak) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
- &buf_state->page_info, buf_len, 0, NULL);
+ &buf_state->page_info, buf_len);
if (unlikely(!rx->ctx.skb_head))
goto error;
rx->ctx.skb_tail = rx->ctx.skb_head;
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index a9cb241fedf4..813da572abca 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -11,6 +11,7 @@
#include <linux/tcp.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>
+#include <net/xdp_sock_drv.h>
static inline void gve_tx_put_doorbell(struct gve_priv *priv,
struct gve_queue_resources *q_resources,
@@ -19,6 +20,14 @@ static inline void gve_tx_put_doorbell(struct gve_priv *priv,
iowrite32be(val, &priv->db_bar2[be32_to_cpu(q_resources->db_index)]);
}
+void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid)
+{
+ u32 tx_qid = gve_xdp_tx_queue_id(priv, xdp_qid);
+ struct gve_tx_ring *tx = &priv->tx[tx_qid];
+
+ gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
+}
+
/* gvnic can only transmit from a Registered Segment.
* We copy skb payloads into the registered segment before writing Tx
* descriptors and ringing the Tx doorbell.
@@ -132,6 +141,58 @@ static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes)
atomic_add(bytes, &fifo->available);
}
+static size_t gve_tx_clear_buffer_state(struct gve_tx_buffer_state *info)
+{
+ size_t space_freed = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
+ space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
+ info->iov[i].iov_len = 0;
+ info->iov[i].iov_padding = 0;
+ }
+ return space_freed;
+}
+
+static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
+ u32 to_do)
+{
+ struct gve_tx_buffer_state *info;
+ u32 clean_end = tx->done + to_do;
+ u64 pkts = 0, bytes = 0;
+ size_t space_freed = 0;
+ u32 xsk_complete = 0;
+ u32 idx;
+
+ for (; tx->done < clean_end; tx->done++) {
+ idx = tx->done & tx->mask;
+ info = &tx->info[idx];
+
+ if (unlikely(!info->xdp.size))
+ continue;
+
+ bytes += info->xdp.size;
+ pkts++;
+ xsk_complete += info->xdp.is_xsk;
+
+ info->xdp.size = 0;
+ if (info->xdp_frame) {
+ xdp_return_frame(info->xdp_frame);
+ info->xdp_frame = NULL;
+ }
+ space_freed += gve_tx_clear_buffer_state(info);
+ }
+
+ gve_tx_free_fifo(&tx->tx_fifo, space_freed);
+ if (xsk_complete > 0 && tx->xsk_pool)
+ xsk_tx_completed(tx->xsk_pool, xsk_complete);
+ u64_stats_update_begin(&tx->statss);
+ tx->bytes_done += bytes;
+ tx->pkt_done += pkts;
+ u64_stats_update_end(&tx->statss);
+ return pkts;
+}
+
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u32 to_do, bool try_to_wake);
@@ -144,8 +205,12 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)
gve_tx_remove_from_block(priv, idx);
slots = tx->mask + 1;
- gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
- netdev_tx_reset_queue(tx->netdev_txq);
+ if (tx->q_num < priv->tx_cfg.num_queues) {
+ gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
+ netdev_tx_reset_queue(tx->netdev_txq);
+ } else {
+ gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
+ }
dma_free_coherent(hdev, sizeof(*tx->q_resources),
tx->q_resources, tx->q_resources_bus);
@@ -177,6 +242,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
/* Make sure everything is zeroed to start */
memset(tx, 0, sizeof(*tx));
spin_lock_init(&tx->clean_lock);
+ spin_lock_init(&tx->xdp_lock);
tx->q_num = idx;
tx->mask = slots - 1;
@@ -195,7 +261,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT;
tx->dev = &priv->pdev->dev;
if (!tx->raw_addressing) {
- tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
+ tx->tx_fifo.qpl = gve_assign_tx_qpl(priv, idx);
if (!tx->tx_fifo.qpl)
goto abort_with_desc;
/* map Tx FIFO */
@@ -213,7 +279,8 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx,
(unsigned long)tx->bus);
- tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
+ if (idx < priv->tx_cfg.num_queues)
+ tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
gve_tx_add_to_block(priv, idx);
return 0;
@@ -233,12 +300,12 @@ abort_with_info:
return -ENOMEM;
}
-int gve_tx_alloc_rings(struct gve_priv *priv)
+int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings)
{
int err = 0;
int i;
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ for (i = start_id; i < start_id + num_rings; i++) {
err = gve_tx_alloc_ring(priv, i);
if (err) {
netif_err(priv, drv, priv->dev,
@@ -251,17 +318,17 @@ int gve_tx_alloc_rings(struct gve_priv *priv)
if (err) {
int j;
- for (j = 0; j < i; j++)
+ for (j = start_id; j < i; j++)
gve_tx_free_ring(priv, j);
}
return err;
}
-void gve_tx_free_rings_gqi(struct gve_priv *priv)
+void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings)
{
int i;
- for (i = 0; i < priv->tx_cfg.num_queues; i++)
+ for (i = start_id; i < start_id + num_rings; i++)
gve_tx_free_ring(priv, i);
}
@@ -284,8 +351,8 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
int bytes;
int hlen;
- hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) +
- tcp_hdrlen(skb) : skb_headlen(skb);
+ hlen = skb_is_gso(skb) ? skb_checksum_start_offset(skb) + tcp_hdrlen(skb) :
+ min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
pad_bytes = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo,
hlen);
@@ -296,11 +363,14 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
return bytes;
}
-/* The most descriptors we could need is MAX_SKB_FRAGS + 3 : 1 for each skb frag,
- * +1 for the skb linear portion, +1 for when tcp hdr needs to be in separate descriptor,
- * and +1 if the payload wraps to the beginning of the FIFO.
+/* The most descriptors we could need is MAX_SKB_FRAGS + 4:
+ * 1 for each skb frag
+ * 1 for the skb linear portion
+ * 1 for when tcp hdr needs to be in separate descriptor
+ * 1 if the payload wraps to the beginning of the FIFO
+ * 1 for metadata descriptor
*/
-#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 3)
+#define MAX_TX_DESC_NEEDED (MAX_SKB_FRAGS + 4)
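
With the metadata descriptor added, the no-copy TX path below sizes a packet as one packet descriptor, one segment descriptor per page fragment, one extra segment descriptor if the linear part continues past the header, and one metadata descriptor when the skb carries an L4 hash; the copy path can additionally need one for a FIFO wrap, which is where the +4 above comes from. A standalone sketch of that count (the helper name is made up for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Descriptor count for one skb in the no-copy TX path. */
static int tx_descs_needed(int nr_frags, bool linear_split, bool has_l4_hash)
{
	int n = 1 + nr_frags;	/* packet descriptor + one per page fragment */

	if (linear_split)	/* rest of the linear data in its own seg desc */
		n++;
	if (has_l4_hash)	/* metadata descriptor carrying the flow hash */
		n++;
	return n;
}

int main(void)
{
	printf("gso skb, 3 frags, hashed: %d descriptors\n",
	       tx_descs_needed(3, true, true));	/* prints 6 */
	return 0;
}
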
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{
if (info->skb) {
@@ -371,18 +441,18 @@ static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
}
static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
- struct sk_buff *skb, bool is_gso,
+ u16 csum_offset, u8 ip_summed, bool is_gso,
int l4_hdr_offset, u32 desc_cnt,
- u16 hlen, u64 addr)
+ u16 hlen, u64 addr, u16 pkt_len)
{
/* l4_hdr_offset and csum_offset are in units of 16-bit words */
if (is_gso) {
pkt_desc->pkt.type_flags = GVE_TXD_TSO | GVE_TXF_L4CSUM;
- pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
+ pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
- } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ } else if (likely(ip_summed == CHECKSUM_PARTIAL)) {
pkt_desc->pkt.type_flags = GVE_TXD_STD | GVE_TXF_L4CSUM;
- pkt_desc->pkt.l4_csum_offset = skb->csum_offset >> 1;
+ pkt_desc->pkt.l4_csum_offset = csum_offset >> 1;
pkt_desc->pkt.l4_hdr_offset = l4_hdr_offset >> 1;
} else {
pkt_desc->pkt.type_flags = GVE_TXD_STD;
@@ -390,21 +460,35 @@ static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
pkt_desc->pkt.l4_hdr_offset = 0;
}
pkt_desc->pkt.desc_cnt = desc_cnt;
- pkt_desc->pkt.len = cpu_to_be16(skb->len);
+ pkt_desc->pkt.len = cpu_to_be16(pkt_len);
pkt_desc->pkt.seg_len = cpu_to_be16(hlen);
pkt_desc->pkt.seg_addr = cpu_to_be64(addr);
}
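
As the comment at the top of gve_tx_fill_pkt_desc() notes, l4_csum_offset and l4_hdr_offset are programmed in units of 16-bit words, which is what the ">> 1" on the byte offsets implements. A trivial standalone illustration (the byte offsets are example values only):

#include <stdint.h>
#include <stdio.h>

/* Convert a byte offset to the 16-bit-word units the descriptor expects. */
static uint8_t bytes_to_u16_words(uint16_t byte_offset)
{
	return (uint8_t)(byte_offset >> 1);
}

int main(void)
{
	/* TCP checksum field at byte 16 of the L4 header -> 8 words */
	printf("csum offset: %u words\n", bytes_to_u16_words(16));
	/* L4 header at byte 34 (14-byte Ethernet + 20-byte IPv4) -> 17 words */
	printf("l4 hdr offset: %u words\n", bytes_to_u16_words(34));
	return 0;
}
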
+static void gve_tx_fill_mtd_desc(union gve_tx_desc *mtd_desc,
+ struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(mtd_desc->mtd) != sizeof(mtd_desc->pkt));
+
+ mtd_desc->mtd.type_flags = GVE_TXD_MTD | GVE_MTD_SUBTYPE_PATH;
+ mtd_desc->mtd.path_state = GVE_MTD_PATH_STATE_DEFAULT |
+ GVE_MTD_PATH_HASH_L4;
+ mtd_desc->mtd.path_hash = cpu_to_be32(skb->hash);
+ mtd_desc->mtd.reserved0 = 0;
+ mtd_desc->mtd.reserved1 = 0;
+}
+
static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
- struct sk_buff *skb, bool is_gso,
+ u16 l3_offset, u16 gso_size,
+ bool is_gso_v6, bool is_gso,
u16 len, u64 addr)
{
seg_desc->seg.type_flags = GVE_TXD_SEG;
if (is_gso) {
- if (skb_is_gso_v6(skb))
+ if (is_gso_v6)
seg_desc->seg.type_flags |= GVE_TXSF_IPV6;
- seg_desc->seg.l3_offset = skb_network_offset(skb) >> 1;
- seg_desc->seg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ seg_desc->seg.l3_offset = l3_offset >> 1;
+ seg_desc->seg.mss = cpu_to_be16(gso_size);
}
seg_desc->seg.seg_len = cpu_to_be16(len);
seg_desc->seg.seg_addr = cpu_to_be64(addr);
@@ -426,6 +510,7 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
union gve_tx_desc *pkt_desc, *seg_desc;
struct gve_tx_buffer_state *info;
+ int mtd_desc_nr = !!skb->l4_hash;
bool is_gso = skb_is_gso(skb);
u32 idx = tx->req & tx->mask;
int payload_iov = 2;
@@ -437,13 +522,11 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
pkt_desc = &tx->desc[idx];
l4_hdr_offset = skb_checksum_start_offset(skb);
- /* If the skb is gso, then we want the tcp header in the first segment
- * otherwise we want the linear portion of the skb (which will contain
- * the checksum because skb->csum_start and skb->csum_offset are given
- * relative to skb->head) in the first segment.
+	/* If the skb is gso, then we want the tcp header alone in the first segment;
+	 * otherwise we want the minimum required by the gVNIC spec.
*/
hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) :
- skb_headlen(skb);
+ min_t(int, GVE_GQ_TX_MIN_PKT_DESC_BYTES, skb->len);
info->skb = skb;
/* We don't want to split the header, so if necessary, pad to the end
@@ -456,9 +539,10 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
payload_nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, skb->len - hlen,
&info->iov[payload_iov]);
- gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
- 1 + payload_nfrags, hlen,
- info->iov[hdr_nfrags - 1].iov_offset);
+ gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
+ is_gso, l4_hdr_offset,
+ 1 + mtd_desc_nr + payload_nfrags, hlen,
+ info->iov[hdr_nfrags - 1].iov_offset, skb->len);
skb_copy_bits(skb, 0,
tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
@@ -468,11 +552,18 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
info->iov[hdr_nfrags - 1].iov_len);
copy_offset = hlen;
+ if (mtd_desc_nr) {
+ next_idx = (tx->req + 1) & tx->mask;
+ gve_tx_fill_mtd_desc(&tx->desc[next_idx], skb);
+ }
+
for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
- next_idx = (tx->req + 1 + i - payload_iov) & tx->mask;
+ next_idx = (tx->req + 1 + mtd_desc_nr + i - payload_iov) & tx->mask;
seg_desc = &tx->desc[next_idx];
- gve_tx_fill_seg_desc(seg_desc, skb, is_gso,
+ gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
+ skb_shinfo(skb)->gso_size,
+ skb_is_gso_v6(skb), is_gso,
info->iov[i].iov_len,
info->iov[i].iov_offset);
@@ -485,16 +576,17 @@ static int gve_tx_add_skb_copy(struct gve_priv *priv, struct gve_tx_ring *tx, st
copy_offset += info->iov[i].iov_len;
}
- return 1 + payload_nfrags;
+ return 1 + mtd_desc_nr + payload_nfrags;
}
static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
struct sk_buff *skb)
{
const struct skb_shared_info *shinfo = skb_shinfo(skb);
- int hlen, payload_nfrags, l4_hdr_offset;
- union gve_tx_desc *pkt_desc, *seg_desc;
+ int hlen, num_descriptors, l4_hdr_offset;
+ union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc;
struct gve_tx_buffer_state *info;
+ int mtd_desc_nr = !!skb->l4_hash;
bool is_gso = skb_is_gso(skb);
u32 idx = tx->req & tx->mask;
u64 addr;
@@ -523,23 +615,33 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
dma_unmap_len_set(info, len, len);
dma_unmap_addr_set(info, dma, addr);
- payload_nfrags = shinfo->nr_frags;
+ num_descriptors = 1 + shinfo->nr_frags;
+ if (hlen < len)
+ num_descriptors++;
+ if (mtd_desc_nr)
+ num_descriptors++;
+
+ gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
+ is_gso, l4_hdr_offset,
+ num_descriptors, hlen, addr, skb->len);
+
+ if (mtd_desc_nr) {
+ idx = (idx + 1) & tx->mask;
+ mtd_desc = &tx->desc[idx];
+ gve_tx_fill_mtd_desc(mtd_desc, skb);
+ }
+
if (hlen < len) {
/* For gso the rest of the linear portion of the skb needs to
* be in its own descriptor.
*/
- payload_nfrags++;
- gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
- 1 + payload_nfrags, hlen, addr);
-
len -= hlen;
addr += hlen;
- idx = (tx->req + 1) & tx->mask;
+ idx = (idx + 1) & tx->mask;
seg_desc = &tx->desc[idx];
- gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
- } else {
- gve_tx_fill_pkt_desc(pkt_desc, skb, is_gso, l4_hdr_offset,
- 1 + payload_nfrags, hlen, addr);
+ gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
+ skb_shinfo(skb)->gso_size,
+ skb_is_gso_v6(skb), is_gso, len, addr);
}
for (i = 0; i < shinfo->nr_frags; i++) {
@@ -557,14 +659,19 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
dma_unmap_len_set(&tx->info[idx], len, len);
dma_unmap_addr_set(&tx->info[idx], dma, addr);
- gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
+ gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
+ skb_shinfo(skb)->gso_size,
+ skb_is_gso_v6(skb), is_gso, len, addr);
}
- return 1 + payload_nfrags;
+ return num_descriptors;
unmap_drop:
- i += (payload_nfrags == shinfo->nr_frags ? 1 : 2);
+ i += num_descriptors - shinfo->nr_frags;
while (i--) {
+ /* Skip metadata descriptor, if set */
+ if (i == 1 && mtd_desc_nr == 1)
+ continue;
idx--;
gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
}
@@ -615,6 +722,103 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static int gve_tx_fill_xdp(struct gve_priv *priv, struct gve_tx_ring *tx,
+ void *data, int len, void *frame_p, bool is_xsk)
+{
+ int pad, nfrags, ndescs, iovi, offset;
+ struct gve_tx_buffer_state *info;
+ u32 reqi = tx->req;
+
+ pad = gve_tx_fifo_pad_alloc_one_frag(&tx->tx_fifo, len);
+ if (pad >= GVE_GQ_TX_MIN_PKT_DESC_BYTES)
+ pad = 0;
+ info = &tx->info[reqi & tx->mask];
+ info->xdp_frame = frame_p;
+ info->xdp.size = len;
+ info->xdp.is_xsk = is_xsk;
+
+ nfrags = gve_tx_alloc_fifo(&tx->tx_fifo, pad + len,
+ &info->iov[0]);
+ iovi = pad > 0;
+ ndescs = nfrags - iovi;
+ offset = 0;
+
+ while (iovi < nfrags) {
+ if (!offset)
+ gve_tx_fill_pkt_desc(&tx->desc[reqi & tx->mask], 0,
+ CHECKSUM_NONE, false, 0, ndescs,
+ info->iov[iovi].iov_len,
+ info->iov[iovi].iov_offset, len);
+ else
+ gve_tx_fill_seg_desc(&tx->desc[reqi & tx->mask],
+ 0, 0, false, false,
+ info->iov[iovi].iov_len,
+ info->iov[iovi].iov_offset);
+
+ memcpy(tx->tx_fifo.base + info->iov[iovi].iov_offset,
+ data + offset, info->iov[iovi].iov_len);
+ gve_dma_sync_for_device(&priv->pdev->dev,
+ tx->tx_fifo.qpl->page_buses,
+ info->iov[iovi].iov_offset,
+ info->iov[iovi].iov_len);
+ offset += info->iov[iovi].iov_len;
+ iovi++;
+ reqi++;
+ }
+
+ return ndescs;
+}
+
+int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_tx_ring *tx;
+ int i, err = 0, qid;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ qid = gve_xdp_tx_queue_id(priv,
+ smp_processor_id() % priv->num_xdp_queues);
+
+ tx = &priv->tx[qid];
+
+ spin_lock(&tx->xdp_lock);
+ for (i = 0; i < n; i++) {
+ err = gve_xdp_xmit_one(priv, tx, frames[i]->data,
+ frames[i]->len, frames[i]);
+ if (err)
+ break;
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
+
+ spin_unlock(&tx->xdp_lock);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xmit += n;
+ tx->xdp_xmit_errors += n - i;
+ u64_stats_update_end(&tx->statss);
+
+ return i ? i : err;
+}
+
+int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
+ void *data, int len, void *frame_p)
+{
+ int nsegs;
+
+ if (!gve_can_tx(tx, len + GVE_GQ_TX_MIN_PKT_DESC_BYTES - 1))
+ return -EBUSY;
+
+ nsegs = gve_tx_fill_xdp(priv, tx, data, len, frame_p, false);
+ tx->req += nsegs;
+
+ return 0;
+}
+
#define GVE_TX_START_THRESH PAGE_SIZE
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
@@ -624,8 +828,8 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u64 pkts = 0, bytes = 0;
size_t space_freed = 0;
struct sk_buff *skb;
- int i, j;
u32 idx;
+ int j;
for (j = 0; j < to_do; j++) {
idx = tx->done & tx->mask;
@@ -647,12 +851,7 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
dev_consume_skb_any(skb);
if (tx->raw_addressing)
continue;
- /* FIFO free */
- for (i = 0; i < ARRAY_SIZE(info->iov); i++) {
- space_freed += info->iov[i].iov_len + info->iov[i].iov_padding;
- info->iov[i].iov_len = 0;
- info->iov[i].iov_padding = 0;
- }
+ space_freed += gve_tx_clear_buffer_state(info);
}
}
@@ -687,6 +886,70 @@ u32 gve_tx_load_event_counter(struct gve_priv *priv,
return be32_to_cpu(counter);
}
+static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
+ int budget)
+{
+ struct xdp_desc desc;
+ int sent = 0, nsegs;
+ void *data;
+
+ spin_lock(&tx->xdp_lock);
+ while (sent < budget) {
+ if (!gve_can_tx(tx, GVE_TX_START_THRESH))
+ goto out;
+
+ if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {
+ tx->xdp_xsk_done = tx->xdp_xsk_wakeup;
+ goto out;
+ }
+
+ data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);
+ nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true);
+ tx->req += nsegs;
+ sent++;
+ }
+out:
+ if (sent > 0) {
+ gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
+ xsk_tx_release(tx->xsk_pool);
+ }
+ spin_unlock(&tx->xdp_lock);
+ return sent;
+}
+
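
gve_xsk_tx() above drains the AF_XDP TX queue in batches under xdp_lock: it stops at the budget, a full descriptor ring, or an empty user queue, and rings the doorbell once for the whole batch rather than once per frame. A standalone model of that batching pattern (no real ring; all names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool ring_has_room(int in_flight, int ring_size)
{
	return in_flight < ring_size;
}

/* Send up to budget frames, then ring the doorbell once if anything was sent. */
static int xsk_tx_batch(int budget, int ring_size, int pending,
			int *in_flight, int *doorbells)
{
	int sent = 0;

	while (sent < budget && pending > 0 && ring_has_room(*in_flight, ring_size)) {
		(*in_flight)++;	/* fill descriptors for one frame */
		pending--;
		sent++;
	}
	if (sent > 0)
		(*doorbells)++;	/* one doorbell write covers the batch */
	return sent;
}

int main(void)
{
	int in_flight = 0, doorbells = 0;
	int sent = xsk_tx_batch(64, 256, 10, &in_flight, &doorbells);

	printf("sent=%d doorbells=%d\n", sent, doorbells);
	return 0;
}
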
+bool gve_xdp_poll(struct gve_notify_block *block, int budget)
+{
+ struct gve_priv *priv = block->priv;
+ struct gve_tx_ring *tx = block->tx;
+ u32 nic_done;
+ bool repoll;
+ u32 to_do;
+
+ /* If budget is 0, do all the work */
+ if (budget == 0)
+ budget = INT_MAX;
+
+ /* Find out how much work there is to be done */
+ nic_done = gve_tx_load_event_counter(priv, tx);
+ to_do = min_t(u32, (nic_done - tx->done), budget);
+ gve_clean_xdp_done(priv, tx, to_do);
+ repoll = nic_done != tx->done;
+
+ if (tx->xsk_pool) {
+ int sent = gve_xsk_tx(priv, tx, budget);
+
+ u64_stats_update_begin(&tx->statss);
+ tx->xdp_xsk_sent += sent;
+ u64_stats_update_end(&tx->statss);
+ repoll |= (sent == budget);
+ if (xsk_uses_need_wakeup(tx->xsk_pool))
+ xsk_set_tx_need_wakeup(tx->xsk_pool);
+ }
+
+ /* If we still have work we want to repoll */
+ return repoll;
+}
+
bool gve_tx_poll(struct gve_notify_block *block, int budget)
{
struct gve_priv *priv = block->priv;
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index ec394d991668..b76143bfd594 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -386,7 +386,7 @@ static int gve_prep_tso(struct sk_buff *skb)
(__force __wsum)htonl(paylen));
/* Compute length of segmentation header. */
- header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ header_len = skb_tcp_all_headers(skb);
break;
default:
return -EINVAL;
@@ -598,9 +598,9 @@ static int gve_num_buffer_descs_needed(const struct sk_buff *skb)
*/
static bool gve_can_send_tso(const struct sk_buff *skb)
{
- const int header_len = skb_checksum_start_offset(skb) + tcp_hdrlen(skb);
const int max_bufs_per_seg = GVE_TX_MAX_DATA_DESCS - 1;
const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const int header_len = skb_tcp_all_headers(skb);
const int gso_size = shinfo->gso_size;
int cur_seg_num_bufs;
int cur_seg_size;
@@ -795,7 +795,7 @@ static void gve_handle_packet_completion(struct gve_priv *priv,
GVE_PACKET_STATE_PENDING_REINJECT_COMPL)) {
/* No outstanding miss completion but packet allocated
* implies packet receives a re-injection completion
- * without a a prior miss completion. Return without
+ * without a prior miss completion. Return without
* completing the packet.
*/
net_err_ratelimited("%s: Re-injection completion received without corresponding miss completion: %d\n",
@@ -953,12 +953,18 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
atomic_set_release(&tx->dqo_compl.hw_tx_head, tx_head);
} else if (type == GVE_COMPL_TYPE_DQO_PKT) {
u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
-
- gve_handle_packet_completion(priv, tx, !!napi,
- compl_tag,
- &pkt_compl_bytes,
- &pkt_compl_pkts,
- /*is_reinjection=*/false);
+ if (compl_tag & GVE_ALT_MISS_COMPL_BIT) {
+ compl_tag &= ~GVE_ALT_MISS_COMPL_BIT;
+ gve_handle_miss_completion(priv, tx, compl_tag,
+ &miss_compl_bytes,
+ &miss_compl_pkts);
+ } else {
+ gve_handle_packet_completion(priv, tx, !!napi,
+ compl_tag,
+ &pkt_compl_bytes,
+ &pkt_compl_pkts,
+ false);
+ }
} else if (type == GVE_COMPL_TYPE_DQO_MISS) {
u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);
@@ -972,7 +978,7 @@ int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
compl_tag,
&reinject_compl_bytes,
&reinject_compl_pkts,
- /*is_reinjection=*/true);
+ true);
}
tx->dqo_compl.head =
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index d57508bc4307..26e08d753270 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -49,35 +49,19 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
}
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
- struct gve_rx_slot_page_info *page_info, u16 len,
- u16 padding, struct gve_rx_ctx *ctx)
+ struct gve_rx_slot_page_info *page_info, u16 len)
{
- void *va = page_info->page_address + padding + page_info->page_offset;
- int skb_linear_offset = 0;
- bool set_protocol = false;
+ void *va = page_info->page_address + page_info->page_offset +
+ page_info->pad;
struct sk_buff *skb;
- if (ctx) {
- if (!ctx->skb_head)
- ctx->skb_head = napi_alloc_skb(napi, ctx->total_expected_size);
-
- if (unlikely(!ctx->skb_head))
- return NULL;
- skb = ctx->skb_head;
- skb_linear_offset = skb->len;
- set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
- } else {
- skb = napi_alloc_skb(napi, len);
-
- if (unlikely(!skb))
- return NULL;
- set_protocol = true;
- }
- __skb_put(skb, len);
- skb_copy_to_linear_data_offset(skb, skb_linear_offset, va, len);
+ skb = napi_alloc_skb(napi, len);
+ if (unlikely(!skb))
+ return NULL;
- if (set_protocol)
- skb->protocol = eth_type_trans(skb, dev);
+ __skb_put(skb, len);
+ skb_copy_to_linear_data_offset(skb, 0, va, len);
+ skb->protocol = eth_type_trans(skb, dev);
return skb;
}
diff --git a/drivers/net/ethernet/google/gve/gve_utils.h b/drivers/net/ethernet/google/gve/gve_utils.h
index 6d98e69fd3b8..324fd98a6112 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.h
+++ b/drivers/net/ethernet/google/gve/gve_utils.h
@@ -18,8 +18,7 @@ void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx);
void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
- struct gve_rx_slot_page_info *page_info, u16 len,
- u16 pad, struct gve_rx_ctx *ctx);
+ struct gve_rx_slot_page_info *page_info, u16 len);
/* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index c84ef494bd60..50c3f5d6611f 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -830,8 +830,8 @@ static int hip04_set_coalesce(struct net_device *netdev,
static void hip04_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
static const struct ethtool_ops hip04_ethtool_ops = {
@@ -990,7 +990,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
ndev->watchdog_timeo = TX_TIMEOUT;
ndev->priv_flags |= IFF_UNICAST_FLT;
ndev->irq = irq;
- netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, hip04_rx_poll);
hip04_reset_dreq(priv);
hip04_reset_ppe(priv);
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index a6c18b6527f9..ce2571c16e43 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -283,7 +283,7 @@ static int hisi_femac_rx(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&priv->napi, skb);
dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_bytes += len;
next:
pos = (pos + 1) % rxq->num;
if (rx_pkts_num >= limit)
@@ -852,7 +852,8 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
ndev->priv_flags |= IFF_UNICAST_FLT;
ndev->netdev_ops = &hisi_femac_netdev_ops;
ndev->ethtool_ops = &hisi_femac_ethtools_ops;
- netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT);
+ netif_napi_add_weight(ndev, &priv->napi, hisi_femac_poll,
+ FEMAC_POLL_WEIGHT);
hisi_femac_port_init(priv);
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index d7e62eca050f..f867e9531117 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -550,7 +550,7 @@ static int hix5hd2_rx(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&priv->napi, skb);
dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_bytes += len;
next:
pos = dma_ring_incr(pos, RX_DESC_NUM);
}
@@ -1243,7 +1243,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
if (ret)
goto out_phy_node;
- netif_napi_add(ndev, &priv->napi, hix5hd2_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, hix5hd2_poll);
if (HAS_CAP_TSO(priv->hw_cap)) {
ret = hix5hd2_init_sg_desc_queue(priv);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 00fafc0f8512..8a1027ad340d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -419,10 +419,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
hdev->cls_dev.release = hnae_release;
(void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
ret = device_register(&hdev->cls_dev);
- if (ret)
+ if (ret) {
+ put_device(&hdev->cls_dev);
return ret;
-
- __module_get(THIS_MODULE);
+ }
INIT_LIST_HEAD(&hdev->handle_list);
spin_lock_init(&hdev->lock);
@@ -443,13 +443,12 @@ EXPORT_SYMBOL(hnae_ae_register);
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
device_unregister(&hdev->cls_dev);
- module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);
static int __init hnae_init(void)
{
- hnae_class = class_create(THIS_MODULE, "hnae");
+ hnae_class = class_create("hnae");
return PTR_ERR_OR_ZERO(hnae_class);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 7edf8569514c..928d934cb21a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -1065,19 +1065,23 @@ int hns_mac_init(struct dsaf_device *dsaf_dev)
device_for_each_child_node(dsaf_dev->dev, child) {
ret = fwnode_property_read_u32(child, "reg", &port_id);
if (ret) {
+ fwnode_handle_put(child);
dev_err(dsaf_dev->dev,
"get reg fail, ret=%d!\n", ret);
return ret;
}
if (port_id >= max_port_num) {
+ fwnode_handle_put(child);
dev_err(dsaf_dev->dev,
"reg(%u) out of range!\n", port_id);
return -EINVAL;
}
mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
GFP_KERNEL);
- if (!mac_cb)
+ if (!mac_cb) {
+ fwnode_handle_put(child);
return -ENOMEM;
+ }
mac_cb->fw_port = child;
mac_cb->mac_id = (u8)port_id;
dsaf_dev->mac_cb[port_id] = mac_cb;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 740850b64aff..5df19c604d09 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -554,11 +554,11 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
argv4.package.count = 1;
argv4.package.elements = &obj_args;
- obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
- &hns_dsaf_acpi_dsm_guid, 0,
- HNS_OP_GET_PORT_TYPE_FUNC, &argv4);
-
- if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0,
+ HNS_OP_GET_PORT_TYPE_FUNC, &argv4,
+ ACPI_TYPE_INTEGER);
+ if (!obj)
return phy_if;
phy_if = obj->integer.value ?
@@ -601,11 +601,11 @@ static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
argv4.package.count = 1;
argv4.package.elements = &obj_args;
- obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
- &hns_dsaf_acpi_dsm_guid, 0,
- HNS_OP_GET_SFP_STAT_FUNC, &argv4);
-
- if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0,
+ HNS_OP_GET_SFP_STAT_FUNC, &argv4,
+ ACPI_TYPE_INTEGER);
+ if (!obj)
return -ENODEV;
*sfp_prsnt = obj->integer.value;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 22a463e15678..7cf10d1e2b31 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -31,8 +31,6 @@
#define HNS_BUFFER_SIZE_2048 2048
#define BD_MAX_SEND_SIZE 8191
-#define SKB_TMP_LEN(SKB) \
- (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
int send_sz, dma_addr_t dma, int frag_end,
@@ -94,7 +92,7 @@ static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
HNSV2_TXD_TSE_B, 1);
l4_len = tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- paylen = skb->len - SKB_TMP_LEN(skb);
+ paylen = skb->len - skb_tcp_all_headers(skb);
}
} else if (skb->protocol == htons(ETH_P_IPV6)) {
hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
@@ -108,7 +106,7 @@ static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
HNSV2_TXD_TSE_B, 1);
l4_len = tcp_hdrlen(skb);
mss = skb_shinfo(skb)->gso_size;
- paylen = skb->len - SKB_TMP_LEN(skb);
+ paylen = skb->len - skb_tcp_all_headers(skb);
}
}
desc->tx.ip_offset = ip_offset;
@@ -1782,7 +1780,7 @@ static int hns_nic_set_features(struct net_device *netdev,
priv->ops.fill_desc = fill_tso_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
/* The chip only support 7*4096 */
- netif_set_gso_max_size(netdev, 7 * 4096);
+ netif_set_tso_max_size(netdev, 7 * 4096);
} else {
priv->ops.fill_desc = fill_v2_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
@@ -2111,8 +2109,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
hns_nic_tx_fini_pro_v2;
- netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2124,8 +2121,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
hns_nic_rx_fini_pro_v2;
- netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
@@ -2168,7 +2164,7 @@ static void hns_nic_set_priv_ops(struct net_device *netdev)
priv->ops.fill_desc = fill_tso_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
/* This chip only support 7*4096 */
- netif_set_gso_max_size(netdev, 7 * 4096);
+ netif_set_tso_max_size(netdev, 7 * 4096);
} else {
priv->ops.fill_desc = fill_v2_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index ab7390225942..b54f3706fb97 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -644,18 +644,15 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
- strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
+ strscpy(drvinfo->version, HNAE_DRIVER_VERSION,
sizeof(drvinfo->version));
- drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
- strncpy(drvinfo->driver, HNAE_DRIVER_NAME, sizeof(drvinfo->driver));
- drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
+ strscpy(drvinfo->driver, HNAE_DRIVER_NAME, sizeof(drvinfo->driver));
- strncpy(drvinfo->bus_info, priv->dev->bus->name,
+ strscpy(drvinfo->bus_info, priv->dev->bus->name,
sizeof(drvinfo->bus_info));
- drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
- strncpy(drvinfo->fw_version, "N/A", ETHTOOL_FWVERS_LEN);
+ strscpy(drvinfo->fw_version, "N/A", ETHTOOL_FWVERS_LEN);
drvinfo->eedump_len = 0;
}
@@ -663,9 +660,13 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
* hns_get_ringparam - get ring parameter
* @net_dev: net device
* @param: ethtool parameter
+ * @kernel_param: ethtool external parameter
+ * @extack: netlink extended ACK report struct
*/
static void hns_get_ringparam(struct net_device *net_dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
@@ -883,8 +884,8 @@ static void hns_get_ethtool_stats(struct net_device *netdev,
p[21] = net_stats->rx_compressed;
p[22] = net_stats->tx_compressed;
- p[23] = netdev->rx_dropped.counter;
- p[24] = netdev->tx_dropped.counter;
+ p[23] = 0; /* was netdev->rx_dropped.counter */
+ p[24] = 0; /* was netdev->tx_dropped.counter */
p[25] = priv->tx_timeout_count;
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index 7aa2fac76c5e..6efea4662858 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -4,9 +4,9 @@
#
ccflags-y += -I$(srctree)/$(src)
-
-obj-$(CONFIG_HNS3) += hns3pf/
-obj-$(CONFIG_HNS3) += hns3vf/
+ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3pf
+ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3vf
+ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3_common
obj-$(CONFIG_HNS3) += hnae3.o
@@ -14,3 +14,16 @@ obj-$(CONFIG_HNS3_ENET) += hns3.o
hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
+
+obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
+
+hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o \
+ hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
+
+obj-$(CONFIG_HNS3_HCLGE) += hclge.o
+hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o \
+ hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \
+ hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
+
+
+hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index c2bd2584201f..abcd7877f7d2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -46,6 +46,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */
HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is unintializing */
HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */
+ HCLGE_MBX_GET_RING_VECTOR_MAP, /* (VF -> PF) get ring-to-vector map */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
@@ -80,6 +81,9 @@ enum hclge_mbx_tbl_cfg_subcode {
#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
#define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4
+#define HCLGE_RESET_SCHED_TIMEOUT (3 * HZ)
+#define HCLGE_MBX_SCHED_TIMEOUT (HZ / 2)
+
struct hclge_ring_chain_param {
u8 ring_type;
u8 tqp_index;
@@ -89,8 +93,8 @@ struct hclge_ring_chain_param {
struct hclge_basic_info {
u8 hw_tc_map;
u8 rsv;
- u16 mbx_api_version;
- u32 pf_caps;
+ __le16 mbx_api_version;
+ __le32 pf_caps;
};
struct hclgevf_mbx_resp_status {
@@ -131,11 +135,20 @@ struct hclge_vf_to_pf_msg {
};
struct hclge_pf_to_vf_msg {
- u16 code;
- u16 vf_mbx_msg_code;
- u16 vf_mbx_msg_subcode;
- u16 resp_status;
- u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ __le16 code;
+ union {
+ /* used for mbx response */
+ struct {
+ __le16 vf_mbx_msg_code;
+ __le16 vf_mbx_msg_subcode;
+ __le16 resp_status;
+ u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ };
+ /* used for general mbx */
+ struct {
+ u8 msg_data[HCLGE_MBX_MAX_MSG_SIZE];
+ };
+ };
};
struct hclge_mbx_vf_to_pf_cmd {
@@ -145,7 +158,7 @@ struct hclge_mbx_vf_to_pf_cmd {
u8 rsv1[1];
u8 msg_len;
u8 rsv2;
- u16 match_id;
+ __le16 match_id;
struct hclge_vf_to_pf_msg msg;
};
@@ -156,7 +169,7 @@ struct hclge_mbx_pf_to_vf_cmd {
u8 rsv[3];
u8 msg_len;
u8 rsv1;
- u16 match_id;
+ __le16 match_id;
struct hclge_pf_to_vf_msg msg;
};
@@ -166,6 +179,49 @@ struct hclge_vf_rst_cmd {
u8 rsv[22];
};
+#pragma pack(1)
+struct hclge_mbx_link_status {
+ __le16 link_status;
+ __le32 speed;
+ __le16 duplex;
+ u8 flag;
+};
+
+struct hclge_mbx_link_mode {
+ __le16 idx;
+ __le64 link_mode;
+};
+
+struct hclge_mbx_port_base_vlan {
+ __le16 state;
+ __le16 vlan_proto;
+ __le16 qos;
+ __le16 vlan_tag;
+};
+
+struct hclge_mbx_vf_queue_info {
+ __le16 num_tqps;
+ __le16 rss_size;
+ __le16 rx_buf_len;
+};
+
+struct hclge_mbx_vf_queue_depth {
+ __le16 num_tx_desc;
+ __le16 num_rx_desc;
+};
+
+struct hclge_mbx_vlan_filter {
+ u8 is_kill;
+ __le16 vlan_id;
+ __le16 proto;
+};
+
+struct hclge_mbx_mtu_info {
+ __le32 mtu;
+};
+
+#pragma pack()
+
/* used by VF to store the received Async responses from PF */
struct hclgevf_mbx_arq_ring {
#define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8
@@ -174,9 +230,20 @@ struct hclgevf_mbx_arq_ring {
u32 head;
u32 tail;
atomic_t count;
- u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
+ __le16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
};
+struct hclge_dev;
+
+#define HCLGE_MBX_OPCODE_MAX 256
+struct hclge_mbx_ops_param {
+ struct hclge_vport *vport;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclge_respond_to_vf_msg *resp_msg;
+};
+
+typedef int (*hclge_mbx_ops_fn)(struct hclge_mbx_ops_param *param);
+
#define hclge_mbx_ring_ptr_move_crq(crq) \
(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
#define hclge_mbx_tail_ptr_move_arq(arq) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 63f5abcc6bf4..9c9c72dc57e0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -32,6 +32,7 @@
#include <linux/pkt_sched.h>
#include <linux/types.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#define HNAE3_MOD_VERSION "1.0"
@@ -96,13 +97,17 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
+ HNAE3_DEV_SUPPORT_CQ_B,
+ HNAE3_DEV_SUPPORT_FEC_STATS_B,
+ HNAE3_DEV_SUPPORT_LANE_NUM_B,
+ HNAE3_DEV_SUPPORT_WOL_B,
};
-#define hnae3_dev_fd_supported(hdev) \
- test_bit(HNAE3_DEV_SUPPORT_FD_B, (hdev)->ae_dev->caps)
+#define hnae3_ae_dev_fd_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_FD_B, (ae_dev)->caps)
-#define hnae3_dev_gro_supported(hdev) \
- test_bit(HNAE3_DEV_SUPPORT_GRO_B, (hdev)->ae_dev->caps)
+#define hnae3_ae_dev_gro_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_GRO_B, (ae_dev)->caps)
#define hnae3_dev_fec_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_FEC_B, (hdev)->ae_dev->caps)
@@ -155,6 +160,18 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps)
+#define hnae3_ae_dev_cq_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_fec_stats_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_FEC_STATS_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_lane_num_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_LANE_NUM_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_wol_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_WOL_B, (ae_dev)->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@@ -167,6 +184,7 @@ struct hnae3_handle;
struct hnae3_queue {
void __iomem *io_base;
+ void __iomem *mem_base;
struct hnae3_ae_algo *ae_algo;
struct hnae3_handle *handle;
int tqp_index; /* index in a handle */
@@ -182,6 +200,7 @@ struct hns3_mac_stats {
/* hnae3 loop mode */
enum hnae3_loop {
+ HNAE3_LOOP_EXTERNAL,
HNAE3_LOOP_APP,
HNAE3_LOOP_SERIAL_SERDES,
HNAE3_LOOP_PARALLEL_SERDES,
@@ -218,6 +237,8 @@ enum hnae3_fec_mode {
HNAE3_FEC_AUTO = 0,
HNAE3_FEC_BASER,
HNAE3_FEC_RS,
+ HNAE3_FEC_LLRS,
+ HNAE3_FEC_NONE,
HNAE3_FEC_USER_DEF,
};
@@ -265,6 +286,7 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_TC_SCH_INFO,
HNAE3_DBG_CMD_QOS_PAUSE_CFG,
HNAE3_DBG_CMD_QOS_PRI_MAP,
+ HNAE3_DBG_CMD_QOS_DSCP_MAP,
HNAE3_DBG_CMD_QOS_BUF_CFG,
HNAE3_DBG_CMD_DEV_INFO,
HNAE3_DBG_CMD_TX_BD,
@@ -303,6 +325,11 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_UNKNOWN,
};
+enum hnae3_tc_map_mode {
+ HNAE3_TC_MAP_MODE_PRIO,
+ HNAE3_TC_MAP_MODE_DSCP,
+};
+
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -536,6 +563,12 @@ struct hnae3_ae_dev {
* Get 1588 rx hwstamp
* get_ts_info
* Get phc info
+ * clean_vf_config
+ *	Clean residual VF info after SRIOV is disabled
+ * get_wol
+ *	Get wake-on-LAN info
+ * set_wol
+ *	Configure wake-on-LAN
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -553,14 +586,17 @@ struct hnae3_ae_ops {
void (*client_stop)(struct hnae3_handle *handle);
int (*get_status)(struct hnae3_handle *handle);
void (*get_ksettings_an_result)(struct hnae3_handle *handle,
- u8 *auto_neg, u32 *speed, u8 *duplex);
+ u8 *auto_neg, u32 *speed, u8 *duplex,
+ u32 *lane_num);
int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed,
- u8 duplex);
+ u8 duplex, u8 lane_num);
void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type,
u8 *module_type);
int (*check_port_speed)(struct hnae3_handle *handle, u32 speed);
+ void (*get_fec_stats)(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats);
void (*get_fec)(struct hnae3_handle *handle, u8 *fec_ability,
u8 *fec_mode);
int (*set_fec)(struct hnae3_handle *handle, u32 fec_mode);
@@ -729,6 +765,13 @@ struct hnae3_ae_ops {
struct ethtool_ts_info *info);
int (*get_link_diagnosis_info)(struct hnae3_handle *handle,
u32 *status_code);
+ void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs);
+ int (*get_dscp_prio)(struct hnae3_handle *handle, u8 dscp,
+ u8 *tc_map_mode, u8 *priority);
+ void (*get_wol)(struct hnae3_handle *handle,
+ struct ethtool_wolinfo *wol);
+ int (*set_wol)(struct hnae3_handle *handle,
+ struct ethtool_wolinfo *wol);
};
struct hnae3_dcb_ops {
@@ -737,6 +780,8 @@ struct hnae3_dcb_ops {
int (*ieee_setets)(struct hnae3_handle *, struct ieee_ets *);
int (*ieee_getpfc)(struct hnae3_handle *, struct ieee_pfc *);
int (*ieee_setpfc)(struct hnae3_handle *, struct ieee_pfc *);
+ int (*ieee_setapp)(struct hnae3_handle *h, struct dcb_app *app);
+ int (*ieee_delapp)(struct hnae3_handle *h, struct dcb_app *app);
/* DCBX configuration */
u8 (*getdcbx)(struct hnae3_handle *);
@@ -761,10 +806,13 @@ struct hnae3_tc_info {
u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
u16 tqp_count[HNAE3_MAX_TC];
u16 tqp_offset[HNAE3_MAX_TC];
+ u8 max_tc; /* Total number of TCs */
u8 num_tc; /* Total number of enabled TCs */
bool mqprio_active;
};
+#define HNAE3_MAX_DSCP 64
+#define HNAE3_PRIO_ID_INVALID 0xff
struct hnae3_knic_private_info {
struct net_device *netdev; /* Set by KNIC client when init instance */
u16 rss_size; /* Allocated RSS queues */
@@ -775,13 +823,15 @@ struct hnae3_knic_private_info {
u32 tx_spare_buf_size;
struct hnae3_tc_info tc_info;
+ u8 tc_map_mode;
+ u8 dscp_app_cnt;
+ u8 dscp_prio[HNAE3_MAX_DSCP];
u16 num_tqps; /* total number of TQPs in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
const struct hnae3_dcb_ops *dcb_ops;
u16 int_rl_setting;
- enum pkt_hash_types rss_type;
void __iomem *io_base;
};
@@ -806,6 +856,7 @@ struct hnae3_roce_private_info {
#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2)
#define HNAE3_SUPPORT_VF BIT(3)
#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4)
+#define HNAE3_SUPPORT_EXTERNAL_LOOPBACK BIT(5)
#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */
#define HNAE3_USER_MPE BIT(1) /* multicast promisc enabled by user */
@@ -841,6 +892,7 @@ struct hnae3_handle {
struct dentry *hnae3_dbgfs;
/* protects concurrent contention between debugfs commands */
struct mutex dbgfs_lock;
+ char **dbgfs_buf;
/* Network interface message level enabled bits */
u32 msg_enable;
@@ -861,6 +913,20 @@ struct hnae3_handle {
#define hnae3_get_bit(origin, shift) \
hnae3_get_field(origin, 0x1 << (shift), shift)
+#define HNAE3_FORMAT_MAC_ADDR_LEN 18
+#define HNAE3_FORMAT_MAC_ADDR_OFFSET_0 0
+#define HNAE3_FORMAT_MAC_ADDR_OFFSET_4 4
+#define HNAE3_FORMAT_MAC_ADDR_OFFSET_5 5
+
+static inline void hnae3_format_mac_addr(char *format_mac_addr,
+ const u8 *mac_addr)
+{
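+	/* only the first and the last two bytes of the MAC address are
+	 * printed; the middle bytes are masked out
+	 */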
+ snprintf(format_mac_addr, HNAE3_FORMAT_MAC_ADDR_LEN, "%02x:**:**:**:%02x:%02x",
+ mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_0],
+ mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_4],
+ mac_addr[HNAE3_FORMAT_MAC_ADDR_OFFSET_5]);
+}
+
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
new file mode 100644
index 000000000000..cbbab5b2b402
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+
+static void hclge_comm_cmd_config_regs(struct hclge_comm_hw *hw,
+ struct hclge_comm_cmq_ring *ring)
+{
+ dma_addr_t dma = ring->desc_dma_addr;
+ u32 reg_val;
+
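+	/* program the ring base address, depth and head/tail pointers; for
+	 * the CSQ the SW reset ready bit of the depth register is preserved
+	 */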
+ if (ring->ring_type == HCLGE_COMM_TYPE_CSQ) {
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ lower_32_bits(dma));
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ upper_32_bits(dma));
+ reg_val = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
+ reg_val &= HCLGE_COMM_NIC_SW_RST_RDY;
+ reg_val |= ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
+ } else {
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ lower_32_bits(dma));
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ upper_32_bits(dma));
+ reg_val = ring->desc_num >> HCLGE_COMM_NIC_CMQ_DESC_NUM_S;
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, reg_val);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
+ }
+}
+
+void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw)
+{
+ hclge_comm_cmd_config_regs(hw, &hw->cmq.csq);
+ hclge_comm_cmd_config_regs(hw, &hw->cmq.crq);
+}
+
+void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
+{
+ desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
+ HCLGE_COMM_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
+ else
+ desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR);
+}
+
+static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
+ bool is_pf)
+{
+ set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
+ if (is_pf) {
+ set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
+ }
+}
+
+void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
+ enum hclge_opcode_type opcode,
+ bool is_read)
+{
+ memset((void *)desc, 0, sizeof(struct hclge_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
+ HCLGE_COMM_CMD_FLAG_IN);
+
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
+}
+
+int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, bool en)
+{
+ struct hclge_comm_firmware_compat_cmd *req;
+ struct hclge_desc desc;
+ u32 compat = 0;
+
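+	/* when en is false the command is sent with an all-zero compat word,
+	 * which switches the compatible features off again
+	 */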
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
+
+ if (en) {
+ req = (struct hclge_comm_firmware_compat_cmd *)desc.data;
+
+ hnae3_set_bit(compat, HCLGE_COMM_LINK_EVENT_REPORT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_NCSI_ERROR_REPORT_EN_B, 1);
+ if (hclge_comm_dev_phy_imp_supported(ae_dev))
+ hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_LLRS_FEC_EN_B, 1);
+
+ req->compat = cpu_to_le32(compat);
+ }
+
+ return hclge_comm_cmd_send(hw, &desc, 1);
+}
+
+void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclge_desc);
+
+ if (!ring->desc)
+ return;
+
+ dma_free_coherent(&ring->pdev->dev, size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+}
+
+static int hclge_comm_alloc_cmd_desc(struct hclge_comm_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclge_desc);
+
+ ring->desc = dma_alloc_coherent(&ring->pdev->dev,
+ size, &ring->desc_dma_addr, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static __le32 hclge_comm_build_api_caps(void)
+{
+ u32 api_caps = 0;
+
+ hnae3_set_bit(api_caps, HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B, 1);
+
+ return cpu_to_le32(api_caps);
+}
+
+static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
+ {HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
+ {HCLGE_COMM_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
+ {HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
+ {HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
+ {HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
+ {HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
+ {HCLGE_COMM_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
+ {HCLGE_COMM_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
+ {HCLGE_COMM_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
+ {HCLGE_COMM_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
+ {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
+ {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
+ {HCLGE_COMM_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
+ {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+ {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B,
+ HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
+ {HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
+ {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
+ {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
+ {HCLGE_COMM_CAP_FD_B, HNAE3_DEV_SUPPORT_FD_B},
+ {HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B},
+ {HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
+ {HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B},
+};
+
+static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
+ {HCLGE_COMM_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
+ {HCLGE_COMM_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
+ {HCLGE_COMM_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
+ {HCLGE_COMM_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
+ {HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
+ {HCLGE_COMM_CAP_QB_B, HNAE3_DEV_SUPPORT_QB_B},
+ {HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
+ {HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
+ {HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
+ {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
+};
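+	/* tc_size holds log2 of rss_size rounded up to a power of two */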
+
+static void
+hclge_comm_parse_capability(struct hnae3_ae_dev *ae_dev, bool is_pf,
+ struct hclge_comm_query_version_cmd *cmd)
+{
+ const struct hclge_comm_caps_bit_map *caps_map =
+ is_pf ? hclge_pf_cmd_caps : hclge_vf_cmd_caps;
+ u32 size = is_pf ? ARRAY_SIZE(hclge_pf_cmd_caps) :
+ ARRAY_SIZE(hclge_vf_cmd_caps);
+ u32 caps, i;
+
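+	/* only the first capability word is parsed for now */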
+ caps = __le32_to_cpu(cmd->caps[0]);
+ for (i = 0; i < size; i++)
+ if (hnae3_get_bit(caps, caps_map[i].imp_bit))
+ set_bit(caps_map[i].local_bit, ae_dev->caps);
+}
+
+int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type)
+{
+ struct hclge_comm_cmq_ring *ring =
+ (ring_type == HCLGE_COMM_TYPE_CSQ) ? &hw->cmq.csq :
+ &hw->cmq.crq;
+ int ret;
+
+ ring->ring_type = ring_type;
+
+ ret = hclge_comm_alloc_cmd_desc(ring);
+ if (ret)
+ dev_err(&ring->pdev->dev, "descriptor %s alloc error %d\n",
+ (ring_type == HCLGE_COMM_TYPE_CSQ) ? "CSQ" : "CRQ",
+ ret);
+
+ return ret;
+}
+
+int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf)
+{
+ struct hclge_comm_query_version_cmd *resp;
+ struct hclge_desc desc;
+ int ret;
+
+	hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, true);
+ resp = (struct hclge_comm_query_version_cmd *)desc.data;
+ resp->api_caps = hclge_comm_build_api_caps();
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ *fw_version = le32_to_cpu(resp->firmware);
+
+ ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
+ HNAE3_PCI_REVISION_BIT_SIZE;
+ ae_dev->dev_version |= ae_dev->pdev->revision;
+
+ if (ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
+ hclge_comm_set_default_capability(ae_dev, is_pf);
+ return 0;
+ }
+
+ hclge_comm_parse_capability(ae_dev, is_pf, resp);
+
+ return ret;
+}
+
+static const u16 spec_opcode[] = { HCLGE_OPC_STATS_64_BIT,
+ HCLGE_OPC_STATS_32_BIT,
+ HCLGE_OPC_STATS_MAC,
+ HCLGE_OPC_STATS_MAC_ALL,
+ HCLGE_OPC_QUERY_32_BIT_REG,
+ HCLGE_OPC_QUERY_64_BIT_REG,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
+ HCLGE_QUERY_ALL_ERR_INFO };
+
+static bool hclge_comm_is_special_opcode(u16 opcode)
+{
+ /* these commands have several descriptors,
+ * and use the first one to save opcode and return value
+ */
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(spec_opcode); i++)
+ if (spec_opcode[i] == opcode)
+ return true;
+
+ return false;
+}
+
+static int hclge_comm_ring_space(struct hclge_comm_cmq_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+ int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
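+	/* one descriptor is kept unused to tell a full ring from an empty one */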
+ return ring->desc_num - used - 1;
+}
+
+static void hclge_comm_cmd_copy_desc(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc, int num)
+{
+ struct hclge_desc *desc_to_use;
+ int handle = 0;
+
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
+ }
+}
+
+static int hclge_comm_is_valid_csq_clean_head(struct hclge_comm_cmq_ring *ring,
+ int head)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+
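+	/* the head written back by hardware is valid only if it lies between
+	 * next_to_clean and next_to_use, taking ring wrap-around into account
+	 */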
+ if (ntu > ntc)
+ return head >= ntc && head <= ntu;
+
+ return head >= ntc || head <= ntu;
+}
+
+static int hclge_comm_cmd_csq_clean(struct hclge_comm_hw *hw)
+{
+ struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
+ int clean;
+ u32 head;
+
+ head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
+ rmb(); /* Make sure head is ready before touch any data */
+
+ if (!hclge_comm_is_valid_csq_clean_head(csq, head)) {
+ dev_warn(&hw->cmq.csq.pdev->dev, "wrong cmd head (%u, %d-%d)\n",
+ head, csq->next_to_use, csq->next_to_clean);
+ dev_warn(&hw->cmq.csq.pdev->dev,
+ "Disabling any further commands to IMP firmware\n");
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+ dev_warn(&hw->cmq.csq.pdev->dev,
+ "IMP firmware watchdog reset soon expected!\n");
+ return -EIO;
+ }
+
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
+ return clean;
+}
+
+static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
+{
+	u32 head = hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
+
+	return head == hw->cmq.csq.next_to_use;
+}
+
+static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
+ bool *is_completed)
+{
+ u32 timeout = 0;
+
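+	/* poll the CSQ head register until the firmware has consumed all
+	 * descriptors or tx_timeout iterations of 1 us each have elapsed
+	 */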
+ do {
+ if (hclge_comm_cmd_csq_done(hw)) {
+ *is_completed = true;
+ break;
+ }
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+}
+
+static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
+{
+ struct hclge_comm_errcode hclge_comm_cmd_errcode[] = {
+ { HCLGE_COMM_CMD_EXEC_SUCCESS, 0 },
+ { HCLGE_COMM_CMD_NO_AUTH, -EPERM },
+ { HCLGE_COMM_CMD_NOT_SUPPORTED, -EOPNOTSUPP },
+ { HCLGE_COMM_CMD_QUEUE_FULL, -EXFULL },
+ { HCLGE_COMM_CMD_NEXT_ERR, -ENOSR },
+ { HCLGE_COMM_CMD_UNEXE_ERR, -ENOTBLK },
+ { HCLGE_COMM_CMD_PARA_ERR, -EINVAL },
+ { HCLGE_COMM_CMD_RESULT_ERR, -ERANGE },
+ { HCLGE_COMM_CMD_TIMEOUT, -ETIME },
+ { HCLGE_COMM_CMD_HILINK_ERR, -ENOLINK },
+ { HCLGE_COMM_CMD_QUEUE_ILLEGAL, -ENXIO },
+ { HCLGE_COMM_CMD_INVALID, -EBADR },
+ };
+ u32 errcode_count = ARRAY_SIZE(hclge_comm_cmd_errcode);
+ u32 i;
+
+ for (i = 0; i < errcode_count; i++)
+ if (hclge_comm_cmd_errcode[i].imp_errcode == desc_ret)
+ return hclge_comm_cmd_errcode[i].common_errno;
+
+ return -EIO;
+}
+
+static int hclge_comm_cmd_check_retval(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc, int num,
+ int ntc)
+{
+ u16 opcode, desc_ret;
+ int handle;
+
+ opcode = le16_to_cpu(desc[0].opcode);
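+	/* copy the descriptors written back by the firmware to the caller */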
+ for (handle = 0; handle < num; handle++) {
+ desc[handle] = hw->cmq.csq.desc[ntc];
+ ntc++;
+ if (ntc >= hw->cmq.csq.desc_num)
+ ntc = 0;
+ }
+ if (likely(!hclge_comm_is_special_opcode(opcode)))
+ desc_ret = le16_to_cpu(desc[num - 1].retval);
+ else
+ desc_ret = le16_to_cpu(desc[0].retval);
+
+ hw->cmq.last_status = desc_ret;
+
+ return hclge_comm_cmd_convert_err_code(desc_ret);
+}
+
+static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int num, int ntc)
+{
+ bool is_completed = false;
+ int handle, ret;
+
+	/* If the command is sync, wait for the firmware to write back.
+	 * If multiple descriptors are to be sent, use the first one to
+	 * check for completion.
+	 */
+ if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
+ hclge_comm_wait_for_resp(hw, &is_completed);
+
+ if (!is_completed)
+ ret = -EBADE;
+ else
+ ret = hclge_comm_cmd_check_retval(hw, desc, num, ntc);
+
+ /* Clean the command send queue */
+ handle = hclge_comm_cmd_csq_clean(hw);
+ if (handle < 0)
+ ret = handle;
+ else if (handle != num)
+ dev_warn(&hw->cmq.csq.pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ return ret;
+}
+
+/**
+ * hclge_comm_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send routine for the command queue: it copies the
+ * descriptors to the CSQ, notifies the hardware, waits for the command
+ * to complete and cleans the send queue.
+ */
+int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num)
+{
+ struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
+ int ret;
+ int ntc;
+
+ spin_lock_bh(&hw->cmq.csq.lock);
+
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ if (num > hclge_comm_ring_space(&hw->cmq.csq)) {
+		/* If the CMDQ ring is full, the SW HEAD and HW HEAD may differ,
+		 * so the SW HEAD pointer csq->next_to_clean needs updating
+		 */
+ csq->next_to_clean =
+ hclge_comm_read_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG);
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+	/* Record the location of desc in the ring for this time
+	 * which will be used for hardware write-back
+	 */
+ ntc = hw->cmq.csq.next_to_use;
+
+ hclge_comm_cmd_copy_desc(hw, desc, num);
+
+ /* Write to hardware */
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ hw->cmq.csq.next_to_use);
+
+ ret = hclge_comm_cmd_check_result(hw, desc, num, ntc);
+
+ spin_unlock_bh(&hw->cmq.csq.lock);
+
+ return ret;
+}
+
+static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
+{
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CSQ_TAIL_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_DEPTH_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 0);
+ hclge_comm_write_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG, 0);
+}
+
+void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw)
+{
+ struct hclge_comm_cmq *cmdq = &hw->cmq;
+
+ hclge_comm_firmware_compat_config(ae_dev, hw, false);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+	/* wait to ensure that the firmware completes any possible
+	 * leftover commands.
+	 */
+ msleep(HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME);
+ spin_lock_bh(&cmdq->csq.lock);
+ spin_lock(&cmdq->crq.lock);
+ hclge_comm_cmd_uninit_regs(hw);
+ spin_unlock(&cmdq->crq.lock);
+ spin_unlock_bh(&cmdq->csq.lock);
+
+ hclge_comm_free_cmd_desc(&cmdq->csq);
+ hclge_comm_free_cmd_desc(&cmdq->crq);
+}
+
+int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
+{
+ struct hclge_comm_cmq *cmdq = &hw->cmq;
+ int ret;
+
+ /* Setup the lock for command queue */
+ spin_lock_init(&cmdq->csq.lock);
+ spin_lock_init(&cmdq->crq.lock);
+
+ cmdq->csq.pdev = pdev;
+ cmdq->crq.pdev = pdev;
+
+ /* Setup the queue entries for use cmd queue */
+ cmdq->csq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
+ cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
+
+ /* Setup Tx write back timeout */
+ cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT;
+
+ /* Setup queue rings */
+ ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
+ if (ret) {
+ dev_err(&pdev->dev, "CSQ ring setup error %d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CRQ);
+ if (ret) {
+ dev_err(&pdev->dev, "CRQ ring setup error %d\n", ret);
+ goto err_csq;
+ }
+
+ return 0;
+err_csq:
+ hclge_comm_free_cmd_desc(&hw->cmq.csq);
+ return ret;
+}
+
+int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf,
+ unsigned long reset_pending)
+{
+ struct hclge_comm_cmq *cmdq = &hw->cmq;
+ int ret;
+
+ spin_lock_bh(&cmdq->csq.lock);
+ spin_lock(&cmdq->crq.lock);
+
+ cmdq->csq.next_to_clean = 0;
+ cmdq->csq.next_to_use = 0;
+ cmdq->crq.next_to_clean = 0;
+ cmdq->crq.next_to_use = 0;
+
+ hclge_comm_cmd_init_regs(hw);
+
+ spin_unlock(&cmdq->crq.lock);
+ spin_unlock_bh(&cmdq->csq.lock);
+
+ clear_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+	/* Check if there is a new reset pending, because a higher level
+	 * reset may happen while a lower level reset is being processed.
+	 */
+ if (reset_pending) {
+ ret = -EBUSY;
+ goto err_cmd_init;
+ }
+
+ /* get version and device capabilities */
+ ret = hclge_comm_cmd_query_version_and_capability(ae_dev, hw,
+ fw_version, is_pf);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "failed to query version and capabilities, ret = %d\n",
+ ret);
+ goto err_cmd_init;
+ }
+
+ dev_info(&ae_dev->pdev->dev,
+ "The firmware version is %lu.%lu.%lu.%lu\n",
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(*fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ if (!is_pf && ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
+ return 0;
+
+	/* ask the firmware to enable some features; the driver can work
+	 * without them.
+	 */
+ ret = hclge_comm_firmware_compat_config(ae_dev, hw, true);
+ if (ret)
+ dev_warn(&ae_dev->pdev->dev,
+ "Firmware compatible features not enabled(%d).\n",
+ ret);
+ return 0;
+
+err_cmd_init:
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
new file mode 100644
index 000000000000..de72ecbfd5ad
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -0,0 +1,468 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#ifndef __HCLGE_COMM_CMD_H
+#define __HCLGE_COMM_CMD_H
+#include <linux/types.h>
+
+#include "hnae3.h"
+
+#define HCLGE_COMM_CMD_FLAG_IN BIT(0)
+#define HCLGE_COMM_CMD_FLAG_NEXT BIT(2)
+#define HCLGE_COMM_CMD_FLAG_WR BIT(3)
+#define HCLGE_COMM_CMD_FLAG_NO_INTR BIT(4)
+
+#define HCLGE_COMM_SEND_SYNC(flag) \
+ ((flag) & HCLGE_COMM_CMD_FLAG_NO_INTR)
+
+#define HCLGE_COMM_LINK_EVENT_REPORT_EN_B 0
+#define HCLGE_COMM_NCSI_ERROR_REPORT_EN_B 1
+#define HCLGE_COMM_PHY_IMP_EN_B 2
+#define HCLGE_COMM_MAC_STATS_EXT_EN_B 3
+#define HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B 4
+#define HCLGE_COMM_LLRS_FEC_EN_B 5
+
+#define hclge_comm_dev_phy_imp_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (ae_dev)->caps)
+
+#define HCLGE_COMM_TYPE_CRQ 0
+#define HCLGE_COMM_TYPE_CSQ 1
+
+#define HCLGE_COMM_CMDQ_CLEAR_WAIT_TIME 200
+
+/* bar registers for cmdq */
+#define HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG 0x27000
+#define HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG 0x27004
+#define HCLGE_COMM_NIC_CSQ_DEPTH_REG 0x27008
+#define HCLGE_COMM_NIC_CSQ_TAIL_REG 0x27010
+#define HCLGE_COMM_NIC_CSQ_HEAD_REG 0x27014
+#define HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG 0x27018
+#define HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG 0x2701C
+#define HCLGE_COMM_NIC_CRQ_DEPTH_REG 0x27020
+#define HCLGE_COMM_NIC_CRQ_TAIL_REG 0x27024
+#define HCLGE_COMM_NIC_CRQ_HEAD_REG 0x27028
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGE_COMM_VECTOR0_CMDQ_SRC_REG 0x27100
+/* Vector0 interrupt CMDQ event status register(RO) */
+#define HCLGE_COMM_VECTOR0_CMDQ_STATE_REG 0x27104
+#define HCLGE_COMM_CMDQ_INTR_EN_REG 0x27108
+#define HCLGE_COMM_CMDQ_INTR_GEN_REG 0x2710C
+#define HCLGE_COMM_CMDQ_INTR_STS_REG 0x27104
+
+/* this bit indicates that the driver is ready for hardware reset */
+#define HCLGE_COMM_NIC_SW_RST_RDY_B 16
+#define HCLGE_COMM_NIC_SW_RST_RDY BIT(HCLGE_COMM_NIC_SW_RST_RDY_B)
+#define HCLGE_COMM_NIC_CMQ_DESC_NUM_S 3
+#define HCLGE_COMM_NIC_CMQ_DESC_NUM 1024
+#define HCLGE_COMM_CMDQ_TX_TIMEOUT 30000
+
+enum hclge_opcode_type {
+ /* Generic commands */
+ HCLGE_OPC_QUERY_FW_VER = 0x0001,
+ HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
+ HCLGE_OPC_GBL_RST_STATUS = 0x0021,
+ HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022,
+ HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
+ HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
+ HCLGE_OPC_GET_CFG_PARAM = 0x0025,
+ HCLGE_OPC_PF_RST_DONE = 0x0026,
+ HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
+
+ HCLGE_OPC_STATS_64_BIT = 0x0030,
+ HCLGE_OPC_STATS_32_BIT = 0x0031,
+ HCLGE_OPC_STATS_MAC = 0x0032,
+ HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033,
+ HCLGE_OPC_STATS_MAC_ALL = 0x0034,
+
+ HCLGE_OPC_QUERY_REG_NUM = 0x0040,
+ HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
+ HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
+ HCLGE_OPC_DFX_BD_NUM = 0x0043,
+ HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044,
+ HCLGE_OPC_DFX_SSU_REG_0 = 0x0045,
+ HCLGE_OPC_DFX_SSU_REG_1 = 0x0046,
+ HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047,
+ HCLGE_OPC_DFX_RPU_REG_0 = 0x0048,
+ HCLGE_OPC_DFX_RPU_REG_1 = 0x0049,
+ HCLGE_OPC_DFX_NCSI_REG = 0x004A,
+ HCLGE_OPC_DFX_RTC_REG = 0x004B,
+ HCLGE_OPC_DFX_PPP_REG = 0x004C,
+ HCLGE_OPC_DFX_RCB_REG = 0x004D,
+ HCLGE_OPC_DFX_TQP_REG = 0x004E,
+ HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
+
+ HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
+
+ /* MAC command */
+ HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
+ HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
+ HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
+ HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
+ HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
+ HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
+ HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
+ HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
+ HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
+ HCLGE_OPC_QUERY_FEC_STATS = 0x0316,
+ HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
+ HCLGE_OPC_QUERY_ROH_TYPE_INFO = 0x0389,
+
+ /* PTP commands */
+ HCLGE_OPC_PTP_INT_EN = 0x0501,
+ HCLGE_OPC_PTP_MODE_CFG = 0x0507,
+
+ /* PFC/Pause commands */
+ HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
+ HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
+ HCLGE_OPC_CFG_MAC_PARA = 0x0703,
+ HCLGE_OPC_CFG_PFC_PARA = 0x0704,
+ HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705,
+ HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706,
+ HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707,
+ HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708,
+ HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709,
+ HCLGE_OPC_QOS_MAP = 0x070A,
+
+ /* ETS/scheduler commands */
+ HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804,
+ HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805,
+ HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806,
+ HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807,
+ HCLGE_OPC_TM_PORT_WEIGHT = 0x0808,
+ HCLGE_OPC_TM_PG_WEIGHT = 0x0809,
+ HCLGE_OPC_TM_QS_WEIGHT = 0x080A,
+ HCLGE_OPC_TM_PRI_WEIGHT = 0x080B,
+ HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C,
+ HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D,
+ HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E,
+ HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F,
+ HCLGE_OPC_TM_PORT_SHAPPING = 0x0810,
+ HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812,
+ HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
+ HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
+ HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+ HCLGE_OPC_TM_NODES = 0x0816,
+ HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
+ HCLGE_OPC_QSET_DFX_STS = 0x0844,
+ HCLGE_OPC_PRI_DFX_STS = 0x0845,
+ HCLGE_OPC_PG_DFX_STS = 0x0846,
+ HCLGE_OPC_PORT_DFX_STS = 0x0847,
+ HCLGE_OPC_SCH_NQ_CNT = 0x0848,
+ HCLGE_OPC_SCH_RQ_CNT = 0x0849,
+ HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
+ HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
+ HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
+
+ /* Packet buffer allocate commands */
+ HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
+ HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
+ HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
+ HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904,
+ HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
+ HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
+
+ /* TQP management command */
+ HCLGE_OPC_SET_TQP_MAP = 0x0A01,
+
+ /* TQP commands */
+ HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
+ HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
+ HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
+ HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
+ HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
+ HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
+ HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
+ HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
+ HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
+ HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
+
+ /* PPU commands */
+ HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
+
+ /* TSO command */
+ HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
+ HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
+
+ /* RSS commands */
+ HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
+ HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
+ HCLGE_OPC_RSS_TC_MODE = 0x0D08,
+ HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02,
+
+	/* Promiscuous mode command */
+ HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
+
+ /* Vlan offload commands */
+ HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
+ HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
+
+ /* Interrupts commands */
+ HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
+ HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
+
+ /* MAC commands */
+ HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
+ HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
+ HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
+ HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
+ HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
+ HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
+ HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
+
+ /* MAC VLAN commands */
+ HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
+
+ /* VLAN commands */
+ HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
+ HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
+ HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
+ HCLGE_OPC_PORT_VLAN_BYPASS = 0x1103,
+
+ /* Flow Director commands */
+ HCLGE_OPC_FD_MODE_CTRL = 0x1200,
+ HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
+ HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
+ HCLGE_OPC_FD_TCAM_OP = 0x1203,
+ HCLGE_OPC_FD_AD_OP = 0x1204,
+ HCLGE_OPC_FD_CNT_OP = 0x1205,
+ HCLGE_OPC_FD_USER_DEF_OP = 0x1207,
+ HCLGE_OPC_FD_QB_CTRL = 0x1210,
+ HCLGE_OPC_FD_QB_AD_OP = 0x1211,
+
+ /* MDIO command */
+ HCLGE_OPC_MDIO_CONFIG = 0x1900,
+
+ /* QCN commands */
+ HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
+ HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
+ HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03,
+ HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
+ HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
+ HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
+ HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
+ HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
+
+ /* Mailbox command */
+ HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
+ HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
+
+ /* Led command */
+ HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+
+ /* clear hardware resource command */
+ HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B,
+
+ /* NCL config command */
+ HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
+
+ /* IMP stats command */
+ HCLGE_OPC_IMP_STATS_BD = 0x7012,
+ HCLGE_OPC_IMP_STATS_INFO = 0x7013,
+ HCLGE_OPC_IMP_COMPAT_CFG = 0x701A,
+
+ /* SFP command */
+ HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
+ HCLGE_OPC_GET_SFP_EXIST = 0x7101,
+ HCLGE_OPC_GET_SFP_INFO = 0x7104,
+
+ /* Error INT commands */
+ HCLGE_MAC_COMMON_INT_EN = 0x030E,
+ HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
+ HCLGE_SSU_ECC_INT_CMD = 0x0989,
+ HCLGE_SSU_COMMON_INT_CMD = 0x098C,
+ HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40,
+ HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41,
+ HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42,
+ HCLGE_COMMON_ECC_INT_CFG = 0x1505,
+ HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512,
+ HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
+ HCLGE_QUERY_ALL_ERR_BD_NUM = 0x1516,
+ HCLGE_QUERY_ALL_ERR_INFO = 0x1517,
+ HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
+ HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
+ HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
+ HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
+ HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
+ HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
+ HCLGE_IGU_COMMON_INT_EN = 0x1806,
+ HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
+ HCLGE_PPP_CMD0_INT_CMD = 0x2100,
+ HCLGE_PPP_CMD1_INT_CMD = 0x2101,
+ HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
+ HCLGE_OPC_WOL_GET_SUPPORTED_MODE = 0x2201,
+ HCLGE_OPC_WOL_CFG = 0x2202,
+ HCLGE_NCSI_INT_EN = 0x2401,
+
+ /* ROH MAC commands */
+ HCLGE_OPC_MAC_ADDR_CHECK = 0x9004,
+
+ /* PHY command */
+ HCLGE_OPC_PHY_LINK_KSETTING = 0x7025,
+ HCLGE_OPC_PHY_REG = 0x7026,
+
+ /* Query link diagnosis info command */
+ HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A,
+};
+
+enum hclge_comm_cmd_return_status {
+ HCLGE_COMM_CMD_EXEC_SUCCESS = 0,
+ HCLGE_COMM_CMD_NO_AUTH = 1,
+ HCLGE_COMM_CMD_NOT_SUPPORTED = 2,
+ HCLGE_COMM_CMD_QUEUE_FULL = 3,
+ HCLGE_COMM_CMD_NEXT_ERR = 4,
+ HCLGE_COMM_CMD_UNEXE_ERR = 5,
+ HCLGE_COMM_CMD_PARA_ERR = 6,
+ HCLGE_COMM_CMD_RESULT_ERR = 7,
+ HCLGE_COMM_CMD_TIMEOUT = 8,
+ HCLGE_COMM_CMD_HILINK_ERR = 9,
+ HCLGE_COMM_CMD_QUEUE_ILLEGAL = 10,
+ HCLGE_COMM_CMD_INVALID = 11,
+};
+
+enum HCLGE_COMM_CAP_BITS {
+ HCLGE_COMM_CAP_UDP_GSO_B,
+ HCLGE_COMM_CAP_QB_B,
+ HCLGE_COMM_CAP_FD_FORWARD_TC_B,
+ HCLGE_COMM_CAP_PTP_B,
+ HCLGE_COMM_CAP_INT_QL_B,
+ HCLGE_COMM_CAP_HW_TX_CSUM_B,
+ HCLGE_COMM_CAP_TX_PUSH_B,
+ HCLGE_COMM_CAP_PHY_IMP_B,
+ HCLGE_COMM_CAP_TQP_TXRX_INDEP_B,
+ HCLGE_COMM_CAP_HW_PAD_B,
+ HCLGE_COMM_CAP_STASH_B,
+ HCLGE_COMM_CAP_UDP_TUNNEL_CSUM_B,
+ HCLGE_COMM_CAP_RAS_IMP_B = 12,
+ HCLGE_COMM_CAP_FEC_B = 13,
+ HCLGE_COMM_CAP_PAUSE_B = 14,
+ HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
+ HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
+ HCLGE_COMM_CAP_CQ_B = 18,
+ HCLGE_COMM_CAP_GRO_B = 20,
+ HCLGE_COMM_CAP_FD_B = 21,
+ HCLGE_COMM_CAP_FEC_STATS_B = 25,
+ HCLGE_COMM_CAP_LANE_NUM_B = 27,
+ HCLGE_COMM_CAP_WOL_B = 28,
+};
+
+enum HCLGE_COMM_API_CAP_BITS {
+ HCLGE_COMM_API_CAP_FLEX_RSS_TBL_B,
+};
+
+/* capabilities bits map between imp firmware and local driver */
+struct hclge_comm_caps_bit_map {
+ u16 imp_bit;
+ u16 local_bit;
+};
+
+struct hclge_comm_firmware_compat_cmd {
+ __le32 compat;
+ u8 rsv[20];
+};
+
+enum hclge_comm_cmd_state {
+ HCLGE_COMM_STATE_CMD_DISABLE,
+};
+
+struct hclge_comm_errcode {
+ u32 imp_errcode;
+ int common_errno;
+};
+
+#define HCLGE_COMM_QUERY_CAP_LENGTH 3
+struct hclge_comm_query_version_cmd {
+ __le32 firmware;
+ __le32 hardware;
+ __le32 api_caps;
+ __le32 caps[HCLGE_COMM_QUERY_CAP_LENGTH]; /* capabilities of device */
+};
+
+#define HCLGE_DESC_DATA_LEN 6
+struct hclge_desc {
+ __le16 opcode;
+ __le16 flag;
+ __le16 retval;
+ __le16 rsv;
+ __le32 data[HCLGE_DESC_DATA_LEN];
+};
+
+struct hclge_comm_cmq_ring {
+ dma_addr_t desc_dma_addr;
+ struct hclge_desc *desc;
+ struct pci_dev *pdev;
+ u32 head;
+ u32 tail;
+
+ u16 buf_size;
+ u16 desc_num;
+ int next_to_use;
+ int next_to_clean;
+ u8 ring_type; /* cmq ring type */
+ spinlock_t lock; /* Command queue lock */
+};
+
+enum hclge_comm_cmd_status {
+ HCLGE_COMM_STATUS_SUCCESS = 0,
+ HCLGE_COMM_ERR_CSQ_FULL = -1,
+ HCLGE_COMM_ERR_CSQ_TIMEOUT = -2,
+ HCLGE_COMM_ERR_CSQ_ERROR = -3,
+};
+
+struct hclge_comm_cmq {
+ struct hclge_comm_cmq_ring csq;
+ struct hclge_comm_cmq_ring crq;
+ u16 tx_timeout;
+ enum hclge_comm_cmd_status last_status;
+};
+
+struct hclge_comm_hw {
+ void __iomem *io_base;
+ void __iomem *mem_base;
+ struct hclge_comm_cmq cmq;
+ unsigned long comm_state;
+};
+
+static inline void hclge_comm_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+ writel(value, base + reg);
+}
+
+static inline u32 hclge_comm_read_reg(u8 __iomem *base, u32 reg)
+{
+ u8 __iomem *reg_addr = READ_ONCE(base);
+
+ return readl(reg_addr + reg);
+}
+
+#define hclge_comm_write_dev(a, reg, value) \
+ hclge_comm_write_reg((a)->io_base, reg, value)
+#define hclge_comm_read_dev(a, reg) \
+ hclge_comm_read_reg((a)->io_base, reg)
+
+void hclge_comm_cmd_init_regs(struct hclge_comm_hw *hw);
+int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf);
+int hclge_comm_alloc_cmd_queue(struct hclge_comm_hw *hw, int ring_type);
+int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num);
+void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
+int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, bool en);
+void hclge_comm_free_cmd_desc(struct hclge_comm_cmq_ring *ring);
+void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
+ enum hclge_opcode_type opcode,
+ bool is_read);
+void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw);
+int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw);
+int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
+ u32 *fw_version, bool is_pf,
+ unsigned long reset_pending);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
new file mode 100644
index 000000000000..ae2736549526
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
@@ -0,0 +1,505 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021-2021 Hisilicon Limited.
+#include <linux/skbuff.h>
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+#include "hclge_comm_rss.h"
+
+static const u8 hclge_comm_hash_key[] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
+};
+
+static void
+hclge_comm_init_rss_tuple(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_tuple_cfg *rss_tuple_cfg)
+{
+ rss_tuple_cfg->ipv4_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv4_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv4_sctp_en = HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
+ rss_tuple_cfg->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv6_tcp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ rss_tuple_cfg->ipv6_udp_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
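+	/* hardware of version V2 or earlier does not hash the L4 ports of
+	 * IPv6 SCTP packets, so the port bits are left out of that tuple
+	 */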
+ rss_tuple_cfg->ipv6_sctp_en =
+ ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+ HCLGE_COMM_RSS_INPUT_TUPLE_SCTP;
+ rss_tuple_cfg->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+}
+
+int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg)
+{
+ u16 rss_ind_tbl_size = ae_dev->dev_specs.rss_ind_tbl_size;
+ int rss_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
+ u16 *rss_ind_tbl;
+
+ if (nic->flags & HNAE3_SUPPORT_VF)
+ rss_cfg->rss_size = nic->kinfo.rss_size;
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ rss_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;
+
+ hclge_comm_init_rss_tuple(ae_dev, &rss_cfg->rss_tuple_sets);
+
+ rss_cfg->rss_algo = rss_algo;
+
+ rss_ind_tbl = devm_kcalloc(&ae_dev->pdev->dev, rss_ind_tbl_size,
+ sizeof(*rss_ind_tbl), GFP_KERNEL);
+ if (!rss_ind_tbl)
+ return -ENOMEM;
+
+ rss_cfg->rss_indirection_tbl = rss_ind_tbl;
+ memcpy(rss_cfg->rss_hash_key, hclge_comm_hash_key,
+ HCLGE_COMM_RSS_KEY_SIZE);
+
+ hclge_comm_rss_indir_init_cfg(ae_dev, rss_cfg);
+
+ return 0;
+}
+
+void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size)
+{
+ u16 roundup_size;
+ u32 i;
+
+ roundup_size = roundup_pow_of_two(rss_size);
+ roundup_size = ilog2(roundup_size);
+
+ for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = (hw_tc_map & BIT(i)) ? rss_size * i : 0;
+ }
+}
+
+int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size)
+{
+ struct hclge_comm_rss_tc_mode_cmd *req;
+ struct hclge_desc desc;
+ unsigned int i;
+ int ret;
+
+ req = (struct hclge_comm_rss_tc_mode_cmd *)desc.data;
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
+ for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) {
+ u16 mode = 0;
+
+ hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_VALID_B,
+ (tc_valid[i] & 0x1));
+ hnae3_set_field(mode, HCLGE_COMM_RSS_TC_SIZE_M,
+ HCLGE_COMM_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_bit(mode, HCLGE_COMM_RSS_TC_SIZE_MSB_B,
+ tc_size[i] >> HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET &
+ 0x1);
+ hnae3_set_field(mode, HCLGE_COMM_RSS_TC_OFFSET_M,
+ HCLGE_COMM_RSS_TC_OFFSET_S, tc_offset[i]);
+
+ req->rss_tc_mode[i] = cpu_to_le16(mode);
+ }
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret)
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to set rss tc mode, ret = %d.\n", ret);
+
+ return ret;
+}
+
+int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
+ struct hclge_comm_hw *hw, const u8 *key,
+ const u8 hfunc)
+{
+ u8 hash_algo;
+ int ret;
+
+ ret = hclge_comm_parse_rss_hfunc(rss_cfg, hfunc, &hash_algo);
+ if (ret)
+ return ret;
+
+	/* Set the RSS Hash Key if specified by the user */
+ if (key) {
+ ret = hclge_comm_set_rss_algo_key(hw, hash_algo, key);
+ if (ret)
+ return ret;
+
+		/* Update the shadow RSS key with the user specified key */
+ memcpy(rss_cfg->rss_hash_key, key, HCLGE_COMM_RSS_KEY_SIZE);
+ } else {
+ ret = hclge_comm_set_rss_algo_key(hw, hash_algo,
+ rss_cfg->rss_hash_key);
+ if (ret)
+ return ret;
+ }
+ rss_cfg->rss_algo = hash_algo;
+
+ return 0;
+}
+
+int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_comm_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (nfc->data &
+ ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE,
+ false);
+
+ ret = hclge_comm_init_rss_tuple_cmd(rss_cfg, nfc, ae_dev, req);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to init rss tuple cmd, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to set rss tuple, ret = %d.\n", ret);
+ return ret;
+ }
+
+ rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
+ rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
+ rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
+ rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+ return 0;
+}
+
+u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle)
+{
+ return HCLGE_COMM_RSS_KEY_SIZE;
+}
+
+int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
+ const u8 hfunc, u8 *hash_algo)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ *hash_algo = HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ;
+ return 0;
+ case ETH_RSS_HASH_XOR:
+ *hash_algo = HCLGE_COMM_RSS_HASH_ALGO_SIMPLE;
+ return 0;
+ case ETH_RSS_HASH_NO_CHANGE:
+ *hash_algo = rss_cfg->rss_algo;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg)
+{
+	u16 i;
+
+	/* Initialize RSS indirect table */
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
+ rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
+}
+
+int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
+ u8 *tuple_sets)
+{
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ break;
+ case UDP_V4_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ break;
+ case TCP_V6_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ break;
+ case UDP_V6_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ break;
+ case SCTP_V4_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ break;
+ case SCTP_V6_FLOW:
+ *tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ *tuple_sets = HCLGE_COMM_S_IP_BIT | HCLGE_COMM_D_IP_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req,
+ u16 qid, u32 j)
+{
+ u8 rss_msb_oft;
+ u8 rss_msb_val;
+
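+	/* queue ids are 9 bits wide here: the low byte goes into rss_qid_l
+	 * and the most significant bit is packed into the rss_qid_h array
+	 */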
+ rss_msb_oft =
+ j * HCLGE_COMM_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
+ rss_msb_val = (qid >> HCLGE_COMM_RSS_CFG_TBL_BW_L & 0x1) <<
+ (j * HCLGE_COMM_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
+ req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
+}
+
+int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, const u16 *indir)
+{
+ struct hclge_comm_rss_ind_tbl_cmd *req;
+ struct hclge_desc desc;
+ u16 rss_cfg_tbl_num;
+ int ret;
+ u16 qid;
+ u16 i;
+ u32 j;
+
+ req = (struct hclge_comm_rss_ind_tbl_cmd *)desc.data;
+ rss_cfg_tbl_num = ae_dev->dev_specs.rss_ind_tbl_size /
+ HCLGE_COMM_RSS_CFG_TBL_SIZE;
+
+ for (i = 0; i < rss_cfg_tbl_num; i++) {
+ hclge_comm_cmd_setup_basic_desc(&desc,
+ HCLGE_OPC_RSS_INDIR_TABLE,
+ false);
+
+ req->start_table_index =
+ cpu_to_le16(i * HCLGE_COMM_RSS_CFG_TBL_SIZE);
+ req->rss_set_bitmap =
+ cpu_to_le16(HCLGE_COMM_RSS_SET_BITMAP_MSK);
+ for (j = 0; j < HCLGE_COMM_RSS_CFG_TBL_SIZE; j++) {
+ qid = indir[i * HCLGE_COMM_RSS_CFG_TBL_SIZE + j];
+ req->rss_qid_l[j] = qid & 0xff;
+ hclge_comm_append_rss_msb_info(req, qid, j);
+ }
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to configure rss table, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,
+ struct hclge_comm_hw *hw, bool is_pf,
+ struct hclge_comm_rss_cfg *rss_cfg)
+{
+ struct hclge_comm_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE,
+ false);
+
+ req = (struct hclge_comm_rss_input_tuple_cmd *)desc.data;
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret)
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to configure rss input, ret = %d.\n", ret);
+ return ret;
+}
+
+void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
+ u8 *hfunc)
+{
+ /* Get hash algorithm */
+ if (hfunc) {
+ switch (rss_cfg->rss_algo) {
+ case HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ:
+ *hfunc = ETH_RSS_HASH_TOP;
+ break;
+ case HCLGE_COMM_RSS_HASH_ALGO_SIMPLE:
+ *hfunc = ETH_RSS_HASH_XOR;
+ break;
+ default:
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ break;
+ }
+ }
+
+	/* Get the RSS Key requested by the user */
+ if (key)
+ memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE);
+}
+
+void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
+ u32 *indir, u16 rss_ind_tbl_size)
+{
+ u16 i;
+
+ if (!indir)
+ return;
+
+ for (i = 0; i < rss_ind_tbl_size; i++)
+ indir[i] = rss_cfg->rss_indirection_tbl[i];
+}
+
+int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
+ const u8 *key)
+{
+ struct hclge_comm_rss_config_cmd *req;
+ unsigned int key_offset = 0;
+ struct hclge_desc desc;
+ int key_counts;
+ int key_size;
+ int ret;
+
+ key_counts = HCLGE_COMM_RSS_KEY_SIZE;
+ req = (struct hclge_comm_rss_config_cmd *)desc.data;
+
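+	/* the 40-byte hash key is programmed 16 bytes at a time, using one
+	 * descriptor per chunk
+	 */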
+ while (key_counts) {
+ hclge_comm_cmd_setup_basic_desc(&desc,
+ HCLGE_OPC_RSS_GENERIC_CONFIG,
+ false);
+
+ req->hash_config |= (hfunc & HCLGE_COMM_RSS_HASH_ALGO_MASK);
+ req->hash_config |=
+ (key_offset << HCLGE_COMM_RSS_HASH_KEY_OFFSET_B);
+
+ key_size = min(HCLGE_COMM_RSS_HASH_KEY_NUM, key_counts);
+ memcpy(req->hash_key,
+ key + key_offset * HCLGE_COMM_RSS_HASH_KEY_NUM,
+ key_size);
+
+ key_counts -= key_size;
+ key_offset++;
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to configure RSS key, ret = %d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+{
+ u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_COMM_S_PORT_BIT : 0;
+
+ if (nfc->data & RXH_L4_B_2_3)
+ hash_sets |= HCLGE_COMM_D_PORT_BIT;
+ else
+ hash_sets &= ~HCLGE_COMM_D_PORT_BIT;
+
+ if (nfc->data & RXH_IP_SRC)
+ hash_sets |= HCLGE_COMM_S_IP_BIT;
+ else
+ hash_sets &= ~HCLGE_COMM_S_IP_BIT;
+
+ if (nfc->data & RXH_IP_DST)
+ hash_sets |= HCLGE_COMM_D_IP_BIT;
+ else
+ hash_sets &= ~HCLGE_COMM_D_IP_BIT;
+
+ if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
+ hash_sets |= HCLGE_COMM_V_TAG_BIT;
+
+ return hash_sets;
+}
+
+int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_input_tuple_cmd *req)
+{
+ u8 tuple_sets;
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ tuple_sets = hclge_comm_get_rss_hash_bits(nfc);
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ req->ipv4_tcp_en = tuple_sets;
+ break;
+ case TCP_V6_FLOW:
+ req->ipv6_tcp_en = tuple_sets;
+ break;
+ case UDP_V4_FLOW:
+ req->ipv4_udp_en = tuple_sets;
+ break;
+ case UDP_V6_FLOW:
+ req->ipv6_udp_en = tuple_sets;
+ break;
+ case SCTP_V4_FLOW:
+ req->ipv4_sctp_en = tuple_sets;
+ break;
+ case SCTP_V6_FLOW:
+ if (ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
+ return -EINVAL;
+
+ req->ipv6_sctp_en = tuple_sets;
+ break;
+ case IPV4_FLOW:
+ req->ipv4_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ break;
+ case IPV6_FLOW:
+ req->ipv6_fragment_en = HCLGE_COMM_RSS_INPUT_TUPLE_OTHER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+u64 hclge_comm_convert_rss_tuple(u8 tuple_sets)
+{
+ u64 tuple_data = 0;
+
+ if (tuple_sets & HCLGE_COMM_D_PORT_BIT)
+ tuple_data |= RXH_L4_B_2_3;
+ if (tuple_sets & HCLGE_COMM_S_PORT_BIT)
+ tuple_data |= RXH_L4_B_0_1;
+ if (tuple_sets & HCLGE_COMM_D_IP_BIT)
+ tuple_data |= RXH_IP_DST;
+ if (tuple_sets & HCLGE_COMM_S_IP_BIT)
+ tuple_data |= RXH_IP_SRC;
+
+ return tuple_data;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
new file mode 100644
index 000000000000..92af3d2980d3
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#ifndef __HCLGE_COMM_RSS_H
+#define __HCLGE_COMM_RSS_H
+#include <linux/types.h>
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+
+#define HCLGE_COMM_RSS_HASH_ALGO_TOEPLITZ 0
+#define HCLGE_COMM_RSS_HASH_ALGO_SIMPLE 1
+#define HCLGE_COMM_RSS_HASH_ALGO_SYMMETRIC 2
+
+#define HCLGE_COMM_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
+#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
+
+#define HCLGE_COMM_D_PORT_BIT BIT(0)
+#define HCLGE_COMM_S_PORT_BIT BIT(1)
+#define HCLGE_COMM_D_IP_BIT BIT(2)
+#define HCLGE_COMM_S_IP_BIT BIT(3)
+#define HCLGE_COMM_V_TAG_BIT BIT(4)
+#define HCLGE_COMM_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGE_COMM_D_IP_BIT | HCLGE_COMM_S_IP_BIT | HCLGE_COMM_V_TAG_BIT)
+#define HCLGE_COMM_MAX_TC_NUM 8
+
+#define HCLGE_COMM_RSS_TC_OFFSET_S 0
+#define HCLGE_COMM_RSS_TC_OFFSET_M GENMASK(10, 0)
+#define HCLGE_COMM_RSS_TC_SIZE_MSB_B 11
+#define HCLGE_COMM_RSS_TC_SIZE_S 12
+#define HCLGE_COMM_RSS_TC_SIZE_M GENMASK(14, 12)
+#define HCLGE_COMM_RSS_TC_VALID_B 15
+#define HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET 3
+
+struct hclge_comm_rss_tuple_cfg {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_sctp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_sctp_en;
+ u8 ipv6_fragment_en;
+};
+
+#define HCLGE_COMM_RSS_KEY_SIZE 40
+#define HCLGE_COMM_RSS_CFG_TBL_SIZE 16
+#define HCLGE_COMM_RSS_CFG_TBL_BW_H 2U
+#define HCLGE_COMM_RSS_CFG_TBL_BW_L 8U
+#define HCLGE_COMM_RSS_CFG_TBL_SIZE_H 4
+#define HCLGE_COMM_RSS_SET_BITMAP_MSK GENMASK(15, 0)
+#define HCLGE_COMM_RSS_HASH_ALGO_MASK GENMASK(3, 0)
+#define HCLGE_COMM_RSS_HASH_KEY_OFFSET_B 4
+
+#define HCLGE_COMM_RSS_HASH_KEY_NUM 16
+struct hclge_comm_rss_config_cmd {
+ u8 hash_config;
+ u8 rsv[7];
+ u8 hash_key[HCLGE_COMM_RSS_HASH_KEY_NUM];
+};
+
+struct hclge_comm_rss_cfg {
+	u8 rss_hash_key[HCLGE_COMM_RSS_KEY_SIZE]; /* user configured hash key */
+
+ /* shadow table */
+ u16 *rss_indirection_tbl;
+ u32 rss_algo;
+
+ struct hclge_comm_rss_tuple_cfg rss_tuple_sets;
+ u32 rss_size;
+};
+
+struct hclge_comm_rss_input_tuple_cmd {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_sctp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_sctp_en;
+ u8 ipv6_fragment_en;
+ u8 rsv[16];
+};
+
+struct hclge_comm_rss_ind_tbl_cmd {
+ __le16 start_table_index;
+ __le16 rss_set_bitmap;
+ u8 rss_qid_h[HCLGE_COMM_RSS_CFG_TBL_SIZE_H];
+ u8 rss_qid_l[HCLGE_COMM_RSS_CFG_TBL_SIZE];
+};
+
+struct hclge_comm_rss_tc_mode_cmd {
+ __le16 rss_tc_mode[HCLGE_COMM_MAX_TC_NUM];
+ u8 rsv[8];
+};
+
+u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle);
+void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg);
+int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
+ u8 *tuple_sets);
+int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
+ const u8 hfunc, u8 *hash_algo);
+void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
+ u8 *hfunc);
+void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
+ u32 *indir, u16 rss_ind_tbl_size);
+int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
+ const u8 *key);
+int hclge_comm_init_rss_tuple_cmd(struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_input_tuple_cmd *req);
+u64 hclge_comm_convert_rss_tuple(u8 tuple_sets);
+int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic,
+ struct hclge_comm_hw *hw, bool is_pf,
+ struct hclge_comm_rss_cfg *rss_cfg);
+int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw, const u16 *indir);
+int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
+ struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_rss_cfg *rss_cfg);
+void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size);
+int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
+ u16 *tc_valid, u16 *tc_size);
+int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
+ struct hclge_comm_hw *hw, const u8 *key,
+ const u8 hfunc);
+int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
+ struct hclge_comm_hw *hw,
+ struct hclge_comm_rss_cfg *rss_cfg,
+ struct ethtool_rxnfc *nfc);
+#endif
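For reference, the per-TC entries of struct hclge_comm_rss_tc_mode_cmd above are plain 16-bit bit-fields. The helper below is not part of the patch; it is a minimal sketch, built only from the macros defined above, of how one rss_tc_mode[] entry could be packed (the function name and call site are assumptions).

/* Hypothetical helper: pack one RSS TC mode word. tc_size is the log2 of
 * the TC's queue count; its bit 3 spills into the separate MSB bit.
 */
static __le16 example_pack_rss_tc_mode(bool tc_valid, u16 tc_size, u16 tc_offset)
{
	u16 mode = 0;

	mode |= (tc_valid ? 1 : 0) << HCLGE_COMM_RSS_TC_VALID_B;
	mode |= (tc_size << HCLGE_COMM_RSS_TC_SIZE_S) & HCLGE_COMM_RSS_TC_SIZE_M;
	mode |= ((tc_size >> HCLGE_COMM_RSS_TC_SIZE_MSB_OFFSET) & 0x1) <<
		HCLGE_COMM_RSS_TC_SIZE_MSB_B;
	mode |= (tc_offset << HCLGE_COMM_RSS_TC_OFFSET_S) &
		HCLGE_COMM_RSS_TC_OFFSET_M;

	return cpu_to_le16(mode);
}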
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
new file mode 100644
index 000000000000..f3c9395d8351
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#include <linux/err.h>
+
+#include "hnae3.h"
+#include "hclge_comm_cmd.h"
+#include "hclge_comm_tqp_stats.h"
+
+u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_comm_tqp *tqp;
+ u64 *buff = data;
+ u16 i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
+ }
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
+ }
+
+ return buff;
+}
+
+int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+
+ return kinfo->num_tqps * HCLGE_COMM_QUEUE_PAIR_SIZE;
+}
+
+u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ u8 *buff = data;
+ u16 i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_comm_tqp *tqp =
+ container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd", tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_comm_tqp *tqp =
+ container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd", tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ return buff;
+}
+
+int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
+ struct hclge_comm_hw *hw)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_comm_tqp *tqp;
+ struct hclge_desc desc;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_RX_STATS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index);
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to get tqp stat, ret = %d, rx = %u.\n",
+ ret, i);
+ return ret;
+ }
+ tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+
+ hclge_comm_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_TX_STATS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ ret = hclge_comm_cmd_send(hw, &desc, 1);
+ if (ret) {
+ dev_err(&hw->cmq.csq.pdev->dev,
+ "failed to get tqp stat, ret = %d, tx = %u.\n",
+ ret, i);
+ return ret;
+ }
+ tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+ }
+
+ return 0;
+}
+
+void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_comm_tqp *tqp;
+ struct hnae3_queue *queue;
+ u16 i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ queue = kinfo->tqp[i];
+ tqp = container_of(queue, struct hclge_comm_tqp, q);
+ memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
+ }
+}
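The exported helpers in this file are meant to back a driver's ethtool -S path. A minimal sketch of how a caller might chain them (the wrapper name and the omitted error handling are assumptions, not code from this patch):

static void example_update_and_copy_tqp_stats(struct hnae3_handle *handle,
					      struct hclge_comm_hw *hw, u64 *data)
{
	/* refresh the per-queue counters from firmware first ... */
	if (hclge_comm_tqps_update_stats(handle, hw))
		return;

	/* ... then copy all TX counters followed by all RX counters */
	hclge_comm_tqps_get_stats(handle, data);
}

The destination buffer needs room for hclge_comm_tqps_get_sset_count(handle) u64 values, i.e. two counters per queue pair.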
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
new file mode 100644
index 000000000000..a46350162ee8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021-2021 Hisilicon Limited.
+
+#ifndef __HCLGE_COMM_TQP_STATS_H
+#define __HCLGE_COMM_TQP_STATS_H
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include "hnae3.h"
+
+/* each TQP has both a TX and an RX queue */
+#define HCLGE_COMM_QUEUE_PAIR_SIZE 2
+
+/* TQP stats */
+struct hclge_comm_tqp_stats {
+ /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
+ u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
+ /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
+ u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
+};
+
+struct hclge_comm_tqp {
+ /* copy of device pointer from pci_dev,
+ * used when performing DMA mapping
+ */
+ struct device *dev;
+ struct hnae3_queue q;
+ struct hclge_comm_tqp_stats tqp_stats;
+ u16 index; /* Global index in a NIC controller */
+
+ bool alloced;
+};
+
+u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data);
+int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle);
+u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data);
+void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle);
+int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
+ struct hclge_comm_hw *hw);
+#endif
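struct hclge_comm_tqp embeds the generic struct hnae3_queue, and the helpers above recover the wrapper via container_of() from kinfo->tqp[i]. A minimal sketch of the setup this relies on (the q_ptrs parameter is an assumption standing in for the driver's kinfo->tqp[] array):

static void example_init_tqps(struct device *dev, struct hclge_comm_tqp *tqps,
			      struct hnae3_queue **q_ptrs, u16 num_tqps)
{
	u16 i;

	for (i = 0; i < num_tqps; i++) {
		tqps[i].dev = dev;
		tqps[i].index = i;		/* global index in the NIC */
		tqps[i].alloced = true;
		q_ptrs[i] = &tqps[i].q;		/* what kinfo->tqp[i] points at */
	}
}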
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index d2ec4c573bf8..3b6dbf158b98 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -56,6 +56,32 @@ static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
return -EOPNOTSUPP;
}
+static int hns3_dcbnl_ieee_setapp(struct net_device *ndev, struct dcb_app *app)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_setapp)
+ return h->kinfo.dcb_ops->ieee_setapp(h, app);
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_delapp)
+ return h->kinfo.dcb_ops->ieee_delapp(h, app);
+
+ return -EOPNOTSUPP;
+}
+
/* DCBX configuration */
static u8 hns3_dcbnl_getdcbx(struct net_device *ndev)
{
@@ -83,6 +109,8 @@ static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = {
.ieee_setets = hns3_dcbnl_ieee_setets,
.ieee_getpfc = hns3_dcbnl_ieee_getpfc,
.ieee_setpfc = hns3_dcbnl_ieee_setpfc,
+ .ieee_setapp = hns3_dcbnl_ieee_setapp,
+ .ieee_delapp = hns3_dcbnl_ieee_delapp,
.getdcbx = hns3_dcbnl_getdcbx,
.setdcbx = hns3_dcbnl_setdcbx,
};
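The two new callbacks receive IEEE 802.1Qaz APP table entries from the dcbnl core. As an illustration only (the chosen values and the lldptool invocation are assumptions, not taken from this patch), a DSCP-to-priority mapping such as "DSCP 46 -> priority 5" would arrive as:

static const struct dcb_app example_dscp_app = {
	.selector = IEEE_8021QAZ_APP_SEL_DSCP,
	.priority = 5,
	.protocol = 46,		/* the DSCP value (EF) */
};

typically configured from user space with something like "lldptool -T -i <ifname> -V APP app=5,5,46".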
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index c381f8af67f0..4c3e90a1c4d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -106,6 +106,13 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.init = hns3_dbg_common_file_init,
},
{
+ .name = "qos_dscp_map",
+ .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
.name = "qos_buf_cfg",
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dentry = HNS3_DBG_DENTRY_TM,
@@ -395,6 +402,15 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}, {
.name = "support modify vlan filter state",
.cap_bit = HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
+ }, {
+ .name = "support FEC statistics",
+ .cap_bit = HNAE3_DEV_SUPPORT_FEC_STATS_B,
+ }, {
+ .name = "support lane num",
+ .cap_bit = HNAE3_DEV_SUPPORT_LANE_NUM_B,
+ }, {
+ .name = "support wake on lan",
+ .cap_bit = HNAE3_DEV_SUPPORT_WOL_B,
}
};
@@ -562,12 +578,12 @@ static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf,
for (i = 0; i < ring_num; i++) {
j = 0;
- sprintf(result[j++], "%8u", i);
- sprintf(result[j++], "%9u", ring->tx_copybreak);
- sprintf(result[j++], "%3u", tx_spare->len);
- sprintf(result[j++], "%3u", tx_spare->next_to_use);
- sprintf(result[j++], "%3u", tx_spare->next_to_clean);
- sprintf(result[j++], "%3u", tx_spare->last_to_clean);
+ sprintf(result[j++], "%u", i);
+ sprintf(result[j++], "%u", ring->tx_copybreak);
+ sprintf(result[j++], "%u", tx_spare->len);
+ sprintf(result[j++], "%u", tx_spare->next_to_use);
+ sprintf(result[j++], "%u", tx_spare->next_to_clean);
+ sprintf(result[j++], "%u", tx_spare->last_to_clean);
sprintf(result[j++], "%pad", &tx_spare->dma);
hns3_dbg_fill_content(content, sizeof(content),
tx_spare_info_items,
@@ -598,35 +614,35 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
u32 base_add_l, base_add_h;
u32 j = 0;
- sprintf(result[j++], "%8u", index);
+ sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_NUM_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BD_LEN_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_TAIL_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_HEAD_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_FBDNUM_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%9u", ring->rx_copybreak);
+ sprintf(result[j++], "%u", ring->rx_copybreak);
- sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_EN_REG) ? "on" : "off");
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_EN_REG) ? "on" : "off");
else
- sprintf(result[j++], "%10s", "NA");
+ sprintf(result[j++], "%s", "NA");
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_BASEADDR_H_REG);
@@ -700,36 +716,36 @@ static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring,
u32 base_add_l, base_add_h;
u32 j = 0;
- sprintf(result[j++], "%8u", index);
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", index);
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BD_NUM_REG));
- sprintf(result[j++], "%2u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TC_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG));
- sprintf(result[j++], "%4u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_FBDNUM_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_OFFSET_REG));
- sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG));
- sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_EN_REG) ? "on" : "off");
if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev))
- sprintf(result[j++], "%10s", readl_relaxed(ring->tqp->io_base +
+ sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_EN_REG) ? "on" : "off");
else
- sprintf(result[j++], "%10s", "NA");
+ sprintf(result[j++], "%s", "NA");
base_add_h = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_BASEADDR_H_REG);
@@ -848,15 +864,15 @@ static void hns3_dump_rx_bd_info(struct hns3_nic_priv *priv,
{
unsigned int j = 0;
- sprintf(result[j++], "%5d", idx);
+ sprintf(result[j++], "%d", idx);
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.l234_info));
- sprintf(result[j++], "%7u", le16_to_cpu(desc->rx.pkt_len));
- sprintf(result[j++], "%4u", le16_to_cpu(desc->rx.size));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.pkt_len));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.size));
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.rss_hash));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->rx.fd_id));
- sprintf(result[j++], "%8u", le16_to_cpu(desc->rx.vlan_tag));
- sprintf(result[j++], "%15u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
- sprintf(result[j++], "%11u", le16_to_cpu(desc->rx.ot_vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.fd_id));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.o_dm_vlan_id_fb));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->rx.ot_vlan_tag));
sprintf(result[j++], "%#x", le32_to_cpu(desc->rx.bd_base_info));
if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
u32 ol_info = le32_to_cpu(desc->rx.ol_info);
@@ -930,19 +946,19 @@ static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv,
{
unsigned int j = 0;
- sprintf(result[j++], "%6d", idx);
+ sprintf(result[j++], "%d", idx);
sprintf(result[j++], "%#llx", le64_to_cpu(desc->addr));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.vlan_tag));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.send_size));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.send_size));
sprintf(result[j++], "%#x",
le32_to_cpu(desc->tx.type_cs_vlan_tso_len));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.outer_vlan_tag));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.tv));
- sprintf(result[j++], "%10u",
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.outer_vlan_tag));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.tv));
+ sprintf(result[j++], "%u",
le32_to_cpu(desc->tx.ol_type_vlan_len_msec));
sprintf(result[j++], "%#x", le32_to_cpu(desc->tx.paylen_ol4cs));
sprintf(result[j++], "%#x", le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri));
- sprintf(result[j++], "%5u", le16_to_cpu(desc->tx.mss_hw_csum));
+ sprintf(result[j++], "%u", le16_to_cpu(desc->tx.mss_hw_csum));
}
static int hns3_dbg_tx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
@@ -1083,7 +1099,7 @@ static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
sprintf(result[j++], "%u", index);
sprintf(result[j++], "%u",
READ_ONCE(ring->page_pool->pages_state_hold_cnt));
- sprintf(result[j++], "%u",
+ sprintf(result[j++], "%d",
atomic_read(&ring->page_pool->pages_state_release_cnt));
sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
sprintf(result[j++], "%u", ring->page_pool->p.order);
@@ -1227,7 +1243,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
return ret;
mutex_lock(&handle->dbgfs_lock);
- save_buf = &hns3_dbg_cmd[index].buf;
+ save_buf = &handle->dbgfs_buf[index];
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) {
@@ -1332,6 +1348,13 @@ int hns3_dbg_init(struct hnae3_handle *handle)
int ret;
u32 i;
+ handle->dbgfs_buf = devm_kcalloc(&handle->pdev->dev,
+ ARRAY_SIZE(hns3_dbg_cmd),
+ sizeof(*handle->dbgfs_buf),
+ GFP_KERNEL);
+ if (!handle->dbgfs_buf)
+ return -ENOMEM;
+
hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry =
debugfs_create_dir(name, hns3_dbgfs_root);
handle->hnae3_dbgfs = hns3_dbg_dentry[HNS3_DBG_DENTRY_COMMON].dentry;
@@ -1380,9 +1403,9 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
u32 i;
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
- if (hns3_dbg_cmd[i].buf) {
- kvfree(hns3_dbg_cmd[i].buf);
- hns3_dbg_cmd[i].buf = NULL;
+ if (handle->dbgfs_buf[i]) {
+ kvfree(handle->dbgfs_buf[i]);
+ handle->dbgfs_buf[i] = NULL;
}
mutex_destroy(&handle->dbgfs_lock);
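The read buffer for each debugfs command now lives in a per-handle dbgfs_buf[] array (allocated with devm_kcalloc() at init and kvfree()'d at uninit) rather than in a single char *buf field of the static hns3_dbg_cmd[] table, so two devices no longer share one buffer. A simplified sketch of how the read path uses the per-handle slot (locking, the fill step and copy_to_user() from the real hns3_dbg_read() are omitted, and the helper name is an assumption):

static char *example_get_dbg_buf(struct hnae3_handle *handle, u32 index)
{
	char **save_buf = &handle->dbgfs_buf[index];

	/* allocate lazily on first read, reuse afterwards */
	if (!*save_buf)
		*save_buf = kvzalloc(hns3_dbg_cmd[index].buf_len, GFP_KERNEL);

	return *save_buf;	/* NULL on allocation failure */
}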
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
index bd8801065e02..97578eabb7d8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
@@ -4,6 +4,8 @@
#ifndef __HNS3_DEBUGFS_H
#define __HNS3_DEBUGFS_H
+#include "hnae3.h"
+
#define HNS3_DBG_READ_LEN 65536
#define HNS3_DBG_READ_LEN_128KB 0x20000
#define HNS3_DBG_READ_LEN_1MB 0x100000
@@ -47,7 +49,6 @@ struct hns3_dbg_cmd_info {
enum hnae3_dbg_cmd cmd;
enum hns3_dbg_dentry_type dentry;
u32 buf_len;
- char *buf;
int (*init)(struct hnae3_handle *handle, unsigned int cmd);
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 9ccebbaa0d69..b676496ec6d7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -13,12 +13,13 @@
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/gre.h>
+#include <net/gro.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/geneve.h>
@@ -53,10 +54,6 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Network interface message level setting");
-static unsigned int tx_spare_buf_size;
-module_param(tx_spare_buf_size, uint, 0400);
-MODULE_PARM_DESC(tx_spare_buf_size, "Size used to allocate tx spare buffer");
-
static unsigned int tx_sgl = 1;
module_param(tx_sgl, uint, 0600);
MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
@@ -108,26 +105,28 @@ static const struct pci_device_id hns3_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
-#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t) \
+#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t, h) \
{ ptype, \
l, \
CHECKSUM_##s, \
HNS3_L3_TYPE_##t, \
- 1 }
+ 1, \
+ h}
#define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \
- { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0 }
+ { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0, \
+ PKT_HASH_TYPE_NONE }
static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
HNS3_RX_PTYPE_UNUSED_ENTRY(0),
- HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP),
- HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP),
- HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP),
- HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM),
- HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL),
+ HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
HNS3_RX_PTYPE_UNUSED_ENTRY(9),
HNS3_RX_PTYPE_UNUSED_ENTRY(10),
HNS3_RX_PTYPE_UNUSED_ENTRY(11),
@@ -135,36 +134,36 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
HNS3_RX_PTYPE_UNUSED_ENTRY(13),
HNS3_RX_PTYPE_UNUSED_ENTRY(14),
HNS3_RX_PTYPE_UNUSED_ENTRY(15),
- HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4),
- HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4),
- HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4),
- HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4),
+ HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
HNS3_RX_PTYPE_UNUSED_ENTRY(26),
HNS3_RX_PTYPE_UNUSED_ENTRY(27),
HNS3_RX_PTYPE_UNUSED_ENTRY(28),
- HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4),
+ HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
HNS3_RX_PTYPE_UNUSED_ENTRY(38),
- HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6),
+ HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
HNS3_RX_PTYPE_UNUSED_ENTRY(46),
HNS3_RX_PTYPE_UNUSED_ENTRY(47),
HNS3_RX_PTYPE_UNUSED_ENTRY(48),
@@ -230,35 +229,35 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = {
HNS3_RX_PTYPE_UNUSED_ENTRY(108),
HNS3_RX_PTYPE_UNUSED_ENTRY(109),
HNS3_RX_PTYPE_UNUSED_ENTRY(110),
- HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6),
- HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6),
- HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6),
- HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6),
+ HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
HNS3_RX_PTYPE_UNUSED_ENTRY(120),
HNS3_RX_PTYPE_UNUSED_ENTRY(121),
HNS3_RX_PTYPE_UNUSED_ENTRY(122),
- HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL),
- HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4),
- HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4),
- HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4),
+ HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE),
+ HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3),
HNS3_RX_PTYPE_UNUSED_ENTRY(132),
- HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6),
- HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6),
- HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6),
+ HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4),
+ HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
+ HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3),
HNS3_RX_PTYPE_UNUSED_ENTRY(140),
HNS3_RX_PTYPE_UNUSED_ENTRY(141),
HNS3_RX_PTYPE_UNUSED_ENTRY(142),
@@ -1005,9 +1004,7 @@ static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
return false;
if (ALIGN(len, dma_get_cache_alignment()) > space) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_spare_full++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_spare_full);
return false;
}
@@ -1024,9 +1021,7 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
return false;
if (space < HNS3_MAX_SGL_SIZE) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_spare_full++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_spare_full);
return false;
}
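The repeated u64_stats_update_begin()/counter++/u64_stats_update_end() sequences are being folded into hns3_ring_stats_update(). Its definition lives in hns3_enet.h and is not shown in this hunk; the likely shape, with the counter name selected by token pasting, is sketched below (treat it as an assumption rather than a quote of the header):

#define hns3_ring_stats_update(ring, cnt) do {	\
	typeof(ring) (tmp) = (ring);		\
	u64_stats_update_begin(&(tmp)->syncp);	\
	((tmp)->stats.cnt)++;			\
	u64_stats_update_end(&(tmp)->syncp);	\
} while (0)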
@@ -1035,47 +1030,56 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
+ u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
struct hns3_tx_spare *tx_spare;
struct page *page;
- u32 alloc_size;
dma_addr_t dma;
int order;
- alloc_size = tx_spare_buf_size ? tx_spare_buf_size :
- ring->tqp->handle->kinfo.tx_spare_buf_size;
if (!alloc_size)
return;
order = get_order(alloc_size);
+ if (order > MAX_ORDER) {
+ if (net_ratelimit())
+ dev_warn(ring_to_dev(ring), "failed to allocate tx spare buffer, exceed to max order\n");
+ return;
+ }
+
tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
GFP_KERNEL);
if (!tx_spare) {
/* The driver still work without the tx spare buffer */
dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n");
- return;
+ goto devm_kzalloc_error;
}
page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
GFP_KERNEL, order);
if (!page) {
dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n");
- devm_kfree(ring_to_dev(ring), tx_spare);
- return;
+ goto alloc_pages_error;
}
dma = dma_map_page(ring_to_dev(ring), page, 0,
PAGE_SIZE << order, DMA_TO_DEVICE);
if (dma_mapping_error(ring_to_dev(ring), dma)) {
dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n");
- put_page(page);
- devm_kfree(ring_to_dev(ring), tx_spare);
- return;
+ goto dma_mapping_error;
}
tx_spare->dma = dma;
tx_spare->buf = page_address(page);
tx_spare->len = PAGE_SIZE << order;
ring->tx_spare = tx_spare;
+ return;
+
+dma_mapping_error:
+ put_page(page);
+alloc_pages_error:
+ devm_kfree(ring_to_dev(ring), tx_spare);
+devm_kzalloc_error:
+ ring->tqp->handle->kinfo.tx_spare_buf_size = 0;
}
/* Use hns3_tx_spare_space() to make sure there is enough buffer
@@ -1306,7 +1310,7 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
if (!(!skb->encapsulation &&
(l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
l4.udp->dest == htons(GENEVE_UDP_PORT) ||
- l4.udp->dest == htons(4790))))
+ l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT))))
return false;
return true;
@@ -1359,44 +1363,9 @@ static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
HNS3_TUN_NVGRE);
}
-static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
- u8 il4_proto, u32 *type_cs_vlan_tso,
- u32 *ol_type_vlan_len_msec)
+static void hns3_set_l3_type(struct sk_buff *skb, union l3_hdr_info l3,
+ u32 *type_cs_vlan_tso)
{
- unsigned char *l2_hdr = skb->data;
- u32 l4_proto = ol4_proto;
- union l4_hdr_info l4;
- union l3_hdr_info l3;
- u32 l2_len, l3_len;
-
- l4.hdr = skb_transport_header(skb);
- l3.hdr = skb_network_header(skb);
-
- /* handle encapsulation skb */
- if (skb->encapsulation) {
- /* If this is a not UDP/GRE encapsulation skb */
- if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
- /* drop the skb tunnel packet if hardware don't support,
- * because hardware can't calculate csum when TSO.
- */
- if (skb_is_gso(skb))
- return -EDOM;
-
- /* the stack computes the IP header already,
- * driver calculate l4 checksum when not TSO.
- */
- return skb_checksum_help(skb);
- }
-
- hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
-
- /* switch to inner header */
- l2_hdr = skb_inner_mac_header(skb);
- l3.hdr = skb_inner_network_header(skb);
- l4.hdr = skb_inner_transport_header(skb);
- l4_proto = il4_proto;
- }
-
if (l3.v4->version == 4) {
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
HNS3_L3T_IPV4);
@@ -1410,15 +1379,11 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
HNS3_L3T_IPV6);
}
+}
- /* compute inner(/normal) L2 header size, defined in 2 Bytes */
- l2_len = l3.hdr - l2_hdr;
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
-
- /* compute inner(/normal) L3 header size, defined in 4 Bytes */
- l3_len = l4.hdr - l3.hdr;
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
-
+static int hns3_set_l4_csum_length(struct sk_buff *skb, union l4_hdr_info l4,
+ u32 l4_proto, u32 *type_cs_vlan_tso)
+{
/* compute inner(/normal) L4 header size, defined in 4 Bytes */
switch (l4_proto) {
case IPPROTO_TCP:
@@ -1464,6 +1429,57 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
return 0;
}
+static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ u8 il4_proto, u32 *type_cs_vlan_tso,
+ u32 *ol_type_vlan_len_msec)
+{
+ unsigned char *l2_hdr = skb->data;
+ u32 l4_proto = ol4_proto;
+ union l4_hdr_info l4;
+ union l3_hdr_info l3;
+ u32 l2_len, l3_len;
+
+ l4.hdr = skb_transport_header(skb);
+ l3.hdr = skb_network_header(skb);
+
+ /* handle encapsulation skb */
+ if (skb->encapsulation) {
+ /* If this is not a UDP/GRE encapsulation skb */
+ if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
+ /* drop the skb tunnel packet if hardware doesn't support it,
+ * because hardware can't calculate csum when TSO.
+ */
+ if (skb_is_gso(skb))
+ return -EDOM;
+
+ /* the stack computes the IP header already,
+ * the driver calculates the l4 checksum when not TSO.
+ */
+ return skb_checksum_help(skb);
+ }
+
+ hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
+
+ /* switch to inner header */
+ l2_hdr = skb_inner_mac_header(skb);
+ l3.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ l4_proto = il4_proto;
+ }
+
+ hns3_set_l3_type(skb, l3, type_cs_vlan_tso);
+
+ /* compute inner(/normal) L2 header size, defined in 2 Bytes */
+ l2_len = l3.hdr - l2_hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
+
+ /* compute inner(/normal) L3 header size, defined in 4 Bytes */
+ l3_len = l4.hdr - l3.hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
+
+ return hns3_set_l4_csum_length(skb, l4, l4_proto, type_cs_vlan_tso);
+}
+
static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
struct sk_buff *skb)
{
@@ -1516,7 +1532,7 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
if (unlikely(rc < 0))
return rc;
- vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr = skb_vlan_eth_hdr(skb);
vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
& VLAN_PRIO_MASK);
@@ -1540,92 +1556,122 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
return true;
}
-static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
- struct sk_buff *skb, struct hns3_desc *desc,
- struct hns3_desc_cb *desc_cb)
+struct hns3_desc_param {
+ u32 paylen_ol4cs;
+ u32 ol_type_vlan_len_msec;
+ u32 type_cs_vlan_tso;
+ u16 mss_hw_csum;
+ u16 inner_vtag;
+ u16 out_vtag;
+};
+
+static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa)
+{
+ pa->paylen_ol4cs = skb->len;
+ pa->ol_type_vlan_len_msec = 0;
+ pa->type_cs_vlan_tso = 0;
+ pa->mss_hw_csum = 0;
+ pa->inner_vtag = 0;
+ pa->out_vtag = 0;
+}
+
+static int hns3_handle_vlan_info(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ struct hns3_desc_param *param)
{
- u32 ol_type_vlan_len_msec = 0;
- u32 paylen_ol4cs = skb->len;
- u32 type_cs_vlan_tso = 0;
- u16 mss_hw_csum = 0;
- u16 inner_vtag = 0;
- u16 out_vtag = 0;
int ret;
ret = hns3_handle_vtags(ring, skb);
if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_vlan_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_vlan_err);
return ret;
} else if (ret == HNS3_INNER_VLAN_TAG) {
- inner_vtag = skb_vlan_tag_get(skb);
- inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ param->inner_vtag = skb_vlan_tag_get(skb);
+ param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
VLAN_PRIO_MASK;
- hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
+ hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
} else if (ret == HNS3_OUTER_VLAN_TAG) {
- out_vtag = skb_vlan_tag_get(skb);
- out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ param->out_vtag = skb_vlan_tag_get(skb);
+ param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
VLAN_PRIO_MASK;
- hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
+ hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
1);
}
+ return 0;
+}
- desc_cb->send_bytes = skb->len;
+static int hns3_handle_csum_partial(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ struct hns3_desc_cb *desc_cb,
+ struct hns3_desc_param *param)
+{
+ u8 ol4_proto, il4_proto;
+ int ret;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 ol4_proto, il4_proto;
-
- if (hns3_check_hw_tx_csum(skb)) {
- /* set checksum start and offset, defined in 2 Bytes */
- hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
- skb_checksum_start_offset(skb) >> 1);
- hns3_set_field(ol_type_vlan_len_msec,
- HNS3_TXD_CSUM_OFFSET_S,
- skb->csum_offset >> 1);
- mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
- goto out_hw_tx_csum;
- }
+ if (hns3_check_hw_tx_csum(skb)) {
+ /* set checksum start and offset, defined in 2 Bytes */
+ hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
+ skb_checksum_start_offset(skb) >> 1);
+ hns3_set_field(param->ol_type_vlan_len_msec,
+ HNS3_TXD_CSUM_OFFSET_S,
+ skb->csum_offset >> 1);
+ param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
+ return 0;
+ }
- skb_reset_mac_len(skb);
+ skb_reset_mac_len(skb);
- ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
- if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_l4_proto_err++;
- u64_stats_update_end(&ring->syncp);
- return ret;
- }
+ ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_l4_proto_err);
+ return ret;
+ }
- ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
- &type_cs_vlan_tso,
- &ol_type_vlan_len_msec);
- if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_l2l3l4_err++;
- u64_stats_update_end(&ring->syncp);
- return ret;
- }
+ ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+ &param->type_cs_vlan_tso,
+ &param->ol_type_vlan_len_msec);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_l2l3l4_err);
+ return ret;
+ }
- ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
- &type_cs_vlan_tso, &desc_cb->send_bytes);
- if (unlikely(ret < 0)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_tso_err++;
- u64_stats_update_end(&ring->syncp);
+ ret = hns3_set_tso(skb, &param->paylen_ol4cs, &param->mss_hw_csum,
+ &param->type_cs_vlan_tso, &desc_cb->send_bytes);
+ if (unlikely(ret < 0)) {
+ hns3_ring_stats_update(ring, tx_tso_err);
+ return ret;
+ }
+ return 0;
+}
+
+static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, struct hns3_desc *desc,
+ struct hns3_desc_cb *desc_cb)
+{
+ struct hns3_desc_param param;
+ int ret;
+
+ hns3_init_desc_data(skb, &param);
+ ret = hns3_handle_vlan_info(ring, skb, &param);
+ if (unlikely(ret < 0))
+ return ret;
+
+ desc_cb->send_bytes = skb->len;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ret = hns3_handle_csum_partial(ring, skb, desc_cb, &param);
+ if (ret)
return ret;
- }
}
-out_hw_tx_csum:
/* Set txbd */
desc->tx.ol_type_vlan_len_msec =
- cpu_to_le32(ol_type_vlan_len_msec);
- desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
- desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs);
- desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum);
- desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
- desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
+ cpu_to_le32(param.ol_type_vlan_len_msec);
+ desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso);
+ desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs);
+ desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum);
+ desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag);
+ desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag);
return 0;
}
@@ -1705,9 +1751,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
}
if (unlikely(dma_mapping_error(dev, dma))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return -ENOMEM;
}
@@ -1796,9 +1840,9 @@ static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
{
if (!skb->encapsulation)
- return skb_transport_offset(skb) + tcp_hdrlen(skb);
+ return skb_tcp_all_headers(skb);
- return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+ return skb_inner_tcp_all_headers(skb);
}
/* HW need every continuous max_non_tso_bd_num buffer data to be larger
@@ -1853,9 +1897,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
* recursion level of over HNS3_MAX_RECURSION_LEVEL.
*/
if (bd_num == UINT_MAX) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.over_max_recursion++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, over_max_recursion);
return -ENOMEM;
}
@@ -1864,16 +1906,12 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
*/
if (skb->len > HNS3_MAX_TSO_SIZE ||
(!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.hw_limitation++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, hw_limitation);
return -ENOMEM;
}
if (__skb_linearize(skb)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return -ENOMEM;
}
@@ -1903,9 +1941,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
bd_num = hns3_tx_bd_count(skb->len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_copy++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_copy);
}
out:
@@ -1925,9 +1961,7 @@ out:
return bd_num;
}
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_busy++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_busy);
return -EBUSY;
}
@@ -2006,23 +2040,86 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
return bd_num;
}
+static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
+{
+#define HNS3_BYTES_PER_64BIT 8
+
+ struct hns3_desc desc[HNS3_MAX_PUSH_BD_NUM] = {};
+ int offset = 0;
+
+ /* make sure everything is visible to device before
+ * executing tx push or updating doorbell
+ */
+ dma_wmb();
+
+ do {
+ int idx = (ring->next_to_use - num + ring->desc_num) %
+ ring->desc_num;
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_push++;
+ u64_stats_update_end(&ring->syncp);
+ memcpy(&desc[offset], &ring->desc[idx],
+ sizeof(struct hns3_desc));
+ offset++;
+ } while (--num);
+
+ __iowrite64_copy(ring->tqp->mem_base, desc,
+ (sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
+ HNS3_BYTES_PER_64BIT);
+
+ io_stop_wc();
+}
+
+static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
+{
+#define HNS3_MEM_DOORBELL_OFFSET 64
+
+ __le64 bd_num = cpu_to_le64((u64)ring->pending_buf);
+
+ /* make sure everything is visible to device before
+ * executing tx push or updating doorbell
+ */
+ dma_wmb();
+
+ __iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET,
+ &bd_num, 1);
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_mem_doorbell += ring->pending_buf;
+ u64_stats_update_end(&ring->syncp);
+
+ io_stop_wc();
+}
+
static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
bool doorbell)
{
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ /* when tx push is enabled, a packet whose BD count is below
+ * HNS3_MAX_PUSH_BD_NUM can be pushed directly.
+ */
+ if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
+ !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
+ hns3_tx_push_bd(ring, num);
+ WRITE_ONCE(ring->last_to_use, ring->next_to_use);
+ return;
+ }
+
ring->pending_buf += num;
if (!doorbell) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_more++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_more);
return;
}
- if (!ring->pending_buf)
- return;
+ if (ring->tqp->mem_base)
+ hns3_tx_mem_doorbell(ring);
+ else
+ writel(ring->pending_buf,
+ ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
- writel(ring->pending_buf,
- ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
ring->pending_buf = 0;
WRITE_ONCE(ring->last_to_use, ring->next_to_use);
}
@@ -2064,9 +2161,7 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
ret = skb_copy_bits(skb, 0, buf, size);
if (unlikely(ret < 0)) {
hns3_tx_spare_rollback(ring, cb_len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.copy_bits_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, copy_bits_err);
return ret;
}
@@ -2089,9 +2184,8 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
dma_sync_single_for_device(ring_to_dev(ring), dma, size,
DMA_TO_DEVICE);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_bounce++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_bounce);
+
return bd_num;
}
@@ -2121,9 +2215,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
if (unlikely(nents < 0)) {
hns3_tx_spare_rollback(ring, cb_len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.skb2sgl_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, skb2sgl_err);
return -ENOMEM;
}
@@ -2132,9 +2224,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
DMA_TO_DEVICE);
if (unlikely(!sgt->nents)) {
hns3_tx_spare_rollback(ring, cb_len);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.map_sg_err++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, map_sg_err);
return -ENOMEM;
}
@@ -2146,10 +2236,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
for (i = 0; i < sgt->nents; i++)
bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
sg_dma_len(sgt->sgl + i));
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_sgl++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, tx_sgl);
return bd_num;
}
@@ -2174,23 +2261,45 @@ out:
return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
}
+static int hns3_handle_skb_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ struct hns3_desc_cb *desc_cb,
+ int next_to_use_head)
+{
+ int ret;
+
+ ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
+ desc_cb);
+ if (unlikely(ret < 0))
+ goto fill_err;
+
+ /* 'ret < 0' means filling error, 'ret == 0' means skb->len is
+ * zero, which is unlikely, and 'ret > 0' means how many tx desc
+ * need to be notified to the hw.
+ */
+ ret = hns3_handle_desc_filling(ring, skb);
+ if (likely(ret > 0))
+ return ret;
+
+fill_err:
+ hns3_clear_desc(ring, next_to_use_head);
+ return ret;
+}
+
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct netdev_queue *dev_queue;
- int pre_ntu, next_to_use_head;
+ int pre_ntu, ret;
bool doorbell;
- int ret;
/* Hardware can only handle short frames above 32 bytes */
if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return NETDEV_TX_OK;
}
@@ -2209,20 +2318,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
goto out_err_tx_ok;
}
- next_to_use_head = ring->next_to_use;
-
- ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
- desc_cb);
- if (unlikely(ret < 0))
- goto fill_err;
-
- /* 'ret < 0' means filling error, 'ret == 0' means skb->len is
- * zero, which is unlikely, and 'ret > 0' means how many tx desc
- * need to be notified to the hw.
- */
- ret = hns3_handle_desc_filling(ring, skb);
+ ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use);
if (unlikely(ret <= 0))
- goto fill_err;
+ goto out_err_tx_ok;
pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
(ring->desc_num - 1);
@@ -2244,9 +2342,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
-fill_err:
- hns3_clear_desc(ring, next_to_use_head);
-
out_err_tx_ok:
dev_kfree_skb_any(skb);
hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
@@ -2255,6 +2350,8 @@ out_err_tx_ok:
static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
+ char format_mac_addr_perm[HNAE3_FORMAT_MAC_ADDR_LEN];
+ char format_mac_addr_sa[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hnae3_handle *h = hns3_get_handle(netdev);
struct sockaddr *mac_addr = p;
int ret;
@@ -2263,8 +2360,9 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
- netdev_info(netdev, "already using mac address %pM\n",
- mac_addr->sa_data);
+ hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
+ netdev_info(netdev, "already using mac address %s\n",
+ format_mac_addr_sa);
return 0;
}
@@ -2273,8 +2371,10 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
*/
if (!hns3_is_phys_func(h->pdev) &&
!is_zero_ether_addr(netdev->perm_addr)) {
- netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n",
- netdev->perm_addr, mac_addr->sa_data);
+ hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr);
+ hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data);
+ netdev_err(netdev, "has permanent MAC %s, user MAC %s not allow\n",
+ format_mac_addr_perm, format_mac_addr_sa);
return -EPERM;
}
@@ -2382,90 +2482,89 @@ static netdev_features_t hns3_features_check(struct sk_buff *skb,
return features;
}
+static void hns3_fetch_stats(struct rtnl_link_stats64 *stats,
+ struct hns3_enet_ring *ring, bool is_tx)
+{
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&ring->syncp);
+ if (is_tx) {
+ stats->tx_bytes += ring->stats.tx_bytes;
+ stats->tx_packets += ring->stats.tx_pkts;
+ stats->tx_dropped += ring->stats.sw_err_cnt;
+ stats->tx_dropped += ring->stats.tx_vlan_err;
+ stats->tx_dropped += ring->stats.tx_l4_proto_err;
+ stats->tx_dropped += ring->stats.tx_l2l3l4_err;
+ stats->tx_dropped += ring->stats.tx_tso_err;
+ stats->tx_dropped += ring->stats.over_max_recursion;
+ stats->tx_dropped += ring->stats.hw_limitation;
+ stats->tx_dropped += ring->stats.copy_bits_err;
+ stats->tx_dropped += ring->stats.skb2sgl_err;
+ stats->tx_dropped += ring->stats.map_sg_err;
+ stats->tx_errors += ring->stats.sw_err_cnt;
+ stats->tx_errors += ring->stats.tx_vlan_err;
+ stats->tx_errors += ring->stats.tx_l4_proto_err;
+ stats->tx_errors += ring->stats.tx_l2l3l4_err;
+ stats->tx_errors += ring->stats.tx_tso_err;
+ stats->tx_errors += ring->stats.over_max_recursion;
+ stats->tx_errors += ring->stats.hw_limitation;
+ stats->tx_errors += ring->stats.copy_bits_err;
+ stats->tx_errors += ring->stats.skb2sgl_err;
+ stats->tx_errors += ring->stats.map_sg_err;
+ } else {
+ stats->rx_bytes += ring->stats.rx_bytes;
+ stats->rx_packets += ring->stats.rx_pkts;
+ stats->rx_dropped += ring->stats.l2_err;
+ stats->rx_errors += ring->stats.l2_err;
+ stats->rx_errors += ring->stats.l3l4_csum_err;
+ stats->rx_crc_errors += ring->stats.l2_err;
+ stats->multicast += ring->stats.rx_multicast;
+ stats->rx_length_errors += ring->stats.err_pkt_len;
+ }
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
+}
+
static void hns3_nic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
int queue_num = priv->ae_handle->kinfo.num_tqps;
struct hnae3_handle *handle = priv->ae_handle;
+ struct rtnl_link_stats64 ring_total_stats;
struct hns3_enet_ring *ring;
- u64 rx_length_errors = 0;
- u64 rx_crc_errors = 0;
- u64 rx_multicast = 0;
- unsigned int start;
- u64 tx_errors = 0;
- u64 rx_errors = 0;
unsigned int idx;
- u64 tx_bytes = 0;
- u64 rx_bytes = 0;
- u64 tx_pkts = 0;
- u64 rx_pkts = 0;
- u64 tx_drop = 0;
- u64 rx_drop = 0;
if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
return;
handle->ae_algo->ops->update_stats(handle, &netdev->stats);
+ memset(&ring_total_stats, 0, sizeof(ring_total_stats));
for (idx = 0; idx < queue_num; idx++) {
/* fetch the tx stats */
ring = &priv->ring[idx];
- do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
- tx_bytes += ring->stats.tx_bytes;
- tx_pkts += ring->stats.tx_pkts;
- tx_drop += ring->stats.sw_err_cnt;
- tx_drop += ring->stats.tx_vlan_err;
- tx_drop += ring->stats.tx_l4_proto_err;
- tx_drop += ring->stats.tx_l2l3l4_err;
- tx_drop += ring->stats.tx_tso_err;
- tx_drop += ring->stats.over_max_recursion;
- tx_drop += ring->stats.hw_limitation;
- tx_drop += ring->stats.copy_bits_err;
- tx_drop += ring->stats.skb2sgl_err;
- tx_drop += ring->stats.map_sg_err;
- tx_errors += ring->stats.sw_err_cnt;
- tx_errors += ring->stats.tx_vlan_err;
- tx_errors += ring->stats.tx_l4_proto_err;
- tx_errors += ring->stats.tx_l2l3l4_err;
- tx_errors += ring->stats.tx_tso_err;
- tx_errors += ring->stats.over_max_recursion;
- tx_errors += ring->stats.hw_limitation;
- tx_errors += ring->stats.copy_bits_err;
- tx_errors += ring->stats.skb2sgl_err;
- tx_errors += ring->stats.map_sg_err;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ hns3_fetch_stats(&ring_total_stats, ring, true);
/* fetch the rx stats */
ring = &priv->ring[idx + queue_num];
- do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
- rx_bytes += ring->stats.rx_bytes;
- rx_pkts += ring->stats.rx_pkts;
- rx_drop += ring->stats.l2_err;
- rx_errors += ring->stats.l2_err;
- rx_errors += ring->stats.l3l4_csum_err;
- rx_crc_errors += ring->stats.l2_err;
- rx_multicast += ring->stats.rx_multicast;
- rx_length_errors += ring->stats.err_pkt_len;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
- }
-
- stats->tx_bytes = tx_bytes;
- stats->tx_packets = tx_pkts;
- stats->rx_bytes = rx_bytes;
- stats->rx_packets = rx_pkts;
-
- stats->rx_errors = rx_errors;
- stats->multicast = rx_multicast;
- stats->rx_length_errors = rx_length_errors;
- stats->rx_crc_errors = rx_crc_errors;
+ hns3_fetch_stats(&ring_total_stats, ring, false);
+ }
+
+ stats->tx_bytes = ring_total_stats.tx_bytes;
+ stats->tx_packets = ring_total_stats.tx_packets;
+ stats->rx_bytes = ring_total_stats.rx_bytes;
+ stats->rx_packets = ring_total_stats.rx_packets;
+
+ stats->rx_errors = ring_total_stats.rx_errors;
+ stats->multicast = ring_total_stats.multicast;
+ stats->rx_length_errors = ring_total_stats.rx_length_errors;
+ stats->rx_crc_errors = ring_total_stats.rx_crc_errors;
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
- stats->tx_errors = tx_errors;
- stats->rx_dropped = rx_drop;
- stats->tx_dropped = tx_drop;
+ stats->tx_errors = ring_total_stats.tx_errors;
+ stats->rx_dropped = ring_total_stats.rx_dropped;
+ stats->tx_dropped = ring_total_stats.tx_dropped;
stats->collisions = netdev->stats.collisions;
stats->rx_over_errors = netdev->stats.rx_over_errors;
stats->rx_frame_errors = netdev->stats.rx_frame_errors;
@@ -2658,18 +2757,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
return ret;
}
-static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
+static int hns3_get_timeout_queue(struct net_device *ndev)
{
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = hns3_get_handle(ndev);
- struct hns3_enet_ring *tx_ring;
- struct napi_struct *napi;
- int timeout_queue = 0;
- int hw_head, hw_tail;
- int fbd_num, fbd_oft;
- int ebd_num, ebd_oft;
- int bd_num, bd_err;
- int ring_en, tc;
int i;
/* Find the stopped queue the same way the stack does */
@@ -2678,11 +2767,17 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
unsigned long trans_start;
q = netdev_get_tx_queue(ndev, i);
- trans_start = q->trans_start;
+ trans_start = READ_ONCE(q->trans_start);
if (netif_xmit_stopped(q) &&
time_after(jiffies,
(trans_start + ndev->watchdog_timeo))) {
- timeout_queue = i;
+#ifdef CONFIG_BQL
+ struct dql *dql = &q->dql;
+
+ netdev_info(ndev, "DQL info last_cnt: %u, queued: %u, adj_limit: %u, completed: %u\n",
+ dql->last_obj_cnt, dql->num_queued,
+ dql->adj_limit, dql->num_completed);
+#endif
netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
q->state,
jiffies_to_msecs(jiffies - trans_start));
@@ -2690,17 +2785,15 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
}
}
- if (i == ndev->num_tx_queues) {
- netdev_info(ndev,
- "no netdev TX timeout queue found, timeout count: %llu\n",
- priv->tx_timeout_count);
- return false;
- }
-
- priv->tx_timeout_count++;
+ return i;
+}
- tx_ring = &priv->ring[timeout_queue];
- napi = &tx_ring->tqp_vector->napi;
+static void hns3_dump_queue_stats(struct net_device *ndev,
+ struct hns3_enet_ring *tx_ring,
+ int timeout_queue)
+{
+ struct napi_struct *napi = &tx_ring->tqp_vector->napi;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
netdev_info(ndev,
"tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
@@ -2717,6 +2810,51 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
+ netdev_info(ndev, "tx_push: %llu, tx_mem_doorbell: %llu\n",
+ tx_ring->stats.tx_push, tx_ring->stats.tx_mem_doorbell);
+}
+
+static void hns3_dump_queue_reg(struct net_device *ndev,
+ struct hns3_enet_ring *tx_ring)
+{
+ netdev_info(ndev,
+ "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_NUM_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_HEAD_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TAIL_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_BD_ERR_REG),
+ readl(tx_ring->tqp_vector->mask_addr));
+ netdev_info(ndev,
+ "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_EN_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_TC_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_FBDNUM_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_OFFSET_REG),
+ hns3_tqp_read_reg(tx_ring, HNS3_RING_TX_RING_EBDNUM_REG),
+ hns3_tqp_read_reg(tx_ring,
+ HNS3_RING_TX_RING_EBD_OFFSET_REG));
+}
+
+static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+ struct hns3_enet_ring *tx_ring;
+ int timeout_queue;
+
+ timeout_queue = hns3_get_timeout_queue(ndev);
+ if (timeout_queue >= ndev->num_tx_queues) {
+ netdev_info(ndev,
+ "no netdev TX timeout queue found, timeout count: %llu\n",
+ priv->tx_timeout_count);
+ return false;
+ }
+
+ priv->tx_timeout_count++;
+
+ tx_ring = &priv->ring[timeout_queue];
+ hns3_dump_queue_stats(ndev, tx_ring, timeout_queue);
+
/* When mac received many pause frames continuous, it's unable to send
* packets, which may cause tx timeout
*/
@@ -2728,32 +2866,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
}
- hw_head = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_HEAD_REG);
- hw_tail = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_TAIL_REG);
- fbd_num = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_FBDNUM_REG);
- fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_OFFSET_REG);
- ebd_num = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_EBDNUM_REG);
- ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_EBD_OFFSET_REG);
- bd_num = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_BD_NUM_REG);
- bd_err = readl_relaxed(tx_ring->tqp->io_base +
- HNS3_RING_TX_RING_BD_ERR_REG);
- ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
- tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
-
- netdev_info(ndev,
- "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
- bd_num, hw_head, hw_tail, bd_err,
- readl(tx_ring->tqp_vector->mask_addr));
- netdev_info(ndev,
- "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
- ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
+ hns3_dump_queue_reg(ndev, tx_ring);
return true;
}
@@ -2836,20 +2949,64 @@ static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
if (!h->ae_algo->ops->set_vf_mac)
return -EOPNOTSUPP;
if (is_multicast_ether_addr(mac)) {
+ hnae3_format_mac_addr(format_mac_addr, mac);
netdev_err(netdev,
- "Invalid MAC:%pM specified. Could not set MAC\n",
- mac);
+ "Invalid MAC:%s specified. Could not set MAC\n",
+ format_mac_addr);
return -EINVAL;
}
return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
}
+#define HNS3_INVALID_DSCP 0xff
+#define HNS3_DSCP_SHIFT 2
+
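+/* Extract the DSCP value (the upper six bits of the DS field) from an IPv4
+ * or IPv6 header, looking past a VLAN tag if present; other protocols
+ * yield HNS3_INVALID_DSCP.
+ */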
+static u8 hns3_get_skb_dscp(struct sk_buff *skb)
+{
+ __be16 protocol = skb->protocol;
+ u8 dscp = HNS3_INVALID_DSCP;
+
+ if (protocol == htons(ETH_P_8021Q))
+ protocol = vlan_get_protocol(skb);
+
+ if (protocol == htons(ETH_P_IP))
+ dscp = ipv4_get_dsfield(ip_hdr(skb)) >> HNS3_DSCP_SHIFT;
+ else if (protocol == htons(ETH_P_IPV6))
+ dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> HNS3_DSCP_SHIFT;
+
+ return dscp;
+}
+
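+/* When DSCP-to-TC mapping is enabled, derive the skb priority from the
+ * packet's DSCP before falling back to the default netdev_pick_tx()
+ * selection.
+ */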
+static u16 hns3_nic_select_queue(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u8 dscp;
+
+ if (h->kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP ||
+ !h->ae_algo->ops->get_dscp_prio)
+ goto out;
+
+ dscp = hns3_get_skb_dscp(skb);
+ if (unlikely(dscp >= HNAE3_MAX_DSCP))
+ goto out;
+
+ skb->priority = h->kinfo.dscp_prio[dscp];
+ if (skb->priority == HNAE3_PRIO_ID_INVALID)
+ skb->priority = 0;
+
+out:
+ return netdev_pick_tx(netdev, skb, sb_dev);
+}
+
static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_open = hns3_nic_net_open,
.ndo_stop = hns3_nic_net_stop,
@@ -2875,6 +3032,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_set_vf_link_state = hns3_nic_set_vf_link_state,
.ndo_set_vf_rate = hns3_nic_set_vf_rate,
.ndo_set_vf_mac = hns3_nic_set_vf_mac,
+ .ndo_select_queue = hns3_nic_select_queue,
};
bool hns3_is_phys_func(struct pci_dev *pdev)
@@ -2947,6 +3105,21 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
}
+/**
+ * hns3_clean_vf_config
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs allocated
+ *
+ * Clean residual VF configuration after SR-IOV has been disabled
+ **/
+static void hns3_clean_vf_config(struct pci_dev *pdev, int num_vfs)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ if (ae_dev->ops->clean_vf_config)
+ ae_dev->ops->clean_vf_config(ae_dev, num_vfs);
+}
+
/* hns3_remove - Device removal routine
* @pdev: PCI device information struct
*/
@@ -2985,7 +3158,10 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
else
return num_vfs;
} else if (!pci_vfs_assigned(pdev)) {
+ int num_vfs_pre = pci_num_vf(pdev);
+
pci_disable_sriov(pdev);
+ hns3_clean_vf_config(pdev, num_vfs_pre);
} else {
dev_warn(&pdev->dev,
"Unable to free VFs because some are assigned to VMs.\n");
@@ -3140,12 +3316,11 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ if (hnae3_ae_dev_gro_supported(ae_dev))
netdev->features |= NETIF_F_GRO_HW;
- if (!(h->flags & HNAE3_SUPPORT_VF))
- netdev->features |= NETIF_F_NTUPLE;
- }
+ if (hnae3_ae_dev_fd_supported(ae_dev))
+ netdev->features |= NETIF_F_NTUPLE;
if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps))
netdev->features |= NETIF_F_GSO_UDP_L4;
@@ -3497,17 +3672,13 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
for (i = 0; i < cleand_count; i++) {
desc_cb = &ring->desc_cb[ring->next_to_use];
if (desc_cb->reuse_flag) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.reuse_pg_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, reuse_pg_cnt);
hns3_reuse_buffer(ring, ring->next_to_use);
} else {
ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
if (ret) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
hns3_rl_err(ring_to_netdev(ring),
"alloc rx buffer failed: %d\n",
@@ -3519,9 +3690,7 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.non_reuse_pg++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, non_reuse_pg);
}
ring_ptr_move_fw(ring, next_to_use);
@@ -3536,6 +3705,34 @@ static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
return page_count(cb->priv) == cb->pagecnt_bias;
}
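+/* Copy-break path for small RX payloads: copy the data into a freshly
+ * allocated page fragment so the original ring buffer can be reused in
+ * place.
+ */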
+static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
+ struct hns3_enet_ring *ring,
+ int pull_len,
+ struct hns3_desc_cb *desc_cb)
+{
+ struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+ u32 frag_offset = desc_cb->page_offset + pull_len;
+ int size = le16_to_cpu(desc->rx.size);
+ u32 frag_size = size - pull_len;
+ void *frag = napi_alloc_frag(frag_size);
+
+ if (unlikely(!frag)) {
+ hns3_ring_stats_update(ring, frag_alloc_err);
+
+ hns3_rl_err(ring_to_netdev(ring),
+ "failed to allocate rx frag\n");
+ return -ENOMEM;
+ }
+
+ desc_cb->reuse_flag = 1;
+ memcpy(frag, desc_cb->buf + frag_offset, frag_size);
+ skb_add_rx_frag(skb, i, virt_to_page(frag),
+ offset_in_page(frag), frag_size, frag_size);
+
+ hns3_ring_stats_update(ring, frag_alloc);
+ return 0;
+}
+
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
@@ -3545,6 +3742,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
int size = le16_to_cpu(desc->rx.size);
u32 truesize = hns3_buf_size(ring);
u32 frag_size = size - pull_len;
+ int ret = 0;
bool reused;
if (ring->page_pool) {
@@ -3579,27 +3777,9 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
desc_cb->page_offset = 0;
desc_cb->reuse_flag = 1;
} else if (frag_size <= ring->rx_copybreak) {
- void *frag = napi_alloc_frag(frag_size);
-
- if (unlikely(!frag)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.frag_alloc_err++;
- u64_stats_update_end(&ring->syncp);
-
- hns3_rl_err(ring_to_netdev(ring),
- "failed to allocate rx frag\n");
- goto out;
- }
-
- desc_cb->reuse_flag = 1;
- memcpy(frag, desc_cb->buf + frag_offset, frag_size);
- skb_add_rx_frag(skb, i, virt_to_page(frag),
- offset_in_page(frag), frag_size, frag_size);
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.frag_alloc++;
- u64_stats_update_end(&ring->syncp);
- return;
+ ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb);
+ if (!ret)
+ return;
}
out:
@@ -3675,20 +3855,16 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
return 0;
}
-static bool hns3_checksum_complete(struct hns3_enet_ring *ring,
+static void hns3_checksum_complete(struct hns3_enet_ring *ring,
struct sk_buff *skb, u32 ptype, u16 csum)
{
if (ptype == HNS3_INVALID_PTYPE ||
hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
- return false;
+ return;
- u64_stats_update_begin(&ring->syncp);
- ring->stats.csum_complete++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, csum_complete);
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)csum);
-
- return true;
}
static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info,
@@ -3748,8 +3924,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
HNS3_RXD_PTYPE_S);
- if (hns3_checksum_complete(ring, skb, ptype, csum))
- return;
+ hns3_checksum_complete(ring, skb, ptype, csum);
/* check if hardware has done checksum */
if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
@@ -3758,9 +3933,8 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
BIT(HNS3_RXD_OL3E_B) |
BIT(HNS3_RXD_OL4E_B)))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.l3l4_csum_err++;
- u64_stats_update_end(&ring->syncp);
+ skb->ip_summed = CHECKSUM_NONE;
+ hns3_ring_stats_update(ring, l3l4_csum_err);
return;
}
@@ -3851,10 +4025,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
skb = ring->skb;
if (unlikely(!skb)) {
hns3_rl_err(netdev, "alloc rx skb fail\n");
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
return -ENOMEM;
}
@@ -3885,9 +4056,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
if (ring->page_pool)
skb_mark_for_recycle(skb);
- u64_stats_update_begin(&ring->syncp);
- ring->stats.seg_pkt_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, seg_pkt_cnt);
ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
__skb_put(skb, ring->pull_len);
@@ -4002,19 +4171,72 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
}
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
- struct sk_buff *skb, u32 rss_hash)
+ struct sk_buff *skb, u32 rss_hash,
+ u32 l234info, u32 ol_info)
{
- struct hnae3_handle *handle = ring->tqp->handle;
- enum pkt_hash_types rss_type;
+ enum pkt_hash_types rss_type = PKT_HASH_TYPE_NONE;
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
- if (rss_hash)
- rss_type = handle->kinfo.rss_type;
- else
- rss_type = PKT_HASH_TYPE_NONE;
+ if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) {
+ u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
+ HNS3_RXD_PTYPE_S);
+
+ rss_type = hns3_rx_ptype_tbl[ptype].hash_type;
+ } else {
+ int l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ int l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
+ HNS3_RXD_L4ID_S);
+
+ if (l3_type == HNS3_L3_TYPE_IPV4 ||
+ l3_type == HNS3_L3_TYPE_IPV6) {
+ if (l4_type == HNS3_L4_TYPE_UDP ||
+ l4_type == HNS3_L4_TYPE_TCP ||
+ l4_type == HNS3_L4_TYPE_SCTP)
+ rss_type = PKT_HASH_TYPE_L4;
+ else if (l4_type == HNS3_L4_TYPE_IGMP ||
+ l4_type == HNS3_L4_TYPE_ICMP)
+ rss_type = PKT_HASH_TYPE_L3;
+ }
+ }
skb_set_hash(skb, rss_hash, rss_type);
}
+static void hns3_handle_rx_ts_info(struct net_device *netdev,
+ struct hns3_desc *desc, struct sk_buff *skb,
+ u32 bd_base_info)
+{
+ if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u32 nsec = le32_to_cpu(desc->ts_nsec);
+ u32 sec = le32_to_cpu(desc->ts_sec);
+
+ if (h->ae_algo->ops->get_rx_hwts)
+ h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
+ }
+}
+
+static void hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring,
+ struct hns3_desc *desc, struct sk_buff *skb,
+ u32 l234info)
+{
+ struct net_device *netdev = ring_to_netdev(ring);
+
+ /* Based on the hardware strategy, the offloaded tag is stored in
+ * ot_vlan_tag for a double-tagged packet and in vlan_tag for a
+ * single-tagged packet.
+ */
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ u16 vlan_tag;
+
+ if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+}
+
static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
struct net_device *netdev = ring_to_netdev(ring);
@@ -4037,26 +4259,9 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
ol_info = le32_to_cpu(desc->rx.ol_info);
csum = le16_to_cpu(desc->csum);
- if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
- struct hnae3_handle *h = hns3_get_handle(netdev);
- u32 nsec = le32_to_cpu(desc->ts_nsec);
- u32 sec = le32_to_cpu(desc->ts_sec);
-
- if (h->ae_algo->ops->get_rx_hwts)
- h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
- }
-
- /* Based on hw strategy, the tag offloaded will be stored at
- * ot_vlan_tag in two layer tag case, and stored at vlan_tag
- * in one layer tag case.
- */
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
- u16 vlan_tag;
+ hns3_handle_rx_ts_info(netdev, desc, skb, bd_base_info);
- if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- vlan_tag);
- }
+ hns3_handle_rx_vlan_tag(ring, desc, skb, l234info);
if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
BIT(HNS3_RXD_L2E_B))))) {
@@ -4079,9 +4284,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
ret = hns3_set_gro_and_checksum(ring, skb, l234info,
bd_base_info, ol_info, csum);
if (unlikely(ret)) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.rx_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, rx_err_cnt);
return ret;
}
@@ -4099,7 +4302,8 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
ring->tqp_vector->rx_group.total_bytes += len;
- hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
+ hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash),
+ l234info, ol_info);
return 0;
}
@@ -4297,87 +4501,70 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
return rx_pkt_total;
}
-static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
- struct hnae3_ring_chain_node *head)
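+/* Append one chain node per ring in the vector's TX or RX group, linking
+ * new nodes at the tail of the existing chain (or starting a new chain).
+ */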
+static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hnae3_ring_chain_node **head,
+ bool is_tx)
{
+ u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX;
+ u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX;
+ struct hnae3_ring_chain_node *cur_chain = *head;
struct pci_dev *pdev = tqp_vector->handle->pdev;
- struct hnae3_ring_chain_node *cur_chain = head;
struct hnae3_ring_chain_node *chain;
- struct hns3_enet_ring *tx_ring;
- struct hns3_enet_ring *rx_ring;
-
- tx_ring = tqp_vector->tx_group.ring;
- if (tx_ring) {
- cur_chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
-
- cur_chain->next = NULL;
-
- while (tx_ring->next) {
- tx_ring = tx_ring->next;
-
- chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
- GFP_KERNEL);
- if (!chain)
- goto err_free_chain;
-
- cur_chain->next = chain;
- chain->tqp_index = tx_ring->tqp->tqp_index;
- hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_TX);
- hnae3_set_field(chain->int_gl_idx,
- HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S,
- HNAE3_RING_GL_TX);
-
- cur_chain = chain;
- }
- }
+ struct hns3_enet_ring *ring;
- rx_ring = tqp_vector->rx_group.ring;
- if (!tx_ring && rx_ring) {
- cur_chain->next = NULL;
- cur_chain->tqp_index = rx_ring->tqp->tqp_index;
- hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring;
- rx_ring = rx_ring->next;
+ if (cur_chain) {
+ while (cur_chain->next)
+ cur_chain = cur_chain->next;
}
- while (rx_ring) {
+ while (ring) {
chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
if (!chain)
- goto err_free_chain;
-
- cur_chain->next = chain;
- chain->tqp_index = rx_ring->tqp->tqp_index;
+ return -ENOMEM;
+ if (cur_chain)
+ cur_chain->next = chain;
+ else
+ *head = chain;
+ chain->tqp_index = ring->tqp->tqp_index;
hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
- HNAE3_RING_TYPE_RX);
- hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+ bit_value);
+ hnae3_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, field_value);
cur_chain = chain;
- rx_ring = rx_ring->next;
+ ring = ring->next;
}
return 0;
+}
+
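+/* Build the TX-then-RX ring chain for a vector; on allocation failure the
+ * partially built chain is freed and NULL is returned.
+ */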
+static struct hnae3_ring_chain_node *
+hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector)
+{
+ struct pci_dev *pdev = tqp_vector->handle->pdev;
+ struct hnae3_ring_chain_node *cur_chain = NULL;
+ struct hnae3_ring_chain_node *chain;
+
+ if (hns3_create_ring_chain(tqp_vector, &cur_chain, true))
+ goto err_free_chain;
+
+ if (hns3_create_ring_chain(tqp_vector, &cur_chain, false))
+ goto err_free_chain;
+
+ return cur_chain;
err_free_chain:
- cur_chain = head->next;
while (cur_chain) {
chain = cur_chain->next;
devm_kfree(&pdev->dev, cur_chain);
cur_chain = chain;
}
- head->next = NULL;
- return -ENOMEM;
+ return NULL;
}
static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
@@ -4386,7 +4573,7 @@ static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
struct pci_dev *pdev = tqp_vector->handle->pdev;
struct hnae3_ring_chain_node *chain_tmp, *chain;
- chain = head->next;
+ chain = head;
while (chain) {
chain_tmp = chain->next;
@@ -4501,7 +4688,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
}
for (i = 0; i < priv->vector_num; i++) {
- struct hnae3_ring_chain_node vector_ring_chain;
+ struct hnae3_ring_chain_node *vector_ring_chain;
tqp_vector = &priv->tqp_vector[i];
@@ -4511,21 +4698,22 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
tqp_vector->tx_group.total_packets = 0;
tqp_vector->handle = h;
- ret = hns3_get_vector_ring_chain(tqp_vector,
- &vector_ring_chain);
- if (ret)
+ vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
+ if (!vector_ring_chain) {
+ ret = -ENOMEM;
goto map_ring_fail;
+ }
ret = h->ae_algo->ops->map_ring_to_vector(h,
- tqp_vector->vector_irq, &vector_ring_chain);
+ tqp_vector->vector_irq, vector_ring_chain);
- hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+ hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
if (ret)
goto map_ring_fail;
netif_napi_add(priv->netdev, &tqp_vector->napi,
- hns3_nic_common_poll, NAPI_POLL_WEIGHT);
+ hns3_nic_common_poll);
}
return 0;
@@ -4618,7 +4806,7 @@ static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
- struct hnae3_ring_chain_node vector_ring_chain;
+ struct hnae3_ring_chain_node *vector_ring_chain;
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
int i;
@@ -4633,13 +4821,14 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
* chain between vector and ring, we should go on to deal with
* the remaining options.
*/
- if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain))
+ vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
+ if (!vector_ring_chain)
dev_warn(priv->dev, "failed to get ring chain\n");
h->ae_algo->ops->unmap_ring_from_vector(h,
- tqp_vector->vector_irq, &vector_ring_chain);
+ tqp_vector->vector_irq, vector_ring_chain);
- hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+ hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
hns3_clear_ring_group(&tqp_vector->rx_group);
hns3_clear_ring_group(&tqp_vector->tx_group);
@@ -4934,6 +5123,7 @@ static void hns3_uninit_all_ring(struct hns3_nic_priv *priv)
static int hns3_init_mac_addr(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hnae3_handle *h = priv->ae_handle;
u8 mac_addr_temp[ETH_ALEN];
int ret = 0;
@@ -4944,8 +5134,9 @@ static int hns3_init_mac_addr(struct net_device *netdev)
/* Check if the MAC address is valid, if not get a random one */
if (!is_valid_ether_addr(mac_addr_temp)) {
eth_hw_addr_random(netdev);
- dev_warn(priv->dev, "using random MAC address %pM\n",
- netdev->dev_addr);
+ hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr);
+ dev_warn(priv->dev, "using random MAC address %s\n",
+ format_mac_addr);
} else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
eth_hw_addr_set(netdev, mac_addr_temp);
ether_addr_copy(netdev->perm_addr, mac_addr_temp);
@@ -4997,8 +5188,10 @@ static void hns3_client_stop(struct hnae3_handle *handle)
static void hns3_info_show(struct hns3_nic_priv *priv)
{
struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
- dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
+ hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr);
+ dev_info(priv->dev, "MAC address: %s\n", format_mac_addr);
dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
@@ -5029,10 +5222,7 @@ static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv,
priv->tqp_vector[i].rx_group.dim.mode = mode;
}
- /* only device version above V3(include V3), GL can switch CQ/EQ
- * period mode.
- */
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
+ if (hnae3_ae_dev_cq_supported(ae_dev)) {
u32 new_mode;
u64 reg;
@@ -5060,6 +5250,9 @@ static void hns3_state_init(struct hnae3_handle *handle)
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
@@ -5070,6 +5263,13 @@ static void hns3_state_init(struct hnae3_handle *handle)
set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state);
}
+static void hns3_state_uninit(struct hnae3_handle *handle)
+{
+ struct hns3_nic_priv *priv = handle->priv;
+
+ clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
+}
+
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
@@ -5187,7 +5387,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
return ret;
out_reg_netdev_fail:
+ hns3_state_uninit(handle);
hns3_dbg_uninit(handle);
+ hns3_client_stop(handle);
out_client_start:
hns3_free_rx_cpu_rmap(netdev);
hns3_nic_uninit_irq(priv);
@@ -5287,9 +5489,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
if (ret) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
+ hns3_ring_stats_update(ring, sw_err_cnt);
/* if alloc new buffer fail, exit directly
* and reclear in up flow.
*/
@@ -5531,8 +5731,8 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
return 0;
}
-static int hns3_reset_notify(struct hnae3_handle *handle,
- enum hnae3_reset_notify_type type)
+int hns3_reset_notify(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type)
{
int ret = 0;
@@ -5645,6 +5845,57 @@ int hns3_set_channels(struct net_device *netdev,
return 0;
}
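+/* Quiesce the netdev before the external loopback test: stop TX, disable
+ * the vectors and TQPs, and reset the rings unless a reset is already in
+ * progress.
+ */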
+void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ if (!if_running)
+ return;
+
+ netif_carrier_off(ndev);
+ netif_tx_disable(ndev);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_disable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ /* Delay ring buffer clearing to hns3_reset_notify_uninit_enet during
+ * the reset process, because the driver may not be able to disable
+ * the ring through firmware when downing the netdev.
+ */
+ if (!hns3_nic_resetting(ndev))
+ hns3_nic_reset_all_ring(priv->ae_handle);
+
+ hns3_reset_tx_queue(priv->ae_handle);
+}
+
+void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ if (!if_running)
+ return;
+
+ hns3_nic_reset_all_ring(priv->ae_handle);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_enable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_enable(h->kinfo.tqp[i]);
+
+ netif_tx_wake_all_queues(ndev);
+
+ if (h->ae_algo->ops->get_status(h))
+ netif_carrier_on(ndev);
+}
+
static const struct hns3_hw_error_info hns3_hw_err[] = {
{ .type = HNAE3_PPU_POISON_ERROR,
.msg = "PPU poison" },
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 1715c98d906d..88af34bbee34 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -7,9 +7,13 @@
#include <linux/dim.h>
#include <linux/if_vlan.h>
#include <net/page_pool.h>
+#include <asm/barrier.h>
#include "hnae3.h"
+struct iphdr;
+struct ipv6hdr;
+
enum hns3_nic_state {
HNS3_NIC_STATE_TESTING,
HNS3_NIC_STATE_RESETTING,
@@ -22,9 +26,12 @@ enum hns3_nic_state {
HNS3_NIC_STATE2_RESET_REQUESTED,
HNS3_NIC_STATE_HW_TX_CSUM_ENABLE,
HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE,
+ HNS3_NIC_STATE_TX_PUSH_ENABLE,
HNS3_NIC_STATE_MAX
};
+#define HNS3_MAX_PUSH_BD_NUM 2
+
#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000
#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004
#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008
@@ -397,6 +404,7 @@ struct hns3_rx_ptype {
u32 ip_summed : 2;
u32 l3_type : 4;
u32 valid : 1;
+ u32 hash_type: 3;
};
struct ring_stats {
@@ -407,6 +415,8 @@ struct ring_stats {
u64 tx_pkts;
u64 tx_bytes;
u64 tx_more;
+ u64 tx_push;
+ u64 tx_mem_doorbell;
u64 restart_queue;
u64 tx_busy;
u64 tx_copy;
@@ -621,6 +631,11 @@ static inline int ring_space(struct hns3_enet_ring *ring)
(begin - end)) - 1;
}
+static inline u32 hns3_tqp_read_reg(struct hns3_enet_ring *ring, u32 reg)
+{
+ return readl_relaxed(ring->tqp->io_base + reg);
+}
+
static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
{
return readl(base + reg);
@@ -655,6 +670,13 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define hns3_buf_size(_ring) ((_ring)->buf_size)
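+/* Bump a per-ring statistics counter inside a u64_stats update section. */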
+#define hns3_ring_stats_update(ring, cnt) do { \
+ typeof(ring) (tmp) = (ring); \
+ u64_stats_update_begin(&(tmp)->syncp); \
+ ((tmp)->stats.cnt)++; \
+ u64_stats_update_end(&(tmp)->syncp); \
+} while (0)
+
static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
{
#if (PAGE_SIZE < 8192)
@@ -673,6 +695,12 @@ static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
+#define hns3_get_ae_dev(handle) \
+ (pci_get_drvdata((handle)->pdev))
+
+#define hns3_get_ops(handle) \
+ ((handle)->ae_algo->ops)
+
#define hns3_gl_usec_to_reg(int_gl) ((int_gl) >> 1)
#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)
@@ -705,6 +733,8 @@ void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
u32 ql_value);
void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
+int hns3_reset_notify(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type);
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
@@ -721,4 +751,7 @@ u16 hns3_get_max_available_channels(struct hnae3_handle *h);
void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
enum dim_cq_period_mode tx_mode,
enum dim_cq_period_mode rx_mode);
+
+void hns3_external_lb_prepare(struct net_device *ndev, bool if_running);
+void hns3_external_lb_restore(struct net_device *ndev, bool if_running);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index c9b4568d7a8d..51d1278b18f6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -23,6 +23,8 @@ static const struct hns3_stats hns3_txq_stats[] = {
HNS3_TQP_STAT("packets", tx_pkts),
HNS3_TQP_STAT("bytes", tx_bytes),
HNS3_TQP_STAT("more", tx_more),
+ HNS3_TQP_STAT("push", tx_push),
+ HNS3_TQP_STAT("mem_doorbell", tx_mem_doorbell),
HNS3_TQP_STAT("wake", restart_queue),
HNS3_TQP_STAT("busy", tx_busy),
HNS3_TQP_STAT("copy", tx_copy),
@@ -67,7 +69,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
-#define HNS3_SELF_TEST_TYPE_NUM 4
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
@@ -93,6 +94,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
case HNAE3_LOOP_PARALLEL_SERDES:
case HNAE3_LOOP_APP:
case HNAE3_LOOP_PHY:
+ case HNAE3_LOOP_EXTERNAL:
ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
default:
@@ -302,6 +304,10 @@ out:
static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
{
+ st_param[HNAE3_LOOP_EXTERNAL][0] = HNAE3_LOOP_EXTERNAL;
+ st_param[HNAE3_LOOP_EXTERNAL][1] =
+ h->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
+
st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
st_param[HNAE3_LOOP_APP][1] =
h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
@@ -320,17 +326,11 @@ static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
}
-static void hns3_selftest_prepare(struct net_device *ndev,
- bool if_running, int (*st_param)[2])
+static void hns3_selftest_prepare(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- if (netif_msg_ifdown(h))
- netdev_info(ndev, "self test start\n");
-
- hns3_set_selftest_param(h, st_param);
-
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
@@ -369,18 +369,15 @@ static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
if (if_running)
ndev->netdev_ops->ndo_open(ndev);
-
- if (netif_msg_ifdown(h))
- netdev_info(ndev, "self test end\n");
}
static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
struct ethtool_test *eth_test, u64 *data)
{
- int test_index = 0;
+ int test_index = HNAE3_LOOP_APP;
u32 i;
- for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
+ for (i = HNAE3_LOOP_APP; i < HNAE3_LOOP_NONE; i++) {
enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
if (!st_param[i][1])
@@ -399,6 +396,20 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
}
}
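+/* Run the external loopback test: bring the loopback up, run the packet
+ * test only if setup succeeded, then bring it down and record the result.
+ */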
+static void hns3_do_external_lb(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_up(ndev, HNAE3_LOOP_EXTERNAL);
+ if (!data[HNAE3_LOOP_EXTERNAL])
+ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_run_test(ndev, HNAE3_LOOP_EXTERNAL);
+ hns3_lp_down(ndev, HNAE3_LOOP_EXTERNAL);
+
+ if (data[HNAE3_LOOP_EXTERNAL])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+}
+
/**
* hns3_self_test - self test
* @ndev: net device
@@ -408,7 +419,9 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
static void hns3_self_test(struct net_device *ndev,
struct ethtool_test *eth_test, u64 *data)
{
- int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int st_param[HNAE3_LOOP_NONE][2];
bool if_running = netif_running(ndev);
if (hns3_nic_resetting(ndev)) {
@@ -416,13 +429,29 @@ static void hns3_self_test(struct net_device *ndev,
return;
}
- /* Only do offline selftest, or pass by default */
- if (eth_test->flags != ETH_TEST_FL_OFFLINE)
+ if (!(eth_test->flags & ETH_TEST_FL_OFFLINE))
return;
- hns3_selftest_prepare(ndev, if_running, st_param);
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test start\n");
+
+ hns3_set_selftest_param(h, st_param);
+
+ /* The external loopback test requires the link to be up at full duplex;
+ * run it first to reduce the overall test time.
+ */
+ if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ hns3_external_lb_prepare(ndev, if_running);
+ hns3_do_external_lb(ndev, eth_test, data);
+ hns3_external_lb_restore(ndev, if_running);
+ }
+
+ hns3_selftest_prepare(ndev, if_running);
hns3_do_selftest(ndev, st_param, eth_test, data);
hns3_selftest_restore(ndev, if_running);
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test end\n");
}
static void hns3_update_limit_promisc_mode(struct net_device *netdev,
@@ -610,13 +639,11 @@ static void hns3_get_drvinfo(struct net_device *netdev,
return;
}
- strncpy(drvinfo->driver, dev_driver_string(&h->pdev->dev),
+ strscpy(drvinfo->driver, dev_driver_string(&h->pdev->dev),
sizeof(drvinfo->driver));
- drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
- strncpy(drvinfo->bus_info, pci_name(h->pdev),
+ strscpy(drvinfo->bus_info, pci_name(h->pdev),
sizeof(drvinfo->bus_info));
- drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h);
@@ -643,14 +670,16 @@ static u32 hns3_get_link(struct net_device *netdev)
}
static void hns3_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
- int queue_num = h->kinfo.num_tqps;
+ int rx_queue_index = h->kinfo.num_tqps;
- if (hns3_nic_resetting(netdev)) {
- netdev_err(netdev, "dev resetting!");
+ if (hns3_nic_resetting(netdev) || !priv->ring) {
+ netdev_err(netdev, "failed to get ringparam value, due to dev resetting or uninited\n");
return;
}
@@ -658,7 +687,10 @@ static void hns3_get_ringparam(struct net_device *netdev,
param->rx_max_pending = HNS3_RING_MAX_PENDING;
param->tx_pending = priv->ring[0].desc_num;
- param->rx_pending = priv->ring[queue_num].desc_num;
+ param->rx_pending = priv->ring[rx_queue_index].desc_num;
+ kernel_param->rx_buf_len = priv->ring[rx_queue_index].buf_size;
+ kernel_param->tx_push = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE,
+ &priv->state);
}
static void hns3_get_pauseparam(struct net_device *netdev,
@@ -705,7 +737,8 @@ static void hns3_get_ksettings(struct hnae3_handle *h,
ops->get_ksettings_an_result(h,
&cmd->base.autoneg,
&cmd->base.speed,
- &cmd->base.duplex);
+ &cmd->base.duplex,
+ &cmd->lanes);
/* 2.get link mode */
if (ops->get_link_mode)
@@ -787,6 +820,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
+ u32 lane_num;
u8 autoneg;
u32 speed;
u8 duplex;
@@ -799,9 +833,9 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
return 0;
if (ops->get_ksettings_an_result) {
- ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex);
+ ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex, &lane_num);
if (cmd->base.autoneg == autoneg && cmd->base.speed == speed &&
- cmd->base.duplex == duplex)
+ cmd->base.duplex == duplex && cmd->lanes == lane_num)
return 0;
}
@@ -838,10 +872,14 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
+ if (cmd->lanes && !hnae3_ae_dev_lane_num_supported(ae_dev))
+ return -EOPNOTSUPP;
+
netif_dbg(handle, drv, netdev,
- "set link(%s): autoneg=%u, speed=%u, duplex=%u\n",
+ "set link(%s): autoneg=%u, speed=%u, duplex=%u, lanes=%u\n",
netdev->phydev ? "phy" : "mac",
- cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex,
+ cmd->lanes);
/* Only support ksettings_set for netdev with phy attached for now */
if (netdev->phydev) {
@@ -879,7 +917,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
if (ops->cfg_mac_speed_dup_h)
ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed,
- cmd->base.duplex);
+ cmd->base.duplex, (u8)(cmd->lanes));
return ret;
}
@@ -1064,14 +1102,29 @@ static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
}
static int hns3_check_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param)
{
- if (hns3_nic_resetting(ndev))
+#define RX_BUF_LEN_2K 2048
+#define RX_BUF_LEN_4K 4096
+
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+
+ if (hns3_nic_resetting(ndev) || !priv->ring) {
+ netdev_err(ndev, "failed to set ringparam value, due to dev resetting or uninited\n");
return -EBUSY;
+ }
+
if (param->rx_mini_pending || param->rx_jumbo_pending)
return -EINVAL;
+ if (kernel_param->rx_buf_len != RX_BUF_LEN_2K &&
+ kernel_param->rx_buf_len != RX_BUF_LEN_4K) {
+ netdev_err(ndev, "Rx buf len only support 2048 and 4096\n");
+ return -EINVAL;
+ }
+
if (param->tx_pending > HNS3_RING_MAX_PENDING ||
param->tx_pending < HNS3_RING_MIN_PENDING ||
param->rx_pending > HNS3_RING_MAX_PENDING ||
@@ -1084,54 +1137,126 @@ static int hns3_check_ringparam(struct net_device *ndev,
return 0;
}
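+/* Align the requested descriptor counts to the BD multiple and report
+ * whether the descriptor numbers or the RX buffer length differ from the
+ * current settings.
+ */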
+static bool
+hns3_is_ringparam_changed(struct net_device *ndev,
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct hns3_ring_param *old_ringparam,
+ struct hns3_ring_param *new_ringparam)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 queue_num = h->kinfo.num_tqps;
+
+ new_ringparam->tx_desc_num = ALIGN(param->tx_pending,
+ HNS3_RING_BD_MULTIPLE);
+ new_ringparam->rx_desc_num = ALIGN(param->rx_pending,
+ HNS3_RING_BD_MULTIPLE);
+ old_ringparam->tx_desc_num = priv->ring[0].desc_num;
+ old_ringparam->rx_desc_num = priv->ring[queue_num].desc_num;
+ old_ringparam->rx_buf_len = priv->ring[queue_num].buf_size;
+ new_ringparam->rx_buf_len = kernel_param->rx_buf_len;
+
+ if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num &&
+ old_ringparam->rx_desc_num == new_ringparam->rx_desc_num &&
+ old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) {
+ netdev_info(ndev, "descriptor number and rx buffer length not changed\n");
+ return false;
+ }
+
+ return true;
+}
+
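+/* Propagate the new RX buffer length to the handle, each TQP and each RX
+ * ring; it takes effect when the rings are re-initialised.
+ */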
+static int hns3_change_rx_buf_len(struct net_device *ndev, u32 rx_buf_len)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ h->kinfo.rx_buf_len = rx_buf_len;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ h->kinfo.tqp[i]->buf_size = rx_buf_len;
+ priv->ring[i + h->kinfo.num_tqps].buf_size = rx_buf_len;
+ }
+
+ return 0;
+}
+
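+/* Update the TX push state bit; enabling it is rejected when the device
+ * does not advertise TX push support.
+ */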
+static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ u32 old_state = test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps) && tx_push)
+ return -EOPNOTSUPP;
+
+ if (tx_push == old_state)
+ return 0;
+
+ netdev_dbg(netdev, "Changing tx push from %s to %s\n",
+ old_state ? "on" : "off", tx_push ? "on" : "off");
+
+ if (tx_push)
+ set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+ else
+ clear_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state);
+
+ return 0;
+}
+
static int hns3_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
+ struct hns3_ring_param old_ringparam, new_ringparam;
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_ring *tmp_rings;
bool if_running = netif_running(ndev);
- u32 old_tx_desc_num, new_tx_desc_num;
- u32 old_rx_desc_num, new_rx_desc_num;
- u16 queue_num = h->kinfo.num_tqps;
int ret, i;
- ret = hns3_check_ringparam(ndev, param);
+ ret = hns3_check_ringparam(ndev, param, kernel_param);
if (ret)
return ret;
- /* Hardware requires that its descriptors must be multiple of eight */
- new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
- new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
- old_tx_desc_num = priv->ring[0].desc_num;
- old_rx_desc_num = priv->ring[queue_num].desc_num;
- if (old_tx_desc_num == new_tx_desc_num &&
- old_rx_desc_num == new_rx_desc_num)
+ ret = hns3_set_tx_push(ndev, kernel_param->tx_push);
+ if (ret)
+ return ret;
+
+ if (!hns3_is_ringparam_changed(ndev, param, kernel_param,
+ &old_ringparam, &new_ringparam))
return 0;
tmp_rings = hns3_backup_ringparam(priv);
if (!tmp_rings) {
- netdev_err(ndev,
- "backup ring param failed by allocating memory fail\n");
+ netdev_err(ndev, "backup ring param failed by allocating memory fail\n");
return -ENOMEM;
}
netdev_info(ndev,
- "Changing Tx/Rx ring depth from %u/%u to %u/%u\n",
- old_tx_desc_num, old_rx_desc_num,
- new_tx_desc_num, new_rx_desc_num);
+ "Changing Tx/Rx ring depth from %u/%u to %u/%u, Changing rx buffer len from %u to %u\n",
+ old_ringparam.tx_desc_num, old_ringparam.rx_desc_num,
+ new_ringparam.tx_desc_num, new_ringparam.rx_desc_num,
+ old_ringparam.rx_buf_len, new_ringparam.rx_buf_len);
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
- hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
+ hns3_change_all_ring_bd_num(priv, new_ringparam.tx_desc_num,
+ new_ringparam.rx_desc_num);
+ hns3_change_rx_buf_len(ndev, new_ringparam.rx_buf_len);
ret = hns3_init_all_ring(priv);
if (ret) {
- netdev_err(ndev, "Change bd num fail, revert to old value(%d)\n",
+ netdev_err(ndev, "set ringparam fail, revert to old value(%d)\n",
ret);
- hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
- old_rx_desc_num);
+ hns3_change_rx_buf_len(ndev, old_ringparam.rx_buf_len);
+ hns3_change_all_ring_bd_num(priv, old_ringparam.tx_desc_num,
+ old_ringparam.rx_desc_num);
for (i = 0; i < h->kinfo.num_tqps * 2; i++)
memcpy(&priv->ring[i], &tmp_rings[i],
sizeof(struct hns3_enet_ring));
@@ -1341,11 +1466,33 @@ static int hns3_check_ql_coalesce_param(struct net_device *netdev,
return 0;
}
-static int hns3_check_coalesce_para(struct net_device *netdev,
- struct ethtool_coalesce *cmd)
+static int
+hns3_check_cqe_coalesce_param(struct net_device *netdev,
+ struct kernel_ethtool_coalesce *kernel_coal)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+
+ if ((kernel_coal->use_cqe_mode_tx || kernel_coal->use_cqe_mode_rx) &&
+ !hnae3_ae_dev_cq_supported(ae_dev)) {
+ netdev_err(netdev, "coalesced cqe mode is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+hns3_check_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ struct kernel_ethtool_coalesce *kernel_coal)
{
int ret;
+ ret = hns3_check_cqe_coalesce_param(netdev, kernel_coal);
+ if (ret)
+ return ret;
+
ret = hns3_check_gl_coalesce_para(netdev, cmd);
if (ret) {
netdev_err(netdev,
@@ -1420,7 +1567,7 @@ static int hns3_set_coalesce(struct net_device *netdev,
if (hns3_nic_resetting(netdev))
return -EBUSY;
- ret = hns3_check_coalesce_para(netdev, cmd);
+ ret = hns3_check_coalesce_para(netdev, cmd, kernel_coal);
if (ret)
return ret;
@@ -1496,6 +1643,19 @@ static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
h->msg_enable = msg_level;
}
+static void hns3_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || !ops->get_fec_stats)
+ return;
+
+ ops->get_fec_stats(handle, fec_stats);
+}
+
/* Translate local fec value into ethtool value. */
static unsigned int loc_to_eth_fec(u8 loc_fec)
{
@@ -1505,12 +1665,12 @@ static unsigned int loc_to_eth_fec(u8 loc_fec)
eth_fec |= ETHTOOL_FEC_AUTO;
if (loc_fec & BIT(HNAE3_FEC_RS))
eth_fec |= ETHTOOL_FEC_RS;
+ if (loc_fec & BIT(HNAE3_FEC_LLRS))
+ eth_fec |= ETHTOOL_FEC_LLRS;
if (loc_fec & BIT(HNAE3_FEC_BASER))
eth_fec |= ETHTOOL_FEC_BASER;
-
- /* if nothing is set, then FEC is off */
- if (!eth_fec)
- eth_fec = ETHTOOL_FEC_OFF;
+ if (loc_fec & BIT(HNAE3_FEC_NONE))
+ eth_fec |= ETHTOOL_FEC_OFF;
return eth_fec;
}
@@ -1521,12 +1681,13 @@ static unsigned int eth_to_loc_fec(unsigned int eth_fec)
u32 loc_fec = 0;
if (eth_fec & ETHTOOL_FEC_OFF)
- return loc_fec;
-
+ loc_fec |= BIT(HNAE3_FEC_NONE);
if (eth_fec & ETHTOOL_FEC_AUTO)
loc_fec |= BIT(HNAE3_FEC_AUTO);
if (eth_fec & ETHTOOL_FEC_RS)
loc_fec |= BIT(HNAE3_FEC_RS);
+ if (eth_fec & ETHTOOL_FEC_LLRS)
+ loc_fec |= BIT(HNAE3_FEC_LLRS);
if (eth_fec & ETHTOOL_FEC_BASER)
loc_fec |= BIT(HNAE3_FEC_BASER);
@@ -1552,6 +1713,8 @@ static int hns3_get_fecparam(struct net_device *netdev,
fec->fec = loc_to_eth_fec(fec_ability);
fec->active_fec = loc_to_eth_fec(fec_mode);
+ if (!fec->active_fec)
+ fec->active_fec = ETHTOOL_FEC_OFF;
return 0;
}
@@ -1699,6 +1862,7 @@ static int hns3_get_tunable(struct net_device *netdev,
void *data)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
int ret = 0;
switch (tuna->id) {
@@ -1709,6 +1873,9 @@ static int hns3_get_tunable(struct net_device *netdev,
case ETHTOOL_RX_COPYBREAK:
*(u32 *)data = priv->rx_copybreak;
break;
+ case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
+ *(u32 *)data = h->kinfo.tx_spare_buf_size;
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -1717,14 +1884,48 @@ static int hns3_get_tunable(struct net_device *netdev,
return ret;
}
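+/* Apply a new TX spare buffer size by cycling the client through
+ * down/uninit/init/up so that the spare buffer is re-allocated.
+ */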
+static int hns3_set_tx_spare_buf_size(struct net_device *netdev,
+ u32 data)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int ret;
+
+ h->kinfo.tx_spare_buf_size = data;
+
+ ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_UP_CLIENT);
+ if (ret)
+ hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
+
+ return ret;
+}
+
static int hns3_set_tunable(struct net_device *netdev,
const struct ethtool_tunable *tuna,
const void *data)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ u32 old_tx_spare_buf_size, new_tx_spare_buf_size;
struct hnae3_handle *h = priv->ae_handle;
int i, ret = 0;
+ if (hns3_nic_resetting(netdev) || !priv->ring) {
+ netdev_err(netdev, "failed to set tunable value, dev resetting!");
+ return -EBUSY;
+ }
+
switch (tuna->id) {
case ETHTOOL_TX_COPYBREAK:
priv->tx_copybreak = *(u32 *)data;
@@ -1740,6 +1941,34 @@ static int hns3_set_tunable(struct net_device *netdev,
priv->ring[i].rx_copybreak = priv->rx_copybreak;
break;
+ case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
+ old_tx_spare_buf_size = h->kinfo.tx_spare_buf_size;
+ new_tx_spare_buf_size = *(u32 *)data;
+ netdev_info(netdev, "request to set tx spare buf size from %u to %u\n",
+ old_tx_spare_buf_size, new_tx_spare_buf_size);
+ ret = hns3_set_tx_spare_buf_size(netdev, new_tx_spare_buf_size);
+ if (ret ||
+ (!priv->ring->tx_spare && new_tx_spare_buf_size != 0)) {
+ int ret1;
+
+ netdev_warn(netdev, "change tx spare buf size fail, revert to old value\n");
+ ret1 = hns3_set_tx_spare_buf_size(netdev,
+ old_tx_spare_buf_size);
+ if (ret1) {
+ netdev_err(netdev, "revert to old tx spare buf size fail\n");
+ return ret1;
+ }
+
+ return ret;
+ }
+
+ if (!priv->ring->tx_spare)
+ netdev_info(netdev, "the active tx spare buf size is 0, disable tx spare buffer\n");
+ else
+ netdev_info(netdev, "the active tx spare buf size is %u, due to page order\n",
+ priv->ring->tx_spare->len);
+
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -1755,6 +1984,9 @@ static int hns3_set_tunable(struct net_device *netdev,
ETHTOOL_COALESCE_MAX_FRAMES | \
ETHTOOL_COALESCE_USE_CQE)
+#define HNS3_ETHTOOL_RING (ETHTOOL_RING_USE_RX_BUF_LEN | \
+ ETHTOOL_RING_USE_TX_PUSH)
+
static int hns3_get_ts_info(struct net_device *netdev,
struct ethtool_ts_info *info)
{
@@ -1831,8 +2063,34 @@ static int hns3_get_link_ext_state(struct net_device *netdev,
return -ENODATA;
}
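+/* Wake-on-LAN ethtool callbacks; only functional when the ae_dev reports
+ * WoL support.
+ */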
+static void hns3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+
+ if (!hnae3_ae_dev_wol_supported(ae_dev))
+ return;
+
+ ops->get_wol(handle, wol);
+}
+
+static int hns3_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = hns3_get_ops(handle);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(handle);
+
+ if (!hnae3_ae_dev_wol_supported(ae_dev))
+ return -EOPNOTSUPP;
+
+ return ops->set_wol(handle, wol);
+}
+
static const struct ethtool_ops hns3vf_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
+ .supported_ring_params = HNS3_ETHTOOL_RING,
.get_drvinfo = hns3_get_drvinfo,
.get_ringparam = hns3_get_ringparam,
.set_ringparam = hns3_set_ringparam,
@@ -1864,6 +2122,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
static const struct ethtool_ops hns3_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
+ .supported_ring_params = HNS3_ETHTOOL_RING,
+ .cap_link_lanes_supported = true,
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
@@ -1894,6 +2154,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.set_msglevel = hns3_set_msglevel,
.get_fecparam = hns3_get_fecparam,
.set_fecparam = hns3_set_fecparam,
+ .get_fec_stats = hns3_get_fec_stats,
.get_module_info = hns3_get_module_info,
.get_module_eeprom = hns3_get_module_eeprom,
.get_priv_flags = hns3_get_priv_flags,
@@ -1903,6 +2164,8 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.set_tunable = hns3_set_tunable,
.reset = hns3_set_reset,
.get_link_ext_state = hns3_get_link_ext_state,
+ .get_wol = hns3_get_wol,
+ .set_wol = hns3_set_wol,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
index 822d6fcbc73b..da207d1d9aa9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h
@@ -28,4 +28,10 @@ struct hns3_ethtool_link_ext_state_mapping {
u8 link_ext_substate;
};
+struct hns3_ring_param {
+ u32 tx_desc_num;
+ u32 rx_desc_num;
+ u32 rx_buf_len;
+};
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
index 5153e5d41bbd..b8a1ecb4b8fb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
@@ -37,8 +37,7 @@ DECLARE_EVENT_CLASS(hns3_skb_template,
__entry->gso_segs = skb_shinfo(skb)->gso_segs;
__entry->gso_type = skb_shinfo(skb)->gso_type;
__entry->hdr_len = skb->encapsulation ?
- skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb) :
- skb_transport_offset(skb) + tcp_hdrlen(skb);
+ skb_inner_tcp_all_headers(skb) : skb_tcp_all_headers(skb);
__entry->ip_summed = skb->ip_summed;
__entry->fraglist = skb_has_frag_list(skb);
hns3_shinfo_pack(skb_shinfo(skb), __entry->size);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
deleted file mode 100644
index d1bf5c4c0abb..000000000000
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0+
-#
-# Makefile for the HISILICON network device drivers.
-#
-
-ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
-ccflags-y += -I $(srctree)/$(src)
-
-obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o hclge_devlink.o
-
-hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
deleted file mode 100644
index c5d5466810bb..000000000000
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ /dev/null
@@ -1,591 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-// Copyright (c) 2016-2017 Hisilicon Limited.
-
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/dma-direction.h>
-#include "hclge_cmd.h"
-#include "hnae3.h"
-#include "hclge_main.h"
-
-#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
-
-static int hclge_ring_space(struct hclge_cmq_ring *ring)
-{
- int ntu = ring->next_to_use;
- int ntc = ring->next_to_clean;
- int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
-
- return ring->desc_num - used - 1;
-}
-
-static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
-{
- int ntu = ring->next_to_use;
- int ntc = ring->next_to_clean;
-
- if (ntu > ntc)
- return head >= ntc && head <= ntu;
-
- return head >= ntc || head <= ntu;
-}
-
-static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
-{
- int size = ring->desc_num * sizeof(struct hclge_desc);
-
- ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
- &ring->desc_dma_addr, GFP_KERNEL);
- if (!ring->desc)
- return -ENOMEM;
-
- return 0;
-}
-
-static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
-{
- int size = ring->desc_num * sizeof(struct hclge_desc);
-
- if (ring->desc) {
- dma_free_coherent(cmq_ring_to_dev(ring), size,
- ring->desc, ring->desc_dma_addr);
- ring->desc = NULL;
- }
-}
-
-static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
-{
- struct hclge_hw *hw = &hdev->hw;
- struct hclge_cmq_ring *ring =
- (ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
- int ret;
-
- ring->ring_type = ring_type;
- ring->dev = hdev;
-
- ret = hclge_alloc_cmd_desc(ring);
- if (ret) {
- dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
- (ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
- return ret;
- }
-
- return 0;
-}
-
-void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
-{
- desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
- if (is_read)
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
- else
- desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
-}
-
-void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
- enum hclge_opcode_type opcode, bool is_read)
-{
- memset((void *)desc, 0, sizeof(struct hclge_desc));
- desc->opcode = cpu_to_le16(opcode);
- desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
-
- if (is_read)
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
-}
-
-static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
-{
- dma_addr_t dma = ring->desc_dma_addr;
- struct hclge_dev *hdev = ring->dev;
- struct hclge_hw *hw = &hdev->hw;
- u32 reg_val;
-
- if (ring->ring_type == HCLGE_TYPE_CSQ) {
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
- lower_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
- upper_32_bits(dma));
- reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
- reg_val &= HCLGE_NIC_SW_RST_RDY;
- reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
- hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
- } else {
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
- lower_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
- upper_32_bits(dma));
- hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
- ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
- }
-}
-
-static void hclge_cmd_init_regs(struct hclge_hw *hw)
-{
- hclge_cmd_config_regs(&hw->cmq.csq);
- hclge_cmd_config_regs(&hw->cmq.crq);
-}
-
-static int hclge_cmd_csq_clean(struct hclge_hw *hw)
-{
- struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
- struct hclge_cmq_ring *csq = &hw->cmq.csq;
- u32 head;
- int clean;
-
- head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
- rmb(); /* Make sure head is ready before touch any data */
-
- if (!is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
- csq->next_to_use, csq->next_to_clean);
- dev_warn(&hdev->pdev->dev,
- "Disabling any further commands to IMP firmware\n");
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- dev_warn(&hdev->pdev->dev,
- "IMP firmware watchdog reset soon expected!\n");
- return -EIO;
- }
-
- clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
- csq->next_to_clean = head;
- return clean;
-}
-
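[Editor's note] The clean path above computes how many descriptors the firmware has consumed, again modulo desc_num, after checking that the hardware HEAD lies inside the software window [next_to_clean, next_to_use], which may wrap. A compact sketch of both checks, assuming plain ints rather than the driver structs:

/* Validity check: head must lie between ntc and ntu, allowing wrap-around. */
static int head_is_valid(int head, int ntc, int ntu)
{
	if (ntu > ntc)
		return head >= ntc && head <= ntu;   /* contiguous window */

	return head >= ntc || head <= ntu;           /* window wraps past the end */
}

/* Number of descriptors cleaned when HEAD advanced from ntc to head. */
static int cleaned(int head, int ntc, int desc_num)
{
	return (head - ntc + desc_num) % desc_num;
}
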
-static int hclge_cmd_csq_done(struct hclge_hw *hw)
-{
- u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
- return head == hw->cmq.csq.next_to_use;
-}
-
-static bool hclge_is_special_opcode(u16 opcode)
-{
- /* these commands have several descriptors,
- * and use the first one to save opcode and return value
- */
- static const u16 spec_opcode[] = {
- HCLGE_OPC_STATS_64_BIT,
- HCLGE_OPC_STATS_32_BIT,
- HCLGE_OPC_STATS_MAC,
- HCLGE_OPC_STATS_MAC_ALL,
- HCLGE_OPC_QUERY_32_BIT_REG,
- HCLGE_OPC_QUERY_64_BIT_REG,
- HCLGE_QUERY_CLEAR_MPF_RAS_INT,
- HCLGE_QUERY_CLEAR_PF_RAS_INT,
- HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
- HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
- HCLGE_QUERY_ALL_ERR_INFO
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
- if (spec_opcode[i] == opcode)
- return true;
- }
-
- return false;
-}
-
-struct errcode {
- u32 imp_errcode;
- int common_errno;
-};
-
-static void hclge_cmd_copy_desc(struct hclge_hw *hw, struct hclge_desc *desc,
- int num)
-{
- struct hclge_desc *desc_to_use;
- int handle = 0;
-
- while (handle < num) {
- desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
- *desc_to_use = desc[handle];
- (hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
- hw->cmq.csq.next_to_use = 0;
- handle++;
- }
-}
-
-static int hclge_cmd_convert_err_code(u16 desc_ret)
-{
- struct errcode hclge_cmd_errcode[] = {
- {HCLGE_CMD_EXEC_SUCCESS, 0},
- {HCLGE_CMD_NO_AUTH, -EPERM},
- {HCLGE_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
- {HCLGE_CMD_QUEUE_FULL, -EXFULL},
- {HCLGE_CMD_NEXT_ERR, -ENOSR},
- {HCLGE_CMD_UNEXE_ERR, -ENOTBLK},
- {HCLGE_CMD_PARA_ERR, -EINVAL},
- {HCLGE_CMD_RESULT_ERR, -ERANGE},
- {HCLGE_CMD_TIMEOUT, -ETIME},
- {HCLGE_CMD_HILINK_ERR, -ENOLINK},
- {HCLGE_CMD_QUEUE_ILLEGAL, -ENXIO},
- {HCLGE_CMD_INVALID, -EBADR},
- };
- u32 errcode_count = ARRAY_SIZE(hclge_cmd_errcode);
- u32 i;
-
- for (i = 0; i < errcode_count; i++)
- if (hclge_cmd_errcode[i].imp_errcode == desc_ret)
- return hclge_cmd_errcode[i].common_errno;
-
- return -EIO;
-}
-
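[Editor's note] hclge_cmd_convert_err_code() above is a straight table lookup from the firmware's IMP return codes to Linux errnos, with -EIO as the fallback. A trimmed standalone version of the same idiom (only a few codes shown, values taken from the enum removed from hclge_cmd.h below):

#include <errno.h>
#include <stddef.h>

struct errmap { unsigned int fw_code; int err; };

static int fw_code_to_errno(unsigned int fw_code)
{
	static const struct errmap map[] = {
		{ 0, 0 },            /* EXEC_SUCCESS  */
		{ 1, -EPERM },       /* NO_AUTH       */
		{ 2, -EOPNOTSUPP },  /* NOT_SUPPORTED */
		{ 6, -EINVAL },      /* PARA_ERR      */
		{ 8, -ETIME },       /* TIMEOUT       */
	};
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].fw_code == fw_code)
			return map[i].err;

	return -EIO;                 /* unknown firmware status */
}
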
-static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
- int num, int ntc)
-{
- u16 opcode, desc_ret;
- int handle;
-
- opcode = le16_to_cpu(desc[0].opcode);
- for (handle = 0; handle < num; handle++) {
- desc[handle] = hw->cmq.csq.desc[ntc];
- ntc++;
- if (ntc >= hw->cmq.csq.desc_num)
- ntc = 0;
- }
- if (likely(!hclge_is_special_opcode(opcode)))
- desc_ret = le16_to_cpu(desc[num - 1].retval);
- else
- desc_ret = le16_to_cpu(desc[0].retval);
-
- hw->cmq.last_status = desc_ret;
-
- return hclge_cmd_convert_err_code(desc_ret);
-}
-
-static int hclge_cmd_check_result(struct hclge_hw *hw, struct hclge_desc *desc,
- int num, int ntc)
-{
- struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
- bool is_completed = false;
- u32 timeout = 0;
- int handle, ret;
-
- /**
- * If the command is sync, wait for the firmware to write back,
- * if multi descriptors to be sent, use the first one to check
- */
- if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
- do {
- if (hclge_cmd_csq_done(hw)) {
- is_completed = true;
- break;
- }
- udelay(1);
- timeout++;
- } while (timeout < hw->cmq.tx_timeout);
- }
-
- if (!is_completed)
- ret = -EBADE;
- else
- ret = hclge_cmd_check_retval(hw, desc, num, ntc);
-
- /* Clean the command send queue */
- handle = hclge_cmd_csq_clean(hw);
- if (handle < 0)
- ret = handle;
- else if (handle != num)
- dev_warn(&hdev->pdev->dev,
- "cleaned %d, need to clean %d\n", handle, num);
- return ret;
-}
-
-/**
- * hclge_cmd_send - send command to command queue
- * @hw: pointer to the hw struct
- * @desc: prefilled descriptor for describing the command
- * @num : the number of descriptors to be sent
- *
- * This is the main send command for command queue, it
- * sends the queue, cleans the queue, etc
- **/
-int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
-{
- struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
- struct hclge_cmq_ring *csq = &hw->cmq.csq;
- int ret;
- int ntc;
-
- spin_lock_bh(&hw->cmq.csq.lock);
-
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
- spin_unlock_bh(&hw->cmq.csq.lock);
- return -EBUSY;
- }
-
- if (num > hclge_ring_space(&hw->cmq.csq)) {
- /* If CMDQ ring is full, SW HEAD and HW HEAD may be different,
- * need update the SW HEAD pointer csq->next_to_clean
- */
- csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
- spin_unlock_bh(&hw->cmq.csq.lock);
- return -EBUSY;
- }
-
- /**
- * Record the location of desc in the ring for this time
- * which will be use for hardware to write back
- */
- ntc = hw->cmq.csq.next_to_use;
-
- hclge_cmd_copy_desc(hw, desc, num);
-
- /* Write to hardware */
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);
-
- ret = hclge_cmd_check_result(hw, desc, num, ntc);
-
- spin_unlock_bh(&hw->cmq.csq.lock);
-
- return ret;
-}
-
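[Editor's note] The send path above is: take the CSQ lock, bail out with -EBUSY if the queue is disabled or lacks space, copy the descriptors in, ring the doorbell by writing the TAIL register, then (for synchronous commands) poll the HEAD register until the firmware has consumed the request, for at most tx_timeout iterations of a 1 us delay. A simplified sketch of that completion poll, with the register read replaced by a hypothetical read_head() callback:

/* Poll until the hardware head reaches the queued position or we time out.
 * In the driver each loop iteration is a 1 us udelay(); here iterations
 * simply stand in for microseconds, so this is illustrative only.
 */
static int wait_csq_done(unsigned int (*read_head)(void), unsigned int queued,
			 unsigned int tx_timeout_us)
{
	unsigned int waited;

	for (waited = 0; waited < tx_timeout_us; waited++) {
		if (read_head() == queued)
			return 0;       /* firmware caught up with us */
	}

	return -1;                      /* the driver returns -EBADE here */
}
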
-static void hclge_set_default_capability(struct hclge_dev *hdev)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
-
- set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
- set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
- if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
- set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
- set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
- }
-}
-
-static const struct hclge_caps_bit_map hclge_cmd_caps_bit_map0[] = {
- {HCLGE_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
- {HCLGE_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B},
- {HCLGE_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
- {HCLGE_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
- {HCLGE_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
- {HCLGE_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
- {HCLGE_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B},
- {HCLGE_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B},
- {HCLGE_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B},
- {HCLGE_CAP_PHY_IMP_B, HNAE3_DEV_SUPPORT_PHY_IMP_B},
- {HCLGE_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B},
- {HCLGE_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
- {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
- {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
-};
-
-static void hclge_parse_capability(struct hclge_dev *hdev,
- struct hclge_query_version_cmd *cmd)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- u32 caps, i;
-
- caps = __le32_to_cpu(cmd->caps[0]);
- for (i = 0; i < ARRAY_SIZE(hclge_cmd_caps_bit_map0); i++)
- if (hnae3_get_bit(caps, hclge_cmd_caps_bit_map0[i].imp_bit))
- set_bit(hclge_cmd_caps_bit_map0[i].local_bit,
- ae_dev->caps);
-}
-
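[Editor's note] hclge_parse_capability() above mirrors each capability bit the firmware reports into the driver-local bitmap via the hclge_cmd_caps_bit_map0[] table. The same pattern in a self-contained form (bit numbers here are placeholders, not the real HCLGE_/HNAE3_ values):

struct cap_map { unsigned int fw_bit; unsigned int drv_bit; };

/* Copy every firmware-advertised capability bit to its driver-side bit. */
static void parse_caps(unsigned long fw_caps, unsigned long *drv_caps,
		       const struct cap_map *map, unsigned int entries)
{
	unsigned int i;

	for (i = 0; i < entries; i++)
		if (fw_caps & (1UL << map[i].fw_bit))
			*drv_caps |= 1UL << map[i].drv_bit;
}
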
-static __le32 hclge_build_api_caps(void)
-{
- u32 api_caps = 0;
-
- hnae3_set_bit(api_caps, HCLGE_API_CAP_FLEX_RSS_TBL_B, 1);
-
- return cpu_to_le32(api_caps);
-}
-
-static enum hclge_cmd_status
-hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- struct hclge_query_version_cmd *resp;
- struct hclge_desc desc;
- int ret;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
- resp = (struct hclge_query_version_cmd *)desc.data;
- resp->api_caps = hclge_build_api_caps();
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- return ret;
-
- hdev->fw_version = le32_to_cpu(resp->firmware);
-
- ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
- HNAE3_PCI_REVISION_BIT_SIZE;
- ae_dev->dev_version |= hdev->pdev->revision;
-
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
- hclge_set_default_capability(hdev);
-
- hclge_parse_capability(hdev, resp);
-
- return ret;
-}
-
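[Editor's note] The version query above builds ae_dev->dev_version by shifting the firmware-reported hardware generation left by HNAE3_PCI_REVISION_BIT_SIZE and OR-ing in the PCI revision, so the result compares as one ordered integer (e.g. against HNAE3_DEVICE_VERSION_V2). A sketch assuming an 8-bit shift, which is not confirmed by this hunk:

/* Pack hardware generation and PCI revision into one comparable value.
 * The 8-bit shift mirrors what HNAE3_PCI_REVISION_BIT_SIZE appears to be;
 * treat it as an assumption.
 */
static unsigned int make_dev_version(unsigned int hw_gen, unsigned char pci_rev)
{
	return (hw_gen << 8) | pci_rev;
}
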
-int hclge_cmd_queue_init(struct hclge_dev *hdev)
-{
- int ret;
-
- /* Setup the lock for command queue */
- spin_lock_init(&hdev->hw.cmq.csq.lock);
- spin_lock_init(&hdev->hw.cmq.crq.lock);
-
- /* Setup the queue entries for use cmd queue */
- hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
- hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
-
- /* Setup Tx write back timeout */
- hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;
-
- /* Setup queue rings */
- ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "CSQ ring setup error %d\n", ret);
- return ret;
- }
-
- ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "CRQ ring setup error %d\n", ret);
- goto err_csq;
- }
-
- return 0;
-err_csq:
- hclge_free_cmd_desc(&hdev->hw.cmq.csq);
- return ret;
-}
-
-static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en)
-{
- struct hclge_firmware_compat_cmd *req;
- struct hclge_desc desc;
- u32 compat = 0;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
-
- if (en) {
- req = (struct hclge_firmware_compat_cmd *)desc.data;
-
- hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
- hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
- if (hnae3_dev_phy_imp_supported(hdev))
- hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
- hnae3_set_bit(compat, HCLGE_MAC_STATS_EXT_EN_B, 1);
- hnae3_set_bit(compat, HCLGE_SYNC_RX_RING_HEAD_EN_B, 1);
-
- req->compat = cpu_to_le32(compat);
- }
-
- return hclge_cmd_send(&hdev->hw, &desc, 1);
-}
-
-int hclge_cmd_init(struct hclge_dev *hdev)
-{
- int ret;
-
- spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock(&hdev->hw.cmq.crq.lock);
-
- hdev->hw.cmq.csq.next_to_clean = 0;
- hdev->hw.cmq.csq.next_to_use = 0;
- hdev->hw.cmq.crq.next_to_clean = 0;
- hdev->hw.cmq.crq.next_to_use = 0;
-
- hclge_cmd_init_regs(&hdev->hw);
-
- spin_unlock(&hdev->hw.cmq.crq.lock);
- spin_unlock_bh(&hdev->hw.cmq.csq.lock);
-
- clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
-
- /* Check if there is new reset pending, because the higher level
- * reset may happen when lower level reset is being processed.
- */
- if ((hclge_is_reset_pending(hdev))) {
- dev_err(&hdev->pdev->dev,
- "failed to init cmd since reset %#lx pending\n",
- hdev->reset_pending);
- ret = -EBUSY;
- goto err_cmd_init;
- }
-
- /* get version and device capabilities */
- ret = hclge_cmd_query_version_and_capability(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to query version and capabilities, ret = %d\n",
- ret);
- goto err_cmd_init;
- }
-
- dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
- HNAE3_FW_VERSION_BYTE3_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
- HNAE3_FW_VERSION_BYTE2_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
- HNAE3_FW_VERSION_BYTE1_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
- HNAE3_FW_VERSION_BYTE0_SHIFT));
-
- /* ask the firmware to enable some features, driver can work without
- * it.
- */
- ret = hclge_firmware_compat_config(hdev, true);
- if (ret)
- dev_warn(&hdev->pdev->dev,
- "Firmware compatible features not enabled(%d).\n",
- ret);
-
- return 0;
-
-err_cmd_init:
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
-
- return ret;
-}
-
-static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
-{
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
- hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
-}
-
-void hclge_cmd_uninit(struct hclge_dev *hdev)
-{
- hclge_firmware_compat_config(hdev, false);
-
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- /* wait to ensure that the firmware completes the possible left
- * over commands.
- */
- msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME);
- spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock(&hdev->hw.cmq.crq.lock);
- hclge_cmd_uninit_regs(&hdev->hw);
- spin_unlock(&hdev->hw.cmq.crq.lock);
- spin_unlock_bh(&hdev->hw.cmq.csq.lock);
-
- hclge_free_cmd_desc(&hdev->hw.cmq.csq);
- hclge_free_cmd_desc(&hdev->hw.cmq.crq);
-}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index d24e59028798..91c173f40701 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -7,323 +7,21 @@
#include <linux/io.h>
#include <linux/etherdevice.h>
#include "hnae3.h"
-
-#define HCLGE_CMDQ_TX_TIMEOUT 30000
-#define HCLGE_CMDQ_CLEAR_WAIT_TIME 200
-#define HCLGE_DESC_DATA_LEN 6
+#include "hclge_comm_cmd.h"
struct hclge_dev;
-struct hclge_desc {
- __le16 opcode;
#define HCLGE_CMDQ_RX_INVLD_B 0
#define HCLGE_CMDQ_RX_OUTVLD_B 1
- __le16 flag;
- __le16 retval;
- __le16 rsv;
- __le32 data[HCLGE_DESC_DATA_LEN];
-};
-
-struct hclge_cmq_ring {
- dma_addr_t desc_dma_addr;
- struct hclge_desc *desc;
- struct hclge_dev *dev;
- u32 head;
- u32 tail;
-
- u16 buf_size;
- u16 desc_num;
- int next_to_use;
- int next_to_clean;
- u8 ring_type; /* cmq ring type */
- spinlock_t lock; /* Command queue lock */
-};
-
-enum hclge_cmd_return_status {
- HCLGE_CMD_EXEC_SUCCESS = 0,
- HCLGE_CMD_NO_AUTH = 1,
- HCLGE_CMD_NOT_SUPPORTED = 2,
- HCLGE_CMD_QUEUE_FULL = 3,
- HCLGE_CMD_NEXT_ERR = 4,
- HCLGE_CMD_UNEXE_ERR = 5,
- HCLGE_CMD_PARA_ERR = 6,
- HCLGE_CMD_RESULT_ERR = 7,
- HCLGE_CMD_TIMEOUT = 8,
- HCLGE_CMD_HILINK_ERR = 9,
- HCLGE_CMD_QUEUE_ILLEGAL = 10,
- HCLGE_CMD_INVALID = 11,
-};
-
-enum hclge_cmd_status {
- HCLGE_STATUS_SUCCESS = 0,
- HCLGE_ERR_CSQ_FULL = -1,
- HCLGE_ERR_CSQ_TIMEOUT = -2,
- HCLGE_ERR_CSQ_ERROR = -3,
-};
-
struct hclge_misc_vector {
u8 __iomem *addr;
int vector_irq;
char name[HNAE3_INT_NAME_LEN];
};
-struct hclge_cmq {
- struct hclge_cmq_ring csq;
- struct hclge_cmq_ring crq;
- u16 tx_timeout;
- enum hclge_cmd_status last_status;
-};
-
-#define HCLGE_CMD_FLAG_IN BIT(0)
-#define HCLGE_CMD_FLAG_OUT BIT(1)
-#define HCLGE_CMD_FLAG_NEXT BIT(2)
-#define HCLGE_CMD_FLAG_WR BIT(3)
-#define HCLGE_CMD_FLAG_NO_INTR BIT(4)
-#define HCLGE_CMD_FLAG_ERR_INTR BIT(5)
-
-enum hclge_opcode_type {
- /* Generic commands */
- HCLGE_OPC_QUERY_FW_VER = 0x0001,
- HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
- HCLGE_OPC_GBL_RST_STATUS = 0x0021,
- HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022,
- HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
- HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
- HCLGE_OPC_GET_CFG_PARAM = 0x0025,
- HCLGE_OPC_PF_RST_DONE = 0x0026,
- HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
-
- HCLGE_OPC_STATS_64_BIT = 0x0030,
- HCLGE_OPC_STATS_32_BIT = 0x0031,
- HCLGE_OPC_STATS_MAC = 0x0032,
- HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033,
- HCLGE_OPC_STATS_MAC_ALL = 0x0034,
-
- HCLGE_OPC_QUERY_REG_NUM = 0x0040,
- HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
- HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
- HCLGE_OPC_DFX_BD_NUM = 0x0043,
- HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044,
- HCLGE_OPC_DFX_SSU_REG_0 = 0x0045,
- HCLGE_OPC_DFX_SSU_REG_1 = 0x0046,
- HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047,
- HCLGE_OPC_DFX_RPU_REG_0 = 0x0048,
- HCLGE_OPC_DFX_RPU_REG_1 = 0x0049,
- HCLGE_OPC_DFX_NCSI_REG = 0x004A,
- HCLGE_OPC_DFX_RTC_REG = 0x004B,
- HCLGE_OPC_DFX_PPP_REG = 0x004C,
- HCLGE_OPC_DFX_RCB_REG = 0x004D,
- HCLGE_OPC_DFX_TQP_REG = 0x004E,
- HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
-
- HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
-
- /* MAC command */
- HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
- HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
- HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
- HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
- HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
- HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
- HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
- HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
- HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
- HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
-
- /* PTP commands */
- HCLGE_OPC_PTP_INT_EN = 0x0501,
- HCLGE_OPC_PTP_MODE_CFG = 0x0507,
-
- /* PFC/Pause commands */
- HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
- HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
- HCLGE_OPC_CFG_MAC_PARA = 0x0703,
- HCLGE_OPC_CFG_PFC_PARA = 0x0704,
- HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705,
- HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706,
- HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707,
- HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708,
- HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709,
- HCLGE_OPC_QOS_MAP = 0x070A,
-
- /* ETS/scheduler commands */
- HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804,
- HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805,
- HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806,
- HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807,
- HCLGE_OPC_TM_PORT_WEIGHT = 0x0808,
- HCLGE_OPC_TM_PG_WEIGHT = 0x0809,
- HCLGE_OPC_TM_QS_WEIGHT = 0x080A,
- HCLGE_OPC_TM_PRI_WEIGHT = 0x080B,
- HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C,
- HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D,
- HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E,
- HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F,
- HCLGE_OPC_TM_PORT_SHAPPING = 0x0810,
- HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812,
- HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
- HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
- HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
- HCLGE_OPC_TM_NODES = 0x0816,
- HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
- HCLGE_OPC_QSET_DFX_STS = 0x0844,
- HCLGE_OPC_PRI_DFX_STS = 0x0845,
- HCLGE_OPC_PG_DFX_STS = 0x0846,
- HCLGE_OPC_PORT_DFX_STS = 0x0847,
- HCLGE_OPC_SCH_NQ_CNT = 0x0848,
- HCLGE_OPC_SCH_RQ_CNT = 0x0849,
- HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
- HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
- HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
-
- /* Packet buffer allocate commands */
- HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
- HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
- HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
- HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904,
- HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
- HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
-
- /* TQP management command */
- HCLGE_OPC_SET_TQP_MAP = 0x0A01,
-
- /* TQP commands */
- HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
- HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
- HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
- HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
- HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
- HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
- HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
- HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
- HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
- HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
- HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
-
- /* PPU commands */
- HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
-
- /* TSO command */
- HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
- HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
-
- /* RSS commands */
- HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
- HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
- HCLGE_OPC_RSS_TC_MODE = 0x0D08,
- HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02,
-
- /* Promisuous mode command */
- HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
-
- /* Vlan offload commands */
- HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
- HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
-
- /* Interrupts commands */
- HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
- HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
-
- /* MAC commands */
- HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
- HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
- HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
- HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
- HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
- HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
- HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
-
- /* MAC VLAN commands */
- HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
-
- /* VLAN commands */
- HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
- HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
- HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
- HCLGE_OPC_PORT_VLAN_BYPASS = 0x1103,
-
- /* Flow Director commands */
- HCLGE_OPC_FD_MODE_CTRL = 0x1200,
- HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
- HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
- HCLGE_OPC_FD_TCAM_OP = 0x1203,
- HCLGE_OPC_FD_AD_OP = 0x1204,
- HCLGE_OPC_FD_CNT_OP = 0x1205,
- HCLGE_OPC_FD_USER_DEF_OP = 0x1207,
-
- /* MDIO command */
- HCLGE_OPC_MDIO_CONFIG = 0x1900,
-
- /* QCN commands */
- HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
- HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
- HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03,
- HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
- HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
- HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
- HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
- HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
-
- /* Mailbox command */
- HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
-
- /* Led command */
- HCLGE_OPC_LED_STATUS_CFG = 0xB000,
-
- /* clear hardware resource command */
- HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B,
-
- /* NCL config command */
- HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
-
- /* IMP stats command */
- HCLGE_OPC_IMP_STATS_BD = 0x7012,
- HCLGE_OPC_IMP_STATS_INFO = 0x7013,
- HCLGE_OPC_IMP_COMPAT_CFG = 0x701A,
-
- /* SFP command */
- HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
- HCLGE_OPC_GET_SFP_EXIST = 0x7101,
- HCLGE_OPC_GET_SFP_INFO = 0x7104,
-
- /* Error INT commands */
- HCLGE_MAC_COMMON_INT_EN = 0x030E,
- HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
- HCLGE_SSU_ECC_INT_CMD = 0x0989,
- HCLGE_SSU_COMMON_INT_CMD = 0x098C,
- HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40,
- HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41,
- HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42,
- HCLGE_COMMON_ECC_INT_CFG = 0x1505,
- HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510,
- HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
- HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512,
- HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
- HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
- HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
- HCLGE_QUERY_ALL_ERR_BD_NUM = 0x1516,
- HCLGE_QUERY_ALL_ERR_INFO = 0x1517,
- HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
- HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
- HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
- HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
- HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
- HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
- HCLGE_IGU_COMMON_INT_EN = 0x1806,
- HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
- HCLGE_PPP_CMD0_INT_CMD = 0x2100,
- HCLGE_PPP_CMD1_INT_CMD = 0x2101,
- HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
- HCLGE_NCSI_INT_EN = 0x2401,
-
- /* PHY command */
- HCLGE_OPC_PHY_LINK_KSETTING = 0x7025,
- HCLGE_OPC_PHY_REG = 0x7026,
-
- /* Query link diagnosis info command */
- HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A,
-};
+#define hclge_cmd_setup_basic_desc(desc, opcode, is_read) \
+ hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read)
#define HCLGE_TQP_REG_OFFSET 0x80000
#define HCLGE_TQP_REG_SIZE 0x200
@@ -391,38 +89,6 @@ struct hclge_rx_priv_buff_cmd {
u8 rsv[6];
};
-enum HCLGE_CAP_BITS {
- HCLGE_CAP_UDP_GSO_B,
- HCLGE_CAP_QB_B,
- HCLGE_CAP_FD_FORWARD_TC_B,
- HCLGE_CAP_PTP_B,
- HCLGE_CAP_INT_QL_B,
- HCLGE_CAP_HW_TX_CSUM_B,
- HCLGE_CAP_TX_PUSH_B,
- HCLGE_CAP_PHY_IMP_B,
- HCLGE_CAP_TQP_TXRX_INDEP_B,
- HCLGE_CAP_HW_PAD_B,
- HCLGE_CAP_STASH_B,
- HCLGE_CAP_UDP_TUNNEL_CSUM_B,
- HCLGE_CAP_RAS_IMP_B = 12,
- HCLGE_CAP_FEC_B = 13,
- HCLGE_CAP_PAUSE_B = 14,
- HCLGE_CAP_RXD_ADV_LAYOUT_B = 15,
- HCLGE_CAP_PORT_VLAN_BYPASS_B = 17,
-};
-
-enum HCLGE_API_CAP_BITS {
- HCLGE_API_CAP_FLEX_RSS_TBL_B,
-};
-
-#define HCLGE_QUERY_CAP_LENGTH 3
-struct hclge_query_version_cmd {
- __le32 firmware;
- __le32 hardware;
- __le32 api_caps;
- __le32 caps[HCLGE_QUERY_CAP_LENGTH]; /* capabilities of device */
-};
-
#define HCLGE_RX_PRIV_EN_B 15
#define HCLGE_TC_NUM_ONE_DESC 4
struct hclge_priv_wl {
@@ -571,38 +237,10 @@ struct hclge_vf_num_cmd {
};
#define HCLGE_RSS_DEFAULT_OUTPORT_B 4
-#define HCLGE_RSS_HASH_KEY_OFFSET_B 4
-#define HCLGE_RSS_HASH_KEY_NUM 16
-struct hclge_rss_config_cmd {
- u8 hash_config;
- u8 rsv[7];
- u8 hash_key[HCLGE_RSS_HASH_KEY_NUM];
-};
-
-struct hclge_rss_input_tuple_cmd {
- u8 ipv4_tcp_en;
- u8 ipv4_udp_en;
- u8 ipv4_sctp_en;
- u8 ipv4_fragment_en;
- u8 ipv6_tcp_en;
- u8 ipv6_udp_en;
- u8 ipv6_sctp_en;
- u8 ipv6_fragment_en;
- u8 rsv[16];
-};
-#define HCLGE_RSS_CFG_TBL_SIZE 16
#define HCLGE_RSS_CFG_TBL_SIZE_H 4
-#define HCLGE_RSS_CFG_TBL_BW_H 2U
#define HCLGE_RSS_CFG_TBL_BW_L 8U
-struct hclge_rss_indirection_table_cmd {
- __le16 start_table_index;
- __le16 rss_set_bitmap;
- u8 rss_qid_h[HCLGE_RSS_CFG_TBL_SIZE_H];
- u8 rss_qid_l[HCLGE_RSS_CFG_TBL_SIZE];
-};
-
#define HCLGE_RSS_TC_OFFSET_S 0
#define HCLGE_RSS_TC_OFFSET_M GENMASK(10, 0)
#define HCLGE_RSS_TC_SIZE_MSB_B 11
@@ -610,10 +248,6 @@ struct hclge_rss_indirection_table_cmd {
#define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12)
#define HCLGE_RSS_TC_SIZE_MSB_OFFSET 3
#define HCLGE_RSS_TC_VALID_B 15
-struct hclge_rss_tc_mode_cmd {
- __le16 rss_tc_mode[HCLGE_MAX_TC_NUM];
- u8 rsv[8];
-};
#define HCLGE_LINK_STATUS_UP_B 0
#define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B)
@@ -687,7 +321,9 @@ struct hclge_config_mac_speed_dup_cmd {
#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0
u8 mac_change_fec_en;
- u8 rsv[22];
+ u8 rsv[4];
+ u8 lane_num;
+ u8 rsv1[17];
};
#define HCLGE_TQP_ENABLE_B 0
@@ -713,7 +349,9 @@ struct hclge_sfp_info_cmd {
u8 autoneg_ability; /* whether support autoneg */
__le32 speed_ability; /* speed ability for current media */
__le32 module_type;
- u8 rsv[8];
+ u8 fec_ability;
+ u8 lane_num;
+ u8 rsv[6];
};
#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0
@@ -725,12 +363,27 @@ struct hclge_sfp_info_cmd {
#define HCLGE_MAC_FEC_OFF 0
#define HCLGE_MAC_FEC_BASER 1
#define HCLGE_MAC_FEC_RS 2
+#define HCLGE_MAC_FEC_LLRS 3
struct hclge_config_fec_cmd {
u8 fec_mode;
u8 default_config;
u8 rsv[22];
};
+#define HCLGE_FEC_STATS_CMD_NUM 4
+
+struct hclge_query_fec_stats_cmd {
+ /* fec rs mode total stats */
+ __le32 rs_fec_corr_blocks;
+ __le32 rs_fec_uncorr_blocks;
+ __le32 rs_fec_error_blocks;
+ /* fec base-r mode per lanes stats */
+ u8 base_r_lane_num;
+ u8 rsv[3];
+ __le32 base_r_fec_corr_blocks;
+ __le32 base_r_fec_uncorr_blocks;
+};
+
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
@@ -1015,16 +668,6 @@ struct hclge_common_lb_cmd {
#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x1400 /* 5120 byte */
-#define HCLGE_TYPE_CRQ 0
-#define HCLGE_TYPE_CSQ 1
-
-/* this bit indicates that the driver is ready for hardware reset */
-#define HCLGE_NIC_SW_RST_RDY_B 16
-#define HCLGE_NIC_SW_RST_RDY BIT(HCLGE_NIC_SW_RST_RDY_B)
-
-#define HCLGE_NIC_CMQ_DESC_NUM 1024
-#define HCLGE_NIC_CMQ_DESC_NUM_S 3
-
#define HCLGE_LED_LOCATE_STATE_S 0
#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0)
@@ -1147,16 +790,6 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd {
u8 rsv[4];
};
-#define HCLGE_LINK_EVENT_REPORT_EN_B 0
-#define HCLGE_NCSI_ERROR_REPORT_EN_B 1
-#define HCLGE_PHY_IMP_EN_B 2
-#define HCLGE_MAC_STATS_EXT_EN_B 3
-#define HCLGE_SYNC_RX_RING_HEAD_EN_B 4
-struct hclge_firmware_compat_cmd {
- __le32 compat;
- u8 rsv[20];
-};
-
#define HCLGE_SFP_INFO_CMD_NUM 6
#define HCLGE_SFP_INFO_BD0_LEN 20
#define HCLGE_SFP_INFO_BDX_LEN 24
@@ -1239,44 +872,22 @@ struct hclge_phy_reg_cmd {
u8 rsv1[18];
};
-/* capabilities bits map between imp firmware and local driver */
-struct hclge_caps_bit_map {
- u16 imp_bit;
- u16 local_bit;
+struct hclge_wol_cfg_cmd {
+ __le32 wake_on_lan_mode;
+ u8 sopass[SOPASS_MAX];
+ u8 sopass_size;
+ u8 rsv[13];
};
-int hclge_cmd_init(struct hclge_dev *hdev);
-static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
-{
- writel(value, base + reg);
-}
-
-#define hclge_write_dev(a, reg, value) \
- hclge_write_reg((a)->io_base, reg, value)
-#define hclge_read_dev(a, reg) \
- hclge_read_reg((a)->io_base, reg)
-
-static inline u32 hclge_read_reg(u8 __iomem *base, u32 reg)
-{
- u8 __iomem *reg_addr = READ_ONCE(base);
-
- return readl(reg_addr + reg);
-}
-
-#define HCLGE_SEND_SYNC(flag) \
- ((flag) & HCLGE_CMD_FLAG_NO_INTR)
+struct hclge_query_wol_supported_cmd {
+ __le32 supported_wake_mode;
+ u8 rsv[20];
+};
struct hclge_hw;
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num);
-void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
- enum hclge_opcode_type opcode, bool is_read);
-void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
-
-enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
- struct hclge_desc *desc);
-enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
- struct hclge_desc *desc);
-
-void hclge_cmd_uninit(struct hclge_dev *hdev);
-int hclge_cmd_queue_init(struct hclge_dev *hdev);
+enum hclge_comm_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
+ struct hclge_desc *desc);
+enum hclge_comm_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
+ struct hclge_desc *desc);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 375ebf105a9a..c4aded65e848 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -203,7 +203,7 @@ static int hclge_map_update(struct hclge_dev *hdev)
if (ret)
return ret;
- hclge_rss_indir_init_cfg(hdev);
+ hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);
return hclge_rss_init_hw(hdev);
}
@@ -359,6 +359,93 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
+static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ struct dcb_app old_app;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO)
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "setapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ if (app->priority == h->kinfo.dscp_prio[app->protocol])
+ return 0;
+
+ ret = dcb_ieee_setapp(netdev, app);
+ if (ret)
+ return ret;
+
+ old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ old_app.protocol = app->protocol;
+ old_app.priority = h->kinfo.dscp_prio[app->protocol];
+
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = old_app.priority;
+ (void)dcb_ieee_delapp(netdev, app);
+ return ret;
+ }
+
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_DSCP;
+ if (old_app.priority == HNAE3_PRIO_ID_INVALID)
+ h->kinfo.dscp_app_cnt++;
+ else
+ ret = dcb_ieee_delapp(netdev, &old_app);
+
+ return ret;
+}
+
+static int hclge_ieee_delapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO ||
+ app->priority != h->kinfo.dscp_prio[app->protocol])
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "delapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ return ret;
+
+ h->kinfo.dscp_prio[app->protocol] = HNAE3_PRIO_ID_INVALID;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to del dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ (void)dcb_ieee_setapp(netdev, app);
+ return ret;
+ }
+
+ if (h->kinfo.dscp_app_cnt)
+ h->kinfo.dscp_app_cnt--;
+
+ if (!h->kinfo.dscp_app_cnt) {
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ ret = hclge_up_to_tc_map(hdev);
+ }
+
+ return ret;
+}
+
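[Editor's note] Both new callbacks above follow the same commit-then-rollback shape: update the kernel's DCB application table and the cached dscp_prio[] entry first, push the DSCP-to-TC map to hardware, and restore the previous entry (and dcb app registration) if the hardware update fails. The core of that pattern, with the hardware push abstracted into a hypothetical callback:

/* Update one DSCP->priority entry, pushing the map to hardware and rolling
 * the entry back if the push fails. push_to_hw() is a placeholder for
 * hclge_dscp_to_tc_map()-style work.
 */
static int update_dscp_prio(unsigned char *dscp_prio, unsigned int dscp,
			    unsigned char new_prio, int (*push_to_hw)(void))
{
	unsigned char old_prio = dscp_prio[dscp];
	int ret;

	dscp_prio[dscp] = new_prio;
	ret = push_to_hw();
	if (ret)
		dscp_prio[dscp] = old_prio;   /* hardware rejected it: roll back */

	return ret;
}
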
/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
@@ -543,6 +630,8 @@ static const struct hnae3_dcb_ops hns3_dcb_ops = {
.ieee_setets = hclge_ieee_setets,
.ieee_getpfc = hclge_ieee_getpfc,
.ieee_setpfc = hclge_ieee_setpfc,
+ .ieee_setapp = hclge_ieee_setapp,
+ .ieee_delapp = hclge_ieee_delapp,
.getdcbx = hclge_getdcbx,
.setdcbx = hclge_setdcbx,
.setup_tc = hclge_setup_tc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 4e0a8c2f7c05..a0b46e7d863e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2018-2019 Hisilicon Limited. */
#include <linux/device.h>
+#include <linux/sched/clock.h>
#include "hclge_debugfs.h"
#include "hclge_err.h"
@@ -14,6 +15,8 @@ static const char * const hclge_mac_state_str[] = {
"TO_ADD", "TO_DEL", "ACTIVE"
};
+static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
+
static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dfx_msg = &hclge_dbg_bios_common_reg[0],
@@ -77,6 +80,10 @@ static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
.cmd = HCLGE_OPC_DFX_TQP_REG } },
};
+/* Make sure that len(name) + interval >= maxlen(item data) + 2.
+ * For example, for name = "pkt_num" (len: 7), where the item data is a u32
+ * printed as "%u" (maxlen: 10), the interval should be at least 5.
+ */
static void hclge_dbg_fill_content(char *content, u16 len,
const struct hclge_dbg_item *items,
const char **result, u16 size)
@@ -99,7 +106,7 @@ static void hclge_dbg_fill_content(char *content, u16 len,
static char *hclge_dbg_get_func_id_str(char *buf, u8 id)
{
if (id)
- sprintf(buf, "vf%u", id - 1);
+ sprintf(buf, "vf%u", id - 1U);
else
sprintf(buf, "pf");
@@ -146,7 +153,7 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
desc->data[0] = cpu_to_le32(index);
for (i = 1; i < bd_num; i++) {
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
desc++;
hclge_cmd_setup_basic_desc(desc, cmd, true);
}
@@ -258,12 +265,29 @@ hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
return 0;
}
+static const struct hclge_dbg_status_dfx_info hclge_dbg_mac_en_status[] = {
+ {HCLGE_MAC_TX_EN_B, "mac_trans_en"},
+ {HCLGE_MAC_RX_EN_B, "mac_rcv_en"},
+ {HCLGE_MAC_PAD_TX_B, "pad_trans_en"},
+ {HCLGE_MAC_PAD_RX_B, "pad_rcv_en"},
+ {HCLGE_MAC_1588_TX_B, "1588_trans_en"},
+ {HCLGE_MAC_1588_RX_B, "1588_rcv_en"},
+ {HCLGE_MAC_APP_LP_B, "mac_app_loop_en"},
+ {HCLGE_MAC_LINE_LP_B, "mac_line_loop_en"},
+ {HCLGE_MAC_FCS_TX_B, "mac_fcs_tx_en"},
+ {HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, "mac_rx_oversize_truncate_en"},
+ {HCLGE_MAC_RX_FCS_STRIP_B, "mac_rx_fcs_strip_en"},
+ {HCLGE_MAC_RX_FCS_B, "mac_rx_fcs_en"},
+ {HCLGE_MAC_TX_UNDER_MIN_ERR_B, "mac_tx_under_min_err_en"},
+ {HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, "mac_tx_oversize_truncate_en"}
+};
+
static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
int len, int *pos)
{
struct hclge_config_mac_mode_cmd *req;
struct hclge_desc desc;
- u32 loop_en;
+ u32 loop_en, i, offset;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
@@ -278,39 +302,12 @@ static int hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev, char *buf,
req = (struct hclge_config_mac_mode_cmd *)desc.data;
loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
- *pos += scnprintf(buf + *pos, len - *pos, "mac_trans_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_rcv_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
- *pos += scnprintf(buf + *pos, len - *pos, "pad_trans_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
- *pos += scnprintf(buf + *pos, len - *pos, "pad_rcv_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
- *pos += scnprintf(buf + *pos, len - *pos, "1588_trans_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
- *pos += scnprintf(buf + *pos, len - *pos, "1588_rcv_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_app_loop_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_line_loop_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_fcs_tx_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
- *pos += scnprintf(buf + *pos, len - *pos,
- "mac_rx_oversize_truncate_en: %#x\n",
- hnae3_get_bit(loop_en,
- HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_strip_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
- *pos += scnprintf(buf + *pos, len - *pos, "mac_rx_fcs_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
- *pos += scnprintf(buf + *pos, len - *pos,
- "mac_tx_under_min_err_en: %#x\n",
- hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
- *pos += scnprintf(buf + *pos, len - *pos,
- "mac_tx_oversize_truncate_en: %#x\n",
- hnae3_get_bit(loop_en,
- HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_mac_en_status); i++) {
+ offset = hclge_dbg_mac_en_status[i].offset;
+ *pos += scnprintf(buf + *pos, len - *pos, "%s: %#x\n",
+ hclge_dbg_mac_en_status[i].message,
+ hnae3_get_bit(loop_en, offset));
+ }
return 0;
}
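
[Editor's note] The hunk above swaps a long run of near-identical scnprintf() calls for the hclge_dbg_mac_en_status[] table plus a loop, so new status bits only need a table entry. The idiom in isolation (printf stands in for scnprintf into the debugfs buffer):

#include <stdio.h>

struct bit_desc { unsigned int bit; const char *name; };

/* Print "name: 0x<bit value>" for every entry in the table. */
static void dump_status_bits(unsigned int word, const struct bit_desc *tbl,
			     unsigned int entries)
{
	unsigned int i;

	for (i = 0; i < entries; i++)
		printf("%s: %#x\n", tbl[i].name, (word >> tbl[i].bit) & 1u);
}
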
@@ -788,7 +785,6 @@ static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
-
if (!data_str)
return -ENOMEM;
@@ -1122,10 +1118,11 @@ static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
return 0;
}
+#define HCLGE_DBG_TC_MASK 0x0F
+
static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
int len)
{
-#define HCLGE_DBG_TC_MASK 0x0F
#define HCLGE_DBG_TC_BIT_WIDTH 4
struct hclge_qos_pri_map_cmd *pri_map;
@@ -1159,6 +1156,58 @@ static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
return 0;
}
+static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 dscp_tc[HNAE3_MAX_DSCP];
+ int pos, ret;
+ u8 i, j;
+
+ pos = scnprintf(buf, len, "tc map mode: %s\n",
+ tc_map_mode_str[kinfo->tc_map_mode]);
+
+ if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump qos dscp map, ret = %d\n", ret);
+ return ret;
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n");
+
+ /* The low 32 dscp settings use bd0, the high 32 dscp settings use bd1 */
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+ /* Each dscp setting has 4 bits, so each byte holds two dscp
+ * settings
+ */
+ dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[i] &= HCLGE_DBG_TC_MASK;
+ dscp_tc[j] &= HCLGE_DBG_TC_MASK;
+ }
+
+ for (i = 0; i < HNAE3_MAX_DSCP; i++) {
+ if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
+ continue;
+
+ pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n",
+ i, kinfo->dscp_prio[i], dscp_tc[i]);
+ }
+
+ return 0;
+}
+
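[Editor's note] The dump above unpacks the QOS_MAP reply in which every DSCP value gets a 4-bit TC field, two per byte, split across two descriptors (low 32 DSCPs in bd0, high 32 in bd1). Within each descriptor the per-entry extraction presumably reduces to the helper below; HCLGE_DSCP_TC_SHIFT() is not shown in this patch, so the shift formula is an assumption based on the usage here:

/* Extract the 4-bit TC for a DSCP value from a packed byte array:
 * even DSCPs sit in the low nibble, odd DSCPs in the high nibble.
 */
static unsigned char dscp_to_tc(const unsigned char *packed, unsigned int dscp)
{
	unsigned int shift = (dscp & 1u) ? 4u : 0u;

	return (packed[dscp >> 1] >> shift) & 0x0Fu;   /* HCLGE_DBG_TC_MASK */
}
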
static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
@@ -1273,7 +1322,7 @@ static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev, char *buf,
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret) {
@@ -1309,7 +1358,7 @@ static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev,
int i, ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret) {
@@ -1454,9 +1503,9 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
u32 *req;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
@@ -1524,7 +1573,7 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
char *tcam_buf;
int pos = 0;
- if (!hnae3_dev_fd_supported(hdev)) {
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
"Only FD-supported dev supports dump fd tcam\n");
return -EOPNOTSUPP;
@@ -1592,6 +1641,9 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
u64 cnt;
u8 i;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
pos += scnprintf(buf + pos, len - pos,
"func_id\thit_times\n");
@@ -1614,8 +1666,19 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
return 0;
}
+static const struct hclge_dbg_status_dfx_info hclge_dbg_rst_info[] = {
+ {HCLGE_MISC_VECTOR_REG_BASE, "vector0 interrupt enable status"},
+ {HCLGE_MISC_RESET_STS_REG, "reset interrupt source"},
+ {HCLGE_MISC_VECTOR_INT_STS, "reset interrupt status"},
+ {HCLGE_RAS_PF_OTHER_INT_STS_REG, "RAS interrupt status"},
+ {HCLGE_GLOBAL_RESET_REG, "hardware reset status"},
+ {HCLGE_NIC_CSQ_DEPTH_REG, "handshake status"},
+ {HCLGE_FUN_RST_ING, "function reset status"}
+};
+
int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
{
+ u32 i, offset;
int pos = 0;
pos += scnprintf(buf + pos, len - pos, "PF reset count: %u\n",
@@ -1634,22 +1697,14 @@ int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len)
hdev->rst_stats.reset_cnt);
pos += scnprintf(buf + pos, len - pos, "reset fail count: %u\n",
hdev->rst_stats.reset_fail_cnt);
- pos += scnprintf(buf + pos, len - pos,
- "vector0 interrupt enable status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
- pos += scnprintf(buf + pos, len - pos, "reset interrupt source: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
- pos += scnprintf(buf + pos, len - pos, "reset interrupt status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
- pos += scnprintf(buf + pos, len - pos, "RAS interrupt status: 0x%x\n",
- hclge_read_dev(&hdev->hw,
- HCLGE_RAS_PF_OTHER_INT_STS_REG));
- pos += scnprintf(buf + pos, len - pos, "hardware reset status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
- pos += scnprintf(buf + pos, len - pos, "handshake status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
- pos += scnprintf(buf + pos, len - pos, "function reset status: 0x%x\n",
- hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_rst_info); i++) {
+ offset = hclge_dbg_rst_info[i].offset;
+ pos += scnprintf(buf + pos, len - pos, "%s: 0x%x\n",
+ hclge_dbg_rst_info[i].message,
+ hclge_read_dev(&hdev->hw, offset));
+ }
+
pos += scnprintf(buf + pos, len - pos, "hdev state: 0x%lx\n",
hdev->state);
@@ -1771,7 +1826,7 @@ hclge_dbg_get_imp_stats_info(struct hclge_dev *hdev, char *buf, int len)
#define HCLGE_MAX_NCL_CONFIG_LENGTH 16384
static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
- char *buf, int *len, int *pos)
+ char *buf, int len, int *pos)
{
#define HCLGE_CMD_DATA_NUM 6
@@ -1783,7 +1838,7 @@ static void hclge_ncl_config_data_print(struct hclge_desc *desc, int *index,
if (i == 0 && j == 0)
continue;
- *pos += scnprintf(buf + *pos, *len - *pos,
+ *pos += scnprintf(buf + *pos, len - *pos,
"0x%04x | 0x%08x\n", offset,
le32_to_cpu(desc[i].data[j]));
@@ -1821,7 +1876,7 @@ hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *buf, int len)
if (ret)
return ret;
- hclge_ncl_config_data_print(desc, &index, buf, &len, &pos);
+ hclge_ncl_config_data_print(desc, &index, buf, len, &pos);
}
return 0;
@@ -2378,6 +2433,10 @@ static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
.dbg_dump = hclge_dbg_dump_qos_pri_map,
},
{
+ .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ .dbg_dump = hclge_dbg_dump_qos_dscp_map,
+ },
+ {
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dbg_dump = hclge_dbg_dump_qos_buf_cfg,
},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index c526591a7240..724052928b88 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -94,6 +94,11 @@ struct hclge_dbg_func {
char *buf, int len);
};
+struct hclge_dbg_status_dfx_info {
+ u32 offset;
+ char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
+};
+
static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
{true, "BP_CPU_STATE"},
@@ -321,10 +326,10 @@ static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
{true, "IGU_RX_OUT_UDP0_PKT"},
{true, "IGU_RX_IN_UDP0_PKT"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
+ {true, "IGU_MC_CAR_DROP_PKT_L"},
+ {true, "IGU_MC_CAR_DROP_PKT_H"},
+ {true, "IGU_BC_CAR_DROP_PKT_L"},
+ {true, "IGU_BC_CAR_DROP_PKT_H"},
{false, "Reserved"},
{true, "IGU_RX_OVERSIZE_PKT_L"},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
index 4c441e6a5082..9a939c0b217f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -13,11 +13,6 @@ static int hclge_devlink_info_get(struct devlink *devlink,
struct hclge_devlink_priv *priv = devlink_priv(devlink);
char version_str[HCLGE_DEVLINK_FW_STRING_LEN];
struct hclge_dev *hdev = priv->hdev;
- int ret;
-
- ret = devlink_info_driver_name_put(req, KBUILD_MODNAME);
- if (ret)
- return ret;
snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
@@ -119,7 +114,6 @@ int hclge_devlink_init(struct hclge_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 20e628c2bd44..3f35227ef1fa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2016-2017 Hisilicon Limited. */
+#include <linux/sched/clock.h>
+
#include "hclge_err.h"
static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
@@ -1399,7 +1401,7 @@ static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure common error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);
if (en) {
@@ -1498,7 +1500,7 @@ static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
/* configure PPP error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
@@ -1633,7 +1635,7 @@ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
/* configure PPU error interrupts */
if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
if (en) {
desc[0].data[0] =
@@ -1718,7 +1720,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure SSU ecc error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false);
if (en) {
desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN);
@@ -1740,7 +1742,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure SSU common error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);
if (en) {
@@ -1963,7 +1965,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
&ae_dev->hw_err_reset_req);
/* clear all main PF RAS errors */
- hclge_cmd_reuse_desc(&desc[0], false);
+ hclge_comm_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);
@@ -1977,7 +1979,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
* @num: number of extended command structures
*
* This function handles all the PF RAS errors in the
- * hw register/s using command.
+ * hw registers using command.
*/
static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
struct hclge_desc *desc,
@@ -2036,7 +2038,7 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
}
/* clear all PF RAS errors */
- hclge_cmd_reuse_desc(&desc[0], false);
+ hclge_comm_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret)
dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);
@@ -2087,8 +2089,8 @@ static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
true);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
ret = hclge_cmd_send(&hdev->hw, &desc[0], 3);
if (ret) {
@@ -2119,7 +2121,7 @@ static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
ret = hclge_cmd_query_error(hdev, &desc[0],
HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD,
- HCLGE_CMD_FLAG_NEXT);
+ HCLGE_COMM_CMD_FLAG_NEXT);
if (ret) {
dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret);
return ret;
@@ -2235,7 +2237,7 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
}
/* clear error status */
- hclge_cmd_reuse_desc(&desc[0], false);
+ hclge_comm_cmd_reuse_desc(&desc[0], false);
ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
if (ret) {
dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
@@ -2405,7 +2407,8 @@ static int hclge_clear_hw_msix_error(struct hclge_dev *hdev,
else
desc[0].opcode = cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT);
- desc[0].flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
+ desc[0].flag = cpu_to_le16(HCLGE_COMM_CMD_FLAG_NO_INTR |
+ HCLGE_COMM_CMD_FLAG_IN);
return hclge_cmd_send(&hdev->hw, &desc[0], bd_num);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c2a58101144e..4fb5406c1951 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -24,6 +24,7 @@
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"
+#include "hclge_comm_cmd.h"
#define HCLGE_NAME "hclge"
@@ -70,6 +71,7 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
+static void hclge_update_fec_stats(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -90,20 +92,20 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
-static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
- HCLGE_NIC_CSQ_BASEADDR_H_REG,
- HCLGE_NIC_CSQ_DEPTH_REG,
- HCLGE_NIC_CSQ_TAIL_REG,
- HCLGE_NIC_CSQ_HEAD_REG,
- HCLGE_NIC_CRQ_BASEADDR_L_REG,
- HCLGE_NIC_CRQ_BASEADDR_H_REG,
- HCLGE_NIC_CRQ_DEPTH_REG,
- HCLGE_NIC_CRQ_TAIL_REG,
- HCLGE_NIC_CRQ_HEAD_REG,
- HCLGE_VECTOR0_CMDQ_SRC_REG,
- HCLGE_CMDQ_INTR_STS_REG,
- HCLGE_CMDQ_INTR_EN_REG,
- HCLGE_CMDQ_INTR_GEN_REG};
+static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ HCLGE_COMM_NIC_CSQ_HEAD_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CRQ_TAIL_REG,
+ HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_COMM_CMDQ_INTR_STS_REG,
+ HCLGE_COMM_CMDQ_INTR_EN_REG,
+ HCLGE_COMM_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
HCLGE_PF_OTHER_INT_REG,
@@ -147,10 +149,11 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
- "App Loopback test",
- "Serdes serial Loopback test",
- "Serdes parallel Loopback test",
- "Phy Loopback test"
+ "External Loopback test",
+ "App Loopback test",
+ "Serdes serial Loopback test",
+ "Serdes parallel Loopback test",
+ "Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
@@ -370,14 +373,6 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
},
};
-static const u8 hclge_hash_key[] = {
- 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
- 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
- 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
- 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
- 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
-};
-
static const u32 hclge_dfx_bd_offset_list[] = {
HCLGE_DFX_BIOS_BD_OFFSET,
HCLGE_DFX_SSU_0_BD_OFFSET,
@@ -478,6 +473,20 @@ static const struct key_info tuple_key_info[] = {
offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};
+/**
+ * hclge_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send command for the command queue; it
+ * writes the descriptors to the queue, cleans the queue, etc.
+ **/
+int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
+{
+ return hclge_comm_cmd_send(&hw->hw, desc, num);
+}
+
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -604,111 +613,6 @@ int hclge_mac_update_stats(struct hclge_dev *hdev)
return hclge_mac_update_stats_defective(hdev);
}
-static int hclge_tqps_update_stats(struct hnae3_handle *handle)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- struct hnae3_queue *queue;
- struct hclge_desc desc[1];
- struct hclge_tqp *tqp;
- int ret, i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclge_tqp, q);
- /* command : HCLGE_OPC_QUERY_IGU_STAT */
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
- true);
-
- desc[0].data[0] = cpu_to_le32(tqp->index);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- ret, i);
- return ret;
- }
- tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[1]);
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclge_tqp, q);
- /* command : HCLGE_OPC_QUERY_IGU_STAT */
- hclge_cmd_setup_basic_desc(&desc[0],
- HCLGE_OPC_QUERY_TX_STATS,
- true);
-
- desc[0].data[0] = cpu_to_le32(tqp->index);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- ret, i);
- return ret;
- }
- tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc[0].data[1]);
- }
-
- return 0;
-}
-
-static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclge_tqp *tqp;
- u64 *buff = data;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
- }
-
- return buff;
-}
-
-static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
-
- /* each tqp has TX & RX two queues */
- return kinfo->num_tqps * (2);
-}
-
-static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- u8 *buff = data;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
- struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
- tqp->index);
- buff = buff + ETH_GSTRING_LEN;
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
- struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
- tqp->index);
- buff = buff + ETH_GSTRING_LEN;
- }
-
- return buff;
-}
-
static int hclge_comm_get_count(struct hclge_dev *hdev,
const struct hclge_comm_stats_str strs[],
u32 size)
@@ -769,7 +673,7 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
handle = &hdev->vport[0].nic;
if (handle->client) {
- status = hclge_tqps_update_stats(handle);
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status) {
dev_err(&hdev->pdev->dev,
"Update TQPS stats fail, status = %d.\n",
@@ -777,6 +681,8 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
}
}
+ hclge_update_fec_stats(hdev);
+
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
@@ -799,7 +705,7 @@ static void hclge_update_stats(struct hnae3_handle *handle,
"Update MAC stats fail, status = %d.\n",
status);
- status = hclge_tqps_update_stats(handle);
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status)
dev_err(&hdev->pdev->dev,
"Update TQPS stats fail, status = %d.\n",
@@ -813,7 +719,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
HNAE3_SUPPORT_PHY_LOOPBACK | \
HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
- HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
+ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
+ HNAE3_SUPPORT_EXTERNAL_LOOPBACK)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -835,9 +742,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
}
- count += 2;
+ count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
hdev->hw.mac.phydev->drv->set_loopback) ||
@@ -848,7 +758,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
} else if (stringset == ETH_SS_STATS) {
count = hclge_comm_get_count(hdev, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string)) +
- hclge_tqps_get_sset_count(handle, stringset);
+ hclge_comm_tqps_get_sset_count(handle);
}
return count;
@@ -866,8 +776,13 @@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
size = ARRAY_SIZE(g_mac_stats_string);
p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
size, p);
- p = hclge_tqps_get_strings(handle, p);
+ p = hclge_comm_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
+ if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
ETH_GSTRING_LEN);
@@ -900,7 +815,7 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
p = hclge_comm_get_stats(hdev, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
- p = hclge_tqps_get_stats(handle, p);
+ p = hclge_comm_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
@@ -1101,6 +1016,27 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
return -EINVAL;
}
+static void hclge_update_fec_support(struct hclge_mac *mac)
+{
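+ /* refresh the supported FEC link modes from the current fec_ability bits */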
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+
+ if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_RS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ mac->supported);
+}
+
static void hclge_convert_setting_sr(u16 speed_ability,
unsigned long *link_mode)
{
@@ -1199,34 +1135,36 @@ static void hclge_convert_setting_kr(u16 speed_ability,
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ /* If the firmware has reported fec_ability, there is no need to derive it from the speed */
+ if (mac->fec_ability)
+ goto out;
switch (mac->speed) {
case HCLGE_MAC_SPEED_10G:
case HCLGE_MAC_SPEED_40G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_25G:
case HCLGE_MAC_SPEED_50G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
- BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
+ BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_100G:
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
+ break;
case HCLGE_MAC_SPEED_200G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
- mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_LLRS);
break;
default:
mac->fec_ability = 0;
break;
}
+
+out:
+ hclge_update_fec_support(mac);
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
@@ -1480,7 +1418,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
- ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
@@ -1520,7 +1458,7 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
if (!dev_specs->rss_ind_tbl_size)
dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
if (!dev_specs->rss_key_size)
- dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
+ dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
if (!dev_specs->max_tm_rate)
dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
if (!dev_specs->max_qset_num)
@@ -1567,7 +1505,7 @@ static int hclge_query_dev_specs(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
true);
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
@@ -1613,13 +1551,39 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
+static void hclge_init_tc_config(struct hclge_dev *hdev)
+{
+ unsigned int i;
+
+ if (hdev->tc_max > HNAE3_MAX_TC ||
+ hdev->tc_max < 1) {
+ dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
+ hdev->tc_max);
+ hdev->tc_max = 1;
+ }
+
+ /* Dev does not support DCB */
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ hdev->tc_max = 1;
+ hdev->pfc_max = 0;
+ } else {
+ hdev->pfc_max = hdev->tc_max;
+ }
+
+ hdev->tm_info.num_tc = 1;
+
+ /* Non-contiguous TCs are currently not supported */
+ for (i = 0; i < hdev->tm_info.num_tc; i++)
+ hnae3_set_bit(hdev->hw_tc_map, i, 1);
+
+ hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
+}
+
static int hclge_configure(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- const struct cpumask *cpumask = cpu_online_mask;
struct hclge_cfg cfg;
- unsigned int i;
- int node, ret;
+ int ret;
ret = hclge_get_cfg(hdev, &cfg);
if (ret)
@@ -1646,7 +1610,7 @@ static int hclge_configure(struct hclge_dev *hdev)
if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
- if (hnae3_dev_fd_supported(hdev)) {
+ if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
hdev->fd_en = true;
hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}
@@ -1662,38 +1626,9 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
- if ((hdev->tc_max > HNAE3_MAX_TC) ||
- (hdev->tc_max < 1)) {
- dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
- hdev->tc_max);
- hdev->tc_max = 1;
- }
-
- /* Dev does not support DCB */
- if (!hnae3_dev_dcb_supported(hdev)) {
- hdev->tc_max = 1;
- hdev->pfc_max = 0;
- } else {
- hdev->pfc_max = hdev->tc_max;
- }
-
- hdev->tm_info.num_tc = 1;
-
- /* Currently not support uncontiuous tc */
- for (i = 0; i < hdev->tm_info.num_tc; i++)
- hnae3_set_bit(hdev->hw_tc_map, i, 1);
-
- hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
-
+ hclge_init_tc_config(hdev);
hclge_init_kdump_kernel_config(hdev);
- /* Set the affinity based on numa node */
- node = dev_to_node(&hdev->pdev->dev);
- if (node != NUMA_NO_NODE)
- cpumask = cpumask_of_node(node);
-
- cpumask_copy(&hdev->affinity_mask, cpumask);
-
return ret;
}
@@ -1718,7 +1653,7 @@ static int hclge_config_gro(struct hclge_dev *hdev)
struct hclge_desc desc;
int ret;
- if (!hnae3_dev_gro_supported(hdev))
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
@@ -1736,11 +1671,12 @@ static int hclge_config_gro(struct hclge_dev *hdev)
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
- struct hclge_tqp *tqp;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_comm_tqp *tqp;
int i;
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
- sizeof(struct hclge_tqp), GFP_KERNEL);
+ sizeof(struct hclge_comm_tqp), GFP_KERNEL);
if (!hdev->htqp)
return -ENOMEM;
@@ -1759,16 +1695,24 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev)
* HCLGE_TQP_MAX_SIZE_DEV_V2
*/
if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
- tqp->q.io_base = hdev->hw.io_base +
+ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGE_TQP_REG_OFFSET +
i * HCLGE_TQP_REG_SIZE;
else
- tqp->q.io_base = hdev->hw.io_base +
+ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGE_TQP_REG_OFFSET +
HCLGE_TQP_EXT_REG_OFFSET +
(i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
HCLGE_TQP_REG_SIZE;
+ /* When the device supports TX push and provides device memory,
+ * the queue can run in either push mode or doorbell mode on
+ * that device memory.
+ */
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ tqp->q.mem_base = hdev->hw.hw.mem_base +
+ HCLGE_TQP_MEM_OFFSET(hdev, i);
+
tqp++;
}
@@ -1864,8 +1808,8 @@ static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
kinfo = &nic->kinfo;
for (i = 0; i < vport->alloc_tqps; i++) {
- struct hclge_tqp *q =
- container_of(kinfo->tqp[i], struct hclge_tqp, q);
+ struct hclge_comm_tqp *q =
+ container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
bool is_pf;
int ret;
@@ -1885,7 +1829,7 @@ static int hclge_map_tqp(struct hclge_dev *hdev)
u16 i, num_vport;
num_vport = hdev->num_req_vfs + 1;
- for (i = 0; i < num_vport; i++) {
+ for (i = 0; i < num_vport; i++) {
int ret;
ret = hclge_map_tqp_to_vport(hdev, vport);
@@ -1907,7 +1851,7 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->pdev = hdev->pdev;
nic->ae_algo = &ae_algo;
nic->numa_node_mask = hdev->numa_node_mask;
- nic->kinfo.io_base = hdev->hw.io_base;
+ nic->kinfo.io_base = hdev->hw.hw.io_base;
ret = hclge_knic_setup(vport, num_tqps,
hdev->num_tx_desc, hdev->num_rx_desc);
@@ -1956,6 +1900,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
+ vport->port_base_vlan_cfg.tbl_sta = true;
vport->rxvlan_cfg.rx_vlan_offload_en = true;
vport->req_vlan_fltr_en = true;
INIT_LIST_HEAD(&vport->vlan_list);
@@ -2416,9 +2361,9 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
/* The first descriptor set the NEXT bit to 1 */
if (i == 0)
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
else
- desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
@@ -2461,9 +2406,9 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev,
/* The first descriptor set the NEXT bit to 1 */
if (i == 0)
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
else
- desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
@@ -2592,8 +2537,8 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
roce->rinfo.base_vector = hdev->num_nic_msi;
roce->rinfo.netdev = nic->kinfo.netdev;
- roce->rinfo.roce_io_base = hdev->hw.io_base;
- roce->rinfo.roce_mem_base = hdev->hw.mem_base;
+ roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
+ roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
@@ -2653,11 +2598,38 @@ static u8 hclge_check_speed_dup(u8 duplex, int speed)
return duplex;
}
+static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
+ {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
+ {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
+ {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
+ {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
+ {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
+ {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
+ {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
+ {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
+ {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
+};
+
+static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
+{
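+ /* translate a driver speed value into the firmware speed code;
+ * returns -EINVAL for speeds the firmware does not know about
+ */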
+ u16 i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
+ if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
+ *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
+ u32 speed_fw;
int ret;
req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
@@ -2667,50 +2639,17 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
if (duplex)
hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
- switch (speed) {
- case HCLGE_MAC_SPEED_10M:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
- break;
- case HCLGE_MAC_SPEED_100M:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
- break;
- case HCLGE_MAC_SPEED_1G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
- break;
- case HCLGE_MAC_SPEED_10G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
- break;
- case HCLGE_MAC_SPEED_25G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
- break;
- case HCLGE_MAC_SPEED_40G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
- break;
- case HCLGE_MAC_SPEED_50G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
- break;
- case HCLGE_MAC_SPEED_100G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
- break;
- case HCLGE_MAC_SPEED_200G:
- hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
- HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
- break;
- default:
+ ret = hclge_convert_to_fw_speed(speed, &speed_fw);
+ if (ret) {
dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
- return -EINVAL;
+ return ret;
}
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
+ speed_fw);
hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1);
+ req->lane_num = lane_num;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -2722,33 +2661,35 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
return 0;
}
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
{
struct hclge_mac *mac = &hdev->hw.mac;
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
if (!mac->support_autoneg && mac->speed == speed &&
- mac->duplex == duplex)
+ mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
return 0;
- ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
if (ret)
return ret;
hdev->hw.mac.speed = speed;
hdev->hw.mac.duplex = duplex;
+ if (!lane_num)
+ hdev->hw.mac.lane_num = lane_num;
return 0;
}
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@@ -2828,6 +2769,157 @@ static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
return 0;
}
+static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
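+ /* two 32-bit counters are reported per lane, accumulated
+ * consecutively into fec_stats.per_lanes[]
+ */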
+ u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
+ u32 desc_index = 0;
+ u32 data_index = 0;
+ u32 i;
+
+ for (i = 0; i < lane_size; i++) {
+ if (data_index >= HCLGE_DESC_DATA_LEN) {
+ desc_index++;
+ data_index = 0;
+ }
+
+ if (desc_index >= desc_len)
+ return;
+
+ hdev->fec_stats.per_lanes[i] +=
+ le32_to_cpu(desc[desc_index].data[data_index]);
+ data_index++;
+ }
+}
+
+static void hclge_parse_fec_stats(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ struct hclge_query_fec_stats_cmd *req;
+
+ req = (struct hclge_query_fec_stats_cmd *)desc[0].data;
+
+ hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
+ hdev->fec_stats.rs_corr_blocks +=
+ le32_to_cpu(req->rs_fec_corr_blocks);
+ hdev->fec_stats.rs_uncorr_blocks +=
+ le32_to_cpu(req->rs_fec_uncorr_blocks);
+ hdev->fec_stats.rs_error_blocks +=
+ le32_to_cpu(req->rs_fec_error_blocks);
+ hdev->fec_stats.base_r_corr_blocks +=
+ le32_to_cpu(req->base_r_fec_corr_blocks);
+ hdev->fec_stats.base_r_uncorr_blocks +=
+ le32_to_cpu(req->base_r_fec_uncorr_blocks);
+
+ hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
+}
+
+static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
+ int ret;
+ u32 i;
+
+ for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
+ true);
+ if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
+ if (ret)
+ return ret;
+
+ hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);
+
+ return 0;
+}
+
+static void hclge_update_fec_stats(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int ret;
+
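+ /* FEC stats may be updated from both the ethtool path and the
+ * periodic service task, so serialize with the UPDATING state bit
+ */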
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
+ test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
+ return;
+
+ ret = hclge_update_fec_stats_hw(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to update fec stats, ret = %d\n", ret);
+
+ clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
+}
+
+static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
+ fec_stats->uncorrectable_blocks.total =
+ hdev->fec_stats.rs_uncorr_blocks;
+}
+
+static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 i;
+
+ if (hdev->fec_stats.base_r_lane_num == 0 ||
+ hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
+ dev_err(&hdev->pdev->dev,
+ "fec stats lane number(%llu) is invalid\n",
+ hdev->fec_stats.base_r_lane_num);
+ return;
+ }
+
+ for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
+ fec_stats->corrected_blocks.lanes[i] =
+ hdev->fec_stats.base_r_corr_per_lanes[i];
+ fec_stats->uncorrectable_blocks.lanes[i] =
+ hdev->fec_stats.base_r_uncorr_per_lanes[i];
+ }
+}
+
+static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ switch (fec_mode) {
+ case BIT(HNAE3_FEC_RS):
+ case BIT(HNAE3_FEC_LLRS):
+ hclge_get_fec_stats_total(hdev, fec_stats);
+ break;
+ case BIT(HNAE3_FEC_BASER):
+ hclge_get_fec_stats_lanes(hdev, fec_stats);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "fec stats is not supported by current fec mode(0x%x)\n",
+ fec_mode);
+ break;
+ }
+}
+
+static void hclge_get_fec_stats(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ if (fec_mode == BIT(HNAE3_FEC_NONE) ||
+ fec_mode == BIT(HNAE3_FEC_AUTO) ||
+ fec_mode == BIT(HNAE3_FEC_USER_DEF))
+ return;
+
+ hclge_update_fec_stats(hdev);
+
+ hclge_comm_get_fec_stats(hdev, fec_stats);
+}
+
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
struct hclge_config_fec_cmd *req;
@@ -2842,6 +2934,9 @@ static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
if (fec_mode & BIT(HNAE3_FEC_RS))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
+ if (fec_mode & BIT(HNAE3_FEC_LLRS))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
if (fec_mode & BIT(HNAE3_FEC_BASER))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
@@ -2894,7 +2989,7 @@ static int hclge_mac_init(struct hclge_dev *hdev)
hdev->support_sfp_query = true;
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
- hdev->hw.mac.duplex);
+ hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
if (ret)
return ret;
@@ -2933,16 +3028,20 @@ static int hclge_mac_init(struct hclge_dev *hdev)
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
+ hdev->last_mbx_scheduled = jiffies;
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+ }
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
- !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
+ hdev->last_rst_scheduled = jiffies;
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
+ }
}
static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
@@ -3082,6 +3181,9 @@ static void hclge_update_fec_advertising(struct hclge_mac *mac)
if (mac->fec_mode & BIT(HNAE3_FEC_RS))
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
mac->advertising);
+ else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->advertising);
else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
mac->advertising);
@@ -3131,7 +3233,6 @@ static void hclge_update_port_capability(struct hclge_dev *hdev,
struct hclge_mac *mac)
{
if (hnae3_dev_fec_supported(hdev))
- /* update fec ability by speed */
hclge_convert_setting_fec(mac);
/* firmware can not identify back plane type, the media type
@@ -3213,10 +3314,12 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
mac->autoneg = resp->autoneg;
mac->support_autoneg = resp->autoneg_ability;
mac->speed_type = QUERY_ACTIVE_SPEED;
+ mac->lane_num = resp->lane_num;
if (!resp->active_fec)
mac->fec_mode = 0;
else
mac->fec_mode = BIT(resp->active_fec);
+ mac->fec_ability = resp->fec_ability;
} else {
mac->speed_type = QUERY_SFP_SPEED;
}
@@ -3237,7 +3340,7 @@ static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
true);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
true);
@@ -3294,7 +3397,7 @@ hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
false);
@@ -3340,6 +3443,7 @@ static int hclge_update_tp_port_info(struct hclge_dev *hdev)
hdev->hw.mac.autoneg = cmd.base.autoneg;
hdev->hw.mac.speed = cmd.base.speed;
hdev->hw.mac.duplex = cmd.base.duplex;
+ linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising);
return 0;
}
@@ -3362,7 +3466,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
static int hclge_update_port_info(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
- int speed = HCLGE_MAC_SPEED_UNKNOWN;
+ int speed;
int ret;
/* get the port info from SFP cmd if not copper port */
@@ -3373,10 +3477,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
if (!hdev->support_sfp_query)
return 0;
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ speed = mac->speed;
ret = hclge_get_sfp_info(hdev, mac);
- else
+ } else {
+ speed = HCLGE_MAC_SPEED_UNKNOWN;
ret = hclge_get_sfp_speed(hdev, &speed);
+ }
if (ret == -EOPNOTSUPP) {
hdev->support_sfp_query = false;
@@ -3388,16 +3495,18 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
if (mac->speed_type == QUERY_ACTIVE_SPEED) {
hclge_update_port_capability(hdev, mac);
+ if (mac->speed != speed)
+ (void)hclge_tm_port_shaper_cfg(hdev);
return 0;
}
return hclge_cfg_mac_speed_dup(hdev, mac->speed,
- HCLGE_MAC_FULL);
+ HCLGE_MAC_FULL, mac->lane_num);
} else {
if (speed == HCLGE_MAC_SPEED_UNKNOWN)
return 0; /* do nothing if no SFP */
/* must config full duplex for SFP */
- return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
}
}
@@ -3470,6 +3579,12 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
link_state_old = vport->vf_info.link_state;
vport->vf_info.link_state = link_state;
+ /* Return success directly if the VF is not alive; the VF will
+ * query the link state itself when it starts working.
+ */
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ return 0;
+
ret = hclge_push_vf_link_status(vport);
if (ret) {
vport->vf_info.link_state = link_state_old;
@@ -3501,7 +3616,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
hdev->rst_stats.imp_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
@@ -3509,7 +3624,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
dev_info(&hdev->pdev->dev, "global reset interrupt\n");
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
hdev->rst_stats.global_rst_cnt++;
@@ -3643,24 +3758,13 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
- vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
+ vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
hdev->vector_status[0] = 0;
hdev->num_msi_left -= 1;
hdev->num_msi_used += 1;
}
-static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
-{
- irq_set_affinity_hint(hdev->misc_vector.vector_irq,
- &hdev->affinity_mask);
-}
-
-static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
-{
- irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
-}
-
static int hclge_misc_irq_init(struct hclge_dev *hdev)
{
int ret;
@@ -3806,8 +3910,16 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
return ret;
}
- if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ if (!reset ||
+ !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state))
+ continue;
+
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) &&
+ hdev->reset_type == HNAE3_FUNC_RESET) {
+ set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET,
+ &vport->need_notify);
continue;
+ }
/* Inform VF to process the reset.
* hclge_inform_reset_assert_to_vf may fail if VF
@@ -3827,10 +3939,17 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
static void hclge_mailbox_service_task(struct hclge_dev *hdev)
{
if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
- test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
+ test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
return;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "mbx service task is scheduled after %ums on cpu%u!\n",
+ jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
+ smp_processor_id());
+
hclge_mbx_handler(hdev);
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
@@ -3865,7 +3984,7 @@ static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
return;
}
msleep(HCLGE_PF_RESET_SYNC_TIME);
- hclge_cmd_reuse_desc(&desc, true);
+ hclge_comm_cmd_reuse_desc(&desc, true);
} while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
@@ -4022,13 +4141,13 @@ static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
{
u32 reg_val;
- reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
if (enable)
- reg_val |= HCLGE_NIC_SW_RST_RDY;
+ reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
else
- reg_val &= ~HCLGE_NIC_SW_RST_RDY;
+ reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;
- hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
+ hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
}
static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
@@ -4065,9 +4184,9 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
/* After performaning pf reset, it is not necessary to do the
* mailbox handling or send any command to firmware, because
* any mailbox handling or command to firmware is only valid
- * after hclge_cmd_init is called.
+ * after hclge_comm_cmd_init is called.
*/
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
@@ -4480,6 +4599,13 @@ static void hclge_reset_service_task(struct hclge_dev *hdev)
if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
return;
+ if (time_is_before_jiffies(hdev->last_rst_scheduled +
+ HCLGE_RESET_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "reset service task is scheduled after %ums on cpu%u!\n",
+ jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
+ smp_processor_id());
+
down(&hdev->reset_sem);
set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
@@ -4491,18 +4617,25 @@ static void hclge_reset_service_task(struct hclge_dev *hdev)
static void hclge_update_vport_alive(struct hclge_dev *hdev)
{
+#define HCLGE_ALIVE_SECONDS_NORMAL 8
+
+ unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ;
int i;
/* start from vport 1 for PF is always alive */
for (i = 1; i < hdev->num_alloc_vport; i++) {
struct hclge_vport *vport = &hdev->vport[i];
- if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
+ if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) ||
+ !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ continue;
+ if (time_after(jiffies, vport->last_active_jiffies +
+ alive_time)) {
clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
-
- /* If vf is not alive, set to default value */
- if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
- vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+ dev_warn(&hdev->pdev->dev,
+ "VF %u heartbeat timeout\n",
+ i - HCLGE_VF_VPORT_START_NUM);
+ }
}
}
@@ -4614,11 +4747,11 @@ static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
/* need an extend offset to config vector >= 64 */
if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
- vector_info->io_addr = hdev->hw.io_base +
+ vector_info->io_addr = hdev->hw.hw.io_base +
HCLGE_VECTOR_REG_BASE +
(idx - 1) * HCLGE_VECTOR_REG_OFFSET;
else
- vector_info->io_addr = hdev->hw.io_base +
+ vector_info->io_addr = hdev->hw.hw.io_base +
HCLGE_VECTOR_EXT_REG_BASE +
(idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
HCLGE_VECTOR_REG_OFFSET_H +
@@ -4688,334 +4821,43 @@ static int hclge_put_vector(struct hnae3_handle *handle, int vector)
return 0;
}
-static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
-{
- return HCLGE_RSS_KEY_SIZE;
-}
-
-static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
- const u8 hfunc, const u8 *key)
-{
- struct hclge_rss_config_cmd *req;
- unsigned int key_offset = 0;
- struct hclge_desc desc;
- int key_counts;
- int key_size;
- int ret;
-
- key_counts = HCLGE_RSS_KEY_SIZE;
- req = (struct hclge_rss_config_cmd *)desc.data;
-
- while (key_counts) {
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
- false);
-
- req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
- req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
-
- key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
- memcpy(req->hash_key,
- key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
-
- key_counts -= key_size;
- key_offset++;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Configure RSS config fail, status = %d\n",
- ret);
- return ret;
- }
- }
- return 0;
-}
-
-static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
-{
- struct hclge_rss_indirection_table_cmd *req;
- struct hclge_desc desc;
- int rss_cfg_tbl_num;
- u8 rss_msb_oft;
- u8 rss_msb_val;
- int ret;
- u16 qid;
- int i;
- u32 j;
-
- req = (struct hclge_rss_indirection_table_cmd *)desc.data;
- rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
- HCLGE_RSS_CFG_TBL_SIZE;
-
- for (i = 0; i < rss_cfg_tbl_num; i++) {
- hclge_cmd_setup_basic_desc
- (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
-
- req->start_table_index =
- cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
- req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
- for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) {
- qid = indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
- req->rss_qid_l[j] = qid & 0xff;
- rss_msb_oft =
- j * HCLGE_RSS_CFG_TBL_BW_H / BITS_PER_BYTE;
- rss_msb_val = (qid >> HCLGE_RSS_CFG_TBL_BW_L & 0x1) <<
- (j * HCLGE_RSS_CFG_TBL_BW_H % BITS_PER_BYTE);
- req->rss_qid_h[rss_msb_oft] |= rss_msb_val;
- }
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Configure rss indir table fail,status = %d\n",
- ret);
- return ret;
- }
- }
- return 0;
-}
-
-static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
- u16 *tc_size, u16 *tc_offset)
-{
- struct hclge_rss_tc_mode_cmd *req;
- struct hclge_desc desc;
- int ret;
- int i;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
- req = (struct hclge_rss_tc_mode_cmd *)desc.data;
-
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- u16 mode = 0;
-
- hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
- hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
- HCLGE_RSS_TC_SIZE_S, tc_size[i]);
- hnae3_set_bit(mode, HCLGE_RSS_TC_SIZE_MSB_B,
- tc_size[i] >> HCLGE_RSS_TC_SIZE_MSB_OFFSET & 0x1);
- hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
- HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
-
- req->rss_tc_mode[i] = cpu_to_le16(mode);
- }
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Configure rss tc mode fail, status = %d\n", ret);
-
- return ret;
-}
-
-static void hclge_get_rss_type(struct hclge_vport *vport)
-{
- if (vport->rss_tuple_sets.ipv4_tcp_en ||
- vport->rss_tuple_sets.ipv4_udp_en ||
- vport->rss_tuple_sets.ipv4_sctp_en ||
- vport->rss_tuple_sets.ipv6_tcp_en ||
- vport->rss_tuple_sets.ipv6_udp_en ||
- vport->rss_tuple_sets.ipv6_sctp_en)
- vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
- else if (vport->rss_tuple_sets.ipv4_fragment_en ||
- vport->rss_tuple_sets.ipv6_fragment_en)
- vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
- else
- vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
-}
-
-static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
-{
- struct hclge_rss_input_tuple_cmd *req;
- struct hclge_desc desc;
- int ret;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
-
- req = (struct hclge_rss_input_tuple_cmd *)desc.data;
-
- /* Get the tuple cfg from pf */
- req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
- hclge_get_rss_type(&hdev->vport[0]);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Configure rss input fail, status = %d\n", ret);
- return ret;
-}
-
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
u8 *key, u8 *hfunc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
- int i;
-
- /* Get hash algorithm */
- if (hfunc) {
- switch (vport->rss_algo) {
- case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
- *hfunc = ETH_RSS_HASH_TOP;
- break;
- case HCLGE_RSS_HASH_ALGO_SIMPLE:
- *hfunc = ETH_RSS_HASH_XOR;
- break;
- default:
- *hfunc = ETH_RSS_HASH_UNKNOWN;
- break;
- }
- }
+ struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;
- /* Get the RSS Key required by the user */
- if (key)
- memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
+ hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
- /* Get indirect table */
- if (indir)
- for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
- indir[i] = vport->rss_indirection_tbl[i];
+ hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
+ ae_dev->dev_specs.rss_ind_tbl_size);
return 0;
}
-static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
- u8 *hash_algo)
-{
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- return 0;
- case ETH_RSS_HASH_XOR:
- *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
- return 0;
- case ETH_RSS_HASH_NO_CHANGE:
- *hash_algo = vport->rss_algo;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- u8 hash_algo;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret, i;
- ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
+ ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
if (ret) {
dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
return ret;
}
- /* Set the RSS Hash Key if specififed by the user */
- if (key) {
- ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
- if (ret)
- return ret;
-
- /* Update the shadow RSS key with user specified qids */
- memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
- } else {
- ret = hclge_set_rss_algo_key(hdev, hash_algo,
- vport->rss_hash_key);
- if (ret)
- return ret;
- }
- vport->rss_algo = hash_algo;
-
/* Update the shadow RSS table with user specified qids */
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
- vport->rss_indirection_tbl[i] = indir[i];
+ rss_cfg->rss_indirection_tbl[i] = indir[i];
/* Update the hardware */
- return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
-}
-
-static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
-{
- u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
-
- if (nfc->data & RXH_L4_B_2_3)
- hash_sets |= HCLGE_D_PORT_BIT;
- else
- hash_sets &= ~HCLGE_D_PORT_BIT;
-
- if (nfc->data & RXH_IP_SRC)
- hash_sets |= HCLGE_S_IP_BIT;
- else
- hash_sets &= ~HCLGE_S_IP_BIT;
-
- if (nfc->data & RXH_IP_DST)
- hash_sets |= HCLGE_D_IP_BIT;
- else
- hash_sets &= ~HCLGE_D_IP_BIT;
-
- if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
- hash_sets |= HCLGE_V_TAG_BIT;
-
- return hash_sets;
-}
-
-static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
- struct ethtool_rxnfc *nfc,
- struct hclge_rss_input_tuple_cmd *req)
-{
- struct hclge_dev *hdev = vport->back;
- u8 tuple_sets;
-
- req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
-
- tuple_sets = hclge_get_rss_hash_bits(nfc);
- switch (nfc->flow_type) {
- case TCP_V4_FLOW:
- req->ipv4_tcp_en = tuple_sets;
- break;
- case TCP_V6_FLOW:
- req->ipv6_tcp_en = tuple_sets;
- break;
- case UDP_V4_FLOW:
- req->ipv4_udp_en = tuple_sets;
- break;
- case UDP_V6_FLOW:
- req->ipv6_udp_en = tuple_sets;
- break;
- case SCTP_V4_FLOW:
- req->ipv4_sctp_en = tuple_sets;
- break;
- case SCTP_V6_FLOW:
- if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
- (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
- return -EINVAL;
-
- req->ipv6_sctp_en = tuple_sets;
- break;
- case IPV4_FLOW:
- req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- break;
- case IPV6_FLOW:
- req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
+ rss_cfg->rss_indirection_tbl);
}
static int hclge_set_rss_tuple(struct hnae3_handle *handle,
@@ -5023,92 +4865,19 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_rss_input_tuple_cmd *req;
- struct hclge_desc desc;
int ret;
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclge_rss_input_tuple_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
-
- ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to init rss tuple cmd, ret = %d\n", ret);
- return ret;
- }
-
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->rss_cfg, nfc);
if (ret) {
dev_err(&hdev->pdev->dev,
- "Set rss tuple fail, status = %d\n", ret);
+ "failed to set rss tuple, ret = %d.\n", ret);
return ret;
}
- vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
- vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
- vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
- vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
- vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
- vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
- vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
- vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
- hclge_get_rss_type(vport);
return 0;
}
-static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
- u8 *tuple_sets)
-{
- switch (flow_type) {
- case TCP_V4_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
- break;
- case UDP_V4_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
- break;
- case TCP_V6_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
- break;
- case UDP_V6_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
- break;
- case SCTP_V4_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
- break;
- case SCTP_V6_FLOW:
- *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
- break;
- case IPV4_FLOW:
- case IPV6_FLOW:
- *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static u64 hclge_convert_rss_tuple(u8 tuple_sets)
-{
- u64 tuple_data = 0;
-
- if (tuple_sets & HCLGE_D_PORT_BIT)
- tuple_data |= RXH_L4_B_2_3;
- if (tuple_sets & HCLGE_S_PORT_BIT)
- tuple_data |= RXH_L4_B_0_1;
- if (tuple_sets & HCLGE_D_IP_BIT)
- tuple_data |= RXH_IP_DST;
- if (tuple_sets & HCLGE_S_IP_BIT)
- tuple_data |= RXH_IP_SRC;
-
- return tuple_data;
-}
-
static int hclge_get_rss_tuple(struct hnae3_handle *handle,
struct ethtool_rxnfc *nfc)
{
@@ -5118,11 +4887,12 @@ static int hclge_get_rss_tuple(struct hnae3_handle *handle,
nfc->data = 0;
- ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
+ ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
+ &tuple_sets);
if (ret || !tuple_sets)
return ret;
- nfc->data = hclge_convert_rss_tuple(tuple_sets);
+ nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
return 0;
}
@@ -5175,78 +4945,35 @@ static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
tc_offset[i] = tc_info->tqp_offset[i];
}
- return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
+ tc_size);
}
int hclge_rss_init_hw(struct hclge_dev *hdev)
{
- struct hclge_vport *vport = hdev->vport;
- u16 *rss_indir = vport[0].rss_indirection_tbl;
- u8 *key = vport[0].rss_hash_key;
- u8 hfunc = vport[0].rss_algo;
+ u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
+ u8 *key = hdev->rss_cfg.rss_hash_key;
+ u8 hfunc = hdev->rss_cfg.rss_algo;
int ret;
- ret = hclge_set_rss_indir_table(hdev, rss_indir);
+ ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+ rss_indir);
if (ret)
return ret;
- ret = hclge_set_rss_algo_key(hdev, hfunc, key);
+ ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
if (ret)
return ret;
- ret = hclge_set_rss_input_tuple(hdev);
+ ret = hclge_comm_set_rss_input_tuple(&hdev->vport[0].nic,
+ &hdev->hw.hw, true,
+ &hdev->rss_cfg);
if (ret)
return ret;
return hclge_init_rss_tc_mode(hdev);
}
-void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
-{
- struct hclge_vport *vport = &hdev->vport[0];
- int i;
-
- for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
- vport->rss_indirection_tbl[i] = i % vport->alloc_rss_size;
-}
-
-static int hclge_rss_init_cfg(struct hclge_dev *hdev)
-{
- u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
- int rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- struct hclge_vport *vport = &hdev->vport[0];
- u16 *rss_ind_tbl;
-
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
- rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
-
- vport->rss_tuple_sets.ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
- vport->rss_tuple_sets.ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
- vport->rss_tuple_sets.ipv6_sctp_en =
- hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
- HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
- HCLGE_RSS_INPUT_TUPLE_SCTP;
- vport->rss_tuple_sets.ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
-
- vport->rss_algo = rss_algo;
-
- rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
- sizeof(*rss_ind_tbl), GFP_KERNEL);
- if (!rss_ind_tbl)
- return -ENOMEM;
-
- vport->rss_indirection_tbl = rss_ind_tbl;
- memcpy(vport->rss_hash_key, hclge_hash_key, HCLGE_RSS_KEY_SIZE);
-
- hclge_rss_indir_init_cfg(hdev);
-
- return 0;
-}
-
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
int vector_id, bool en,
struct hnae3_ring_chain_node *ring_chain)
@@ -5256,7 +4983,7 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
struct hclge_desc desc;
struct hclge_ctrl_vector_chain_cmd *req =
(struct hclge_ctrl_vector_chain_cmd *)desc.data;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
enum hclge_opcode_type op;
u16 tqp_type_and_id;
int i;
@@ -5819,7 +5546,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
struct hclge_fd_key_cfg *key_cfg;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
@@ -5886,9 +5613,9 @@ static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
int ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
@@ -6790,7 +6517,7 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
"Error: vf id (%u) should be less than %u\n",
- vf - 1, hdev->num_req_vfs);
+ vf - 1U, hdev->num_req_vfs);
return -EINVAL;
}
@@ -6800,7 +6527,7 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
if (ring >= tqps) {
dev_err(&hdev->pdev->dev,
"Error: queue id (%u) > max tqp num (%u)\n",
- ring, tqps - 1);
+ ring, tqps - 1U);
return -EINVAL;
}
@@ -6824,7 +6551,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
u8 action;
int ret;
- if (!hnae3_dev_fd_supported(hdev)) {
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
"flow table director is not supported\n");
return -EOPNOTSUPP;
@@ -6880,7 +6607,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
struct ethtool_rx_flow_spec *fs;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
@@ -6916,9 +6643,6 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
struct hlist_node *node;
u16 location;
- if (!hnae3_dev_fd_supported(hdev))
- return;
-
spin_lock_bh(&hdev->fd_rule_lock);
for_each_set_bit(location, hdev->fd_bmap,
@@ -6943,6 +6667,9 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
hclge_clear_fd_rules_in_list(hdev, true);
hclge_fd_disable_user_def(hdev);
}
@@ -6958,7 +6685,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
* return value. If error is returned here, the reset process will
* fail.
*/
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
/* if fd is disabled, should not restore it when reset */
@@ -6982,7 +6709,7 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle))
return -EOPNOTSUPP;
cmd->rule_cnt = hdev->hclge_fd_rule_num;
@@ -7161,6 +6888,37 @@ static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
}
}
+static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
+ u16 location)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node2;
+
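+ /* fd_rule_list is kept sorted by location, so stop searching
+ * once a rule with a larger location is found
+ */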
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+ if (rule->location == location)
+ return rule;
+ else if (rule->location > location)
+ return NULL;
+ }
+
+ return NULL;
+}
+
+static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ } else {
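+ /* encode the queue id in the low bits and the VF id in the
+ * ethtool ring-VF field of ring_cookie
+ */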
+ u64 vf_id;
+
+ fs->ring_cookie = rule->queue_id;
+ vf_id = rule->vf_id;
+ vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ fs->ring_cookie |= vf_id;
+ }
+}
+
static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct ethtool_rxnfc *cmd)
{
@@ -7168,23 +6926,17 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct hclge_fd_rule *rule = NULL;
struct hclge_dev *hdev = vport->back;
struct ethtool_rx_flow_spec *fs;
- struct hlist_node *node2;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
spin_lock_bh(&hdev->fd_rule_lock);
- hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
- if (rule->location >= fs->location)
- break;
- }
-
- if (!rule || fs->location != rule->location) {
+ rule = hclge_get_fd_rule(hdev, fs->location);
+ if (!rule) {
spin_unlock_bh(&hdev->fd_rule_lock);
-
return -ENOENT;
}
@@ -7222,16 +6974,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
hclge_fd_get_ext_info(fs, rule);
- if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
- fs->ring_cookie = RX_CLS_FLOW_DISC;
- } else {
- u64 vf_id;
-
- fs->ring_cookie = rule->queue_id;
- vf_id = rule->vf_id;
- vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
- fs->ring_cookie |= vf_id;
- }
+ hclge_fd_get_ring_cookie(fs, rule);
spin_unlock_bh(&hdev->fd_rule_lock);
@@ -7247,7 +6990,7 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
struct hlist_node *node2;
int cnt = 0;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
@@ -7347,7 +7090,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
struct hclge_fd_rule *rule;
u16 bit_id;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
/* when there is already fd rule existed add by user,
@@ -7636,6 +7379,12 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle,
struct hclge_fd_rule *rule;
int ret;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
+ dev_err(&hdev->pdev->dev,
+ "cls flower is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
ret = hclge_check_cls_flower(hdev, cls_flower, tc);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -7689,6 +7438,9 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle,
struct hclge_fd_rule *rule;
int ret;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
spin_lock_bh(&hdev->fd_rule_lock);
rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
@@ -7751,6 +7503,9 @@ out:
static void hclge_sync_fd_table(struct hclge_dev *hdev)
{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
@@ -7776,7 +7531,7 @@ static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
}
static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
@@ -7866,7 +7621,7 @@ static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
}
/* modify and write new config parameter */
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_comm_cmd_reuse_desc(&desc, false);
req->switch_param = (req->switch_param & param_mask) | switch_param;
req->param_mask = param_mask;
@@ -7960,7 +7715,7 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
/* 3 Config mac work mode with loopback flag
* and its original configure parameters
*/
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_comm_cmd_reuse_desc(&desc, false);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
@@ -7968,16 +7723,13 @@ static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
return ret;
}
-static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
- enum hnae3_loop loop_mode)
+static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
{
-#define HCLGE_COMMON_LB_RETRY_MS 10
-#define HCLGE_COMMON_LB_RETRY_NUM 100
-
struct hclge_common_lb_cmd *req;
struct hclge_desc desc;
- int ret, i = 0;
u8 loop_mode_b;
+ int ret;
req = (struct hclge_common_lb_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
@@ -7994,23 +7746,34 @@ static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
break;
default:
dev_err(&hdev->pdev->dev,
- "unsupported common loopback mode %d\n", loop_mode);
+ "unsupported loopback mode %d\n", loop_mode);
return -ENOTSUPP;
}
- if (en) {
+ req->mask = loop_mode_b;
+ if (en)
req->enable = loop_mode_b;
- req->mask = loop_mode_b;
- } else {
- req->mask = loop_mode_b;
- }
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev,
- "common loopback set fail, ret = %d\n", ret);
- return ret;
- }
+ "failed to send loopback cmd, loop_mode = %d, ret = %d\n",
+ loop_mode, ret);
+
+ return ret;
+}
+
+static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
+{
+#define HCLGE_COMMON_LB_RETRY_MS 10
+#define HCLGE_COMMON_LB_RETRY_NUM 100
+
+ struct hclge_common_lb_cmd *req;
+ struct hclge_desc desc;
+ u32 i = 0;
+ int ret;
+
+ req = (struct hclge_common_lb_cmd *)desc.data;
do {
msleep(HCLGE_COMMON_LB_RETRY_MS);
@@ -8019,20 +7782,34 @@ static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "common loopback get, ret = %d\n", ret);
+ "failed to get loopback done status, ret = %d\n",
+ ret);
return ret;
}
} while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
!(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
- dev_err(&hdev->pdev->dev, "common loopback set timeout\n");
+ dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
return -EBUSY;
} else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
- dev_err(&hdev->pdev->dev, "common loopback set failed in fw\n");
+ dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
return -EIO;
}
- return ret;
+
+ return 0;
+}
+
+static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
+{
+ int ret;
+
+ ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
+ if (ret)
+ return ret;
+
+ return hclge_cfg_common_loopback_wait(hdev);
}
static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
@@ -8152,7 +7929,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int ret;
+ int ret = 0;
/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
* default, SSU loopback is enabled, so if the SMAC and the DMAC are
@@ -8179,6 +7956,8 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
case HNAE3_LOOP_PHY:
ret = hclge_set_phy_loopback(hdev, en);
break;
+ case HNAE3_LOOP_EXTERNAL:
+ break;
default:
ret = -ENOTSUPP;
dev_err(&hdev->pdev->dev,
@@ -8213,22 +7992,6 @@ static int hclge_set_default_loopback(struct hclge_dev *hdev)
HNAE3_LOOP_PARALLEL_SERDES);
}
-static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hnae3_knic_private_info *kinfo;
- struct hnae3_queue *queue;
- struct hclge_tqp *tqp;
- int i;
-
- kinfo = &vport->nic.kinfo;
- for (i = 0; i < kinfo->num_tqps; i++) {
- queue = handle->kinfo.tqp[i];
- tqp = container_of(queue, struct hclge_tqp, q);
- memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
- }
-}
-
static void hclge_flush_link_update(struct hclge_dev *hdev)
{
#define HCLGE_FLUSH_LINK_TIMEOUT 100000
@@ -8270,7 +8033,7 @@ static int hclge_ae_start(struct hnae3_handle *handle)
hdev->hw.mac.link = 0;
/* reset tqp stats */
- hclge_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclge_mac_start_phy(hdev);
@@ -8308,7 +8071,7 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
hclge_mac_stop_phy(hdev);
/* reset tqp stats */
- hclge_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclge_update_link_status(hdev);
}
@@ -8316,9 +8079,11 @@ int hclge_vport_start(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
+ set_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
vport->last_active_jiffies = jiffies;
+ vport->need_notify = 0;
if (test_bit(vport->vport_id, hdev->vport_config_block)) {
if (vport->vport_id) {
@@ -8336,7 +8101,9 @@ int hclge_vport_start(struct hclge_vport *vport)
void hclge_vport_stop(struct hclge_vport *vport)
{
+ clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ vport->need_notify = 0;
}
static int hclge_client_start(struct hnae3_handle *handle)
@@ -8511,14 +8278,14 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
if (is_mc) {
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
memcpy(desc[0].data,
req,
sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
hclge_cmd_setup_basic_desc(&desc[1],
HCLGE_OPC_MAC_VLAN_ADD,
true);
- desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[2],
HCLGE_OPC_MAC_VLAN_ADD,
true);
@@ -8568,12 +8335,12 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
resp_code,
HCLGE_MAC_VLAN_ADD);
} else {
- hclge_cmd_reuse_desc(&mc_desc[0], false);
- mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_reuse_desc(&mc_desc[1], false);
- mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_reuse_desc(&mc_desc[2], false);
- mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
+ mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
+ mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
+ mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
memcpy(mc_desc[0].data, req,
sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
@@ -8743,6 +8510,7 @@ int hclge_update_mac_list(struct hclge_vport *vport,
enum HCLGE_MAC_ADDR_TYPE mac_type,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_node *mac_node;
struct list_head *list;
@@ -8767,9 +8535,10 @@ int hclge_update_mac_list(struct hclge_vport *vport,
/* if this address is never added, unnecessary to delete */
if (state == HCLGE_MAC_TO_DEL) {
spin_unlock_bh(&vport->mac_list_lock);
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "failed to delete address %pM from mac list\n",
- addr);
+ "failed to delete address %s from mac list\n",
+ format_mac_addr);
return -ENOENT;
}
@@ -8802,6 +8571,7 @@ static int hclge_add_uc_addr(struct hnae3_handle *handle,
int hclge_add_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc;
@@ -8812,9 +8582,10 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
- addr, is_zero_ether_addr(addr),
+ "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
+ format_mac_addr, is_zero_ether_addr(addr),
is_broadcast_ether_addr(addr),
is_multicast_ether_addr(addr));
return -EINVAL;
@@ -8871,6 +8642,7 @@ static int hclge_rm_uc_addr(struct hnae3_handle *handle,
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
int ret;
@@ -8879,8 +8651,9 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
if (is_zero_ether_addr(addr) ||
is_broadcast_ether_addr(addr) ||
is_multicast_ether_addr(addr)) {
- dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
- addr);
+ hnae3_format_mac_addr(format_mac_addr, addr);
+ dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -8888,12 +8661,11 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hclge_prepare_mac_addr(&req, addr, false);
ret = hclge_remove_mac_vlan_tbl(vport, &req);
- if (!ret) {
+ if (!ret || ret == -ENOENT) {
mutex_lock(&hdev->vport_lock);
hclge_update_umv_space(vport, true);
mutex_unlock(&hdev->vport_lock);
- } else if (ret == -ENOENT) {
- ret = 0;
+ return 0;
}
return ret;
@@ -8911,6 +8683,7 @@ static int hclge_add_mc_addr(struct hnae3_handle *handle,
int hclge_add_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
@@ -8919,9 +8692,10 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
/* mac addr check */
if (!is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_err(&hdev->pdev->dev,
- "Add mc mac err! invalid mac:%pM.\n",
- addr);
+ "Add mc mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
memset(&req, 0, sizeof(req));
@@ -8973,16 +8747,18 @@ static int hclge_rm_mc_addr(struct hnae3_handle *handle,
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
struct hclge_desc desc[3];
/* mac addr check */
if (!is_multicast_ether_addr(addr)) {
+ hnae3_format_mac_addr(format_mac_addr, addr);
dev_dbg(&hdev->pdev->dev,
- "Remove mc mac err! invalid mac:%pM.\n",
- addr);
+ "Remove mc mac err! invalid mac:%s.\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -9422,30 +9198,38 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
u8 *mac_addr)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
vport = hclge_get_vf_vport(hdev, vf);
if (!vport)
return -EINVAL;
+ hnae3_format_mac_addr(format_mac_addr, mac_addr);
if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
dev_info(&hdev->pdev->dev,
- "Specified MAC(=%pM) is same as before, no change committed!\n",
- mac_addr);
+ "Specified MAC(=%s) is same as before, no change committed!\n",
+ format_mac_addr);
return 0;
}
ether_addr_copy(vport->vf_info.mac, mac_addr);
+ /* there is a time window before the PF learns the VF is down,
+ * so sending the mailbox message may fail; that is fine, the VF
+ * will query it again when it reinitializes.
+ */
if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
dev_info(&hdev->pdev->dev,
- "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
- vf, mac_addr);
- return hclge_inform_reset_assert_to_vf(vport);
+ "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
+ vf, format_mac_addr);
+ (void)hclge_inform_reset_assert_to_vf(vport);
+ return 0;
}
- dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
- vf, mac_addr);
+ dev_info(&hdev->pdev->dev,
+ "MAC of VF %d has been set to %s, will be active after VF reset\n",
+ vf, format_mac_addr);
return 0;
}
@@ -9549,6 +9333,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
{
const unsigned char *new_addr = (const unsigned char *)p;
struct hclge_vport *vport = hclge_get_vport(handle);
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclge_dev *hdev = vport->back;
unsigned char *old_addr = NULL;
int ret;
@@ -9557,9 +9342,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
if (is_zero_ether_addr(new_addr) ||
is_broadcast_ether_addr(new_addr) ||
is_multicast_ether_addr(new_addr)) {
+ hnae3_format_mac_addr(format_mac_addr, new_addr);
dev_err(&hdev->pdev->dev,
- "change uc mac err! invalid mac: %pM.\n",
- new_addr);
+ "change uc mac err! invalid mac: %s.\n",
+ format_mac_addr);
return -EINVAL;
}
@@ -9577,9 +9363,10 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
spin_lock_bh(&vport->mac_list_lock);
ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
if (ret) {
+ hnae3_format_mac_addr(format_mac_addr, new_addr);
dev_err(&hdev->pdev->dev,
- "failed to change the mac addr:%pM, ret = %d\n",
- new_addr, ret);
+ "failed to change the mac addr:%s, ret = %d\n",
+ format_mac_addr, ret);
spin_unlock_bh(&vport->mac_list_lock);
if (!is_first)
@@ -9677,20 +9464,20 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to get vlan filter config, ret = %d.\n", ret);
+ dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
+ vf_id, ret);
return ret;
}
/* modify and write new config parameter */
- hclge_cmd_reuse_desc(&desc, false);
+ hclge_comm_cmd_reuse_desc(&desc, false);
req->vlan_fe = filter_en ?
(req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
- ret);
+ dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
+ vf_id, ret);
return ret;
}
@@ -9809,7 +9596,7 @@ static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
hclge_cmd_setup_basic_desc(&desc[1],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
- desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
vf_byte_off = vfid / 8;
vf_byte_val = 1 << (vfid % 8);
@@ -9936,6 +9723,32 @@ static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
return ret;
}
+static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
+ u16 vlan_id, bool is_kill)
+{
+ /* vlan 0 may be added twice when 8021q module is enabled */
+ if (!is_kill && !vlan_id &&
+ test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ return false;
+
+ if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_warn(&hdev->pdev->dev,
+ "Add port vlan failed, vport %u is already in vlan %u\n",
+ vport_id, vlan_id);
+ return false;
+ }
+
+ if (is_kill &&
+ !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_warn(&hdev->pdev->dev,
+ "Delete port vlan failed, vport %u is not in vlan %u\n",
+ vport_id, vlan_id);
+ return false;
+ }
+
+ return true;
+}
+
static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
u16 vport_id, u16 vlan_id,
bool is_kill)
@@ -9957,26 +9770,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return ret;
}
- /* vlan 0 may be added twice when 8021q module is enabled */
- if (!is_kill && !vlan_id &&
- test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
return 0;
- if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
- dev_err(&hdev->pdev->dev,
- "Add port vlan failed, vport %u is already in vlan %u\n",
- vport_id, vlan_id);
- return -EINVAL;
- }
-
- if (is_kill &&
- !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
- dev_err(&hdev->pdev->dev,
- "Delete port vlan failed, vport %u is not in vlan %u\n",
- vport_id, vlan_id);
- return -EINVAL;
- }
-
for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
vport_num++;
@@ -10168,67 +9964,80 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
return status;
}
-static int hclge_init_vlan_config(struct hclge_dev *hdev)
+static int hclge_init_vlan_filter(struct hclge_dev *hdev)
{
-#define HCLGE_DEF_VLAN_TYPE 0x8100
-
- struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hclge_vport *vport;
int ret;
int i;
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- /* for revision 0x21, vf vlan filter is per function */
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- ret = hclge_set_vlan_filter_ctrl(hdev,
- HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS,
- true,
- vport->vport_id);
- if (ret)
- return ret;
- vport->cur_vlan_fltr_en = true;
- }
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ true, 0);
- ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, true,
- 0);
- if (ret)
- return ret;
- } else {
+ /* for revision 0x21, vf vlan filter is per function */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS_V1_B,
- true, 0);
+ HCLGE_FILTER_FE_EGRESS, true,
+ vport->vport_id);
if (ret)
return ret;
+ vport->cur_vlan_fltr_en = true;
}
- hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
- hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, true, 0);
+}
- ret = hclge_set_vlan_protocol_type(hdev);
- if (ret)
- return ret;
+static int hclge_init_vlan_type(struct hclge_dev *hdev)
+{
+ hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
+ hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- u16 vlan_tag;
- u8 qos;
+ return hclge_set_vlan_protocol_type(hdev);
+}
+static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
+{
+ struct hclge_port_base_vlan_config *cfg;
+ struct hclge_vport *vport;
+ int ret;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
- vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- qos = vport->port_base_vlan_cfg.vlan_info.qos;
+ cfg = &vport->port_base_vlan_cfg;
- ret = hclge_vlan_offload_cfg(vport,
- vport->port_base_vlan_cfg.state,
- vlan_tag, qos);
+ ret = hclge_vlan_offload_cfg(vport, cfg->state,
+ cfg->vlan_info.vlan_tag,
+ cfg->vlan_info.qos);
if (ret)
return ret;
}
+ return 0;
+}
+
+static int hclge_init_vlan_config(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ int ret;
+
+ ret = hclge_init_vlan_filter(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vlan_type(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vport_vlan_offload(hdev);
+ if (ret)
+ return ret;
return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
@@ -10237,19 +10046,28 @@ static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
bool writen_to_tbl)
{
struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
- list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
- if (vlan->vlan_id == vlan_id)
+ mutex_lock(&hdev->vport_lock);
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->vlan_id == vlan_id) {
+ mutex_unlock(&hdev->vport_lock);
return;
+ }
+ }
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
- if (!vlan)
+ if (!vlan) {
+ mutex_unlock(&hdev->vport_lock);
return;
+ }
vlan->hd_tbl_status = writen_to_tbl;
vlan->vlan_id = vlan_id;
list_add_tail(&vlan->node, &vport->vlan_list);
+ mutex_unlock(&hdev->vport_lock);
}
static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
@@ -10258,6 +10076,8 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
struct hclge_dev *hdev = vport->back;
int ret;
+ mutex_lock(&hdev->vport_lock);
+
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (!vlan->hd_tbl_status) {
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
@@ -10267,12 +10087,16 @@ static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
dev_err(&hdev->pdev->dev,
"restore vport vlan list failed, ret=%d\n",
ret);
+
+ mutex_unlock(&hdev->vport_lock);
return ret;
}
}
vlan->hd_tbl_status = true;
}
+ mutex_unlock(&hdev->vport_lock);
+
return 0;
}
@@ -10282,6 +10106,8 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
+ mutex_lock(&hdev->vport_lock);
+
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (vlan->vlan_id == vlan_id) {
if (is_write_tbl && vlan->hd_tbl_status)
@@ -10296,6 +10122,8 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
break;
}
}
+
+ mutex_unlock(&hdev->vport_lock);
}
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
@@ -10303,6 +10131,8 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
+ mutex_lock(&hdev->vport_lock);
+
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (vlan->hd_tbl_status)
hclge_set_vlan_filter_hw(hdev,
@@ -10318,6 +10148,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
}
}
clear_bit(vport->vport_id, hdev->vf_vlan_full);
+ mutex_unlock(&hdev->vport_lock);
}
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
@@ -10326,6 +10157,8 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
struct hclge_vport *vport;
int i;
+ mutex_lock(&hdev->vport_lock);
+
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
@@ -10333,37 +10166,61 @@ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
kfree(vlan);
}
}
+
+ mutex_unlock(&hdev->vport_lock);
}
-void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
+void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
{
- struct hclge_vport_vlan_cfg *vlan, *tmp;
- struct hclge_dev *hdev = vport->back;
+ struct hclge_vlan_info *vlan_info;
+ struct hclge_vport *vport;
u16 vlan_proto;
u16 vlan_id;
u16 state;
+ int vf_id;
int ret;
- vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
- vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- state = vport->port_base_vlan_cfg.state;
+ /* PF should restore port base vlan for all VFs */
+ for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
+ vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
+ vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
+ &vport->port_base_vlan_cfg.vlan_info :
+ &vport->port_base_vlan_cfg.old_vlan_info;
- if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
- clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
- hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
- vport->vport_id, vlan_id,
- false);
- return;
+ vlan_id = vlan_info->vlan_tag;
+ vlan_proto = vlan_info->vlan_proto;
+ state = vport->port_base_vlan_cfg.state;
+
+ if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
+ clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
+ ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
+ vport->vport_id,
+ vlan_id, false);
+ vport->port_base_vlan_cfg.tbl_sta = ret == 0;
+ }
}
+}
- list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
- ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
- vport->vport_id,
- vlan->vlan_id, false);
- if (ret)
- break;
- vlan->hd_tbl_status = true;
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+
+ if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, false);
+ if (ret)
+ break;
+ vlan->hd_tbl_status = true;
+ }
}
+
+ mutex_unlock(&hdev->vport_lock);
}
/* For global reset and imp reset, hardware will clear the mac table,
@@ -10403,6 +10260,7 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev)
struct hnae3_handle *handle = &vport->nic;
hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_port_base_vlan_config(hdev);
hclge_restore_vport_vlan_table(vport);
set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
hclge_restore_fd_entries(handle);
@@ -10459,6 +10317,8 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
false);
}
+ vport->port_base_vlan_cfg.tbl_sta = false;
+
/* force add VLAN 0 */
ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
if (ret)
@@ -10485,12 +10345,42 @@ static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
return false;
}
+static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
+ struct hclge_vlan_info *new_info,
+ struct hclge_vlan_info *old_info)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ /* add new VLAN tag */
+ ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
+ vport->vport_id, new_info->vlan_tag,
+ false);
+ if (ret)
+ return ret;
+
+ vport->port_base_vlan_cfg.tbl_sta = false;
+ /* remove old VLAN tag */
+ if (old_info->vlan_tag == 0)
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+ true, 0);
+ else
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ old_info->vlan_tag, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clear vport%u port base vlan %u, ret = %d.\n",
+ vport->vport_id, old_info->vlan_tag, ret);
+
+ return ret;
+}
+
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info)
{
struct hnae3_handle *nic = &vport->nic;
struct hclge_vlan_info *old_vlan_info;
- struct hclge_dev *hdev = vport->back;
int ret;
old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
@@ -10503,38 +10393,12 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
goto out;
- if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
- /* add new VLAN tag */
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(vlan_info->vlan_proto),
- vport->vport_id,
- vlan_info->vlan_tag,
- false);
- if (ret)
- return ret;
-
- /* remove old VLAN tag */
- if (old_vlan_info->vlan_tag == 0)
- ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
- true, 0);
- else
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(ETH_P_8021Q),
- vport->vport_id,
- old_vlan_info->vlan_tag,
- true);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to clear vport%u port base vlan %u, ret = %d.\n",
- vport->vport_id, old_vlan_info->vlan_tag, ret);
- return ret;
- }
-
- goto out;
- }
-
- ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
- old_vlan_info);
+ if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
+ ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
+ old_vlan_info);
+ else
+ ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
+ old_vlan_info);
if (ret)
return ret;
@@ -10545,7 +10409,9 @@ out:
else
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+ vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
vport->port_base_vlan_cfg.vlan_info = *vlan_info;
+ vport->port_base_vlan_cfg.tbl_sta = true;
hclge_set_vport_vlan_fltr_change(vport);
return 0;
@@ -10613,15 +10479,22 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
return ret;
}
- /* for DEVICE_VERSION_V3, vf doesn't need to know about the port based
+ /* there is a time window before the PF learns the VF is down,
+ * so sending the mailbox message may fail; that is fine, the VF
+ * will query it again when it reinitializes.
+ * for DEVICE_VERSION_V3, vf doesn't need to know about the port based
* VLAN state.
*/
- if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
- test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
- hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
- vport->vport_id, state,
- &vlan_info);
-
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
+ if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+ vport->vport_id,
+ state,
+ &vlan_info);
+ else
+ set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
+ &vport->need_notify);
+ }
return 0;
}
@@ -10678,11 +10551,11 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
}
if (!ret) {
- if (is_kill)
- hclge_rm_vport_vlan_table(vport, vlan_id, false);
- else
+ if (!is_kill)
hclge_add_vport_vlan_table(vport, vlan_id,
writen_to_tbl);
+ else if (is_kill && vlan_id != 0)
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
} else if (is_kill) {
/* when remove hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistence
@@ -10804,6 +10677,9 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* PF's mps must be greater then VF's mps */
for (i = 1; i < hdev->num_alloc_vport; i++)
if (max_frm_size < hdev->vport[i].mps) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set pf mtu for less than vport %d, mps = %u.\n",
+ i, hdev->vport[i].mps);
mutex_unlock(&hdev->vport_lock);
return -EINVAL;
}
@@ -10881,11 +10757,11 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
+ struct hclge_comm_tqp *tqp;
struct hnae3_queue *queue;
- struct hclge_tqp *tqp;
queue = handle->kinfo.tqp[queue_id];
- tqp = container_of(queue, struct hclge_tqp, q);
+ tqp = container_of(queue, struct hclge_comm_tqp, q);
return tqp->index;
}
@@ -11152,7 +11028,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
- u8 *auto_neg, u32 *speed, u8 *duplex)
+ u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -11163,6 +11039,8 @@ static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
*duplex = hdev->hw.mac.duplex;
if (auto_neg)
*auto_neg = hdev->hw.mac.autoneg;
+ if (lane_num)
+ *lane_num = hdev->hw.mac.lane_num;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
@@ -11433,8 +11311,6 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
static int hclge_dev_mem_map(struct hclge_dev *hdev)
{
-#define HCLGE_MEM_BAR 4
-
struct pci_dev *pdev = hdev->pdev;
struct hclge_hw *hw = &hdev->hw;
@@ -11442,10 +11318,11 @@ static int hclge_dev_mem_map(struct hclge_dev *hdev)
if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
return 0;
- hw->mem_base = devm_ioremap_wc(&pdev->dev,
- pci_resource_start(pdev, HCLGE_MEM_BAR),
- pci_resource_len(pdev, HCLGE_MEM_BAR));
- if (!hw->mem_base) {
+ hw->hw.mem_base =
+ devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, HCLGE_MEM_BAR),
+ pci_resource_len(pdev, HCLGE_MEM_BAR));
+ if (!hw->hw.mem_base) {
dev_err(&pdev->dev, "failed to map device memory\n");
return -EFAULT;
}
@@ -11484,11 +11361,11 @@ static int hclge_pci_init(struct hclge_dev *hdev)
pci_set_master(pdev);
hw = &hdev->hw;
- hw->io_base = pcim_iomap(pdev, 2, 0);
- if (!hw->io_base) {
+ hw->hw.io_base = pcim_iomap(pdev, 2, 0);
+ if (!hw->hw.io_base) {
dev_err(&pdev->dev, "Can't map configuration register space\n");
ret = -ENOMEM;
- goto err_clr_master;
+ goto err_release_regions;
}
ret = hclge_dev_mem_map(hdev);
@@ -11500,9 +11377,8 @@ static int hclge_pci_init(struct hclge_dev *hdev)
return 0;
err_unmap_io_base:
- pcim_iounmap(pdev, hdev->hw.io_base);
-err_clr_master:
- pci_clear_master(pdev);
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
+err_release_regions:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
@@ -11514,12 +11390,11 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
- if (hdev->hw.mem_base)
- devm_iounmap(&pdev->dev, hdev->hw.mem_base);
+ if (hdev->hw.hw.mem_base)
+ devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
- pcim_iounmap(pdev, hdev->hw.io_base);
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_free_irq_vectors(pdev);
- pci_clear_master(pdev);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
}
@@ -11556,29 +11431,25 @@ static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
int retry_cnt = 0;
int ret;
-retry:
- down(&hdev->reset_sem);
- set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- hdev->reset_type = rst_type;
- ret = hclge_reset_prepare(hdev);
- if (ret || hdev->reset_pending) {
- dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
- ret);
- if (hdev->reset_pending ||
- retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
- dev_err(&hdev->pdev->dev,
- "reset_pending:0x%lx, retry_cnt:%d\n",
- hdev->reset_pending, retry_cnt);
- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
- msleep(HCLGE_RESET_RETRY_WAIT_MS);
- goto retry;
- }
+ while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = rst_type;
+ ret = hclge_reset_prepare(hdev);
+ if (!ret && !hdev->reset_pending)
+ break;
+
+ dev_err(&hdev->pdev->dev,
+ "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
+ ret, hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGE_RESET_RETRY_WAIT_MS);
}
/* disable misc vector before reset done */
hclge_enable_vector(&hdev->misc_vector, false);
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
if (hdev->reset_type == HNAE3_FLR_RESET)
hdev->rst_stats.flr_rst_cnt++;
@@ -11651,6 +11522,124 @@ static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
}
+static struct hclge_wol_info *hclge_get_wol_info(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return &vport->back->hw.mac.wol;
+}
+
+static int hclge_get_wol_supported_mode(struct hclge_dev *hdev,
+ u32 *wol_supported)
+{
+ struct hclge_query_wol_supported_cmd *wol_supported_cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_GET_SUPPORTED_MODE,
+ true);
+ wol_supported_cmd = (struct hclge_query_wol_supported_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query wol supported, ret = %d\n", ret);
+ return ret;
+ }
+
+ *wol_supported = le32_to_cpu(wol_supported_cmd->supported_wake_mode);
+
+ return 0;
+}
+
+static int hclge_set_wol_cfg(struct hclge_dev *hdev,
+ struct hclge_wol_info *wol_info)
+{
+ struct hclge_wol_cfg_cmd *wol_cfg_cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_WOL_CFG, false);
+ wol_cfg_cmd = (struct hclge_wol_cfg_cmd *)desc.data;
+ wol_cfg_cmd->wake_on_lan_mode = cpu_to_le32(wol_info->wol_current_mode);
+ wol_cfg_cmd->sopass_size = wol_info->wol_sopass_size;
+ memcpy(wol_cfg_cmd->sopass, wol_info->wol_sopass, SOPASS_MAX);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to set wol config, ret = %d\n", ret);
+
+ return ret;
+}
+
+static int hclge_update_wol(struct hclge_dev *hdev)
+{
+ struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
+
+ if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
+ return 0;
+
+ return hclge_set_wol_cfg(hdev, wol_info);
+}
+
+static int hclge_init_wol(struct hclge_dev *hdev)
+{
+ struct hclge_wol_info *wol_info = &hdev->hw.mac.wol;
+ int ret;
+
+ if (!hnae3_ae_dev_wol_supported(hdev->ae_dev))
+ return 0;
+
+ memset(wol_info, 0, sizeof(struct hclge_wol_info));
+ ret = hclge_get_wol_supported_mode(hdev,
+ &wol_info->wol_support_mode);
+ if (ret) {
+ wol_info->wol_support_mode = 0;
+ return ret;
+ }
+
+ return hclge_update_wol(hdev);
+}
+
+static void hclge_get_wol(struct hnae3_handle *handle,
+ struct ethtool_wolinfo *wol)
+{
+ struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
+
+ wol->supported = wol_info->wol_support_mode;
+ wol->wolopts = wol_info->wol_current_mode;
+ if (wol_info->wol_current_mode & WAKE_MAGICSECURE)
+ memcpy(wol->sopass, wol_info->wol_sopass, SOPASS_MAX);
+}
+
+static int hclge_set_wol(struct hnae3_handle *handle,
+ struct ethtool_wolinfo *wol)
+{
+ struct hclge_wol_info *wol_info = hclge_get_wol_info(handle);
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u32 wol_mode;
+ int ret;
+
+ wol_mode = wol->wolopts;
+ if (wol_mode & ~wol_info->wol_support_mode)
+ return -EINVAL;
+
+ wol_info->wol_current_mode = wol_mode;
+ if (wol_mode & WAKE_MAGICSECURE) {
+ memcpy(wol_info->wol_sopass, wol->sopass, SOPASS_MAX);
+ wol_info->wol_sopass_size = SOPASS_MAX;
+ } else {
+ wol_info->wol_sopass_size = 0;
+ }
+
+ ret = hclge_set_wol_cfg(vport->back, wol_info);
+ if (ret)
+ wol_info->wol_current_mode = 0;
+
+ return ret;
+}
+
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -11683,12 +11672,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_pci_uninit;
/* Firmware command queue initialize */
- ret = hclge_cmd_queue_init(hdev);
+ ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
goto err_devlink_uninit;
/* Firmware command initialize */
- ret = hclge_cmd_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
+ true, hdev->reset_pending);
if (ret)
goto err_cmd_uninit;
@@ -11737,9 +11727,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_msi_irq_uninit;
- if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
- !hnae3_dev_phy_imp_supported(hdev)) {
- ret = hclge_mac_mdio_config(hdev);
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
+ if (hnae3_dev_phy_imp_supported(hdev))
+ ret = hclge_update_tp_port_info(hdev);
+ else
+ ret = hclge_mac_mdio_config(hdev);
+
if (ret)
goto err_msi_irq_uninit;
}
@@ -11776,7 +11769,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
- ret = hclge_rss_init_cfg(hdev);
+ ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
+ &hdev->rss_cfg);
if (ret) {
dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
goto err_mdiobus_unreg;
@@ -11805,6 +11799,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_mdiobus_unreg;
+ ret = hclge_update_port_info(hdev);
+ if (ret)
+ goto err_mdiobus_unreg;
+
INIT_KFIFO(hdev->mac_tnl_log);
hclge_dcb_ops_set(hdev);
@@ -11812,11 +11810,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
- /* Setup affinity after service timer setup because add_timer_on
- * is called in affinity notify.
- */
- hclge_misc_affinity_setup(hdev);
-
hclge_clear_all_event_cause(hdev);
hclge_clear_resetting_state(hdev);
@@ -11843,6 +11836,11 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
/* Enable MISC vector(vector0) */
hclge_enable_vector(&hdev->misc_vector, true);
+ ret = hclge_init_wol(hdev);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "failed to wake on lan init, ret = %d\n", ret);
+
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
@@ -11861,12 +11859,11 @@ err_msi_irq_uninit:
err_msi_uninit:
pci_free_irq_vectors(pdev);
err_cmd_uninit:
- hclge_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_devlink_uninit:
hclge_devlink_uninit(hdev);
err_pci_uninit:
- pcim_iounmap(pdev, hdev->hw.io_base);
- pci_clear_master(pdev);
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
out:
@@ -11877,6 +11874,7 @@ out:
static void hclge_stats_clear(struct hclge_dev *hdev)
{
memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
+ memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
}
static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
@@ -12087,7 +12085,7 @@ static void hclge_reset_vport_state(struct hclge_dev *hdev)
int i;
for (i = 0; i < hdev->num_alloc_vport; i++) {
- hclge_vport_stop(vport);
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
vport++;
}
}
@@ -12112,7 +12110,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_reset_umv_space(hdev);
}
- ret = hclge_cmd_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
+ true, hdev->reset_pending);
if (ret) {
dev_err(&pdev->dev, "Cmd queue init failed\n");
return ret;
@@ -12220,6 +12219,11 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_init_rxd_adv_layout(hdev);
+ ret = hclge_update_wol(hdev);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "failed to update wol config, ret = %d\n", ret);
+
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -12233,7 +12237,6 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_reset_vf_rate(hdev);
hclge_clear_vf_vlan(hdev);
- hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
hclge_ptp_uninit(hdev);
hclge_uninit_rxd_adv_layout(hdev);
@@ -12252,12 +12255,12 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_config_nic_hw_error(hdev, false);
hclge_config_rocee_ras_interrupt(hdev, false);
- hclge_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
hclge_misc_irq_uninit(hdev);
hclge_devlink_uninit(hdev);
hclge_pci_uninit(hdev);
- mutex_destroy(&hdev->vport_lock);
hclge_uninit_vport_vlan_table(hdev);
+ mutex_destroy(&hdev->vport_lock);
ae_dev->priv = NULL;
}
@@ -12288,19 +12291,43 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
*max_rss_size = hdev->pf_rss_size_max;
}
+static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
+ struct hclge_dev *hdev = vport->back;
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
+ u16 tc_valid[HCLGE_MAX_TC_NUM];
+ u16 roundup_size;
+ unsigned int i;
+
+ roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
+ roundup_size = ilog2(roundup_size);
+ /* Set the RSS TC mode according to the new RSS size */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ tc_valid[i] = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = vport->nic.kinfo.rss_size * i;
+ }
+
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
+ tc_size);
+}
+
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
bool rxfh_configured)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
- u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
struct hclge_dev *hdev = vport->back;
- u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
u16 cur_rss_size = kinfo->rss_size;
u16 cur_tqps = kinfo->num_tqps;
- u16 tc_valid[HCLGE_MAX_TC_NUM];
- u16 roundup_size;
u32 *rss_indir;
unsigned int i;
int ret;
@@ -12313,20 +12340,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
return ret;
}
- roundup_size = roundup_pow_of_two(kinfo->rss_size);
- roundup_size = ilog2(roundup_size);
- /* Set the RSS TC mode according to the new RSS size */
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- tc_valid[i] = 0;
-
- if (!(hdev->hw_tc_map & BIT(i)))
- continue;
-
- tc_valid[i] = 1;
- tc_size[i] = roundup_size;
- tc_offset[i] = kinfo->rss_size * i;
- }
- ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ ret = hclge_set_rss_tc_mode_cfg(handle);
if (ret)
return ret;
@@ -12508,7 +12522,7 @@ int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
true);
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
/* initialize the last command BD */
@@ -12552,7 +12566,7 @@ static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
hclge_cmd_setup_basic_desc(desc, cmd, true);
for (i = 0; i < bd_num - 1; i++) {
- desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
desc++;
hclge_cmd_setup_basic_desc(desc, cmd, true);
}
@@ -12889,60 +12903,71 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
return ret;
}
-static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport)
{
- struct hclge_vport *vport = &hdev->vport[0];
struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
+ bool uc_en = false;
+ bool mc_en = false;
u8 tmp_flags;
+ bool bc_en;
int ret;
- u16 i;
if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
vport->last_promisc_flags = vport->overflow_promisc_flags;
}
- if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
+ if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state))
+ return 0;
+
+ /* for PF */
+ if (!vport->vport_id) {
tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
tmp_flags & HNAE3_MPE);
- if (!ret) {
- clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
- &vport->state);
+ if (!ret)
set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
&vport->state);
- }
+ else
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state);
+ return ret;
}
- for (i = 1; i < hdev->num_alloc_vport; i++) {
- bool uc_en = false;
- bool mc_en = false;
- bool bc_en;
+ /* for VF */
+ if (vport->vf_info.trusted) {
+ uc_en = vport->vf_info.request_uc_en > 0 ||
+ vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE;
+ mc_en = vport->vf_info.request_mc_en > 0 ||
+ vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE;
+ }
+ bc_en = vport->vf_info.request_bc_en > 0;
- vport = &hdev->vport[i];
+ ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
+ mc_en, bc_en);
+ if (ret) {
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ return ret;
+ }
+ hclge_set_vport_vlan_fltr_change(vport);
- if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
- &vport->state))
- continue;
+ return 0;
+}
- if (vport->vf_info.trusted) {
- uc_en = vport->vf_info.request_uc_en > 0 ||
- vport->overflow_promisc_flags &
- HNAE3_OVERFLOW_UPE;
- mc_en = vport->vf_info.request_mc_en > 0 ||
- vport->overflow_promisc_flags &
- HNAE3_OVERFLOW_MPE;
- }
- bc_en = vport->vf_info.request_bc_en > 0;
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int ret;
+ u16 i;
- ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
- mc_en, bc_en);
- if (ret) {
- set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
- &vport->state);
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+
+ ret = hclge_sync_vport_promisc_mode(vport);
+ if (ret)
return;
- }
- hclge_set_vport_vlan_fltr_change(vport);
}
}
@@ -12985,7 +13010,7 @@ static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
/* bd0~bd4 need next flag */
if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
- desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
/* setup bd0, this bd contains offset and read length. */
@@ -13070,6 +13095,77 @@ static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
return 0;
}
+/* After SR-IOV is disabled, the VF still has some config and info,
+ * set up by the PF, that need to be cleaned.
+ */
+static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vlan_info vlan_info;
+ int ret;
+
+ clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ vport->need_notify = 0;
+ vport->mps = 0;
+
+ /* after disable sriov, clean VF rate configured by PF */
+ ret = hclge_tm_qs_shaper_cfg(vport, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clean vf%d rate config, ret = %d\n",
+ vfid, ret);
+
+ vlan_info.vlan_tag = 0;
+ vlan_info.qos = 0;
+ vlan_info.vlan_proto = ETH_P_8021Q;
+ ret = hclge_update_port_base_vlan_cfg(vport,
+ HNAE3_PORT_BASE_VLAN_DISABLE,
+ &vlan_info);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clean vf%d port base vlan, ret = %d\n",
+ vfid, ret);
+
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clean vf%d spoof config, ret = %d\n",
+ vfid, ret);
+
+ memset(&vport->vf_info, 0, sizeof(vport->vf_info));
+}
+
+static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < num_vfs; i++) {
+ vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
+
+ hclge_clear_vport_vf_info(vport, i);
+ }
+}
+
+static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
+ u8 *priority)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+
+ if (dscp >= HNAE3_MAX_DSCP)
+ return -EINVAL;
+
+ if (tc_mode)
+ *tc_mode = vport->nic.kinfo.tc_map_mode;
+ if (priority)
+ *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
+ vport->nic.kinfo.dscp_prio[dscp];
+
+ return 0;
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -13093,9 +13189,10 @@ static const struct hnae3_ae_ops hclge_ops = {
.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
.get_media_type = hclge_get_media_type,
.check_port_speed = hclge_check_port_speed,
+ .get_fec_stats = hclge_get_fec_stats,
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
- .get_rss_key_size = hclge_get_rss_key_size,
+ .get_rss_key_size = hclge_comm_get_rss_key_size,
.get_rss = hclge_get_rss,
.set_rss = hclge_set_rss,
.set_rss_tuple = hclge_set_rss_tuple,
@@ -13171,6 +13268,10 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_rx_hwts = hclge_ptp_get_rx_hwts,
.get_ts_info = hclge_ptp_get_ts_info,
.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
+ .clean_vf_config = hclge_clean_vport_config,
+ .get_dscp_prio = hclge_get_dscp_prio,
+ .get_wol = hclge_get_wol,
+ .set_wol = hclge_set_wol,
};
static struct hnae3_ae_algo ae_algo = {
@@ -13178,7 +13279,7 @@ static struct hnae3_ae_algo ae_algo = {
.pdev_id_table = ae_algo_pci_tbl,
};
-static int hclge_init(void)
+static int __init hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
@@ -13193,7 +13294,7 @@ static int hclge_init(void)
return 0;
}
-static void hclge_exit(void)
+static void __exit hclge_exit(void)
{
hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ebba603483a0..81aa6b0facf5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -13,6 +13,8 @@
#include "hclge_cmd.h"
#include "hclge_ptp.h"
#include "hnae3.h"
+#include "hclge_comm_rss.h"
+#include "hclge_comm_tqp_stats.h"
#define HCLGE_MOD_VERSION "1.0"
#define HCLGE_DRIVER_NAME "hclge"
@@ -38,20 +40,7 @@
#define HCLGE_VECTOR_REG_OFFSET_H 0x1000
#define HCLGE_VECTOR_VF_OFFSET 0x100000
-#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000
-#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004
#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008
-#define HCLGE_NIC_CSQ_TAIL_REG 0x27010
-#define HCLGE_NIC_CSQ_HEAD_REG 0x27014
-#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018
-#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701C
-#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020
-#define HCLGE_NIC_CRQ_TAIL_REG 0x27024
-#define HCLGE_NIC_CRQ_HEAD_REG 0x27028
-
-#define HCLGE_CMDQ_INTR_STS_REG 0x27104
-#define HCLGE_CMDQ_INTR_EN_REG 0x27108
-#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
/* bar registers for common func */
#define HCLGE_GRO_EN_REG 0x28000
@@ -93,22 +82,6 @@
#define HCLGE_TQP_INTR_RL_REG 0x20900
#define HCLGE_RSS_IND_TBL_SIZE 512
-#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
-#define HCLGE_RSS_KEY_SIZE 40
-#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
-#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
-#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
-#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
-
-#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
-#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
-#define HCLGE_D_PORT_BIT BIT(0)
-#define HCLGE_S_PORT_BIT BIT(1)
-#define HCLGE_D_IP_BIT BIT(2)
-#define HCLGE_S_IP_BIT BIT(3)
-#define HCLGE_V_TAG_BIT BIT(4)
-#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \
- (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
@@ -196,6 +169,14 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U
#define HCLGE_TRIGGER_IMP_RESET_B 7U
+#define HCLGE_TQP_MEM_SIZE 0x10000
+#define HCLGE_MEM_BAR 4
+/* in BAR4, the first half is for RoCE and the second half is for the NIC */
+#define HCLGE_NIC_MEM_OFFSET(hdev) \
+ (pci_resource_len((hdev)->pdev, HCLGE_MEM_BAR) >> 1)
+#define HCLGE_TQP_MEM_OFFSET(hdev, i) \
+ (HCLGE_NIC_MEM_OFFSET(hdev) + HCLGE_TQP_MEM_SIZE * (i))
+
#define HCLGE_MAC_DEFAULT_FRAME \
(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
@@ -228,7 +209,6 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_MBX_HANDLING,
HCLGE_STATE_ERR_SERVICE_SCHED,
HCLGE_STATE_STATISTICS_UPDATING,
- HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_LINK_UPDATING,
HCLGE_STATE_RST_FAIL,
HCLGE_STATE_FD_TBL_CHANGED,
@@ -236,6 +216,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_FD_USER_DEF_CHANGED,
HCLGE_STATE_PTP_EN,
HCLGE_STATE_PTP_TX_HANDLING,
+ HCLGE_STATE_FEC_STATS_UPDATING,
HCLGE_STATE_MAX
};
@@ -268,6 +249,13 @@ enum HCLGE_MAC_DUPLEX {
#define QUERY_SFP_SPEED 0
#define QUERY_ACTIVE_SPEED 1
+struct hclge_wol_info {
+	u32 wol_support_mode; /* supported wake on lan modes */
+ u32 wol_current_mode;
+ u8 wol_sopass[SOPASS_MAX];
+ u8 wol_sopass_size;
+};
+
struct hclge_mac {
u8 mac_id;
u8 phy_addr;
@@ -278,6 +266,7 @@ struct hclge_mac {
u8 duplex;
u8 support_autoneg;
u8 speed_type; /* 0: sfp speed, 1: active speed */
+ u8 lane_num;
u32 speed;
u32 max_speed;
u32 speed_ability; /* speed ability supported by current media */
@@ -286,6 +275,7 @@ struct hclge_mac {
u32 user_fec_mode;
u32 fec_ability;
int link; /* store the link status of mac & phy (if phy exists) */
+ struct hclge_wol_info wol;
struct phy_device *phydev;
struct mii_bus *mdio_bus;
phy_interface_t phy_if;
@@ -294,31 +284,9 @@ struct hclge_mac {
};
struct hclge_hw {
- void __iomem *io_base;
- void __iomem *mem_base;
+ struct hclge_comm_hw hw;
struct hclge_mac mac;
int num_vec;
- struct hclge_cmq cmq;
-};
-
-/* TQP stats */
-struct hlcge_tqp_stats {
- /* query_tqp_tx_queue_statistics ,opcode id: 0x0B03 */
- u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
- /* query_tqp_rx_queue_statistics ,opcode id: 0x0B13 */
- u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
-};
-
-struct hclge_tqp {
- /* copy of device pointer from pci_dev,
- * used when perform DMA mapping
- */
- struct device *dev;
- struct hnae3_queue q;
- struct hlcge_tqp_stats tqp_stats;
- u16 index; /* Global index in a NIC controller */
-
- bool alloced;
};
enum hclge_fc_mode {
@@ -530,6 +498,26 @@ struct hclge_mac_stats {
#define HCLGE_STATS_TIMER_INTERVAL 300UL
+/* fec stats, opcode id: 0x0316 */
+#define HCLGE_FEC_STATS_MAX_LANES 8
+struct hclge_fec_stats {
+ /* fec rs mode total stats */
+ u64 rs_corr_blocks;
+ u64 rs_uncorr_blocks;
+ u64 rs_error_blocks;
+	/* fec base-r mode per-lane stats */
+ u64 base_r_lane_num;
+ u64 base_r_corr_blocks;
+ u64 base_r_uncorr_blocks;
+ union {
+ struct {
+ u64 base_r_corr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ u64 base_r_uncorr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ };
+ u64 per_lanes[HCLGE_FEC_STATS_MAX_LANES * 2];
+ };
+};
+
struct hclge_vlan_type_cfg {
u16 rx_ot_fst_vlan_type;
u16 rx_ot_sec_vlan_type;
@@ -641,6 +629,11 @@ struct key_info {
#define MAX_FD_FILTER_NUM 4096
#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL
+#define hclge_read_dev(a, reg) \
+ hclge_comm_read_reg((a)->hw.io_base, reg)
+#define hclge_write_dev(a, reg, value) \
+ hclge_comm_write_reg((a)->hw.io_base, reg, value)
+
enum HCLGE_FD_ACTIVE_RULE_TYPE {
HCLGE_FD_RULE_NONE,
HCLGE_FD_ARFS_ACTIVE,
@@ -817,8 +810,8 @@ struct hclge_vf_vlan_cfg {
union {
struct {
u8 is_kill;
- u16 vlan;
- u16 proto;
+ __le16 vlan;
+ __le16 proto;
};
u8 enable;
};
@@ -863,6 +856,7 @@ struct hclge_dev {
struct hclge_hw hw;
struct hclge_misc_vector misc_vector;
struct hclge_mac_stats mac_stats;
+ struct hclge_fec_stats fec_stats;
unsigned long state;
unsigned long flr_state;
unsigned long last_reset_time;
@@ -920,7 +914,7 @@ struct hclge_dev {
bool cur_promisc;
int num_alloc_vfs; /* Actual number of VFs allocated */
- struct hclge_tqp *htqp;
+ struct hclge_comm_tqp *htqp;
struct hclge_vport *vport;
struct dentry *hclge_dbgfs;
@@ -955,6 +949,8 @@ struct hclge_dev {
u16 hclge_fd_rule_num;
unsigned long serv_processed_cnt;
unsigned long last_serv_processed;
+ unsigned long last_rst_scheduled;
+ unsigned long last_mbx_scheduled;
unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
u8 fd_en;
@@ -973,10 +969,9 @@ struct hclge_dev {
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
- /* affinity mask and notify for misc interrupt */
- cpumask_t affinity_mask;
struct hclge_ptp *ptp;
struct devlink *devlink;
+ struct hclge_comm_rss_cfg rss_cfg;
};
/* VPort level vlan tag configuration for TX direction */
@@ -1003,25 +998,20 @@ struct hclge_rx_vtag_cfg {
bool strip_tag2_discard_en; /* Outer vlan tag discard for BD enable */
};
-struct hclge_rss_tuple_cfg {
- u8 ipv4_tcp_en;
- u8 ipv4_udp_en;
- u8 ipv4_sctp_en;
- u8 ipv4_fragment_en;
- u8 ipv6_tcp_en;
- u8 ipv6_udp_en;
- u8 ipv6_sctp_en;
- u8 ipv6_fragment_en;
-};
-
enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_ALIVE,
HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
HCLGE_VPORT_STATE_PROMISC_CHANGE,
HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ HCLGE_VPORT_STATE_INITED,
HCLGE_VPORT_STATE_MAX
};
+enum HCLGE_VPORT_NEED_NOTIFY {
+ HCLGE_VPORT_NEED_NOTIFY_RESET,
+ HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
+};
+
struct hclge_vlan_info {
u16 vlan_proto; /* so far support 802.1Q only */
u16 qos;
@@ -1030,7 +1020,9 @@ struct hclge_vlan_info {
struct hclge_port_base_vlan_config {
u16 state;
+ bool tbl_sta;
struct hclge_vlan_info vlan_info;
+ struct hclge_vlan_info old_vlan_info;
};
struct hclge_vf_info {
@@ -1047,15 +1039,6 @@ struct hclge_vf_info {
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
- u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
- /* User configured lookup table entries */
- u16 *rss_indirection_tbl;
- int rss_algo; /* User configured hash algorithm */
- /* User configured rss tuple sets */
- struct hclge_rss_tuple_cfg rss_tuple_sets;
-
- u16 alloc_rss_size;
-
u16 qs_offset;
u32 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
@@ -1075,6 +1058,7 @@ struct hclge_vport {
struct hnae3_handle roce;
unsigned long state;
+ unsigned long need_notify;
unsigned long last_active_jiffies;
u32 mps; /* Max packet size */
struct hclge_vf_info vf_info;
@@ -1085,6 +1069,7 @@ struct hclge_vport {
spinlock_t mac_list_lock; /* protect mac address need to add/detele */
struct list_head uc_mac_list; /* Store VF unicast table */
struct list_head mc_mac_list; /* Store VF multicast table */
+
struct list_head vlan_list; /* Store VF vlan table */
};
@@ -1093,6 +1078,11 @@ struct hclge_speed_bit_map {
u32 speed_bit;
};
+struct hclge_mac_speed_map {
+ u32 speed_drv; /* speed defined in driver */
+ u32 speed_fw; /* speed defined in firmware */
+};
+
int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
bool en_mc_pmc, bool en_bc_pmc);
int hclge_add_uc_addr_common(struct hclge_vport *vport,
@@ -1111,25 +1101,20 @@ int hclge_bind_ring_with_vector(struct hclge_vport *vport,
static inline int hclge_get_queue_id(struct hnae3_queue *queue)
{
- struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
+ struct hclge_comm_tqp *tqp =
+ container_of(queue, struct hclge_comm_tqp, q);
return tqp->index;
}
-static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
-{
- return !!hdev->reset_pending;
-}
-
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
-void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle);
@@ -1154,6 +1139,7 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
void hclge_restore_mac_table_common(struct hclge_vport *vport);
+void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev);
void hclge_restore_vport_vlan_table(struct hclge_vport *vport);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 65d78ee4d65a..04ff9bf12185 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -4,6 +4,7 @@
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
+#include "hclge_comm_rss.h"
#define CREATE_TRACE_POINTS
#include "hclge_trace.h"
@@ -33,7 +34,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
{
struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
struct hclge_dev *hdev = vport->back;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
struct hclge_desc desc;
u16 resp;
@@ -56,17 +57,19 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
resp_pf_to_vf->match_id = vf_to_pf_req->match_id;
- resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
- resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
- resp_pf_to_vf->msg.vf_mbx_msg_subcode = vf_to_pf_req->msg.subcode;
+ resp_pf_to_vf->msg.code = cpu_to_le16(HCLGE_MBX_PF_VF_RESP);
+ resp_pf_to_vf->msg.vf_mbx_msg_code =
+ cpu_to_le16(vf_to_pf_req->msg.code);
+ resp_pf_to_vf->msg.vf_mbx_msg_subcode =
+ cpu_to_le16(vf_to_pf_req->msg.subcode);
resp = hclge_errno_to_resp(resp_msg->status);
if (resp < SHRT_MAX) {
- resp_pf_to_vf->msg.resp_status = resp;
+ resp_pf_to_vf->msg.resp_status = cpu_to_le16(resp);
} else {
dev_warn(&hdev->pdev->dev,
"failed to send response to VF, response status %u is out-of-bound\n",
resp);
- resp_pf_to_vf->msg.resp_status = EIO;
+ resp_pf_to_vf->msg.resp_status = cpu_to_le16(EIO);
}
if (resp_msg->len > 0)
@@ -90,18 +93,25 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
{
struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
struct hclge_dev *hdev = vport->back;
- enum hclge_cmd_status status;
+ enum hclge_comm_cmd_status status;
struct hclge_desc desc;
+ if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "msg data length(=%u) exceeds maximum(=%u)\n",
+ msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+ return -EMSGSIZE;
+ }
+
resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
resp_pf_to_vf->dest_vfid = dest_vfid;
resp_pf_to_vf->msg_len = msg_len;
- resp_pf_to_vf->msg.code = mbx_opcode;
+ resp_pf_to_vf->msg.code = cpu_to_le16(mbx_opcode);
- memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);
+ memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len);
trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
@@ -114,17 +124,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
return status;
}
+static int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type)
+{
+ __le16 msg_data;
+ u8 dest_vfid;
+
+ dest_vfid = (u8)vport->vport_id;
+ msg_data = cpu_to_le16(reset_type);
+
+ /* send this requested info to VF */
+ return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data),
+ HCLGE_MBX_ASSERTING_RESET, dest_vfid);
+}
+
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
u16 reset_type;
- u8 msg_data[2];
- u8 dest_vfid;
BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);
- dest_vfid = (u8)vport->vport_id;
-
if (hdev->reset_type == HNAE3_FUNC_RESET)
reset_type = HNAE3_VF_PF_FUNC_RESET;
else if (hdev->reset_type == HNAE3_FLR_RESET)
@@ -132,11 +151,7 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
else
reset_type = HNAE3_VF_FUNC_RESET;
- memcpy(&msg_data[0], &reset_type, sizeof(u16));
-
- /* send this requested info to VF */
- return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
- HCLGE_MBX_ASSERTING_RESET, dest_vfid);
+ return hclge_inform_vf_reset(vport, reset_type);
}
static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
@@ -175,13 +190,13 @@ static int hclge_get_ring_chain_from_mbx(
ring_num = req->msg.ring_num;
if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
- return -ENOMEM;
+ return -EINVAL;
for (i = 0; i < ring_num; i++) {
if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
req->msg.param[i].tqp_index,
- vport->nic.kinfo.rss_size - 1);
+ vport->nic.kinfo.rss_size - 1U);
return -EINVAL;
}
}
@@ -241,6 +256,81 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
return ret;
}
+static int hclge_query_ring_vector_map(struct hclge_vport *vport,
+ struct hnae3_ring_chain_node *ring_chain,
+ struct hclge_desc *desc)
+{
+ struct hclge_ctrl_vector_chain_cmd *req =
+ (struct hclge_ctrl_vector_chain_cmd *)desc->data;
+ struct hclge_dev *hdev = vport->back;
+ u16 tqp_type_and_id;
+ int status;
+
+ hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_ADD_RING_TO_VECTOR, true);
+
+ tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[0]);
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
+ hnae3_get_bit(ring_chain->flag, HNAE3_RING_TYPE_B));
+ hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
+ ring_chain->tqp_index);
+ req->tqp_type_and_id[0] = cpu_to_le16(tqp_type_and_id);
+ req->vfid = vport->vport_id;
+
+ status = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Get VF ring vector map info fail, status is %d.\n",
+ status);
+
+ return status;
+}
+
+static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *req,
+ struct hclge_respond_to_vf_msg *resp)
+{
+#define HCLGE_LIMIT_RING_NUM 1
+#define HCLGE_RING_TYPE_OFFSET 0
+#define HCLGE_TQP_INDEX_OFFSET 1
+#define HCLGE_INT_GL_INDEX_OFFSET 2
+#define HCLGE_VECTOR_ID_OFFSET 3
+#define HCLGE_RING_VECTOR_MAP_INFO_LEN 4
+ struct hnae3_ring_chain_node ring_chain;
+ struct hclge_desc desc;
+ struct hclge_ctrl_vector_chain_cmd *data =
+ (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ u16 tqp_type_and_id;
+ u8 int_gl_index;
+ int ret;
+
+ req->msg.ring_num = HCLGE_LIMIT_RING_NUM;
+
+ memset(&ring_chain, 0, sizeof(ring_chain));
+ ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
+ if (ret)
+ return ret;
+
+ ret = hclge_query_ring_vector_map(vport, &ring_chain, &desc);
+ if (ret) {
+ hclge_free_vector_ring_chain(&ring_chain);
+ return ret;
+ }
+
+ tqp_type_and_id = le16_to_cpu(data->tqp_type_and_id[0]);
+ int_gl_index = hnae3_get_field(tqp_type_and_id,
+ HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S);
+
+ resp->data[HCLGE_RING_TYPE_OFFSET] = req->msg.param[0].ring_type;
+ resp->data[HCLGE_TQP_INDEX_OFFSET] = req->msg.param[0].tqp_index;
+ resp->data[HCLGE_INT_GL_INDEX_OFFSET] = int_gl_index;
+ resp->data[HCLGE_VECTOR_ID_OFFSET] = data->int_vector_id_l;
+ resp->len = HCLGE_RING_VECTOR_MAP_INFO_LEN;
+
+ hclge_free_vector_ring_chain(&ring_chain);
+
+ return ret;
+}
+
static void hclge_set_vf_promisc_mode(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *req)
{
@@ -331,16 +421,14 @@ int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
u16 state,
struct hclge_vlan_info *vlan_info)
{
-#define MSG_DATA_SIZE 8
-
- u8 msg_data[MSG_DATA_SIZE];
+ struct hclge_mbx_port_base_vlan base_vlan;
- memcpy(&msg_data[0], &state, sizeof(u16));
- memcpy(&msg_data[2], &vlan_info->vlan_proto, sizeof(u16));
- memcpy(&msg_data[4], &vlan_info->qos, sizeof(u16));
- memcpy(&msg_data[6], &vlan_info->vlan_tag, sizeof(u16));
+ base_vlan.state = cpu_to_le16(state);
+ base_vlan.vlan_proto = cpu_to_le16(vlan_info->vlan_proto);
+ base_vlan.qos = cpu_to_le16(vlan_info->qos);
+ base_vlan.vlan_tag = cpu_to_le16(vlan_info->vlan_tag);
- return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ return hclge_send_mbx_msg(vport, (u8 *)&base_vlan, sizeof(base_vlan),
HCLGE_MBX_PUSH_VLAN_INFO, vfid);
}
@@ -354,13 +442,16 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
struct hclge_vf_vlan_cfg *msg_cmd;
+ __be16 proto;
+ u16 vlan_id;
msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
switch (msg_cmd->subcode) {
case HCLGE_MBX_VLAN_FILTER:
- return hclge_set_vlan_filter(handle,
- cpu_to_be16(msg_cmd->proto),
- msg_cmd->vlan, msg_cmd->is_kill);
+ proto = cpu_to_be16(le16_to_cpu(msg_cmd->proto));
+ vlan_id = le16_to_cpu(msg_cmd->vlan);
+ return hclge_set_vlan_filter(handle, proto, vlan_id,
+ msg_cmd->is_kill);
case HCLGE_MBX_VLAN_RX_OFF_CFG:
return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable);
case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE:
@@ -403,15 +494,17 @@ static void hclge_get_basic_info(struct hclge_vport *vport,
struct hnae3_ae_dev *ae_dev = vport->back->ae_dev;
struct hclge_basic_info *basic_info;
unsigned int i;
+ u32 pf_caps;
basic_info = (struct hclge_basic_info *)resp_msg->data;
for (i = 0; i < kinfo->tc_info.num_tc; i++)
basic_info->hw_tc_map |= BIT(i);
+ pf_caps = le32_to_cpu(basic_info->pf_caps);
if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
- hnae3_set_bit(basic_info->pf_caps,
- HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1);
+ hnae3_set_bit(pf_caps, HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1);
+ basic_info->pf_caps = cpu_to_le32(pf_caps);
resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
}
@@ -419,19 +512,15 @@ static void hclge_get_vf_queue_info(struct hclge_vport *vport,
struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_RSS_INFO_LEN 6
-#define HCLGE_TQPS_ALLOC_OFFSET 0
-#define HCLGE_TQPS_RSS_SIZE_OFFSET 2
-#define HCLGE_TQPS_RX_BUFFER_LEN_OFFSET 4
+ struct hclge_mbx_vf_queue_info *queue_info;
struct hclge_dev *hdev = vport->back;
/* get the queue related info */
- memcpy(&resp_msg->data[HCLGE_TQPS_ALLOC_OFFSET],
- &vport->alloc_tqps, sizeof(u16));
- memcpy(&resp_msg->data[HCLGE_TQPS_RSS_SIZE_OFFSET],
- &vport->nic.kinfo.rss_size, sizeof(u16));
- memcpy(&resp_msg->data[HCLGE_TQPS_RX_BUFFER_LEN_OFFSET],
- &hdev->rx_buf_len, sizeof(u16));
+ queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg->data;
+ queue_info->num_tqps = cpu_to_le16(vport->alloc_tqps);
+ queue_info->rss_size = cpu_to_le16(vport->nic.kinfo.rss_size);
+ queue_info->rx_buf_len = cpu_to_le16(hdev->rx_buf_len);
resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN;
}
@@ -446,16 +535,15 @@ static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN 4
-#define HCLGE_TQPS_NUM_TX_DESC_OFFSET 0
-#define HCLGE_TQPS_NUM_RX_DESC_OFFSET 2
+ struct hclge_mbx_vf_queue_depth *queue_depth;
struct hclge_dev *hdev = vport->back;
/* get the queue depth info */
- memcpy(&resp_msg->data[HCLGE_TQPS_NUM_TX_DESC_OFFSET],
- &hdev->num_tx_desc, sizeof(u16));
- memcpy(&resp_msg->data[HCLGE_TQPS_NUM_RX_DESC_OFFSET],
- &hdev->num_rx_desc, sizeof(u16));
+ queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg->data;
+ queue_depth->num_tx_desc = cpu_to_le16(hdev->num_tx_desc);
+ queue_depth->num_rx_desc = cpu_to_le16(hdev->num_rx_desc);
+
resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
}
@@ -480,10 +568,9 @@ int hclge_push_vf_link_status(struct hclge_vport *vport)
#define HCLGE_VF_LINK_STATE_UP 1U
#define HCLGE_VF_LINK_STATE_DOWN 0U
+ struct hclge_mbx_link_status link_info;
struct hclge_dev *hdev = vport->back;
u16 link_status;
- u8 msg_data[9];
- u16 duplex;
/* mac.link can only be 0 or 1 */
switch (vport->vf_info.link_state) {
@@ -499,14 +586,13 @@ int hclge_push_vf_link_status(struct hclge_vport *vport)
break;
}
- duplex = hdev->hw.mac.duplex;
- memcpy(&msg_data[0], &link_status, sizeof(u16));
- memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
- memcpy(&msg_data[6], &duplex, sizeof(u16));
- msg_data[8] = HCLGE_MBX_PUSH_LINK_STATUS_EN;
+ link_info.link_status = cpu_to_le16(link_status);
+ link_info.speed = cpu_to_le32(hdev->hw.mac.speed);
+ link_info.duplex = cpu_to_le16(hdev->hw.mac.duplex);
+ link_info.flag = HCLGE_MBX_PUSH_LINK_STATUS_EN;
/* send this requested info to VF */
- return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ return hclge_send_mbx_msg(vport, (u8 *)&link_info, sizeof(link_info),
HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id);
}
@@ -514,22 +600,22 @@ static void hclge_get_link_mode(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_SUPPORTED 1
+ struct hclge_mbx_link_mode link_mode;
struct hclge_dev *hdev = vport->back;
unsigned long advertising;
unsigned long supported;
unsigned long send_data;
- u8 msg_data[10] = {};
u8 dest_vfid;
advertising = hdev->hw.mac.advertising[0];
supported = hdev->hw.mac.supported[0];
dest_vfid = mbx_req->mbx_src_vfid;
- msg_data[0] = mbx_req->msg.data[0];
-
- send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;
+ send_data = mbx_req->msg.data[0] == HCLGE_SUPPORTED ? supported :
+ advertising;
+ link_mode.idx = cpu_to_le16((u16)mbx_req->msg.data[0]);
+ link_mode.link_mode = cpu_to_le64(send_data);
- memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
- hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ hclge_send_mbx_msg(vport, (u8 *)&link_mode, sizeof(link_mode),
HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}
@@ -543,7 +629,7 @@ static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
u16 queue_id;
int ret;
- memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
+ queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE;
resp_msg->len = sizeof(u8);
@@ -571,66 +657,119 @@ static int hclge_reset_vf(struct hclge_vport *vport)
return hclge_func_reset_cmd(hdev, vport->vport_id);
}
+static void hclge_notify_vf_config(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_port_base_vlan_config *vlan_cfg;
+ int ret;
+
+ hclge_push_vf_link_status(vport);
+ if (test_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, &vport->need_notify)) {
+ ret = hclge_inform_vf_reset(vport, HNAE3_VF_PF_FUNC_RESET);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to inform VF %u reset!",
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
+ return;
+ }
+ vport->need_notify = 0;
+ return;
+ }
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
+ test_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify)) {
+ vlan_cfg = &vport->port_base_vlan_cfg;
+ ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+ vport->vport_id,
+ vlan_cfg->state,
+ &vlan_cfg->vlan_info);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to inform VF %u port base vlan!",
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
+ return;
+ }
+ clear_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify);
+ }
+}
+
static void hclge_vf_keep_alive(struct hclge_vport *vport)
{
+ struct hclge_dev *hdev = vport->back;
+
vport->last_active_jiffies = jiffies;
+
+ if (test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) &&
+ !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+ set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ dev_info(&hdev->pdev->dev, "VF %u is alive!",
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
+ hclge_notify_vf_config(vport);
+ }
}
static int hclge_set_vf_mtu(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
+ struct hclge_mbx_mtu_info *mtu_info;
u32 mtu;
- memcpy(&mtu, mbx_req->msg.data, sizeof(mtu));
+ mtu_info = (struct hclge_mbx_mtu_info *)mbx_req->msg.data;
+ mtu = le32_to_cpu(mtu_info->mtu);
return hclge_set_vport_mtu(vport, mtu);
}
-static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- struct hclge_respond_to_vf_msg *resp_msg)
+static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
u16 queue_id, qid_in_pf;
- memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
+ queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
if (queue_id >= handle->kinfo.num_tqps) {
dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
queue_id, mbx_req->mbx_src_vfid);
- return;
+ return -EINVAL;
}
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
- memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
+ *(__le16 *)resp_msg->data = cpu_to_le16(qid_in_pf);
resp_msg->len = sizeof(qid_in_pf);
+ return 0;
}
-static void hclge_get_rss_key(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- struct hclge_respond_to_vf_msg *resp_msg)
+static int hclge_get_rss_key(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN 8
struct hclge_dev *hdev = vport->back;
+ struct hclge_comm_rss_cfg *rss_cfg;
u8 index;
index = mbx_req->msg.data[0];
+ rss_cfg = &hdev->rss_cfg;
/* Check the query index of rss_hash_key from VF, make sure no
* more than the size of rss_hash_key.
*/
if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
- sizeof(vport[0].rss_hash_key)) {
+ sizeof(rss_cfg->rss_hash_key)) {
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
- return;
+ return -EINVAL;
}
memcpy(resp_msg->data,
- &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
+ &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
+ return 0;
}
static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
@@ -661,9 +800,9 @@ static void hclge_handle_link_change_event(struct hclge_dev *hdev,
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
- u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
+ u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
- return tail == hw->cmq.crq.next_to_use;
+ return tail == hw->hw.cmq.crq.next_to_use;
}
static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
@@ -692,20 +831,289 @@ static void hclge_handle_vf_tbl(struct hclge_vport *vport,
}
}
+static int
+hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, true,
+ param->req);
+}
+
+static int
+hclge_mbx_unmap_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, false,
+ param->req);
+}
+
+static int
+hclge_mbx_get_ring_vector_map_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_get_vf_ring_vector_map(param->vport, param->req,
+ param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to get VF ring vector map\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_promisc_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_set_vf_promisc_mode(param->vport, param->req);
+ return 0;
+}
+
+static int hclge_mbx_set_unicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_uc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF UC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_multicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF MC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_vlan_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_vlan_cfg(param->vport, param->req, param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to config VF's VLAN\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_alive(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to set VF's ALIVE\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qinfo_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_qdepth_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_depth(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_basic_info_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_basic_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_push_vf_link_status(param->vport);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "failed to inform link stat to VF, ret = %d\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_queue_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_mbx_reset_vf_queue(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_reset_vf(param->vport);
+}
+
+static int hclge_mbx_keep_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_vf_keep_alive(param->vport);
+ return 0;
+}
+
+static int hclge_mbx_set_mtu_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mtu(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "VF fail(%d) to set mtu\n", ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qid_in_pf_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_queue_id_in_pf(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_get_rss_key_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_rss_key(param->vport, param->req, param->resp_msg);
+}
+
+static int hclge_mbx_get_link_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_link_mode(param->vport, param->req);
+ return 0;
+}
+
+static int
+hclge_mbx_get_vf_flr_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, false);
+ return 0;
+}
+
+static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, true);
+ param->vport->mps = 0;
+ return 0;
+}
+
+static int hclge_mbx_get_media_type_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_media_type(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_push_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_link_change_event(param->vport->back, param->req);
+ return 0;
+}
+
+static int hclge_mbx_get_mac_addr_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_mac_addr(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_ncsi_error_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_ncsi_error(param->vport->back);
+ return 0;
+}
+
+static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_vf_tbl(param->vport, param->req);
+ return 0;
+}
+
+static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = {
+ [HCLGE_MBX_RESET] = hclge_mbx_reset_handler,
+ [HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler,
+ [HCLGE_MBX_SET_MULTICAST] = hclge_mbx_set_multicast_handler,
+ [HCLGE_MBX_SET_VLAN] = hclge_mbx_set_vlan_handler,
+ [HCLGE_MBX_MAP_RING_TO_VECTOR] = hclge_mbx_map_ring_to_vector_handler,
+ [HCLGE_MBX_UNMAP_RING_TO_VECTOR] = hclge_mbx_unmap_ring_to_vector_handler,
+ [HCLGE_MBX_SET_PROMISC_MODE] = hclge_mbx_set_promisc_mode_handler,
+ [HCLGE_MBX_GET_QINFO] = hclge_mbx_get_qinfo_handler,
+ [HCLGE_MBX_GET_QDEPTH] = hclge_mbx_get_qdepth_handler,
+ [HCLGE_MBX_GET_BASIC_INFO] = hclge_mbx_get_basic_info_handler,
+ [HCLGE_MBX_GET_RSS_KEY] = hclge_mbx_get_rss_key_handler,
+ [HCLGE_MBX_GET_MAC_ADDR] = hclge_mbx_get_mac_addr_handler,
+ [HCLGE_MBX_GET_LINK_STATUS] = hclge_mbx_get_link_status_handler,
+ [HCLGE_MBX_QUEUE_RESET] = hclge_mbx_queue_reset_handler,
+ [HCLGE_MBX_KEEP_ALIVE] = hclge_mbx_keep_alive_handler,
+ [HCLGE_MBX_SET_ALIVE] = hclge_mbx_set_alive_handler,
+ [HCLGE_MBX_SET_MTU] = hclge_mbx_set_mtu_handler,
+ [HCLGE_MBX_GET_QID_IN_PF] = hclge_mbx_get_qid_in_pf_handler,
+ [HCLGE_MBX_GET_LINK_MODE] = hclge_mbx_get_link_mode_handler,
+ [HCLGE_MBX_GET_MEDIA_TYPE] = hclge_mbx_get_media_type_handler,
+ [HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler,
+ [HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler,
+ [HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler,
+ [HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler,
+ [HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler,
+ [HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler,
+};
+
+static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
+{
+ hclge_mbx_ops_fn cmd_func = NULL;
+ struct hclge_dev *hdev;
+ int ret = 0;
+
+ hdev = param->vport->back;
+ cmd_func = hclge_mbx_ops_list[param->req->msg.code];
+ if (cmd_func)
+ ret = cmd_func(param);
+ else
+ dev_err(&hdev->pdev->dev,
+ "un-supported mailbox message, code = %u\n",
+ param->req->msg.code);
+
+	/* PF driver should not reply to IMP */
+ if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
+ param->req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
+ param->resp_msg->status = ret;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "resp vport%u mbx(%u,%u) late\n",
+ param->req->mbx_src_vfid,
+ param->req->msg.code,
+ param->req->msg.subcode);
+
+ hclge_gen_resp_to_vf(param->vport, param->req, param->resp_msg);
+ }
+}
+
void hclge_mbx_handler(struct hclge_dev *hdev)
{
- struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
+ struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq;
struct hclge_respond_to_vf_msg resp_msg;
struct hclge_mbx_vf_to_pf_cmd *req;
- struct hclge_vport *vport;
+ struct hclge_mbx_ops_param param;
struct hclge_desc *desc;
- bool is_del = false;
unsigned int flag;
- int ret = 0;
+ param.resp_msg = &resp_msg;
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state)) {
dev_warn(&hdev->pdev->dev,
"command queue needs re-initializing\n");
return;
@@ -726,138 +1134,19 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
continue;
}
- vport = &hdev->vport[req->mbx_src_vfid];
-
trace_hclge_pf_mbx_get(hdev, req);
/* clear the resp_msg before processing every mailbox message */
memset(&resp_msg, 0, sizeof(resp_msg));
-
- switch (req->msg.code) {
- case HCLGE_MBX_MAP_RING_TO_VECTOR:
- ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
- req);
- break;
- case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
- ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
- req);
- break;
- case HCLGE_MBX_SET_PROMISC_MODE:
- hclge_set_vf_promisc_mode(vport, req);
- break;
- case HCLGE_MBX_SET_UNICAST:
- ret = hclge_set_vf_uc_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF UC MAC Addr\n",
- ret);
- break;
- case HCLGE_MBX_SET_MULTICAST:
- ret = hclge_set_vf_mc_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF MC MAC Addr\n",
- ret);
- break;
- case HCLGE_MBX_SET_VLAN:
- ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to config VF's VLAN\n",
- ret);
- break;
- case HCLGE_MBX_SET_ALIVE:
- ret = hclge_set_vf_alive(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to set VF's ALIVE\n",
- ret);
- break;
- case HCLGE_MBX_GET_QINFO:
- hclge_get_vf_queue_info(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_QDEPTH:
- hclge_get_vf_queue_depth(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_BASIC_INFO:
- hclge_get_basic_info(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_LINK_STATUS:
- ret = hclge_push_vf_link_status(vport);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "failed to inform link stat to VF, ret = %d\n",
- ret);
- break;
- case HCLGE_MBX_QUEUE_RESET:
- ret = hclge_mbx_reset_vf_queue(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_RESET:
- ret = hclge_reset_vf(vport);
- break;
- case HCLGE_MBX_KEEP_ALIVE:
- hclge_vf_keep_alive(vport);
- break;
- case HCLGE_MBX_SET_MTU:
- ret = hclge_set_vf_mtu(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "VF fail(%d) to set mtu\n", ret);
- break;
- case HCLGE_MBX_GET_QID_IN_PF:
- hclge_get_queue_id_in_pf(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_GET_RSS_KEY:
- hclge_get_rss_key(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_GET_LINK_MODE:
- hclge_get_link_mode(vport, req);
- break;
- case HCLGE_MBX_GET_VF_FLR_STATUS:
- case HCLGE_MBX_VF_UNINIT:
- is_del = req->msg.code == HCLGE_MBX_VF_UNINIT;
- hclge_rm_vport_all_mac_table(vport, is_del,
- HCLGE_MAC_ADDR_UC);
- hclge_rm_vport_all_mac_table(vport, is_del,
- HCLGE_MAC_ADDR_MC);
- hclge_rm_vport_all_vlan_table(vport, is_del);
- break;
- case HCLGE_MBX_GET_MEDIA_TYPE:
- hclge_get_vf_media_type(vport, &resp_msg);
- break;
- case HCLGE_MBX_PUSH_LINK_STATUS:
- hclge_handle_link_change_event(hdev, req);
- break;
- case HCLGE_MBX_GET_MAC_ADDR:
- hclge_get_vf_mac_addr(vport, &resp_msg);
- break;
- case HCLGE_MBX_NCSI_ERROR:
- hclge_handle_ncsi_error(hdev);
- break;
- case HCLGE_MBX_HANDLE_VF_TBL:
- hclge_handle_vf_tbl(vport, req);
- break;
- default:
- dev_err(&hdev->pdev->dev,
- "un-supported mailbox message, code = %u\n",
- req->msg.code);
- break;
- }
-
- /* PF driver should not reply IMP */
- if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
- req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
- resp_msg.status = ret;
- hclge_gen_resp_to_vf(vport, req, &resp_msg);
- }
+ param.vport = &hdev->vport[req->mbx_src_vfid];
+ param.req = req;
+ hclge_mbx_request_handling(&param);
crq->desc[crq->next_to_use].flag = 0;
hclge_mbx_ring_ptr_move_crq(crq);
-
- /* reinitialize ret after complete the mbx message processing */
- ret = 0;
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
- hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
+ hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ crq->next_to_use);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 1231c34f0949..85fb11de43a1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -47,8 +47,8 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
struct hclge_desc desc;
int ret;
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
- return 0;
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
+ return -EBUSY;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
@@ -85,8 +85,8 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
struct hclge_desc desc;
int ret;
- if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
- return 0;
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
+ return -EBUSY;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
@@ -187,7 +187,7 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
speed = netdev->phydev->speed;
duplex = netdev->phydev->duplex;
- ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, 0);
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index fd0e20190b90..4200d0b6d931 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -4,6 +4,10 @@
#ifndef __HCLGE_MDIO_H
#define __HCLGE_MDIO_H
+#include "hnae3.h"
+
+struct hclge_dev;
+
int hclge_mac_mdio_config(struct hclge_dev *hdev);
int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index befa9bcc2f2f..80a2a0073d97 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -22,28 +22,16 @@ static int hclge_ptp_get_cycle(struct hclge_dev *hdev)
return 0;
}
-static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int hclge_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
struct hclge_ptp_cycle *cycle = &hdev->ptp->cycle;
- u64 adj_val, adj_base, diff;
+ u64 adj_val, adj_base;
unsigned long flags;
- bool is_neg = false;
u32 quo, numerator;
- if (ppb < 0) {
- ppb = -ppb;
- is_neg = true;
- }
-
adj_base = (u64)cycle->quo * (u64)cycle->den + (u64)cycle->numer;
- adj_val = adj_base * ppb;
- diff = div_u64(adj_val, 1000000000ULL);
-
- if (is_neg)
- adj_val = adj_base - diff;
- else
- adj_val = adj_base + diff;
+ adj_val = adjust_by_scaled_ppm(adj_base, scaled_ppm);
/* This clock cycle is defined by three part: quotient, numerator
* and denominator. For example, 2.5ns, the quotient is 2,
@@ -446,7 +434,7 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
ptp->info.max_adj = HCLGE_PTP_CYCLE_ADJ_MAX;
ptp->info.n_ext_ts = 0;
ptp->info.pps = 0;
- ptp->info.adjfreq = hclge_ptp_adjfreq;
+ ptp->info.adjfine = hclge_ptp_adjfine;
ptp->info.adjtime = hclge_ptp_adjtime;
ptp->info.gettimex64 = hclge_ptp_gettimex;
ptp->info.settime64 = hclge_ptp_settime;
@@ -464,7 +452,7 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
}
spin_lock_init(&ptp->lock);
- ptp->io_base = hdev->hw.io_base + HCLGE_PTP_REG_OFFSET;
+ ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
hdev->ptp = ptp;
@@ -504,7 +492,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
goto out;
set_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
- ret = hclge_ptp_adjfreq(&hdev->ptp->info, 0);
+ ret = hclge_ptp_adjfine(&hdev->ptp->info, 0);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init freq, ret = %d\n", ret);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index 7a9b77de632a..bbee74cd8404 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -8,6 +8,9 @@
#include <linux/net_tstamp.h>
#include <linux/types.h>
+struct hclge_dev;
+struct ifreq;
+
#define HCLGE_PTP_REG_OFFSET 0x29000
#define HCLGE_PTP_TX_TS_SEQID_REG 0x0
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 429652a8cde1..4a33f65190e2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -248,7 +248,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
return 0;
}
-static int hclge_up_to_tc_map(struct hclge_dev *hdev)
+int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
struct hclge_desc desc;
u8 *pri = (u8 *)desc.data;
@@ -266,6 +266,47 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev)
+{
+ u8 i;
+
+ hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ hdev->vport[0].nic.kinfo.dscp_app_cnt = 0;
+ for (i = 0; i < HNAE3_MAX_DSCP; i++)
+ hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID;
+}
+
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 pri_id, tc_id, i, j;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false);
+
+	/* The low 32 dscp settings use bd0, the high 32 dscp settings use bd1 */
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+ /* Each dscp setting has 4 bits, so each byte saves two dscp
+		 * settings
+ */
+ req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+ req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+ }
+
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+}
+
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
u8 pg_id, u8 pri_bit_map)
{
@@ -282,8 +323,8 @@ static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
- u16 qs_id, u8 pri)
+static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
+ bool link_vld)
{
struct hclge_qs_to_pri_link_cmd *map;
struct hclge_desc desc;
@@ -294,7 +335,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
map->qs_id = cpu_to_le16(qs_id);
map->priority = pri;
- map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;
+ map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -420,7 +461,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
-static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
+int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
struct hclge_port_shapping_cmd *shap_cfg_cmd;
struct hclge_shaper_ir_para ir_para;
@@ -642,11 +683,13 @@ static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
* one tc for VF for simplicity. VF's vport_id is non zero.
*/
if (vport->vport_id) {
+ kinfo->tc_info.max_tc = 1;
kinfo->tc_info.num_tc = 1;
vport->qs_offset = HNAE3_MAX_TC +
vport->vport_id - HCLGE_VF_VPORT_START_NUM;
vport_max_rss_size = hdev->vf_rss_size_max;
} else {
+ kinfo->tc_info.max_tc = hdev->tc_max;
kinfo->tc_info.num_tc =
min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
vport->qs_offset = 0;
@@ -678,9 +721,11 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
hclge_tm_update_kinfo_rss_size(vport);
kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
vport->dwrr = 100; /* 100 percent as init */
- vport->alloc_rss_size = kinfo->rss_size;
vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
+ if (vport->vport_id == PF_VPORT_ID)
+ hdev->rss_cfg.rss_size = kinfo->rss_size;
+
/* when enable mqprio, the tc_info has been updated. */
if (kinfo->tc_info.mqprio_active)
return;
@@ -714,14 +759,22 @@ static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
- u8 i;
+ u8 i, tc_sch_mode;
+ u32 bw_limit;
+
+ for (i = 0; i < hdev->tc_max; i++) {
+ if (i < hdev->tm_info.num_tc) {
+ tc_sch_mode = HCLGE_SCH_MODE_DWRR;
+ bw_limit = hdev->tm_info.pg_info[0].bw_limit;
+ } else {
+ tc_sch_mode = HCLGE_SCH_MODE_SP;
+ bw_limit = 0;
+ }
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
hdev->tm_info.tc_info[i].tc_id = i;
- hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
+ hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
hdev->tm_info.tc_info[i].pgid = 0;
- hdev->tm_info.tc_info[i].bw_limit =
- hdev->tm_info.pg_info[0].bw_limit;
+ hdev->tm_info.tc_info[i].bw_limit = bw_limit;
}
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
@@ -916,38 +969,66 @@ static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
return 0;
}
-static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
+static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
+ u16 i, k;
int ret;
- u32 i, k;
- if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
- /* Cfg qs -> pri mapping, one by one mapping */
- for (k = 0; k < hdev->num_alloc_vport; k++) {
- struct hnae3_knic_private_info *kinfo =
- &vport[k].nic.kinfo;
-
- for (i = 0; i < kinfo->tc_info.num_tc; i++) {
- ret = hclge_tm_qs_to_pri_map_cfg(
- hdev, vport[k].qs_offset + i, i);
- if (ret)
- return ret;
- }
+ /* Cfg qs -> pri mapping, one by one mapping */
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
+
+ for (i = 0; i < kinfo->tc_info.max_tc; i++) {
+ u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
+ bool link_vld = i < kinfo->tc_info.num_tc;
+
+ ret = hclge_tm_qs_to_pri_map_cfg(hdev,
+ vport[k].qs_offset + i,
+ pri, link_vld);
+ if (ret)
+ return ret;
}
- } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
- /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
- for (k = 0; k < hdev->num_alloc_vport; k++)
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- ret = hclge_tm_qs_to_pri_map_cfg(
- hdev, vport[k].qs_offset + i, k);
- if (ret)
- return ret;
- }
- } else {
- return -EINVAL;
}
+ return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u16 i, k;
+ int ret;
+
+ /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
+ for (k = 0; k < hdev->num_alloc_vport; k++)
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ ret = hclge_tm_qs_to_pri_map_cfg(hdev,
+ vport[k].qs_offset + i,
+ k, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u32 i;
+
+ if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
+ ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
+ else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
+ ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
+ else
+ return -EINVAL;
+
+ if (ret)
+ return ret;
+
/* Cfg q -> qs mapping */
for (i = 0; i < hdev->num_alloc_vport; i++) {
ret = hclge_vport_q_to_qs_map(hdev, vport);
@@ -964,33 +1045,39 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
struct hclge_shaper_ir_para ir_para;
- u32 shaper_para;
+ u32 shaper_para_c, shaper_para_p;
int ret;
u32 i;
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ for (i = 0; i < hdev->tc_max; i++) {
u32 rate = hdev->tm_info.tc_info[i].bw_limit;
- ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
- &ir_para, max_tm_rate);
- if (ret)
- return ret;
+ if (rate) {
+ ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
+ &ir_para, max_tm_rate);
+ if (ret)
+ return ret;
+
+ shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
+ ir_para.ir_u,
+ ir_para.ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ } else {
+ shaper_para_c = 0;
+ shaper_para_p = 0;
+ }
- shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
- HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
- shaper_para, rate);
+ shaper_para_c, rate);
if (ret)
return ret;
- shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
- ir_para.ir_u,
- ir_para.ir_s,
- HCLGE_SHAPER_BS_U_DEF,
- HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
- shaper_para, rate);
+ shaper_para_p, rate);
if (ret)
return ret;
}
@@ -1100,7 +1187,7 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
int ret;
u32 i, k;
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ for (i = 0; i < hdev->tc_max; i++) {
pg_info =
&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
dwrr = pg_info->tc_dwrr[i];
@@ -1110,9 +1197,15 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
return ret;
for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
+
+ if (i >= kinfo->tc_info.max_tc)
+ continue;
+
+ dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
ret = hclge_tm_qs_weight_cfg(
hdev, vport[k].qs_offset + i,
- vport[k].dwrr);
+ dwrr);
if (ret)
return ret;
}
@@ -1200,7 +1293,7 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
if (ret == -EOPNOTSUPP) {
dev_warn(&hdev->pdev->dev,
- "fw %08x does't support ets tc weight cmd\n",
+ "fw %08x doesn't support ets tc weight cmd\n",
hdev->fw_version);
ret = 0;
}
@@ -1223,6 +1316,12 @@ static int hclge_tm_map_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
+ if (hdev->vport[0].nic.kinfo.tc_map_mode == HNAE3_TC_MAP_MODE_DSCP) {
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret)
+ return ret;
+ }
+
ret = hclge_tm_pg_to_pri_map(hdev);
if (ret)
return ret;
@@ -1274,6 +1373,35 @@ static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
return 0;
}
+static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u8 mode;
+ u16 i;
+
+ ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;
+
+ if (pri_id >= kinfo->tc_info.max_tc)
+ continue;
+
+ mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :
+ HCLGE_SCH_MODE_SP;
+ ret = hclge_tm_qs_schd_mode_cfg(hdev,
+ vport[i].qs_offset + pri_id,
+ mode);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
@@ -1304,21 +1432,13 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
int ret;
- u8 i, k;
+ u8 i;
if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
- for (i = 0; i < hdev->tm_info.num_tc; i++) {
- ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
+ for (i = 0; i < hdev->tc_max; i++) {
+ ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
if (ret)
return ret;
-
- for (k = 0; k < hdev->num_alloc_vport; k++) {
- ret = hclge_tm_qs_schd_mode_cfg(
- hdev, vport[k].qs_offset + i,
- HCLGE_SCH_MODE_DWRR);
- if (ret)
- return ret;
- }
}
} else {
for (i = 0; i < hdev->num_alloc_vport; i++) {
@@ -1573,6 +1693,7 @@ int hclge_tm_schd_init(struct hclge_dev *hdev)
return -EINVAL;
hclge_tm_schd_info_init(hdev);
+ hclge_dscp_to_prio_map_init(hdev);
return hclge_tm_init_hw(hdev, true);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 1db7f40b4525..68f28a98e380 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -6,6 +6,12 @@
#include <linux/types.h>
+#include "hnae3.h"
+
+struct hclge_dev;
+struct hclge_vport;
+enum hclge_opcode_type;
+
/* MAC Pause */
#define HCLGE_TX_MAC_PAUSE_EN_MSK BIT(0)
#define HCLGE_RX_MAC_PAUSE_EN_MSK BIT(1)
@@ -24,6 +30,9 @@
#define HCLGE_TM_PF_MAX_PRI_NUM 8
#define HCLGE_TM_PF_MAX_QSET_NUM 8
+#define HCLGE_DSCP_MAP_TC_BD_NUM 2
+#define HCLGE_DSCP_TC_SHIFT(n) (((n) & 1) * 4)
+
struct hclge_pg_to_pri_link_cmd {
u8 pg_id;
u8 rsvd1[3];
@@ -231,6 +240,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
+int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev);
int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
@@ -255,4 +265,6 @@ int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
struct hclge_tm_shaper_para *para);
int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
struct hclge_tm_shaper_para *para);
+int hclge_up_to_tc_map(struct hclge_dev *hdev);
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
index 5b0b71bd6120..8510b88d4982 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
@@ -62,7 +62,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
TP_fast_assign(
__entry->vfid = req->dest_vfid;
- __entry->code = req->msg.code;
+ __entry->code = le16_to_cpu(req->msg.code);
__assign_str(pciname, pci_name(hdev->pdev));
__assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
deleted file mode 100644
index 51ff7d86ee90..000000000000
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0+
-#
-# Makefile for the HISILICON network device drivers.
-#
-
-ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
-ccflags-y += -I $(srctree)/$(src)
-
-obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
-hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o hclgevf_devlink.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
deleted file mode 100644
index e605c2c5bcce..000000000000
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ /dev/null
@@ -1,556 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-// Copyright (c) 2016-2017 Hisilicon Limited.
-
-#include <linux/device.h>
-#include <linux/dma-direction.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include "hclgevf_cmd.h"
-#include "hclgevf_main.h"
-#include "hnae3.h"
-
-#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
-
-static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
-{
- int ntc = ring->next_to_clean;
- int ntu = ring->next_to_use;
- int used;
-
- used = (ntu - ntc + ring->desc_num) % ring->desc_num;
-
- return ring->desc_num - used - 1;
-}
-
-static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
- int head)
-{
- int ntu = ring->next_to_use;
- int ntc = ring->next_to_clean;
-
- if (ntu > ntc)
- return head >= ntc && head <= ntu;
-
- return head >= ntc || head <= ntu;
-}
-
-static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
-{
- struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
- struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- int clean;
- u32 head;
-
- head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
- rmb(); /* Make sure head is ready before touch any data */
-
- if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
- dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
- csq->next_to_use, csq->next_to_clean);
- dev_warn(&hdev->pdev->dev,
- "Disabling any further commands to IMP firmware\n");
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
- return -EIO;
- }
-
- clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
- csq->next_to_clean = head;
- return clean;
-}
-
-static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
-{
- u32 head;
-
- head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
-
- return head == hw->cmq.csq.next_to_use;
-}
-
-static bool hclgevf_is_special_opcode(u16 opcode)
-{
- const u16 spec_opcode[] = {0x30, 0x31, 0x32};
- int i;
-
- for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
- if (spec_opcode[i] == opcode)
- return true;
- }
-
- return false;
-}
-
-static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
-{
- struct hclgevf_dev *hdev = ring->dev;
- struct hclgevf_hw *hw = &hdev->hw;
- u32 reg_val;
-
- if (ring->flag == HCLGEVF_TYPE_CSQ) {
- reg_val = lower_32_bits(ring->desc_dma_addr);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
- reg_val = upper_32_bits(ring->desc_dma_addr);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
-
- reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
- reg_val &= HCLGEVF_NIC_SW_RST_RDY;
- reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
- } else {
- reg_val = lower_32_bits(ring->desc_dma_addr);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
- reg_val = upper_32_bits(ring->desc_dma_addr);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
-
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
- }
-}
-
-static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
-{
- hclgevf_cmd_config_regs(&hw->cmq.csq);
- hclgevf_cmd_config_regs(&hw->cmq.crq);
-}
-
-static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
-{
- int size = ring->desc_num * sizeof(struct hclgevf_desc);
-
- ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
- &ring->desc_dma_addr, GFP_KERNEL);
- if (!ring->desc)
- return -ENOMEM;
-
- return 0;
-}
-
-static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
-{
- int size = ring->desc_num * sizeof(struct hclgevf_desc);
-
- if (ring->desc) {
- dma_free_coherent(cmq_ring_to_dev(ring), size,
- ring->desc, ring->desc_dma_addr);
- ring->desc = NULL;
- }
-}
-
-static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
-{
- struct hclgevf_hw *hw = &hdev->hw;
- struct hclgevf_cmq_ring *ring =
- (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
- int ret;
-
- ring->dev = hdev;
- ring->flag = ring_type;
-
- /* allocate CSQ/CRQ descriptor */
- ret = hclgevf_alloc_cmd_desc(ring);
- if (ret)
- dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
- (ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
-
- return ret;
-}
-
-void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
- enum hclgevf_opcode_type opcode, bool is_read)
-{
- memset(desc, 0, sizeof(struct hclgevf_desc));
- desc->opcode = cpu_to_le16(opcode);
- desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
- HCLGEVF_CMD_FLAG_IN);
- if (is_read)
- desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
- else
- desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
-}
-
-struct vf_errcode {
- u32 imp_errcode;
- int common_errno;
-};
-
-static void hclgevf_cmd_copy_desc(struct hclgevf_hw *hw,
- struct hclgevf_desc *desc, int num)
-{
- struct hclgevf_desc *desc_to_use;
- int handle = 0;
-
- while (handle < num) {
- desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
- *desc_to_use = desc[handle];
- (hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
- hw->cmq.csq.next_to_use = 0;
- handle++;
- }
-}
-
-static int hclgevf_cmd_convert_err_code(u16 desc_ret)
-{
- struct vf_errcode hclgevf_cmd_errcode[] = {
- {HCLGEVF_CMD_EXEC_SUCCESS, 0},
- {HCLGEVF_CMD_NO_AUTH, -EPERM},
- {HCLGEVF_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
- {HCLGEVF_CMD_QUEUE_FULL, -EXFULL},
- {HCLGEVF_CMD_NEXT_ERR, -ENOSR},
- {HCLGEVF_CMD_UNEXE_ERR, -ENOTBLK},
- {HCLGEVF_CMD_PARA_ERR, -EINVAL},
- {HCLGEVF_CMD_RESULT_ERR, -ERANGE},
- {HCLGEVF_CMD_TIMEOUT, -ETIME},
- {HCLGEVF_CMD_HILINK_ERR, -ENOLINK},
- {HCLGEVF_CMD_QUEUE_ILLEGAL, -ENXIO},
- {HCLGEVF_CMD_INVALID, -EBADR},
- };
- u32 errcode_count = ARRAY_SIZE(hclgevf_cmd_errcode);
- u32 i;
-
- for (i = 0; i < errcode_count; i++)
- if (hclgevf_cmd_errcode[i].imp_errcode == desc_ret)
- return hclgevf_cmd_errcode[i].common_errno;
-
- return -EIO;
-}
-
-static int hclgevf_cmd_check_retval(struct hclgevf_hw *hw,
- struct hclgevf_desc *desc, int num, int ntc)
-{
- u16 opcode, desc_ret;
- int handle;
-
- opcode = le16_to_cpu(desc[0].opcode);
- for (handle = 0; handle < num; handle++) {
- /* Get the result of hardware write back */
- desc[handle] = hw->cmq.csq.desc[ntc];
- ntc++;
- if (ntc == hw->cmq.csq.desc_num)
- ntc = 0;
- }
- if (likely(!hclgevf_is_special_opcode(opcode)))
- desc_ret = le16_to_cpu(desc[num - 1].retval);
- else
- desc_ret = le16_to_cpu(desc[0].retval);
- hw->cmq.last_status = desc_ret;
-
- return hclgevf_cmd_convert_err_code(desc_ret);
-}
-
-static int hclgevf_cmd_check_result(struct hclgevf_hw *hw,
- struct hclgevf_desc *desc, int num, int ntc)
-{
- struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
- bool is_completed = false;
- u32 timeout = 0;
- int handle, ret;
-
- /* If the command is sync, wait for the firmware to write back;
- * if multiple descriptors are to be sent, use the first one to check
- */
- if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
- do {
- if (hclgevf_cmd_csq_done(hw)) {
- is_completed = true;
- break;
- }
- udelay(1);
- timeout++;
- } while (timeout < hw->cmq.tx_timeout);
- }
-
- if (!is_completed)
- ret = -EBADE;
- else
- ret = hclgevf_cmd_check_retval(hw, desc, num, ntc);
-
- /* Clean the command send queue */
- handle = hclgevf_cmd_csq_clean(hw);
- if (handle < 0)
- ret = handle;
- else if (handle != num)
- dev_warn(&hdev->pdev->dev,
- "cleaned %d, need to clean %d\n", handle, num);
- return ret;
-}
-
-/* hclgevf_cmd_send - send command to command queue
- * @hw: pointer to the hw struct
- * @desc: prefilled descriptor for describing the command
- * @num : the number of descriptors to be sent
- *
- * This is the main send command for command queue, it
- * sends the queue, cleans the queue, etc
- */
-int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
-{
- struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
- struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- int ret;
- int ntc;
-
- spin_lock_bh(&hw->cmq.csq.lock);
-
- if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
- spin_unlock_bh(&hw->cmq.csq.lock);
- return -EBUSY;
- }
-
- if (num > hclgevf_ring_space(&hw->cmq.csq)) {
- /* If the CMDQ ring is full, SW HEAD and HW HEAD may differ, so the
- * SW HEAD pointer csq->next_to_clean needs to be updated
- */
- csq->next_to_clean = hclgevf_read_dev(hw,
- HCLGEVF_NIC_CSQ_HEAD_REG);
- spin_unlock_bh(&hw->cmq.csq.lock);
- return -EBUSY;
- }
-
- /* Record the location of desc in the ring for this time,
- * which will be used by the hardware for the write back
- */
- ntc = hw->cmq.csq.next_to_use;
-
- hclgevf_cmd_copy_desc(hw, desc, num);
-
- /* Write to hardware */
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
- hw->cmq.csq.next_to_use);
-
- ret = hclgevf_cmd_check_result(hw, desc, num, ntc);
-
- spin_unlock_bh(&hw->cmq.csq.lock);
-
- return ret;
-}
-
-static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
-
- set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
- set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
- set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
-}
-
-static const struct hclgevf_caps_bit_map hclgevf_cmd_caps_bit_map0[] = {
- {HCLGEVF_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B},
- {HCLGEVF_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B},
- {HCLGEVF_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B},
- {HCLGEVF_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B},
- {HCLGEVF_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B},
- {HCLGEVF_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
-};
-
-static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
- struct hclgevf_query_version_cmd *cmd)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- u32 caps, i;
-
- caps = __le32_to_cpu(cmd->caps[0]);
- for (i = 0; i < ARRAY_SIZE(hclgevf_cmd_caps_bit_map0); i++)
- if (hnae3_get_bit(caps, hclgevf_cmd_caps_bit_map0[i].imp_bit))
- set_bit(hclgevf_cmd_caps_bit_map0[i].local_bit,
- ae_dev->caps);
-}
-
-static __le32 hclgevf_build_api_caps(void)
-{
- u32 api_caps = 0;
-
- hnae3_set_bit(api_caps, HCLGEVF_API_CAP_FLEX_RSS_TBL_B, 1);
-
- return cpu_to_le32(api_caps);
-}
-
-static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- struct hclgevf_query_version_cmd *resp;
- struct hclgevf_desc desc;
- int status;
-
- resp = (struct hclgevf_query_version_cmd *)desc.data;
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
- resp->api_caps = hclgevf_build_api_caps();
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status)
- return status;
-
- hdev->fw_version = le32_to_cpu(resp->firmware);
-
- ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
- HNAE3_PCI_REVISION_BIT_SIZE;
- ae_dev->dev_version |= hdev->pdev->revision;
-
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
- hclgevf_set_default_capability(hdev);
-
- hclgevf_parse_capability(hdev, resp);
-
- return status;
-}
-
-int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
-{
- int ret;
-
- /* Setup the lock for command queue */
- spin_lock_init(&hdev->hw.cmq.csq.lock);
- spin_lock_init(&hdev->hw.cmq.crq.lock);
-
- hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
- hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
- hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
-
- ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "CSQ ring setup error %d\n", ret);
- return ret;
- }
-
- ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "CRQ ring setup error %d\n", ret);
- goto err_csq;
- }
-
- return 0;
-err_csq:
- hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
- return ret;
-}
-
-static int hclgevf_firmware_compat_config(struct hclgevf_dev *hdev, bool en)
-{
- struct hclgevf_firmware_compat_cmd *req;
- struct hclgevf_desc desc;
- u32 compat = 0;
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_IMP_COMPAT_CFG, false);
-
- if (en) {
- req = (struct hclgevf_firmware_compat_cmd *)desc.data;
-
- hnae3_set_bit(compat, HCLGEVF_SYNC_RX_RING_HEAD_EN_B, 1);
-
- req->compat = cpu_to_le32(compat);
- }
-
- return hclgevf_cmd_send(&hdev->hw, &desc, 1);
-}
-
-int hclgevf_cmd_init(struct hclgevf_dev *hdev)
-{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- int ret;
-
- spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock(&hdev->hw.cmq.crq.lock);
-
- /* initialize the pointers of async rx queue of mailbox */
- hdev->arq.hdev = hdev;
- hdev->arq.head = 0;
- hdev->arq.tail = 0;
- atomic_set(&hdev->arq.count, 0);
- hdev->hw.cmq.csq.next_to_clean = 0;
- hdev->hw.cmq.csq.next_to_use = 0;
- hdev->hw.cmq.crq.next_to_clean = 0;
- hdev->hw.cmq.crq.next_to_use = 0;
-
- hclgevf_cmd_init_regs(&hdev->hw);
-
- spin_unlock(&hdev->hw.cmq.crq.lock);
- spin_unlock_bh(&hdev->hw.cmq.csq.lock);
-
- clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
-
- /* Check if there is new reset pending, because the higher level
- * reset may happen when lower level reset is being processed.
- */
- if (hclgevf_is_reset_pending(hdev)) {
- ret = -EBUSY;
- goto err_cmd_init;
- }
-
- /* get version and device capabilities */
- ret = hclgevf_cmd_query_version_and_capability(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to query version and capabilities, ret = %d\n", ret);
- goto err_cmd_init;
- }
-
- dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
- HNAE3_FW_VERSION_BYTE3_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
- HNAE3_FW_VERSION_BYTE2_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
- HNAE3_FW_VERSION_BYTE1_SHIFT),
- hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
- HNAE3_FW_VERSION_BYTE0_SHIFT));
-
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) {
- /* ask the firmware to enable some features, driver can work
- * without it.
- */
- ret = hclgevf_firmware_compat_config(hdev, true);
- if (ret)
- dev_warn(&hdev->pdev->dev,
- "Firmware compatible features not enabled(%d).\n",
- ret);
- }
-
- return 0;
-
-err_cmd_init:
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
-
- return ret;
-}
-
-static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
-{
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
-}
-
-void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
-{
- hclgevf_firmware_compat_config(hdev, false);
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
- /* wait to ensure that the firmware completes the possible left
- * over commands.
- */
- msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME);
- spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock(&hdev->hw.cmq.crq.lock);
- hclgevf_cmd_uninit_regs(&hdev->hw);
- spin_unlock(&hdev->hw.cmq.crq.lock);
- spin_unlock_bh(&hdev->hw.cmq.csq.lock);
-
- hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
- hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
-}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index edc9e154061a..537b887fa0a2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -6,9 +6,8 @@
#include <linux/io.h>
#include <linux/types.h>
#include "hnae3.h"
+#include "hclge_comm_cmd.h"
-#define HCLGEVF_CMDQ_TX_TIMEOUT 30000
-#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME 200
#define HCLGEVF_CMDQ_RX_INVLD_B 0
#define HCLGEVF_CMDQ_RX_OUTVLD_B 1
@@ -16,107 +15,6 @@ struct hclgevf_hw;
struct hclgevf_dev;
#define HCLGEVF_SYNC_RX_RING_HEAD_EN_B 4
-struct hclgevf_firmware_compat_cmd {
- __le32 compat;
- u8 rsv[20];
-};
-
-struct hclgevf_desc {
- __le16 opcode;
- __le16 flag;
- __le16 retval;
- __le16 rsv;
- __le32 data[6];
-};
-
-struct hclgevf_desc_cb {
- dma_addr_t dma;
- void *va;
- u32 length;
-};
-
-struct hclgevf_cmq_ring {
- dma_addr_t desc_dma_addr;
- struct hclgevf_desc *desc;
- struct hclgevf_desc_cb *desc_cb;
- struct hclgevf_dev *dev;
- u32 head;
- u32 tail;
-
- u16 buf_size;
- u16 desc_num;
- int next_to_use;
- int next_to_clean;
- u8 flag;
- spinlock_t lock; /* Command queue lock */
-};
-
-enum hclgevf_cmd_return_status {
- HCLGEVF_CMD_EXEC_SUCCESS = 0,
- HCLGEVF_CMD_NO_AUTH = 1,
- HCLGEVF_CMD_NOT_SUPPORTED = 2,
- HCLGEVF_CMD_QUEUE_FULL = 3,
- HCLGEVF_CMD_NEXT_ERR = 4,
- HCLGEVF_CMD_UNEXE_ERR = 5,
- HCLGEVF_CMD_PARA_ERR = 6,
- HCLGEVF_CMD_RESULT_ERR = 7,
- HCLGEVF_CMD_TIMEOUT = 8,
- HCLGEVF_CMD_HILINK_ERR = 9,
- HCLGEVF_CMD_QUEUE_ILLEGAL = 10,
- HCLGEVF_CMD_INVALID = 11,
-};
-
-enum hclgevf_cmd_status {
- HCLGEVF_STATUS_SUCCESS = 0,
- HCLGEVF_ERR_CSQ_FULL = -1,
- HCLGEVF_ERR_CSQ_TIMEOUT = -2,
- HCLGEVF_ERR_CSQ_ERROR = -3
-};
-
-struct hclgevf_cmq {
- struct hclgevf_cmq_ring csq;
- struct hclgevf_cmq_ring crq;
- u16 tx_timeout; /* Tx timeout */
- enum hclgevf_cmd_status last_status;
-};
-
-#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT 0
-#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT 1
-#define HCLGEVF_CMD_FLAG_NEXT_SHIFT 2
-#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT 3
-#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT 4
-#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT 5
-
-#define HCLGEVF_CMD_FLAG_IN BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT)
-#define HCLGEVF_CMD_FLAG_OUT BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT)
-#define HCLGEVF_CMD_FLAG_NEXT BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT)
-#define HCLGEVF_CMD_FLAG_WR BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT)
-#define HCLGEVF_CMD_FLAG_NO_INTR BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT)
-#define HCLGEVF_CMD_FLAG_ERR_INTR BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT)
-
-enum hclgevf_opcode_type {
- /* Generic command */
- HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
- HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024,
- HCLGEVF_OPC_QUERY_DEV_SPECS = 0x0050,
-
- /* TQP command */
- HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
- HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
- HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
- /* GRO command */
- HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10,
- /* RSS cmd */
- HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
- HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
- HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
- HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
- /* Mailbox cmd */
- HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
-
- /* IMP stats command */
- HCLGEVF_OPC_IMP_COMPAT_CFG = 0x701A,
-};
#define HCLGEVF_TQP_REG_OFFSET 0x80000
#define HCLGEVF_TQP_REG_SIZE 0x200
@@ -156,34 +54,6 @@ struct hclgevf_ctrl_vector_chain {
u8 resv;
};
-enum HCLGEVF_CAP_BITS {
- HCLGEVF_CAP_UDP_GSO_B,
- HCLGEVF_CAP_QB_B,
- HCLGEVF_CAP_FD_FORWARD_TC_B,
- HCLGEVF_CAP_PTP_B,
- HCLGEVF_CAP_INT_QL_B,
- HCLGEVF_CAP_HW_TX_CSUM_B,
- HCLGEVF_CAP_TX_PUSH_B,
- HCLGEVF_CAP_PHY_IMP_B,
- HCLGEVF_CAP_TQP_TXRX_INDEP_B,
- HCLGEVF_CAP_HW_PAD_B,
- HCLGEVF_CAP_STASH_B,
- HCLGEVF_CAP_UDP_TUNNEL_CSUM_B,
- HCLGEVF_CAP_RXD_ADV_LAYOUT_B = 15,
-};
-
-enum HCLGEVF_API_CAP_BITS {
- HCLGEVF_API_CAP_FLEX_RSS_TBL_B,
-};
-
-#define HCLGEVF_QUERY_CAP_LENGTH 3
-struct hclgevf_query_version_cmd {
- __le32 firmware;
- __le32 hardware;
- __le32 api_caps;
- __le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */
-};
-
#define HCLGEVF_MSIX_OFT_ROCEE_S 0
#define HCLGEVF_MSIX_OFT_ROCEE_M (0xffff << HCLGEVF_MSIX_OFT_ROCEE_S)
#define HCLGEVF_VEC_NUM_S 0
@@ -203,50 +73,6 @@ struct hclgevf_cfg_gro_status_cmd {
u8 rsv[23];
};
-#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
-#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
-#define HCLGEVF_RSS_HASH_KEY_NUM 16
-struct hclgevf_rss_config_cmd {
- u8 hash_config;
- u8 rsv[7];
- u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
-};
-
-struct hclgevf_rss_input_tuple_cmd {
- u8 ipv4_tcp_en;
- u8 ipv4_udp_en;
- u8 ipv4_sctp_en;
- u8 ipv4_fragment_en;
- u8 ipv6_tcp_en;
- u8 ipv6_udp_en;
- u8 ipv6_sctp_en;
- u8 ipv6_fragment_en;
- u8 rsv[16];
-};
-
-#define HCLGEVF_RSS_CFG_TBL_SIZE 16
-
-struct hclgevf_rss_indirection_table_cmd {
- __le16 start_table_index;
- __le16 rss_set_bitmap;
- u8 rsv[4];
- u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
-};
-
-#define HCLGEVF_RSS_TC_OFFSET_S 0
-#define HCLGEVF_RSS_TC_OFFSET_M GENMASK(10, 0)
-#define HCLGEVF_RSS_TC_SIZE_MSB_B 11
-#define HCLGEVF_RSS_TC_SIZE_S 12
-#define HCLGEVF_RSS_TC_SIZE_M GENMASK(14, 12)
-#define HCLGEVF_RSS_TC_VALID_B 15
-#define HCLGEVF_MAX_TC_NUM 8
-#define HCLGEVF_RSS_TC_SIZE_MSB_OFFSET 3
-
-struct hclgevf_rss_tc_mode_cmd {
- __le16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
- u8 rsv[8];
-};
-
#define HCLGEVF_LINK_STS_B 0
#define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B)
struct hclgevf_link_status_cmd {
@@ -273,9 +99,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
u8 rsv[14];
};
-#define HCLGEVF_TYPE_CRQ 0
-#define HCLGEVF_TYPE_CSQ 1
-
/* this bit indicates that the driver is ready for hardware reset */
#define HCLGEVF_NIC_SW_RST_RDY_B 16
#define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B)
@@ -285,6 +108,9 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4
+#define hclgevf_cmd_setup_basic_desc(desc, opcode, is_read) \
+ hclge_comm_cmd_setup_basic_desc(desc, opcode, is_read)
+
struct hclgevf_dev_specs_0_cmd {
__le32 rsv0;
__le32 mac_entry_num;
@@ -305,38 +131,6 @@ struct hclgevf_dev_specs_1_cmd {
u8 rsv1[18];
};
-/* capabilities bits map between imp firmware and local driver */
-struct hclgevf_caps_bit_map {
- u16 imp_bit;
- u16 local_bit;
-};
-
-static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
-{
- writel(value, base + reg);
-}
-
-static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
-{
- u8 __iomem *reg_addr = READ_ONCE(base);
-
- return readl(reg_addr + reg);
-}
-
-#define hclgevf_write_dev(a, reg, value) \
- hclgevf_write_reg((a)->io_base, reg, value)
-#define hclgevf_read_dev(a, reg) \
- hclgevf_read_reg((a)->io_base, reg)
-
-#define HCLGEVF_SEND_SYNC(flag) \
- ((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
-
-int hclgevf_cmd_init(struct hclgevf_dev *hdev);
-void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
-int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev);
-
-int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
-void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
- enum hclgevf_opcode_type opcode,
- bool is_read);
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num);
+void hclgevf_arq_init(struct hclgevf_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
index fdc19868b818..1b535142c65a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
@@ -13,11 +13,6 @@ static int hclgevf_devlink_info_get(struct devlink *devlink,
struct hclgevf_devlink_priv *priv = devlink_priv(devlink);
char version_str[HCLGEVF_DEVLINK_FW_STRING_LEN];
struct hclgevf_dev *hdev = priv->hdev;
- int ret;
-
- ret = devlink_info_driver_name_put(req, KBUILD_MODNAME);
- if (ret)
- return ret;
snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
@@ -121,7 +116,6 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 41afaeea881b..f24046250341 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -9,6 +9,7 @@
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclgevf_devlink.h"
+#include "hclge_comm_rss.h"
#define HCLGEVF_NAME "hclgevf"
@@ -30,30 +31,22 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = {
{0, }
};
-static const u8 hclgevf_hash_key[] = {
- 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
- 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
- 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
- 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
- 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
-};
-
MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
-static const u32 cmdq_reg_addr_list[] = {HCLGEVF_NIC_CSQ_BASEADDR_L_REG,
- HCLGEVF_NIC_CSQ_BASEADDR_H_REG,
- HCLGEVF_NIC_CSQ_DEPTH_REG,
- HCLGEVF_NIC_CSQ_TAIL_REG,
- HCLGEVF_NIC_CSQ_HEAD_REG,
- HCLGEVF_NIC_CRQ_BASEADDR_L_REG,
- HCLGEVF_NIC_CRQ_BASEADDR_H_REG,
- HCLGEVF_NIC_CRQ_DEPTH_REG,
- HCLGEVF_NIC_CRQ_TAIL_REG,
- HCLGEVF_NIC_CRQ_HEAD_REG,
- HCLGEVF_VECTOR0_CMDQ_SRC_REG,
- HCLGEVF_VECTOR0_CMDQ_STATE_REG,
- HCLGEVF_CMDQ_INTR_EN_REG,
- HCLGEVF_CMDQ_INTR_GEN_REG};
+static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CSQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CSQ_TAIL_REG,
+ HCLGE_COMM_NIC_CSQ_HEAD_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
+ HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
+ HCLGE_COMM_NIC_CRQ_DEPTH_REG,
+ HCLGE_COMM_NIC_CRQ_TAIL_REG,
+ HCLGE_COMM_NIC_CRQ_HEAD_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_COMM_VECTOR0_CMDQ_STATE_REG,
+ HCLGE_COMM_CMDQ_INTR_EN_REG,
+ HCLGE_COMM_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
HCLGEVF_RST_ING,
@@ -92,109 +85,40 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
HCLGEVF_TQP_INTR_GL2_REG,
HCLGEVF_TQP_INTR_RL_REG};
-static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
-{
- if (!handle->client)
- return container_of(handle, struct hclgevf_dev, nic);
- else if (handle->client->type == HNAE3_CLIENT_ROCE)
- return container_of(handle, struct hclgevf_dev, roce);
- else
- return container_of(handle, struct hclgevf_dev, nic);
-}
-
-static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_desc desc;
- struct hclgevf_tqp *tqp;
- int status;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
- hclgevf_cmd_setup_basic_desc(&desc,
- HCLGEVF_OPC_QUERY_RX_STATUS,
- true);
-
- desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- status, i);
- return status;
- }
- tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- le32_to_cpu(desc.data[1]);
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
- true);
-
- desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "Query tqp stat fail, status = %d,queue = %d\n",
- status, i);
- return status;
- }
- tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- le32_to_cpu(desc.data[1]);
- }
-
- return 0;
-}
-
-static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+/* hclgevf_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num : the number of descriptors to be sent
+ *
+ * This is the main send command for the command queue; it
+ * sends the command to the queue, cleans the queue, etc.
+ */
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclgevf_tqp *tqp;
- u64 *buff = data;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
- }
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
- *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
- }
-
- return buff;
+ return hclge_comm_cmd_send(&hw->hw, desc, num);
}
-static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
+void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;
- return kinfo->num_tqps * 2;
+ spin_lock(&cmdq->crq.lock);
+ /* initialize the pointers of async rx queue of mailbox */
+ hdev->arq.hdev = hdev;
+ hdev->arq.head = 0;
+ hdev->arq.tail = 0;
+ atomic_set(&hdev->arq.count, 0);
+ spin_unlock(&cmdq->crq.lock);
}
-static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- u8 *buff = data;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
- struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
- tqp->index);
- buff += ETH_GSTRING_LEN;
- }
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
- struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
- tqp->index);
- buff += ETH_GSTRING_LEN;
- }
-
- return buff;
+ if (!handle->client)
+ return container_of(handle, struct hclgevf_dev, nic);
+ else if (handle->client->type == HNAE3_CLIENT_ROCE)
+ return container_of(handle, struct hclgevf_dev, roce);
+ else
+ return container_of(handle, struct hclgevf_dev, nic);
}
static void hclgevf_update_stats(struct hnae3_handle *handle,
@@ -203,7 +127,7 @@ static void hclgevf_update_stats(struct hnae3_handle *handle,
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int status;
- status = hclgevf_tqps_update_stats(handle);
+ status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
if (status)
dev_err(&hdev->pdev->dev,
"VF update of TQPS stats fail, status = %d.\n",
@@ -215,7 +139,7 @@ static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
if (strset == ETH_SS_TEST)
return -EOPNOTSUPP;
else if (strset == ETH_SS_STATS)
- return hclgevf_tqps_get_sset_count(handle, strset);
+ return hclge_comm_tqps_get_sset_count(handle);
return 0;
}
@@ -226,12 +150,12 @@ static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
u8 *p = (char *)data;
if (strset == ETH_SS_STATS)
- p = hclgevf_tqps_get_strings(handle, p);
+ p = hclge_comm_tqps_get_strings(handle, p);
}
static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
{
- hclgevf_tqps_get_stats(handle, data);
+ hclge_comm_tqps_get_stats(handle, data);
}
static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
@@ -265,8 +189,8 @@ static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
basic_info = (struct hclge_basic_info *)resp_msg;
hdev->hw_tc_map = basic_info->hw_tc_map;
- hdev->mbx_api_version = basic_info->mbx_api_version;
- caps = basic_info->pf_caps;
+ hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version);
+ caps = le32_to_cpu(basic_info->pf_caps);
if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
@@ -299,10 +223,8 @@ static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN 6
-#define HCLGEVF_TQPS_ALLOC_OFFSET 0
-#define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2
-#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4
+ struct hclge_mbx_vf_queue_info *queue_info;
u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
struct hclge_vf_to_pf_msg send_msg;
int status;
@@ -317,12 +239,10 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
return status;
}
- memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
- sizeof(u16));
- memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
- sizeof(u16));
- memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
- sizeof(u16));
+ queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg;
+ hdev->num_tqps = le16_to_cpu(queue_info->num_tqps);
+ hdev->rss_size_max = le16_to_cpu(queue_info->rss_size);
+ hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len);
return 0;
}
@@ -330,9 +250,8 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_DEPTH_INFO_LEN 4
-#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0
-#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2
+ struct hclge_mbx_vf_queue_depth *queue_depth;
u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
struct hclge_vf_to_pf_msg send_msg;
int ret;
@@ -347,10 +266,9 @@ static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
return ret;
}
- memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
- sizeof(u16));
- memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
- sizeof(u16));
+ queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg;
+ hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc);
+ hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc);
return 0;
}
@@ -364,11 +282,11 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
int ret;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
- memcpy(send_msg.data, &queue_id, sizeof(queue_id));
+ *(__le16 *)send_msg.data = cpu_to_le16(queue_id);
ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
sizeof(resp_data));
if (!ret)
- qid_in_pf = *(u16 *)resp_data;
+ qid_in_pf = le16_to_cpu(*(__le16 *)resp_data);
return qid_in_pf;
}
@@ -397,11 +315,12 @@ static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
- struct hclgevf_tqp *tqp;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_comm_tqp *tqp;
int i;
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
- sizeof(struct hclgevf_tqp), GFP_KERNEL);
+ sizeof(struct hclge_comm_tqp), GFP_KERNEL);
if (!hdev->htqp)
return -ENOMEM;
@@ -420,16 +339,24 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
* HCLGEVF_TQP_MAX_SIZE_DEV_V2.
*/
if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2)
- tqp->q.io_base = hdev->hw.io_base +
+ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGEVF_TQP_REG_OFFSET +
i * HCLGEVF_TQP_REG_SIZE;
else
- tqp->q.io_base = hdev->hw.io_base +
+ tqp->q.io_base = hdev->hw.hw.io_base +
HCLGEVF_TQP_REG_OFFSET +
HCLGEVF_TQP_EXT_REG_OFFSET +
(i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) *
HCLGEVF_TQP_REG_SIZE;
+ /* when the device supports tx push and has device memory,
+ * the queue can work in push mode or doorbell mode on the
+ * device memory.
+ */
+ if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
+ tqp->q.mem_base = hdev->hw.hw.mem_base +
+ HCLGEVF_TQP_MEM_OFFSET(hdev, i);
+
tqp++;
}
@@ -448,7 +375,7 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
kinfo->num_tx_desc = hdev->num_tx_desc;
kinfo->num_rx_desc = hdev->num_rx_desc;
kinfo->rx_buf_len = hdev->rx_buf_len;
- for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
+ for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++)
if (hdev->hw_tc_map & BIT(i))
num_tc++;
@@ -539,7 +466,7 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
nic->pdev = hdev->pdev;
nic->numa_node_mask = hdev->numa_node_mask;
nic->flags |= HNAE3_SUPPORT_VF;
- nic->kinfo.io_base = hdev->hw.io_base;
+ nic->kinfo.io_base = hdev->hw.hw.io_base;
ret = hclgevf_knic_setup(hdev);
if (ret)
@@ -576,7 +503,7 @@ static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
vector->vector = pci_irq_vector(hdev->pdev, i);
- vector->io_addr = hdev->hw.io_base +
+ vector->io_addr = hdev->hw.hw.io_base +
HCLGEVF_VECTOR_REG_BASE +
(i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
hdev->vector_status[i] = 0;
@@ -606,137 +533,11 @@ static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
return -EINVAL;
}
-static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
- const u8 hfunc, const u8 *key)
-{
- struct hclgevf_rss_config_cmd *req;
- unsigned int key_offset = 0;
- struct hclgevf_desc desc;
- int key_counts;
- int key_size;
- int ret;
-
- key_counts = HCLGEVF_RSS_KEY_SIZE;
- req = (struct hclgevf_rss_config_cmd *)desc.data;
-
- while (key_counts) {
- hclgevf_cmd_setup_basic_desc(&desc,
- HCLGEVF_OPC_RSS_GENERIC_CONFIG,
- false);
-
- req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
- req->hash_config |=
- (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
-
- key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
- memcpy(req->hash_key,
- key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
-
- key_counts -= key_size;
- key_offset++;
- ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "Configure RSS config fail, status = %d\n",
- ret);
- return ret;
- }
- }
-
- return 0;
-}
-
-static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
-{
- return HCLGEVF_RSS_KEY_SIZE;
-}
-
-static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
-{
- const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
- struct hclgevf_rss_indirection_table_cmd *req;
- struct hclgevf_desc desc;
- int rss_cfg_tbl_num;
- int status;
- int i, j;
-
- req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
- rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
- HCLGEVF_RSS_CFG_TBL_SIZE;
-
- for (i = 0; i < rss_cfg_tbl_num; i++) {
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
- false);
- req->start_table_index =
- cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE);
- req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK);
- for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
- req->rss_result[j] =
- indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
-
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status) {
- dev_err(&hdev->pdev->dev,
- "VF failed(=%d) to set RSS indirection table\n",
- status);
- return status;
- }
- }
-
- return 0;
-}
-
-static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
-{
- struct hclgevf_rss_tc_mode_cmd *req;
- u16 tc_offset[HCLGEVF_MAX_TC_NUM];
- u16 tc_valid[HCLGEVF_MAX_TC_NUM];
- u16 tc_size[HCLGEVF_MAX_TC_NUM];
- struct hclgevf_desc desc;
- u16 roundup_size;
- unsigned int i;
- int status;
-
- req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
-
- roundup_size = roundup_pow_of_two(rss_size);
- roundup_size = ilog2(roundup_size);
-
- for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- tc_valid[i] = 1;
- tc_size[i] = roundup_size;
- tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
- }
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
- for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- u16 mode = 0;
-
- hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B,
- (tc_valid[i] & 0x1));
- hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
- HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
- hnae3_set_bit(mode, HCLGEVF_RSS_TC_SIZE_MSB_B,
- tc_size[i] >> HCLGEVF_RSS_TC_SIZE_MSB_OFFSET &
- 0x1);
- hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
- HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
-
- req->rss_tc_mode[i] = cpu_to_le16(mode);
- }
- status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (status)
- dev_err(&hdev->pdev->dev,
- "VF failed(=%d) to set rss tc mode\n", status);
-
- return status;
-}
-
/* for revision 0x20, vf shared the same rss config with pf */
static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RSS_MBX_RESP_LEN 8
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
struct hclge_vf_to_pf_msg send_msg;
u16 msg_num, hash_key_index;
@@ -744,7 +545,7 @@ static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
int ret;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
- msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
+ msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
HCLGEVF_RSS_MBX_RESP_LEN;
for (index = 0; index < msg_num; index++) {
send_msg.data[0] = index;
@@ -761,7 +562,7 @@ static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
if (index == msg_num - 1)
memcpy(&rss_cfg->rss_hash_key[hash_key_index],
&resp_msg[0],
- HCLGEVF_RSS_KEY_SIZE - hash_key_index);
+ HCLGE_COMM_RSS_KEY_SIZE - hash_key_index);
else
memcpy(&rss_cfg->rss_hash_key[hash_key_index],
&resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
@@ -774,29 +575,11 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
u8 *hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- int i, ret;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int ret;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- /* Get hash algorithm */
- if (hfunc) {
- switch (rss_cfg->hash_algo) {
- case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
- *hfunc = ETH_RSS_HASH_TOP;
- break;
- case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
- *hfunc = ETH_RSS_HASH_XOR;
- break;
- default:
- *hfunc = ETH_RSS_HASH_UNKNOWN;
- break;
- }
- }
-
- /* Get the RSS Key required by the user */
- if (key)
- memcpy(key, rss_cfg->rss_hash_key,
- HCLGEVF_RSS_KEY_SIZE);
+ hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
} else {
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
@@ -805,67 +588,28 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
if (ret)
return ret;
memcpy(key, rss_cfg->rss_hash_key,
- HCLGEVF_RSS_KEY_SIZE);
+ HCLGE_COMM_RSS_KEY_SIZE);
}
}
- if (indir)
- for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
- indir[i] = rss_cfg->rss_indirection_tbl[i];
+ hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
+ hdev->ae_dev->dev_specs.rss_ind_tbl_size);
return 0;
}
-static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
- u8 *hash_algo)
-{
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
- return 0;
- case ETH_RSS_HASH_XOR:
- *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
- return 0;
- case ETH_RSS_HASH_NO_CHANGE:
- *hash_algo = hdev->rss_cfg.hash_algo;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- u8 hash_algo;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret, i;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
+ ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key,
+ hfunc);
if (ret)
return ret;
-
- /* Set the RSS Hash Key if specified by the user */
- if (key) {
- ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "invalid hfunc type %u\n", hfunc);
- return ret;
- }
-
- /* Update the shadow RSS key with user specified qids */
- memcpy(rss_cfg->rss_hash_key, key,
- HCLGEVF_RSS_KEY_SIZE);
- } else {
- ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
- rss_cfg->rss_hash_key);
- if (ret)
- return ret;
- }
- rss_cfg->hash_algo = hash_algo;
}
/* update the shadow RSS table with user specified qids */
@@ -873,179 +617,26 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
rss_cfg->rss_indirection_tbl[i] = indir[i];
/* update the hardware */
- return hclgevf_set_rss_indir_table(hdev);
-}
-
-static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
-{
- u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;
-
- if (nfc->data & RXH_L4_B_2_3)
- hash_sets |= HCLGEVF_D_PORT_BIT;
- else
- hash_sets &= ~HCLGEVF_D_PORT_BIT;
-
- if (nfc->data & RXH_IP_SRC)
- hash_sets |= HCLGEVF_S_IP_BIT;
- else
- hash_sets &= ~HCLGEVF_S_IP_BIT;
-
- if (nfc->data & RXH_IP_DST)
- hash_sets |= HCLGEVF_D_IP_BIT;
- else
- hash_sets &= ~HCLGEVF_D_IP_BIT;
-
- if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
- hash_sets |= HCLGEVF_V_TAG_BIT;
-
- return hash_sets;
-}
-
-static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc,
- struct hclgevf_rss_input_tuple_cmd *req)
-{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- u8 tuple_sets;
-
- req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
-
- tuple_sets = hclgevf_get_rss_hash_bits(nfc);
- switch (nfc->flow_type) {
- case TCP_V4_FLOW:
- req->ipv4_tcp_en = tuple_sets;
- break;
- case TCP_V6_FLOW:
- req->ipv6_tcp_en = tuple_sets;
- break;
- case UDP_V4_FLOW:
- req->ipv4_udp_en = tuple_sets;
- break;
- case UDP_V6_FLOW:
- req->ipv6_udp_en = tuple_sets;
- break;
- case SCTP_V4_FLOW:
- req->ipv4_sctp_en = tuple_sets;
- break;
- case SCTP_V6_FLOW:
- if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
- (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
- return -EINVAL;
-
- req->ipv6_sctp_en = tuple_sets;
- break;
- case IPV4_FLOW:
- req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- break;
- case IPV6_FLOW:
- req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+ rss_cfg->rss_indirection_tbl);
}
static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
struct ethtool_rxnfc *nfc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- struct hclgevf_rss_input_tuple_cmd *req;
- struct hclgevf_desc desc;
int ret;
if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
- if (nfc->data &
- ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
-
- ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "failed to init rss tuple cmd, ret = %d\n", ret);
- return ret;
- }
-
- ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
+ ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->rss_cfg, nfc);
+ if (ret)
dev_err(&hdev->pdev->dev,
- "Set rss tuple fail, status = %d\n", ret);
- return ret;
- }
-
- rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
- rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
- rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
- rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
- rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
- rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
- rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
- rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
- return 0;
-}
+ "failed to set rss tuple, ret = %d.\n", ret);
-static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev,
- int flow_type, u8 *tuple_sets)
-{
- switch (flow_type) {
- case TCP_V4_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en;
- break;
- case UDP_V4_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en;
- break;
- case TCP_V6_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en;
- break;
- case UDP_V6_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en;
- break;
- case SCTP_V4_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en;
- break;
- case SCTP_V6_FLOW:
- *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en;
- break;
- case IPV4_FLOW:
- case IPV6_FLOW:
- *tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
-{
- u64 tuple_data = 0;
-
- if (tuple_sets & HCLGEVF_D_PORT_BIT)
- tuple_data |= RXH_L4_B_2_3;
- if (tuple_sets & HCLGEVF_S_PORT_BIT)
- tuple_data |= RXH_L4_B_0_1;
- if (tuple_sets & HCLGEVF_D_IP_BIT)
- tuple_data |= RXH_IP_DST;
- if (tuple_sets & HCLGEVF_S_IP_BIT)
- tuple_data |= RXH_IP_SRC;
-
- return tuple_data;
+ return ret;
}
static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
@@ -1060,47 +651,20 @@ static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
nfc->data = 0;
- ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type,
- &tuple_sets);
+ ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type,
+ &tuple_sets);
if (ret || !tuple_sets)
return ret;
- nfc->data = hclgevf_convert_rss_tuple(tuple_sets);
+ nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
return 0;
}
-static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
- struct hclgevf_rss_cfg *rss_cfg)
-{
- struct hclgevf_rss_input_tuple_cmd *req;
- struct hclgevf_desc desc;
- int ret;
-
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
-
- req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
-
- req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
- req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
- req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
- req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
- req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
- req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
- req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
- req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
-
- ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "Configure rss input fail, status = %d\n", ret);
- return ret;
-}
-
static int hclgevf_get_tc_size(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
return rss_cfg->rss_size;
}
@@ -1273,12 +837,11 @@ static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id,
u16 stream_id, bool enable)
{
struct hclgevf_cfg_com_tqp_queue_cmd *req;
- struct hclgevf_desc desc;
+ struct hclge_desc desc;
req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
- false);
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
req->stream_id = cpu_to_le16(stream_id);
if (enable)
@@ -1302,18 +865,6 @@ static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable)
return 0;
}
-static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct hclgevf_tqp *tqp;
- int i;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
- memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
- }
-}
-
static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
{
struct hclge_vf_to_pf_msg send_msg;
@@ -1428,7 +979,7 @@ static int hclgevf_update_mac_list(struct hnae3_handle *handle,
/* if the mac addr is already in the mac list, no need to add a new
* one into it, just check the mac addr state, convert it to a new
- * new state, or just remove it, or do nothing.
+ * state, or just remove it, or do nothing.
*/
mac_node = hclgevf_find_mac_node(list, addr);
if (mac_node) {
@@ -1514,15 +1065,18 @@ static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
struct list_head *list,
enum HCLGEVF_MAC_ADDR_TYPE mac_type)
{
+ char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hclgevf_mac_addr_node *mac_node, *tmp;
int ret;
list_for_each_entry_safe(mac_node, tmp, list, node) {
ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
if (ret) {
+ hnae3_format_mac_addr(format_mac_addr,
+ mac_node->mac_addr);
dev_err(&hdev->pdev->dev,
- "failed to configure mac %pM, state = %d, ret = %d\n",
- mac_node->mac_addr, mac_node->state, ret);
+ "failed to configure mac %s, state = %d, ret = %d\n",
+ format_mac_addr, mac_node->state, ret);
return;
}
if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
@@ -1685,11 +1239,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
__be16 proto, u16 vlan_id,
bool is_kill)
{
-#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0
-#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1
-#define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3
-
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_mbx_vlan_filter *vlan_filter;
struct hclge_vf_to_pf_msg send_msg;
int ret;
@@ -1711,11 +1262,11 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_VLAN_FILTER);
- send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
- memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
- sizeof(vlan_id));
- memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
- sizeof(proto));
+ vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data;
+ vlan_filter->is_kill = is_kill;
+ vlan_filter->vlan_id = cpu_to_le16(vlan_id);
+ vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto));
+
/* when removing the hw vlan filter fails, record the vlan id,
* and try to remove it from hw later, to be consistent
* with the stack.
@@ -1787,7 +1338,7 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle)
for (i = 1; i < handle->kinfo.num_tqps; i++) {
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
- memcpy(send_msg.data, &i, sizeof(i));
+ *(__le16 *)send_msg.data = cpu_to_le16(i);
ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
if (ret)
return ret;
@@ -1799,10 +1350,13 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle)
static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_mbx_mtu_info *mtu_info;
struct hclge_vf_to_pf_msg send_msg;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
- memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
+ mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data;
+ mtu_info->mtu = cpu_to_le32(new_mtu);
+
return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
}
@@ -1859,13 +1413,13 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
int ret;
if (hdev->reset_type == HNAE3_VF_RESET)
- ret = readl_poll_timeout(hdev->hw.io_base +
+ ret = readl_poll_timeout(hdev->hw.hw.io_base +
HCLGEVF_VF_RST_ING, val,
!(val & HCLGEVF_VF_RST_ING_BIT),
HCLGEVF_RESET_WAIT_US,
HCLGEVF_RESET_WAIT_TIMEOUT_US);
else
- ret = readl_poll_timeout(hdev->hw.io_base +
+ ret = readl_poll_timeout(hdev->hw.hw.io_base +
HCLGEVF_RST_ING, val,
!(val & HCLGEVF_RST_ING_BITS),
HCLGEVF_RESET_WAIT_US,
@@ -1891,13 +1445,13 @@ static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
{
u32 reg_val;
- reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
+ reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
if (enable)
reg_val |= HCLGEVF_NIC_SW_RST_RDY;
else
reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
- hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
+ hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG,
reg_val);
}
@@ -1948,7 +1502,7 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
hdev->rst_stats.vf_func_rst_cnt++;
}
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
/* inform hardware that preparatory work is done */
msleep(HCLGEVF_RESET_SYNC_TIME);
hclgevf_reset_handshake(hdev, true);
@@ -1977,9 +1531,9 @@ static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
- hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
+ hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG));
dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
- hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG));
+ hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG));
dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
@@ -2163,24 +1717,20 @@ static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
int retry_cnt = 0;
int ret;
-retry:
- down(&hdev->reset_sem);
- set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
- hdev->reset_type = rst_type;
- ret = hclgevf_reset_prepare(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev, "fail to prepare to reset, ret=%d\n",
- ret);
- if (hdev->reset_pending ||
- retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
- dev_err(&hdev->pdev->dev,
- "reset_pending:0x%lx, retry_cnt:%d\n",
- hdev->reset_pending, retry_cnt);
- clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
- msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
- goto retry;
- }
+ while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) {
+ down(&hdev->reset_sem);
+ set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = rst_type;
+ ret = hclgevf_reset_prepare(hdev);
+ if (!ret && !hdev->reset_pending)
+ break;
+
+ dev_err(&hdev->pdev->dev,
+ "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
+ ret, hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGEVF_RESET_RETRY_WAIT_MS);
}
/* disable misc vector before reset done */
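
The reworked hclgevf_reset_prepare_general() above turns the goto-based retry into a bounded while loop that leaves early once the prepare step succeeds with no reset pending, and otherwise logs, releases the semaphore, sleeps and retries. A minimal sketch of that control-flow pattern (helper names and limits here are illustrative, not the driver's):

#include <stdio.h>

#define RETRY_CNT	5

/* Stand-in for the prepare step; returns 0 on success. */
static int prepare_step(int attempt)
{
	return attempt < 3 ? -1 : 0;	/* pretend the first attempts fail */
}

static int prepare_with_retry(void)
{
	int retry_cnt = 0;
	int ret = -1;

	while (retry_cnt++ < RETRY_CNT) {
		ret = prepare_step(retry_cnt);
		if (!ret)
			break;		/* done, leave the loop early */

		fprintf(stderr, "prepare failed, ret=%d, retry_cnt=%d\n",
			ret, retry_cnt);
	}

	return ret;
}

int main(void)
{
	return prepare_with_retry() ? 1 : 0;
}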
@@ -2220,7 +1770,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
vector->vector_irq = pci_irq_vector(hdev->pdev,
HCLGEVF_MISC_VECTOR_NUM);
- vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
+ vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
/* vector status always valid for Vector 0 */
hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
@@ -2341,7 +1891,7 @@ static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
struct hclge_vf_to_pf_msg send_msg;
int ret;
- if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
return;
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
@@ -2378,7 +1928,7 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
}
if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
- hclgevf_tqps_update_stats(handle);
+ hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
/* VF does not need to request link status when this bit is set, because
* PF will push its link status to VFs when link status changed.
@@ -2419,7 +1969,7 @@ static void hclgevf_service_task(struct work_struct *work)
static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
{
- hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
+ hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr);
}
static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
@@ -2429,14 +1979,14 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
/* fetch the events from their corresponding regs */
cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
- HCLGEVF_VECTOR0_CMDQ_STATE_REG);
+ HCLGE_COMM_VECTOR0_CMDQ_STATE_REG);
if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
dev_info(&hdev->pdev->dev,
"receive reset interrupt 0x%x!\n", rst_ing_reg);
set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
*clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
hdev->rst_stats.vf_rst_cnt++;
/* set up VF hardware reset status, its PF will clear
@@ -2496,8 +2046,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
break;
}
- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
- hclgevf_enable_vector(&hdev->misc_vector, true);
+ hclgevf_enable_vector(&hdev->misc_vector, true);
return IRQ_HANDLED;
}
@@ -2560,8 +2109,8 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
roce->rinfo.base_vector = hdev->roce_base_msix_offset;
roce->rinfo.netdev = nic->kinfo.netdev;
- roce->rinfo.roce_io_base = hdev->hw.io_base;
- roce->rinfo.roce_mem_base = hdev->hw.mem_base;
+ roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
+ roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
@@ -2573,13 +2122,13 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
static int hclgevf_config_gro(struct hclgevf_dev *hdev)
{
struct hclgevf_cfg_gro_status_cmd *req;
- struct hclgevf_desc desc;
+ struct hclge_desc desc;
int ret;
- if (!hnae3_dev_gro_supported(hdev))
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
return 0;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
false);
req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
@@ -2593,71 +2142,37 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev)
return ret;
}
-static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
-{
- u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- struct hclgevf_rss_tuple_cfg *tuple_sets;
- u32 i;
-
- rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
- rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
- tuple_sets = &rss_cfg->rss_tuple_sets;
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- u8 *rss_ind_tbl;
-
- rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
-
- rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
- sizeof(*rss_ind_tbl), GFP_KERNEL);
- if (!rss_ind_tbl)
- return -ENOMEM;
-
- rss_cfg->rss_indirection_tbl = rss_ind_tbl;
- memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
- HCLGEVF_RSS_KEY_SIZE);
-
- tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
- tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv6_sctp_en =
- hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
- HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
- HCLGEVF_RSS_INPUT_TUPLE_SCTP;
- tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- }
-
- /* Initialize RSS indirect table */
- for (i = 0; i < rss_ind_tbl_size; i++)
- rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
-
- return 0;
-}
-
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
int ret;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
- rss_cfg->rss_hash_key);
+ ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw,
+ rss_cfg->rss_algo,
+ rss_cfg->rss_hash_key);
if (ret)
return ret;
- ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
+ ret = hclge_comm_set_rss_input_tuple(&hdev->nic, &hdev->hw.hw,
+ false, rss_cfg);
if (ret)
return ret;
}
- ret = hclgevf_set_rss_indir_table(hdev);
+ ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
+ rss_cfg->rss_indirection_tbl);
if (ret)
return ret;
- return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
+ hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map,
+ tc_offset, tc_valid, tc_size);
+
+ return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
+ tc_valid, tc_size);
}
static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
@@ -2711,7 +2226,7 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state);
- hclgevf_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclgevf_request_link_info(hdev);
@@ -2729,7 +2244,7 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
if (hdev->reset_type != HNAE3_VF_RESET)
hclgevf_reset_tqp(handle);
- hclgevf_reset_tqp_stats(handle);
+ hclge_comm_reset_tqp_stats(handle);
hclgevf_update_link_status(hdev, 0);
}
@@ -3034,8 +2549,6 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client,
static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
{
-#define HCLGEVF_MEM_BAR 4
-
struct pci_dev *pdev = hdev->pdev;
struct hclgevf_hw *hw = &hdev->hw;
@@ -3043,11 +2556,11 @@ static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev)
if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR)))
return 0;
- hw->mem_base = devm_ioremap_wc(&pdev->dev,
- pci_resource_start(pdev,
- HCLGEVF_MEM_BAR),
- pci_resource_len(pdev, HCLGEVF_MEM_BAR));
- if (!hw->mem_base) {
+ hw->hw.mem_base =
+ devm_ioremap_wc(&pdev->dev,
+ pci_resource_start(pdev, HCLGEVF_MEM_BAR),
+ pci_resource_len(pdev, HCLGEVF_MEM_BAR));
+ if (!hw->hw.mem_base) {
dev_err(&pdev->dev, "failed to map device memory\n");
return -EFAULT;
}
@@ -3081,12 +2594,11 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
pci_set_master(pdev);
hw = &hdev->hw;
- hw->hdev = hdev;
- hw->io_base = pci_iomap(pdev, 2, 0);
- if (!hw->io_base) {
+ hw->hw.io_base = pci_iomap(pdev, 2, 0);
+ if (!hw->hw.io_base) {
dev_err(&pdev->dev, "can't map configuration register space\n");
ret = -ENOMEM;
- goto err_clr_master;
+ goto err_release_regions;
}
ret = hclgevf_dev_mem_map(hdev);
@@ -3096,9 +2608,8 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
return 0;
err_unmap_io_base:
- pci_iounmap(pdev, hdev->hw.io_base);
-err_clr_master:
- pci_clear_master(pdev);
+ pci_iounmap(pdev, hdev->hw.hw.io_base);
+err_release_regions:
pci_release_regions(pdev);
err_disable_device:
pci_disable_device(pdev);
@@ -3110,11 +2621,10 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
- if (hdev->hw.mem_base)
- devm_iounmap(&pdev->dev, hdev->hw.mem_base);
+ if (hdev->hw.hw.mem_base)
+ devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
- pci_iounmap(pdev, hdev->hw.io_base);
- pci_clear_master(pdev);
+ pci_iounmap(pdev, hdev->hw.hw.io_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
@@ -3122,10 +2632,10 @@ static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
{
struct hclgevf_query_res_cmd *req;
- struct hclgevf_desc desc;
+ struct hclge_desc desc;
int ret;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true);
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -3179,13 +2689,13 @@ static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
ae_dev->dev_specs.max_non_tso_bd_num =
HCLGEVF_MAX_NON_TSO_BD_NUM;
ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
- ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
- struct hclgevf_desc *desc)
+ struct hclge_desc *desc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclgevf_dev_specs_0_cmd *req0;
@@ -3212,7 +2722,7 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
if (!dev_specs->rss_ind_tbl_size)
dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
if (!dev_specs->rss_key_size)
- dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+ dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
if (!dev_specs->max_frm_size)
@@ -3221,7 +2731,7 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
{
- struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
+ struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
int ret;
int i;
@@ -3235,11 +2745,10 @@ static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
hclgevf_cmd_setup_basic_desc(&desc[i],
- HCLGEVF_OPC_QUERY_DEV_SPECS, true);
- desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT);
+ HCLGE_OPC_QUERY_DEV_SPECS, true);
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
}
- hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS,
- true);
+ hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
if (ret)
@@ -3256,7 +2765,8 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
struct pci_dev *pdev = hdev->pdev;
int ret = 0;
- if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
+ if ((hdev->reset_type == HNAE3_VF_FULL_RESET ||
+ hdev->reset_type == HNAE3_FLR_RESET) &&
test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
hclgevf_misc_irq_uninit(hdev);
hclgevf_uninit_msi(hdev);
@@ -3318,7 +2828,10 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}
- ret = hclgevf_cmd_init(hdev);
+ hclgevf_arq_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->fw_version, false,
+ hdev->reset_pending);
if (ret) {
dev_err(&pdev->dev, "cmd failed %d\n", ret);
return ret;
@@ -3342,6 +2855,11 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}
+ /* get current port based vlan state from PF */
+ ret = hclgevf_get_port_base_vlan_filter_state(hdev);
+ if (ret)
+ return ret;
+
set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
hclgevf_init_rxd_adv_layout(hdev);
@@ -3364,11 +2882,14 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
goto err_devlink_init;
- ret = hclgevf_cmd_queue_init(hdev);
+ ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
goto err_cmd_queue_init;
- ret = hclgevf_cmd_init(hdev);
+ hclgevf_arq_init(hdev);
+ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
+ &hdev->fw_version, false,
+ hdev->reset_pending);
if (ret)
goto err_cmd_init;
@@ -3421,7 +2942,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
/* Initialize RSS for this VF */
- ret = hclgevf_rss_init_cfg(hdev);
+ ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev,
+ &hdev->rss_cfg);
if (ret) {
dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
goto err_config;
@@ -3434,7 +2956,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
- /* ensure vf tbl list as empty before init*/
+ /* ensure vf tbl list as empty before init */
ret = hclgevf_clear_vport_list(hdev);
if (ret) {
dev_err(&pdev->dev,
@@ -3468,7 +2990,7 @@ err_misc_irq_init:
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
err_cmd_init:
- hclgevf_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_cmd_queue_init:
hclgevf_devlink_uninit(hdev);
err_devlink_init:
@@ -3492,7 +3014,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
hclgevf_uninit_msi(hdev);
}
- hclgevf_cmd_uninit(hdev);
+ hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
hclgevf_devlink_uninit(hdev);
hclgevf_pci_uninit(hdev);
hclgevf_uninit_mac_list(hdev);
@@ -3595,6 +3117,9 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ u16 tc_offset[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_valid[HCLGE_COMM_MAX_TC_NUM];
+ u16 tc_size[HCLGE_COMM_MAX_TC_NUM];
u16 cur_rss_size = kinfo->rss_size;
u16 cur_tqps = kinfo->num_tqps;
u32 *rss_indir;
@@ -3603,7 +3128,10 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
hclgevf_update_rss_size(handle, new_tqps_num);
- ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
+ hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map,
+ tc_offset, tc_valid, tc_size);
+ ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
+ tc_valid, tc_size);
if (ret)
return ret;
@@ -3648,7 +3176,7 @@ static int hclgevf_get_status(struct hnae3_handle *handle)
static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed,
- u8 *duplex)
+ u8 *duplex, u32 *lane_num)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -3704,7 +3232,7 @@ static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
}
static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
@@ -3780,7 +3308,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
for (i = 0; i < reg_um; i++)
*reg++ = hclgevf_read_dev(&hdev->hw,
ring_reg_addr_list[i] +
- 0x200 * j);
+ HCLGEVF_TQP_REG_SIZE * j);
for (i = 0; i < separator_num; i++)
*reg++ = SEPARATOR_VALUE;
}
@@ -3798,7 +3326,7 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
}
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
- u8 *port_base_vlan_info, u8 data_size)
+ struct hclge_mbx_port_base_vlan *port_base_vlan)
{
struct hnae3_handle *nic = &hdev->nic;
struct hclge_vf_to_pf_msg send_msg;
@@ -3823,7 +3351,7 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
/* send msg to PF and wait update port based vlan info */
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_PORT_BASE_VLAN_CFG);
- memcpy(send_msg.data, port_base_vlan_info, data_size);
+ memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan));
ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
if (!ret) {
if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
@@ -3862,7 +3390,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.update_stats = hclgevf_update_stats,
.get_strings = hclgevf_get_strings,
.get_sset_count = hclgevf_get_sset_count,
- .get_rss_key_size = hclgevf_get_rss_key_size,
+ .get_rss_key_size = hclge_comm_get_rss_key_size,
.get_rss = hclgevf_get_rss,
.set_rss = hclgevf_set_rss,
.get_rss_tuple = hclgevf_get_rss_tuple,
@@ -3900,7 +3428,7 @@ static struct hnae3_ae_algo ae_algovf = {
.pdev_id_table = ae_algovf_pci_tbl,
};
-static int hclgevf_init(void)
+static int __init hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
@@ -3915,7 +3443,7 @@ static int hclgevf_init(void)
return 0;
}
-static void hclgevf_exit(void)
+static void __exit hclgevf_exit(void)
{
hnae3_unregister_ae_algo(&ae_algovf);
destroy_workqueue(hclgevf_wq);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index f6f736c0091c..59ca6c794d6d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -10,6 +10,8 @@
#include "hclge_mbx.h"
#include "hclgevf_cmd.h"
#include "hnae3.h"
+#include "hclge_comm_rss.h"
+#include "hclge_comm_tqp_stats.h"
#define HCLGEVF_MOD_VERSION "1.0"
#define HCLGEVF_DRIVER_NAME "hclgevf"
@@ -32,21 +34,6 @@
#define HCLGEVF_VECTOR_REG_OFFSET 0x4
#define HCLGEVF_VECTOR_VF_OFFSET 0x100000
-/* bar registers for cmdq */
-#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
-#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004
-#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008
-#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010
-#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014
-#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018
-#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701C
-#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
-#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
-#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
-
-#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108
-#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C
-
/* bar registers for common func */
#define HCLGEVF_GRO_EN_REG 0x28000
#define HCLGEVF_RXD_ADV_LAYOUT_EN_REG 0x28008
@@ -86,10 +73,6 @@
#define HCLGEVF_TQP_INTR_GL2_REG 0x20300
#define HCLGEVF_TQP_INTR_RL_REG 0x20900
-/* Vector0 interrupt CMDQ event source register(RW) */
-#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
-/* Vector0 interrupt CMDQ event status register(RO) */
-#define HCLGEVF_VECTOR0_CMDQ_STATE_REG 0x27104
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
/* RST register bits for RESET event */
@@ -112,27 +95,24 @@
#define HCLGEVF_WAIT_RESET_DONE 100
#define HCLGEVF_RSS_IND_TBL_SIZE 512
-#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
-#define HCLGEVF_RSS_KEY_SIZE 40
-#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ 0
-#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1
-#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2
-#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
-
-#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
-#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
-#define HCLGEVF_D_PORT_BIT BIT(0)
-#define HCLGEVF_S_PORT_BIT BIT(1)
-#define HCLGEVF_D_IP_BIT BIT(2)
-#define HCLGEVF_S_IP_BIT BIT(3)
-#define HCLGEVF_V_TAG_BIT BIT(4)
-#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
- (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
+
+#define HCLGEVF_TQP_MEM_SIZE 0x10000
+#define HCLGEVF_MEM_BAR 4
+/* in the bar4, the first half is for roce, and the second half is for nic */
+#define HCLGEVF_NIC_MEM_OFFSET(hdev) \
+ (pci_resource_len((hdev)->pdev, HCLGEVF_MEM_BAR) >> 1)
+#define HCLGEVF_TQP_MEM_OFFSET(hdev, i) \
+ (HCLGEVF_NIC_MEM_OFFSET(hdev) + HCLGEVF_TQP_MEM_SIZE * (i))
#define HCLGEVF_MAC_MAX_FRAME 9728
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
+#define hclgevf_read_dev(a, reg) \
+ hclge_comm_read_reg((a)->hw.io_base, reg)
+#define hclgevf_write_dev(a, reg, value) \
+ hclge_comm_write_reg((a)->hw.io_base, reg, value)
+
enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST,
HCLGEVF_VECTOR0_EVENT_MBX,
@@ -154,7 +134,6 @@ enum hclgevf_states {
HCLGEVF_STATE_RST_HANDLING,
HCLGEVF_STATE_MBX_SERVICE_SCHED,
HCLGEVF_STATE_MBX_HANDLING,
- HCLGEVF_STATE_CMD_DISABLE,
HCLGEVF_STATE_LINK_UPDATING,
HCLGEVF_STATE_PROMISC_CHANGED,
HCLGEVF_STATE_RST_FAIL,
@@ -173,29 +152,9 @@ struct hclgevf_mac {
};
struct hclgevf_hw {
- void __iomem *io_base;
- void __iomem *mem_base;
+ struct hclge_comm_hw hw;
int num_vec;
- struct hclgevf_cmq cmq;
struct hclgevf_mac mac;
- void *hdev; /* hchgevf device it is part of */
-};
-
-/* TQP stats */
-struct hlcgevf_tqp_stats {
- /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
- u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
- /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
- u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
-};
-
-struct hclgevf_tqp {
- struct device *dev; /* device for DMA mapping */
- struct hnae3_queue q;
- struct hlcgevf_tqp_stats tqp_stats;
- u16 index; /* global index in a NIC controller */
-
- bool alloced;
};
struct hclgevf_cfg {
@@ -208,27 +167,6 @@ struct hclgevf_cfg {
u32 numa_node_map;
};
-struct hclgevf_rss_tuple_cfg {
- u8 ipv4_tcp_en;
- u8 ipv4_udp_en;
- u8 ipv4_sctp_en;
- u8 ipv4_fragment_en;
- u8 ipv6_tcp_en;
- u8 ipv6_udp_en;
- u8 ipv6_sctp_en;
- u8 ipv6_fragment_en;
-};
-
-struct hclgevf_rss_cfg {
- u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
- u32 hash_algo;
- u32 rss_size;
- u8 hw_tc_map;
- /* shadow table */
- u8 *rss_indirection_tbl;
- struct hclgevf_rss_tuple_cfg rss_tuple_sets;
-};
-
struct hclgevf_misc_vector {
u8 __iomem *addr;
int vector_irq;
@@ -273,7 +211,7 @@ struct hclgevf_dev {
struct hnae3_ae_dev *ae_dev;
struct hclgevf_hw hw;
struct hclgevf_misc_vector misc_vector;
- struct hclgevf_rss_cfg rss_cfg;
+ struct hclge_comm_rss_cfg rss_cfg;
unsigned long state;
unsigned long flr_state;
unsigned long default_reset_request;
@@ -324,7 +262,7 @@ struct hclgevf_dev {
struct delayed_work service_task;
- struct hclgevf_tqp *htqp;
+ struct hclge_comm_tqp *htqp;
struct hnae3_handle nic;
struct hnae3_handle roce;
@@ -355,5 +293,5 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
- u8 *port_base_vlan_info, u8 data_size);
+ struct hclge_mbx_port_base_vlan *port_base_vlan);
#endif
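
The HCLGEVF_NIC_MEM_OFFSET()/HCLGEVF_TQP_MEM_OFFSET() macros added to this header encode the comment's split of BAR4: the first half of the BAR belongs to RoCE, the second half to the NIC, and each TQP gets a 0x10000-byte window inside the NIC half. A quick standalone check of that arithmetic (the 64 MiB BAR length is only an example value):

#include <stdint.h>
#include <stdio.h>

#define TQP_MEM_SIZE	0x10000ULL

/* NIC region starts at the second half of the BAR. */
static uint64_t nic_mem_offset(uint64_t bar_len)
{
	return bar_len >> 1;
}

static uint64_t tqp_mem_offset(uint64_t bar_len, unsigned int i)
{
	return nic_mem_offset(bar_len) + TQP_MEM_SIZE * i;
}

int main(void)
{
	uint64_t bar_len = 64ULL << 20;	/* example: 64 MiB BAR4 */
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("TQP %u memory window at 0x%llx\n", i,
		       (unsigned long long)tqp_mem_offset(bar_len, i));
	return 0;
}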
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index c5ac6ecf36e1..bbf7b14079de 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -17,7 +17,7 @@ static int hclgevf_resp_to_errno(u16 resp_code)
static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
{
/* this function should be called with mbx_resp.mbx_mutex held
- * to prtect the received_response from race condition
+ * to protect the received_response from race condition
*/
hdev->mbx_resp.received_resp = false;
hdev->mbx_resp.origin_mbx_msg = 0;
@@ -32,8 +32,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox
* message to PF.
* @hdev: pointer to struct hclgevf_dev
- * @resp_msg: pointer to store the original message type and response status
- * @len: the resp_msg data array length.
+ * @code0: the message opcode VF sends to PF.
+ * @code1: the message sub-opcode VF sends to PF.
+ * @resp_data: pointer to store response data from PF to VF.
+ * @resp_len: the length of resp_data from PF to VF.
*/
static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
u8 *resp_data, u16 resp_len)
@@ -53,7 +55,8 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
}
while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
- if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state))
return -EIO;
usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2);
@@ -97,7 +100,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
u8 *resp_data, u16 resp_len)
{
struct hclge_mbx_vf_to_pf_cmd *req;
- struct hclgevf_desc desc;
+ struct hclge_desc desc;
int status;
req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
@@ -121,7 +124,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
if (need_resp) {
mutex_lock(&hdev->mbx_resp.mbx_mutex);
hclgevf_reset_mbx_resp_status(hdev);
- req->match_id = hdev->mbx_resp.match_id;
+ req->match_id = cpu_to_le16(hdev->mbx_resp.match_id);
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status) {
dev_err(&hdev->pdev->dev,
@@ -151,35 +154,37 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
{
- u32 tail = hclgevf_read_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG);
+ u32 tail = hclgevf_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);
- return tail == hw->cmq.crq.next_to_use;
+ return tail == hw->hw.cmq.crq.next_to_use;
}
static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
struct hclge_mbx_pf_to_vf_cmd *req)
{
+ u16 vf_mbx_msg_subcode = le16_to_cpu(req->msg.vf_mbx_msg_subcode);
+ u16 vf_mbx_msg_code = le16_to_cpu(req->msg.vf_mbx_msg_code);
struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp;
+ u16 resp_status = le16_to_cpu(req->msg.resp_status);
+ u16 match_id = le16_to_cpu(req->match_id);
if (resp->received_resp)
dev_warn(&hdev->pdev->dev,
- "VF mbx resp flag not clear(%u)\n",
- req->msg.vf_mbx_msg_code);
-
- resp->origin_mbx_msg =
- (req->msg.vf_mbx_msg_code << 16);
- resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode;
- resp->resp_status =
- hclgevf_resp_to_errno(req->msg.resp_status);
+ "VF mbx resp flag not clear(%u)\n",
+ vf_mbx_msg_code);
+
+ resp->origin_mbx_msg = (vf_mbx_msg_code << 16);
+ resp->origin_mbx_msg |= vf_mbx_msg_subcode;
+ resp->resp_status = hclgevf_resp_to_errno(resp_status);
memcpy(resp->additional_info, req->msg.resp_data,
HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
- if (req->match_id) {
+ if (match_id) {
/* If match_id is not zero, it means PF supports match_id.
* If the match_id is right, VF gets the right response;
* otherwise the response is ignored. The driver will clear hdev->mbx_resp
* when sending the next message which needs a response.
*/
- if (req->match_id == resp->match_id)
+ if (match_id == resp->match_id)
resp->received_resp = true;
} else {
resp->received_resp = true;
@@ -196,7 +201,7 @@ static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev,
HCLGE_MBX_MAX_ARQ_MSG_NUM) {
dev_warn(&hdev->pdev->dev,
"Async Q full, dropping msg(%u)\n",
- req->msg.code);
+ le16_to_cpu(req->msg.code));
return;
}
@@ -212,14 +217,16 @@ static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev,
void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
{
struct hclge_mbx_pf_to_vf_cmd *req;
- struct hclgevf_cmq_ring *crq;
- struct hclgevf_desc *desc;
+ struct hclge_comm_cmq_ring *crq;
+ struct hclge_desc *desc;
u16 flag;
+ u16 code;
- crq = &hdev->hw.cmq.crq;
+ crq = &hdev->hw.hw.cmq.crq;
while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
- if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state)) {
dev_info(&hdev->pdev->dev, "vf crq need init\n");
return;
}
@@ -228,10 +235,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ code = le16_to_cpu(req->msg.code);
if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
dev_warn(&hdev->pdev->dev,
"dropped invalid mailbox message, code = %u\n",
- req->msg.code);
+ code);
/* dropping/not processing this invalid message */
crq->desc[crq->next_to_use].flag = 0;
@@ -247,7 +255,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
* timeout and simultaneously queue the async messages for later
* processing in context of mailbox task i.e. the slow path.
*/
- switch (req->msg.code) {
+ switch (code) {
case HCLGE_MBX_PF_VF_RESP:
hclgevf_handle_mbx_response(hdev, req);
break;
@@ -261,7 +269,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
default:
dev_err(&hdev->pdev->dev,
"VF received unsupported(%u) mbx msg from PF\n",
- req->msg.code);
+ code);
break;
}
crq->desc[crq->next_to_use].flag = 0;
@@ -269,7 +277,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
- hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
+ hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG,
crq->next_to_use);
}
@@ -283,33 +291,39 @@ static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
+ struct hclge_mbx_port_base_vlan *vlan_info;
+ struct hclge_mbx_link_status *link_info;
+ struct hclge_mbx_link_mode *link_mode;
enum hnae3_reset_type reset_type;
u16 link_status, state;
- u16 *msg_q, *vlan_info;
+ __le16 *msg_q;
+ u16 opcode;
u8 duplex;
u32 speed;
u32 tail;
u8 flag;
- u8 idx;
+ u16 idx;
tail = hdev->arq.tail;
/* process all the async queue messages */
while (tail != hdev->arq.head) {
- if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
+ &hdev->hw.hw.comm_state)) {
dev_info(&hdev->pdev->dev,
"vf crq need init in async\n");
return;
}
msg_q = hdev->arq.msg_q[hdev->arq.head];
-
- switch (msg_q[0]) {
+ opcode = le16_to_cpu(msg_q[0]);
+ switch (opcode) {
case HCLGE_MBX_LINK_STAT_CHANGE:
- link_status = msg_q[1];
- memcpy(&speed, &msg_q[2], sizeof(speed));
- duplex = (u8)msg_q[4];
- flag = (u8)msg_q[5];
+ link_info = (struct hclge_mbx_link_status *)(msg_q + 1);
+ link_status = le16_to_cpu(link_info->link_status);
+ speed = le32_to_cpu(link_info->speed);
+ duplex = (u8)le16_to_cpu(link_info->duplex);
+ flag = link_info->flag;
/* update upper layer with new link status */
hclgevf_update_speed_duplex(hdev, speed, duplex);
@@ -321,13 +335,14 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
break;
case HCLGE_MBX_LINK_STAT_MODE:
- idx = (u8)msg_q[1];
+ link_mode = (struct hclge_mbx_link_mode *)(msg_q + 1);
+ idx = le16_to_cpu(link_mode->idx);
if (idx)
- memcpy(&hdev->hw.mac.supported, &msg_q[2],
- sizeof(unsigned long));
+ hdev->hw.mac.supported =
+ le64_to_cpu(link_mode->link_mode);
else
- memcpy(&hdev->hw.mac.advertising, &msg_q[2],
- sizeof(unsigned long));
+ hdev->hw.mac.advertising =
+ le64_to_cpu(link_mode->link_mode);
break;
case HCLGE_MBX_ASSERTING_RESET:
/* PF has asserted reset hence VF should go in pending
@@ -335,25 +350,27 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
* has been completely reset. After this stack should
* eventually be re-initialized.
*/
- reset_type = (enum hnae3_reset_type)msg_q[1];
+ reset_type =
+ (enum hnae3_reset_type)le16_to_cpu(msg_q[1]);
set_bit(reset_type, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
break;
case HCLGE_MBX_PUSH_VLAN_INFO:
- state = msg_q[1];
- vlan_info = &msg_q[1];
+ vlan_info =
+ (struct hclge_mbx_port_base_vlan *)(msg_q + 1);
+ state = le16_to_cpu(vlan_info->state);
hclgevf_update_port_base_vlan_info(hdev, state,
- (u8 *)vlan_info, 8);
+ vlan_info);
break;
case HCLGE_MBX_PUSH_PROMISC_INFO:
- hclgevf_parse_promisc_info(hdev, msg_q[1]);
+ hclgevf_parse_promisc_info(hdev, le16_to_cpu(msg_q[1]));
break;
default:
dev_err(&hdev->pdev->dev,
"fetched unsupported(%u) message from arq\n",
- msg_q[0]);
+ opcode);
break;
}
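
The async-handler hunks above stop treating the ARQ payload as a bare u16 array and instead overlay little-endian aware structures (hclge_mbx_link_status, hclge_mbx_link_mode, hclge_mbx_port_base_vlan) and convert each field with le16_to_cpu()/le32_to_cpu()/le64_to_cpu(). A minimal userspace sketch of the same decode pattern, with an invented link-status layout standing in for the real mailbox structs:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented example layout; the real structs live in hclge_mbx.h. */
struct link_status_msg {
	uint16_t link_status;	/* little-endian on the wire */
	uint32_t speed;		/* little-endian on the wire */
	uint16_t duplex;
	uint8_t  flag;
} __attribute__((packed));

static void handle_link_status(const uint8_t *payload)
{
	struct link_status_msg msg;

	/* copy out of the raw queue entry, then convert per field */
	memcpy(&msg, payload, sizeof(msg));
	printf("link %u, speed %u, duplex %u, flag %u\n",
	       le16toh(msg.link_status), le32toh(msg.speed),
	       le16toh(msg.duplex), msg.flag);
}

int main(void)
{
	uint8_t raw[16] = { 0 };
	struct link_status_msg msg = {
		.link_status = htole16(1),
		.speed = htole32(25000),
		.duplex = htole16(1),
		.flag = 1,
	};

	memcpy(raw, &msg, sizeof(msg));
	handle_link_status(raw);
	return 0;
}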
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
index e4bfb6191fef..5d4895bb57a1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
@@ -29,7 +29,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
TP_fast_assign(
__entry->vfid = req->dest_vfid;
- __entry->code = req->msg.code;
+ __entry->code = le16_to_cpu(req->msg.code);
__assign_str(pciname, pci_name(hdev->pdev));
__assign_str(devname, &hdev->nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 07fdab58001d..9232caaf0bdc 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -174,7 +174,7 @@ static int hns_mdio_wait_ready(struct mii_bus *bus)
u32 cmd_reg_value;
int i;
- /* waitting for MDIO_COMMAND_REG 's mdio_start==0 */
+ /* waiting for MDIO_COMMAND_REG's mdio_start==0 */
/* after that can do read or write*/
for (i = 0; i < MDIO_TIMEOUT; i++) {
cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev,
@@ -206,7 +206,7 @@ static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
}
/**
- * hns_mdio_write - access phy register
+ * hns_mdio_write_c22 - access phy register
* @bus: mdio bus
* @phy_id: phy id
* @regnum: register num
@@ -214,21 +214,19 @@ static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
*
* Return 0 on success, negative on failure
*/
-static int hns_mdio_write(struct mii_bus *bus,
- int phy_id, int regnum, u16 data)
+static int hns_mdio_write_c22(struct mii_bus *bus,
+ int phy_id, int regnum, u16 data)
{
- int ret;
struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
- u8 devad = ((regnum >> 16) & 0x1f);
- u8 is_c45 = !!(regnum & MII_ADDR_C45);
u16 reg = (u16)(regnum & 0xffff);
- u8 op;
u16 cmd_reg_cfg;
+ int ret;
+ u8 op;
dev_dbg(&bus->dev, "mdio write %s,base is %p\n",
bus->id, mdio_dev->vbase);
- dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x, write data=%d\n",
- phy_id, is_c45, devad, reg, data);
+ dev_dbg(&bus->dev, "phy id=%d, reg=%#x, write data=%d\n",
+ phy_id, reg, data);
/* wait for ready */
ret = hns_mdio_wait_ready(bus);
@@ -237,58 +235,91 @@ static int hns_mdio_write(struct mii_bus *bus,
return ret;
}
- if (!is_c45) {
- cmd_reg_cfg = reg;
- op = MDIO_C22_WRITE;
- } else {
- /* config the cmd-reg to write addr*/
- MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
- MDIO_ADDR_DATA_S, reg);
+ cmd_reg_cfg = reg;
+ op = MDIO_C22_WRITE;
- hns_mdio_cmd_write(mdio_dev, is_c45,
- MDIO_C45_WRITE_ADDR, phy_id, devad);
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M,
+ MDIO_WDATA_DATA_S, data);
- /* check for read or write opt is finished */
- ret = hns_mdio_wait_ready(bus);
- if (ret) {
- dev_err(&bus->dev, "MDIO bus is busy\n");
- return ret;
- }
+ hns_mdio_cmd_write(mdio_dev, false, op, phy_id, cmd_reg_cfg);
+
+ return 0;
+}
+
+/**
+ * hns_mdio_write_c45 - access phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @devad: device address to read
+ * @regnum: register num
+ * @data: register value
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum, u16 data)
+{
+ struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ u16 reg = (u16)(regnum & 0xffff);
+ u16 cmd_reg_cfg;
+ int ret;
+ u8 op;
+
+ dev_dbg(&bus->dev, "mdio write %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, devad=%d, reg=%#x, write data=%d\n",
+ phy_id, devad, reg, data);
+
+ /* wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ /* config the cmd-reg to write addr*/
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+ MDIO_ADDR_DATA_S, reg);
- /* config the data needed writing */
- cmd_reg_cfg = devad;
- op = MDIO_C45_WRITE_DATA;
+ hns_mdio_cmd_write(mdio_dev, true, MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+ /* check for read or write opt is finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
}
+ /* config the data needed writing */
+ cmd_reg_cfg = devad;
+ op = MDIO_C45_WRITE_DATA;
+
MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M,
MDIO_WDATA_DATA_S, data);
- hns_mdio_cmd_write(mdio_dev, is_c45, op, phy_id, cmd_reg_cfg);
+ hns_mdio_cmd_write(mdio_dev, true, op, phy_id, cmd_reg_cfg);
return 0;
}
/**
- * hns_mdio_read - access phy register
+ * hns_mdio_read_c22 - access phy register
* @bus: mdio bus
* @phy_id: phy id
* @regnum: register num
*
* Return phy register value
*/
-static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+static int hns_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
{
- int ret;
- u16 reg_val;
- u8 devad = ((regnum >> 16) & 0x1f);
- u8 is_c45 = !!(regnum & MII_ADDR_C45);
- u16 reg = (u16)(regnum & 0xffff);
struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ u16 reg = (u16)(regnum & 0xffff);
+ u16 reg_val;
+ int ret;
dev_dbg(&bus->dev, "mdio read %s,base is %p\n",
bus->id, mdio_dev->vbase);
- dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x!\n",
- phy_id, is_c45, devad, reg);
+ dev_dbg(&bus->dev, "phy id=%d, reg=%#x!\n", phy_id, reg);
/* Step 1: wait for ready */
ret = hns_mdio_wait_ready(bus);
@@ -297,29 +328,74 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return ret;
}
- if (!is_c45) {
- hns_mdio_cmd_write(mdio_dev, is_c45,
- MDIO_C22_READ, phy_id, reg);
- } else {
- MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
- MDIO_ADDR_DATA_S, reg);
+ hns_mdio_cmd_write(mdio_dev, false, MDIO_C22_READ, phy_id, reg);
- /* Step 2; config the cmd-reg to write addr*/
- hns_mdio_cmd_write(mdio_dev, is_c45,
- MDIO_C45_WRITE_ADDR, phy_id, devad);
+ /* Step 2: waiting for MDIO_COMMAND_REG's mdio_start==0 */
+ /* check for read or write opt is finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
- /* Step 3: check for read or write opt is finished */
- ret = hns_mdio_wait_ready(bus);
- if (ret) {
- dev_err(&bus->dev, "MDIO bus is busy\n");
- return ret;
- }
+ reg_val = MDIO_GET_REG_BIT(mdio_dev, MDIO_STA_REG, MDIO_STATE_STA_B);
+ if (reg_val) {
+ dev_err(&bus->dev, " ERROR! MDIO Read failed!\n");
+ return -EBUSY;
+ }
- hns_mdio_cmd_write(mdio_dev, is_c45,
- MDIO_C45_READ, phy_id, devad);
+ /* Step 3: get out data */
+ reg_val = (u16)MDIO_GET_REG_FIELD(mdio_dev, MDIO_RDATA_REG,
+ MDIO_RDATA_DATA_M, MDIO_RDATA_DATA_S);
+
+ return reg_val;
+}
+
+/**
+ * hns_mdio_read_c45 - access phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @devad: device address to read
+ * @regnum: register num
+ *
+ * Return phy register value
+ */
+static int hns_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum)
+{
+ struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ u16 reg = (u16)(regnum & 0xffff);
+ u16 reg_val;
+ int ret;
+
+ dev_dbg(&bus->dev, "mdio read %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, devad=%d, reg=%#x!\n",
+ phy_id, devad, reg);
+
+ /* Step 1: wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+ MDIO_ADDR_DATA_S, reg);
+
+ /* Step 2: config the cmd-reg to write addr */
+ hns_mdio_cmd_write(mdio_dev, true, MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+ /* Step 3: check for read or write opt is finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
}
- /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/
+ hns_mdio_cmd_write(mdio_dev, true, MDIO_C45_READ, phy_id, devad);
+
+ /* Step 5: waiting for MDIO_COMMAND_REG's mdio_start==0 */
/* check for read or write opt is finished */
ret = hns_mdio_wait_ready(bus);
if (ret) {
@@ -438,8 +514,10 @@ static int hns_mdio_probe(struct platform_device *pdev)
}
new_bus->name = MDIO_BUS_NAME;
- new_bus->read = hns_mdio_read;
- new_bus->write = hns_mdio_write;
+ new_bus->read = hns_mdio_read_c22;
+ new_bus->write = hns_mdio_write_c22;
+ new_bus->read_c45 = hns_mdio_read_c45;
+ new_bus->write_c45 = hns_mdio_write_c45;
new_bus->reset = hns_mdio_reset;
new_bus->priv = mdio_dev;
new_bus->parent = &pdev->dev;
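
Before this split, hns_mdio_read()/hns_mdio_write() multiplexed clause-22 and clause-45 accesses through a single callback, recovering the device address from bits 20:16 of regnum and the clause-45 marker from MII_ADDR_C45 (bit 30 in current kernels, stated here as an assumption); the new read_c45/write_c45 callbacks receive devad as an explicit argument instead. A small standalone sketch of the old decode for comparison:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed value of the clause-45 marker bit in the combined regnum. */
#define MII_ADDR_C45	(1U << 30)

struct mdio_access {
	bool	 is_c45;
	uint8_t  devad;	/* clause-45 device address (MMD) */
	uint16_t reg;	/* register number within the device */
};

static struct mdio_access decode_regnum(uint32_t regnum)
{
	struct mdio_access acc = {
		.is_c45 = !!(regnum & MII_ADDR_C45),
		.devad  = (regnum >> 16) & 0x1f,
		.reg    = regnum & 0xffff,
	};

	return acc;
}

int main(void)
{
	/* clause-45 access to MMD 1, register 0 */
	struct mdio_access acc = decode_regnum(MII_ADDR_C45 | (1 << 16) | 0);

	printf("c45=%d devad=%u reg=%#x\n", acc.is_c45, acc.devad, acc.reg);
	return 0;
}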
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
index 19eb839177ec..061952c6c21a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
@@ -85,6 +85,7 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx)
struct tag_sml_funcfg_tbl *funcfg_table_elem;
struct hinic_cmd_lt_rd *read_data;
u16 out_size = sizeof(*read_data);
+ int ret = ~0;
int err;
read_data = kzalloc(sizeof(*read_data), GFP_KERNEL);
@@ -111,20 +112,25 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx)
switch (idx) {
case VALID:
- return funcfg_table_elem->dw0.bs.valid;
+ ret = funcfg_table_elem->dw0.bs.valid;
+ break;
case RX_MODE:
- return funcfg_table_elem->dw0.bs.nic_rx_mode;
+ ret = funcfg_table_elem->dw0.bs.nic_rx_mode;
+ break;
case MTU:
- return funcfg_table_elem->dw1.bs.mtu;
+ ret = funcfg_table_elem->dw1.bs.mtu;
+ break;
case RQ_DEPTH:
- return funcfg_table_elem->dw13.bs.cfg_rq_depth;
+ ret = funcfg_table_elem->dw13.bs.cfg_rq_depth;
+ break;
case QUEUE_NUM:
- return funcfg_table_elem->dw13.bs.cfg_q_num;
+ ret = funcfg_table_elem->dw13.bs.cfg_q_num;
+ break;
}
kfree(read_data);
- return ~0;
+ return ret;
}
static ssize_t hinic_dbg_cmd_read(struct file *filp, char __user *buffer, size_t count,
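
The hunk above rewrites the switch in hinic_dbg_get_func_table() so each case sets ret and breaks instead of returning directly; that way the kfree(read_data) at the bottom runs on every path and the lookup buffer is no longer leaked by the early returns. The general single-exit shape, as a standalone sketch with illustrative names:

#include <stdio.h>
#include <stdlib.h>

enum field { FIELD_A, FIELD_B };

static int read_field(enum field idx)
{
	int *data;
	int ret = -1;	/* default for unknown fields */

	data = calloc(2, sizeof(*data));
	if (!data)
		return -1;

	data[0] = 10;
	data[1] = 20;

	switch (idx) {
	case FIELD_A:
		ret = data[0];	/* no early return: fall through to the free */
		break;
	case FIELD_B:
		ret = data[1];
		break;
	}

	free(data);		/* single exit keeps the buffer from leaking */
	return ret;
}

int main(void)
{
	printf("%d %d\n", read_field(FIELD_A), read_field(FIELD_B));
	return 0;
}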
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
index e9e00cfa1329..e10f739d8339 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
@@ -12,7 +12,6 @@
#define TBL_ID_FUNC_CFG_SM_INST 1
#define HINIC_FUNCTION_CONFIGURE_TABLE_SIZE 64
-#define HINIC_FUNCTION_CONFIGURE_TABLE 1
struct hinic_cmd_lt_rd {
u8 status;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index fb3e89141a0d..52ea97c818b8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -22,6 +22,10 @@
#define LP_PKT_CNT 64
+#define HINIC_MAX_JUMBO_FRAME_SIZE 15872
+#define HINIC_MAX_MTU_SIZE (HINIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN)
+#define HINIC_MIN_MTU_SIZE 256
+
enum hinic_flags {
HINIC_LINK_UP = BIT(0),
HINIC_INTF_UP = BIT(1),
@@ -95,9 +99,6 @@ struct hinic_dev {
u16 sq_depth;
u16 rq_depth;
- struct hinic_txq_stats tx_stats;
- struct hinic_rxq_stats rx_stats;
-
u8 rss_tmpl_idx;
u8 rss_hash_engine;
u16 num_rss;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index 60ae8bfc5f69..1749d26f4bef 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -43,9 +43,7 @@ static bool check_image_valid(struct hinic_devlink_priv *priv, const u8 *buf,
for (i = 0; i < fw_image->fw_info.fw_section_cnt; i++) {
len += fw_image->fw_section_info[i].fw_section_len;
- memcpy(&host_image->image_section_info[i],
- &fw_image->fw_section_info[i],
- sizeof(struct fw_section_info_st));
+ host_image->image_section_info[i] = fw_image->fw_section_info[i];
}
if (len != fw_image->fw_len ||
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index a85667078b72..f4b680286911 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -55,7 +55,6 @@
#define COALESCE_ALL_QUEUE 0xFFFF
#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT)
#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT)
-#define OBJ_STR_MAX_LEN 32
struct hw2ethtool_link_mode {
enum ethtool_link_mode_bit_indices link_mode_bit;
@@ -547,7 +546,9 @@ static void hinic_get_drvinfo(struct net_device *netdev,
}
static void hinic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
@@ -580,7 +581,9 @@ static int check_ringparam_valid(struct hinic_dev *nic_dev,
}
static int hinic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
u16 new_sq_depth, new_rq_depth;
@@ -1205,8 +1208,6 @@ static u32 hinic_get_rxfh_indir_size(struct net_device *netdev)
return HINIC_RSS_INDIR_SIZE;
}
-#define ARRAY_LEN(arr) ((int)((int)sizeof(arr) / (int)sizeof(arr[0])))
-
#define HINIC_FUNC_STAT(_stat_item) { \
.name = #_stat_item, \
.size = sizeof_field(struct hinic_vport_stats, _stat_item), \
@@ -1374,7 +1375,7 @@ static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
break;
hinic_txq_get_stats(&nic_dev->txqs[qid], &txq_stats);
- for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++, i++) {
p = (char *)&txq_stats +
hinic_tx_queue_stats[j].offset;
data[i] = (hinic_tx_queue_stats[j].size ==
@@ -1387,7 +1388,7 @@ static void get_drv_queue_stats(struct hinic_dev *nic_dev, u64 *data)
break;
hinic_rxq_get_stats(&nic_dev->rxqs[qid], &rxq_stats);
- for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++, i++) {
p = (char *)&rxq_stats +
hinic_rx_queue_stats[j].offset;
data[i] = (hinic_rx_queue_stats[j].size ==
@@ -1411,7 +1412,7 @@ static void hinic_get_ethtool_stats(struct net_device *netdev,
netif_err(nic_dev, drv, netdev,
"Failed to get vport stats from firmware\n");
- for (j = 0; j < ARRAY_LEN(hinic_function_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_function_stats); j++, i++) {
p = (char *)&vport_stats + hinic_function_stats[j].offset;
data[i] = (hinic_function_stats[j].size ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
@@ -1420,8 +1421,8 @@ static void hinic_get_ethtool_stats(struct net_device *netdev,
port_stats = kzalloc(sizeof(*port_stats), GFP_KERNEL);
if (!port_stats) {
memset(&data[i], 0,
- ARRAY_LEN(hinic_port_stats) * sizeof(*data));
- i += ARRAY_LEN(hinic_port_stats);
+ ARRAY_SIZE(hinic_port_stats) * sizeof(*data));
+ i += ARRAY_SIZE(hinic_port_stats);
goto get_drv_stats;
}
@@ -1430,7 +1431,7 @@ static void hinic_get_ethtool_stats(struct net_device *netdev,
netif_err(nic_dev, drv, netdev,
"Failed to get port stats from firmware\n");
- for (j = 0; j < ARRAY_LEN(hinic_port_stats); j++, i++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_port_stats); j++, i++) {
p = (char *)port_stats + hinic_port_stats[j].offset;
data[i] = (hinic_port_stats[j].size ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
@@ -1449,14 +1450,14 @@ static int hinic_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_TEST:
- return ARRAY_LEN(hinic_test_strings);
+ return ARRAY_SIZE(hinic_test_strings);
case ETH_SS_STATS:
q_num = nic_dev->num_qps;
- count = ARRAY_LEN(hinic_function_stats) +
- (ARRAY_LEN(hinic_tx_queue_stats) +
- ARRAY_LEN(hinic_rx_queue_stats)) * q_num;
+ count = ARRAY_SIZE(hinic_function_stats) +
+ (ARRAY_SIZE(hinic_tx_queue_stats) +
+ ARRAY_SIZE(hinic_rx_queue_stats)) * q_num;
- count += ARRAY_LEN(hinic_port_stats);
+ count += ARRAY_SIZE(hinic_port_stats);
return count;
default:
@@ -1476,27 +1477,27 @@ static void hinic_get_strings(struct net_device *netdev,
memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
return;
case ETH_SS_STATS:
- for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(hinic_function_stats); i++) {
memcpy(p, hinic_function_stats[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < ARRAY_LEN(hinic_port_stats); i++) {
+ for (i = 0; i < ARRAY_SIZE(hinic_port_stats); i++) {
memcpy(p, hinic_port_stats[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_LEN(hinic_tx_queue_stats); j++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_tx_queue_stats); j++) {
sprintf(p, hinic_tx_queue_stats[j].name, i);
p += ETH_GSTRING_LEN;
}
}
for (i = 0; i < nic_dev->num_qps; i++) {
- for (j = 0; j < ARRAY_LEN(hinic_rx_queue_stats); j++) {
+ for (j = 0; j < ARRAY_SIZE(hinic_rx_queue_stats); j++) {
sprintf(p, hinic_rx_queue_stats[j].name, i);
p += ETH_GSTRING_LEN;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
index 06586173add7..998717f02136 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -814,7 +814,6 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
{
struct hinic_hwif *hwif = attr->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t cell_ctxt_size;
chain->hwif = hwif;
chain->chain_type = attr->chain_type;
@@ -826,8 +825,8 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain,
sema_init(&chain->sem, 1);
- cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt);
- chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL);
+ chain->cell_ctxt = devm_kcalloc(&pdev->dev, chain->num_cells,
+ sizeof(*chain->cell_ctxt), GFP_KERNEL);
if (!chain->cell_ctxt)
return -ENOMEM;
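
Several hinic hunks, like the one above, switch devm_kzalloc(n * size, ...) to devm_kcalloc(n, size, ...); the calloc-style helper checks the multiplication for overflow before allocating, so a firmware- or configuration-derived count cannot silently wrap the size computation. A minimal sketch of such a guard (this is not the kernel's implementation, only the idea):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate n zeroed elements of 'size' bytes, refusing overflowing products. */
static void *zalloc_array(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;	/* n * size would overflow */

	return calloc(n, size);
}

int main(void)
{
	int *ctxt = zalloc_array(16, sizeof(*ctxt));

	if (!ctxt)
		return 1;

	printf("allocated %zu bytes\n", 16 * sizeof(*ctxt));
	free(ctxt);
	return 0;
}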
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index 307a6d4af993..d39eec9c62bf 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -82,11 +82,6 @@
struct hinic_func_to_io, \
cmdqs)
-enum cmdq_wqe_type {
- WQE_LCMD_TYPE = 0,
- WQE_SCMD_TYPE = 1,
-};
-
enum completion_format {
COMPLETE_DIRECT = 0,
COMPLETE_SGE = 1,
@@ -509,8 +504,8 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
*
* Return 0 - Success, negative - Failure
**/
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id)
+static int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
+ enum hinic_set_arm_qtype q_type, u32 q_id)
{
struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
struct hinic_hwif *hwif = cmdqs->hwif;
@@ -796,11 +791,10 @@ static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
struct hinic_cmdq_ctxt *cmdq_ctxts;
struct pci_dev *pdev = hwif->pdev;
struct hinic_pfhwdev *pfhwdev;
- size_t cmdq_ctxts_size;
int err;
- cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
- cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
+ cmdq_ctxts = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
+ sizeof(*cmdq_ctxts), GFP_KERNEL);
if (!cmdq_ctxts)
return -ENOMEM;
@@ -884,7 +878,6 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
struct pci_dev *pdev = hwif->pdev;
struct hinic_hwdev *hwdev;
- size_t saved_wqs_size;
u16 max_wqe_size;
int err;
@@ -895,8 +888,8 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
if (!cmdqs->cmdq_buf_pool)
return -ENOMEM;
- saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
- cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
+ cmdqs->saved_wqs = devm_kcalloc(&pdev->dev, HINIC_MAX_CMDQ_TYPES,
+ sizeof(*cmdqs->saved_wqs), GFP_KERNEL);
if (!cmdqs->saved_wqs) {
err = -ENOMEM;
goto err_saved_wqs;
@@ -931,7 +924,7 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
err_set_cmdq_depth:
hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
-
+ free_cmdq(&cmdqs->cmdq[HINIC_CMDQ_SYNC]);
err_cmdq_ctxt:
hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
HINIC_MAX_CMDQ_TYPES);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
index 9c413e963a04..ff09cf0ed52b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
@@ -177,9 +177,6 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
enum hinic_mod_type mod, u8 cmd,
struct hinic_cmdq_buf *buf_in, u64 *out_param);
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id);
-
int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
void __iomem **db_area);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
index 7e84e4e33fff..d56e7413ace0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
@@ -22,7 +22,6 @@
(HINIC_DMA_ATTR_BASE + (idx) * HINIC_DMA_ATTR_STRIDE)
#define HINIC_PPF_ELECTION_STRIDE 0x4
-#define HINIC_CSR_MAX_PORTS 4
#define HINIC_CSR_PPF_ELECTION_ADDR(idx) \
(HINIC_ELECTION_BASE + (idx) * HINIC_PPF_ELECTION_STRIDE)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 657a15447bd0..27795288c586 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -29,7 +29,6 @@
#include "hinic_hw_io.h"
#include "hinic_hw_dev.h"
-#define IO_STATUS_TIMEOUT 100
#define OUTBOUND_STATE_TIMEOUT 100
#define DB_STATE_TIMEOUT 100
@@ -42,11 +41,6 @@ enum intr_type {
INTR_MSIX_TYPE,
};
-enum io_status {
- IO_STOPPED = 0,
- IO_RUNNING = 1,
-};
-
/**
* parse_capability - convert device capabilities to NIC capabilities
* @hwdev: the HW device to set and convert device capabilities for
@@ -162,7 +156,6 @@ static int init_msix(struct hinic_hwdev *hwdev)
struct hinic_hwif *hwif = hwdev->hwif;
struct pci_dev *pdev = hwif->pdev;
int nr_irqs, num_aeqs, num_ceqs;
- size_t msix_entries_size;
int i, err;
num_aeqs = HINIC_HWIF_NUM_AEQS(hwif);
@@ -171,8 +164,8 @@ static int init_msix(struct hinic_hwdev *hwdev)
if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif))
nr_irqs = HINIC_HWIF_NUM_IRQS(hwif);
- msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries);
- hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size,
+ hwdev->msix_entries = devm_kcalloc(&pdev->dev, nr_irqs,
+ sizeof(*hwdev->msix_entries),
GFP_KERNEL);
if (!hwdev->msix_entries)
return -ENOMEM;
@@ -838,8 +831,8 @@ static int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
return 0;
}
-int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
- struct hinic_msix_config *interrupt_info)
+static int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
+ struct hinic_msix_config *interrupt_info)
{
u16 out_size = sizeof(*interrupt_info);
struct hinic_pfhwdev *pfhwdev;
@@ -884,7 +877,7 @@ int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
if (err)
return -EINVAL;
- interrupt_info->lli_credit_cnt = temp_info.lli_timer_cnt;
+ interrupt_info->lli_credit_cnt = temp_info.lli_credit_cnt;
interrupt_info->lli_timer_cnt = temp_info.lli_timer_cnt;
err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
@@ -1042,13 +1035,6 @@ void hinic_free_hwdev(struct hinic_hwdev *hwdev)
hinic_free_hwif(hwdev->hwif);
}
-int hinic_hwdev_max_num_qps(struct hinic_hwdev *hwdev)
-{
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
-
- return nic_cap->max_qps;
-}
-
/**
* hinic_hwdev_num_qps - return the number QPs available for use
* @hwdev: the NIC HW device
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 416492e48274..6b5797e69781 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -46,104 +46,170 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_VF_REGISTER = 0x0,
HINIC_PORT_CMD_VF_UNREGISTER = 0x1,
- HINIC_PORT_CMD_CHANGE_MTU = 2,
+ HINIC_PORT_CMD_CHANGE_MTU = 0x2,
- HINIC_PORT_CMD_ADD_VLAN = 3,
- HINIC_PORT_CMD_DEL_VLAN = 4,
+ HINIC_PORT_CMD_ADD_VLAN = 0x3,
+ HINIC_PORT_CMD_DEL_VLAN = 0x4,
- HINIC_PORT_CMD_SET_PFC = 5,
+ HINIC_PORT_CMD_SET_ETS = 0x7,
+ HINIC_PORT_CMD_GET_ETS = 0x8,
- HINIC_PORT_CMD_SET_MAC = 9,
- HINIC_PORT_CMD_GET_MAC = 10,
- HINIC_PORT_CMD_DEL_MAC = 11,
+ HINIC_PORT_CMD_SET_PFC = 0x5,
- HINIC_PORT_CMD_SET_RX_MODE = 12,
+ HINIC_PORT_CMD_SET_MAC = 0x9,
+ HINIC_PORT_CMD_GET_MAC = 0xA,
+ HINIC_PORT_CMD_DEL_MAC = 0xB,
- HINIC_PORT_CMD_GET_PAUSE_INFO = 20,
- HINIC_PORT_CMD_SET_PAUSE_INFO = 21,
+ HINIC_PORT_CMD_SET_RX_MODE = 0xC,
- HINIC_PORT_CMD_GET_LINK_STATE = 24,
+ HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE = 0xD,
- HINIC_PORT_CMD_SET_LRO = 25,
+ HINIC_PORT_CMD_GET_PAUSE_INFO = 0x14,
+ HINIC_PORT_CMD_SET_PAUSE_INFO = 0x15,
- HINIC_PORT_CMD_SET_RX_CSUM = 26,
+ HINIC_PORT_CMD_GET_LINK_STATE = 0x18,
- HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD = 27,
+ HINIC_PORT_CMD_SET_LRO = 0x19,
- HINIC_PORT_CMD_GET_PORT_STATISTICS = 28,
+ HINIC_PORT_CMD_SET_RX_CSUM = 0x1A,
- HINIC_PORT_CMD_CLEAR_PORT_STATISTICS = 29,
+ HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD = 0x1B,
- HINIC_PORT_CMD_GET_VPORT_STAT = 30,
+ HINIC_PORT_CMD_GET_PORT_STATISTICS = 0x1C,
- HINIC_PORT_CMD_CLEAN_VPORT_STAT = 31,
+ HINIC_PORT_CMD_CLEAR_PORT_STATISTICS = 0x1D,
- HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL = 37,
+ HINIC_PORT_CMD_GET_VPORT_STAT = 0x1E,
- HINIC_PORT_CMD_SET_PORT_STATE = 41,
+ HINIC_PORT_CMD_CLEAN_VPORT_STAT = 0x1F,
- HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL = 43,
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL = 0x25,
- HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL = 44,
+ HINIC_PORT_CMD_SET_PORT_STATE = 0x29,
+ HINIC_PORT_CMD_GET_PORT_STATE = 0x30,
- HINIC_PORT_CMD_SET_RSS_HASH_ENGINE = 45,
+ HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL = 0x2B,
- HINIC_PORT_CMD_GET_RSS_HASH_ENGINE = 46,
+ HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL = 0x2C,
- HINIC_PORT_CMD_GET_RSS_CTX_TBL = 47,
+ HINIC_PORT_CMD_SET_RSS_HASH_ENGINE = 0x2D,
- HINIC_PORT_CMD_SET_RSS_CTX_TBL = 48,
+ HINIC_PORT_CMD_GET_RSS_HASH_ENGINE = 0x2E,
- HINIC_PORT_CMD_RSS_TEMP_MGR = 49,
+ HINIC_PORT_CMD_GET_RSS_CTX_TBL = 0x2F,
- HINIC_PORT_CMD_RD_LINE_TBL = 57,
+ HINIC_PORT_CMD_SET_RSS_CTX_TBL = 0x30,
- HINIC_PORT_CMD_RSS_CFG = 66,
+ HINIC_PORT_CMD_RSS_TEMP_MGR = 0x31,
- HINIC_PORT_CMD_FWCTXT_INIT = 69,
+ HINIC_PORT_CMD_RD_LINE_TBL = 0x39,
- HINIC_PORT_CMD_GET_LOOPBACK_MODE = 72,
- HINIC_PORT_CMD_SET_LOOPBACK_MODE,
+ HINIC_PORT_CMD_RSS_CFG = 0x42,
- HINIC_PORT_CMD_ENABLE_SPOOFCHK = 78,
+ HINIC_PORT_CMD_GET_PHY_TYPE = 0x44,
- HINIC_PORT_CMD_GET_MGMT_VERSION = 88,
+ HINIC_PORT_CMD_FWCTXT_INIT = 0x45,
- HINIC_PORT_CMD_SET_FUNC_STATE = 93,
+ HINIC_PORT_CMD_GET_LOOPBACK_MODE = 0x48,
+ HINIC_PORT_CMD_SET_LOOPBACK_MODE = 0x49,
- HINIC_PORT_CMD_GET_GLOBAL_QPN = 102,
+ HINIC_PORT_CMD_GET_JUMBO_FRAME_SIZE = 0x4A,
+ HINIC_PORT_CMD_SET_JUMBO_FRAME_SIZE = 0x4B,
- HINIC_PORT_CMD_SET_VF_RATE = 105,
+ HINIC_PORT_CMD_ENABLE_SPOOFCHK = 0x4E,
- HINIC_PORT_CMD_SET_VF_VLAN = 106,
+ HINIC_PORT_CMD_GET_MGMT_VERSION = 0x58,
- HINIC_PORT_CMD_CLR_VF_VLAN,
+ HINIC_PORT_CMD_GET_PORT_TYPE = 0x5B,
- HINIC_PORT_CMD_SET_TSO = 112,
+ HINIC_PORT_CMD_SET_FUNC_STATE = 0x5D,
- HINIC_PORT_CMD_UPDATE_FW = 114,
+ HINIC_PORT_CMD_GET_PORT_ID_BY_FUNC_ID = 0x5E,
- HINIC_PORT_CMD_SET_RQ_IQ_MAP = 115,
+ HINIC_PORT_CMD_GET_DMA_CS = 0x64,
+ HINIC_PORT_CMD_SET_DMA_CS = 0x65,
- HINIC_PORT_CMD_LINK_STATUS_REPORT = 160,
+ HINIC_PORT_CMD_GET_GLOBAL_QPN = 0x66,
- HINIC_PORT_CMD_UPDATE_MAC = 164,
+ HINIC_PORT_CMD_SET_VF_RATE = 0x69,
- HINIC_PORT_CMD_GET_CAP = 170,
+ HINIC_PORT_CMD_SET_VF_VLAN = 0x6A,
- HINIC_PORT_CMD_GET_LINK_MODE = 217,
+ HINIC_PORT_CMD_CLR_VF_VLAN = 0x6B,
- HINIC_PORT_CMD_SET_SPEED = 218,
+ HINIC_PORT_CMD_SET_TSO = 0x70,
- HINIC_PORT_CMD_SET_AUTONEG = 219,
+ HINIC_PORT_CMD_UPDATE_FW = 0x72,
- HINIC_PORT_CMD_GET_STD_SFP_INFO = 240,
+ HINIC_PORT_CMD_SET_RQ_IQ_MAP = 0x73,
- HINIC_PORT_CMD_SET_LRO_TIMER = 244,
+ HINIC_PORT_CMD_SET_PFC_THD = 0x75,
- HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 249,
+ HINIC_PORT_CMD_LINK_STATUS_REPORT = 0xA0,
- HINIC_PORT_CMD_GET_SFP_ABS = 251,
+ HINIC_PORT_CMD_SET_LOSSLESS_ETH = 0xA3,
+
+ HINIC_PORT_CMD_UPDATE_MAC = 0xA4,
+
+ HINIC_PORT_CMD_GET_CAP = 0xAA,
+
+ HINIC_PORT_CMD_UP_TC_ADD_FLOW = 0xAF,
+ HINIC_PORT_CMD_UP_TC_DEL_FLOW = 0xB0,
+ HINIC_PORT_CMD_UP_TC_GET_FLOW = 0xB1,
+
+ HINIC_PORT_CMD_UP_TC_FLUSH_TCAM = 0xB2,
+
+ HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK = 0xB3,
+
+ HINIC_PORT_CMD_UP_TC_ENABLE = 0xB4,
+
+ HINIC_PORT_CMD_UP_TC_GET_TCAM_BLOCK = 0xB5,
+
+ HINIC_PORT_CMD_SET_IPSU_MAC = 0xCB,
+ HINIC_PORT_CMD_GET_IPSU_MAC = 0xCC,
+
+ HINIC_PORT_CMD_SET_XSFP_STATUS = 0xD4,
+
+ HINIC_PORT_CMD_GET_LINK_MODE = 0xD9,
+
+ HINIC_PORT_CMD_SET_SPEED = 0xDA,
+
+ HINIC_PORT_CMD_SET_AUTONEG = 0xDB,
+
+ HINIC_PORT_CMD_CLEAR_QP_RES = 0xDD,
+
+ HINIC_PORT_CMD_SET_SUPER_CQE = 0xDE,
+
+ HINIC_PORT_CMD_SET_VF_COS = 0xDF,
+ HINIC_PORT_CMD_GET_VF_COS = 0xE1,
+
+ HINIC_PORT_CMD_CABLE_PLUG_EVENT = 0xE5,
+
+ HINIC_PORT_CMD_LINK_ERR_EVENT = 0xE6,
+
+ HINIC_PORT_CMD_SET_COS_UP_MAP = 0xE8,
+
+ HINIC_PORT_CMD_RESET_LINK_CFG = 0xEB,
+
+ HINIC_PORT_CMD_GET_STD_SFP_INFO = 0xF0,
+
+ HINIC_PORT_CMD_FORCE_PKT_DROP = 0xF3,
+
+ HINIC_PORT_CMD_SET_LRO_TIMER = 0xF4,
+
+ HINIC_PORT_CMD_SET_VHD_CFG = 0xF7,
+
+ HINIC_PORT_CMD_SET_LINK_FOLLOW = 0xF8,
+
+ HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 0xF9,
+
+ HINIC_PORT_CMD_GET_SFP_ABS = 0xFB,
+
+ HINIC_PORT_CMD_Q_FILTER = 0xFC,
+
+ HINIC_PORT_CMD_TCAM_FILTER = 0xFE,
+
+ HINIC_PORT_CMD_SET_VLAN_FILTER = 0xFF,
};
/* cmd of mgmt CPU message for HILINK module */
@@ -566,8 +632,6 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev, struct devlink *devli
void hinic_free_hwdev(struct hinic_hwdev *hwdev);
-int hinic_hwdev_max_num_qps(struct hinic_hwdev *hwdev);
-
int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev);
struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i);
@@ -587,9 +651,6 @@ int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index,
enum hinic_msix_state flag);
-int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
- struct hinic_msix_config *interrupt_info);
-
int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
struct hinic_msix_config *interrupt_info);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index d3fc05a07fdb..045c47786a04 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -631,16 +631,15 @@ static int alloc_eq_pages(struct hinic_eq *eq)
struct hinic_hwif *hwif = eq->hwif;
struct pci_dev *pdev = hwif->pdev;
u32 init_val, addr, val;
- size_t addr_size;
int err, pg;
- addr_size = eq->num_pages * sizeof(*eq->dma_addr);
- eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
+ eq->dma_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
+ sizeof(*eq->dma_addr), GFP_KERNEL);
if (!eq->dma_addr)
return -ENOMEM;
- addr_size = eq->num_pages * sizeof(*eq->virt_addr);
- eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL);
+ eq->virt_addr = devm_kcalloc(&pdev->dev, eq->num_pages,
+ sizeof(*eq->virt_addr), GFP_KERNEL);
if (!eq->virt_addr) {
err = -ENOMEM;
goto err_virt_addr_alloc;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index 0428faa68e80..88567305d06e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -58,39 +58,6 @@ int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
}
/**
- * hinic_msix_attr_get - get message attribute of msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer)
-{
- u32 addr, val;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index);
- val = hinic_hwif_read_reg(hwif, addr);
-
- *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT);
- *coalesc_timer = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER);
- *lli_timer = HINIC_MSIX_ATTR_GET(val, LLI_TIMER);
- *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT);
- *resend_timer = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER);
- return 0;
-}
-
-/**
* hinic_msix_attr_cnt_clear - clear message attribute counters for msix entry
* @hwif: the HW interface of a pci function device
* @msix_index: msix_index
@@ -115,8 +82,6 @@ int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index)
* hinic_set_pf_action - set action on pf channel
* @hwif: the HW interface of a pci function device
* @action: action on pf channel
- *
- * Return 0 - Success, negative - Failure
**/
void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action)
{
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
index c06f2253151e..3d588896a367 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
@@ -131,10 +131,6 @@
(((u32)(val) & HINIC_MSIX_##member##_MASK) << \
HINIC_MSIX_##member##_SHIFT)
-#define HINIC_MSIX_ATTR_GET(val, member) \
- (((val) >> HINIC_MSIX_##member##_SHIFT) & \
- HINIC_MSIX_##member##_MASK)
-
#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29
#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x1
@@ -269,11 +265,6 @@ int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
u8 lli_timer_cfg, u8 lli_credit_limit,
u8 resend_timer);
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer_cfg,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer);
-
void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
enum hinic_msix_state flag);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index a6e43d686293..c4a0ba6e183a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -375,31 +375,30 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io,
{
struct hinic_hwif *hwif = func_to_io->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t qps_size, wq_size, db_size;
void *ci_addr_base;
int i, j, err;
- qps_size = num_qps * sizeof(*func_to_io->qps);
- func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL);
+ func_to_io->qps = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->qps), GFP_KERNEL);
if (!func_to_io->qps)
return -ENOMEM;
- wq_size = num_qps * sizeof(*func_to_io->sq_wq);
- func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
+ func_to_io->sq_wq = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->sq_wq), GFP_KERNEL);
if (!func_to_io->sq_wq) {
err = -ENOMEM;
goto err_sq_wq;
}
- wq_size = num_qps * sizeof(*func_to_io->rq_wq);
- func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL);
+ func_to_io->rq_wq = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->rq_wq), GFP_KERNEL);
if (!func_to_io->rq_wq) {
err = -ENOMEM;
goto err_rq_wq;
}
- db_size = num_qps * sizeof(*func_to_io->sq_db);
- func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL);
+ func_to_io->sq_db = devm_kcalloc(&pdev->dev, num_qps,
+ sizeof(*func_to_io->sq_db), GFP_KERNEL);
if (!func_to_io->sq_db) {
err = -ENOMEM;
goto err_sq_db;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
index 5078c0c73863..3f9c31d29215 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -117,7 +117,6 @@ enum hinic_mbox_tx_status {
#define MBOX_WB_STATUS_MASK 0xFF
#define MBOX_WB_ERROR_CODE_MASK 0xFF00
#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
-#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE
#define MBOX_WB_STATUS_NOT_FINISHED 0x00
#define MBOX_STATUS_FINISHED(wb) \
@@ -130,11 +129,8 @@ enum hinic_mbox_tx_status {
#define SEQ_ID_START_VAL 0
#define SEQ_ID_MAX_VAL 42
-#define DST_AEQ_IDX_DEFAULT_VAL 0
-#define SRC_AEQ_IDX_DEFAULT_VAL 0
#define NO_DMA_ATTRIBUTE_VAL 0
-#define HINIC_MGMT_RSP_AEQN 0
#define HINIC_MBOX_RSP_AEQN 2
#define HINIC_MBOX_RECV_AEQN 0
@@ -146,7 +142,6 @@ enum hinic_mbox_tx_status {
#define IS_PF_OR_PPF_SRC(src_func_idx) ((src_func_idx) < HINIC_MAX_PF_FUNCS)
-#define MBOX_RESPONSE_ERROR 0x1
#define MBOX_MSG_ID_MASK 0xFF
#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id)
#define MBOX_MSG_ID_INC(func_to_func_mbox) (MBOX_MSG_ID(func_to_func_mbox) = \
@@ -621,7 +616,7 @@ static bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func
return false;
}
-void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
+static void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
{
struct hinic_mbox_func_to_func *func_to_func;
u64 mbox_header = *((u64 *)header);
@@ -649,7 +644,7 @@ void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
}
-void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
+static void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
{
struct hinic_mbox_func_to_func *func_to_func;
struct hinic_send_mbox *send_mbox;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
index 46953190d29e..33ac7814d3b3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
@@ -150,10 +150,6 @@ void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
enum hinic_mod_type mod);
-void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size);
-
-void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size);
-
int hinic_func_to_func_init(struct hinic_hwdev *hwdev);
void hinic_func_to_func_free(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index ebc77771f5da..4aa1f433ed24 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -643,6 +643,7 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
err = alloc_msg_buf(pf_to_mgmt);
if (err) {
dev_err(&pdev->dev, "Failed to allocate msg buffers\n");
+ destroy_workqueue(pf_to_mgmt->workq);
hinic_health_reporters_destroy(hwdev->devlink_dev);
return err;
}
@@ -650,6 +651,7 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif);
if (err) {
dev_err(&pdev->dev, "Failed to initialize cmd chains\n");
+ destroy_workqueue(pf_to_mgmt->workq);
hinic_health_reporters_destroy(hwdev->devlink_dev);
return err;
}
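
[Editorial note] The hinic_pf_to_mgmt_init() fixes above follow the usual unwind rule: anything created before a failing step must be destroyed on that error path. A generic sketch with hypothetical names (demo_mgmt, demo_later_step):

#include <linux/errno.h>
#include <linux/workqueue.h>

struct demo_mgmt {
        struct workqueue_struct *workq;
};

static int demo_later_step(struct demo_mgmt *m)
{
        return 0;       /* stand-in for msg-buffer / cmd-chain setup */
}

static int demo_mgmt_init(struct demo_mgmt *m)
{
        int err;

        m->workq = create_singlethread_workqueue("demo_mgmt");
        if (!m->workq)
                return -ENOMEM;

        err = demo_later_step(m);
        if (err) {
                destroy_workqueue(m->workq);    /* undo the earlier allocation */
                return err;
        }

        return 0;
}
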
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index 336248aa2e48..537a8098bc4e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -472,8 +472,7 @@ int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
return atomic_read(&wq->delta) - 1;
}
-static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx,
- int nr_descs)
+static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, int nr_descs)
{
u32 ctrl_size, task_size, bufdesc_size;
@@ -588,18 +587,16 @@ void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
/**
* hinic_sq_prepare_wqe - prepare wqe before insert to the queue
* @sq: send queue
- * @prod_idx: pi value
* @sq_wqe: wqe to prepare
* @sges: sges for use by the wqe for send for buf addresses
* @nr_sges: number of sges
**/
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges,
- int nr_sges)
+void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *sq_wqe,
+ struct hinic_sge *sges, int nr_sges)
{
int i;
- sq_prepare_ctrl(&sq_wqe->ctrl, prod_idx, nr_sges);
+ sq_prepare_ctrl(&sq_wqe->ctrl, nr_sges);
sq_prepare_task(&sq_wqe->task);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index 0dfa51ad5855..178dcc874370 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -175,9 +175,8 @@ void hinic_set_tso_inner_l4(struct hinic_sq_task *task,
u32 l4_len,
u32 offset, u32 ip_ident, u32 mss);
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
- int nr_sges);
+void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *wqe,
+ struct hinic_sge *sges, int nr_sges);
void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
unsigned int cos);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 7f0f1aa3cedd..e1a1735c00c1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -175,8 +175,6 @@ static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
/**
* cmdq_free_page - free page from cmdq
* @cmdq_pages: the pages of the cmdq queue struct that hold the page
- *
- * Return 0 - Success, negative - Failure
**/
static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
{
@@ -193,20 +191,20 @@ static int alloc_page_arrays(struct hinic_wqs *wqs)
{
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t size;
- size = wqs->num_pages * sizeof(*wqs->page_paddr);
- wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->page_paddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->page_paddr), GFP_KERNEL);
if (!wqs->page_paddr)
return -ENOMEM;
- size = wqs->num_pages * sizeof(*wqs->page_vaddr);
- wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->page_vaddr), GFP_KERNEL);
if (!wqs->page_vaddr)
goto err_page_vaddr;
- size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
- wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wqs->shadow_page_vaddr = devm_kcalloc(&pdev->dev, wqs->num_pages,
+ sizeof(*wqs->shadow_page_vaddr),
+ GFP_KERNEL);
if (!wqs->shadow_page_vaddr)
goto err_page_shadow_vaddr;
@@ -379,15 +377,14 @@ static int alloc_wqes_shadow(struct hinic_wq *wq)
{
struct hinic_hwif *hwif = wq->hwif;
struct pci_dev *pdev = hwif->pdev;
- size_t size;
- size = wq->num_q_pages * wq->max_wqe_size;
- wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wq->shadow_wqe = devm_kcalloc(&pdev->dev, wq->num_q_pages,
+ wq->max_wqe_size, GFP_KERNEL);
if (!wq->shadow_wqe)
return -ENOMEM;
- size = wq->num_q_pages * sizeof(wq->prod_idx);
- wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ wq->shadow_idx = devm_kcalloc(&pdev->dev, wq->num_q_pages,
+ sizeof(*wq->shadow_idx), GFP_KERNEL);
if (!wq->shadow_idx)
goto err_shadow_idx;
@@ -772,7 +769,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
/* If we only have one page, still need to get shadown wqe when
* wqe rolling-over page
*/
- if (curr_pg != end_pg || MASKED_WQE_IDX(wq, end_prod_idx) < *prod_idx) {
+ if (curr_pg != end_pg || end_prod_idx < *prod_idx) {
void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);
@@ -842,7 +839,10 @@ struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
*cons_idx = curr_cons_idx;
- if (curr_pg != end_pg) {
+ /* If we only have one page, still need to get shadown wqe when
+ * wqe rolling-over page
+ */
+ if (curr_pg != end_pg || end_cons_idx < curr_cons_idx) {
void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];
copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
index f4b6d2c1061f..c6bdeed5606e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
@@ -261,23 +261,6 @@
#define HINIC_RSS_TYPE_GET(val, member) \
(((u32)(val) >> HINIC_RSS_TYPE_##member##_SHIFT) & 0x1)
-enum hinic_l4offload_type {
- HINIC_L4_OFF_DISABLE = 0,
- HINIC_TCP_OFFLOAD_ENABLE = 1,
- HINIC_SCTP_OFFLOAD_ENABLE = 2,
- HINIC_UDP_OFFLOAD_ENABLE = 3,
-};
-
-enum hinic_vlan_offload {
- HINIC_VLAN_OFF_DISABLE = 0,
- HINIC_VLAN_OFF_ENABLE = 1,
-};
-
-enum hinic_pkt_parsed {
- HINIC_PKT_NOT_PARSED = 0,
- HINIC_PKT_PARSED = 1,
-};
-
enum hinic_l3_offload_type {
L3TYPE_UNKNOWN = 0,
IPV6_PKT = 1,
@@ -305,18 +288,10 @@ enum hinic_outer_l3type {
HINIC_OUTER_L3TYPE_IPV4_CHKSUM = 3,
};
-enum hinic_media_type {
- HINIC_MEDIA_UNKNOWN = 0,
-};
-
enum hinic_l2type {
HINIC_L2TYPE_ETH = 0,
};
-enum hinc_tunnel_l4type {
- HINIC_TUNNEL_L4TYPE_UNKNOWN = 0,
-};
-
struct hinic_cmdq_header {
u32 header_info;
u32 saved_data;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index f9a766b8ac43..499c657d37a9 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -62,8 +62,6 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
#define HINIC_LRO_RX_TIMER_DEFAULT 16
-#define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8)
-
#define work_to_rx_mode_work(work) \
container_of(work, struct hinic_rx_mode_work, work)
@@ -82,56 +80,44 @@ static int set_features(struct hinic_dev *nic_dev,
netdev_features_t pre_features,
netdev_features_t features, bool force_change);
-static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq)
+static void gather_rx_stats(struct hinic_rxq_stats *nic_rx_stats, struct hinic_rxq *rxq)
{
- struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats;
struct hinic_rxq_stats rx_stats;
- u64_stats_init(&rx_stats.syncp);
-
hinic_rxq_get_stats(rxq, &rx_stats);
- u64_stats_update_begin(&nic_rx_stats->syncp);
nic_rx_stats->bytes += rx_stats.bytes;
nic_rx_stats->pkts += rx_stats.pkts;
nic_rx_stats->errors += rx_stats.errors;
nic_rx_stats->csum_errors += rx_stats.csum_errors;
nic_rx_stats->other_errors += rx_stats.other_errors;
- u64_stats_update_end(&nic_rx_stats->syncp);
-
- hinic_rxq_clean_stats(rxq);
}
-static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq)
+static void gather_tx_stats(struct hinic_txq_stats *nic_tx_stats, struct hinic_txq *txq)
{
- struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats;
struct hinic_txq_stats tx_stats;
- u64_stats_init(&tx_stats.syncp);
-
hinic_txq_get_stats(txq, &tx_stats);
- u64_stats_update_begin(&nic_tx_stats->syncp);
nic_tx_stats->bytes += tx_stats.bytes;
nic_tx_stats->pkts += tx_stats.pkts;
nic_tx_stats->tx_busy += tx_stats.tx_busy;
nic_tx_stats->tx_wake += tx_stats.tx_wake;
nic_tx_stats->tx_dropped += tx_stats.tx_dropped;
nic_tx_stats->big_frags_pkts += tx_stats.big_frags_pkts;
- u64_stats_update_end(&nic_tx_stats->syncp);
-
- hinic_txq_clean_stats(txq);
}
-static void update_nic_stats(struct hinic_dev *nic_dev)
+static void gather_nic_stats(struct hinic_dev *nic_dev,
+ struct hinic_rxq_stats *nic_rx_stats,
+ struct hinic_txq_stats *nic_tx_stats)
{
int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
for (i = 0; i < num_qps; i++)
- update_rx_stats(nic_dev, &nic_dev->rxqs[i]);
+ gather_rx_stats(nic_rx_stats, &nic_dev->rxqs[i]);
for (i = 0; i < num_qps; i++)
- update_tx_stats(nic_dev, &nic_dev->txqs[i]);
+ gather_tx_stats(nic_tx_stats, &nic_dev->txqs[i]);
}
/**
@@ -144,13 +130,12 @@ static int create_txqs(struct hinic_dev *nic_dev)
{
int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
- size_t txq_size;
if (nic_dev->txqs)
return -EINVAL;
- txq_size = num_txqs * sizeof(*nic_dev->txqs);
- nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL);
+ nic_dev->txqs = devm_kcalloc(&netdev->dev, num_txqs,
+ sizeof(*nic_dev->txqs), GFP_KERNEL);
if (!nic_dev->txqs)
return -ENOMEM;
@@ -241,13 +226,12 @@ static int create_rxqs(struct hinic_dev *nic_dev)
{
int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev);
struct net_device *netdev = nic_dev->netdev;
- size_t rxq_size;
if (nic_dev->rxqs)
return -EINVAL;
- rxq_size = num_rxqs * sizeof(*nic_dev->rxqs);
- nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL);
+ nic_dev->rxqs = devm_kcalloc(&netdev->dev, num_rxqs,
+ sizeof(*nic_dev->rxqs), GFP_KERNEL);
if (!nic_dev->rxqs)
return -ENOMEM;
@@ -562,8 +546,6 @@ int hinic_close(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- update_nic_stats(nic_dev);
-
up(&nic_dev->mgmt_lock);
if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
@@ -857,26 +839,19 @@ static void hinic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct hinic_dev *nic_dev = netdev_priv(netdev);
- struct hinic_rxq_stats *nic_rx_stats;
- struct hinic_txq_stats *nic_tx_stats;
-
- nic_rx_stats = &nic_dev->rx_stats;
- nic_tx_stats = &nic_dev->tx_stats;
-
- down(&nic_dev->mgmt_lock);
+ struct hinic_rxq_stats nic_rx_stats = {};
+ struct hinic_txq_stats nic_tx_stats = {};
if (nic_dev->flags & HINIC_INTF_UP)
- update_nic_stats(nic_dev);
-
- up(&nic_dev->mgmt_lock);
+ gather_nic_stats(nic_dev, &nic_rx_stats, &nic_tx_stats);
- stats->rx_bytes = nic_rx_stats->bytes;
- stats->rx_packets = nic_rx_stats->pkts;
- stats->rx_errors = nic_rx_stats->errors;
+ stats->rx_bytes = nic_rx_stats.bytes;
+ stats->rx_packets = nic_rx_stats.pkts;
+ stats->rx_errors = nic_rx_stats.errors;
- stats->tx_bytes = nic_tx_stats->bytes;
- stats->tx_packets = nic_tx_stats->pkts;
- stats->tx_errors = nic_tx_stats->tx_dropped;
+ stats->tx_bytes = nic_tx_stats.bytes;
+ stats->tx_packets = nic_tx_stats.pkts;
+ stats->tx_errors = nic_tx_stats.tx_dropped;
}
static int hinic_set_features(struct net_device *netdev,
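
[Editorial note] Illustrative sketch (names are made up) of the pattern hinic_get_stats64() moves to above: each queue keeps its counters under a u64_stats_sync sequence counter, and the stats path snapshots them into on-stack totals instead of updating driver-global statistics under a lock.

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_q_stats {
        struct u64_stats_sync syncp;
        u64 pkts;
        u64 bytes;
};

/* Read one queue's counters consistently and add them to the totals. */
static void demo_gather(const struct demo_q_stats *q, u64 *pkts, u64 *bytes)
{
        unsigned int start;
        u64 p, b;

        do {
                start = u64_stats_fetch_begin(&q->syncp);
                p = q->pkts;
                b = q->bytes;
        } while (u64_stats_fetch_retry(&q->syncp, start));

        *pkts += p;
        *bytes += b;
}
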
@@ -985,8 +960,6 @@ static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
* @in_size: input size
* @buf_out: output buffer
* @out_size: returned output size
- *
- * Return 0 - Success, negative - Failure
**/
static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
void *buf_out, u16 *out_size)
@@ -1119,6 +1092,16 @@ static int set_features(struct hinic_dev *nic_dev,
}
}
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ ret = hinic_set_vlan_fliter(nic_dev,
+ !!(features &
+ NETIF_F_HW_VLAN_CTAG_FILTER));
+ if (ret) {
+ err = ret;
+ failed_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+ }
+
if (err) {
nic_dev->netdev->features = features ^ failed_features;
return -EIO;
@@ -1175,8 +1158,6 @@ static void hinic_free_intr_coalesce(struct hinic_dev *nic_dev)
static int nic_dev_init(struct pci_dev *pdev)
{
struct hinic_rx_mode_work *rx_mode_work;
- struct hinic_txq_stats *tx_stats;
- struct hinic_rxq_stats *rx_stats;
struct hinic_dev *nic_dev;
struct net_device *netdev;
struct hinic_hwdev *hwdev;
@@ -1216,7 +1197,8 @@ static int nic_dev_init(struct pci_dev *pdev)
else
netdev->netdev_ops = &hinicvf_netdev_ops;
- netdev->max_mtu = ETH_MAX_MTU;
+ netdev->max_mtu = HINIC_MAX_MTU_SIZE;
+ netdev->min_mtu = HINIC_MIN_MTU_SIZE;
nic_dev = netdev_priv(netdev);
nic_dev->netdev = netdev;
@@ -1238,15 +1220,8 @@ static int nic_dev_init(struct pci_dev *pdev)
sema_init(&nic_dev->mgmt_lock, 1);
- tx_stats = &nic_dev->tx_stats;
- rx_stats = &nic_dev->rx_stats;
-
- u64_stats_init(&tx_stats->syncp);
- u64_stats_init(&rx_stats->syncp);
-
- nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev,
- VLAN_BITMAP_SIZE(nic_dev),
- GFP_KERNEL);
+ nic_dev->vlan_bitmap = devm_bitmap_zalloc(&pdev->dev, VLAN_N_VID,
+ GFP_KERNEL);
if (!nic_dev->vlan_bitmap) {
err = -ENOMEM;
goto err_vlan_bitmap;
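
[Editorial note] A hedged sketch of the vlan_bitmap conversion shown above: devm_bitmap_zalloc() takes the number of bits directly, so the byte-count arithmetic of the old VLAN_BITMAP_SIZE() macro is no longer needed. The helper name is illustrative only.

#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>

static unsigned long *demo_alloc_vlan_bitmap(struct device *dev)
{
        /* one bit per possible VLAN ID, zero-initialised, device-managed */
        return devm_bitmap_zalloc(dev, VLAN_N_VID, GFP_KERNEL);
}
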
@@ -1394,12 +1369,8 @@ static int hinic_probe(struct pci_dev *pdev,
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n");
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "Failed to set DMA mask\n");
- goto err_dma_mask;
- }
+ dev_err(&pdev->dev, "Failed to set DMA mask\n");
+ goto err_dma_mask;
}
err = nic_dev_init(pdev);
@@ -1420,8 +1391,6 @@ err_pci_regions:
return err;
}
-#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000
-
static void wait_sriov_cfg_complete(struct hinic_dev *nic_dev)
{
struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info;
@@ -1516,8 +1485,15 @@ static struct pci_driver hinic_driver = {
static int __init hinic_module_init(void)
{
+ int ret;
+
hinic_dbg_register_debugfs(HINIC_DRV_NAME);
- return pci_register_driver(&hinic_driver);
+
+ ret = pci_register_driver(&hinic_driver);
+ if (ret)
+ hinic_dbg_unregister_debugfs();
+
+ return ret;
}
static void __exit hinic_module_exit(void)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 28ae6f1201a8..9406237c461e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -17,9 +17,6 @@
#include "hinic_port.h"
#include "hinic_dev.h"
-#define HINIC_MIN_MTU_SIZE 256
-#define HINIC_MAX_JUMBO_FRAME_SIZE 15872
-
enum mac_op {
MAC_DEL,
MAC_SET,
@@ -147,24 +144,12 @@ int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr)
**/
int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu)
{
- struct net_device *netdev = nic_dev->netdev;
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_port_mtu_cmd port_mtu_cmd;
struct hinic_hwif *hwif = hwdev->hwif;
u16 out_size = sizeof(port_mtu_cmd);
struct pci_dev *pdev = hwif->pdev;
- int err, max_frame;
-
- if (new_mtu < HINIC_MIN_MTU_SIZE) {
- netif_err(nic_dev, drv, netdev, "mtu < MIN MTU size");
- return -EINVAL;
- }
-
- max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- if (max_frame > HINIC_MAX_JUMBO_FRAME_SIZE) {
- netif_err(nic_dev, drv, netdev, "mtu > MAX MTU size");
- return -EINVAL;
- }
+ int err;
port_mtu_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
port_mtu_cmd.mtu = new_mtu;
@@ -462,6 +447,39 @@ int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en)
return 0;
}
+int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en)
+{
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif = hwdev->hwif;
+ struct pci_dev *pdev = hwif->pdev;
+ struct hinic_vlan_filter vlan_filter;
+ u16 out_size = sizeof(vlan_filter);
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ vlan_filter.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+ vlan_filter.enable = en;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_VLAN_FILTER,
+ &vlan_filter, sizeof(vlan_filter),
+ &vlan_filter, &out_size);
+ if (vlan_filter.status == HINIC_MGMT_CMD_UNSUPPORTED) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if ((err == HINIC_MBOX_VF_CMD_ERROR) &&
+ HINIC_IS_VF(hwif)) {
+ err = HINIC_MGMT_CMD_UNSUPPORTED;
+ } else if (err || !out_size || vlan_filter.status) {
+ dev_err(&pdev->dev,
+ "Failed to set vlan fliter, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, vlan_filter.status, out_size);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs)
{
struct hinic_hwdev *hwdev = nic_dev->hwdev;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index c9ae3d4dc547..c8694ac7c702 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -351,6 +351,16 @@ struct hinic_vlan_cfg {
u8 rsvd1[5];
};
+struct hinic_vlan_filter {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_idx;
+ u8 rsvd1[2];
+ u32 enable;
+};
+
struct hinic_rss_template_mgmt {
u8 status;
u8 version;
@@ -831,6 +841,8 @@ int hinic_get_vport_stats(struct hinic_dev *nic_dev,
int hinic_set_rx_vlan_offload(struct hinic_dev *nic_dev, u8 en);
+int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en);
+
int hinic_get_mgmt_version(struct hinic_dev *nic_dev, u8 *mgmt_ver);
int hinic_set_link_settings(struct hinic_hwdev *hwdev,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index fed3b6bc0d76..ceec8be2a73b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -50,7 +50,7 @@
* hinic_rxq_clean_stats - Clean the statistics of specific queue
* @rxq: Logical Rx Queue
**/
-void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
+static void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
@@ -73,7 +73,6 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
unsigned int start;
- u64_stats_update_begin(&stats->syncp);
do {
start = u64_stats_fetch_begin(&rxq_stats->syncp);
stats->pkts = rxq_stats->pkts;
@@ -83,7 +82,6 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
stats->csum_errors = rxq_stats->csum_errors;
stats->other_errors = rxq_stats->other_errors;
} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
- u64_stats_update_end(&stats->syncp);
}
/**
@@ -481,7 +479,8 @@ static void rx_add_napi(struct hinic_rxq *rxq)
{
struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
- netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight);
+ netif_napi_add_weight(rxq->netdev, &rxq->napi, rx_poll,
+ nic_dev->rx_weight);
napi_enable(&rxq->napi);
}
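
[Editorial note] Sketch of the NAPI registration change (demo names, not driver code): netif_napi_add() now always uses the default weight, so drivers that still pass a custom budget switch to netif_napi_add_weight(), as rx_add_napi() does above.

#include <linux/netdevice.h>

static int demo_poll(struct napi_struct *napi, int budget)
{
        /* nothing to do in this sketch; report the queue as done */
        napi_complete_done(napi, 0);
        return 0;
}

static void demo_add_napi(struct net_device *dev, struct napi_struct *napi,
                          int weight)
{
        netif_napi_add_weight(dev, napi, demo_poll, weight);
        napi_enable(napi);
}
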
@@ -548,7 +547,7 @@ static int rx_request_irq(struct hinic_rxq *rxq)
goto err_req_irq;
cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
- err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
+ err = irq_set_affinity_and_hint(rq->irq, &rq->affinity_mask);
if (err)
goto err_irq_affinity;
@@ -565,7 +564,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
{
struct hinic_rq *rq = rxq->rq;
- irq_set_affinity_hint(rq->irq, NULL);
+ irq_update_affinity_hint(rq->irq, NULL);
free_irq(rq->irq, rxq);
rx_del_napi(rxq);
}
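
[Editorial note] Illustrative only: irq_set_affinity_and_hint() applies the CPU mask and records it as the affinity hint in one call, and the hint is dropped on teardown with irq_update_affinity_hint(irq, NULL), matching the rx_request_irq()/rx_free_irq() changes above. Wrapper names are hypothetical.

#include <linux/cpumask.h>
#include <linux/interrupt.h>

static int demo_pin_irq(unsigned int irq, const struct cpumask *mask)
{
        /* sets the affinity and publishes it as the hint for irqbalance */
        return irq_set_affinity_and_hint(irq, mask);
}

static void demo_unpin_irq(unsigned int irq)
{
        irq_update_affinity_hint(irq, NULL);    /* clear the hint before free_irq() */
}
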
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
index 507dcbae9085..8f7bd6a049bd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
@@ -41,8 +41,6 @@ struct hinic_rxq {
struct napi_struct napi;
};
-void hinic_rxq_clean_stats(struct hinic_rxq *rxq);
-
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index 01e7d3c0b68e..ee357088d021 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -24,6 +24,7 @@ MODULE_PARM_DESC(set_vf_link_state, "Set vf link state, 0 represents link auto,
#define HINIC_VLAN_PRIORITY_SHIFT 13
#define HINIC_ADD_VLAN_IN_MAC 0x8000
#define HINIC_TX_RATE_TABLE_FULL 12
+#define HINIC_MAX_QOS 7
static int hinic_set_mac(struct hinic_hwdev *hwdev, const u8 *mac_addr,
u16 vlan_id, u16 func_id)
@@ -488,6 +489,24 @@ static struct vf_cmd_check_handle nic_cmd_support_vf[] = {
{HINIC_PORT_CMD_UPDATE_MAC, hinic_mbox_check_func_id_8B},
{HINIC_PORT_CMD_GET_CAP, hinic_mbox_check_func_id_8B},
{HINIC_PORT_CMD_GET_LINK_MODE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_GET_VF_COS, NULL},
+ {HINIC_PORT_CMD_SET_VHD_CFG, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_VLAN_FILTER, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_Q_FILTER, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_TCAM_FILTER, NULL},
+ {HINIC_PORT_CMD_UP_TC_ADD_FLOW, NULL},
+ {HINIC_PORT_CMD_UP_TC_DEL_FLOW, NULL},
+ {HINIC_PORT_CMD_UP_TC_FLUSH_TCAM, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_UP_TC_CTRL_TCAM_BLOCK, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_UP_TC_ENABLE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_CABLE_PLUG_EVENT, NULL},
+ {HINIC_PORT_CMD_LINK_ERR_EVENT, NULL},
+ {HINIC_PORT_CMD_SET_PORT_STATE, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_ETS, NULL},
+ {HINIC_PORT_CMD_SET_ANTI_ATTACK_RATE, NULL},
+ {HINIC_PORT_CMD_RESET_LINK_CFG, hinic_mbox_check_func_id_8B},
+ {HINIC_PORT_CMD_SET_LINK_FOLLOW, NULL},
+ {HINIC_PORT_CMD_CLEAR_QP_RES, NULL},
};
#define CHECK_IPSU_15BIT 0X8000
@@ -774,7 +793,7 @@ int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
u16 vlanprio, cur_vlanprio;
sriov_info = &nic_dev->sriov_info;
- if (vf >= sriov_info->num_vfs || vlan > 4095 || qos > 7)
+ if (vf >= sriov_info->num_vfs || vlan >= VLAN_N_VID || qos > HINIC_MAX_QOS)
return -EINVAL;
if (vlan_proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
@@ -820,7 +839,7 @@ int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
cur_trust = nic_io->vf_infos[vf].trust;
/* same request, so just return success */
- if ((setting && cur_trust) || (!setting && !cur_trust))
+ if (setting == cur_trust)
return 0;
err = hinic_set_vf_trust(adapter->hwdev, vf, setting);
@@ -852,12 +871,6 @@ int hinic_ndo_set_vf_bw(struct net_device *netdev,
return -EINVAL;
}
- if (max_tx_rate < min_tx_rate) {
- netif_err(nic_dev, drv, netdev, "Max rate %d must be greater than or equal to min rate %d\n",
- max_tx_rate, min_tx_rate);
- return -EINVAL;
- }
-
err = hinic_port_link_state(nic_dev, &link_state);
if (err) {
netif_err(nic_dev, drv, netdev,
@@ -946,7 +959,7 @@ int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
cur_spoofchk = nic_dev->hwdev->func_to_io.vf_infos[vf].spoofchk;
/* same request, so just return success */
- if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk))
+ if (setting == cur_spoofchk)
return 0;
err = hinic_set_vf_spoofchk(sriov_info->hwdev,
@@ -1137,8 +1150,8 @@ static void hinic_clear_vf_infos(struct hinic_dev *nic_dev, u16 vf_id)
hinic_init_vf_infos(&nic_dev->hwdev->func_to_io, HW_VF_ID_TO_OS(vf_id));
}
-static int hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
- u16 start_vf_id, u16 end_vf_id)
+static void hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
+ u16 start_vf_id, u16 end_vf_id)
{
struct hinic_dev *nic_dev;
u16 func_idx, idx;
@@ -1151,8 +1164,6 @@ static int hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
HINIC_HW_WQ_PAGE_SIZE);
hinic_clear_vf_infos(nic_dev, idx);
}
-
- return 0;
}
int hinic_vf_func_init(struct hinic_hwdev *hwdev)
@@ -1181,7 +1192,6 @@ int hinic_vf_func_init(struct hinic_hwdev *hwdev)
dev_err(&hwdev->hwif->pdev->dev,
"Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n",
err, register_info.status, out_size);
- hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC);
return -EIO;
}
} else {
@@ -1299,7 +1309,7 @@ int hinic_pci_sriov_disable(struct pci_dev *pdev)
return 0;
}
-int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+static int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct hinic_sriov_info *sriov_info;
int err;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
index ba627a362f9a..d4d4e63d31ea 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
@@ -98,8 +98,6 @@ void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
int hinic_pci_sriov_disable(struct pci_dev *dev);
-int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs);
-
int hinic_vf_func_init(struct hinic_hwdev *hwdev);
void hinic_vf_func_free(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index c5bdb0d374ef..ad47ac51a139 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -4,6 +4,7 @@
* Copyright(c) 2017 Huawei Technologies Co., Ltd
*/
+#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
@@ -73,7 +74,7 @@ enum hinic_offload_type {
* hinic_txq_clean_stats - Clean the statistics of specific queue
* @txq: Logical Tx Queue
**/
-void hinic_txq_clean_stats(struct hinic_txq *txq)
+static void hinic_txq_clean_stats(struct hinic_txq *txq)
{
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
@@ -97,7 +98,6 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
unsigned int start;
- u64_stats_update_begin(&stats->syncp);
do {
start = u64_stats_fetch_begin(&txq_stats->syncp);
stats->pkts = txq_stats->pkts;
@@ -107,7 +107,6 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
stats->tx_dropped = txq_stats->tx_dropped;
stats->big_frags_pkts = txq_stats->big_frags_pkts;
} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
- u64_stats_update_end(&stats->syncp);
}
/**
@@ -531,7 +530,7 @@ netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
process_sq_wqe:
- hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+ hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
flush_skbs:
@@ -615,7 +614,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
process_sq_wqe:
- hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+ hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
if (err)
@@ -808,7 +807,8 @@ static int tx_request_irq(struct hinic_txq *txq)
qp = container_of(sq, struct hinic_qp, sq);
- netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, nic_dev->tx_weight);
+ netif_napi_add_weight(txq->netdev, &txq->napi, free_tx_poll,
+ nic_dev->tx_weight);
hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
@@ -862,7 +862,6 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_hwdev *hwdev = nic_dev->hwdev;
int err, irqname_len;
- size_t sges_size;
txq->netdev = netdev;
txq->sq = sq;
@@ -871,13 +870,13 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
txq->max_sges = HINIC_MAX_SQ_BUFDESCS;
- sges_size = txq->max_sges * sizeof(*txq->sges);
- txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+ txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->sges), GFP_KERNEL);
if (!txq->sges)
return -ENOMEM;
- sges_size = txq->max_sges * sizeof(*txq->free_sges);
- txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL);
+ txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges,
+ sizeof(*txq->free_sges), GFP_KERNEL);
if (!txq->free_sges) {
err = -ENOMEM;
goto err_alloc_free_sges;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
index b3c8657774a7..91dc778362f3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
@@ -40,8 +40,6 @@ struct hinic_txq {
struct napi_struct napi;
};
-void hinic_txq_clean_stats(struct hinic_txq *txq);
-
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats);
netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index b482f6f633bd..3ee89ae496d0 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1178,7 +1178,8 @@ found:
DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
for (i = 0; i < 6; i++)
- DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));
+ DEB(DEB_PROBE,printk(" %2.2X", eth_addr[i]));
+ eth_hw_addr_set(dev, eth_addr);
DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index c612ef526d16..3e7d7c4bafdc 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -986,6 +986,7 @@ static int
ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct net_device *dev;
+ u8 addr[ETH_ALEN];
int i, ret = 0;
ether1_banner();
@@ -1015,7 +1016,8 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = readb(IDPROM_ADDRESS + (i << 2));
+ addr[i] = readb(IDPROM_ADDRESS + (i << 2));
+ eth_hw_addr_set(dev, addr);
if (ether1_init_2(dev)) {
ret = -ENODEV;
diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
index 48e001881c75..0af70094aba3 100644
--- a/drivers/net/ethernet/i825xx/lasi_82596.c
+++ b/drivers/net/ethernet/i825xx/lasi_82596.c
@@ -147,6 +147,7 @@ lan_init_chip(struct parisc_device *dev)
struct net_device *netdevice;
struct i596_private *lp;
int retval = -ENOMEM;
+ u8 addr[ETH_ALEN];
int i;
if (!dev->irq) {
@@ -167,13 +168,14 @@ lan_init_chip(struct parisc_device *dev)
netdevice->base_addr = dev->hpa.start;
netdevice->irq = dev->irq;
- if (pdc_lan_station_id(netdevice->dev_addr, netdevice->base_addr)) {
+ if (pdc_lan_station_id(addr, netdevice->base_addr)) {
for (i = 0; i < 6; i++) {
- netdevice->dev_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
+ addr[i] = gsc_readb(LAN_PROM_ADDR + i);
}
printk(KERN_INFO
"%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
}
+ eth_hw_addr_set(netdevice, addr);
lp = netdev_priv(netdevice);
lp->options = dev->id.sversion == 0x72 ? OPT_SWAP_PORT : 0;
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 27937c5d7956..54bb4d9a0d1e 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -78,6 +78,7 @@ static int sni_82596_probe(struct platform_device *dev)
void __iomem *mpu_addr;
void __iomem *ca_addr;
u8 __iomem *eth_addr;
+ u8 mac[ETH_ALEN];
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
ca = platform_get_resource(dev, IORESOURCE_MEM, 1);
@@ -109,17 +110,19 @@ static int sni_82596_probe(struct platform_device *dev)
goto probe_failed;
/* someone seems to like messed up stuff */
- netdevice->dev_addr[0] = readb(eth_addr + 0x0b);
- netdevice->dev_addr[1] = readb(eth_addr + 0x0a);
- netdevice->dev_addr[2] = readb(eth_addr + 0x09);
- netdevice->dev_addr[3] = readb(eth_addr + 0x08);
- netdevice->dev_addr[4] = readb(eth_addr + 0x07);
- netdevice->dev_addr[5] = readb(eth_addr + 0x06);
+ mac[0] = readb(eth_addr + 0x0b);
+ mac[1] = readb(eth_addr + 0x0a);
+ mac[2] = readb(eth_addr + 0x09);
+ mac[3] = readb(eth_addr + 0x08);
+ mac[4] = readb(eth_addr + 0x07);
+ mac[5] = readb(eth_addr + 0x06);
+ eth_hw_addr_set(netdevice, mac);
iounmap(eth_addr);
- if (!netdevice->irq) {
+ if (netdevice->irq < 0) {
printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
__FILE__, netdevice->base_addr);
+ retval = netdevice->irq;
goto probe_failed;
}
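
[Editorial note] The i825xx conversions above share one pattern; a minimal sketch, assuming a readb()-accessible address PROM and using made-up names: the MAC is read into a local buffer and installed with eth_hw_addr_set(), since netdev->dev_addr can no longer be written directly.

#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/netdevice.h>

static void demo_set_mac_from_prom(struct net_device *dev,
                                   const u8 __iomem *prom)
{
        u8 addr[ETH_ALEN];
        int i;

        for (i = 0; i < ETH_ALEN; i++)
                addr[i] = readb(prom + i);      /* one byte per PROM location */

        eth_hw_addr_set(dev, addr);             /* copies into dev->dev_addr */
}
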
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h
index 79aef681ac85..d82eca563266 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.h
+++ b/drivers/net/ethernet/i825xx/sun3_82586.h
@@ -150,7 +150,7 @@ struct rfd_struct
#define RFD_ERR_RNR 0x02 /* status: receiver out of resources */
#define RFD_ERR_OVR 0x01 /* DMA Overrun! */
-#define RFD_ERR_FTS 0x0080 /* Frame to short */
+#define RFD_ERR_FTS 0x0080 /* Frame too short */
#define RFD_ERR_NEOP 0x0040 /* No EOP flag (for bitstuffing only) */
#define RFD_ERR_TRUN 0x0020 /* (82596 only/SF mode) indicates truncated frame */
#define RFD_MATCHADD 0x0002 /* status: Destinationaddress !matches IA (only 82596) */
@@ -250,7 +250,7 @@ struct mcsetup_cmd_struct
unsigned short cmd_cmd;
unsigned short cmd_link;
unsigned short mc_cnt; /* number of bytes in the MC-List */
- unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */
+ unsigned char mc_list[][6]; /* pointer to 6 bytes entries */
};
/*
diff --git a/drivers/net/ethernet/ibm/ehea/ehea.h b/drivers/net/ethernet/ibm/ehea/ehea.h
index b140835d4c23..208c440a602b 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea.h
+++ b/drivers/net/ethernet/ibm/ehea/ehea.h
@@ -19,6 +19,7 @@
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
+#include <linux/platform_device.h>
#include <asm/ibmebus.h>
#include <asm/io.h>
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 6cb86032ce46..1db5b6790a41 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -159,8 +159,8 @@ static int ehea_nway_reset(struct net_device *dev)
static void ehea_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static u32 ehea_get_msglevel(struct net_device *dev)
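
[Editorial note] Sketch of the strlcpy()-to-strscpy() swap seen in the drvinfo handlers: strscpy() never reads past a non-terminated source and reports truncation with -E2BIG instead of returning the source length. The wrapper below is illustrative only.

#include <linux/string.h>
#include <linux/types.h>

static void demo_fill_name(char *dst, size_t dst_size, const char *src)
{
        /* bounded copy; dst is always NUL-terminated when dst_size > 0 */
        strscpy(dst, src, dst_size);
}
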
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index bad94e4d50f4..b4aff59b3eb4 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -29,6 +29,8 @@
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <net/ip.h>
@@ -1544,7 +1546,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
kfree(init_attr);
- netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
+ netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll);
ret = 0;
goto out;
@@ -1615,7 +1617,7 @@ static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
* For TSO packets we only copy the headers into the
* immediate area.
*/
- immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ immediate_len = skb_tcp_all_headers(skb);
}
if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
@@ -2898,6 +2900,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
ret = of_device_register(&port->ofdev);
if (ret) {
pr_err("failed to register device. ret=%d\n", ret);
+ put_device(&port->ofdev.dev);
goto out;
}
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 6b3fc8823c54..c97095abd26a 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2137,8 +2137,11 @@ emac_ethtool_set_link_ksettings(struct net_device *ndev,
return 0;
}
-static void emac_ethtool_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *rp)
+static void
+emac_ethtool_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
@@ -2281,8 +2284,8 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
{
struct emac_instance *dev = netdev_priv(ndev);
- strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, "ibm_emac", sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %pOF",
dev->cell_index, dev->ofdev->dev.of_node);
}
@@ -2936,9 +2939,9 @@ static int emac_init_config(struct emac_instance *dev)
}
/* Fixup some feature bits based on the device tree */
- if (of_get_property(np, "has-inverted-stacr-oc", NULL))
+ if (of_property_read_bool(np, "has-inverted-stacr-oc"))
dev->features |= EMAC_FTR_STACR_OC_INVERT;
- if (of_get_property(np, "has-new-stacr-staopc", NULL))
+ if (of_property_read_bool(np, "has-new-stacr-staopc"))
dev->features |= EMAC_FTR_HAS_NEW_STACR;
/* CAB lacks the appropriate properties */
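
[Editorial note] Hedged sketch of the device-tree boolean check the EMAC hunks convert to: of_property_read_bool() tests for the presence of a property, replacing the of_get_property(np, name, NULL) != NULL idiom. Helper name is made up; the property name is taken from the patch.

#include <linux/of.h>
#include <linux/types.h>

static bool demo_has_mdio(const struct device_node *np)
{
        return of_property_read_bool(np, "has-mdio");
}
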
@@ -2976,11 +2979,9 @@ static int emac_init_config(struct emac_instance *dev)
/* Read MAC-address */
err = of_get_ethdev_address(np, dev->ndev);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&dev->ofdev->dev, "Can't get valid [local-]mac-address from OF !\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&dev->ofdev->dev, err,
+ "Can't get valid [local-]mac-address from OF !\n");
/* IAHT and GAHT filter parameterization */
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
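
[Editorial note] Sketch (demo wrapper, not EMAC code) of the dev_err_probe() conversion above: it returns the error it was given, logs it, and stays quiet for -EPROBE_DEFER, so the open-coded defer check disappears.

#include <linux/device.h>

static int demo_read_mac(struct device *dev, int err)
{
        if (err)
                return dev_err_probe(dev, err,
                                     "failed to read MAC address from DT\n");

        return 0;
}
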
@@ -3041,7 +3042,7 @@ static int emac_probe(struct platform_device *ofdev)
* property here for now, but new flat device trees should set a
* status property to "disabled" instead.
*/
- if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
+ if (of_property_read_bool(np, "unused") || !of_device_is_available(np))
return -ENODEV;
/* Find ourselves in the bootlist if we are there */
@@ -3332,7 +3333,7 @@ static void __init emac_make_bootlist(void)
if (of_match_node(emac_match, np) == NULL)
continue;
- if (of_get_property(np, "unused", NULL))
+ if (of_property_read_bool(np, "unused"))
continue;
idx = of_get_property(np, "cell-index", NULL);
if (idx == NULL)
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 075c07303f16..ff5487bbebe3 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -605,8 +605,8 @@ static int mal_probe(struct platform_device *ofdev)
init_dummy_netdev(&mal->dummy_dev);
- netif_napi_add(&mal->dummy_dev, &mal->napi, mal_poll,
- CONFIG_IBM_EMAC_POLL_WEIGHT);
+ netif_napi_add_weight(&mal->dummy_dev, &mal->napi, mal_poll,
+ CONFIG_IBM_EMAC_POLL_WEIGHT);
/* Load power-on reset defaults */
mal_reset(mal);
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index 242ef976fd15..50358cf00130 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -242,7 +242,7 @@ static int rgmii_probe(struct platform_device *ofdev)
}
/* Check for RGMII flags */
- if (of_get_property(ofdev->dev.of_node, "has-mdio", NULL))
+ if (of_property_read_bool(ofdev->dev.of_node, "has-mdio"))
dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO;
/* CAB lacks the right properties, fix this up */
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 45ba40cf4d07..113fcb3e353e 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -141,6 +141,13 @@ static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}
+static unsigned int ibmveth_real_max_tx_queues(void)
+{
+ unsigned int n_cpu = num_online_cpus();
+
+ return min(n_cpu, IBMVETH_MAX_QUEUES);
+}
+
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
u32 pool_index, u32 pool_size,
@@ -456,6 +463,38 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
}
}
+static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
+{
+ dma_unmap_single(&adapter->vdev->dev, adapter->tx_ltb_dma[idx],
+ adapter->tx_ltb_size, DMA_TO_DEVICE);
+ kfree(adapter->tx_ltb_ptr[idx]);
+ adapter->tx_ltb_ptr[idx] = NULL;
+}
+
+static int ibmveth_allocate_tx_ltb(struct ibmveth_adapter *adapter, int idx)
+{
+ adapter->tx_ltb_ptr[idx] = kzalloc(adapter->tx_ltb_size,
+ GFP_KERNEL);
+ if (!adapter->tx_ltb_ptr[idx]) {
+ netdev_err(adapter->netdev,
+ "unable to allocate tx long term buffer\n");
+ return -ENOMEM;
+ }
+ adapter->tx_ltb_dma[idx] = dma_map_single(&adapter->vdev->dev,
+ adapter->tx_ltb_ptr[idx],
+ adapter->tx_ltb_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->vdev->dev, adapter->tx_ltb_dma[idx])) {
+ netdev_err(adapter->netdev,
+ "unable to DMA map tx long term buffer\n");
+ kfree(adapter->tx_ltb_ptr[idx]);
+ adapter->tx_ltb_ptr[idx] = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
@@ -538,6 +577,11 @@ static int ibmveth_open(struct net_device *netdev)
goto out_unmap_buffer_list;
}
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ if (ibmveth_allocate_tx_ltb(adapter, i))
+ goto out_free_tx_ltb;
+ }
+
adapter->rx_queue.index = 0;
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
@@ -595,25 +639,15 @@ static int ibmveth_open(struct net_device *netdev)
rc = -ENOMEM;
- adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev,
- netdev->mtu + IBMVETH_BUFF_OH,
- &adapter->bounce_buffer_dma, GFP_KERNEL);
- if (!adapter->bounce_buffer) {
- netdev_err(netdev, "unable to alloc bounce buffer\n");
- goto out_free_irq;
- }
-
netdev_dbg(netdev, "initial replenish cycle\n");
ibmveth_interrupt(netdev->irq, netdev);
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
netdev_dbg(netdev, "open complete\n");
return 0;
-out_free_irq:
- free_irq(netdev->irq, netdev);
out_free_buffer_pools:
while (--i >= 0) {
if (adapter->rx_buff_pool[i].active)
@@ -623,6 +657,12 @@ out_free_buffer_pools:
out_unmap_filter_list:
dma_unmap_single(dev, adapter->filter_list_dma, 4096,
DMA_BIDIRECTIONAL);
+
+out_free_tx_ltb:
+ while (--i >= 0) {
+ ibmveth_free_tx_ltb(adapter, i);
+ }
+
out_unmap_buffer_list:
dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
DMA_BIDIRECTIONAL);
@@ -650,8 +690,7 @@ static int ibmveth_close(struct net_device *netdev)
napi_disable(&adapter->napi);
- if (!adapter->pool_config)
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
@@ -685,9 +724,8 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
- dma_free_coherent(&adapter->vdev->dev,
- adapter->netdev->mtu + IBMVETH_BUFF_OH,
- adapter->bounce_buffer, adapter->bounce_buffer_dma);
+ for (i = 0; i < netdev->real_num_tx_queues; i++)
+ ibmveth_free_tx_ltb(adapter, i);
netdev_dbg(netdev, "close complete\n");
@@ -727,8 +765,8 @@ static void ibmveth_init_link_settings(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
- strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
+ strscpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
+ strscpy(info->version, ibmveth_driver_version, sizeof(info->version));
}
static netdev_features_t ibmveth_fix_features(struct net_device *dev,
@@ -760,9 +798,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
if (netif_running(dev)) {
restart = 1;
- adapter->pool_config = 1;
ibmveth_close(dev);
- adapter->pool_config = 0;
}
set_attr = 0;
@@ -844,9 +880,7 @@ static int ibmveth_set_tso(struct net_device *dev, u32 data)
if (netif_running(dev)) {
restart = 1;
- adapter->pool_config = 1;
ibmveth_close(dev);
- adapter->pool_config = 0;
}
set_attr = 0;
@@ -953,6 +987,69 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}
+static void ibmveth_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ channels->max_tx = ibmveth_real_max_tx_queues();
+ channels->tx_count = netdev->real_num_tx_queues;
+
+ channels->max_rx = netdev->real_num_rx_queues;
+ channels->rx_count = netdev->real_num_rx_queues;
+}
+
+static int ibmveth_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned int old = netdev->real_num_tx_queues,
+ goal = channels->tx_count;
+ int rc, i;
+
+ /* If ndo_open has not been called yet then don't allocate; just set
+ * the desired number of netdev_queues and return
+ */
+ if (!(netdev->flags & IFF_UP))
+ return netif_set_real_num_tx_queues(netdev, goal);
+
+ /* We have IBMVETH_MAX_QUEUES netdev_queues allocated,
+ * but we may need to alloc/free the LTBs.
+ */
+ netif_tx_stop_all_queues(netdev);
+
+ /* Allocate any queue that we need */
+ for (i = old; i < goal; i++) {
+ if (adapter->tx_ltb_ptr[i])
+ continue;
+
+ rc = ibmveth_allocate_tx_ltb(adapter, i);
+ if (!rc)
+ continue;
+
+ /* if something goes wrong, free everything we just allocated */
+ netdev_err(netdev, "Failed to allocate more tx queues, returning to %d queues\n",
+ old);
+ goal = old;
+ old = i;
+ break;
+ }
+ rc = netif_set_real_num_tx_queues(netdev, goal);
+ if (rc) {
+ netdev_err(netdev, "Failed to set real tx queues, returning to %d queues\n",
+ old);
+ goal = old;
+ old = i;
+ }
+ /* Free any that are no longer needed */
+ for (i = old; i > goal; i--) {
+ if (adapter->tx_ltb_ptr[i - 1])
+ ibmveth_free_tx_ltb(adapter, i - 1);
+ }
+
+ netif_tx_wake_all_queues(netdev);
+
+ return rc;
+}
+
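Usage note (not part of the patch): once these hooks are wired into netdev_ethtool_ops just below, the transmit queue count can be changed at runtime through the standard ethtool channels interface, e.g. ethtool -L <iface> tx 4 to resize and ethtool -l <iface> to query; the receive side stays at a single queue because the netdev is allocated with one rx queue in ibmveth_probe() further down in this patch.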
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -961,6 +1058,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.get_ethtool_stats = ibmveth_get_ethtool_stats,
.get_link_ksettings = ibmveth_get_link_ksettings,
.set_link_ksettings = ibmveth_set_link_ksettings,
+ .get_channels = ibmveth_get_channels,
+ .set_channels = ibmveth_set_channels
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -969,7 +1068,7 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
static int ibmveth_send(struct ibmveth_adapter *adapter,
- union ibmveth_buf_desc *descs, unsigned long mss)
+ unsigned long desc, unsigned long mss)
{
unsigned long correlator;
unsigned int retry_count;
@@ -982,12 +1081,9 @@ static int ibmveth_send(struct ibmveth_adapter *adapter,
retry_count = 1024;
correlator = 0;
do {
- ret = h_send_logical_lan(adapter->vdev->unit_address,
- descs[0].desc, descs[1].desc,
- descs[2].desc, descs[3].desc,
- descs[4].desc, descs[5].desc,
- correlator, &correlator, mss,
- adapter->fw_large_send_support);
+ ret = h_send_logical_lan(adapter->vdev->unit_address, desc,
+ correlator, &correlator, mss,
+ adapter->fw_large_send_support);
} while ((ret == H_BUSY) && (retry_count--));
if (ret != H_SUCCESS && ret != H_DROPPED) {
@@ -1020,34 +1116,13 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
- unsigned int desc_flags;
- union ibmveth_buf_desc descs[6];
- int last, i;
- int force_bounce = 0;
- dma_addr_t dma_addr;
+ unsigned int desc_flags, total_bytes;
+ union ibmveth_buf_desc desc;
+ int i, queue_num = skb_get_queue_mapping(skb);
unsigned long mss = 0;
if (ibmveth_is_packet_unsupported(skb, netdev))
goto out;
-
- /* veth doesn't handle frag_list, so linearize the skb.
- * When GRO is enabled SKB's can have frag_list.
- */
- if (adapter->is_active_trunk &&
- skb_has_frag_list(skb) && __skb_linearize(skb)) {
- netdev->stats.tx_dropped++;
- goto out;
- }
-
- /*
- * veth handles a maximum of 6 segments including the header, so
- * we have to linearize the skb if there are more than this.
- */
- if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
- netdev->stats.tx_dropped++;
- goto out;
- }
-
/* veth can't checksum offload UDP */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
((skb->protocol == htons(ETH_P_IP) &&
@@ -1077,56 +1152,6 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
desc_flags |= IBMVETH_BUF_LRG_SND;
}
-retry_bounce:
- memset(descs, 0, sizeof(descs));
-
- /*
- * If a linear packet is below the rx threshold then
- * copy it into the static bounce buffer. This avoids the
- * cost of a TCE insert and remove.
- */
- if (force_bounce || (!skb_is_nonlinear(skb) &&
- (skb->len < tx_copybreak))) {
- skb_copy_from_linear_data(skb, adapter->bounce_buffer,
- skb->len);
-
- descs[0].fields.flags_len = desc_flags | skb->len;
- descs[0].fields.address = adapter->bounce_buffer_dma;
-
- if (ibmveth_send(adapter, descs, 0)) {
- adapter->tx_send_failed++;
- netdev->stats.tx_dropped++;
- } else {
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += skb->len;
- }
-
- goto out;
- }
-
- /* Map the header */
- dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
- goto map_failed;
-
- descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
- descs[0].fields.address = dma_addr;
-
- /* Map the frags */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
-
- if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
- goto map_failed_frags;
-
- descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
- descs[i+1].fields.address = dma_addr;
- }
-
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
if (adapter->fw_large_send_support) {
mss = (unsigned long)skb_shinfo(skb)->gso_size;
@@ -1143,7 +1168,36 @@ retry_bounce:
}
}
- if (ibmveth_send(adapter, descs, mss)) {
+ /* Copy header into mapped buffer */
+ if (unlikely(skb->len > adapter->tx_ltb_size)) {
+ netdev_err(adapter->netdev, "tx: packet size (%u) exceeds ltb (%u)\n",
+ skb->len, adapter->tx_ltb_size);
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+ memcpy(adapter->tx_ltb_ptr[queue_num], skb->data, skb_headlen(skb));
+ total_bytes = skb_headlen(skb);
+ /* Copy frags into mapped buffers */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ memcpy(adapter->tx_ltb_ptr[queue_num] + total_bytes,
+ skb_frag_address_safe(frag), skb_frag_size(frag));
+ total_bytes += skb_frag_size(frag);
+ }
+
+ if (unlikely(total_bytes != skb->len)) {
+ netdev_err(adapter->netdev, "tx: incorrect packet len copied into ltb (%u != %u)\n",
+ skb->len, total_bytes);
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+ desc.fields.flags_len = desc_flags | skb->len;
+ desc.fields.address = adapter->tx_ltb_dma[queue_num];
+ /* finish writing to long_term_buff before VIOS accessing it */
+ dma_wmb();
+
+ if (ibmveth_send(adapter, desc.desc, mss)) {
adapter->tx_send_failed++;
netdev->stats.tx_dropped++;
} else {
@@ -1151,41 +1205,11 @@ retry_bounce:
netdev->stats.tx_bytes += skb->len;
}
- dma_unmap_single(&adapter->vdev->dev,
- descs[0].fields.address,
- descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
-
- for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
- dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
- descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
-
out:
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
-map_failed_frags:
- last = i+1;
- for (i = 1; i < last; i++)
- dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
- descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
- dma_unmap_single(&adapter->vdev->dev,
- descs[0].fields.address,
- descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
-map_failed:
- if (!firmware_has_feature(FW_FEATURE_CMO))
- netdev_err(netdev, "tx: unable to map xmit buffer\n");
- adapter->tx_map_failed++;
- if (skb_linearize(skb)) {
- netdev->stats.tx_dropped++;
- goto out;
- }
- force_bounce = 1;
- goto retry_bounce;
}
static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
@@ -1506,9 +1530,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
only the buffer pools necessary to hold the new MTU */
if (netif_running(adapter->netdev)) {
need_restart = 1;
- adapter->pool_config = 1;
ibmveth_close(adapter->netdev);
- adapter->pool_config = 0;
}
/* Look for an active buffer pool that can hold the new MTU */
@@ -1568,6 +1590,8 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
+ /* add size of mapped tx buffers */
+ ret += IOMMU_PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE, tbl);
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
/* add the size of the active receive buffers */
@@ -1660,8 +1684,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
return -EINVAL;
}
- netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
-
+ netdev = alloc_etherdev_mqs(sizeof(struct ibmveth_adapter), IBMVETH_MAX_QUEUES, 1);
if (!netdev)
return -ENOMEM;
@@ -1671,10 +1694,9 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->vdev = dev;
adapter->netdev = netdev;
adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
- adapter->pool_config = 0;
ibmveth_init_link_settings(netdev);
- netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
+ netif_napi_add_weight(netdev, &adapter->napi, ibmveth_poll, 16);
netdev->irq = dev->irq;
netdev->netdev_ops = &ibmveth_netdev_ops;
@@ -1727,6 +1749,18 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
kobject_uevent(kobj, KOBJ_ADD);
}
+ rc = netif_set_real_num_tx_queues(netdev, min(num_online_cpus(),
+ IBMVETH_DEFAULT_QUEUES));
+ if (rc) {
+ netdev_dbg(netdev, "failed to set number of tx queues rc=%d\n",
+ rc);
+ free_netdev(netdev);
+ return rc;
+ }
+ adapter->tx_ltb_size = PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE);
+ for (i = 0; i < IBMVETH_MAX_QUEUES; i++)
+ adapter->tx_ltb_ptr[i] = NULL;
+
netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
netdev_dbg(netdev, "registering netdev...\n");
@@ -1800,9 +1834,7 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
return -ENOMEM;
}
pool->active = 1;
- adapter->pool_config = 1;
ibmveth_close(netdev);
- adapter->pool_config = 0;
if ((rc = ibmveth_open(netdev)))
return rc;
} else {
@@ -1828,10 +1860,8 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
}
if (netif_running(netdev)) {
- adapter->pool_config = 1;
ibmveth_close(netdev);
pool->active = 0;
- adapter->pool_config = 0;
if ((rc = ibmveth_open(netdev)))
return rc;
}
@@ -1842,9 +1872,7 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
return -EINVAL;
} else {
if (netif_running(netdev)) {
- adapter->pool_config = 1;
ibmveth_close(netdev);
- adapter->pool_config = 0;
pool->size = value;
if ((rc = ibmveth_open(netdev)))
return rc;
@@ -1857,9 +1885,7 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
return -EINVAL;
} else {
if (netif_running(netdev)) {
- adapter->pool_config = 1;
ibmveth_close(netdev);
- adapter->pool_config = 0;
pool->buff_size = value;
if ((rc = ibmveth_open(netdev)))
return rc;
@@ -1890,6 +1916,7 @@ static struct attribute *veth_pool_attrs[] = {
&veth_size_attr,
NULL,
};
+ATTRIBUTE_GROUPS(veth_pool);
static const struct sysfs_ops veth_pool_ops = {
.show = veth_pool_show,
@@ -1899,7 +1926,7 @@ static const struct sysfs_ops veth_pool_ops = {
static struct kobj_type ktype_veth_pool = {
.release = NULL,
.sysfs_ops = &veth_pool_ops,
- .default_attrs = veth_pool_attrs,
+ .default_groups = veth_pool_groups,
};
static int ibmveth_resume(struct device *dev)
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 27dfff200166..8468e2c59d7a 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -46,23 +46,23 @@
#define h_add_logical_lan_buffer(ua, buf) \
plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
+/* FW allows us to send 6 descriptors but we only use one so mark
+ * the other 5 as unused (0)
+ */
static inline long h_send_logical_lan(unsigned long unit_address,
- unsigned long desc1, unsigned long desc2, unsigned long desc3,
- unsigned long desc4, unsigned long desc5, unsigned long desc6,
- unsigned long corellator_in, unsigned long *corellator_out,
- unsigned long mss, unsigned long large_send_support)
+ unsigned long desc, unsigned long corellator_in,
+ unsigned long *corellator_out, unsigned long mss,
+ unsigned long large_send_support)
{
long rc;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
if (large_send_support)
rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
- desc1, desc2, desc3, desc4, desc5, desc6,
- corellator_in, mss);
+ desc, 0, 0, 0, 0, 0, corellator_in, mss);
else
rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
- desc1, desc2, desc3, desc4, desc5, desc6,
- corellator_in);
+ desc, 0, 0, 0, 0, 0, corellator_in);
*corellator_out = retbuf[0];
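For illustration (not part of the patch): with the driver now copying each frame into a per-queue long term buffer, a single ibmveth_buf_desc describes the whole packet. Assuming the existing union layout in ibmveth.h (a u64 desc overlaying fields.flags_len and fields.address), the transmit path earlier in this diff packs and sends it roughly like this:

union ibmveth_buf_desc desc;

desc.fields.flags_len = desc_flags | skb->len;          /* valid/csum/LSO flags plus length */
desc.fields.address   = adapter->tx_ltb_dma[queue_num]; /* DMA address of the per-queue LTB */
dma_wmb();                                              /* LTB writes visible before the hcall */
if (ibmveth_send(adapter, desc.desc, mss))              /* remaining 5 descriptor slots go as 0 */
	netdev->stats.tx_dropped++;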
@@ -98,6 +98,9 @@ static inline long h_illan_attributes(unsigned long unit_address,
#define IBMVETH_BUFF_LIST_SIZE 4096
#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
+#define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)
+#define IBMVETH_MAX_QUEUES 16U
+#define IBMVETH_DEFAULT_QUEUES 8U
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
@@ -137,16 +140,16 @@ struct ibmveth_adapter {
unsigned int mcastFilterSize;
void * buffer_list_addr;
void * filter_list_addr;
+ void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
+ unsigned int tx_ltb_size;
+ dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
dma_addr_t buffer_list_dma;
dma_addr_t filter_list_dma;
struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
struct ibmveth_rx_q rx_queue;
- int pool_config;
int rx_csum;
int large_send;
bool is_active_trunk;
- void *bounce_buffer;
- dma_addr_t bounce_buffer_dma;
u64 fw_ipv6_csum_support;
u64 fw_ipv4_csum_support;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0bb3911dd014..c63d3ec9d328 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -53,6 +53,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
@@ -60,12 +61,14 @@
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
+#include <asm/xive.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>
+#include <linux/cpu.h>
#include "ibmvnic.h"
@@ -110,6 +113,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb);
+static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -168,6 +172,196 @@ static int send_version_xchg(struct ibmvnic_adapter *adapter)
return ibmvnic_send_crq(adapter, &crq);
}
+static void ibmvnic_clean_queue_affinity(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_sub_crq_queue *queue)
+{
+ if (!(queue && queue->irq))
+ return;
+
+ cpumask_clear(queue->affinity_mask);
+
+ if (irq_set_affinity_and_hint(queue->irq, NULL))
+ netdev_warn(adapter->netdev,
+ "%s: Clear affinity failed, queue addr = %p, IRQ = %d\n",
+ __func__, queue, queue->irq);
+}
+
+static void ibmvnic_clean_affinity(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_sub_crq_queue **rxqs;
+ struct ibmvnic_sub_crq_queue **txqs;
+ int num_rxqs, num_txqs;
+ int rc, i;
+
+ rc = 0;
+ rxqs = adapter->rx_scrq;
+ txqs = adapter->tx_scrq;
+ num_txqs = adapter->num_active_tx_scrqs;
+ num_rxqs = adapter->num_active_rx_scrqs;
+
+ netdev_dbg(adapter->netdev, "%s: Cleaning irq affinity hints", __func__);
+ if (txqs) {
+ for (i = 0; i < num_txqs; i++)
+ ibmvnic_clean_queue_affinity(adapter, txqs[i]);
+ }
+ if (rxqs) {
+ for (i = 0; i < num_rxqs; i++)
+ ibmvnic_clean_queue_affinity(adapter, rxqs[i]);
+ }
+}
+
+static int ibmvnic_set_queue_affinity(struct ibmvnic_sub_crq_queue *queue,
+ unsigned int *cpu, int *stragglers,
+ int stride)
+{
+ cpumask_var_t mask;
+ int i;
+ int rc = 0;
+
+ if (!(queue && queue->irq))
+ return rc;
+
+ /* cpumask_var_t is either a pointer or array, allocation works here */
+ if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ /* while we have extra cpus, give one extra to this irq */
+ if (*stragglers) {
+ stride++;
+ (*stragglers)--;
+ }
+ /* atomic write is safer than writing bit by bit directly */
+ for (i = 0; i < stride; i++) {
+ cpumask_set_cpu(*cpu, mask);
+ *cpu = cpumask_next_wrap(*cpu, cpu_online_mask,
+ nr_cpu_ids, false);
+ }
+ /* set queue affinity mask */
+ cpumask_copy(queue->affinity_mask, mask);
+ rc = irq_set_affinity_and_hint(queue->irq, queue->affinity_mask);
+ free_cpumask_var(mask);
+
+ return rc;
+}
+
+/* assumes cpu read lock is held */
+static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq;
+ struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq;
+ struct ibmvnic_sub_crq_queue *queue;
+ int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
+ int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
+ int total_queues, stride, stragglers, i;
+ unsigned int num_cpu, cpu;
+ bool is_rx_queue;
+ int rc = 0;
+
+ netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__);
+ if (!(adapter->rx_scrq && adapter->tx_scrq)) {
+ netdev_warn(adapter->netdev,
+ "%s: Set affinity failed, queues not allocated\n",
+ __func__);
+ return;
+ }
+
+ total_queues = num_rxqs + num_txqs;
+ num_cpu = num_online_cpus();
+ /* number of cpus assigned per irq */
+ stride = max_t(int, num_cpu / total_queues, 1);
+ /* number of leftover cpus */
+ stragglers = num_cpu >= total_queues ? num_cpu % total_queues : 0;
+ /* next available cpu to assign irq to */
+ cpu = cpumask_next(-1, cpu_online_mask);
+
+ for (i = 0; i < total_queues; i++) {
+ is_rx_queue = false;
+ /* balance core load by alternating rx and tx assignments
+ * ex: TX0 -> RX0 -> TX1 -> RX1 etc.
+ */
+ if ((i % 2 == 1 && i_rxqs < num_rxqs) || i_txqs == num_txqs) {
+ queue = rxqs[i_rxqs++];
+ is_rx_queue = true;
+ } else {
+ queue = txqs[i_txqs++];
+ }
+
+ rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
+ stride);
+ if (rc)
+ goto out;
+
+ if (!queue || is_rx_queue)
+ continue;
+
+ rc = __netif_set_xps_queue(adapter->netdev,
+ cpumask_bits(queue->affinity_mask),
+ i_txqs - 1, XPS_CPUS);
+ if (rc)
+ netdev_warn(adapter->netdev, "%s: Set XPS on queue %d failed, rc = %d.\n",
+ __func__, i_txqs - 1, rc);
+ }
+
+out:
+ if (rc) {
+ netdev_warn(adapter->netdev,
+ "%s: Set affinity failed, queue addr = %p, IRQ = %d, rc = %d.\n",
+ __func__, queue, queue->irq, rc);
+ ibmvnic_clean_affinity(adapter);
+ }
+}
+
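A worked example of the distribution above (illustrative, not part of the patch): with 16 online CPUs and 6 sub-CRQ IRQs, stride = 16 / 6 = 2 and stragglers = 16 % 6 = 4, so the first four IRQs get 3 CPUs each and the last two get 2, covering all 16 CPUs. A standalone sketch of the same arithmetic:

/* illustrative arithmetic, mirrors ibmvnic_set_affinity()/ibmvnic_set_queue_affinity() */
unsigned int num_cpu = 16, total_queues = 6, q;
unsigned int stride = num_cpu / total_queues;                 /* 2 */
unsigned int stragglers = num_cpu >= total_queues ?
			  num_cpu % total_queues : 0;         /* 4 */

for (q = 0; q < total_queues; q++) {
	unsigned int cpus_for_irq = stride + (q < stragglers ? 1 : 0);
	/* q = 0..3 -> 3 CPUs, q = 4..5 -> 2 CPUs; 4*3 + 2*2 = 16 */
}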
+static int ibmvnic_cpu_online(unsigned int cpu, struct hlist_node *node)
+{
+ struct ibmvnic_adapter *adapter;
+
+ adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
+ ibmvnic_set_affinity(adapter);
+ return 0;
+}
+
+static int ibmvnic_cpu_dead(unsigned int cpu, struct hlist_node *node)
+{
+ struct ibmvnic_adapter *adapter;
+
+ adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node_dead);
+ ibmvnic_set_affinity(adapter);
+ return 0;
+}
+
+static int ibmvnic_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
+{
+ struct ibmvnic_adapter *adapter;
+
+ adapter = hlist_entry_safe(node, struct ibmvnic_adapter, node);
+ ibmvnic_clean_affinity(adapter);
+ return 0;
+}
+
+static enum cpuhp_state ibmvnic_online;
+
+static int ibmvnic_cpu_notif_add(struct ibmvnic_adapter *adapter)
+{
+ int ret;
+
+ ret = cpuhp_state_add_instance_nocalls(ibmvnic_online, &adapter->node);
+ if (ret)
+ return ret;
+ ret = cpuhp_state_add_instance_nocalls(CPUHP_IBMVNIC_DEAD,
+ &adapter->node_dead);
+ if (!ret)
+ return ret;
+ cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
+ return ret;
+}
+
+static void ibmvnic_cpu_notif_remove(struct ibmvnic_adapter *adapter)
+{
+ cpuhp_state_remove_instance_nocalls(ibmvnic_online, &adapter->node);
+ cpuhp_state_remove_instance_nocalls(CPUHP_IBMVNIC_DEAD,
+ &adapter->node_dead);
+}
+
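The dynamic hotplug state used above (ibmvnic_online) still has to be registered once, typically from the driver's module init path; that hunk is not shown here. A minimal sketch of how the multi-instance states are presumably paired with the callbacks defined above:

/* sketch, assuming registration happens once at module init */
rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/ibmvnic:online",
			     ibmvnic_cpu_online, ibmvnic_cpu_down_prep);
if (rc < 0)
	return rc;
ibmvnic_online = rc;                      /* dynamically allocated state id */

rc = cpuhp_setup_state_multi(CPUHP_IBMVNIC_DEAD, "net/ibmvnic:dead",
			     NULL, ibmvnic_cpu_dead);
if (rc)
	cpuhp_remove_multi_state(ibmvnic_online);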
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
unsigned long length, unsigned long *number,
unsigned long *irq)
@@ -255,12 +449,14 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
struct device *dev = &adapter->vdev->dev;
+ u64 prev = 0;
int rc;
if (!reuse_ltb(ltb, size)) {
dev_dbg(dev,
"LTB size changed from 0x%llx to 0x%x, reallocating\n",
ltb->size, size);
+ prev = ltb->size;
free_long_term_buff(adapter, ltb);
}
@@ -281,8 +477,8 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
bitmap_set(adapter->map_ids, ltb->map_id, 1);
dev_dbg(dev,
- "Allocated new LTB [map %d, size 0x%llx]\n",
- ltb->map_id, ltb->size);
+ "Allocated new LTB [map %d, size 0x%llx was 0x%llx]\n",
+ ltb->map_id, ltb->size, prev);
}
/* Ensure ltb is zeroed - specially when reusing it. */
@@ -308,7 +504,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
if (adapter->fw_done_rc) {
dev_err(dev, "Couldn't map LTB, rc = %d\n",
adapter->fw_done_rc);
- rc = -1;
+ rc = -EIO;
goto out;
}
rc = 0;
@@ -343,6 +539,208 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
ltb->map_id = 0;
}
+/**
+ * free_ltb_set - free the given set of long term buffers (LTBS)
+ * @adapter: The ibmvnic adapter containing this ltb set
+ * @ltb_set: The ltb_set to be freed
+ *
+ * Free the set of LTBs in the given set.
+ */
+
+static void free_ltb_set(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_ltb_set *ltb_set)
+{
+ int i;
+
+ for (i = 0; i < ltb_set->num_ltbs; i++)
+ free_long_term_buff(adapter, &ltb_set->ltbs[i]);
+
+ kfree(ltb_set->ltbs);
+ ltb_set->ltbs = NULL;
+ ltb_set->num_ltbs = 0;
+}
+
+/**
+ * alloc_ltb_set() - Allocate a set of long term buffers (LTBs)
+ *
+ * @adapter: ibmvnic adapter associated to the LTB
+ * @ltb_set: container object for the set of LTBs
+ * @num_buffs: Number of buffers in the LTB
+ * @buff_size: Size of each buffer in the LTB
+ *
+ * Allocate a set of LTBs to accommodate @num_buffs buffers of @buff_size
+ * each. We currently cap the size of each LTB to IBMVNIC_ONE_LTB_SIZE. If the
+ * new set of LTBs has fewer LTBs than the old set, free the excess LTBs.
+ * If the new set needs more LTBs than the old set, allocate the remaining ones.
+ * Try and reuse as many LTBs as possible and avoid reallocation.
+ *
+ * Any changes to this allocation strategy must be reflected in
+ * map_rxpool_buff_to_ltb() and map_txpool_buff_to_ltb().
+ */
+static int alloc_ltb_set(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_ltb_set *ltb_set, int num_buffs,
+ int buff_size)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_ltb_set old_set;
+ struct ibmvnic_ltb_set new_set;
+ int rem_size;
+ int tot_size; /* size of all ltbs */
+ int ltb_size; /* size of one ltb */
+ int nltbs;
+ int rc;
+ int n;
+ int i;
+
+ dev_dbg(dev, "%s() num_buffs %d, buff_size %d\n", __func__, num_buffs,
+ buff_size);
+
+ ltb_size = rounddown(IBMVNIC_ONE_LTB_SIZE, buff_size);
+ tot_size = num_buffs * buff_size;
+
+ if (ltb_size > tot_size)
+ ltb_size = tot_size;
+
+ nltbs = tot_size / ltb_size;
+ if (tot_size % ltb_size)
+ nltbs++;
+
+ old_set = *ltb_set;
+
+ if (old_set.num_ltbs == nltbs) {
+ new_set = old_set;
+ } else {
+ int tmp = nltbs * sizeof(struct ibmvnic_long_term_buff);
+
+ new_set.ltbs = kzalloc(tmp, GFP_KERNEL);
+ if (!new_set.ltbs)
+ return -ENOMEM;
+
+ new_set.num_ltbs = nltbs;
+
+ /* Free any excess ltbs in old set */
+ for (i = new_set.num_ltbs; i < old_set.num_ltbs; i++)
+ free_long_term_buff(adapter, &old_set.ltbs[i]);
+
+ /* Copy remaining ltbs to new set. All LTBs except the
+ * last one are of the same size. alloc_long_term_buff()
+ * will realloc if the size changes.
+ */
+ n = min(old_set.num_ltbs, new_set.num_ltbs);
+ for (i = 0; i < n; i++)
+ new_set.ltbs[i] = old_set.ltbs[i];
+
+ /* Any additional ltbs in new set will have NULL ltbs for
+ * now and will be allocated in alloc_long_term_buff().
+ */
+
+ /* We no longer need the old_set so free it. Note that we
+ * may have reused some ltbs from old set and freed excess
+ * ltbs above. So we only need to free the container now
+ * not the LTBs themselves. (i.e. don't free_ltb_set()!)
+ */
+ kfree(old_set.ltbs);
+ old_set.ltbs = NULL;
+ old_set.num_ltbs = 0;
+
+ /* Install the new set. If allocations fail below, we will
+ * retry later and know what size LTBs we need.
+ */
+ *ltb_set = new_set;
+ }
+
+ i = 0;
+ rem_size = tot_size;
+ while (rem_size) {
+ if (ltb_size > rem_size)
+ ltb_size = rem_size;
+
+ rem_size -= ltb_size;
+
+ rc = alloc_long_term_buff(adapter, &new_set.ltbs[i], ltb_size);
+ if (rc)
+ goto out;
+ i++;
+ }
+
+ WARN_ON(i != new_set.num_ltbs);
+
+ return 0;
+out:
+ /* We may have allocated one/more LTBs before failing and we
+ * want to try and reuse on next reset. So don't free ltb set.
+ */
+ return rc;
+}
+
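A worked example of the sizing above (illustrative; the exact IBMVNIC_ONE_LTB_SIZE value lives in ibmvnic.h and is assumed here to be 8 MB): for a pool of 4096 buffers of 16 KB each, tot_size is 64 MB, ltb_size is rounddown(8 MB, 16 KB) = 8 MB, and nltbs = 64 MB / 8 MB = 8, so the set ends up as eight fully packed LTBs holding 512 buffers each.

/* illustrative arithmetic only */
int buff_size = 16 * 1024, num_buffs = 4096;
int one_ltb  = 8 << 20;                        /* assumed IBMVNIC_ONE_LTB_SIZE */
int ltb_size = rounddown(one_ltb, buff_size);  /* 8 MB, already a multiple of 16 KB */
int tot_size = num_buffs * buff_size;          /* 64 MB */
int nltbs    = tot_size / ltb_size +
	       (tot_size % ltb_size ? 1 : 0);  /* 8 LTBs, 512 buffers each */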
+/**
+ * map_rxpool_buf_to_ltb - Map given rxpool buffer to offset in an LTB.
+ * @rxpool: The receive buffer pool containing buffer
+ * @bufidx: Index of buffer in rxpool
+ * @ltbp: (Output) pointer to the long term buffer containing the buffer
+ * @offset: (Output) offset of buffer in the LTB from @ltbp
+ *
+ * Map the given buffer identified by [rxpool, bufidx] to an LTB in the
+ * pool and its corresponding offset. For now, assume the LTBs in the set
+ * may differ in size; the lookup could be optimized based on the allocation
+ * strategy in alloc_ltb_set().
+ */
+static void map_rxpool_buf_to_ltb(struct ibmvnic_rx_pool *rxpool,
+ unsigned int bufidx,
+ struct ibmvnic_long_term_buff **ltbp,
+ unsigned int *offset)
+{
+ struct ibmvnic_long_term_buff *ltb;
+ int nbufs; /* # of buffers in one ltb */
+ int i;
+
+ WARN_ON(bufidx >= rxpool->size);
+
+ for (i = 0; i < rxpool->ltb_set.num_ltbs; i++) {
+ ltb = &rxpool->ltb_set.ltbs[i];
+ nbufs = ltb->size / rxpool->buff_size;
+ if (bufidx < nbufs)
+ break;
+ bufidx -= nbufs;
+ }
+
+ *ltbp = ltb;
+ *offset = bufidx * rxpool->buff_size;
+}
+
+/**
+ * map_txpool_buf_to_ltb - Map given txpool buffer to offset in an LTB.
+ * @txpool: The transmit buffer pool containing buffer
+ * @bufidx: Index of buffer in txpool
+ * @ltbp: (Output) pointer to the long term buffer (LTB) containing the buffer
+ * @offset: (Output) offset of buffer in the LTB from @ltbp
+ *
+ * Map the given buffer identified by [txpool, bufidx] to an LTB in the
+ * pool and its corresponding offset.
+ */
+static void map_txpool_buf_to_ltb(struct ibmvnic_tx_pool *txpool,
+ unsigned int bufidx,
+ struct ibmvnic_long_term_buff **ltbp,
+ unsigned int *offset)
+{
+ struct ibmvnic_long_term_buff *ltb;
+ int nbufs; /* # of buffers in one ltb */
+ int i;
+
+ WARN_ON_ONCE(bufidx >= txpool->num_buffers);
+
+ for (i = 0; i < txpool->ltb_set.num_ltbs; i++) {
+ ltb = &txpool->ltb_set.ltbs[i];
+ nbufs = ltb->size / txpool->buf_size;
+ if (bufidx < nbufs)
+ break;
+ bufidx -= nbufs;
+ }
+
+ *ltbp = ltb;
+ *offset = bufidx * txpool->buf_size;
+}
+
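To make the lookup concrete (illustrative only): continuing the example of LTBs holding 512 buffers each, bufidx 1500 walks past LTB 0 and LTB 1 (subtracting 512 twice), lands in LTB 2 with a residual index of 476, and the returned offset is 476 * buff_size. A compact sketch of the same walk for equally sized LTBs:

/* illustrative: bufidx -> (LTB index, offset), mirroring map_rxpool_buf_to_ltb() */
unsigned int bufidx = 1500, nbufs = 512, buff_size = 16 * 1024;
unsigned int i = 0, offset;

while (bufidx >= nbufs) {
	bufidx -= nbufs;
	i++;
}
offset = bufidx * buff_size;   /* i == 2, bufidx == 476 */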
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
int i;
@@ -359,6 +757,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_ind_xmit_queue *ind_bufp;
struct ibmvnic_sub_crq_queue *rx_scrq;
+ struct ibmvnic_long_term_buff *ltb;
union sub_crq *sub_crq;
int buffers_added = 0;
unsigned long lpar_rc;
@@ -367,7 +766,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
dma_addr_t dma_addr;
unsigned char *dst;
int shift = 0;
- int index;
+ int bufidx;
int i;
if (!pool->active)
@@ -383,14 +782,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
* be 0.
*/
for (i = ind_bufp->index; i < count; ++i) {
- index = pool->free_map[pool->next_free];
+ bufidx = pool->free_map[pool->next_free];
/* We may be reusing the skb from earlier resets. Allocate
* only if necessary. But since the LTB may have changed
* during reset (see init_rx_pools()), update LTB below
* even if reusing skb.
*/
- skb = pool->rx_buff[index].skb;
+ skb = pool->rx_buff[bufidx].skb;
if (!skb) {
skb = netdev_alloc_skb(adapter->netdev,
pool->buff_size);
@@ -405,26 +804,26 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
pool->next_free = (pool->next_free + 1) % pool->size;
/* Copy the skb to the long term mapped DMA buffer */
- offset = index * pool->buff_size;
- dst = pool->long_term_buff.buff + offset;
+ map_rxpool_buf_to_ltb(pool, bufidx, &ltb, &offset);
+ dst = ltb->buff + offset;
memset(dst, 0, pool->buff_size);
- dma_addr = pool->long_term_buff.addr + offset;
+ dma_addr = ltb->addr + offset;
/* add the skb to an rx_buff in the pool */
- pool->rx_buff[index].data = dst;
- pool->rx_buff[index].dma = dma_addr;
- pool->rx_buff[index].skb = skb;
- pool->rx_buff[index].pool_index = pool->index;
- pool->rx_buff[index].size = pool->buff_size;
+ pool->rx_buff[bufidx].data = dst;
+ pool->rx_buff[bufidx].dma = dma_addr;
+ pool->rx_buff[bufidx].skb = skb;
+ pool->rx_buff[bufidx].pool_index = pool->index;
+ pool->rx_buff[bufidx].size = pool->buff_size;
/* queue the rx_buff for the next send_subcrq_indirect */
sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
memset(sub_crq, 0, sizeof(*sub_crq));
sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
sub_crq->rx_add.correlator =
- cpu_to_be64((u64)&pool->rx_buff[index]);
+ cpu_to_be64((u64)&pool->rx_buff[bufidx]);
sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
- sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
+ sub_crq->rx_add.map_id = ltb->map_id;
/* The length field of the sCRQ is defined to be 24 bits so the
* buffer size needs to be left shifted by a byte before it is
@@ -464,10 +863,10 @@ failure:
sub_crq = &ind_bufp->indir_arr[i];
rx_buff = (struct ibmvnic_rx_buff *)
be64_to_cpu(sub_crq->rx_add.correlator);
- index = (int)(rx_buff - pool->rx_buff);
- pool->free_map[pool->next_free] = index;
- dev_kfree_skb_any(pool->rx_buff[index].skb);
- pool->rx_buff[index].skb = NULL;
+ bufidx = (int)(rx_buff - pool->rx_buff);
+ pool->free_map[pool->next_free] = bufidx;
+ dev_kfree_skb_any(pool->rx_buff[bufidx].skb);
+ pool->rx_buff[bufidx].skb = NULL;
}
adapter->replenish_add_buff_failure += ind_bufp->index;
atomic_add(buffers_added, &pool->available);
@@ -540,13 +939,15 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
dma_addr_t stok;
+ int rc;
stok = dma_map_single(dev, &adapter->stats,
sizeof(struct ibmvnic_statistics),
DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, stok)) {
- dev_err(dev, "Couldn't map stats buffer\n");
- return -1;
+ rc = dma_mapping_error(dev, stok);
+ if (rc) {
+ dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
+ return rc;
}
adapter->stats_token = stok;
@@ -575,7 +976,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
kfree(rx_pool->free_map);
- free_long_term_buff(adapter, &rx_pool->long_term_buff);
+ free_ltb_set(adapter, &rx_pool->ltb_set);
if (!rx_pool->rx_buff)
continue;
@@ -655,7 +1056,7 @@ static int init_rx_pools(struct net_device *netdev)
u64 num_pools;
u64 pool_size; /* # of buffers in one pool */
u64 buff_size;
- int i, j;
+ int i, j, rc;
pool_size = adapter->req_rx_add_entries_per_subcrq;
num_pools = adapter->req_rx_queues;
@@ -674,7 +1075,7 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL);
if (!adapter->rx_pool) {
dev_err(dev, "Failed to allocate rx pools\n");
- return -1;
+ return -ENOMEM;
}
/* Set num_active_rx_pools early. If we fail below after partial
@@ -697,6 +1098,7 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL);
if (!rx_pool->free_map) {
dev_err(dev, "Couldn't alloc free_map %d\n", i);
+ rc = -ENOMEM;
goto out_release;
}
@@ -705,6 +1107,7 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL);
if (!rx_pool->rx_buff) {
dev_err(dev, "Couldn't alloc rx buffers\n");
+ rc = -ENOMEM;
goto out_release;
}
}
@@ -718,8 +1121,9 @@ update_ltb:
dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
i, rx_pool->size, rx_pool->buff_size);
- if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
- rx_pool->size * rx_pool->buff_size))
+ rc = alloc_ltb_set(adapter, &rx_pool->ltb_set,
+ rx_pool->size, rx_pool->buff_size);
+ if (rc)
goto out;
for (j = 0; j < rx_pool->size; ++j) {
@@ -756,7 +1160,7 @@ out:
/* We failed to allocate one or more LTBs or map them on the VIOS.
* Hold onto the pools and any LTBs that we did allocate/map.
*/
- return -1;
+ return rc;
}
static void release_vpd_data(struct ibmvnic_adapter *adapter)
@@ -775,7 +1179,7 @@ static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
{
kfree(tx_pool->tx_buff);
kfree(tx_pool->free_map);
- free_long_term_buff(adapter, &tx_pool->long_term_buff);
+ free_ltb_set(adapter, &tx_pool->ltb_set);
}
/**
@@ -817,13 +1221,13 @@ static int init_one_tx_pool(struct net_device *netdev,
sizeof(struct ibmvnic_tx_buff),
GFP_KERNEL);
if (!tx_pool->tx_buff)
- return -1;
+ return -ENOMEM;
tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
if (!tx_pool->free_map) {
kfree(tx_pool->tx_buff);
tx_pool->tx_buff = NULL;
- return -1;
+ return -ENOMEM;
}
for (i = 0; i < pool_size; i++)
@@ -914,7 +1318,7 @@ static int init_tx_pools(struct net_device *netdev)
adapter->tx_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
if (!adapter->tx_pool)
- return -1;
+ return -ENOMEM;
adapter->tso_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
@@ -924,7 +1328,7 @@ static int init_tx_pools(struct net_device *netdev)
if (!adapter->tso_pool) {
kfree(adapter->tx_pool);
adapter->tx_pool = NULL;
- return -1;
+ return -ENOMEM;
}
/* Set num_active_tx_pools early. If we fail below after partial
@@ -965,17 +1369,16 @@ update_ltb:
for (i = 0; i < num_pools; i++) {
struct ibmvnic_tx_pool *tso_pool;
struct ibmvnic_tx_pool *tx_pool;
- u32 ltb_size;
tx_pool = &adapter->tx_pool[i];
- ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
- if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
- ltb_size))
- goto out;
- dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
- i, tx_pool->long_term_buff.buff,
- tx_pool->num_buffers, tx_pool->buf_size);
+ dev_dbg(dev, "Updating LTB for tx pool %d [%d, %d]\n",
+ i, tx_pool->num_buffers, tx_pool->buf_size);
+
+ rc = alloc_ltb_set(adapter, &tx_pool->ltb_set,
+ tx_pool->num_buffers, tx_pool->buf_size);
+ if (rc)
+ goto out;
tx_pool->consumer_index = 0;
tx_pool->producer_index = 0;
@@ -984,14 +1387,14 @@ update_ltb:
tx_pool->free_map[j] = j;
tso_pool = &adapter->tso_pool[i];
- ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
- if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
- ltb_size))
- goto out;
- dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
- i, tso_pool->long_term_buff.buff,
- tso_pool->num_buffers, tso_pool->buf_size);
+ dev_dbg(dev, "Updating LTB for tso pool %d [%d, %d]\n",
+ i, tso_pool->num_buffers, tso_pool->buf_size);
+
+ rc = alloc_ltb_set(adapter, &tso_pool->ltb_set,
+ tso_pool->num_buffers, tso_pool->buf_size);
+ if (rc)
+ goto out;
tso_pool->consumer_index = 0;
tso_pool->producer_index = 0;
@@ -1050,7 +1453,7 @@ static int init_napi(struct ibmvnic_adapter *adapter)
for (i = 0; i < adapter->req_rx_queues; i++) {
netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
netif_napi_add(adapter->netdev, &adapter->napi[i],
- ibmvnic_poll, NAPI_POLL_WEIGHT);
+ ibmvnic_poll);
}
adapter->num_active_rx_napi = adapter->req_rx_queues;
@@ -1113,7 +1516,7 @@ static int ibmvnic_login(struct net_device *netdev)
retry = false;
if (retry_count > retries) {
netdev_warn(netdev, "Login attempts exceeded\n");
- return -1;
+ return -EACCES;
}
adapter->init_done_rc = 0;
@@ -1154,25 +1557,26 @@ static int ibmvnic_login(struct net_device *netdev)
timeout)) {
netdev_warn(netdev,
"Capabilities query timed out\n");
- return -1;
+ return -ETIMEDOUT;
}
rc = init_sub_crqs(adapter);
if (rc) {
netdev_warn(netdev,
"SCRQ initialization failed\n");
- return -1;
+ return rc;
}
rc = init_sub_crq_irqs(adapter);
if (rc) {
netdev_warn(netdev,
"SCRQ irq initialization failed\n");
- return -1;
+ return rc;
}
} else if (adapter->init_done_rc) {
- netdev_warn(netdev, "Adapter login failed\n");
- return -1;
+ netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
+ adapter->init_done_rc);
+ return -EIO;
}
} while (retry);
@@ -1231,7 +1635,7 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
netdev_err(netdev, "timeout setting link state\n");
- return -1;
+ return -ETIMEDOUT;
}
if (adapter->init_done_rc == PARTIALSUCCESS) {
@@ -1418,10 +1822,19 @@ static int __ibmvnic_open(struct net_device *netdev)
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
if (rc) {
ibmvnic_napi_disable(adapter);
- release_resources(adapter);
+ ibmvnic_disable_irqs(adapter);
return rc;
}
+ adapter->tx_queues_active = true;
+
+ /* Since queues were stopped until now, there shouldn't be any
+ * one in ibmvnic_complete_tx() or ibmvnic_xmit() so maybe we
+ * don't need the synchronize_rcu()? Leaving it for consistency
+ * with setting ->tx_queues_active = false.
+ */
+ synchronize_rcu();
+
netif_tx_start_all_queues(netdev);
if (prev_state == VNIC_CLOSED) {
@@ -1468,9 +1881,6 @@ static int ibmvnic_open(struct net_device *netdev)
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
- release_resources(adapter);
- release_rx_pools(adapter);
- release_tx_pools(adapter);
goto out;
}
}
@@ -1487,6 +1897,13 @@ out:
adapter->state = VNIC_OPEN;
rc = 0;
}
+
+ if (rc) {
+ release_resources(adapter);
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
+ }
+
return rc;
}
@@ -1592,6 +2009,14 @@ static void ibmvnic_cleanup(struct net_device *netdev)
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
/* ensure that transmissions are stopped if called by do_reset */
+
+ adapter->tx_queues_active = false;
+
+ /* Ensure complete_tx() and ibmvnic_xmit() see ->tx_queues_active
+ * update so they don't restart a queue after we stop it below.
+ */
+ synchronize_rcu();
+
if (test_bit(0, &adapter->resetting))
netif_tx_disable(netdev);
else
@@ -1831,14 +2256,21 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
tx_buff->skb = NULL;
adapter->netdev->stats.tx_dropped++;
}
+
ind_bufp->index = 0;
+
if (atomic_sub_return(entries, &tx_scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
- __netif_subqueue_stopped(adapter->netdev, queue_num) &&
- !test_bit(0, &adapter->resetting)) {
- netif_wake_subqueue(adapter->netdev, queue_num);
- netdev_dbg(adapter->netdev, "Started queue %d\n",
- queue_num);
+ __netif_subqueue_stopped(adapter->netdev, queue_num)) {
+ rcu_read_lock();
+
+ if (adapter->tx_queues_active) {
+ netif_wake_subqueue(adapter->netdev, queue_num);
+ netdev_dbg(adapter->netdev, "Started queue %d\n",
+ queue_num);
+ }
+
+ rcu_read_unlock();
}
}
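The ->tx_queues_active flag used above follows a straightforward RCU-protected flag pattern: readers (the xmit and tx-completion paths) test the flag inside an RCU read-side critical section, while the writer clears it and then calls synchronize_rcu() before stopping the queues, so no reader can re-wake a queue once teardown proceeds. A generic sketch of the pattern (illustrative, condensed from the hunks in this patch):

/* writer side (cleanup/reset path) */
adapter->tx_queues_active = false;
synchronize_rcu();               /* wait for in-flight readers below to finish */
netif_tx_disable(netdev);        /* now safe: nobody will re-wake a stopped queue */

/* reader side (xmit / tx completion) */
rcu_read_lock();
if (adapter->tx_queues_active)
	netif_wake_subqueue(netdev, queue_num);
rcu_read_unlock();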
@@ -1875,6 +2307,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
struct ibmvnic_ind_xmit_queue *ind_bufp;
struct ibmvnic_tx_buff *tx_buff = NULL;
struct ibmvnic_sub_crq_queue *tx_scrq;
+ struct ibmvnic_long_term_buff *ltb;
struct ibmvnic_tx_pool *tx_pool;
unsigned int tx_send_failed = 0;
netdev_tx_t ret = NETDEV_TX_OK;
@@ -1890,14 +2323,15 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int offset;
int num_entries = 1;
unsigned char *dst;
- int index = 0;
+ int bufidx = 0;
u8 proto = 0;
- tx_scrq = adapter->tx_scrq[queue_num];
- txq = netdev_get_tx_queue(netdev, queue_num);
- ind_bufp = &tx_scrq->ind_buf;
-
- if (test_bit(0, &adapter->resetting)) {
+ /* If a reset is in progress, drop the packet since
+ * the scrqs may get torn down. Otherwise use the
+ * rcu to ensure reset waits for us to complete.
+ */
+ rcu_read_lock();
+ if (!adapter->tx_queues_active) {
dev_kfree_skb_any(skb);
tx_send_failed++;
@@ -1906,6 +2340,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
goto out;
}
+ tx_scrq = adapter->tx_scrq[queue_num];
+ txq = netdev_get_tx_queue(netdev, queue_num);
+ ind_bufp = &tx_scrq->ind_buf;
+
if (ibmvnic_xmit_workarounds(skb, netdev)) {
tx_dropped++;
tx_send_failed++;
@@ -1913,14 +2351,15 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ibmvnic_tx_scrq_flush(adapter, tx_scrq);
goto out;
}
+
if (skb_is_gso(skb))
tx_pool = &adapter->tso_pool[queue_num];
else
tx_pool = &adapter->tx_pool[queue_num];
- index = tx_pool->free_map[tx_pool->consumer_index];
+ bufidx = tx_pool->free_map[tx_pool->consumer_index];
- if (index == IBMVNIC_INVALID_MAP) {
+ if (bufidx == IBMVNIC_INVALID_MAP) {
dev_kfree_skb_any(skb);
tx_send_failed++;
tx_dropped++;
@@ -1931,10 +2370,11 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
- offset = index * tx_pool->buf_size;
- dst = tx_pool->long_term_buff.buff + offset;
+ map_txpool_buf_to_ltb(tx_pool, bufidx, &ltb, &offset);
+
+ dst = ltb->buff + offset;
memset(dst, 0, tx_pool->buf_size);
- data_dma_addr = tx_pool->long_term_buff.addr + offset;
+ data_dma_addr = ltb->addr + offset;
if (skb_shinfo(skb)->nr_frags) {
int cur, i;
@@ -1961,9 +2401,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
- tx_buff = &tx_pool->tx_buff[index];
+ tx_buff = &tx_pool->tx_buff[bufidx];
tx_buff->skb = skb;
- tx_buff->index = index;
+ tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
memset(&tx_crq, 0, sizeof(tx_crq));
@@ -1975,10 +2415,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (skb_is_gso(skb))
tx_crq.v1.correlator =
- cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
+ cpu_to_be32(bufidx | IBMVNIC_TSO_POOL_MASK);
else
- tx_crq.v1.correlator = cpu_to_be32(index);
- tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
+ tx_crq.v1.correlator = cpu_to_be32(bufidx);
+ tx_crq.v1.dma_reg = cpu_to_be16(ltb->map_id);
tx_crq.v1.sge_len = cpu_to_be32(skb->len);
tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
@@ -2042,7 +2482,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_packets++;
tx_bytes += skb->len;
- txq->trans_start = jiffies;
+ txq_trans_cond_update(txq);
ret = NETDEV_TX_OK;
goto out;
@@ -2067,6 +2507,7 @@ tx_err:
netif_carrier_off(netdev);
}
out:
+ rcu_read_unlock();
netdev->stats.tx_dropped += tx_dropped;
netdev->stats.tx_bytes += tx_bytes;
netdev->stats.tx_packets += tx_packets;
@@ -2202,6 +2643,19 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
}
/*
+ * Initialize the init_done completion and return code values. We
+ * can get a transport event just after registering the CRQ and the
+ * tasklet will use this to communicate the transport event. To ensure
+ * we don't miss the notification/error, initialize these _before_
+ * registering the CRQ.
+ */
+static inline void reinit_init_done(struct ibmvnic_adapter *adapter)
+{
+ reinit_completion(&adapter->init_done);
+ adapter->init_done_rc = 0;
+}
+
+/*
* do_reset returns zero if we are able to keep processing reset events, or
* non-zero if we hit a fatal error and must halt.
*/
@@ -2288,7 +2742,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
/* If someone else changed the adapter state
* when we dropped the rtnl, fail the reset
*/
- rc = -1;
+ rc = -EAGAIN;
goto out;
}
adapter->state = VNIC_CLOSED;
@@ -2307,6 +2761,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
+ reinit_init_done(adapter);
+
if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
rc = init_crq_queue(adapter);
} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
@@ -2330,10 +2786,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
}
rc = ibmvnic_reset_init(adapter, true);
- if (rc) {
- rc = IBMVNIC_INIT_FAILED;
+ if (rc)
goto out;
- }
/* If the adapter was in PROBE or DOWN state prior to the reset,
* exit here.
@@ -2452,7 +2906,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
- reinit_completion(&adapter->init_done);
+ reinit_init_done(adapter);
+
rc = init_crq_queue(adapter);
if (rc) {
netdev_err(adapter->netdev,
@@ -2593,22 +3048,82 @@ out:
static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_adapter *adapter;
- bool saved_state = false;
+ unsigned int timeout = 5000;
struct ibmvnic_rwi *tmprwi;
+ bool saved_state = false;
struct ibmvnic_rwi *rwi;
unsigned long flags;
+ struct device *dev;
+ bool need_reset;
+ int num_fails = 0;
u32 reset_state;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
+ dev = &adapter->vdev->dev;
- if (test_and_set_bit_lock(0, &adapter->resetting)) {
+ /* Wait for ibmvnic_probe() to complete. If probe is taking too long
+ * or if another reset is in progress, defer work for now. If probe
+ * eventually fails it will flush and terminate our work.
+ *
+ * Three possibilities here:
+ * 1. Adapter being removed - just return
+ * 2. Timed out on probe or another reset in progress - delay the work
+ * 3. Completed probe - perform any resets in queue
+ */
+ if (adapter->state == VNIC_PROBING &&
+ !wait_for_completion_timeout(&adapter->probe_done, timeout)) {
+ dev_err(dev, "Reset thread timed out on probe");
queue_delayed_work(system_long_wq,
&adapter->ibmvnic_delayed_reset,
IBMVNIC_RESET_DELAY);
return;
}
+ /* adapter is done with probe (i.e state is never VNIC_PROBING now) */
+ if (adapter->state == VNIC_REMOVING)
+ return;
+
+ /* ->rwi_list is stable now (no one else is removing entries) */
+
+ /* ibmvnic_probe() may have purged the reset queue after we were
+ * scheduled to process a reset, so there may be no resets to process.
+ * Before setting the ->resetting bit though, we have to make sure
+ * that there is in fact a reset to process. Otherwise we may race
+ * with ibmvnic_open() and end up leaving the vnic down:
+ *
+ * __ibmvnic_reset() ibmvnic_open()
+ * ----------------- --------------
+ *
+ * set ->resetting bit
+ * find ->resetting bit is set
+ * set ->state to IBMVNIC_OPEN (i.e
+ * assume reset will open device)
+ * return
+ * find reset queue empty
+ * return
+ *
+ * Neither performed vnic login/open and vnic stays down
+ *
+ * If we hold the lock and conditionally set the bit, either we
+ * or ibmvnic_open() will complete the open.
+ */
+ need_reset = false;
+ spin_lock(&adapter->rwi_lock);
+ if (!list_empty(&adapter->rwi_list)) {
+ if (test_and_set_bit_lock(0, &adapter->resetting)) {
+ queue_delayed_work(system_long_wq,
+ &adapter->ibmvnic_delayed_reset,
+ IBMVNIC_RESET_DELAY);
+ } else {
+ need_reset = true;
+ }
+ }
+ spin_unlock(&adapter->rwi_lock);
+
+ if (!need_reset)
+ return;
+
rwi = get_next_rwi(adapter);
while (rwi) {
spin_lock_irqsave(&adapter->state_lock, flags);
@@ -2651,11 +3166,23 @@ static void __ibmvnic_reset(struct work_struct *work)
rc = do_hard_reset(adapter, rwi, reset_state);
rtnl_unlock();
}
- if (rc) {
- /* give backing device time to settle down */
+ if (rc)
+ num_fails++;
+ else
+ num_fails = 0;
+
+ /* If auto-priority-failover is enabled we can get
+ * back to back failovers during resets, resulting
+ * in at least two failed resets (from high-priority
+ * backing device to low-priority one and then back)
+ * If resets continue to fail beyond that, give the
+ * adapter some time to settle down before retrying.
+ */
+ if (num_fails >= 3) {
netdev_dbg(adapter->netdev,
- "[S:%s] Hard reset failed, waiting 60 secs\n",
- adapter_state_to_string(adapter->state));
+ "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
+ adapter_state_to_string(adapter->state),
+ num_fails);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(60 * HZ);
}
@@ -2671,19 +3198,19 @@ static void __ibmvnic_reset(struct work_struct *work)
rwi = get_next_rwi(adapter);
/*
- * If there is another reset queued, free the previous rwi
- * and process the new reset even if previous reset failed
- * (the previous reset could have failed because of a fail
- * over for instance, so process the fail over).
- *
* If there are no resets queued and the previous reset failed,
* the adapter would be in an undefined state. So retry the
* previous reset as a hard reset.
+ *
+ * Else, free the previous rwi and, if there is another reset
+ * queued, process the new reset even if previous reset failed
+ * (the previous reset could have failed because of a fail
+ * over for instance, so process the fail over).
*/
- if (rwi)
- kfree(tmprwi);
- else if (rc)
+ if (!rwi && rc)
rwi = tmprwi;
+ else
+ kfree(tmprwi);
if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
@@ -2713,12 +3240,23 @@ static void __ibmvnic_delayed_reset(struct work_struct *work)
__ibmvnic_reset(&adapter->ibmvnic_reset);
}
+static void flush_reset_queue(struct ibmvnic_adapter *adapter)
+{
+ struct list_head *entry, *tmp_entry;
+
+ if (!list_empty(&adapter->rwi_list)) {
+ list_for_each_safe(entry, tmp_entry, &adapter->rwi_list) {
+ list_del(entry);
+ kfree(list_entry(entry, struct ibmvnic_rwi, list));
+ }
+ }
+}
+
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
enum ibmvnic_reset_reason reason)
{
- struct list_head *entry, *tmp_entry;
- struct ibmvnic_rwi *rwi, *tmp;
struct net_device *netdev = adapter->netdev;
+ struct ibmvnic_rwi *rwi, *tmp;
unsigned long flags;
int ret;
@@ -2737,13 +3275,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
goto err;
}
- if (adapter->state == VNIC_PROBING) {
- netdev_warn(netdev, "Adapter reset during probe\n");
- adapter->init_done_rc = -EAGAIN;
- ret = EAGAIN;
- goto err;
- }
-
list_for_each_entry(tmp, &adapter->rwi_list, list) {
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
@@ -2761,10 +3292,9 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
/* if we just received a transport event,
* flush reset queue and process this reset
*/
- if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
- list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
- list_del(entry);
- }
+ if (adapter->force_reset_recovery)
+ flush_reset_queue(adapter);
+
rwi->reset_reason = reason;
list_add_tail(&rwi->list, &adapter->rwi_list);
netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
@@ -3072,17 +3602,14 @@ static u32 ibmvnic_get_link(struct net_device *netdev)
}
static void ibmvnic_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
- ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
- ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
- } else {
- ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
- ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
- }
+ ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
+ ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
@@ -3092,26 +3619,26 @@ static void ibmvnic_get_ringparam(struct net_device *netdev,
}
static int ibmvnic_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int ret;
- ret = 0;
+ if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
+ ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
+ netdev_err(netdev, "Invalid request.\n");
+ netdev_err(netdev, "Max tx buffers = %llu\n",
+ adapter->max_rx_add_entries_per_subcrq);
+ netdev_err(netdev, "Max rx buffers = %llu\n",
+ adapter->max_tx_entries_per_subcrq);
+ return -EINVAL;
+ }
+
adapter->desired.rx_entries = ring->rx_pending;
adapter->desired.tx_entries = ring->tx_pending;
- ret = wait_for_reset(adapter);
-
- if (!ret &&
- (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
- adapter->req_tx_entries_per_subcrq != ring->tx_pending))
- netdev_info(netdev,
- "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
- ring->rx_pending, ring->tx_pending,
- adapter->req_rx_add_entries_per_subcrq,
- adapter->req_tx_entries_per_subcrq);
- return ret;
+ return wait_for_reset(adapter);
}
static void ibmvnic_get_channels(struct net_device *netdev,
@@ -3119,14 +3646,8 @@ static void ibmvnic_get_channels(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
- channels->max_rx = adapter->max_rx_queues;
- channels->max_tx = adapter->max_tx_queues;
- } else {
- channels->max_rx = IBMVNIC_MAX_QUEUES;
- channels->max_tx = IBMVNIC_MAX_QUEUES;
- }
-
+ channels->max_rx = adapter->max_rx_queues;
+ channels->max_tx = adapter->max_tx_queues;
channels->max_other = 0;
channels->max_combined = 0;
channels->rx_count = adapter->req_rx_queues;
@@ -3139,22 +3660,11 @@ static int ibmvnic_set_channels(struct net_device *netdev,
struct ethtool_channels *channels)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int ret;
- ret = 0;
adapter->desired.rx_queues = channels->rx_count;
adapter->desired.tx_queues = channels->tx_count;
- ret = wait_for_reset(adapter);
-
- if (!ret &&
- (adapter->req_rx_queues != channels->rx_count ||
- adapter->req_tx_queues != channels->tx_count))
- netdev_info(netdev,
- "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
- channels->rx_count, channels->tx_count,
- adapter->req_rx_queues, adapter->req_tx_queues);
- return ret;
+ return wait_for_reset(adapter);
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -3162,43 +3672,32 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
struct ibmvnic_adapter *adapter = netdev_priv(dev);
int i;
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
- i++, data += ETH_GSTRING_LEN)
- memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
+ if (stringset != ETH_SS_STATS)
+ return;
- for (i = 0; i < adapter->req_tx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
- data += ETH_GSTRING_LEN;
+ for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
+ memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
- snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
- data += ETH_GSTRING_LEN;
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
+ data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN,
- "tx%d_dropped_packets", i);
- data += ETH_GSTRING_LEN;
- }
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
- for (i = 0; i < adapter->req_rx_queues; i++) {
- snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
- data += ETH_GSTRING_LEN;
+ snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
+ data += ETH_GSTRING_LEN;
+ }
- snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
- data += ETH_GSTRING_LEN;
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
+ data += ETH_GSTRING_LEN;
- snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
- data += ETH_GSTRING_LEN;
- }
- break;
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
+ data += ETH_GSTRING_LEN;
- case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- ibmvnic_priv_flags[i]);
- break;
- default:
- return;
+ snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
+ data += ETH_GSTRING_LEN;
}
}
@@ -3211,8 +3710,6 @@ static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
return ARRAY_SIZE(ibmvnic_stats) +
adapter->req_tx_queues * NUM_TX_STATS +
adapter->req_rx_queues * NUM_RX_STATS;
- case ETH_SS_PRIV_FLAGS:
- return ARRAY_SIZE(ibmvnic_priv_flags);
default:
return -EOPNOTSUPP;
}
@@ -3265,26 +3762,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
}
}
-static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-
- return adapter->priv_flags;
-}
-
-static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
-
- if (which_maxes)
- adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
- else
- adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
-
- return 0;
-}
-
static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_drvinfo = ibmvnic_get_drvinfo,
.get_msglevel = ibmvnic_get_msglevel,
@@ -3298,8 +3775,6 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_sset_count = ibmvnic_get_sset_count,
.get_ethtool_stats = ibmvnic_get_ethtool_stats,
.get_link_ksettings = ibmvnic_get_link_ksettings,
- .get_priv_flags = ibmvnic_get_priv_flags,
- .set_priv_flags = ibmvnic_set_priv_flags,
};
/* Routines for managing CRQs/sCRQs */
@@ -3342,6 +3817,8 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
if (!adapter->tx_scrq || !adapter->rx_scrq)
return -EINVAL;
+ ibmvnic_clean_affinity(adapter);
+
for (i = 0; i < adapter->req_tx_queues; i++) {
netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
@@ -3391,6 +3868,7 @@ static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
free_pages((unsigned long)scrq->msgs, 2);
+ free_cpumask_var(scrq->affinity_mask);
kfree(scrq);
}
@@ -3411,6 +3889,8 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
dev_warn(dev, "Couldn't allocate crq queue messages page\n");
goto zero_page_failed;
}
+ if (!zalloc_cpumask_var(&scrq->affinity_mask, GFP_KERNEL))
+ goto cpumask_alloc_failed;
scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
@@ -3463,6 +3943,8 @@ reg_failed:
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
map_failed:
+ free_cpumask_var(scrq->affinity_mask);
+cpumask_alloc_failed:
free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
kfree(scrq);
@@ -3474,6 +3956,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
{
int i;
+ ibmvnic_clean_affinity(adapter);
if (adapter->tx_scrq) {
for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
if (!adapter->tx_scrq[i])
@@ -3536,6 +4019,30 @@ static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
return rc;
}
+/* We cannot use the IRQ chip EOI handler because that has the
+ * unintended effect of changing the interrupt priority.
+ */
+static void ibmvnic_xics_eoi(struct device *dev, struct ibmvnic_sub_crq_queue *scrq)
+{
+ u64 val = 0xff000000 | scrq->hw_irq;
+ unsigned long rc;
+
+ rc = plpar_hcall_norets(H_EOI, val);
+ if (rc)
+ dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", val, rc);
+}
+
+/* Due to a firmware bug, the hypervisor can send an interrupt to a
+ * transmit or receive queue just prior to a partition migration.
+ * Force an EOI after migration.
+ */
+static void ibmvnic_clear_pending_interrupt(struct device *dev,
+ struct ibmvnic_sub_crq_queue *scrq)
+{
+ if (!xive_enabled())
+ ibmvnic_xics_eoi(dev, scrq);
+}
+
static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
@@ -3549,15 +4056,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
if (test_bit(0, &adapter->resetting) &&
adapter->reset_reason == VNIC_RESET_MOBILITY) {
- u64 val = (0xff000000) | scrq->hw_irq;
-
- rc = plpar_hcall_norets(H_EOI, val);
- /* H_EOI would fail with rc = H_FUNCTION when running
- * in XIVE mode which is expected, but not an error.
- */
- if (rc && (rc != H_FUNCTION))
- dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
- val, rc);
+ ibmvnic_clear_pending_interrupt(dev, scrq);
}
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
@@ -3628,9 +4127,15 @@ restart_loop:
(adapter->req_tx_entries_per_subcrq / 2) &&
__netif_subqueue_stopped(adapter->netdev,
scrq->pool_index)) {
- netif_wake_subqueue(adapter->netdev, scrq->pool_index);
- netdev_dbg(adapter->netdev, "Started queue %d\n",
- scrq->pool_index);
+ rcu_read_lock();
+ if (adapter->tx_queues_active) {
+ netif_wake_subqueue(adapter->netdev,
+ scrq->pool_index);
+ netdev_dbg(adapter->netdev,
+ "Started queue %d\n",
+ scrq->pool_index);
+ }
+ rcu_read_unlock();
}
}
@@ -3729,6 +4234,11 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
goto req_rx_irq_failed;
}
}
+
+ cpus_read_lock();
+ ibmvnic_set_affinity(adapter);
+ cpus_read_unlock();
+
return rc;
req_rx_irq_failed:
@@ -3759,7 +4269,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter)
allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
if (!allqueues)
- return -1;
+ return -ENOMEM;
for (i = 0; i < total_queues; i++) {
allqueues[i] = init_sub_crq_queue(adapter);
@@ -3828,7 +4338,7 @@ tx_failed:
for (i = 0; i < registered_queues; i++)
release_sub_crq_queue(adapter, allqueues[i], 1);
kfree(allqueues);
- return -1;
+ return -ENOMEM;
}
static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
@@ -3836,11 +4346,25 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
int max_entries;
+ int cap_reqs;
+
+ /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
+ * the PROMISC flag). Initialize this count upfront. When the tasklet
+ * receives a response to all of these, it will send the next protocol
+ * message (QUERY_IP_OFFLOAD).
+ */
+ if (!(adapter->netdev->flags & IFF_PROMISC) ||
+ adapter->promisc_supported)
+ cap_reqs = 7;
+ else
+ cap_reqs = 6;
if (!retry) {
/* Sub-CRQ entries are 32 byte long */
int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
+ atomic_set(&adapter->running_cap_crqs, cap_reqs);
+
if (adapter->min_tx_entries_per_subcrq > entries_page ||
adapter->min_rx_add_entries_per_subcrq > entries_page) {
dev_err(dev, "Fatal, invalid entries per sub-crq\n");
@@ -3859,16 +4383,16 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
adapter->desired.rx_entries =
adapter->max_rx_add_entries_per_subcrq;
- max_entries = IBMVNIC_MAX_LTB_SIZE /
+ max_entries = IBMVNIC_LTB_SET_SIZE /
(adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
- adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
+ adapter->desired.tx_entries > IBMVNIC_LTB_SET_SIZE) {
adapter->desired.tx_entries = max_entries;
}
if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
- adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
+ adapter->desired.rx_entries > IBMVNIC_LTB_SET_SIZE) {
adapter->desired.rx_entries = max_entries;
}
@@ -3901,44 +4425,45 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
adapter->opt_rx_comp_queues;
adapter->req_rx_add_queues = adapter->max_rx_add_queues;
+ } else {
+ atomic_add(cap_reqs, &adapter->running_cap_crqs);
}
-
memset(&crq, 0, sizeof(crq));
crq.request_capability.first = IBMVNIC_CRQ_CMD;
crq.request_capability.cmd = REQUEST_CAPABILITY;
crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_tx_entries_per_subcrq);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_MTU);
crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
if (adapter->netdev->flags & IFF_PROMISC) {
@@ -3946,16 +4471,21 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(1);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
}
} else {
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(0);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
}
+
+ /* Keep at end to catch any discrepancy between expected and actual
+ * CRQs sent.
+ */
+ WARN_ON(cap_reqs != 0);
}
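
A distilled sketch of the counting pattern used by send_request_cap() and the CRQ tasklet: the sender records how many responses it expects before sending anything, and the response path fires the next protocol step only when that count drains to zero. The names below are illustrative, not driver symbols, and userspace atomics stand in for the kernel's atomic_t:

#include <assert.h>
#include <stdatomic.h>

static atomic_int running_cap_reqs;	/* stands in for adapter->running_cap_crqs */

static void send_capability_requests(int expected)
{
	int budget = expected;

	/* Publish the expected response count before the first send so the
	 * response handler never sees a partial total.
	 */
	atomic_store(&running_cap_reqs, expected);

	while (budget > 0) {
		/* ibmvnic_send_crq(...) would go here */
		budget--;
	}

	assert(budget == 0);	/* mirrors the WARN_ON(cap_reqs != 0) above */
}

static void handle_capability_response(void)
{
	/* The last response to arrive triggers the next protocol message. */
	if (atomic_fetch_sub(&running_cap_reqs, 1) == 1) {
		/* send_query_ip_offload(...) would go here */
	}
}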
static int pending_scrq(struct ibmvnic_adapter *adapter,
@@ -4187,7 +4717,7 @@ static int send_login(struct ibmvnic_adapter *adapter)
if (!adapter->tx_scrq || !adapter->rx_scrq) {
netdev_err(adapter->netdev,
"RX or TX queues are not allocated, device login failed\n");
- return -1;
+ return -ENOMEM;
}
release_login_buffer(adapter);
@@ -4307,7 +4837,7 @@ buf_map_failed:
kfree(login_buffer);
adapter->login_buf = NULL;
buf_alloc_failed:
- return -1;
+ return -ENOMEM;
}
static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
@@ -4349,118 +4879,132 @@ static void send_query_map(struct ibmvnic_adapter *adapter)
static void send_query_cap(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
+ int cap_reqs;
+
+ /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
+ * upfront. When the tasklet receives a response to all of these, it
+ * can send out the next protocol message (REQUEST_CAPABILITY).
+ */
+ cap_reqs = 25;
+
+ atomic_set(&adapter->running_cap_crqs, cap_reqs);
- atomic_set(&adapter->running_cap_crqs, 0);
memset(&crq, 0, sizeof(crq));
crq.query_capability.first = IBMVNIC_CRQ_CMD;
crq.query_capability.cmd = QUERY_CAPABILITY;
crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_MTU);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_MTU);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
- atomic_inc(&adapter->running_cap_crqs);
+
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
+
+ /* Keep at end to catch any discrepancy between expected and actual
+ * CRQs sent.
+ */
+ WARN_ON(cap_reqs != 0);
}
static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
@@ -4764,6 +5308,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
char *name;
atomic_dec(&adapter->running_cap_crqs);
+ netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
+ atomic_read(&adapter->running_cap_crqs));
switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
case REQ_TX_QUEUES:
req_value = &adapter->req_tx_queues;
@@ -4827,10 +5373,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
}
/* Done receiving requested capabilities, query IP offload support */
- if (atomic_read(&adapter->running_cap_crqs) == 0) {
- adapter->wait_capability = false;
+ if (atomic_read(&adapter->running_cap_crqs) == 0)
send_query_ip_offload(adapter);
- }
}
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
@@ -5128,10 +5672,8 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
}
out:
- if (atomic_read(&adapter->running_cap_crqs) == 0) {
- adapter->wait_capability = false;
+ if (atomic_read(&adapter->running_cap_crqs) == 0)
send_request_cap(adapter, 0);
- }
}
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
@@ -5263,9 +5805,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
}
if (!completion_done(&adapter->init_done)) {
- complete(&adapter->init_done);
if (!adapter->init_done_rc)
adapter->init_done_rc = -EAGAIN;
+ complete(&adapter->init_done);
}
break;
@@ -5288,6 +5830,13 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
adapter->fw_done_rc = -EIO;
complete(&adapter->fw_done);
}
+
+ /* if we got here during crq-init, retry crq-init */
+ if (!completion_done(&adapter->init_done)) {
+ adapter->init_done_rc = -EAGAIN;
+ complete(&adapter->init_done);
+ }
+
if (!completion_done(&adapter->stats_done))
complete(&adapter->stats_done);
if (test_bit(0, &adapter->resetting))
@@ -5427,33 +5976,21 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
struct ibmvnic_crq_queue *queue = &adapter->crq;
union ibmvnic_crq *crq;
unsigned long flags;
- bool done = false;
spin_lock_irqsave(&queue->lock, flags);
- while (!done) {
- /* Pull all the valid messages off the CRQ */
- while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
- /* This barrier makes sure ibmvnic_next_crq()'s
- * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
- * before ibmvnic_handle_crq()'s
- * switch(gen_crq->first) and switch(gen_crq->cmd).
- */
- dma_rmb();
- ibmvnic_handle_crq(crq, adapter);
- crq->generic.first = 0;
- }
- /* remain in tasklet until all
- * capabilities responses are received
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+ /* This barrier makes sure ibmvnic_next_crq()'s
+ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+ * before ibmvnic_handle_crq()'s
+ * switch(gen_crq->first) and switch(gen_crq->cmd).
*/
- if (!adapter->wait_capability)
- done = true;
+ dma_rmb();
+ ibmvnic_handle_crq(crq, adapter);
+ crq->generic.first = 0;
}
- /* if capabilities CRQ's were sent in this tasklet, the following
- * tasklet must wait until all responses are received
- */
- if (atomic_read(&adapter->running_cap_crqs) != 0)
- adapter->wait_capability = true;
+
spin_unlock_irqrestore(&queue->lock, flags);
}
@@ -5616,10 +6153,6 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
adapter->from_passive_init = false;
- if (reset)
- reinit_completion(&adapter->init_done);
-
- adapter->init_done_rc = 0;
rc = ibmvnic_send_crq_init(adapter);
if (rc) {
dev_err(dev, "Send crq init failed with error %d\n", rc);
@@ -5628,18 +6161,20 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Initialization sequence timed out\n");
- return -1;
+ return -ETIMEDOUT;
}
if (adapter->init_done_rc) {
release_crq_queue(adapter);
+ dev_err(dev, "CRQ-init failed, %d\n", adapter->init_done_rc);
return adapter->init_done_rc;
}
if (adapter->from_passive_init) {
adapter->state = VNIC_OPEN;
adapter->from_passive_init = false;
- return -1;
+ dev_err(dev, "CRQ-init failed, passive-init\n");
+ return -EINVAL;
}
if (reset &&
@@ -5650,6 +6185,15 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
release_sub_crqs(adapter, 0);
rc = init_sub_crqs(adapter);
} else {
+ /* no need to reinitialize completely, but we do
+ * need to clean up transmits that were in flight
+ * when we processed the reset. Failure to do so
+ * will confound the upper layer, usually TCP, by
+ * creating the illusion of transmits that are
+ * awaiting completion.
+ */
+ clean_tx_pools(adapter);
+
rc = reset_sub_crq_queues(adapter);
}
} else {
@@ -5678,6 +6222,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
+ unsigned long flags;
bool init_success;
int rc;
@@ -5722,6 +6267,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
spin_lock_init(&adapter->rwi_lock);
spin_lock_init(&adapter->state_lock);
mutex_init(&adapter->fw_lock);
+ init_completion(&adapter->probe_done);
init_completion(&adapter->init_done);
init_completion(&adapter->fw_done);
init_completion(&adapter->reset_done);
@@ -5732,6 +6278,33 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
init_success = false;
do {
+ reinit_init_done(adapter);
+
+ /* clear any failovers we got in the previous pass
+ * since we are reinitializing the CRQ
+ */
+ adapter->failover_pending = false;
+
+ /* If we had already initialized CRQ, we may have one or
+ * more resets queued already. Discard those and release
+ * the CRQ before initializing the CRQ again.
+ */
+ release_crq_queue(adapter);
+
+ /* Since we are still in PROBING state, __ibmvnic_reset()
+ * will not access the ->rwi_list and since we released CRQ,
+ * we won't get _new_ transport events. But there may be an
+ * ongoing ibmvnic_reset() call. So serialize access to
+ * rwi_list. If we win the race, ibmvnic_reset() could add
+ * a reset after we purged but that's ok - we just may end
+ * up with an extra reset (i.e. similar to having two or more
+ * resets in the queue at once).
+ */
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
+ flush_reset_queue(adapter);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+
rc = init_crq_queue(adapter);
if (rc) {
dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
@@ -5763,12 +6336,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
goto ibmvnic_dev_file_err;
netif_carrier_off(netdev);
- rc = register_netdev(netdev);
- if (rc) {
- dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
- goto ibmvnic_register_fail;
- }
- dev_info(&dev->dev, "ibmvnic registered\n");
if (init_success) {
adapter->state = VNIC_PROBED;
@@ -5781,8 +6348,27 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->wait_for_reset = false;
adapter->last_reset_time = jiffies;
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
+ goto ibmvnic_register_fail;
+ }
+ dev_info(&dev->dev, "ibmvnic registered\n");
+
+ rc = ibmvnic_cpu_notif_add(adapter);
+ if (rc) {
+ netdev_err(netdev, "Registering cpu notifier failed\n");
+ goto cpu_notif_add_failed;
+ }
+
+ complete(&adapter->probe_done);
+
return 0;
+cpu_notif_add_failed:
+ unregister_netdev(netdev);
+
ibmvnic_register_fail:
device_remove_file(&dev->dev, &dev_attr_failover);
@@ -5795,6 +6381,17 @@ ibmvnic_stats_fail:
ibmvnic_init_fail:
release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
+
+ /* cleanup worker thread after releasing CRQ so we don't get
+ * transport events (i.e. new work items for the worker thread).
+ */
+ adapter->state = VNIC_REMOVING;
+ complete(&adapter->probe_done);
+ flush_work(&adapter->ibmvnic_reset);
+ flush_delayed_work(&adapter->ibmvnic_delayed_reset);
+
+ flush_reset_queue(adapter);
+
mutex_destroy(&adapter->fw_lock);
free_netdev(netdev);
@@ -5822,6 +6419,8 @@ static void ibmvnic_remove(struct vio_dev *dev)
spin_unlock_irqrestore(&adapter->state_lock, flags);
+ ibmvnic_cpu_notif_remove(adapter);
+
flush_work(&adapter->ibmvnic_reset);
flush_delayed_work(&adapter->ibmvnic_delayed_reset);
@@ -5871,10 +6470,14 @@ static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
be64_to_cpu(session_token));
rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
H_SESSION_ERR_DETECTED, session_token, 0, 0);
- if (rc)
+ if (rc) {
netdev_err(netdev,
"H_VIOCTL initiated failover failed, rc %ld\n",
rc);
+ goto last_resort;
+ }
+
+ return count;
last_resort:
netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
@@ -5948,15 +6551,40 @@ static struct vio_driver ibmvnic_driver = {
/* module functions */
static int __init ibmvnic_module_init(void)
{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/ibmvnic:online",
+ ibmvnic_cpu_online,
+ ibmvnic_cpu_down_prep);
+ if (ret < 0)
+ goto out;
+ ibmvnic_online = ret;
+ ret = cpuhp_setup_state_multi(CPUHP_IBMVNIC_DEAD, "net/ibmvnic:dead",
+ NULL, ibmvnic_cpu_dead);
+ if (ret)
+ goto err_dead;
+
+ ret = vio_register_driver(&ibmvnic_driver);
+ if (ret)
+ goto err_vio_register;
+
pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
IBMVNIC_DRIVER_VERSION);
- return vio_register_driver(&ibmvnic_driver);
+ return 0;
+err_vio_register:
+ cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
+err_dead:
+ cpuhp_remove_multi_state(ibmvnic_online);
+out:
+ return ret;
}
static void __exit ibmvnic_module_exit(void)
{
vio_unregister_driver(&ibmvnic_driver);
+ cpuhp_remove_multi_state(CPUHP_IBMVNIC_DEAD);
+ cpuhp_remove_multi_state(ibmvnic_online);
}
module_init(ibmvnic_module_init);
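
The error path above unwinds two hotplug states; the matching per-adapter registration (ibmvnic_cpu_notif_add(), called from the probe path earlier in this diff) is not part of the hunks shown, so the following is only an assumption about its likely shape using the standard cpuhp multi-instance API:

#include <linux/cpuhotplug.h>

/* Assumed shape of ibmvnic_cpu_notif_add(); the real body is not in this
 * diff. It attaches the adapter's hlist nodes to the two multi-instance
 * states registered in ibmvnic_module_init() above.
 */
static int ibmvnic_cpu_notif_add_sketch(struct ibmvnic_adapter *adapter)
{
	int ret;

	ret = cpuhp_state_add_instance_nocalls(ibmvnic_online, &adapter->node);
	if (ret)
		return ret;

	ret = cpuhp_state_add_instance_nocalls(CPUHP_IBMVNIC_DEAD,
					       &adapter->node_dead);
	if (ret)
		cpuhp_state_remove_instance_nocalls(ibmvnic_online,
						    &adapter->node);
	return ret;
}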
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index b8e42f67d897..4e18b4cefa97 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -18,8 +18,6 @@
#define IBMVNIC_NAME "ibmvnic"
#define IBMVNIC_DRIVER_VERSION "1.0.1"
#define IBMVNIC_INVALID_MAP -1
-#define IBMVNIC_STATS_TIMEOUT 1
-#define IBMVNIC_INIT_FAILED 2
#define IBMVNIC_OPEN_FAILED 3
/* basic structures plus 100 2k buffers */
@@ -38,16 +36,52 @@
#define IBMVNIC_TSO_BUFS 64
#define IBMVNIC_TSO_POOL_MASK 0x80000000
-#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
-#define IBMVNIC_BUFFER_HLEN 500
+/* A VNIC adapter has a set of Rx and Tx pools (aka queues). Each Rx/Tx pool
+ * has a set of buffers. The size of each buffer is determined by the MTU.
+ *
+ * Each Rx/Tx pool is also associated with a DMA region that is shared
+ * with the "hardware" (VIOS) and used to send/receive packets. The DMA
+ * region is also referred to as a Long Term Buffer or LTB.
+ *
+ * The size of the DMA region required for an Rx/Tx pool depends on the
+ * number and size (MTU) of the buffers in the pool. At the max levels
+ * of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus
+ * some padding.
+ *
+ * But the size of a single DMA region is limited by MAX_ORDER in the
+ * kernel (about 16MB currently). To support, say, 4K jumbo frames, we
+ * use a set of LTBs (struct ltb_set) per pool.
+ *
+ * IBMVNIC_ONE_LTB_MAX - max size of each LTB supported by kernel
+ * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set
+ * (must be <= IBMVNIC_ONE_LTB_MAX)
+ * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set
+ *
+ * Each VNIC can have up to 16 Rx, 16 Tx and 16 TSO pools. The TSO pools
+ * are of fixed length (IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS) of 4MB.
+ *
+ * The Rx and Tx pools can have up to 4096 buffers. The max size of these
+ * buffers is about 9588 (for jumbo frames, including IBMVNIC_BUFFER_HLEN).
+ * So we set IBMVNIC_LTB_SET_SIZE for a pool to 4096 * 9588 ~= 38MB.
+ *
+ * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. If it is large,
+ * the allocation of the LTB can fail when the system is low on memory. If
+ * it's too small, we would need several mappings for each of the Rx/
+ * Tx/TSO pools but there is a limit of 255 mappings per vnic in the
+ * VNIC protocol.
+ *
+ * So we set IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set
+ * to 38MB, we will need 5 LTBs per Rx and Tx pool and 1 LTB per TSO
+ * pool for the 4MB. Thus the 16 Rx and Tx queues require 32 * 5 = 160
+ * plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC.
+ */
+#define IBMVNIC_ONE_LTB_MAX ((u32)((1 << MAX_ORDER) * PAGE_SIZE))
+#define IBMVNIC_ONE_LTB_SIZE min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
+#define IBMVNIC_LTB_SET_SIZE (38 << 20)
+#define IBMVNIC_BUFFER_HLEN 500
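
A quick userspace check of the arithmetic walked through in the comment above; the macro names mirror the driver's but the program itself is illustrative only:

#include <stdio.h>

#define ONE_LTB_SIZE	(8u << 20)	/* IBMVNIC_ONE_LTB_SIZE: 8MB per LTB */
#define LTB_SET_SIZE	(38u << 20)	/* IBMVNIC_LTB_SET_SIZE: ~4096 * 9588 bytes */

int main(void)
{
	/* round up: how many 8MB LTBs cover one 38MB ltb_set */
	unsigned int ltbs = (LTB_SET_SIZE + ONE_LTB_SIZE - 1) / ONE_LTB_SIZE;
	/* 16 Rx + 16 Tx pools at 'ltbs' LTBs each, plus 16 single-LTB TSO pools */
	unsigned int mappings = 32 * ltbs + 16;

	printf("LTBs per Rx/Tx pool: %u\n", ltbs);		/* 5 */
	printf("LTB mappings per VNIC: %u\n", mappings);	/* 176, under the 255 limit */
	return 0;
}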
#define IBMVNIC_RESET_DELAY 100
-static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
-#define IBMVNIC_USE_SERVER_MAXES 0x1
- "use-server-maxes"
-};
-
struct ibmvnic_login_buffer {
__be32 len;
__be32 version;
@@ -791,6 +825,7 @@ struct ibmvnic_sub_crq_queue {
atomic_t used;
char name[32];
u64 handle;
+ cpumask_var_t affinity_mask;
} ____cacheline_aligned;
struct ibmvnic_long_term_buff {
@@ -800,6 +835,11 @@ struct ibmvnic_long_term_buff {
u8 map_id;
};
+struct ibmvnic_ltb_set {
+ int num_ltbs;
+ struct ibmvnic_long_term_buff *ltbs;
+};
+
struct ibmvnic_tx_buff {
struct sk_buff *skb;
int index;
@@ -812,7 +852,7 @@ struct ibmvnic_tx_pool {
int *free_map;
int consumer_index;
int producer_index;
- struct ibmvnic_long_term_buff long_term_buff;
+ struct ibmvnic_ltb_set ltb_set;
int num_buffers;
int buf_size;
} ____cacheline_aligned;
@@ -835,7 +875,7 @@ struct ibmvnic_rx_pool {
int next_free;
int next_alloc;
int active;
- struct ibmvnic_long_term_buff long_term_buff;
+ struct ibmvnic_ltb_set ltb_set;
} ____cacheline_aligned;
struct ibmvnic_vpd {
@@ -885,7 +925,6 @@ struct ibmvnic_adapter {
struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
dma_addr_t ip_offload_ctrl_tok;
u32 msg_enable;
- u32 priv_flags;
/* Vital Product Data (VPD) */
struct ibmvnic_vpd *vpd;
@@ -921,7 +960,6 @@ struct ibmvnic_adapter {
int login_rsp_buf_sz;
atomic_t running_cap_crqs;
- bool wait_capability;
struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;
@@ -933,6 +971,7 @@ struct ibmvnic_adapter {
struct ibmvnic_tx_pool *tx_pool;
struct ibmvnic_tx_pool *tso_pool;
+ struct completion probe_done;
struct completion init_done;
int init_done_rc;
@@ -945,6 +984,10 @@ struct ibmvnic_adapter {
int reset_done_rc;
bool wait_for_reset;
+ /* CPU hotplug instances for online & dead */
+ struct hlist_node node;
+ struct hlist_node node_dead;
+
/* partner capabilities */
u64 min_tx_queues;
u64 min_rx_queues;
@@ -1008,11 +1051,14 @@ struct ibmvnic_adapter {
struct work_struct ibmvnic_reset;
struct delayed_work ibmvnic_delayed_reset;
unsigned long resetting;
- bool napi_enabled, from_passive_init;
- bool login_pending;
/* last device reset time */
unsigned long last_reset_time;
+ bool napi_enabled;
+ bool from_passive_init;
+ bool login_pending;
+ /* protected by rcu */
+ bool tx_queues_active;
bool failover_pending;
bool force_reset_recovery;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 0b274d8fa45b..9bc0a9519899 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -139,23 +139,6 @@ config IGBVF
To compile this driver as a module, choose M here. The module
will be called igbvf.
-config IXGB
- tristate "Intel(R) PRO/10GbE support"
- depends on PCI
- help
- This driver supports Intel(R) PRO/10GbE family of adapters for
- PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver
- instead. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide that can be located at:
-
- <http://support.intel.com>
-
- More specific information on configuring the driver is in
- <file:Documentation/networking/device_drivers/ethernet/intel/ixgb.rst>.
-
- To compile this driver as a module, choose M here. The module
- will be called ixgb.
-
config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
@@ -296,6 +279,7 @@ config ICE
default n
depends on PCI_MSI
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on GNSS || GNSS = n
select AUXILIARY_BUS
select DIMLIB
select NET_DEVLINK
@@ -327,6 +311,16 @@ config ICE_SWITCHDEV
If unsure, say N.
+config ICE_HWTS
+ bool "Support HW cross-timestamp on platforms with PTM support"
+ default y
+ depends on ICE && X86
+ help
+ Say Y to enable hardware supported cross-timestamping on platforms
+ with PCIe PTM support. The cross-timestamp is available through
+ the PTP clock driver precise cross-timestamp ioctl
+ (PTP_SYS_OFFSET_PRECISE).
+
config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index 3075290063f6..d80d04132073 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_IGBVF) += igbvf/
obj-$(CONFIG_IXGBE) += ixgbe/
obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_I40E) += i40e/
-obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_IAVF) += iavf/
obj-$(CONFIG_FM10K) += fm10k/
obj-$(CONFIG_ICE) += ice/
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 0bf3d47bb90d..d3fdc290937f 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1430,7 +1430,6 @@ static int e100_phy_check_without_mii(struct nic *nic)
#define MII_NSC_CONG MII_RESV1
#define NSC_CONG_ENABLE 0x0100
#define NSC_CONG_TXREADY 0x0400
-#define ADVERTISE_FC_SUPPORTED 0x0400
static int e100_phy_init(struct nic *nic)
{
struct net_device *netdev = nic->netdev;
@@ -1742,11 +1741,8 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
/* If we can't map the skb, have the upper layer try later */
- if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
- dev_kfree_skb_any(skb);
- skb = NULL;
+ if (dma_mapping_error(&nic->pdev->dev, dma_addr))
return -ENOMEM;
- }
/*
* Use the last 4 bytes of the SKB payload packet as the CRC, used for
@@ -2432,8 +2428,8 @@ static void e100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct nic *nic = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(nic->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(nic->pdev),
sizeof(info->bus_info));
}
@@ -2557,7 +2553,9 @@ static int e100_set_eeprom(struct net_device *netdev,
}
static void e100_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nic *nic = netdev_priv(netdev);
struct param_range *rfds = &nic->params.rfds;
@@ -2570,7 +2568,9 @@ static void e100_get_ringparam(struct net_device *netdev,
}
static int e100_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct nic *nic = netdev_priv(netdev);
struct param_range *rfds = &nic->params.rfds;
@@ -2844,7 +2844,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
nic = netdev_priv(netdev);
- netif_napi_add(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
+ netif_napi_add_weight(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
nic->netdev = netdev;
nic->pdev = pdev;
nic->msg_enable = (1 << debug) - 1;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 0a57172dfcbc..d06d29c6c037 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -531,15 +531,17 @@ static void e1000_get_drvinfo(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, e1000_driver_name,
+ strscpy(drvinfo->driver, e1000_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
static void e1000_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -556,7 +558,9 @@ static void e1000_get_ringparam(struct net_device *netdev,
}
static int e1000_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 1042e79a1397..4542e2bc28e8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -2000,7 +2000,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
@@ -4376,7 +4376,7 @@ void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
/**
* e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table.
* @hw: Struct containing variables accessed by shared code
- * @offset: Offset in VLAN filer table to write
+ * @offset: Offset in VLAN filter table to write
* @value: Value to write into VLAN filter table
*/
void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
@@ -4396,7 +4396,7 @@ void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
}
/**
- * e1000_clear_vfta - Clears the VLAN filer table
+ * e1000_clear_vfta - Clears the VLAN filter table
* @hw: Struct containing variables accessed by shared code
*/
static void e1000_clear_vfta(struct e1000_hw *hw)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 669060a2e6aa..da6e303ad99b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1012,7 +1012,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &e1000_netdev_ops;
e1000_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, e1000_clean);
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
@@ -1953,7 +1953,8 @@ void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
static void
e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
- struct e1000_tx_buffer *buffer_info)
+ struct e1000_tx_buffer *buffer_info,
+ int budget)
{
if (buffer_info->dma) {
if (buffer_info->mapped_as_page)
@@ -1966,7 +1967,7 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
buffer_info->dma = 0;
}
if (buffer_info->skb) {
- dev_kfree_skb_any(buffer_info->skb);
+ napi_consume_skb(buffer_info->skb, budget);
buffer_info->skb = NULL;
}
buffer_info->time_stamp = 0;
@@ -1990,7 +1991,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
for (i = 0; i < tx_ring->count; i++) {
buffer_info = &tx_ring->buffer_info[i];
- e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
}
netdev_reset_queue(adapter->netdev);
@@ -2707,7 +2708,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
@@ -2958,7 +2959,7 @@ dma_error:
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
- e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info, 0);
}
return 0;
@@ -3138,7 +3139,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
if (skb->data_len && hdr_len == len) {
switch (hw->mac_type) {
case e1000_82544: {
@@ -3856,7 +3857,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
}
}
- e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+ e1000_unmap_and_free_tx_resource(adapter, buffer_info,
+ 64);
tx_desc->upper.data = 0;
if (unlikely(++i == tx_ring->count))
@@ -4227,8 +4229,6 @@ process_skb:
*/
p = buffer_info->rxbuf.page;
if (length <= copybreak) {
- u8 *vaddr;
-
if (likely(!(netdev->features & NETIF_F_RXFCS)))
length -= 4;
skb = e1000_alloc_rx_skb(adapter,
@@ -4236,10 +4236,9 @@ process_skb:
if (!skb)
break;
- vaddr = kmap_atomic(p);
- memcpy(skb_tail_pointer(skb), vaddr,
- length);
- kunmap_atomic(vaddr);
+ memcpy(skb_tail_pointer(skb),
+ page_address(p), length);
+
/* re-use the page, so don't erase
* buffer_info->rxbuf.page
*/
@@ -4382,7 +4381,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
if (!skb) {
unsigned int frag_len = e1000_frag_len(adapter);
- skb = build_skb(data - E1000_HEADROOM, frag_len);
+ skb = napi_build_skb(data - E1000_HEADROOM, frag_len);
if (!skb) {
adapter->alloc_rx_buff_failed++;
break;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 4d4f5bf1e516..f4154ca7fcb4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -82,7 +82,6 @@ E1000_PARAM(Duplex, "Duplex setting");
*/
E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
#define AUTONEG_ADV_DEFAULT 0x2F
-#define AUTONEG_ADV_MASK 0x2F
/* User Specified Flow Control Override
*
@@ -95,7 +94,6 @@ E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
* Default Value: Read flow control settings from the EEPROM
*/
E1000_PARAM(FlowControl, "Flow Control setting");
-#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
/* XsumRX - Receive Checksum Offload Enable/Disable
*
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index 44e58b6e7660..0baa15503c38 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -5,6 +5,9 @@
# Makefile for the Intel(R) PRO/1000 ethernet driver
#
+ccflags-y += -I$(src)
+subdir-ccflags-y += -I$(src)
+
obj-$(CONFIG_E1000E) += e1000e.o
e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index c3def0ee7788..a187582d2299 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -115,7 +115,9 @@ enum e1000_boards {
board_pch_lpt,
board_pch_spt,
board_pch_cnp,
- board_pch_tgp
+ board_pch_tgp,
+ board_pch_adp,
+ board_pch_mtp
};
struct e1000_ps_page {
@@ -328,7 +330,7 @@ struct e1000_adapter {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct pm_qos_request pm_qos_req;
- s32 ptp_delta;
+ long ptp_delta;
u16 eee_advert;
};
@@ -502,6 +504,8 @@ extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_pch_spt_info;
extern const struct e1000_info e1000_pch_cnp_info;
extern const struct e1000_info e1000_pch_tgp_info;
+extern const struct e1000_info e1000_pch_adp_info;
+extern const struct e1000_info e1000_pch_mtp_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000e_trace.h b/drivers/net/ethernet/intel/e1000e/e1000e_trace.h
new file mode 100644
index 000000000000..19d3cf4d924e
--- /dev/null
+++ b/drivers/net/ethernet/intel/e1000e/e1000e_trace.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2022, Intel Corporation. */
+/* Modeled on trace-events-sample.h */
+/* The trace subsystem name for e1000e will be "e1000e_trace".
+ *
+ * This file is named e1000e_trace.h.
+ *
+ * Since this include file's name is different from the trace
+ * subsystem name, we'll have to define TRACE_INCLUDE_FILE at the end
+ * of this file.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM e1000e_trace
+
+#if !defined(_TRACE_E1000E_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_E1000E_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(e1000e_trace_mac_register,
+ TP_PROTO(uint32_t reg),
+ TP_ARGS(reg),
+ TP_STRUCT__entry(__field(uint32_t, reg)),
+ TP_fast_assign(__entry->reg = reg;),
+ TP_printk("event: TraceHub e1000e mac register: 0x%08x",
+ __entry->reg)
+);
+
+#endif
+/* This must be outside ifdef _TRACE_E1000E_TRACE_H */
+/* This trace include file is not located in the .../include/trace
+ * with the kernel tracepoint definitions, because we're a loadable
+ * module.
+ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE e1000e_trace
+
+#include <trace/define_trace.h>
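
TRACE_EVENT(e1000e_trace_mac_register, ...) above generates a trace_e1000e_trace_mac_register() call for the driver to invoke; a minimal hypothetical caller (the wrapper function is not part of this patch, only the generated tracepoint name is):

#include "e1000e_trace.h"

/* Hypothetical caller; only trace_e1000e_trace_mac_register() comes from the
 * TRACE_EVENT above, the wrapper is illustrative.
 */
static void e1000e_log_mac_register(u32 reg)
{
	/* No-op unless the e1000e_trace:e1000e_trace_mac_register event is enabled */
	trace_e1000e_trace_mac_register(reg);
}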
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 8515e00d1b40..721f86fd5802 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -110,9 +110,9 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
static int e1000_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
+ u32 speed, supported, advertising, lp_advertising, lpa_t;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 speed, supported, advertising;
if (hw->phy.media_type == e1000_media_type_copper) {
supported = (SUPPORTED_10baseT_Half |
@@ -120,7 +120,9 @@ static int e1000_get_link_ksettings(struct net_device *netdev,
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full |
+ SUPPORTED_Asym_Pause |
SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
SUPPORTED_TP);
if (hw->phy.type == e1000_phy_ife)
supported &= ~SUPPORTED_1000baseT_Full;
@@ -192,10 +194,16 @@ static int e1000_get_link_ksettings(struct net_device *netdev,
if (hw->phy.media_type != e1000_media_type_copper)
cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ lpa_t = mii_stat1000_to_ethtool_lpa_t(adapter->phy_regs.stat1000);
+ lp_advertising = lpa_t |
+ mii_lpa_to_ethtool_lpa_t(adapter->phy_regs.lpa);
+
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
supported);
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
return 0;
}
@@ -639,7 +647,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
/* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
@@ -650,12 +658,14 @@ static void e1000_get_drvinfo(struct net_device *netdev,
(adapter->eeprom_vers & 0x0FF0) >> 4,
(adapter->eeprom_vers & 0x000F));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
static void e1000_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -666,7 +676,9 @@ static void e1000_get_ringparam(struct net_device *netdev,
}
static int e1000_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_ring *temp_tx = NULL, *temp_rx = NULL;
@@ -904,6 +916,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
mask |= BIT(18);
break;
default:
@@ -1571,6 +1584,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index bcf680e83811..29f9fae35f42 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -114,6 +114,14 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_LNP_I219_V20 0x550F
#define E1000_DEV_ID_PCH_LNP_I219_LM21 0x5510
#define E1000_DEV_ID_PCH_LNP_I219_V21 0x5511
+#define E1000_DEV_ID_PCH_ARL_I219_LM24 0x57A0
+#define E1000_DEV_ID_PCH_ARL_I219_V24 0x57A1
+#define E1000_DEV_ID_PCH_PTP_I219_LM25 0x57B3
+#define E1000_DEV_ID_PCH_PTP_I219_V25 0x57B4
+#define E1000_DEV_ID_PCH_PTP_I219_LM26 0x57B5
+#define E1000_DEV_ID_PCH_PTP_I219_V26 0x57B6
+#define E1000_DEV_ID_PCH_PTP_I219_LM27 0x57B7
+#define E1000_DEV_ID_PCH_PTP_I219_V27 0x57B8
#define E1000_REVISION_4 4
@@ -141,6 +149,7 @@ enum e1000_mac_type {
e1000_pch_adp,
e1000_pch_mtp,
e1000_pch_lnp,
+ e1000_pch_ptp,
};
enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 5e4fc9b4e2ad..0c7fd10312c8 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -322,6 +322,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -468,6 +469,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -714,6 +716,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1009,8 +1012,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
{
u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
- u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
- u16 lat_enc_d = 0; /* latency decoded */
+ u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
+ u32 lat_enc_d = 0; /* latency decoded */
u16 lat_enc = 0; /* latency encoded */
if (link) {
@@ -1681,6 +1684,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2137,6 +2141,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3182,6 +3187,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4122,6 +4128,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
@@ -4136,9 +4143,9 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
return ret_val;
if (!(data & valid_csum_mask)) {
- e_dbg("NVM Checksum Invalid\n");
+ e_dbg("NVM Checksum valid bit not set\n");
- if (hw->mac.type < e1000_pch_cnp) {
+ if (hw->mac.type < e1000_pch_tgp) {
data |= valid_csum_mask;
ret_val = e1000_write_nvm(hw, word, 1, &data);
if (ret_val)
@@ -6021,3 +6028,43 @@ const struct e1000_info e1000_pch_tgp_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};
+
+const struct e1000_info e1000_pch_adp_info = {
+ .mac = e1000_pch_adp,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9022,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
+
+const struct e1000_info e1000_pch_mtp_info = {
+ .mac = e1000_pch_mtp,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9022,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 51512a73fdd0..5df7ad93f3d7 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -957,7 +957,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and Tx flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 44e2dc8328a2..bd7ef59b1f2e 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -23,11 +23,12 @@
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
-#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/suspend.h>
#include "e1000.h"
+#define CREATE_TRACE_POINTS
+#include "e1000e_trace.h"
char e1000e_driver_name[] = "e1000e";
@@ -52,6 +53,8 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pch_spt] = &e1000_pch_spt_info,
[board_pch_cnp] = &e1000_pch_cnp_info,
[board_pch_tgp] = &e1000_pch_tgp_info,
+ [board_pch_adp] = &e1000_pch_adp_info,
+ [board_pch_mtp] = &e1000_pch_mtp_info,
};
struct e1000_reg_info {
@@ -1387,26 +1390,18 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
/* page alloc/put takes too long and effects small
* packet throughput, so unsplit small packets and
- * save the alloc/put only valid in softirq (napi)
- * context to call kmap_*
+ * save the alloc/put
*/
if (l1 && (l1 <= copybreak) &&
((length + l1) <= adapter->rx_ps_bsize0)) {
- u8 *vaddr;
-
ps_page = &buffer_info->ps_pages[0];
- /* there is no documentation about how to call
- * kmap_atomic, so we can't hold the mapping
- * very long
- */
dma_sync_single_for_cpu(&pdev->dev,
ps_page->dma,
PAGE_SIZE,
DMA_FROM_DEVICE);
- vaddr = kmap_atomic(ps_page->page);
- memcpy(skb_tail_pointer(skb), vaddr, l1);
- kunmap_atomic(vaddr);
+ memcpy(skb_tail_pointer(skb),
+ page_address(ps_page->page), l1);
dma_sync_single_for_device(&pdev->dev,
ps_page->dma,
PAGE_SIZE,
@@ -1606,11 +1601,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
*/
if (length <= copybreak &&
skb_tailroom(skb) >= length) {
- u8 *vaddr;
- vaddr = kmap_atomic(buffer_info->page);
- memcpy(skb_tail_pointer(skb), vaddr,
+ memcpy(skb_tail_pointer(skb),
+ page_address(buffer_info->page),
length);
- kunmap_atomic(vaddr);
/* re-use the page, so don't erase
* buffer_info->page
*/
@@ -3551,6 +3544,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -3614,10 +3608,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
return -EINVAL;
- /* flags reserved for future extensions - must be zero */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
@@ -3925,9 +3915,9 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
return;
- if (info->adjfreq) {
+ if (info->adjfine) {
/* restore the previous ptp frequency delta */
- ret_val = info->adjfreq(info, adapter->ptp_delta);
+ ret_val = info->adjfine(info, adapter->ptp_delta);
} else {
/* set the default base frequency if no adjustment possible */
ret_val = e1000e_get_base_timinca(adapter, &timinca);
@@ -4070,6 +4060,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -5296,31 +5287,6 @@ static void e1000_watchdog_task(struct work_struct *work)
ew32(TARC(0), tarc0);
}
- /* disable TSO for pcie and 10/100 speeds, to avoid
- * some hardware issues
- */
- if (!(adapter->flags & FLAG_TSO_FORCE)) {
- switch (adapter->link_speed) {
- case SPEED_10:
- case SPEED_100:
- e_info("10/100 speed: disabling TSO\n");
- netdev->features &= ~NETIF_F_TSO;
- netdev->features &= ~NETIF_F_TSO6;
- break;
- case SPEED_1000:
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- break;
- default:
- /* oops */
- break;
- }
- if (hw->mac.type == e1000_pch_spt) {
- netdev->features &= ~NETIF_F_TSO;
- netdev->features &= ~NETIF_F_TSO6;
- }
- }
-
/* enable transmits in the hardware, need to do this
* after setting TARC(0)
*/
@@ -5477,7 +5443,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
mss = skb_shinfo(skb)->gso_size;
if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
@@ -5849,7 +5815,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* points to just header, pull a few bytes of payload from
* frags into skb->data
*/
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr_len = skb_tcp_all_headers(skb);
/* we do this workaround for ES2LAN, but it is un-necessary,
* avoiding it could save a lot of cycles
*/
@@ -5939,9 +5905,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
e1000_tx_queue(tx_ring, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(tx_ring,
- (MAX_SKB_FRAGS *
+ ((MAX_SKB_FRAGS + 1) *
DIV_ROUND_UP(PAGE_SIZE,
- adapter->tx_fifo_limit) + 2));
+ adapter->tx_fifo_limit) + 4));
if (!netdev_xmit_more() ||
netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
@@ -6345,11 +6311,13 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
u32 mac_data;
u16 phy_data;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
/* Request ME configure the device for S0ix */
mac_data = er32(H2ME);
mac_data |= E1000_H2ME_START_DPG;
mac_data &= ~E1000_H2ME_EXIT_DPG;
+ trace_e1000e_trace_mac_register(mac_data);
ew32(H2ME, mac_data);
} else {
/* Request driver configure the device to S0ix */
@@ -6494,11 +6462,17 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
u16 phy_data;
u32 i = 0;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
+ /* Keep the GPT clock enabled for CSME */
+ mac_data = er32(FEXTNVM);
+ mac_data |= BIT(3);
+ ew32(FEXTNVM, mac_data);
/* Request ME unconfigure the device from S0ix */
mac_data = er32(H2ME);
mac_data &= ~E1000_H2ME_START_DPG;
mac_data |= E1000_H2ME_EXIT_DPG;
+ trace_e1000e_trace_mac_register(mac_data);
ew32(H2ME, mac_data);
/* Poll up to 2.5 seconds for ME to unconfigure DPG.
@@ -7264,7 +7238,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
ret_val = e1000_read_pba_string_generic(hw, pba_str,
E1000_PBANUM_LENGTH);
if (ret_val)
- strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
+ strscpy((char *)pba_str, "Unknown", sizeof(pba_str));
e_info("MAC: %d, PHY: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, pba_str);
}
@@ -7389,9 +7363,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
resource_size_t flash_start, flash_len;
static int cards_found;
u16 aspm_disable_flag = 0;
- int bars, i, err, pci_using_dac;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
+ int bars, i, err;
s32 ret_val = 0;
if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
@@ -7405,17 +7379,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
bars = pci_select_bars(pdev, IORESOURCE_MEM);
@@ -7424,9 +7392,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_pci_reg;
- /* AER (Advanced Error Reporting) hooks */
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
/* PCI config space info */
err = pci_save_state(pdev);
@@ -7482,8 +7447,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &e1000e_netdev_ops;
e1000e_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
- strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+ netif_napi_add(netdev, &adapter->napi, e1000e_poll);
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len;
@@ -7535,6 +7500,32 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_RXCSUM |
NETIF_F_HW_CSUM);
+ /* disable TSO for pcie and 10/100 speeds to avoid
+ * some hardware issues and for i219 to fix transfer
+ * speed being capped at 60%
+ */
+ if (!(adapter->flags & FLAG_TSO_FORCE)) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ case SPEED_100:
+ e_info("10/100 speed: disabling TSO\n");
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ break;
+ case SPEED_1000:
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ break;
+ default:
+ /* oops */
+ break;
+ }
+ if (hw->mac.type == e1000_pch_spt) {
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ }
+ }
+
/* Set user-changeable features (subset of all device features) */
netdev->hw_features = netdev->features;
netdev->hw_features |= NETIF_F_RXFCS;
@@ -7551,10 +7542,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
/* MTU range: 68 - max_hw_frame_size */
netdev->min_mtu = ETH_MIN_MTU;
@@ -7681,7 +7670,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->mac.type >= e1000_pch_cnp)
adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
- strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
+ strscpy(netdev->name, "eth%d", sizeof(netdev->name));
err = register_netdev(netdev);
if (err)
goto err_register;
@@ -7716,7 +7705,6 @@ err_flashmap:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -7783,9 +7771,6 @@ static void e1000_remove(struct pci_dev *pdev)
free_netdev(netdev);
- /* AER disable */
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
@@ -7902,22 +7887,30 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ARL_I219_LM24), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ARL_I219_V24), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM25), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V25), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM26), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V26), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM27), board_pch_mtp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V27), board_pch_mtp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index ebe121db4307..3132d8f2f207 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -101,8 +101,6 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
* demoted to the most advanced interrupt mode available.
*/
E1000_PARAM(IntMode, "Interrupt Mode");
-#define MAX_INTMODE 2
-#define MIN_INTMODE 0
/* Enable Smart Power Down of the PHY
*
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0f0efee5fc8e..08c3d477dd6f 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2,6 +2,7 @@
/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "e1000.h"
+#include <linux/ethtool.h>
static s32 e1000_wait_autoneg(struct e1000_hw *hw);
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
@@ -146,11 +147,11 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Read did not complete\n");
+ e_dbg("MDI Read PHY Reg Address %d did not complete\n", offset);
return -E1000_ERR_PHY;
}
if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Error\n");
+ e_dbg("MDI Read PHY Reg Address %d Error\n", offset);
return -E1000_ERR_PHY;
}
if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
@@ -210,11 +211,11 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
break;
}
if (!(mdic & E1000_MDIC_READY)) {
- e_dbg("MDI Write did not complete\n");
+ e_dbg("MDI Write PHY Reg Address %d did not complete\n", offset);
return -E1000_ERR_PHY;
}
if (mdic & E1000_MDIC_ERROR) {
- e_dbg("MDI Error\n");
+ e_dbg("MDI Write PHY Red Address %d Error\n", offset);
return -E1000_ERR_PHY;
}
if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
@@ -1011,6 +1012,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
*/
mii_autoneg_adv_reg &=
~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+ phy->autoneg_advertised &=
+ ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
break;
case e1000_fc_rx_pause:
/* Rx Flow control is enabled, and Tx Flow control is
@@ -1024,6 +1027,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
*/
mii_autoneg_adv_reg |=
(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+ phy->autoneg_advertised |=
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
break;
case e1000_fc_tx_pause:
/* Tx Flow control is enabled, and Rx Flow control is
@@ -1031,6 +1036,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
*/
mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM;
mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP;
+ phy->autoneg_advertised |= ADVERTISED_Asym_Pause;
+ phy->autoneg_advertised &= ~ADVERTISED_Pause;
break;
case e1000_fc_full:
/* Flow control (both Rx and Tx) is enabled by a software
@@ -1038,6 +1045,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
*/
mii_autoneg_adv_reg |=
(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+ phy->autoneg_advertised |=
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
break;
default:
e_dbg("Flow control param set incorrectly\n");
@@ -2697,9 +2706,14 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
void e1000_power_up_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
+ int ret;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, MII_BMCR, &mii_reg);
+ ret = e1e_rphy(hw, MII_BMCR, &mii_reg);
+ if (ret) {
+ e_dbg("Error reading PHY register\n");
+ return;
+ }
mii_reg &= ~BMCR_PDOWN;
e1e_wphy(hw, MII_BMCR, mii_reg);
}
@@ -2715,9 +2729,14 @@ void e1000_power_up_phy_copper(struct e1000_hw *hw)
void e1000_power_down_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
+ int ret;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, MII_BMCR, &mii_reg);
+ ret = e1e_rphy(hw, MII_BMCR, &mii_reg);
+ if (ret) {
+ e_dbg("Error reading PHY register\n");
+ return;
+ }
mii_reg |= BMCR_PDOWN;
e1e_wphy(hw, MII_BMCR, mii_reg);
usleep_range(1000, 2000);
@@ -3037,7 +3056,11 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
return 0;
/* Do not apply workaround if in PHY loopback bit 14 set */
- e1e_rphy(hw, MII_BMCR, &data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &data);
+ if (ret_val) {
+ e_dbg("Error reading PHY register\n");
+ return ret_val;
+ }
if (data & BMCR_LOOPBACK)
return 0;
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index eb5c014c02fb..def4566a916f 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -15,32 +15,25 @@
#endif
/**
- * e1000e_phc_adjfreq - adjust the frequency of the hardware clock
+ * e1000e_phc_adjfine - adjust the frequency of the hardware clock
* @ptp: ptp clock structure
- * @delta: Desired frequency change in parts per billion
+ * @delta: Desired frequency change in scaled parts per million
*
* Adjust the frequency of the PHC cycle counter by the indicated delta from
* the base frequency.
+ *
+ * Scaled parts per million is ppm but with a 16 bit binary fractional field.
**/
-static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+static int e1000e_phc_adjfine(struct ptp_clock_info *ptp, long delta)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
struct e1000_hw *hw = &adapter->hw;
- bool neg_adj = false;
unsigned long flags;
- u64 adjustment;
- u32 timinca, incvalue;
+ u64 incvalue;
+ u32 timinca;
s32 ret_val;
- if ((delta > ptp->max_adj) || (delta <= -1000000000))
- return -EINVAL;
-
- if (delta < 0) {
- neg_adj = true;
- delta = -delta;
- }
-
/* Get the System Time Register SYSTIM base frequency */
ret_val = e1000e_get_base_timinca(adapter, &timinca);
if (ret_val)
@@ -49,12 +42,7 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
spin_lock_irqsave(&adapter->systim_lock, flags);
incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
-
- adjustment = incvalue;
- adjustment *= delta;
- adjustment = div_u64(adjustment, 1000000000);
-
- incvalue = neg_adj ? (incvalue - adjustment) : (incvalue + adjustment);
+ incvalue = adjust_by_scaled_ppm(incvalue, delta);
timinca &= ~E1000_TIMINCA_INCVALUE_MASK;
timinca |= incvalue;
@@ -260,7 +248,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
.n_per_out = 0,
.n_pins = 0,
.pps = 0,
- .adjfreq = e1000e_phc_adjfreq,
+ .adjfine = e1000e_phc_adjfine,
.adjtime = e1000e_phc_adjtime,
.gettimex64 = e1000e_phc_gettimex,
.settime64 = e1000e_phc_settime,
@@ -299,6 +287,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_adp:
case e1000_pch_mtp:
case e1000_pch_lnp:
+ case e1000_pch_ptp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
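
The ptp.c hunks above convert the clock adjustment callback from .adjfreq, which took a delta in parts per billion, to .adjfine, which takes scaled parts per million: ppm with a 16-bit binary fractional field, applied to the TIMINCA increment value via adjust_by_scaled_ppm(). A minimal stand-alone C sketch of that scaling, assuming the helper reduces to base * delta / (10^6 * 2^16) with sign handling; the rounding details of the real kernel helper are omitted here:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-alone version of the scaled-ppm adjustment used by
 * .adjfine callbacks: scaled_ppm is ppm with a 16-bit binary fractional
 * part, so the adjustment factor is scaled_ppm / (1000000 << 16).
 */
static uint64_t adjust_by_scaled_ppm_sketch(uint64_t base, long scaled_ppm)
{
	int neg = scaled_ppm < 0;
	uint64_t ppm = neg ? -scaled_ppm : scaled_ppm;
	/* base * ppm / (10^6 * 2^16), done in 64-bit to avoid overflow */
	uint64_t diff = (base * ppm) / (1000000ULL << 16);

	return neg ? base - diff : base + diff;
}

int main(void)
{
	/* e.g. an increment value slowed down by 10.5 ppm */
	uint64_t incvalue = 0x800000;
	long scaled_ppm = -(10L << 16) - (1L << 15);	/* -10.5 ppm */

	printf("adjusted incvalue: %llu\n",
	       (unsigned long long)adjust_by_scaled_ppm_sketch(incvalue, scaled_ppm));
	return 0;
}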
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 0d37f011d0ce..d53369e30040 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -502,7 +502,9 @@ static void fm10k_set_msglevel(struct net_device *netdev, u32 data)
}
static void fm10k_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
@@ -517,7 +519,9 @@ static void fm10k_get_ringparam(struct net_device *netdev,
}
static int fm10k_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
struct fm10k_ring *temp_ring;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 3362f26d7f99..fc373472e4e1 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -32,6 +32,8 @@ struct workqueue_struct *fm10k_workqueue;
**/
static int __init fm10k_init_module(void)
{
+ int ret;
+
pr_info("%s\n", fm10k_driver_string);
pr_info("%s\n", fm10k_copyright);
@@ -43,7 +45,13 @@ static int __init fm10k_init_module(void)
fm10k_dbg_init();
- return fm10k_register_pci_driver();
+ ret = fm10k_register_pci_driver();
+ if (ret) {
+ fm10k_dbg_exit();
+ destroy_workqueue(fm10k_workqueue);
+ }
+
+ return ret;
}
module_init(fm10k_init_module);
@@ -1595,8 +1603,7 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(interface->netdev, &q_vector->napi,
- fm10k_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(interface->netdev, &q_vector->napi, fm10k_poll);
/* tie q_vector and interface together */
interface->q_vector[v_idx] = q_vector;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 30ca9ee1900b..87fa5874f16e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -809,7 +809,7 @@ static s32 fm10k_mbx_read(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
* @hw: pointer to hardware structure
* @mbx: pointer to mailbox
*
- * This function copies the message from the the message array to mbmem
+ * This function copies the message from the message array to mbmem
**/
static void fm10k_mbx_write(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx)
{
@@ -1825,7 +1825,7 @@ static void fm10k_sm_mbx_process_error(struct fm10k_mbx_info *mbx)
fm10k_sm_mbx_connect_reset(mbx);
break;
case FM10K_STATE_CONNECT:
- /* try connnecting at lower version */
+ /* try connecting at lower version */
if (mbx->remote) {
while (mbx->local > 1)
mbx->local--;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 2cca9e84e31e..34ab5ff9823b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1229,10 +1229,10 @@ static void fm10k_get_stats64(struct net_device *netdev,
continue;
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
@@ -1245,10 +1245,10 @@ static void fm10k_get_stats64(struct net_device *netdev,
continue;
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
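
The fm10k_netdev.c hunk above moves the per-ring statistics readers from u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() to the plain begin/retry pair; the loop shape is unchanged: re-read the counters until the sequence value seen at the start is even and still unchanged at the end. A stand-alone C11 sketch of that retry pattern, with made-up names and simplified memory ordering (this is not the kernel's u64_stats implementation):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Seqcount-style sync: writers bump the sequence to odd before updating
 * and back to even afterwards; readers retry until they see a stable,
 * even sequence.
 */
struct stats_sync {
	atomic_uint seq;
	uint64_t packets;
	uint64_t bytes;
};

static void stats_update(struct stats_sync *s, uint64_t pkts, uint64_t len)
{
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* odd: write in progress */
	s->packets += pkts;
	s->bytes += len;
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release); /* even: write done */
}

static void stats_read(struct stats_sync *s, uint64_t *pkts, uint64_t *len)
{
	unsigned int start;

	do {
		start = atomic_load_explicit(&s->seq, memory_order_acquire);
		*pkts = s->packets;
		*len = s->bytes;
	} while ((start & 1) ||
		 start != atomic_load_explicit(&s->seq, memory_order_acquire));
}

int main(void)
{
	struct stats_sync s = { 0 };
	uint64_t p, b;

	stats_update(&s, 1, 1500);
	stats_read(&s, &p, &b);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}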
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index b473cb7d7c57..d748b98274e7 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -3,7 +3,6 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/aer.h>
#include "fm10k.h"
@@ -2127,8 +2126,6 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
pci_save_state(pdev);
@@ -2227,7 +2224,6 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_netdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -2281,8 +2277,6 @@ static void fm10k_remove(struct pci_dev *pdev)
pci_release_mem_regions(pdev);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index 21eff0895a7a..75cbdf2dbbe3 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -78,7 +78,7 @@ static s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id,
* @string: Pointer to location of destination string
*
* This function pulls the string back out of the attribute and will place
- * it in the array pointed by by string. It will return success if provided
+ * it in the array pointed by string. It will return success if provided
* with a valid pointers.
**/
static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string)
@@ -143,7 +143,7 @@ s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id,
* @vlan: location of buffer to store VLAN
*
* This function pulls the MAC address back out of the attribute and will
- * place it in the array pointed by by mac_addr. It will return success
+ * place it in the array pointed by mac_addr. It will return success
* if provided with a valid pointers.
**/
s32 fm10k_tlv_attr_get_mac_vlan(u32 *attr, u8 *mac_addr, u16 *vlan)
@@ -584,7 +584,7 @@ s32 fm10k_tlv_msg_parse(struct fm10k_hw *hw, u32 *msg,
* @mbx: Unused mailbox pointer
*
* This function is a default handler for unrecognized messages. At a
- * a minimum it just indicates that the message requested was
+ * minimum it just indicates that the message requested was
* unimplemented.
**/
s32 fm10k_tlv_msg_error(struct fm10k_hw __always_unused *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 4d939af0a626..6e310a539467 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -10,7 +10,6 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/iommu.h>
@@ -33,10 +32,12 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/udp_tunnel.h>
#include <net/xdp_sock.h>
+#include <linux/bitfield.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#include <linux/net/intel/i40e_client.h>
@@ -144,6 +145,7 @@ enum i40e_state_t {
__I40E_VIRTCHNL_OP_PENDING,
__I40E_RECOVERY_MODE,
__I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
+ __I40E_IN_REMOVE,
__I40E_VFS_RELEASING,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
@@ -174,8 +176,7 @@ enum i40e_interrupt_policy {
struct i40e_lump_tracking {
u16 num_entries;
- u16 search_hint;
- u16 list[0];
+ u16 list[];
#define I40E_PILE_VALID_BIT 0x8000
#define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
};
@@ -398,6 +399,20 @@ struct i40e_ddp_old_profile_list {
I40E_FLEX_54_MASK | I40E_FLEX_55_MASK | \
I40E_FLEX_56_MASK | I40E_FLEX_57_MASK)
+#define I40E_QINT_TQCTL_VAL(qp, vector, nextq_type) \
+ (I40E_QINT_TQCTL_CAUSE_ENA_MASK | \
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | \
+ ((vector) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | \
+ ((qp) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | \
+ (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT))
+
+#define I40E_QINT_RQCTL_VAL(qp, vector, nextq_type) \
+ (I40E_QINT_RQCTL_CAUSE_ENA_MASK | \
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | \
+ ((vector) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | \
+ ((qp) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | \
+ (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT))
+
struct i40e_flex_pit {
struct list_head list;
u16 src_offset;
@@ -565,6 +580,7 @@ struct i40e_pf {
#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
#define I40E_FLAG_RS_FEC BIT(25)
#define I40E_FLAG_BASE_R_FEC BIT(26)
+#define I40E_FLAG_VF_VLAN_PRUNING BIT(27)
/* TOTAL_PORT_SHUTDOWN
* Allows to physically disable the link on the NIC's port.
* If enabled, (after link down request from the OS)
@@ -848,12 +864,17 @@ struct i40e_vsi {
struct rtnl_link_stats64 net_stats_offsets;
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
- u32 tx_restart;
- u32 tx_busy;
+ u64 tx_restart;
+ u64 tx_busy;
u64 tx_linearize;
u64 tx_force_wb;
- u32 rx_buf_failed;
- u32 rx_page_failed;
+ u64 tx_stopped;
+ u64 rx_buf_failed;
+ u64 rx_page_failed;
+ u64 rx_page_reuse;
+ u64 rx_page_alloc;
+ u64 rx_page_waive;
+ u64 rx_page_busy;
/* These are containers of ring pointers, allocated at run-time */
struct i40e_ring **rx_rings;
@@ -971,6 +992,7 @@ struct i40e_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
+ int irq_num; /* IRQ assigned to this q_vector */
} ____cacheline_internodealigned_in_smp;
/* lan device */
@@ -1087,6 +1109,21 @@ static inline void i40e_write_fd_input_set(struct i40e_pf *pf,
(u32)(val & 0xFFFFFFFFULL));
}
+/**
+ * i40e_get_pf_count - get PCI PF count.
+ * @hw: pointer to a hw.
+ *
+ * Reports the function number of the highest PCI physical
+ * function plus 1 as it is loaded from the NVM.
+ *
+ * Return: PCI PF count.
+ **/
+static inline u32 i40e_get_pf_count(struct i40e_hw *hw)
+{
+ return FIELD_GET(I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK,
+ rd32(hw, I40E_GLGEN_PCIFCNCNT));
+}
+
/* needed by i40e_ethtool.c */
int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
@@ -1250,9 +1287,9 @@ void i40e_ptp_stop(struct i40e_pf *pf);
int i40e_ptp_alloc_pins(struct i40e_pf *pf);
int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
-i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
+int i40e_get_partition_bw_setting(struct i40e_pf *pf);
+int i40e_set_partition_bw_setting(struct i40e_pf *pf);
+int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
@@ -1270,4 +1307,18 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
struct i40e_cloud_filter *filter,
bool add);
+
+/**
+ * i40e_is_tc_mqprio_enabled - check if TC MQPRIO is enabled on PF
+ * @pf: pointer to a pf.
+ *
+ * Check and return value of flag I40E_FLAG_TC_MQPRIO.
+ *
+ * Return: I40E_FLAG_TC_MQPRIO set state.
+ **/
+static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
+{
+ return pf->flags & I40E_FLAG_TC_MQPRIO;
+}
+
#endif /* _I40E_H_ */
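
The new i40e_get_pf_count() helper above pulls the PF count out of the GLGEN_PCIFCNCNT register with FIELD_GET(), which for a contiguous mask means: mask the register value, then shift right by the mask's lowest set bit. A stand-alone C sketch of that extraction; the mask value and register readout below are invented for illustration and are not the real I40E definitions:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical register layout for illustration only: a 3-bit "PF count"
 * field at bits 18:16 of a 32-bit register. This is NOT the real
 * I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK.
 */
#define EXAMPLE_PFCNT_MASK	0x00070000u

/* Stand-alone equivalent of FIELD_GET(mask, reg) for a contiguous mask:
 * mask the register, then shift down by the mask's lowest set bit.
 */
static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	unsigned int shift = __builtin_ctz(mask);

	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t reg = 0x00050000;	/* pretend register readout */

	printf("pf count: %u\n", field_get(EXAMPLE_PFCNT_MASK, reg));
	return 0;
}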
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 593912b17609..86fac8f959bb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -47,9 +47,9 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
* i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ int ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
i40e_mem_atq_ring,
@@ -74,9 +74,9 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
* i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ int ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
i40e_mem_arq_ring,
@@ -115,11 +115,11 @@ static void i40e_free_adminq_arq(struct i40e_hw *hw)
* i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
- i40e_status ret_code;
struct i40e_aq_desc *desc;
struct i40e_dma_mem *bi;
+ int ret_code;
int i;
/* We'll be allocating the buffer info memory first, then we can
@@ -182,10 +182,10 @@ unwind_alloc_arq_bufs:
* i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+static int i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
- i40e_status ret_code;
struct i40e_dma_mem *bi;
+ int ret_code;
int i;
/* No mapped memory needed yet, just the buffer info structures */
@@ -266,9 +266,9 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
*
* Configure base address and length registers for the transmit queue
**/
-static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
+static int i40e_config_asq_regs(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
@@ -295,9 +295,9 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
*
* Configure base address and length registers for the receive (event queue)
**/
-static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
+static int i40e_config_arq_regs(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
@@ -334,9 +334,9 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static i40e_status i40e_init_asq(struct i40e_hw *hw)
+static int i40e_init_asq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
@@ -393,9 +393,9 @@ init_adminq_exit:
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static i40e_status i40e_init_arq(struct i40e_hw *hw)
+static int i40e_init_arq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
@@ -445,9 +445,9 @@ init_adminq_exit:
*
* The main shutdown routine for the Admin Send Queue
**/
-static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+static int i40e_shutdown_asq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
mutex_lock(&hw->aq.asq_mutex);
@@ -479,9 +479,9 @@ shutdown_asq_out:
*
* The main shutdown routine for the Admin Receive Queue
**/
-static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+static int i40e_shutdown_arq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
mutex_lock(&hw->aq.arq_mutex);
@@ -582,12 +582,12 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
* - hw->aq.arq_buf_size
* - hw->aq.asq_buf_size
**/
-i40e_status i40e_init_adminq(struct i40e_hw *hw)
+int i40e_init_adminq(struct i40e_hw *hw)
{
u16 cfg_ptr, oem_hi, oem_lo;
u16 eetrack_lo, eetrack_hi;
- i40e_status ret_code;
int retry = 0;
+ int ret_code;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
@@ -769,32 +769,33 @@ static bool i40e_asq_done(struct i40e_hw *hw)
}
/**
- * i40e_asq_send_command - send command to Admin Queue
+ * i40e_asq_send_command_atomic_exec - send command to Admin Queue
* @hw: pointer to the hw struct
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buff: buffer to use for indirect commands
* @buff_size: size of buffer for indirect commands
* @cmd_details: pointer to command details structure
+ * @is_atomic_context: is the function called in an atomic context?
*
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
**/
-i40e_status i40e_asq_send_command(struct i40e_hw *hw,
- struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details)
+static int
+i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context)
{
- i40e_status status = 0;
struct i40e_dma_mem *dma_buff = NULL;
struct i40e_asq_cmd_details *details;
struct i40e_aq_desc *desc_on_ring;
bool cmd_completed = false;
u16 retval = 0;
+ int status = 0;
u32 val = 0;
- mutex_lock(&hw->aq.asq_mutex);
-
if (hw->aq.asq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: Admin queue not initialized.\n");
@@ -910,7 +911,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
*/
if (i40e_asq_done(hw))
break;
- udelay(50);
+
+ if (is_atomic_context)
+ udelay(50);
+ else
+ usleep_range(40, 60);
+
total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
@@ -963,10 +969,95 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
}
asq_send_command_error:
+ return status;
+}
+
+/**
+ * i40e_asq_send_command_atomic - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ * @is_atomic_context: is the function called in an atomic context?
+ *
+ * Acquires the lock and calls the main send command execution
+ * routine.
+ **/
+int
+i40e_asq_send_command_atomic(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context)
+{
+ int status;
+
+ mutex_lock(&hw->aq.asq_mutex);
+ status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
+ cmd_details,
+ is_atomic_context);
+
+ mutex_unlock(&hw->aq.asq_mutex);
+ return status;
+}
+
+int
+i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ return i40e_asq_send_command_atomic(hw, desc, buff, buff_size,
+ cmd_details, false);
+}
+
+/**
+ * i40e_asq_send_command_atomic_v2 - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ * @is_atomic_context: is the function called in an atomic context?
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Acquires the lock and calls the main send command execution
+ * routine. Returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ **/
+int
+i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context,
+ enum i40e_admin_queue_err *aq_status)
+{
+ int status;
+
+ mutex_lock(&hw->aq.asq_mutex);
+ status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
+ buff_size,
+ cmd_details,
+ is_atomic_context);
+ if (aq_status)
+ *aq_status = hw->aq.asq_last_status;
mutex_unlock(&hw->aq.asq_mutex);
return status;
}
+int
+i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status)
+{
+ return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
+ cmd_details, true, aq_status);
+}
+
/**
* i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
* @desc: pointer to the temp descriptor (non DMA mem)
@@ -993,14 +1084,14 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
* the contents through e. It can also return how many events are
* left to process through 'pending'
**/
-i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
- struct i40e_arq_event_info *e,
- u16 *pending)
+int i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
{
- i40e_status ret_code = 0;
u16 ntc = hw->aq.arq.next_to_clean;
struct i40e_aq_desc *desc;
struct i40e_dma_mem *bi;
+ int ret_code = 0;
u16 desc_idx;
u16 datalen;
u16 flags;
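
The adminq.c rework above splits the send path into a lock-free i40e_asq_send_command_atomic_exec() body plus wrappers that take asq_mutex, and the *_v2 wrapper additionally copies hw->aq.asq_last_status into the caller's aq_status while the mutex is still held, so a concurrent sender cannot overwrite it before the caller reads it. A stand-alone pthread sketch of that wrapper pattern, with invented names and a faked status value (it is not the i40e code):

#include <pthread.h>
#include <stdio.h>

struct fake_aq {
	pthread_mutex_t lock;
	int last_status;
};

/* Lock-free body: runs the command and records the "hardware" status. */
static int send_exec(struct fake_aq *aq, int cmd)
{
	aq->last_status = (cmd == 0) ? -1 : 0;	/* pretend HW response */
	return aq->last_status;
}

/* Wrapper: serializes senders and snapshots last_status under the lock. */
static int send_locked_v2(struct fake_aq *aq, int cmd, int *aq_status)
{
	int ret;

	pthread_mutex_lock(&aq->lock);
	ret = send_exec(aq, cmd);
	if (aq_status)
		*aq_status = aq->last_status;	/* snapshot before unlock */
	pthread_mutex_unlock(&aq->lock);

	return ret;
}

int main(void)
{
	struct fake_aq aq = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int status = 0;
	int ret = send_locked_v2(&aq, 1, &status);

	printf("ret=%d status=%d\n", ret, status);
	return 0;
}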
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 140b677f114d..3357d65a906b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -11,8 +11,8 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0009
-#define I40E_FW_API_VERSION_MINOR_X710 0x0009
+#define I40E_FW_API_VERSION_MINOR_X722 0x000C
+#define I40E_FW_API_VERSION_MINOR_X710 0x000F
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
@@ -1795,9 +1795,11 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
/* Set Loopback mode (0x0618) */
struct i40e_aqc_set_lb_mode {
__le16 lb_mode;
-#define I40E_AQ_LB_PHY_LOCAL 0x01
-#define I40E_AQ_LB_PHY_REMOTE 0x02
-#define I40E_AQ_LB_MAC_LOCAL 0x04
+#define I40E_LEGACY_LOOPBACK_NVM_VER 0x6000
+#define I40E_AQ_LB_MAC_LOCAL 0x01
+#define I40E_AQ_LB_PHY_LOCAL 0x05
+#define I40E_AQ_LB_PHY_REMOTE 0x06
+#define I40E_AQ_LB_MAC_LOCAL_LEGACY 0x04
u8 reserved[14];
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index cb8689222c8b..a6c9a9e343d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -20,16 +20,16 @@ enum i40e_memory_type {
};
/* prototype for functions used for dynamic memory allocation */
-i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem,
- enum i40e_memory_type type,
- u64 size, u32 alignment);
-i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem);
-i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem,
- u32 size);
-i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem);
+int i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+int i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+int i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+int i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index ea2bb0140a6e..639c5a1ca853 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -177,6 +177,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
"Cannot locate client instance close routine\n");
return;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n");
+ return;
+ }
cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
i40e_client_release_qvlist(&cdev->lan_info);
@@ -429,7 +433,6 @@ void i40e_client_subtask(struct i40e_pf *pf)
/* Remove failed client instance */
clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
&cdev->state);
- i40e_client_del_instance(pf);
return;
}
}
@@ -538,9 +541,9 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
{
struct i40e_pf *pf = ldev->pf;
struct i40e_hw *hw = &pf->hw;
- i40e_status err;
+ int err;
- err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP,
+ err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_RDMA,
0, msg, len, NULL);
if (err)
dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
@@ -671,7 +674,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
bool update = true;
- i40e_status err;
+ int err;
/* TODO: for now do not allow setting VF's VSI setting */
if (is_vf)
@@ -683,8 +686,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (err) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
@@ -711,8 +714,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (err) {
dev_info(&pf->pdev->dev,
- "update VSI ctxt for PE failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "update VSI ctxt for PE failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index b4d3fed0d2f2..ed88e38d488b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -14,9 +14,9 @@
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
**/
-i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+int i40e_set_mac_type(struct i40e_hw *hw)
{
- i40e_status status = 0;
+ int status = 0;
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
@@ -27,6 +27,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
@@ -47,6 +48,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_SFP_I_X722:
+ case I40E_DEV_ID_SFP_X722_A:
hw->mac.type = I40E_MAC_X722;
break;
default:
@@ -123,154 +125,6 @@ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
}
/**
- * i40e_stat_str - convert status err code to a string
- * @hw: pointer to the HW structure
- * @stat_err: the status error code to convert
- **/
-const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
-{
- switch (stat_err) {
- case 0:
- return "OK";
- case I40E_ERR_NVM:
- return "I40E_ERR_NVM";
- case I40E_ERR_NVM_CHECKSUM:
- return "I40E_ERR_NVM_CHECKSUM";
- case I40E_ERR_PHY:
- return "I40E_ERR_PHY";
- case I40E_ERR_CONFIG:
- return "I40E_ERR_CONFIG";
- case I40E_ERR_PARAM:
- return "I40E_ERR_PARAM";
- case I40E_ERR_MAC_TYPE:
- return "I40E_ERR_MAC_TYPE";
- case I40E_ERR_UNKNOWN_PHY:
- return "I40E_ERR_UNKNOWN_PHY";
- case I40E_ERR_LINK_SETUP:
- return "I40E_ERR_LINK_SETUP";
- case I40E_ERR_ADAPTER_STOPPED:
- return "I40E_ERR_ADAPTER_STOPPED";
- case I40E_ERR_INVALID_MAC_ADDR:
- return "I40E_ERR_INVALID_MAC_ADDR";
- case I40E_ERR_DEVICE_NOT_SUPPORTED:
- return "I40E_ERR_DEVICE_NOT_SUPPORTED";
- case I40E_ERR_MASTER_REQUESTS_PENDING:
- return "I40E_ERR_MASTER_REQUESTS_PENDING";
- case I40E_ERR_INVALID_LINK_SETTINGS:
- return "I40E_ERR_INVALID_LINK_SETTINGS";
- case I40E_ERR_AUTONEG_NOT_COMPLETE:
- return "I40E_ERR_AUTONEG_NOT_COMPLETE";
- case I40E_ERR_RESET_FAILED:
- return "I40E_ERR_RESET_FAILED";
- case I40E_ERR_SWFW_SYNC:
- return "I40E_ERR_SWFW_SYNC";
- case I40E_ERR_NO_AVAILABLE_VSI:
- return "I40E_ERR_NO_AVAILABLE_VSI";
- case I40E_ERR_NO_MEMORY:
- return "I40E_ERR_NO_MEMORY";
- case I40E_ERR_BAD_PTR:
- return "I40E_ERR_BAD_PTR";
- case I40E_ERR_RING_FULL:
- return "I40E_ERR_RING_FULL";
- case I40E_ERR_INVALID_PD_ID:
- return "I40E_ERR_INVALID_PD_ID";
- case I40E_ERR_INVALID_QP_ID:
- return "I40E_ERR_INVALID_QP_ID";
- case I40E_ERR_INVALID_CQ_ID:
- return "I40E_ERR_INVALID_CQ_ID";
- case I40E_ERR_INVALID_CEQ_ID:
- return "I40E_ERR_INVALID_CEQ_ID";
- case I40E_ERR_INVALID_AEQ_ID:
- return "I40E_ERR_INVALID_AEQ_ID";
- case I40E_ERR_INVALID_SIZE:
- return "I40E_ERR_INVALID_SIZE";
- case I40E_ERR_INVALID_ARP_INDEX:
- return "I40E_ERR_INVALID_ARP_INDEX";
- case I40E_ERR_INVALID_FPM_FUNC_ID:
- return "I40E_ERR_INVALID_FPM_FUNC_ID";
- case I40E_ERR_QP_INVALID_MSG_SIZE:
- return "I40E_ERR_QP_INVALID_MSG_SIZE";
- case I40E_ERR_QP_TOOMANY_WRS_POSTED:
- return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
- case I40E_ERR_INVALID_FRAG_COUNT:
- return "I40E_ERR_INVALID_FRAG_COUNT";
- case I40E_ERR_QUEUE_EMPTY:
- return "I40E_ERR_QUEUE_EMPTY";
- case I40E_ERR_INVALID_ALIGNMENT:
- return "I40E_ERR_INVALID_ALIGNMENT";
- case I40E_ERR_FLUSHED_QUEUE:
- return "I40E_ERR_FLUSHED_QUEUE";
- case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
- return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
- case I40E_ERR_INVALID_IMM_DATA_SIZE:
- return "I40E_ERR_INVALID_IMM_DATA_SIZE";
- case I40E_ERR_TIMEOUT:
- return "I40E_ERR_TIMEOUT";
- case I40E_ERR_OPCODE_MISMATCH:
- return "I40E_ERR_OPCODE_MISMATCH";
- case I40E_ERR_CQP_COMPL_ERROR:
- return "I40E_ERR_CQP_COMPL_ERROR";
- case I40E_ERR_INVALID_VF_ID:
- return "I40E_ERR_INVALID_VF_ID";
- case I40E_ERR_INVALID_HMCFN_ID:
- return "I40E_ERR_INVALID_HMCFN_ID";
- case I40E_ERR_BACKING_PAGE_ERROR:
- return "I40E_ERR_BACKING_PAGE_ERROR";
- case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
- return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
- case I40E_ERR_INVALID_PBLE_INDEX:
- return "I40E_ERR_INVALID_PBLE_INDEX";
- case I40E_ERR_INVALID_SD_INDEX:
- return "I40E_ERR_INVALID_SD_INDEX";
- case I40E_ERR_INVALID_PAGE_DESC_INDEX:
- return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
- case I40E_ERR_INVALID_SD_TYPE:
- return "I40E_ERR_INVALID_SD_TYPE";
- case I40E_ERR_MEMCPY_FAILED:
- return "I40E_ERR_MEMCPY_FAILED";
- case I40E_ERR_INVALID_HMC_OBJ_INDEX:
- return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
- case I40E_ERR_INVALID_HMC_OBJ_COUNT:
- return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
- case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
- return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
- case I40E_ERR_SRQ_ENABLED:
- return "I40E_ERR_SRQ_ENABLED";
- case I40E_ERR_ADMIN_QUEUE_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_ERROR";
- case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
- return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
- case I40E_ERR_BUF_TOO_SHORT:
- return "I40E_ERR_BUF_TOO_SHORT";
- case I40E_ERR_ADMIN_QUEUE_FULL:
- return "I40E_ERR_ADMIN_QUEUE_FULL";
- case I40E_ERR_ADMIN_QUEUE_NO_WORK:
- return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
- case I40E_ERR_BAD_IWARP_CQE:
- return "I40E_ERR_BAD_IWARP_CQE";
- case I40E_ERR_NVM_BLANK_MODE:
- return "I40E_ERR_NVM_BLANK_MODE";
- case I40E_ERR_NOT_IMPLEMENTED:
- return "I40E_ERR_NOT_IMPLEMENTED";
- case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
- return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
- case I40E_ERR_DIAG_TEST_FAILED:
- return "I40E_ERR_DIAG_TEST_FAILED";
- case I40E_ERR_NOT_READY:
- return "I40E_ERR_NOT_READY";
- case I40E_NOT_SUPPORTED:
- return "I40E_NOT_SUPPORTED";
- case I40E_ERR_FIRMWARE_API_VERSION:
- return "I40E_ERR_FIRMWARE_API_VERSION";
- case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
- }
-
- snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
- return hw->err_str;
-}
-
-/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
* @mask: debug mask
@@ -353,13 +207,13 @@ bool i40e_check_asq_alive(struct i40e_hw *hw)
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well.
**/
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading)
+int i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
{
struct i40e_aq_desc desc;
struct i40e_aqc_queue_shutdown *cmd =
(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_queue_shutdown);
@@ -382,15 +236,15 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
*
* Internal function to get or set RSS look up table
**/
-static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
- u16 vsi_id, bool pf_lut,
- u8 *lut, u16 lut_size,
- bool set)
+static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
{
- i40e_status status;
struct i40e_aq_desc desc;
struct i40e_aqc_get_set_rss_lut *cmd_resp =
(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+ int status;
if (set)
i40e_fill_default_direct_cmd_desc(&desc,
@@ -435,8 +289,8 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
*
* get the RSS lookup table, PF or VSI type
**/
-i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size)
+int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
{
return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
false);
@@ -452,8 +306,8 @@ i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
*
* set the RSS lookup table, PF or VSI type
**/
-i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size)
+int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
{
return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}
@@ -467,16 +321,16 @@ i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
*
* get the RSS key per VSI
**/
-static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
- u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key,
- bool set)
+static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
{
- i40e_status status;
struct i40e_aq_desc desc;
struct i40e_aqc_get_set_rss_key *cmd_resp =
(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+ int status;
if (set)
i40e_fill_default_direct_cmd_desc(&desc,
@@ -507,9 +361,9 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
* @key: pointer to key info struct
*
**/
-i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
- u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key)
+int i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}
@@ -522,9 +376,9 @@ i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
*
* set the RSS key per VSI
**/
-i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
- u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key)
+int i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
@@ -794,10 +648,10 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
* hw_addr, back, device_id, vendor_id, subsystem_device_id,
* subsystem_vendor_id, and revision_id
**/
-i40e_status i40e_init_shared_code(struct i40e_hw *hw)
+int i40e_init_shared_code(struct i40e_hw *hw)
{
- i40e_status status = 0;
u32 port, ari, func_rid;
+ int status = 0;
i40e_set_mac_type(hw);
@@ -834,15 +688,16 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
* @addrs: the requestor's mac addr store
* @cmd_details: pointer to command details structure or NULL
**/
-static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
- u16 *flags,
- struct i40e_aqc_mac_address_read_data *addrs,
- struct i40e_asq_cmd_details *cmd_details)
+static int
+i40e_aq_mac_address_read(struct i40e_hw *hw,
+ u16 *flags,
+ struct i40e_aqc_mac_address_read_data *addrs,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_mac_address_read *cmd_data =
(struct i40e_aqc_mac_address_read *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
@@ -861,14 +716,14 @@ static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
* @mac_addr: address to write
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
- u16 flags, u8 *mac_addr,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_mac_address_write *cmd_data =
(struct i40e_aqc_mac_address_write *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_mac_address_write);
@@ -891,11 +746,11 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
*
* Reads the adapter's MAC address from register
**/
-i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
struct i40e_aqc_mac_address_read_data addrs;
- i40e_status status;
u16 flags = 0;
+ int status;
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
@@ -912,11 +767,11 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
*
* Reads the adapter's Port MAC address
**/
-i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
struct i40e_aqc_mac_address_read_data addrs;
- i40e_status status;
u16 flags = 0;
+ int status;
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
if (status)
@@ -970,13 +825,13 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
*
* Reads the part number string from the EEPROM.
**/
-i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
- u32 pba_num_size)
+int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
{
- i40e_status status = 0;
u16 pba_word = 0;
u16 pba_size = 0;
u16 pba_ptr = 0;
+ int status = 0;
u16 i = 0;
status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
@@ -1085,8 +940,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
* @hw: pointer to the hardware structure
* @retry_limit: how many times to retry before failure
**/
-static i40e_status i40e_poll_globr(struct i40e_hw *hw,
- u32 retry_limit)
+static int i40e_poll_globr(struct i40e_hw *hw,
+ u32 retry_limit)
{
u32 cnt, reg = 0;
@@ -1112,7 +967,7 @@ static i40e_status i40e_poll_globr(struct i40e_hw *hw,
* Assuming someone else has triggered a global reset,
 * ensure the global reset is complete and then reset the PF
**/
-i40e_status i40e_pf_reset(struct i40e_hw *hw)
+int i40e_pf_reset(struct i40e_hw *hw)
{
u32 cnt = 0;
u32 cnt1 = 0;
@@ -1451,15 +1306,16 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
*
* Returns the various PHY abilities supported on the Port.
**/
-i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
- bool qualified_modules, bool report_init,
- struct i40e_aq_get_phy_abilities_resp *abilities,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- i40e_status status;
u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
+ struct i40e_aq_desc desc;
+ int status;
if (!abilities)
return I40E_ERR_PARAM;
@@ -1530,14 +1386,14 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
* of the PHY Config parameters. This status will be indicated by the
* command response.
**/
-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
- struct i40e_aq_set_phy_config *config,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aq_set_phy_config *cmd =
(struct i40e_aq_set_phy_config *)&desc.params.raw;
- enum i40e_status_code status;
+ int status;
if (!config)
return I40E_ERR_PARAM;
@@ -1552,7 +1408,7 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
return status;
}
-static noinline_for_stack enum i40e_status_code
+static noinline_for_stack int
i40e_set_fc_status(struct i40e_hw *hw,
struct i40e_aq_get_phy_abilities_resp *abilities,
bool atomic_restart)
@@ -1610,11 +1466,11 @@ i40e_set_fc_status(struct i40e_hw *hw,
*
* Set the requested flow control mode using set_phy_config.
**/
-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
- bool atomic_restart)
+int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_restart)
{
struct i40e_aq_get_phy_abilities_resp abilities;
- enum i40e_status_code status;
+ int status;
*aq_failures = 0x0;
@@ -1653,13 +1509,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
*
* Tell the firmware that the driver is taking over from PXE
**/
-i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
{
- i40e_status status;
struct i40e_aq_desc desc;
struct i40e_aqc_clear_pxe *cmd =
(struct i40e_aqc_clear_pxe *)&desc.params.raw;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_clear_pxe_mode);
@@ -1681,14 +1537,14 @@ i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
*
* Sets up the link and restarts the Auto-Negotiation over the link.
**/
-i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
- bool enable_link,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_link_restart_an *cmd =
(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_link_restart_an);
@@ -1713,17 +1569,17 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
*
* Returns the link status of the adapter.
**/
-i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
- bool enable_lse, struct i40e_link_status *link,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_link_status *resp =
(struct i40e_aqc_get_link_status *)&desc.params.raw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
- i40e_status status;
bool tx_pause, rx_pause;
u16 command_flags;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
@@ -1809,14 +1665,14 @@ aq_get_link_info_exit:
*
* Set link interrupt mask.
**/
-i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
- u16 mask,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+ u16 mask,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_phy_int_mask *cmd =
(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_int_mask);
@@ -1829,6 +1685,32 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_mac_loopback
+ * @hw: pointer to the HW struct
+ * @ena_lpbk: Enable or Disable loopback
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Enable/disable loopback on a given port
+ */
+int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_lb_mode *cmd =
+ (struct i40e_aqc_set_lb_mode *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_lb_modes);
+ if (ena_lpbk) {
+ if (hw->nvm.version <= I40E_LEGACY_LOOPBACK_NVM_VER)
+ cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL_LEGACY);
+ else
+ cmd->lb_mode = cpu_to_le16(I40E_AQ_LB_MAC_LOCAL);
+ }
+
+ return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+}
+
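The new i40e_aq_set_mac_loopback() helper picks the legacy loopback mode for older NVM images and the current mode otherwise; when ena_lpbk is false it leaves lb_mode at its zeroed default, which disables loopback. A minimal sketch of how a self-test might pair the enable and disable calls; only the AQ helper itself comes from this patch, the test wrapper is hypothetical.

```c
/* Illustrative MAC-loopback self-test wrapper around the new AQ helper. */
static int i40e_example_loopback_test(struct i40e_hw *hw)
{
	int status, ret;

	status = i40e_aq_set_mac_loopback(hw, true, NULL);
	if (status)
		return status;

	ret = 0;	/* a real test would send and receive a frame here */

	/* always undo loopback, but report the test failure first */
	status = i40e_aq_set_mac_loopback(hw, false, NULL);
	return ret ? ret : status;
}
```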
+/**
* i40e_aq_set_phy_debug
* @hw: pointer to the hw struct
* @cmd_flags: debug command flags
@@ -1836,13 +1718,13 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
*
* Reset the external PHY.
**/
-i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_phy_debug *cmd =
(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_debug);
@@ -1877,9 +1759,9 @@ static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
*
* Add a VSI context to the hardware.
**/
-i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
@@ -1887,7 +1769,7 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_vsi);
@@ -1899,8 +1781,9 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
- sizeof(vsi_ctx->info), cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info),
+ cmd_details, true);
if (status)
goto aq_add_vsi_exit;
@@ -1920,15 +1803,15 @@ aq_add_vsi_exit:
* @seid: vsi number
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
- u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)
&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -1948,15 +1831,15 @@ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
* @seid: vsi number
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
- u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)
&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -1978,16 +1861,16 @@ i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
* @cmd_details: pointer to command details structure or NULL
* @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
**/
-i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 seid, bool set,
- struct i40e_asq_cmd_details *cmd_details,
- bool rx_only_promisc)
+int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2018,14 +1901,15 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
* @set: set multicast promiscuous enable/disable
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
- u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2051,16 +1935,16 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
* @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
* @cmd_details: pointer to command details structure or NULL
**/
-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- enum i40e_status_code status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2073,7 +1957,8 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
cmd->seid = cpu_to_le16(seid);
cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+ cmd_details, true);
return status;
}
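Several of the promiscuous-mode setters in this region now go through i40e_asq_send_command_atomic(..., true) instead of i40e_asq_send_command(); the extra bool presumably tells the AdminQ layer the caller may be in a context where it cannot sleep while the command completes (an assumption, since the flag's semantics are not part of these hunks). From a caller's point of view the wrappers are unchanged. A minimal sketch using only the two wrapper signatures shown earlier in this file; the function name and error handling are illustrative.

```c
/* Illustrative enable/disable of promiscuous mode on one VSI. */
static int i40e_example_set_promisc(struct i40e_hw *hw, u16 seid, bool on)
{
	int status;

	/* rx_only_promisc = false; see the @rx_only_promisc comment above */
	status = i40e_aq_set_vsi_unicast_promiscuous(hw, seid, on, NULL,
						     false);
	if (status)
		return status;

	return i40e_aq_set_vsi_multicast_promiscuous(hw, seid, on, NULL);
}
```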
@@ -2086,16 +1971,16 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
* @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
* @cmd_details: pointer to command details structure or NULL
**/
-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- enum i40e_status_code status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2114,7 +1999,8 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
cmd->seid = cpu_to_le16(seid);
cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+ cmd_details, true);
return status;
}
@@ -2127,15 +2013,15 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
* @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable, u16 vid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2162,14 +2048,14 @@ i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
*
* Set or clear the broadcast promiscuous flag (filter) for a given VSI.
**/
-i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
- u16 seid, bool set_filter,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 seid, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2195,15 +2081,15 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
* @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2225,9 +2111,9 @@ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
* @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
@@ -2235,7 +2121,7 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_vsi_parameters);
@@ -2267,9 +2153,9 @@ aq_get_vsi_params_exit:
*
* Update a VSI context.
**/
-i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
@@ -2277,7 +2163,7 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_update_vsi_parameters);
@@ -2285,8 +2171,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
- sizeof(vsi_ctx->info), cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, &vsi_ctx->info,
+ sizeof(vsi_ctx->info),
+ cmd_details, true);
vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
@@ -2304,15 +2191,15 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
*
* Fill the buf with switch configuration returned from AdminQ command
**/
-i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
- struct i40e_aqc_get_switch_config_resp *buf,
- u16 buf_size, u16 *start_seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_switch_seid *scfg =
(struct i40e_aqc_switch_seid *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_switch_config);
@@ -2338,15 +2225,15 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
*
* Set switch configuration bits
**/
-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
- u16 flags,
- u16 valid_flags, u8 mode,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags, u8 mode,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_switch_config *scfg =
(struct i40e_aqc_set_switch_config *)&desc.params.raw;
- enum i40e_status_code status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_switch_config);
@@ -2375,16 +2262,16 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
*
* Get the firmware version from the admin queue commands
**/
-i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
- u16 *fw_major_version, u16 *fw_minor_version,
- u32 *fw_build,
- u16 *api_major_version, u16 *api_minor_version,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_version *resp =
(struct i40e_aqc_get_version *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
@@ -2414,14 +2301,14 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
*
* Send the driver version to the firmware
**/
-i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+int i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_driver_version *cmd =
(struct i40e_aqc_driver_version *)&desc.params.raw;
- i40e_status status;
+ int status;
u16 len;
if (dv == NULL)
@@ -2456,9 +2343,9 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
*
* Side effect: LinkStatusEvent reporting becomes enabled
**/
-i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+int i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
{
- i40e_status status = 0;
+ int status = 0;
if (hw->phy.get_link_info) {
status = i40e_update_link_info(hw);
@@ -2477,10 +2364,10 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
* i40e_update_link_info - update status of the HW network link
* @hw: pointer to the hw struct
**/
-noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
+noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw)
{
struct i40e_aq_get_phy_abilities_resp abilities;
- i40e_status status = 0;
+ int status = 0;
status = i40e_aq_get_link_info(hw, true, NULL, NULL);
if (status)
@@ -2527,19 +2414,19 @@ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
* This asks the FW to add a VEB between the uplink and downlink
* elements. If the uplink SEID is 0, this will be a floating VEB.
**/
-i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
- u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *veb_seid,
- bool enable_stats,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *veb_seid,
+ bool enable_stats,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_veb *cmd =
(struct i40e_aqc_add_veb *)&desc.params.raw;
struct i40e_aqc_add_veb_completion *resp =
(struct i40e_aqc_add_veb_completion *)&desc.params.raw;
- i40e_status status;
u16 veb_flags = 0;
+ int status;
/* SEIDs need to either both be set or both be 0 for floating VEB */
if (!!uplink_seid != !!downlink_seid)
@@ -2585,17 +2472,17 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
* This retrieves the parameters for a particular VEB, specified by
* uplink_seid, and returns them to the caller.
**/
-i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
- u16 veb_seid, u16 *switch_id,
- bool *floating, u16 *statistic_index,
- u16 *vebs_used, u16 *vebs_free,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id,
+ bool *floating, u16 *statistic_index,
+ u16 *vebs_used, u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
(struct i40e_aqc_get_veb_parameters_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;
if (veb_seid == 0)
return I40E_ERR_PARAM;
@@ -2630,33 +2517,28 @@ get_veb_exit:
}
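As the comment above i40e_aq_add_veb() notes, the firmware returns the new element's SEID through veb_seid, and passing both uplink and downlink SEIDs as zero requests a floating VEB. A minimal sketch using only the signature shown in that hunk; the TC bitmap and the default_port/enable_stats choices are illustrative.

```c
/* Illustrative creation of a VEB between an uplink port and one VSI. */
static int i40e_example_add_veb(struct i40e_hw *hw, u16 uplink_seid,
				u16 vsi_seid, u16 *veb_seid)
{
	/* enable TC0 only; not a default port, do collect statistics */
	return i40e_aq_add_veb(hw, uplink_seid, vsi_seid, BIT(0),
			       false, veb_seid, true, NULL);
}
```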
/**
- * i40e_aq_add_macvlan
- * @hw: pointer to the hw struct
- * @seid: VSI for the mac address
+ * i40e_prepare_add_macvlan
* @mv_list: list of macvlans to be added
+ * @desc: pointer to AQ descriptor structure
* @count: length of the list
- * @cmd_details: pointer to command details structure or NULL
+ * @seid: VSI for the mac address
*
- * Add MAC/VLAN addresses to the HW filtering
+ * Internal helper function that prepares the add macvlan request
+ * and returns the buffer size.
**/
-i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_add_macvlan_element_data *mv_list,
- u16 count, struct i40e_asq_cmd_details *cmd_details)
+static u16
+i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
+ struct i40e_aq_desc *desc, u16 count, u16 seid)
{
- struct i40e_aq_desc desc;
struct i40e_aqc_macvlan *cmd =
- (struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
+ (struct i40e_aqc_macvlan *)&desc->params.raw;
u16 buf_size;
int i;
- if (count == 0 || !mv_list || !hw)
- return I40E_ERR_PARAM;
-
buf_size = count * sizeof(*mv_list);
/* prep the rest of the request */
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+ i40e_fill_default_direct_cmd_desc(desc, i40e_aqc_opc_add_macvlan);
cmd->num_addresses = cpu_to_le16(count);
cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
cmd->seid[1] = 0;
@@ -2667,14 +2549,71 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
mv_list[i].flags |=
cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
- desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ desc->flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
if (buf_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
- cmd_details);
+ return buf_size;
+}
- return status;
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+int
+i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
+
+ return i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
+ cmd_details, true);
+}
+
+/**
+ * i40e_aq_add_macvlan_v2
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Add MAC/VLAN addresses to the HW filtering.
+ * The _v2 version returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ * It also calls _v2 versions of asq_send_command functions to
+ * get the aq_status on the stack.
+ **/
+int
+i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status)
+{
+ struct i40e_aq_desc desc;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
+
+ return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
+ cmd_details, true, aq_status);
}
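The _v2 entry point exists so a caller can read the Admin Queue return code from its own stack variable instead of the shared hw->aq.asq_last_status, which a concurrent command can overwrite. A minimal sketch follows, assuming only the signature above; the single-element filter, the field names of i40e_aqc_add_macvlan_element_data, and the I40E_AQ_RC_ENOSPC check are assumptions about the surrounding driver code rather than content of this patch.

```c
#include <linux/etherdevice.h>

/* Illustrative add of one perfect-match MAC filter, reading the AQ
 * status from the stack rather than hw->aq.asq_last_status.
 */
static int i40e_example_add_mac_filter(struct i40e_hw *hw, u16 seid,
					const u8 *mac)
{
	struct i40e_aqc_add_macvlan_element_data elem = {};
	enum i40e_admin_queue_err aq_status;
	int status;

	ether_addr_copy(elem.mac_addr, mac);	/* assumed field name */
	elem.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);

	status = i40e_aq_add_macvlan_v2(hw, seid, &elem, 1, NULL, &aq_status);
	if (status && aq_status == I40E_AQ_RC_ENOSPC)
		return -ENOSPC;	/* filter table full; illustrative mapping */

	return status;
}
```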
/**
@@ -2687,15 +2626,16 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
*
* Remove MAC/VLAN addresses from the HW filtering
**/
-i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_remove_macvlan_element_data *mv_list,
- u16 count, struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_macvlan *cmd =
(struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
u16 buf_size;
+ int status;
if (count == 0 || !mv_list || !hw)
return I40E_ERR_PARAM;
@@ -2713,13 +2653,59 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
if (buf_size > I40E_AQ_LARGE_BUF)
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
- cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, mv_list, buf_size,
+ cmd_details, true);
return status;
}
/**
+ * i40e_aq_remove_macvlan_v2
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ * @aq_status: pointer to Admin Queue status return value
+ *
+ * Remove MAC/VLAN addresses from the HW filtering.
+ * The _v2 version returns the last Admin Queue status in aq_status
+ * to avoid race conditions in access to hw->aq.asq_last_status.
+ * It also calls _v2 versions of asq_send_command functions to
+ * get the aq_status on the stack.
+ **/
+int
+i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status)
+{
+ struct i40e_aqc_macvlan *cmd;
+ struct i40e_aq_desc desc;
+ u16 buf_size;
+
+ if (count == 0 || !mv_list || !hw)
+ return I40E_ERR_PARAM;
+
+ buf_size = count * sizeof(*mv_list);
+
+ /* prep the rest of the request */
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+ cmd = (struct i40e_aqc_macvlan *)&desc.params.raw;
+ cmd->num_addresses = cpu_to_le16(count);
+ cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+ cmd->seid[1] = 0;
+ cmd->seid[2] = 0;
+
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buf_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+ return i40e_asq_send_command_atomic_v2(hw, &desc, mv_list, buf_size,
+ cmd_details, true, aq_status);
+}
+
+/**
* i40e_mirrorrule_op - Internal helper function to add/delete mirror rule
* @hw: pointer to the hw struct
* @opcode: AQ opcode for add or delete mirror rule
@@ -2736,19 +2722,19 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
* Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
* VEBs/VEPA elements only
**/
-static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
- u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
- u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
+static int i40e_mirrorrule_op(struct i40e_hw *hw,
+ u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
+ u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_delete_mirror_rule *cmd =
(struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
struct i40e_aqc_add_delete_mirror_rule_completion *resp =
(struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
- i40e_status status;
u16 buf_size;
+ int status;
buf_size = count * sizeof(*mr_list);
@@ -2796,10 +2782,11 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
*
* Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
**/
-i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
+int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count,
+ __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
{
if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
@@ -2827,10 +2814,11 @@ i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
*
* Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
**/
-i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free)
+int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count,
+ __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free)
{
/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
@@ -2859,14 +2847,14 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
*
* send msg to vf
**/
-i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
- u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_pf_vf_message *cmd =
(struct i40e_aqc_pf_vf_message *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
cmd->id = cpu_to_le32(vfid);
@@ -2894,14 +2882,14 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
*
* Read the register using the admin queue commands
**/
-i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+int i40e_aq_debug_read_register(struct i40e_hw *hw,
u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_debug_reg_read_write *cmd_resp =
(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;
if (reg_val == NULL)
return I40E_ERR_PARAM;
@@ -2929,14 +2917,14 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
*
* Write to a register using the admin queue commands
**/
-i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
- u32 reg_addr, u64 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_debug_reg_read_write *cmd =
(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
@@ -2960,16 +2948,16 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
*
* requests common resource using the admin queue commands
**/
-i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- enum i40e_aq_resource_access_type access,
- u8 sdp_number, u64 *timeout,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_request_resource *cmd_resp =
(struct i40e_aqc_request_resource *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
@@ -2999,15 +2987,15 @@ i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
*
* release common resource using the admin queue commands
**/
-i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- u8 sdp_number,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_request_resource *cmd =
(struct i40e_aqc_request_resource *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
@@ -3031,15 +3019,15 @@ i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
*
* Read the NVM using the admin queue commands
**/
-i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_nvm_update *cmd =
(struct i40e_aqc_nvm_update *)&desc.params.raw;
- i40e_status status;
+ int status;
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000) {
@@ -3077,14 +3065,14 @@ i40e_aq_read_nvm_exit:
*
* Erase the NVM sector using the admin queue commands
**/
-i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, bool last_command,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_nvm_update *cmd =
(struct i40e_aqc_nvm_update *)&desc.params.raw;
- i40e_status status;
+ int status;
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000) {
@@ -3125,8 +3113,8 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
u16 id, ocp_cfg_word0;
- i40e_status status;
u8 major_rev;
+ int status;
u32 i = 0;
cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
@@ -3367,14 +3355,14 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
*
* Get the device capabilities descriptions from the firmware
**/
-i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
- void *buff, u16 buff_size, u16 *data_size,
- enum i40e_admin_queue_opc list_type_opc,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_list_capabilites *cmd;
struct i40e_aq_desc desc;
- i40e_status status = 0;
+ int status = 0;
cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
@@ -3416,15 +3404,15 @@ exit:
*
* Update the NVM using the admin queue commands
**/
-i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command, u8 preservation_flags,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 preservation_flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_nvm_update *cmd =
(struct i40e_aqc_nvm_update *)&desc.params.raw;
- i40e_status status;
+ int status;
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000) {
@@ -3469,13 +3457,13 @@ i40e_aq_update_nvm_exit:
*
* Rearrange NVM structure, available only for transition FW
**/
-i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_nvm_update *cmd;
- i40e_status status;
struct i40e_aq_desc desc;
+ int status;
cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
@@ -3509,17 +3497,17 @@ i40e_aq_rearrange_nvm_exit:
*
* Requests the complete LLDP MIB (entire packet).
**/
-i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
- u8 mib_type, void *buff, u16 buff_size,
- u16 *local_len, u16 *remote_len,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_get_mib *cmd =
(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
struct i40e_aqc_lldp_get_mib *resp =
(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
- i40e_status status;
+ int status;
if (buff_size == 0 || !buff)
return I40E_ERR_PARAM;
@@ -3559,14 +3547,14 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
*
* Set the LLDP MIB.
**/
-enum i40e_status_code
+int
i40e_aq_set_lldp_mib(struct i40e_hw *hw,
u8 mib_type, void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_lldp_set_local_mib *cmd;
- enum i40e_status_code status;
struct i40e_aq_desc desc;
+ int status;
cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
if (buff_size == 0 || !buff)
@@ -3598,14 +3586,14 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
* Enable or Disable posting of an event on ARQ when LLDP MIB
* associated with the interface changes
**/
-i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
- bool enable_update,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_update_mib *cmd =
(struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
@@ -3627,14 +3615,14 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
 * Restore LLDP Agent factory settings if @restore is set to True. Otherwise,
 * only return the factory setting in the AQ response.
**/
-enum i40e_status_code
+int
i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_restore *cmd =
(struct i40e_aqc_lldp_restore *)&desc.params.raw;
- i40e_status status;
+ int status;
if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
i40e_debug(hw, I40E_DEBUG_ALL,
@@ -3664,14 +3652,14 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
*
* Stop or Shutdown the embedded LLDP Agent
**/
-i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
- bool persist,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_stop *cmd =
(struct i40e_aqc_lldp_stop *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
@@ -3699,13 +3687,13 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
*
* Start the embedded LLDP Agent on all ports.
**/
-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_start *cmd =
(struct i40e_aqc_lldp_start *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
@@ -3731,14 +3719,14 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
* @dcb_enable: True if DCB configuration needs to be applied
*
**/
-enum i40e_status_code
+int
i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_dcb_parameters *cmd =
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
- i40e_status status;
+ int status;
if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -3764,12 +3752,12 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
*
* Get CEE DCBX mode operational configuration from firmware
**/
-i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
- void *buff, u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;
if (buff_size == 0 || !buff)
return I40E_ERR_PARAM;
@@ -3795,17 +3783,17 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
* and this function will call cpu_to_le16 to convert from Host byte order to
* Little Endian order.
**/
-i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 protocol_index,
- u8 *filter_index,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_udp_tunnel *cmd =
(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
struct i40e_aqc_del_udp_tunnel_completion *resp =
(struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
@@ -3826,13 +3814,13 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
* @index: filter index
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_remove_udp_tunnel *cmd =
(struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
@@ -3851,13 +3839,13 @@ i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
*
* This deletes a switch element from the switch.
**/
-i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_switch_seid *cmd =
(struct i40e_aqc_switch_seid *)&desc.params.raw;
- i40e_status status;
+ int status;
if (seid == 0)
return I40E_ERR_PARAM;
@@ -3866,7 +3854,8 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
cmd->seid = cpu_to_le16(seid);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+ status = i40e_asq_send_command_atomic(hw, &desc, NULL, 0,
+ cmd_details, true);
return status;
}
@@ -3880,11 +3869,11 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
* recomputed and modified. The retval field in the descriptor
* will be set to 0 when RPB is modified.
**/
-i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
@@ -3904,15 +3893,15 @@ i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
*
* Generic command handler for Tx scheduler AQ commands
**/
-static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
void *buff, u16 buff_size,
- enum i40e_admin_queue_opc opcode,
+ enum i40e_admin_queue_opc opcode,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_tx_sched_ind *cmd =
(struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
- i40e_status status;
+ int status;
bool cmd_param_flag = false;
switch (opcode) {
@@ -3962,14 +3951,14 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
* @max_credit: Max BW limit credits
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_configure_vsi_bw_limit *cmd =
(struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_vsi_bw_limit);
@@ -3990,10 +3979,10 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
* @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_configure_vsi_tc_bw,
@@ -4008,11 +3997,12 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
* @opcode: Tx scheduler AQ command opcode
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
- enum i40e_admin_queue_opc opcode,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
sizeof(*ets_data), opcode, cmd_details);
@@ -4025,7 +4015,8 @@ i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
* @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+int
+i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
struct i40e_asq_cmd_details *cmd_details)
@@ -4042,10 +4033,11 @@ i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold VSI BW configuration
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_vsi_bw_config,
@@ -4059,10 +4051,11 @@ i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold VSI BW configuration per TC
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_vsi_ets_sla_config,
@@ -4076,10 +4069,11 @@ i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold switching component's per TC BW config
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_switching_comp_ets_config,
@@ -4093,10 +4087,11 @@ i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold current ETS configuration for the Physical Port
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_port_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_port_ets_config,
@@ -4110,10 +4105,11 @@ i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold switching component's BW configuration
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_switching_comp_bw_config,
@@ -4132,11 +4128,11 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
* Returns 0 if the values passed are valid and within
* range else returns an error.
**/
-static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
- struct i40e_filter_control_settings *settings)
+static int
+i40e_validate_filter_settings(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
{
u32 fcoe_cntx_size, fcoe_filt_size;
- u32 pe_cntx_size, pe_filt_size;
u32 fcoe_fmax;
u32 val;
@@ -4180,8 +4176,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
case I40E_HASH_FILTER_SIZE_256K:
case I40E_HASH_FILTER_SIZE_512K:
case I40E_HASH_FILTER_SIZE_1M:
- pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
- pe_filt_size <<= (u32)settings->pe_filt_num;
break;
default:
return I40E_ERR_PARAM;
@@ -4198,8 +4192,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
case I40E_DMA_CNTX_SIZE_64K:
case I40E_DMA_CNTX_SIZE_128K:
case I40E_DMA_CNTX_SIZE_256K:
- pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
- pe_cntx_size <<= (u32)settings->pe_cntx_num;
break;
default:
return I40E_ERR_PARAM;
@@ -4224,11 +4216,11 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
* for a single PF. It is expected that these settings are programmed
* at the driver initialization time.
**/
-i40e_status i40e_set_filter_control(struct i40e_hw *hw,
- struct i40e_filter_control_settings *settings)
+int i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
{
- i40e_status ret = 0;
u32 hash_lut_size = 0;
+ int ret = 0;
u32 val;
if (!settings)
@@ -4298,11 +4290,11 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
* In return it will update the total number of perfect filter count in
* the stats member.
**/
-i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
- u8 *mac_addr, u16 ethtype, u16 flags,
- u16 vsi_seid, u16 queue, bool is_add,
- struct i40e_control_filter_stats *stats,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_control_packet_filter *cmd =
@@ -4311,7 +4303,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
(struct i40e_aqc_add_remove_control_packet_filter_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;
if (vsi_seid == 0)
return I40E_ERR_PARAM;
@@ -4357,7 +4349,7 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
- i40e_status status;
+ int status;
status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
seid, 0, true, NULL,
@@ -4379,14 +4371,14 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
* is not passed then only register at 'reg_addr0' is read.
*
**/
-static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
- u32 reg_addr0, u32 *reg_val0,
- u32 reg_addr1, u32 *reg_val1)
+static int i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1)
{
struct i40e_aq_desc desc;
struct i40e_aqc_alternate_write *cmd_resp =
(struct i40e_aqc_alternate_write *)&desc.params.raw;
- i40e_status status;
+ int status;
if (!reg_val0)
return I40E_ERR_PARAM;
@@ -4415,12 +4407,12 @@ static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
*
* Suspend port's Tx traffic
**/
-i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_tx_sched_ind *cmd;
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;
cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
@@ -4437,11 +4429,11 @@ i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
*
* Resume port's Tx traffic
**/
-i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
@@ -4511,18 +4503,18 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
* Dump internal FW/HW data for debug purposes.
*
**/
-i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
- u8 table_id, u32 start_index, u16 buff_size,
- void *buff, u16 *ret_buff_size,
- u8 *ret_next_table, u32 *ret_next_index,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_debug_dump_internals *cmd =
(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
struct i40e_aqc_debug_dump_internals *resp =
(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
- i40e_status status;
+ int status;
if (buff_size == 0 || !buff)
return I40E_ERR_PARAM;
@@ -4563,12 +4555,12 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
*
* Read bw from the alternate ram for the given pf
**/
-i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
- u32 *max_bw, u32 *min_bw,
- bool *min_valid, bool *max_valid)
+int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw,
+ bool *min_valid, bool *max_valid)
{
- i40e_status status;
u32 max_bw_addr, min_bw_addr;
+ int status;
/* Calculate the address of the min/max bw registers */
max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
@@ -4603,13 +4595,14 @@ i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
*
* Configure partitions guaranteed/max bw
**/
-i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
- struct i40e_aqc_configure_partition_bw_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
- i40e_status status;
- struct i40e_aq_desc desc;
u16 bwd_size = sizeof(*bw_data);
+ struct i40e_aq_desc desc;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_partition_bw);
@@ -4638,11 +4631,11 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 *value)
+int i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
u32 command = 0;
u16 retry = 1000;
@@ -4683,11 +4676,11 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
*
* Writes specified PHY register value
**/
-i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 value)
+int i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
u32 command = 0;
u16 retry = 1000;
@@ -4724,13 +4717,13 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value)
+int i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
+ u8 port_num = hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
u32 command = 0;
u16 retry = 1000;
- u8 port_num = hw->func_caps.mdio_port_num;
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
@@ -4798,13 +4791,13 @@ phy_read_end:
*
* Writes value to specified PHY register
**/
-i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value)
+int i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
- u32 command = 0;
- u16 retry = 1000;
u8 port_num = hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
+ u16 retry = 1000;
+ u32 command = 0;
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
@@ -4865,16 +4858,17 @@ phy_write_end:
*
* Writes value to specified PHY register
**/
-i40e_status i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value)
+int i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
{
- i40e_status status;
+ int status;
switch (hw->device_id) {
case I40E_DEV_ID_1G_BASE_T_X722:
status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
value);
break;
+ case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
@@ -4903,16 +4897,17 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-i40e_status i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value)
+int i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
{
- i40e_status status;
+ int status;
switch (hw->device_id) {
case I40E_DEV_ID_1G_BASE_T_X722:
status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
value);
break;
+ case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
@@ -4954,17 +4949,17 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
*
* Blinks PHY link LED
**/
-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval)
+int i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval)
{
- i40e_status status = 0;
- u32 i;
- u16 led_ctl;
- u16 gpio_led_port;
- u16 led_reg;
u16 led_addr = I40E_PHY_LED_PROV_REG_1;
+ u16 gpio_led_port;
u8 phy_addr = 0;
+ int status = 0;
+ u16 led_ctl;
u8 port_num;
+ u16 led_reg;
+ u32 i;
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
@@ -5026,12 +5021,12 @@ phy_blinking_end:
* @led_addr: LED register address
* @reg_val: read register value
**/
-static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
- u32 *reg_val)
+static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 *reg_val)
{
- enum i40e_status_code status;
u8 phy_addr = 0;
u8 port_num;
+ int status;
u32 i;
*reg_val = 0;
@@ -5060,12 +5055,12 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
* @led_addr: LED register address
* @reg_val: register value to write
**/
-static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
- u32 reg_val)
+static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 reg_val)
{
- enum i40e_status_code status;
u8 phy_addr = 0;
u8 port_num;
+ int status;
u32 i;
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
@@ -5095,17 +5090,17 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
* @val: original value of register to use
*
**/
-i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
- u16 *val)
+int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val)
{
- i40e_status status = 0;
u16 gpio_led_port;
u8 phy_addr = 0;
- u16 reg_val;
+ u32 reg_val_aq;
+ int status = 0;
u16 temp_addr;
+ u16 reg_val;
u8 port_num;
u32 i;
- u32 reg_val_aq;
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status =
@@ -5150,12 +5145,12 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
* Set led's on or off when controlled by the PHY
*
**/
-i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
- u16 led_addr, u32 mode)
+int i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode)
{
- i40e_status status = 0;
u32 led_ctl = 0;
u32 led_reg = 0;
+ int status = 0;
status = i40e_led_get_reg(hw, led_addr, &led_reg);
if (status)
@@ -5199,14 +5194,14 @@ restore_config:
* Use the firmware to read the Rx control register,
* especially useful if the Rx unit is under heavy pressure
**/
-i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;
if (!reg_val)
return I40E_ERR_PARAM;
@@ -5230,8 +5225,8 @@ i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
**/
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
{
- i40e_status status = 0;
bool use_register;
+ int status = 0;
int retry = 5;
u32 val = 0;
@@ -5265,14 +5260,14 @@ do_retry:
* Use the firmware to write to an Rx control register,
* especially useful if the Rx unit is under heavy pressure
**/
-i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_rx_ctl_reg_read_write *cmd =
(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
@@ -5292,8 +5287,8 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
**/
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
- i40e_status status = 0;
bool use_register;
+ int status = 0;
int retry = 5;
use_register = (((hw->aq.api_maj_ver == 1) &&
@@ -5355,16 +5350,16 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
 * NOTE: In common cases MDIO I/F number should not be changed, that's why you
* may use simple wrapper i40e_aq_set_phy_register.
**/
-enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr, bool page_change,
- bool set_mdio, u8 mdio_num,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
(struct i40e_aqc_phy_register_access *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_register);
@@ -5400,16 +5395,16 @@ enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
 * NOTE: In common cases MDIO I/F number should not be changed, that's why you
* may use simple wrapper i40e_aq_get_phy_register.
**/
-enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr, bool page_change,
- bool set_mdio, u8 mdio_num,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
(struct i40e_aqc_phy_register_access *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_phy_register);
@@ -5440,18 +5435,17 @@ enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
* @error_info: returns error information
* @cmd_details: pointer to command details structure or NULL
**/
-enum
-i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
- u16 buff_size, u32 track_id,
- u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_write_personalization_profile *cmd =
(struct i40e_aqc_write_personalization_profile *)
&desc.params.raw;
struct i40e_aqc_write_ddp_resp *resp;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_write_personalization_profile);
@@ -5484,15 +5478,14 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
* @flags: AdminQ command flags
* @cmd_details: pointer to command details structure or NULL
**/
-enum
-i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
- u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_applied_profiles *cmd =
(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_personalization_profile_list);
@@ -5591,14 +5584,13 @@ i40e_find_section_in_profile(u32 section_type,
* @hw: pointer to the hw struct
* @aq: command buffer containing all data to execute AQ
**/
-static enum
-i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
- struct i40e_profile_aq_section *aq)
+static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+ struct i40e_profile_aq_section *aq)
{
- i40e_status status;
struct i40e_aq_desc desc;
u8 *msg = NULL;
u16 msglen;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
desc.flags |= cpu_to_le16(aq->flags);
@@ -5638,14 +5630,14 @@ i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
*
* Validates supported devices and profile's sections.
*/
-static enum i40e_status_code
+static int
i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 track_id, bool rollback)
{
struct i40e_profile_section_header *sec = NULL;
- i40e_status status = 0;
struct i40e_section_table *sec_tbl;
u32 vendor_dev_id;
+ int status = 0;
u32 dev_cnt;
u32 sec_off;
u32 i;
@@ -5703,16 +5695,16 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Handles the download of a complete package.
*/
-enum i40e_status_code
+int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 track_id)
{
- i40e_status status = 0;
- struct i40e_section_table *sec_tbl;
struct i40e_profile_section_header *sec = NULL;
struct i40e_profile_aq_section *ddp_aq;
- u32 section_size = 0;
+ struct i40e_section_table *sec_tbl;
u32 offset = 0, info = 0;
+ u32 section_size = 0;
+ int status = 0;
u32 sec_off;
u32 i;
@@ -5766,15 +5758,15 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Rolls back previously loaded package.
*/
-enum i40e_status_code
+int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 track_id)
{
struct i40e_profile_section_header *sec = NULL;
- i40e_status status = 0;
struct i40e_section_table *sec_tbl;
u32 offset = 0, info = 0;
u32 section_size = 0;
+ int status = 0;
u32 sec_off;
int i;
@@ -5818,15 +5810,15 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Register a profile to the list of loaded profiles.
*/
-enum i40e_status_code
+int
i40e_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id)
{
- i40e_status status = 0;
struct i40e_profile_section_header *sec = NULL;
struct i40e_profile_info *pinfo;
u32 offset = 0, info = 0;
+ int status = 0;
sec = (struct i40e_profile_section_header *)profile_info_sec;
sec->tbl_size = 1;
@@ -5860,7 +5852,7 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
* of the function.
*
**/
-enum i40e_status_code
+int
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
@@ -5868,8 +5860,8 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- enum i40e_status_code status;
u16 buff_len;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
@@ -5897,7 +5889,7 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
* function.
*
**/
-enum i40e_status_code
+int
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
@@ -5905,8 +5897,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- i40e_status status;
u16 buff_len;
+ int status;
int i;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -5954,7 +5946,7 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
* of the function.
*
**/
-enum i40e_status_code
+int
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
@@ -5962,8 +5954,8 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- enum i40e_status_code status;
u16 buff_len;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
@@ -5991,7 +5983,7 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
* function.
*
**/
-enum i40e_status_code
+int
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
@@ -5999,8 +5991,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- i40e_status status;
u16 buff_len;
+ int status;
int i;
i40e_fill_default_direct_cmd_desc(&desc,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 673f341f4c0c..90638b67f8dc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -12,7 +12,7 @@
*
* Get the DCBX status from the Firmware
**/
-i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
{
u32 reg;
@@ -497,15 +497,15 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
*
* Parse DCB configuration from the LLDPDU
**/
-i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
- struct i40e_dcbx_config *dcbcfg)
+int i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg)
{
- i40e_status ret = 0;
struct i40e_lldp_org_tlv *tlv;
- u16 type;
- u16 length;
u16 typelength;
u16 offset = 0;
+ int ret = 0;
+ u16 length;
+ u16 type;
if (!lldpmib || !dcbcfg)
return I40E_ERR_PARAM;
@@ -551,12 +551,12 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
*
* Query DCB configuration from the Firmware
**/
-i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
- u8 bridgetype,
- struct i40e_dcbx_config *dcbcfg)
+int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg)
{
- i40e_status ret = 0;
struct i40e_virt_mem mem;
+ int ret = 0;
u8 *lldpmib;
/* Allocate the LLDPDU */
@@ -767,9 +767,9 @@ static void i40e_cee_to_dcb_config(
*
* Get IEEE mode DCB configuration from the Firmware
**/
-static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+static int i40e_get_ieee_dcb_config(struct i40e_hw *hw)
{
- i40e_status ret = 0;
+ int ret = 0;
/* IEEE mode */
hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
@@ -797,11 +797,11 @@ out:
*
* Get DCB configuration from the Firmware
**/
-i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
+int i40e_get_dcb_config(struct i40e_hw *hw)
{
- i40e_status ret = 0;
- struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
+ struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ int ret = 0;
/* If Firmware version < v4.33 on X710/XL710, IEEE only */
if ((hw->mac.type == I40E_MAC_XL710) &&
@@ -867,11 +867,11 @@ out:
*
* Update DCB configuration from the Firmware
**/
-i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
+int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
{
- i40e_status ret = 0;
struct i40e_lldp_variables lldp_cfg;
u8 adminstatus = 0;
+ int ret = 0;
if (!hw->func_caps.dcb)
return I40E_NOT_SUPPORTED;
@@ -940,13 +940,13 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
* Get status of FW Link Layer Discovery Protocol (LLDP) Agent.
* Status of agent is reported via @lldp_status parameter.
**/
-enum i40e_status_code
+int
i40e_get_fw_lldp_status(struct i40e_hw *hw,
enum i40e_get_fw_lldp_status_resp *lldp_status)
{
struct i40e_virt_mem mem;
- i40e_status ret;
u8 *lldpmib;
+ int ret;
if (!lldp_status)
return I40E_ERR_PARAM;
@@ -1238,13 +1238,13 @@ static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
*
* Set DCB configuration to the Firmware
**/
-i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
+int i40e_set_dcb_config(struct i40e_hw *hw)
{
struct i40e_dcbx_config *dcbcfg;
struct i40e_virt_mem mem;
u8 mib_type, *lldpmib;
- i40e_status ret;
u16 miblen;
+ int ret;
/* update the hw local config */
dcbcfg = &hw->local_dcbx_config;
@@ -1274,8 +1274,8 @@ i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
*
* send DCB configuration to FW
**/
-i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
- struct i40e_dcbx_config *dcbcfg)
+int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg)
{
u16 length, offset = 0, tlvid, typelength;
struct i40e_lldp_org_tlv *tlv;
@@ -1888,13 +1888,13 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
*
* Reads the LLDP configuration data from NVM using passed addresses
**/
-static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg,
- u8 module, u32 word_offset)
+static int _i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg,
+ u8 module, u32 word_offset)
{
u32 address, offset = (2 * word_offset);
- i40e_status ret;
__le16 raw_mem;
+ int ret;
u16 mem;
ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
@@ -1950,10 +1950,10 @@ err_lldp_cfg:
*
* Reads the LLDP configuration data from NVM
**/
-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg)
+int i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg)
{
- i40e_status ret = 0;
+ int ret = 0;
u32 mem;
if (!lldp_cfg)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 2370ceecb061..6b60dc9b7736 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -264,20 +264,20 @@ void i40e_dcb_hw_calculate_pool_sizes(struct i40e_hw *hw,
void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
struct i40e_rx_pb_config *old_pb_cfg,
struct i40e_rx_pb_config *new_pb_cfg);
-i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
- u16 *status);
-i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
- struct i40e_dcbx_config *dcbcfg);
-i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
- u8 bridgetype,
- struct i40e_dcbx_config *dcbcfg);
-i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_init_dcb(struct i40e_hw *hw,
- bool enable_mib_change);
-enum i40e_status_code
+int i40e_get_dcbx_status(struct i40e_hw *hw,
+ u16 *status);
+int i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg);
+int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
+int i40e_get_dcb_config(struct i40e_hw *hw);
+int i40e_init_dcb(struct i40e_hw *hw,
+ bool enable_mib_change);
+int
i40e_get_fw_lldp_status(struct i40e_hw *hw,
enum i40e_get_fw_lldp_status_resp *lldp_status);
-i40e_status i40e_set_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
- struct i40e_dcbx_config *dcbcfg);
+int i40e_set_dcb_config(struct i40e_hw *hw);
+int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg);
#endif /* _I40E_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index e32c61909b31..195421d863ab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -135,8 +135,8 @@ static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB ETS configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB ETS configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -174,8 +174,8 @@ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB PFC configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB PFC configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -225,8 +225,8 @@ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -290,8 +290,8 @@ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
index e1069ae658ad..7e8183762fd9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -36,7 +36,7 @@ static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
{
struct i40e_ddp_profile_list *profile_list;
u8 buff[I40E_PROFILE_LIST_SIZE];
- i40e_status status;
+ int status;
int i;
status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
@@ -91,7 +91,7 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
{
struct i40e_ddp_profile_list *profile_list;
u8 buff[I40E_PROFILE_LIST_SIZE];
- i40e_status status;
+ int status;
int i;
status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
@@ -117,14 +117,14 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
*
* Register a profile to the list of loaded profiles.
*/
-static enum i40e_status_code
+static int
i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id)
{
struct i40e_profile_section_header *sec;
struct i40e_profile_info *pinfo;
- i40e_status status;
u32 offset = 0, info = 0;
+ int status;
sec = (struct i40e_profile_section_header *)profile_info_sec;
sec->tbl_size = 1;
@@ -157,14 +157,14 @@ i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Removes DDP profile from the NIC.
**/
-static enum i40e_status_code
+static int
i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id)
{
struct i40e_profile_section_header *sec;
struct i40e_profile_info *pinfo;
- i40e_status status;
u32 offset = 0, info = 0;
+ int status;
sec = (struct i40e_profile_section_header *)profile_info_sec;
sec->tbl_size = 1;
@@ -270,12 +270,12 @@ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
struct i40e_profile_segment *profile_hdr;
struct i40e_profile_info pinfo;
struct i40e_package_header *pkg_hdr;
- i40e_status status;
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 track_id;
int istatus;
+ int status;
pkg_hdr = (struct i40e_package_header *)data;
if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 2c1b1da1220e..9954493cd448 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -240,7 +240,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
(unsigned long int)vsi->net_stats_offsets.rx_compressed,
(unsigned long int)vsi->net_stats_offsets.tx_compressed);
dev_info(&pf->pdev->dev,
- " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
+ " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
vsi->tx_restart, vsi->tx_busy,
vsi->rx_buf_failed, vsi->rx_page_failed);
rcu_read_lock();
@@ -275,9 +275,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->rx_stats.alloc_page_failed,
rx_ring->rx_stats.alloc_buff_failed);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_stats: realloc_count = %lld, page_reuse_count = %lld\n",
+ " rx_rings[%i]: rx_stats: realloc_count = 0, page_reuse_count = %lld\n",
i,
- rx_ring->rx_stats.realloc_count,
rx_ring->rx_stats.page_reuse_count);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: size = %i\n",
@@ -310,10 +309,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
tx_ring->stats.bytes,
tx_ring->tx_stats.restart_queue);
dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+ " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld, tx_stopped = %lld\n",
i,
tx_ring->tx_stats.tx_busy,
- tx_ring->tx_stats.tx_done_old);
+ tx_ring->tx_stats.tx_done_old,
+ tx_ring->tx_stats.tx_stopped);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: size = %i\n",
i, tx_ring->size);
@@ -742,10 +742,8 @@ static void i40e_dbg_dump_vf(struct i40e_pf *pf, int vf_id)
vsi = pf->vsi[vf->lan_vsi_idx];
dev_info(&pf->pdev->dev, "vf %2d: VSI id=%d, seid=%d, qps=%d\n",
vf_id, vf->lan_vsi_id, vsi->seid, vf->num_queue_pairs);
- dev_info(&pf->pdev->dev, " num MDD=%lld, invalid msg=%lld, valid msg=%lld\n",
- vf->num_mdd_events,
- vf->num_invalid_msgs,
- vf->num_valid_msgs);
+ dev_info(&pf->pdev->dev, " num MDD=%lld\n",
+ vf->num_mdd_events);
} else {
dev_info(&pf->pdev->dev, "invalid VF id %d\n", vf_id);
}
@@ -920,9 +918,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
i40e_veb_release(pf->veb[i]);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
- i40e_status ret;
- u16 vid;
unsigned int v;
+ int ret;
+ u16 vid;
cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
if (cnt != 2) {
@@ -1286,7 +1284,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
struct i40e_aq_desc *desc;
- i40e_status ret;
+ int ret;
desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
if (!desc)
@@ -1332,9 +1330,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
desc = NULL;
} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
struct i40e_aq_desc *desc;
- i40e_status ret;
u16 buffer_len;
u8 *buff;
+ int ret;
desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
if (!desc)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 1bcb0ec0f0c0..d9c51a238dcc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -24,8 +24,10 @@
#define I40E_DEV_ID_10G_B 0x104F
#define I40E_DEV_ID_10G_SFP 0x104E
#define I40E_DEV_ID_5G_BASE_T_BC 0x101F
+#define I40E_DEV_ID_1G_BASE_T_BC 0x0DD2
#define I40E_IS_X710TL_DEVICE(d) \
- (((d) == I40E_DEV_ID_5G_BASE_T_BC) || \
+ (((d) == I40E_DEV_ID_1G_BASE_T_BC) || \
+ ((d) == I40E_DEV_ID_5G_BASE_T_BC) || \
((d) == I40E_DEV_ID_10G_BASE_T_BC))
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
@@ -33,6 +35,7 @@
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_SFP_X722_A 0x0DDA
#endif /* _I40E_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index ef4d3762bf37..97fe1787a8f4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -10,8 +10,8 @@
* @reg: reg to be tested
* @mask: bits to be touched
**/
-static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
- u32 reg, u32 mask)
+static int i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+ u32 reg, u32 mask)
{
static const u32 patterns[] = {
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
@@ -44,7 +44,7 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
return 0;
}
-struct i40e_diag_reg_test_info i40e_reg_list[] = {
+const struct i40e_diag_reg_test_info i40e_reg_list[] = {
/* offset mask elements stride */
{I40E_QTX_CTL(0), 0x0000FFBF, 1,
I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
@@ -74,31 +74,32 @@ struct i40e_diag_reg_test_info i40e_reg_list[] = {
*
* Perform registers diagnostic test
**/
-i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
+int i40e_diag_reg_test(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 reg, mask;
+ u32 elements;
u32 i, j;
for (i = 0; i40e_reg_list[i].offset != 0 &&
!ret_code; i++) {
+ elements = i40e_reg_list[i].elements;
/* set actual reg range for dynamically allocated resources */
if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
hw->func_caps.num_tx_qp != 0)
- i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+ elements = hw->func_caps.num_tx_qp;
if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
hw->func_caps.num_msix_vectors != 0)
- i40e_reg_list[i].elements =
- hw->func_caps.num_msix_vectors - 1;
+ elements = hw->func_caps.num_msix_vectors - 1;
/* test register access */
mask = i40e_reg_list[i].mask;
- for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
+ for (j = 0; j < elements && !ret_code; j++) {
reg = i40e_reg_list[i].offset +
(j * i40e_reg_list[i].stride);
ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
@@ -114,9 +115,9 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
*
* Perform EEPROM diagnostic test
**/
-i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
+int i40e_diag_eeprom_test(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ int ret_code;
u16 reg_val;
/* read NVM control word and if NVM valid, validate EEPROM checksum*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index c3340f320a18..c3ce5f35211f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -20,9 +20,9 @@ struct i40e_diag_reg_test_info {
u32 stride; /* bytes between each element */
};
-extern struct i40e_diag_reg_test_info i40e_reg_list[];
+extern const struct i40e_diag_reg_test_info i40e_reg_list[];
-i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
-i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
+int i40e_diag_reg_test(struct i40e_hw *hw);
+int i40e_diag_eeprom_test(struct i40e_hw *hw);
#endif /* _I40E_DIAG_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 513ba6974355..afc4fa8c66af 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -154,7 +154,7 @@ __i40e_add_ethtool_stats(u64 **data, void *pointer,
* @ring: the ring to copy
*
* Queue statistics must be copied while protected by
- * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats.
+ * u64_stats_fetch_begin, so we can't directly use i40e_add_ethtool_stats.
* Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the
* ring pointer is null, zero out the queue stat values and update the data
* pointer. Otherwise safely copy the stats from the ring into the supplied
@@ -172,16 +172,16 @@ i40e_add_queue_stats(u64 **data, struct i40e_ring *ring)
/* To avoid invalid statistics values, ensure that we keep retrying
* the copy until we get a consistent value according to
- * u64_stats_fetch_retry_irq. But first, make sure our ring is
+ * u64_stats_fetch_retry. But first, make sure our ring is
* non-null before attempting to access its syncp.
*/
do {
- start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
+ start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
for (i = 0; i < size; i++) {
i40e_add_one_ethtool_stat(&(*data)[i], ring,
&stats[i]);
}
- } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (ring && u64_stats_fetch_retry(&ring->syncp, start));
/* Once we successfully copy the stats in, update the data pointer */
*data += size;
@@ -236,8 +236,6 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
#define I40E_PFC_STAT(_name, _stat) \
I40E_STAT(struct i40e_pfc_stats, _name, _stat)
-#define I40E_QUEUE_STAT(_name, _stat) \
- I40E_STAT(struct i40e_ring, _name, _stat)
static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_packets),
@@ -293,8 +291,14 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
I40E_VSI_STAT("tx_linearize", tx_linearize),
I40E_VSI_STAT("tx_force_wb", tx_force_wb),
I40E_VSI_STAT("tx_busy", tx_busy),
+ I40E_VSI_STAT("tx_stopped", tx_stopped),
I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
+ I40E_VSI_STAT("rx_cache_reuse", rx_page_reuse),
+ I40E_VSI_STAT("rx_cache_alloc", rx_page_alloc),
+ I40E_VSI_STAT("rx_cache_waive", rx_page_waive),
+ I40E_VSI_STAT("rx_cache_busy", rx_page_busy),
+ I40E_VSI_STAT("tx_restart", tx_restart),
};
/* These PF_STATs might look like duplicates of some NETDEV_STATs,
@@ -451,6 +455,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_DISABLE_FW_LLDP, 0),
I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
+ I40E_PRIV_FLAG("vf-vlan-pruning",
+ I40E_FLAG_VF_VLAN_PRUNING, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -1135,6 +1141,71 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
return 0;
}
+#define I40E_LBIT_SIZE 8
+/**
+ * i40e_speed_to_link_speed - Translate decimal speed to i40e_aq_link_speed
+ * @speed: speed in decimal
+ * @ks: ethtool ksettings
+ *
+ * Return i40e_aq_link_speed based on speed
+ **/
+static enum i40e_aq_link_speed
+i40e_speed_to_link_speed(__u32 speed, const struct ethtool_link_ksettings *ks)
+{
+ enum i40e_aq_link_speed link_speed = I40E_LINK_SPEED_UNKNOWN;
+ bool speed_changed = false;
+ int i, j;
+
+ static const struct {
+ __u32 speed;
+ enum i40e_aq_link_speed link_speed;
+ __u8 bit[I40E_LBIT_SIZE];
+ } i40e_speed_lut[] = {
+#define I40E_LBIT(mode) ETHTOOL_LINK_MODE_ ## mode ##_Full_BIT
+ {SPEED_100, I40E_LINK_SPEED_100MB, {I40E_LBIT(100baseT)} },
+ {SPEED_1000, I40E_LINK_SPEED_1GB,
+ {I40E_LBIT(1000baseT), I40E_LBIT(1000baseX),
+ I40E_LBIT(1000baseKX)} },
+ {SPEED_10000, I40E_LINK_SPEED_10GB,
+ {I40E_LBIT(10000baseT), I40E_LBIT(10000baseKR),
+ I40E_LBIT(10000baseLR), I40E_LBIT(10000baseCR),
+ I40E_LBIT(10000baseSR), I40E_LBIT(10000baseKX4)} },
+
+ {SPEED_25000, I40E_LINK_SPEED_25GB,
+ {I40E_LBIT(25000baseCR), I40E_LBIT(25000baseKR),
+ I40E_LBIT(25000baseSR)} },
+ {SPEED_40000, I40E_LINK_SPEED_40GB,
+ {I40E_LBIT(40000baseKR4), I40E_LBIT(40000baseCR4),
+ I40E_LBIT(40000baseSR4), I40E_LBIT(40000baseLR4)} },
+ {SPEED_20000, I40E_LINK_SPEED_20GB,
+ {I40E_LBIT(20000baseKR2)} },
+ {SPEED_2500, I40E_LINK_SPEED_2_5GB, {I40E_LBIT(2500baseT)} },
+ {SPEED_5000, I40E_LINK_SPEED_5GB, {I40E_LBIT(2500baseT)} }
+#undef I40E_LBIT
+};
+
+ for (i = 0; i < ARRAY_SIZE(i40e_speed_lut); i++) {
+ if (i40e_speed_lut[i].speed == speed) {
+ for (j = 0; j < I40E_LBIT_SIZE; j++) {
+ if (test_bit(i40e_speed_lut[i].bit[j],
+ ks->link_modes.supported)) {
+ speed_changed = true;
+ break;
+ }
+ if (!i40e_speed_lut[i].bit[j])
+ break;
+ }
+ if (speed_changed) {
+ link_speed = i40e_speed_lut[i].link_speed;
+ break;
+ }
+ }
+ }
+ return link_speed;
+}
+
+#undef I40E_LBIT_SIZE
+
/**
* i40e_set_link_ksettings - Set Speed and Duplex
* @netdev: network interface device structure
@@ -1151,12 +1222,14 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings copy_ks;
struct i40e_aq_set_phy_config config;
struct i40e_pf *pf = np->vsi->back;
+ enum i40e_aq_link_speed link_speed;
struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
bool autoneg_changed = false;
- i40e_status status = 0;
int timeout = 50;
+ int status = 0;
int err = 0;
+ __u32 speed;
u8 autoneg;
/* Changing port settings is not supported if this isn't the
@@ -1189,6 +1262,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
/* save autoneg out of ksettings */
autoneg = copy_ks.base.autoneg;
+ speed = copy_ks.base.speed;
/* get our own copy of the bits to check against */
memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings));
@@ -1207,13 +1281,16 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
/* set autoneg back to what it currently is */
copy_ks.base.autoneg = safe_ks.base.autoneg;
+ copy_ks.base.speed = safe_ks.base.speed;
/* If copy_ks.base and safe_ks.base are not the same now, then they are
* trying to set something that we do not support.
*/
if (memcmp(&copy_ks.base, &safe_ks.base,
- sizeof(struct ethtool_link_settings)))
+ sizeof(struct ethtool_link_settings))) {
+ netdev_err(netdev, "Only speed and autoneg are supported.\n");
return -EOPNOTSUPP;
+ }
while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
timeout--;
@@ -1323,6 +1400,27 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
40000baseLR4_Full))
config.link_speed |= I40E_LINK_SPEED_40GB;
+ /* Autonegotiation must be disabled to change speed */
+ if ((speed != SPEED_UNKNOWN && safe_ks.base.speed != speed) &&
+ (autoneg == AUTONEG_DISABLE ||
+ (safe_ks.base.autoneg == AUTONEG_DISABLE && !autoneg_changed))) {
+ link_speed = i40e_speed_to_link_speed(speed, ks);
+ if (link_speed == I40E_LINK_SPEED_UNKNOWN) {
+ netdev_info(netdev, "Given speed is not supported\n");
+ err = -EOPNOTSUPP;
+ goto done;
+ } else {
+ config.link_speed = link_speed;
+ }
+ } else {
+ if (safe_ks.base.speed != speed) {
+ netdev_info(netdev,
+ "Unable to set speed, disable autoneg\n");
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+ }
+
/* If speed didn't get set, set it to what it currently is.
* This is needed because if advertise is 0 (as it is when autoneg
* is disabled) then speed won't get set.
@@ -1357,8 +1455,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status) {
netdev_info(netdev,
- "Set phy config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Set phy config failed, err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
@@ -1367,8 +1465,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
status = i40e_update_link_info(hw);
if (status)
netdev_dbg(netdev,
- "Updating link info failed with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Updating link info failed with err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
@@ -1387,7 +1485,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
- i40e_status status = 0;
+ int status = 0;
u32 flags = 0;
int err = 0;
@@ -1419,8 +1517,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status) {
netdev_info(netdev,
- "Set phy config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Set phy config failed, err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
@@ -1433,8 +1531,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
* (e.g. no physical connection etc.)
*/
netdev_dbg(netdev,
- "Updating link info failed with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Updating link info failed with err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
@@ -1449,7 +1547,7 @@ static int i40e_get_fec_param(struct net_device *netdev,
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
- i40e_status status = 0;
+ int status = 0;
int err = 0;
u8 fec_cfg;
@@ -1536,12 +1634,12 @@ static int i40e_nway_reset(struct net_device *netdev)
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
- i40e_status ret = 0;
+ int ret = 0;
ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
if (ret) {
- netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "link restart failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@@ -1601,9 +1699,9 @@ static int i40e_set_pauseparam(struct net_device *netdev,
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
- i40e_status status;
u8 aq_failures;
int err = 0;
+ int status;
u32 is_an;
/* Changing the port's flow control is not supported if this isn't the
@@ -1657,20 +1755,20 @@ static int i40e_set_pauseparam(struct net_device *netdev,
status = i40e_set_fc(hw, &aq_failures, link_up);
if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
- netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
@@ -1905,10 +2003,10 @@ static void i40e_get_drvinfo(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
+ strscpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+ strscpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
if (pf->hw.pf_id == 0)
@@ -1916,7 +2014,9 @@ static void i40e_get_drvinfo(struct net_device *netdev,
}
static void i40e_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
@@ -1944,7 +2044,9 @@ static bool i40e_active_tx_ring_index(struct i40e_vsi *vsi, u16 index)
}
static int i40e_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -2083,9 +2185,6 @@ static int i40e_set_ringparam(struct net_device *netdev,
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err)
goto rx_unwind;
- err = i40e_alloc_rx_bi(&rx_rings[i]);
- if (err)
- goto rx_unwind;
/* now allocate the Rx buffers to make sure the OS
* has enough memory, any failure here means abort
@@ -2484,8 +2583,8 @@ static u64 i40e_link_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- i40e_status status;
bool link_up = false;
+ int status;
netif_info(pf, hw, netdev, "link test\n");
status = i40e_get_link_status(&pf->hw, &link_up);
@@ -2576,15 +2675,16 @@ static void i40e_diag_test(struct net_device *netdev,
set_bit(__I40E_TESTING, pf->state);
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
+ dev_warn(&pf->pdev->dev,
+ "Cannot start offline testing when PF is in reset state.\n");
+ goto skip_ol_tests;
+ }
+
if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) {
dev_warn(&pf->pdev->dev,
"Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
- data[I40E_ETH_TEST_REG] = 1;
- data[I40E_ETH_TEST_EEPROM] = 1;
- data[I40E_ETH_TEST_INTR] = 1;
- data[I40E_ETH_TEST_LINK] = 1;
- eth_test->flags |= ETH_TEST_FL_FAILED;
- clear_bit(__I40E_TESTING, pf->state);
goto skip_ol_tests;
}
@@ -2631,9 +2731,17 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_INTR] = 0;
}
-skip_ol_tests:
-
netif_info(pf, drv, netdev, "testing finished\n");
+ return;
+
+skip_ol_tests:
+ data[I40E_ETH_TEST_REG] = 1;
+ data[I40E_ETH_TEST_EEPROM] = 1;
+ data[I40E_ETH_TEST_INTR] = 1;
+ data[I40E_ETH_TEST_LINK] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__I40E_TESTING, pf->state);
+ netif_info(pf, drv, netdev, "testing failed\n");
}
static void i40e_get_wol(struct net_device *netdev,
@@ -2699,11 +2807,11 @@ static int i40e_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- i40e_status ret = 0;
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
int blink_freq = 2;
u16 temp_status;
+ int ret = 0;
switch (state) {
case ETHTOOL_ID_ACTIVE:
@@ -3079,10 +3187,17 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
if (cmd->flow_type == TCP_V4_FLOW ||
cmd->flow_type == UDP_V4_FLOW) {
- if (i_set & I40E_L3_SRC_MASK)
- cmd->data |= RXH_IP_SRC;
- if (i_set & I40E_L3_DST_MASK)
- cmd->data |= RXH_IP_DST;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (i_set & I40E_X722_L3_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_X722_L3_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ } else {
+ if (i_set & I40E_L3_SRC_MASK)
+ cmd->data |= RXH_IP_SRC;
+ if (i_set & I40E_L3_DST_MASK)
+ cmd->data |= RXH_IP_DST;
+ }
} else if (cmd->flow_type == TCP_V6_FLOW ||
cmd->flow_type == UDP_V6_FLOW) {
if (i_set & I40E_L3_V6_SRC_MASK)
@@ -3440,12 +3555,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
/**
* i40e_get_rss_hash_bits - Read RSS Hash bits from register
+ * @hw: hw structure
* @nfc: pointer to user request
* @i_setc: bits currently set
*
* Returns value of bits to be set per user request
**/
-static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
+static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw,
+ struct ethtool_rxnfc *nfc,
+ u64 i_setc)
{
u64 i_set = i_setc;
u64 src_l3 = 0, dst_l3 = 0;
@@ -3464,8 +3582,13 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
dst_l3 = I40E_L3_V6_DST_MASK;
} else if (nfc->flow_type == TCP_V4_FLOW ||
nfc->flow_type == UDP_V4_FLOW) {
- src_l3 = I40E_L3_SRC_MASK;
- dst_l3 = I40E_L3_DST_MASK;
+ if (hw->mac.type == I40E_MAC_X722) {
+ src_l3 = I40E_X722_L3_SRC_MASK;
+ dst_l3 = I40E_X722_L3_DST_MASK;
+ } else {
+ src_l3 = I40E_L3_SRC_MASK;
+ dst_l3 = I40E_L3_DST_MASK;
+ }
} else {
/* Any other flow type are not supported here */
return i_set;
@@ -3483,6 +3606,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc)
return i_set;
}
+#define FLOW_PCTYPES_SIZE 64
/**
* i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
* @pf: pointer to the physical function struct
@@ -3495,9 +3619,11 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
struct i40e_hw *hw = &pf->hw;
u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
- u8 flow_pctype = 0;
+ DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE);
u64 i_set, i_setc;
+ bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE);
+
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev,
"Change of RSS hash input set is not supported when MFP mode is enabled\n");
@@ -3513,36 +3639,35 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
switch (nfc->flow_type) {
case TCP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK,
+ flow_pctypes);
break;
case TCP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes);
if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK,
+ flow_pctypes);
break;
case UDP_V4_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
-
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
+ flow_pctypes);
+ set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP,
+ flow_pctypes);
+ }
hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
break;
case UDP_V6_FLOW:
- flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
- if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
-
+ set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes);
+ if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+ set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
+ flow_pctypes);
+ set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP,
+ flow_pctypes);
+ }
hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
break;
case AH_ESP_V4_FLOW:
@@ -3575,17 +3700,20 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
return -EINVAL;
}
- if (flow_pctype) {
- i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0,
- flow_pctype)) |
- ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1,
- flow_pctype)) << 32);
- i_set = i40e_get_rss_hash_bits(nfc, i_setc);
- i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype),
- (u32)i_set);
- i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype),
- (u32)(i_set >> 32));
- hena |= BIT_ULL(flow_pctype);
+ if (bitmap_weight(flow_pctypes, FLOW_PCTYPES_SIZE)) {
+ u8 flow_id;
+
+ for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) {
+ i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32);
+ i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc);
+
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id),
+ (u32)i_set);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id),
+ (u32)(i_set >> 32));
+ hena |= BIT_ULL(flow_id);
+ }
}
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
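The i40e_set_rss_hash_opt() hunks above replace the single flow_pctype variable with a bitmap, so one ethtool request programs the input set for every affected PCTYPE (including the SYN_NO_ACK and unicast/multicast UDP companions). Condensed sketch of the loop, with only one bit set for brevity:

	DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE);
	u8 flow_id;

	bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE);
	set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes);
	/* the switch above may set further PCTYPE bits the same way */

	for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) {
		u64 i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) |
			     ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32);
		u64 i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc);

		i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id), (u32)i_set);
		i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id), (u32)(i_set >> 32));
		hena |= BIT_ULL(flow_id);	/* finally enable the PCTYPE in HENA */
	}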
@@ -4338,11 +4466,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
/* First 4 bytes of L4 header */
- if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF))
- new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
- else if (!usr_ip4_spec->l4_4_bytes)
- new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
- else
+ if (usr_ip4_spec->l4_4_bytes)
return -EOPNOTSUPP;
/* Filtering on Type of Service is not supported. */
@@ -4376,16 +4500,12 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
(struct in6_addr *)&ipv6_full_mask))
new_mask |= I40E_L3_V6_DST_MASK;
else if (ipv6_addr_any((struct in6_addr *)
- &usr_ip6_spec->ip6src))
+ &usr_ip6_spec->ip6dst))
new_mask &= ~I40E_L3_V6_DST_MASK;
else
return -EOPNOTSUPP;
- if (usr_ip6_spec->l4_4_bytes == htonl(0xFFFFFFFF))
- new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
- else if (!usr_ip6_spec->l4_4_bytes)
- new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
- else
+ if (usr_ip6_spec->l4_4_bytes)
return -EOPNOTSUPP;
/* Filtering on Traffic class is not supported. */
@@ -4912,7 +5032,7 @@ static int i40e_set_channels(struct net_device *dev,
/* We do not support setting channels via ethtool when TCs are
* configured through mqprio
*/
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return -EINVAL;
/* verify they are not requesting separate vectors */
@@ -5127,7 +5247,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 reset_needed = 0;
- i40e_status status;
+ int status;
u32 i, j;
orig_flags = READ_ONCE(pf->flags);
@@ -5242,8 +5362,8 @@ flags_complete:
0, NULL);
if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
- "couldn't set switch config bits, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't set switch config bits, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
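The logging changes above drop i40e_stat_str() in favor of printing the (now errno-style) return value with the %pe format specifier, which renders the symbolic error name when given an error pointer. A minimal usage sketch; do_aq_request() is hypothetical and assumed to return 0 or a negative errno:

	int ret = do_aq_request(hw);	/* hypothetical helper, 0 or -Exx */

	if (ret)
		dev_info(&pf->pdev->dev,
			 "request failed, err %pe aq_err %s\n",
			 ERR_PTR(ret),	/* %pe prints e.g. "-EIO" for error pointers */
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));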
@@ -5275,6 +5395,20 @@ flags_complete:
return -EOPNOTSUPP;
}
+ if ((changed_flags & I40E_FLAG_VF_VLAN_PRUNING) &&
+ pf->num_alloc_vfs) {
+ dev_warn(&pf->pdev->dev,
+ "Changing vf-vlan-pruning flag while VF(s) are active is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((changed_flags & I40E_FLAG_LEGACY_RX) &&
+ I40E_2K_TOO_SMALL_WITH_PADDING) {
+ dev_warn(&pf->pdev->dev,
+ "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
+ return -EOPNOTSUPP;
+ }
+
if ((changed_flags & new_flags &
I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
(new_flags & I40E_FLAG_MFP_ENABLED))
@@ -5308,9 +5442,8 @@ flags_complete:
return -EBUSY;
default:
dev_warn(&pf->pdev->dev,
- "Starting FW LLDP agent failed: error: %s, %s\n",
- i40e_stat_str(&pf->hw,
- status),
+ "Starting FW LLDP agent failed: error: %pe, %s\n",
+ ERR_PTR(status),
i40e_aq_str(&pf->hw,
adq_err));
return -EINVAL;
@@ -5350,8 +5483,8 @@ static int i40e_get_module_info(struct net_device *netdev,
u32 sff8472_comp = 0;
u32 sff8472_swap = 0;
u32 sff8636_rev = 0;
- i40e_status status;
u32 type = 0;
+ int status;
/* Check if firmware supports reading module EEPROM. */
if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
@@ -5455,8 +5588,8 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
bool is_sfp = false;
- i40e_status status;
u32 value = 0;
+ int status;
int i;
if (!ee || !ee->len || !data)
@@ -5497,10 +5630,10 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp phy_cfg;
- enum i40e_status_code status = 0;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
+ int status = 0;
/* Get initial PHY capabilities */
status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
@@ -5562,11 +5695,11 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp abilities;
- enum i40e_status_code status = I40E_SUCCESS;
struct i40e_aq_set_phy_config config;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
+ int status = I40E_SUCCESS;
__le16 eee_capability;
/* Deny parameters we don't support */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 163ee8c6311c..46f7950a0049 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -17,17 +17,17 @@
* @type: what type of segment descriptor we're manipulating
* @direct_mode_sz: size to alloc in direct mode
**/
-i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 sd_index,
- enum i40e_sd_entry_type type,
- u64 direct_mode_sz)
+int i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz)
{
enum i40e_memory_type mem_type __attribute__((unused));
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
+ int ret_code = I40E_SUCCESS;
struct i40e_dma_mem mem;
- i40e_status ret_code = I40E_SUCCESS;
u64 alloc_len;
if (NULL == hmc_info->sd_table.sd_entry) {
@@ -106,19 +106,19 @@ exit:
* aligned on 4K boundary and zeroed memory.
* 2. It should be 4K in size.
**/
-i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 pd_index,
- struct i40e_dma_mem *rsrc_pg)
+int i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg)
{
- i40e_status ret_code = 0;
struct i40e_hmc_pd_table *pd_table;
struct i40e_hmc_pd_entry *pd_entry;
struct i40e_dma_mem mem;
struct i40e_dma_mem *page = &mem;
u32 sd_idx, rel_pd_idx;
- u64 *pd_addr;
+ int ret_code = 0;
u64 page_desc;
+ u64 *pd_addr;
if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
@@ -185,15 +185,15 @@ exit:
* 1. Caller can deallocate the memory used by backing storage after this
* function returns.
**/
-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx)
+int i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
struct i40e_hmc_pd_entry *pd_entry;
struct i40e_hmc_pd_table *pd_table;
struct i40e_hmc_sd_entry *sd_entry;
u32 sd_idx, rel_pd_idx;
+ int ret_code = 0;
u64 *pd_addr;
/* calculate index */
@@ -241,11 +241,11 @@ exit:
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
**/
-i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
- u32 idx)
+int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
+ int ret_code = 0;
/* get the entry and decrease its ref counter */
sd_entry = &hmc_info->sd_table.sd_entry[idx];
@@ -269,9 +269,9 @@ exit:
* @idx: the page index
* @is_pf: used to distinguish between VF and PF
**/
-i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf)
+int i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
{
struct i40e_hmc_sd_entry *sd_entry;
@@ -290,11 +290,11 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
* @hmc_info: pointer to the HMC configuration information structure
* @idx: segment descriptor index to find the relevant page descriptor
**/
-i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
- u32 idx)
+int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
+ int ret_code = 0;
sd_entry = &hmc_info->sd_table.sd_entry[idx];
@@ -318,9 +318,9 @@ exit:
* @idx: segment descriptor index to find the relevant page descriptor
* @is_pf: used to distinguish between VF and PF
**/
-i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf)
+int i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
{
struct i40e_hmc_sd_entry *sd_entry;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 3113792afaff..9960da07a573 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -187,28 +187,28 @@ struct i40e_hmc_info {
/* add one more to the limit to correct our range */ \
*(pd_limit) += 1; \
}
-i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 sd_index,
- enum i40e_sd_entry_type type,
- u64 direct_mode_sz);
-
-i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 pd_index,
- struct i40e_dma_mem *rsrc_pg);
-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
-i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
+
+int i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+int i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
+int i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx);
+int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+int i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+int i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
#endif /* _I40E_HMC_H_ */
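The HMC changes above belong to the driver-wide switch from the private i40e_status typedef to plain int returns, so callers propagate or test the value like any other kernel error code. A small sketch under that assumption (arguments abbreviated; I40E_SD_TYPE_DIRECT is the existing segment-descriptor type enum):

	int err;

	err = i40e_add_sd_table_entry(hw, hmc_info, sd_index,
				      I40E_SD_TYPE_DIRECT, direct_mode_sz);
	if (err)
		return err;	/* plain int, propagated as-is */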
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index d6e92ecddfbd..40c101f286d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -74,12 +74,12 @@ static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
* Assumptions:
* - HMC Resource Profile has been selected before calling this function.
**/
-i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
- u32 rxq_num, u32 fcoe_cntx_num,
- u32 fcoe_filt_num)
+int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num)
{
struct i40e_hmc_obj_info *obj, *full_obj;
- i40e_status ret_code = 0;
+ int ret_code = 0;
u64 l2fpm_size;
u32 size_exp;
@@ -229,11 +229,11 @@ init_lan_hmc_out:
* 1. caller can deallocate the memory used by pd after this function
* returns.
**/
-static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx)
+static int i40e_remove_pd_page(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (!i40e_prep_remove_pd_page(hmc_info, idx))
ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
@@ -256,11 +256,11 @@ static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
* 1. caller can deallocate the memory used by backing storage after this
* function returns.
**/
-static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx)
+static int i40e_remove_sd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (!i40e_prep_remove_sd_bp(hmc_info, idx))
ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
@@ -276,15 +276,15 @@ static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
* This will allocate memory for PDs and backing pages and populate
* the sd and pd entries.
**/
-static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
- struct i40e_hmc_lan_create_obj_info *info)
+static int i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info)
{
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
u32 pd_idx1 = 0, pd_lmt1 = 0;
u32 pd_idx = 0, pd_lmt = 0;
bool pd_error = false;
u32 sd_idx, sd_lmt;
+ int ret_code = 0;
u64 sd_size;
u32 i, j;
@@ -435,13 +435,13 @@ exit:
* - This function will be called after i40e_init_lan_hmc() and before
* any LAN/FCoE HMC objects can be created.
**/
-i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
- enum i40e_hmc_model model)
+int i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model)
{
struct i40e_hmc_lan_create_obj_info info;
- i40e_status ret_code = 0;
u8 hmc_fn_id = hw->hmc.hmc_fn_id;
struct i40e_hmc_obj_info *obj;
+ int ret_code = 0;
/* Initialize part of the create object info struct */
info.hmc_info = &hw->hmc;
@@ -520,13 +520,13 @@ configure_lan_hmc_out:
* caller should deallocate memory allocated previously for
* book-keeping information about PDs and backing storage.
**/
-static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
- struct i40e_hmc_lan_delete_obj_info *info)
+static int i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info)
{
- i40e_status ret_code = 0;
struct i40e_hmc_pd_table *pd_table;
u32 pd_idx, pd_lmt, rel_pd_idx;
u32 sd_idx, sd_lmt;
+ int ret_code = 0;
u32 i, j;
if (NULL == info) {
@@ -632,10 +632,10 @@ exit:
* This must be called by drivers as they are shutting down and being
* removed from the OS.
**/
-i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+int i40e_shutdown_lan_hmc(struct i40e_hw *hw)
{
struct i40e_hmc_lan_delete_obj_info info;
- i40e_status ret_code;
+ int ret_code;
info.hmc_info = &hw->hmc;
info.rsrc_type = I40E_HMC_LAN_FULL;
@@ -915,9 +915,9 @@ static void i40e_write_qword(u8 *hmc_bits,
* @context_bytes: pointer to the context bit array (DMA memory)
* @hmc_type: the type of HMC resource
**/
-static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
- u8 *context_bytes,
- enum i40e_hmc_lan_rsrc_type hmc_type)
+static int i40e_clear_hmc_context(struct i40e_hw *hw,
+ u8 *context_bytes,
+ enum i40e_hmc_lan_rsrc_type hmc_type)
{
/* clean the bit array */
memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);
@@ -931,9 +931,9 @@ static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
* @ce_info: a description of the struct to be filled
* @dest: the struct to be filled
**/
-static i40e_status i40e_set_hmc_context(u8 *context_bytes,
- struct i40e_context_ele *ce_info,
- u8 *dest)
+static int i40e_set_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
{
int f;
@@ -973,18 +973,18 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes,
* base pointer. This function is used for LAN Queue contexts.
**/
static
-i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
- enum i40e_hmc_lan_rsrc_type rsrc_type,
- u32 obj_idx)
+int i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
+ enum i40e_hmc_lan_rsrc_type rsrc_type,
+ u32 obj_idx)
{
struct i40e_hmc_info *hmc_info = &hw->hmc;
u32 obj_offset_in_sd, obj_offset_in_pd;
struct i40e_hmc_sd_entry *sd_entry;
struct i40e_hmc_pd_entry *pd_entry;
u32 pd_idx, pd_lmt, rel_pd_idx;
- i40e_status ret_code = 0;
u64 obj_offset_in_fpm;
u32 sd_idx, sd_lmt;
+ int ret_code = 0;
if (NULL == hmc_info) {
ret_code = I40E_ERR_BAD_PTR;
@@ -1042,11 +1042,11 @@ exit:
* @hw: the hardware struct
* @queue: the queue we care about
**/
-i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue)
+int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue)
{
- i40e_status err;
u8 *context_bytes;
+ int err;
err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_TX, queue);
@@ -1062,12 +1062,12 @@ i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
* @queue: the queue we care about
* @s: the struct to be filled
**/
-i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_txq *s)
+int i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
{
- i40e_status err;
u8 *context_bytes;
+ int err;
err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_TX, queue);
@@ -1083,11 +1083,11 @@ i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
* @hw: the hardware struct
* @queue: the queue we care about
**/
-i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue)
+int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue)
{
- i40e_status err;
u8 *context_bytes;
+ int err;
err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_RX, queue);
@@ -1103,12 +1103,12 @@ i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
* @queue: the queue we care about
* @s: the struct to be filled
**/
-i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_rxq *s)
+int i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
{
- i40e_status err;
u8 *context_bytes;
+ int err;
err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_RX, queue);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index c46a2c449e60..9f960404c2b3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -137,22 +137,22 @@ struct i40e_hmc_lan_delete_obj_info {
u32 count;
};
-i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
- u32 rxq_num, u32 fcoe_cntx_num,
- u32 fcoe_filt_num);
-i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
- enum i40e_hmc_model model);
-i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
-
-i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue);
-i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_txq *s);
-i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue);
-i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_rxq *s);
+int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+int i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+int i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+int i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+int i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
#endif /* _I40E_LAN_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index e118cf9265c7..b847bd105b16 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -66,6 +66,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
@@ -77,6 +78,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
@@ -99,6 +101,24 @@ MODULE_LICENSE("GPL v2");
static struct workqueue_struct *i40e_wq;
+static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
+ struct net_device *netdev, int delta)
+{
+ struct netdev_hw_addr *ha;
+
+ if (!f || !netdev)
+ return;
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ if (ether_addr_equal(ha->addr, f->macaddr)) {
+ ha->refcount += delta;
+ if (ha->refcount <= 0)
+ ha->refcount = 1;
+ break;
+ }
+ }
+}
+
/**
* i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
* @hw: pointer to the HW structure
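The netdev_hw_addr_refcnt() helper added above adjusts the refcount of the matching entry in the netdev multicast list; later hunks call it with +1 when a filter is queued for addition and -1 when that add is undone or fails, so the stack's view of the address stays consistent. Usage sketch taken from those call sites:

	netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);	/* filter queued on tmp_add_list */
	/* ... if the add is rolled back or fails ... */
	netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);	/* drop the extra reference */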
@@ -178,10 +198,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
* @id: an owner id to stick on the items assigned
*
* Returns the base item index of the lump, or negative for error
- *
- * The search_hint trick and lack of advanced fit-finding only work
- * because we're highly likely to have all the same size lump requests.
- * Linear search time and any fragmentation should be minimal.
**/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
u16 needed, u16 id)
@@ -196,8 +212,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
return -EINVAL;
}
- /* start the linear search with an imperfect hint */
- i = pile->search_hint;
+ /* Allocate last queue in the pile for FDIR VSI queue
+ * so it doesn't fragment the qp_pile
+ */
+ if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
+ if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
+ dev_err(&pf->pdev->dev,
+ "Cannot allocate queue %d for I40E_VSI_FDIR\n",
+ pile->num_entries - 1);
+ return -ENOMEM;
+ }
+ pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
+ return pile->num_entries - 1;
+ }
+
+ i = 0;
while (i < pile->num_entries) {
/* skip already allocated entries */
if (pile->list[i] & I40E_PILE_VALID_BIT) {
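The i40e_get_lump() hunk above removes the search_hint and instead pins the FDIR VSI's single queue to the very last slot of the queue-pair pile, so it can never fragment the contiguous ranges handed out from index 0. Sketch of that reservation (condensed from the hunk):

	if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
		u16 last = pile->num_entries - 1;

		if (pile->list[last] & I40E_PILE_VALID_BIT)
			return -ENOMEM;			/* last slot already taken */
		pile->list[last] = id | I40E_PILE_VALID_BIT;
		return last;				/* FDIR always gets the last queue */
	}
	/* all other requests do a plain linear scan starting at i = 0 */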
@@ -216,7 +245,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
for (j = 0; j < needed; j++)
pile->list[i+j] = id | I40E_PILE_VALID_BIT;
ret = i;
- pile->search_hint = i + j;
break;
}
@@ -239,7 +267,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
int valid_id = (id | I40E_PILE_VALID_BIT);
int count = 0;
- int i;
+ u16 i;
if (!pile || index >= pile->num_entries)
return -EINVAL;
@@ -251,8 +279,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
count++;
}
- if (count && index < pile->search_hint)
- pile->search_hint = index;
return count;
}
@@ -359,7 +385,9 @@ static void i40e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
break;
default:
- netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
+ set_bit(__I40E_DOWN_REQUESTED, pf->state);
+ set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
break;
}
@@ -391,10 +419,10 @@ static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
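The stats hunks above move from the u64_stats_fetch_begin_irq()/..._retry_irq() helpers to the plain variants; the reader-side retry loop is otherwise unchanged. The pattern, for reference (writers still bracket their updates with u64_stats_update_begin()/end()):

	unsigned int start;
	u64 packets, bytes;

	do {
		start = u64_stats_fetch_begin(&ring->syncp);	/* snapshot the sequence */
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));	/* retry if a writer raced */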
@@ -444,10 +472,10 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
if (!ring)
continue;
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
@@ -526,6 +554,47 @@ void i40e_pf_reset_stats(struct i40e_pf *pf)
}
/**
+ * i40e_compute_pci_to_hw_id - compute index from PCI function.
+ * @vsi: ptr to the VSI to read from.
+ * @hw: ptr to the hardware info.
+ **/
+static u32 i40e_compute_pci_to_hw_id(struct i40e_vsi *vsi, struct i40e_hw *hw)
+{
+ int pf_count = i40e_get_pf_count(hw);
+
+ if (vsi->type == I40E_VSI_SRIOV)
+ return (hw->port * BIT(7)) / pf_count + vsi->vf_id;
+
+ return hw->port + BIT(7);
+}
+
+/**
+ * i40e_stat_update64 - read and update a 64 bit stat from the chip.
+ * @hw: ptr to the hardware info.
+ * @hireg: the high 32 bit reg to read.
+ * @loreg: the low 32 bit reg to read.
+ * @offset_loaded: has the initial offset been loaded yet.
+ * @offset: ptr to current offset value.
+ * @stat: ptr to the stat.
+ *
+ * Since the device stats are not reset at PFReset, they will not
+ * be zeroed when the driver starts. We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero.
+ **/
+static void i40e_stat_update64(struct i40e_hw *hw, u32 hireg, u32 loreg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u64 new_data;
+
+ new_data = rd64(hw, loreg);
+
+ if (!offset_loaded || new_data < *offset)
+ *offset = new_data;
+ *stat = new_data - *offset;
+}
+
+/**
* i40e_stat_update48 - read and update a 48 bit stat from the chip
* @hw: ptr to the hardware info
* @hireg: the high 32 bit reg to read
@@ -597,6 +666,34 @@ static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
}
/**
+ * i40e_stats_update_rx_discards - update rx_discards.
+ * @vsi: ptr to the VSI to be updated.
+ * @hw: ptr to the hardware info.
+ * @stat_idx: VSI's stat_counter_idx.
+ * @offset_loaded: ptr to the VSI's stat_offsets_loaded.
+ * @stat_offset: ptr to stat_offset to store first read of specific register.
+ * @stat: ptr to VSI's stat to be updated.
+ **/
+static void
+i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
+ int stat_idx, bool offset_loaded,
+ struct i40e_eth_stats *stat_offset,
+ struct i40e_eth_stats *stat)
+{
+ u64 rx_rdpc, rx_rxerr;
+
+ i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
+ &stat_offset->rx_discards, &rx_rdpc);
+ i40e_stat_update64(hw,
+ I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
+ I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
+ offset_loaded, &stat_offset->rx_discards_other,
+ &rx_rxerr);
+
+ stat->rx_discards = rx_rdpc + rx_rxerr;
+}
+
+/**
* i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
* @vsi: the VSI to be updated
**/
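The new i40e_stat_update64()/i40e_stats_update_rx_discards() helpers above use the usual i40e baseline-offset trick: the first read of the never-reset hardware counter becomes the offset, and later reads report the delta. Worked example with invented numbers, plus the caller shape from the hunk:

	/* raw GL_RXERR1 at driver load = 1000 -> offset = 1000, stat = 0
	 * raw GL_RXERR1 read later     = 1500 -> stat = 1500 - 1000 = 500  */
	u64 rx_rxerr;

	i40e_stat_update64(hw,
			   I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
			   I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
			   offset_loaded, &stat_offset->rx_discards_other, &rx_rxerr);
	/* the VSI's rx_discards is then the per-VSI RDPC count plus this value */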
@@ -655,6 +752,10 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
I40E_GLV_BPTCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_broadcast, &es->tx_broadcast);
+
+ i40e_stats_update_rx_discards(vsi, hw, stat_idx,
+ vsi->stat_offsets_loaded, oes, es);
+
vsi->stat_offsets_loaded = true;
}
@@ -749,18 +850,19 @@ void i40e_update_veb_stats(struct i40e_veb *veb)
**/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
+ u64 rx_page, rx_buf, rx_reuse, rx_alloc, rx_waive, rx_busy;
struct i40e_pf *pf = vsi->back;
struct rtnl_link_stats64 *ons;
struct rtnl_link_stats64 *ns; /* netdev stats */
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
- u32 tx_restart, tx_busy;
+ u64 tx_restart, tx_busy;
struct i40e_ring *p;
- u32 rx_page, rx_buf;
u64 bytes, packets;
unsigned int start;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_stopped;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
u16 q;
@@ -780,8 +882,13 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
rx_b = rx_p = 0;
tx_b = tx_p = 0;
tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
+ tx_stopped = 0;
rx_page = 0;
rx_buf = 0;
+ rx_reuse = 0;
+ rx_alloc = 0;
+ rx_waive = 0;
+ rx_busy = 0;
rcu_read_lock();
for (q = 0; q < vsi->num_queue_pairs; q++) {
/* locate Tx ring */
@@ -790,16 +897,17 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
continue;
do {
- start = u64_stats_fetch_begin_irq(&p->syncp);
+ start = u64_stats_fetch_begin(&p->syncp);
packets = p->stats.packets;
bytes = p->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ } while (u64_stats_fetch_retry(&p->syncp, start));
tx_b += bytes;
tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
tx_busy += p->tx_stats.tx_busy;
tx_linearize += p->tx_stats.tx_linearize;
tx_force_wb += p->tx_stats.tx_force_wb;
+ tx_stopped += p->tx_stats.tx_stopped;
/* locate Rx ring */
p = READ_ONCE(vsi->rx_rings[q]);
@@ -807,14 +915,18 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
continue;
do {
- start = u64_stats_fetch_begin_irq(&p->syncp);
+ start = u64_stats_fetch_begin(&p->syncp);
packets = p->stats.packets;
bytes = p->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ } while (u64_stats_fetch_retry(&p->syncp, start));
rx_b += bytes;
rx_p += packets;
rx_buf += p->rx_stats.alloc_buff_failed;
rx_page += p->rx_stats.alloc_page_failed;
+ rx_reuse += p->rx_stats.page_reuse_count;
+ rx_alloc += p->rx_stats.page_alloc_count;
+ rx_waive += p->rx_stats.page_waive_count;
+ rx_busy += p->rx_stats.page_busy_count;
if (i40e_enabled_xdp_vsi(vsi)) {
/* locate XDP ring */
@@ -823,10 +935,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
continue;
do {
- start = u64_stats_fetch_begin_irq(&p->syncp);
+ start = u64_stats_fetch_begin(&p->syncp);
packets = p->stats.packets;
bytes = p->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+ } while (u64_stats_fetch_retry(&p->syncp, start));
tx_b += bytes;
tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
@@ -840,8 +952,13 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
vsi->tx_busy = tx_busy;
vsi->tx_linearize = tx_linearize;
vsi->tx_force_wb = tx_force_wb;
+ vsi->tx_stopped = tx_stopped;
vsi->rx_page_failed = rx_page;
vsi->rx_buf_failed = rx_buf;
+ vsi->rx_page_reuse = rx_reuse;
+ vsi->rx_page_alloc = rx_alloc;
+ vsi->rx_page_waive = rx_waive;
+ vsi->rx_page_busy = rx_busy;
ns->rx_packets = rx_p;
ns->rx_bytes = rx_b;
@@ -1328,6 +1445,114 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
}
/**
+ * i40e_get_vf_new_vlan - Get new vlan id on a vf
+ * @vsi: the vsi to configure
+ * @new_mac: new mac filter to be added
+ * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL
+ * @vlan_filters: the number of active VLAN filters
+ * @trusted: flag if the VF is trusted
+ *
+ * Get new VLAN id based on current VLAN filters, trust, PVID
+ * and vf-vlan-prune-disable flag.
+ *
+ * Returns the value of the new vlan filter or
+ * the old value if no new filter is needed.
+ */
+static s16 i40e_get_vf_new_vlan(struct i40e_vsi *vsi,
+ struct i40e_new_mac_filter *new_mac,
+ struct i40e_mac_filter *f,
+ int vlan_filters,
+ bool trusted)
+{
+ s16 pvid = le16_to_cpu(vsi->info.pvid);
+ struct i40e_pf *pf = vsi->back;
+ bool is_any;
+
+ if (new_mac)
+ f = new_mac->f;
+
+ if (pvid && f->vlan != pvid)
+ return pvid;
+
+ is_any = (trusted ||
+ !(pf->flags & I40E_FLAG_VF_VLAN_PRUNING));
+
+ if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+ (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
+ (is_any && !vlan_filters && f->vlan == 0)) {
+ if (is_any)
+ return I40E_VLAN_ANY;
+ else
+ return 0;
+ }
+
+ return f->vlan;
+}
+
+/**
+ * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary
+ * @vsi: the vsi to configure
+ * @tmp_add_list: list of filters ready to be added
+ * @tmp_del_list: list of filters ready to be deleted
+ * @vlan_filters: the number of active VLAN filters
+ * @trusted: flag if the VF is trusted
+ *
+ * Correct VF VLAN filters based on current VLAN filters, trust, PVID
+ * and vf-vlan-prune-disable flag.
+ *
+ * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
+ *
+ * This function is only expected to be called from within
+ * i40e_sync_vsi_filters.
+ *
+ * NOTE: This function expects to be called while under the
+ * mac_filter_hash_lock
+ */
+static int i40e_correct_vf_mac_vlan_filters(struct i40e_vsi *vsi,
+ struct hlist_head *tmp_add_list,
+ struct hlist_head *tmp_del_list,
+ int vlan_filters,
+ bool trusted)
+{
+ struct i40e_mac_filter *f, *add_head;
+ struct i40e_new_mac_filter *new_mac;
+ struct hlist_node *h;
+ int bkt, new_vlan;
+
+ hlist_for_each_entry(new_mac, tmp_add_list, hlist) {
+ new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL,
+ vlan_filters, trusted);
+ }
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
+ new_vlan = i40e_get_vf_new_vlan(vsi, NULL, f, vlan_filters,
+ trusted);
+ if (new_vlan != f->vlan) {
+ add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
+ if (!add_head)
+ return -ENOMEM;
+ /* Create a temporary i40e_new_mac_filter */
+ new_mac = kzalloc(sizeof(*new_mac), GFP_ATOMIC);
+ if (!new_mac)
+ return -ENOMEM;
+ new_mac->f = add_head;
+ new_mac->state = add_head->state;
+
+ /* Add the new filter to the tmp list */
+ hlist_add_head(&new_mac->hlist, tmp_add_list);
+
+ /* Put the original filter into the delete list */
+ f->state = I40E_FILTER_REMOVE;
+ hash_del(&f->hlist);
+ hlist_add_head(&f->hlist, tmp_del_list);
+ }
+ }
+
+ vsi->has_vlan_filter = !!vlan_filters;
+ return 0;
+}
+
+/**
* i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
* @vsi: the PF Main VSI - inappropriate for any other VSI
* @macaddr: the MAC address
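The VF filter helpers added above choose the VLAN to attach to each MAC filter from the port VLAN, the number of explicit VLAN filters, the VF trust state and the new vf-vlan-pruning flag. A condensed summary of i40e_get_vf_new_vlan() ("any" below means trusted VF or I40E_FLAG_VF_VLAN_PRUNING clear):

	/*   port VLAN (PVID) configured       -> filter moves to the PVID
	 *   no explicit VLAN filters, "any"   -> filter sits on I40E_VLAN_ANY
	 *   no explicit VLAN filters, !"any"  -> filter sits on VLAN 0 (pruned)
	 *   explicit VLAN filters present     -> I40E_VLAN_ANY entries collapse
	 *                                        to ANY or 0 by the same rule
	 *   anything else                     -> the filter keeps its VLAN
	 */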
@@ -1592,13 +1817,13 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (vsi->type == I40E_VSI_MAIN) {
- i40e_status ret;
+ int ret;
ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
addr->sa_data, NULL);
if (ret)
- netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
@@ -1629,8 +1854,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot set RSS key, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Cannot set RSS key, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -1641,8 +1866,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Cannot set RSS lut, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -1811,11 +2036,15 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
* non-zero req_queue_pairs says that user requested a new
* queue count via ethtool's set_channels, so use this
* value for queues distribution across traffic classes
+ * We need at least one queue pair for the interface
+ * to be usable, as handled in the else branch below.
*/
if (vsi->req_queue_pairs > 0)
vsi->num_queue_pairs = vsi->req_queue_pairs;
else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
vsi->num_queue_pairs = pf->num_lan_msix;
+ else
+ vsi->num_queue_pairs = 1;
}
/* Number of queues per enabled TC */
@@ -2036,6 +2265,7 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
hlist_for_each_entry_safe(new, h, from, hlist) {
/* We can simply free the wrapper structure */
hlist_del(&new->hlist);
+ netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
kfree(new);
}
}
@@ -2118,19 +2348,19 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_del, int *retval)
{
struct i40e_hw *hw = &vsi->back->hw;
- i40e_status aq_ret;
- int aq_err;
+ enum i40e_admin_queue_err aq_status;
+ int aq_ret;
- aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
- aq_err = hw->aq.asq_last_status;
+ aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
+ &aq_status);
/* Explicitly ignore and do not report when firmware returns ENOENT */
- if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
+ if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
*retval = -EIO;
dev_info(&vsi->back->pdev->dev,
- "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
- vsi_name, i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw, aq_err));
+ "ignoring delete macvlan error on %s, err %pe, aq_err %s\n",
+ vsi_name, ERR_PTR(aq_ret),
+ i40e_aq_str(hw, aq_status));
}
}
@@ -2153,10 +2383,10 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
int num_add)
{
struct i40e_hw *hw = &vsi->back->hw;
- int aq_err, fcnt;
+ enum i40e_admin_queue_err aq_status;
+ int fcnt;
- i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
- aq_err = hw->aq.asq_last_status;
+ i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
fcnt = i40e_update_filter_state(num_add, list, add_head);
if (fcnt != num_add) {
@@ -2164,17 +2394,19 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
- i40e_aq_str(hw, aq_err), vsi_name);
+ i40e_aq_str(hw, aq_status), vsi_name);
} else if (vsi->type == I40E_VSI_SRIOV ||
vsi->type == I40E_VSI_VMDQ1 ||
vsi->type == I40E_VSI_VMDQ2) {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
- i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
+ i40e_aq_str(hw, aq_status), vsi_name,
+ vsi_name);
} else {
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
- i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
+ i40e_aq_str(hw, aq_status), vsi_name,
+ vsi->type);
}
}
}
@@ -2191,13 +2423,13 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
*
* Returns status indicating success or failure;
**/
-static i40e_status
+static int
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
struct i40e_mac_filter *f)
{
bool enable = f->state == I40E_FILTER_NEW;
struct i40e_hw *hw = &vsi->back->hw;
- i40e_status aq_ret;
+ int aq_ret;
if (f->vlan == I40E_VLAN_ANY) {
aq_ret = i40e_aq_set_vsi_broadcast(hw,
@@ -2236,7 +2468,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
- i40e_status aq_ret;
+ int aq_ret;
if (vsi->type == I40E_VSI_MAIN &&
pf->lan_veb != I40E_NO_VEB &&
@@ -2256,8 +2488,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "Set default VSI failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
+ "Set default VSI failed, err %pe, aq_err %s\n",
+ ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
} else {
@@ -2268,8 +2500,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
true);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "set unicast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
+ "set unicast promisc failed, err %pe, aq_err %s\n",
+ ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
@@ -2278,8 +2510,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
promisc, NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "set multicast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
+ "set multicast promisc failed, err %pe, aq_err %s\n",
+ ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
@@ -2309,12 +2541,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
unsigned int vlan_filters = 0;
char vsi_name[16] = "PF";
int filter_list_len = 0;
- i40e_status aq_ret = 0;
u32 changed_flags = 0;
struct hlist_node *h;
struct i40e_pf *pf;
int num_add = 0;
int num_del = 0;
+ int aq_ret = 0;
int retval = 0;
u16 cmd_flags;
int list_size;
@@ -2379,10 +2611,18 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
vlan_filters++;
}
- retval = i40e_correct_mac_vlan_filters(vsi,
- &tmp_add_list,
- &tmp_del_list,
- vlan_filters);
+ if (vsi->type != I40E_VSI_SRIOV)
+ retval = i40e_correct_mac_vlan_filters
+ (vsi, &tmp_add_list, &tmp_del_list,
+ vlan_filters);
+ else
+ retval = i40e_correct_vf_mac_vlan_filters
+ (vsi, &tmp_add_list, &tmp_del_list,
+ vlan_filters, pf->vf[vsi->vf_id].trusted);
+
+ hlist_for_each_entry(new, &tmp_add_list, hlist)
+ netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);
+
if (retval)
goto err_no_memory_locked;
@@ -2515,6 +2755,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
if (new->f->state == I40E_FILTER_NEW)
new->f->state = new->state;
hlist_del(&new->hlist);
+ netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
kfree(new);
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -2573,9 +2814,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
retval = i40e_aq_rc_to_posix(aq_ret,
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set multi promisc failed on %s, err %s aq_err %s\n",
+ "set multi promisc failed on %s, err %pe aq_err %s\n",
vsi_name,
- i40e_stat_str(hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
@@ -2593,10 +2834,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
retval = i40e_aq_rc_to_posix(aq_ret,
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
+ "Setting promiscuous %s failed on %s, err %pe aq_err %s\n",
cur_promisc ? "on" : "off",
vsi_name,
- i40e_stat_str(hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
@@ -2655,15 +2896,35 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
}
/**
- * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * i40e_calculate_vsi_rx_buf_len - Calculates buffer length
+ *
+ * @vsi: VSI to calculate rx_buf_len from
+ */
+static u16 i40e_calculate_vsi_rx_buf_len(struct i40e_vsi *vsi)
+{
+ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ return SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048);
+
+ return PAGE_SIZE < 8192 ? I40E_RXBUFFER_3072 : I40E_RXBUFFER_2048;
+}
+
+/**
+ * i40e_max_vsi_frame_size - returns the maximum allowed frame size for VSI
* @vsi: the vsi
+ * @xdp_prog: XDP program
**/
-static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
+static int i40e_max_vsi_frame_size(struct i40e_vsi *vsi,
+ struct bpf_prog *xdp_prog)
{
- if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
- return I40E_RXBUFFER_2048;
+ u16 rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
+ u16 chain_len;
+
+ if (xdp_prog && !xdp_prog->aux->xdp_has_frags)
+ chain_len = 1;
else
- return I40E_RXBUFFER_3072;
+ chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
+
+ return min_t(u16, rx_buf_len * chain_len, I40E_MAX_RXBUFFER);
}
/**
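i40e_max_vsi_frame_size() above derives the maximum frame from the per-buffer length times the number of buffers a descriptor chain may span, capped at the absolute hardware limit; an XDP program without frags support collapses the chain to a single buffer. Sketch of the rule (numbers in the comment assume 4K pages):

	u16 rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);	/* 3072, or 2048 with legacy-rx */
	u16 chain_len = (xdp_prog && !xdp_prog->aux->xdp_has_frags) ?
			1 : I40E_MAX_CHAINED_RX_BUFFERS;
	int max_frame = min_t(u16, rx_buf_len * chain_len, I40E_MAX_RXBUFFER);

	/* MTU changes are then rejected when new_mtu + I40E_PACKET_HDR_PAD > max_frame */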
@@ -2678,12 +2939,13 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ int frame_size;
- if (i40e_enabled_xdp_vsi(vsi)) {
- int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-
- if (frame_size > i40e_max_xdp_frame_size(vsi))
- return -EINVAL;
+ frame_size = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog);
+ if (new_mtu > frame_size - I40E_PACKET_HDR_PAD) {
+ netdev_err(netdev, "Error changing mtu to %d, Max is %d\n",
+ new_mtu, frame_size - I40E_PACKET_HDR_PAD);
+ return -EINVAL;
}
netdev_dbg(netdev, "changing MTU from %d to %d\n",
@@ -2724,7 +2986,7 @@ int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
struct i40e_vsi_context ctxt;
- i40e_status ret;
+ int ret;
/* Don't modify stripping options if a port VLAN is active */
if (vsi->info.pvid)
@@ -2744,8 +3006,8 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "update vlan stripping failed, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
+ "update vlan stripping failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&vsi->back->hw,
vsi->back->hw.aq.asq_last_status));
}
@@ -2758,7 +3020,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
struct i40e_vsi_context ctxt;
- i40e_status ret;
+ int ret;
/* Don't modify stripping options if a port VLAN is active */
if (vsi->info.pvid)
@@ -2779,8 +3041,8 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "update vlan stripping failed, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
+ "update vlan stripping failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&vsi->back->hw,
vsi->back->hw.aq.asq_last_status));
}
@@ -2806,8 +3068,21 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
int bkt;
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
- if (f->state == I40E_FILTER_REMOVE)
+ /* If we're asked to add a filter that has been marked for
+ * removal, it is safe to simply restore it to active state.
+ * __i40e_del_filter will have simply deleted any filters which
+ * were previously marked NEW or FAILED, so if it is currently
+ * marked REMOVE it must have previously been ACTIVE. Since we
+ * haven't yet run the sync filters task, just restore this
+ * filter to the ACTIVE state so that the sync task leaves it
+ * in place.
+ */
+ if (f->state == I40E_FILTER_REMOVE && f->vlan == vid) {
+ f->state = I40E_FILTER_ACTIVE;
+ continue;
+ } else if (f->state == I40E_FILTER_REMOVE) {
continue;
+ }
add_f = i40e_add_filter(vsi, f->macaddr, vid);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
@@ -2998,7 +3273,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
struct i40e_vsi_context ctxt;
- i40e_status ret;
+ int ret;
vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.pvid = cpu_to_le16(vid);
@@ -3011,8 +3286,8 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "add pvid failed, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
+ "add pvid failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&vsi->back->hw,
vsi->back->hw.aq.asq_last_status));
return -ENOENT;
@@ -3175,8 +3450,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
u16 pf_q = vsi->base_queue + ring->queue_index;
struct i40e_hw *hw = &vsi->back->hw;
struct i40e_hmc_obj_txq tx_ctx;
- i40e_status err = 0;
u32 qtx_ctl = 0;
+ int err = 0;
if (ring_is_xdp(ring))
ring->xsk_pool = i40e_xsk_pool(ring);
@@ -3300,7 +3575,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
u16 pf_q = vsi->base_queue + ring->queue_index;
struct i40e_hw *hw = &vsi->back->hw;
struct i40e_hmc_obj_rxq rx_ctx;
- i40e_status err = 0;
+ int err = 0;
bool ok;
int ret;
@@ -3312,12 +3587,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
if (ring->vsi->type == I40E_VSI_MAIN)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- kfree(ring->rx_bi);
ring->xsk_pool = i40e_xsk_pool(ring);
if (ring->xsk_pool) {
- ret = i40e_alloc_rx_bi_zc(ring);
- if (ret)
- return ret;
ring->rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
@@ -3335,9 +3606,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->queue_index);
} else {
- ret = i40e_alloc_rx_bi(ring);
- if (ret)
- return ret;
ring->rx_buf_len = vsi->rx_buf_len;
if (ring->vsi->type == I40E_VSI_MAIN) {
ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
@@ -3348,6 +3616,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
}
}
+ xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
+
rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
@@ -3393,10 +3663,16 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
}
/* configure Rx buffer alignment */
- if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+ if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
+ if (I40E_2K_TOO_SMALL_WITH_PADDING) {
+ dev_info(&vsi->back->pdev->dev,
+ "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
+ return -EOPNOTSUPP;
+ }
clear_ring_build_skb_enabled(ring);
- else
+ } else {
set_ring_build_skb_enabled(ring);
+ }
ring->rx_offset = i40e_rx_offset(ring);
@@ -3457,20 +3733,16 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
int err = 0;
u16 i;
- if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
- vsi->max_frame = I40E_MAX_RXBUFFER;
- vsi->rx_buf_len = I40E_RXBUFFER_2048;
+ vsi->max_frame = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog);
+ vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi);
+
#if (PAGE_SIZE < 8192)
- } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
- (vsi->netdev->mtu <= ETH_DATA_LEN)) {
- vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+ if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING &&
+ vsi->netdev->mtu <= ETH_DATA_LEN) {
vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
-#endif
- } else {
- vsi->max_frame = I40E_MAX_RXBUFFER;
- vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
- I40E_RXBUFFER_2048;
+ vsi->max_frame = vsi->rx_buf_len;
}
+#endif
/* set up individual rings */
for (i = 0; i < vsi->num_queue_pairs && !err; i++)
@@ -3625,7 +3897,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_PFINT_RATEN(vector - 1),
i40e_intrl_usec_to_reg(vsi->int_rate_limit));
- /* Linked list for the queuepairs assigned to this vector */
+ /* beginning of the linked list for the RX queue assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
for (q = 0; q < q_vector->num_ringpairs; q++) {
u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
@@ -3641,6 +3913,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_QINT_RQCTL(qp), val);
if (has_xdp) {
+ /* TX queue with next queue set to TX */
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
@@ -3650,7 +3923,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_QINT_TQCTL(nextqp), val);
}
-
+ /* TX queue with next RX or end of linked list */
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
@@ -3719,7 +3992,6 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
struct i40e_q_vector *q_vector = vsi->q_vectors[0];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- u32 val;
/* set the ITR configuration */
q_vector->rx.next_update = jiffies + 1;
@@ -3736,28 +4008,20 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
wr32(hw, I40E_PFINT_LNKLST0, 0);
- /* Associate the queue pair to the vector and enable the queue int */
- val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
- (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
-
- wr32(hw, I40E_QINT_RQCTL(0), val);
+ /* Associate the queue pair to the vector and enable the queue
+ * interrupt; RX queue in linked list with next queue set to TX
+ */
+ wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));
if (i40e_enabled_xdp_vsi(vsi)) {
- val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_TX
- << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
-
- wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ /* TX queue in linked list with next queue set to TX */
+ wr32(hw, I40E_QINT_TQCTL(nextqp),
+ I40E_QINT_TQCTL_VAL(nextqp, 0, TX));
}
- val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
- (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
-
- wr32(hw, I40E_QINT_TQCTL(0), val);
+ /* last TX queue so the next RX queue doesn't matter */
+ wr32(hw, I40E_QINT_TQCTL(0),
+ I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX));
i40e_flush(hw);
}
@@ -3884,6 +4148,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
}
/* register for affinity change notifications */
+ q_vector->irq_num = irq_num;
q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
q_vector->affinity_notify.release = i40e_irq_affinity_release;
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
@@ -3891,10 +4156,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
*
* get_cpu_mask returns a static constant mask with
* a permanent lifetime so it's ok to pass to
- * irq_set_affinity_hint without making a copy.
+ * irq_update_affinity_hint without making a copy.
*/
cpu = cpumask_local_spread(q_vector->v_idx, -1);
- irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+ irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}
vsi->irqs_ready = true;
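The IRQ hunks above switch from irq_set_affinity_hint() to irq_update_affinity_hint(), which only records the hint (visible under /proc/irq/<n>/affinity_hint) without forcing the affinity, and drop the explicit synchronize_irq() before free_irq(), since free_irq() already waits for in-flight handlers. Usage sketch:

	int cpu = cpumask_local_spread(q_vector->v_idx, -1);

	irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));	/* hint only, not enforced */

	/* teardown */
	irq_update_affinity_hint(irq_num, NULL);		/* clear the published hint */
	free_irq(irq_num, q_vector);				/* waits for running handlers */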
@@ -3905,7 +4170,7 @@ free_queue_irqs:
vector--;
irq_num = pf->msix_entries[base + vector].vector;
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &vsi->q_vectors[vector]);
}
return err;
@@ -3988,7 +4253,6 @@ static void i40e_free_misc_vector(struct i40e_pf *pf)
i40e_flush(&pf->hw);
if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
- synchronize_irq(pf->msix_entries[0].vector);
free_irq(pf->msix_entries[0].vector, pf);
clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
}
@@ -4726,8 +4990,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
/* clear the affinity notifier in the IRQ descriptor */
irq_set_affinity_notifier(irq_num, NULL);
/* remove our suggested affinity mask for this IRQ */
- irq_set_affinity_hint(irq_num, NULL);
- synchronize_irq(irq_num);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, vsi->q_vectors[i]);
/* Tear down the interrupt queue link list
@@ -5211,7 +5474,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
u8 num_tc = 0;
struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
/* If neither MQPRIO nor DCB is enabled, then always use single TC */
@@ -5243,7 +5506,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
**/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return i40e_mqprio_get_enabled_tc(pf);
/* If neither MQPRIO nor DCB is enabled for this PF then just return
@@ -5275,16 +5538,16 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
u32 tc_bw_max;
+ int ret;
int i;
/* Get the VSI level BW configuration */
ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi bw config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi bw config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
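
The i40e_stat_str() to %pe conversion repeated throughout this patch leans on the kernel's error-pointer format specifier: when the argument is an ERR_PTR()-encoded negative errno and CONFIG_SYMBOLIC_ERRNAME is enabled, %pe prints the symbolic name (e.g. -EINVAL) instead of a driver-private status string. A minimal sketch of the pattern; dev and some_aq_call() are placeholders, not i40e symbols:

	int ret = some_aq_call();	/* placeholder: returns 0 or -errno */

	if (ret)
		dev_info(dev, "operation failed, err %pe\n", ERR_PTR(ret));
	/* with CONFIG_SYMBOLIC_ERRNAME=y this prints e.g. "err -EINVAL" */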
@@ -5294,8 +5557,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi ets bw config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -5336,11 +5599,11 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
{
struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
struct i40e_pf *pf = vsi->back;
- i40e_status ret;
+ int ret;
int i;
/* There is no need to reset BW when mqprio mode is on. */
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return 0;
if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
@@ -5412,7 +5675,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
vsi->tc_config.tc_info[i].qoffset);
}
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
return;
/* Assign UP2TC map for the VSI */
@@ -5484,8 +5747,8 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
- dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -5540,8 +5803,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
&bw_config, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed querying vsi bw info, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Failed querying vsi bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@@ -5573,7 +5836,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ctxt.vf_num = 0;
ctxt.uplink_seid = vsi->uplink_seid;
ctxt.info = vsi->info;
- if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
if (ret)
goto out;
@@ -5607,8 +5870,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Update vsi tc config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Update vsi tc config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@@ -5620,8 +5883,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed updating vsi bw info, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Failed updating vsi bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@@ -5658,6 +5921,26 @@ static int i40e_get_link_speed(struct i40e_vsi *vsi)
}
/**
+ * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
+ * @vsi: Pointer to vsi structure
+ * @max_tx_rate: max TX rate in bytes to be converted into Mbits
+ *
+ * Helper function to convert units before sending to set BW limit
+ **/
+static u64 i40e_bw_bytes_to_mbits(struct i40e_vsi *vsi, u64 max_tx_rate)
+{
+ if (max_tx_rate < I40E_BW_MBPS_DIVISOR) {
+ dev_warn(&vsi->back->pdev->dev,
+ "Setting max tx rate to minimum usable value of 50Mbps.\n");
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
+ } else {
+ do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
+ }
+
+ return max_tx_rate;
+}
+
+/**
* i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
* @vsi: VSI to be configured
* @seid: seid of the channel/VSI
@@ -5679,10 +5962,10 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
max_tx_rate, seid);
return -EINVAL;
}
- if (max_tx_rate && max_tx_rate < 50) {
+ if (max_tx_rate && max_tx_rate < I40E_BW_CREDIT_DIVISOR) {
dev_warn(&pf->pdev->dev,
"Setting max tx rate to minimum usable value of 50Mbps.\n");
- max_tx_rate = 50;
+ max_tx_rate = I40E_BW_CREDIT_DIVISOR;
}
/* Tx rate credits are in values of 50Mbps, 0 is disabled */
@@ -5692,8 +5975,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
I40E_MAX_BW_INACTIVE_ACCUM, NULL);
if (ret)
dev_err(&pf->pdev->dev,
- "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
- max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
+ "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n",
+ max_tx_rate, seid, ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return ret;
}
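
Taken together, i40e_bw_bytes_to_mbits() and the credit computation noted in i40e_set_bw_limit() convert an mqprio max_rate given in bytes per second into the 50 Mbps credits the firmware consumes. A self-contained sketch of the arithmetic, assuming the usual constant values (I40E_BW_MBPS_DIVISOR == 125000 bytes/s per Mbit, I40E_BW_CREDIT_DIVISOR == 50 Mbps per credit); the names below are local stand-ins, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define BW_MBPS_DIVISOR   125000ULL	/* assumed: bytes/s in one Mbps */
#define BW_CREDIT_DIVISOR     50ULL	/* assumed: Mbps per BW credit */

int main(void)
{
	uint64_t rate_bytes = 625000000ULL;	/* 5 Gbit/s expressed in bytes/s */
	uint64_t mbits;

	if (rate_bytes < BW_MBPS_DIVISOR)
		mbits = BW_CREDIT_DIVISOR;	/* clamp to the 50 Mbps minimum */
	else
		mbits = rate_bytes / BW_MBPS_DIVISOR;

	printf("%llu bytes/s -> %llu Mbps -> %llu credits\n",
	       (unsigned long long)rate_bytes,
	       (unsigned long long)mbits,
	       (unsigned long long)(mbits / BW_CREDIT_DIVISOR));
	return 0;	/* prints: 625000000 bytes/s -> 5000 Mbps -> 100 credits */
}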
@@ -5768,8 +6051,8 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret)
dev_info(&pf->pdev->dev,
- "Failed to delete cloud filter, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed to delete cloud filter, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
kfree(cfilter);
}
@@ -5903,8 +6186,8 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Cannot set RSS lut, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
kfree(lut);
return ret;
@@ -6002,8 +6285,8 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "add new vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "add new vsi failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
@@ -6034,7 +6317,7 @@ static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
u8 *bw_share)
{
struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
- i40e_status ret;
+ int ret;
int i;
memset(&bw_data, 0, sizeof(bw_data));
@@ -6070,9 +6353,9 @@ static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
struct i40e_vsi *vsi,
struct i40e_channel *ch)
{
- i40e_status ret;
- int i;
u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+ int ret;
+ int i;
/* Enable ETS TCs with equal BW Share for now across all VSIs */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -6248,8 +6531,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
mode, NULL);
if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
dev_err(&pf->pdev->dev,
- "couldn't set switch config bits, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "couldn't set switch config bits, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw,
hw->aq.asq_last_status));
@@ -6297,7 +6580,7 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi,
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
if (vsi->type == I40E_VSI_MAIN) {
- if (pf->flags & I40E_FLAG_TC_MQPRIO)
+ if (i40e_is_tc_mqprio_enabled(pf))
i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
else
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
@@ -6408,6 +6691,9 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
vsi->tc_seid_map[i] = ch->seid;
}
}
+
+ /* reset to reconfigure TX queue contexts */
+ i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
return ret;
err_free:
@@ -6446,8 +6732,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "VEB bw config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "VEB bw config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -6456,8 +6742,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed getting veb bw config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed getting veb bw config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
@@ -6540,8 +6826,8 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
ret = i40e_aq_resume_port_tx(hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Resume Port Tx failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Resume Port Tx failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
@@ -6565,8 +6851,8 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf)
ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Suspend Port Tx failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Suspend Port Tx failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
@@ -6605,8 +6891,8 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
ret = i40e_set_dcb_config(&pf->hw);
if (ret) {
dev_info(&pf->pdev->dev,
- "Set DCB Config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Set DCB Config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -6722,8 +7008,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
i40e_aqc_opc_modify_switching_comp_ets, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Modify Port ETS failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Modify Port ETS failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -6760,8 +7046,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
ret = i40e_aq_dcb_updated(&pf->hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "DCB Updated failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "DCB Updated failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -6844,8 +7130,8 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
i40e_aqc_opc_enable_switching_comp_ets, NULL);
if (err) {
dev_info(&pf->pdev->dev,
- "Enable Port ETS failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "Enable Port ETS failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
err = -ENOENT;
goto out;
@@ -6924,8 +7210,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
} else {
dev_info(&pf->pdev->dev,
- "Query for DCB configuration failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "Query for DCB configuration failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
@@ -7143,15 +7429,15 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
* @pf: board private structure
* @is_up: whether the link state should be forced up or down
**/
-static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
{
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_aq_set_phy_config config = {0};
bool non_zero_phy_type = is_up;
struct i40e_hw *hw = &pf->hw;
- i40e_status err;
u64 mask;
u8 speed;
+ int err;
/* Card might've been put in an unstable state by other drivers
* and applications, which causes incorrect speed values being
@@ -7163,8 +7449,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
NULL);
if (err) {
dev_err(&pf->pdev->dev,
- "failed to get phy cap., ret = %s last_status = %s\n",
- i40e_stat_str(hw, err),
+ "failed to get phy cap., ret = %pe last_status = %s\n",
+ ERR_PTR(err),
i40e_aq_str(hw, hw->aq.asq_last_status));
return err;
}
@@ -7175,8 +7461,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
NULL);
if (err) {
dev_err(&pf->pdev->dev,
- "failed to get phy cap., ret = %s last_status = %s\n",
- i40e_stat_str(hw, err),
+ "failed to get phy cap., ret = %pe last_status = %s\n",
+ ERR_PTR(err),
i40e_aq_str(hw, hw->aq.asq_last_status));
return err;
}
@@ -7220,8 +7506,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
- "set phy config ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, err),
+ "set phy config ret = %pe last_status = %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return err;
}
@@ -7384,11 +7670,11 @@ static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
* This function deletes a mac filter on the channel VSI which serves as the
* macvlan. Returns 0 on success.
**/
-static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
- const u8 *macaddr, int *aq_err)
+static int i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
+ const u8 *macaddr, int *aq_err)
{
struct i40e_aqc_remove_macvlan_element_data element;
- i40e_status status;
+ int status;
memset(&element, 0, sizeof(element));
ether_addr_copy(element.mac_addr, macaddr);
@@ -7410,12 +7696,12 @@ static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
* This function adds a mac filter on the channel VSI which serves as the
* macvlan. Returns 0 on success.
**/
-static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
- const u8 *macaddr, int *aq_err)
+static int i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
+ const u8 *macaddr, int *aq_err)
{
struct i40e_aqc_add_macvlan_element_data element;
- i40e_status status;
u16 cmd_flags = 0;
+ int status;
ether_addr_copy(element.mac_addr, macaddr);
element.vlan_tag = 0;
@@ -7505,42 +7791,43 @@ static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
struct i40e_fwd_adapter *fwd)
{
+ struct i40e_channel *ch = NULL, *ch_tmp, *iter;
int ret = 0, num_tc = 1, i, aq_err;
- struct i40e_channel *ch, *ch_tmp;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- if (list_empty(&vsi->macvlan_list))
- return -EINVAL;
-
/* Go through the list and find an available channel */
- list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
- if (!i40e_is_channel_macvlan(ch)) {
- ch->fwd = fwd;
+ list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
+ if (!i40e_is_channel_macvlan(iter)) {
+ iter->fwd = fwd;
/* record configuration for macvlan interface in vdev */
for (i = 0; i < num_tc; i++)
netdev_bind_sb_channel_queue(vsi->netdev, vdev,
i,
- ch->num_queue_pairs,
- ch->base_queue);
- for (i = 0; i < ch->num_queue_pairs; i++) {
+ iter->num_queue_pairs,
+ iter->base_queue);
+ for (i = 0; i < iter->num_queue_pairs; i++) {
struct i40e_ring *tx_ring, *rx_ring;
u16 pf_q;
- pf_q = ch->base_queue + i;
+ pf_q = iter->base_queue + i;
/* Get to TX ring ptr */
tx_ring = vsi->tx_rings[pf_q];
- tx_ring->ch = ch;
+ tx_ring->ch = iter;
/* Get the RX ring ptr */
rx_ring = vsi->rx_rings[pf_q];
- rx_ring->ch = ch;
+ rx_ring->ch = iter;
}
+ ch = iter;
break;
}
}
+ if (!ch)
+ return -EINVAL;
+
/* Guarantee all rings are updated before we update the
* MAC address filter.
*/
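
The i40e_fwd_ring_up() rework above follows the broader effort to stop using a list_for_each_entry() cursor after the loop ends: the cursor (iter) is only valid inside the loop body, so the match is copied into a separate ch pointer and the former empty-list check becomes a !ch check after the loop. A generic sketch of the pattern; struct item, head, and matches() are illustrative names only:

	struct item *found = NULL, *iter;

	list_for_each_entry(iter, &head, list) {
		if (matches(iter)) {
			found = iter;	/* remember the match */
			break;
		}
	}
	if (!found)
		return -EINVAL;		/* never dereference iter here */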
@@ -7560,8 +7847,8 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
rx_ring->netdev = NULL;
}
dev_info(&pf->pdev->dev,
- "Error adding mac filter on macvlan err %s, aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Error adding mac filter on macvlan err %pe, aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, aq_err));
netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
}
@@ -7633,8 +7920,8 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Update vsi tc config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Update vsi tc config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -7690,7 +7977,7 @@ static void *i40e_fwd_add(struct net_device *netdev, struct net_device *vdev)
netdev_info(netdev, "Macvlans are not supported when DCB is enabled\n");
return ERR_PTR(-EINVAL);
}
- if ((pf->flags & I40E_FLAG_TC_MQPRIO)) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
netdev_info(netdev, "Macvlans are not supported when HW TC offload is on\n");
return ERR_PTR(-EINVAL);
}
@@ -7849,8 +8136,8 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev)
ch->fwd = NULL;
} else {
dev_info(&pf->pdev->dev,
- "Error deleting mac filter on macvlan err %s, aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Error deleting mac filter on macvlan err %pe, aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, aq_err));
}
break;
@@ -7943,7 +8230,7 @@ config_tc:
/* Quiesce VSI queues */
i40e_quiesce_vsi(vsi);
- if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
+ if (!hw && !i40e_is_tc_mqprio_enabled(pf))
i40e_remove_queue_channels(vsi);
/* Configure VSI for enabled TCs */
@@ -7967,11 +8254,11 @@ config_tc:
"Setup channel (id:%u) utilizing num_queues %d\n",
vsi->seid, vsi->tc_config.tc_info[0].qcount);
- if (pf->flags & I40E_FLAG_TC_MQPRIO) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (!ret) {
u64 credits = max_tx_rate;
@@ -8492,6 +8779,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
}
+ if (!tc) {
+ dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
+ return -EINVAL;
+ }
+
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;
@@ -8596,8 +8888,8 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
kfree(filter);
if (err) {
dev_err(&pf->pdev->dev,
- "Failed to delete cloud filter, err %s\n",
- i40e_stat_str(&pf->hw, err));
+ "Failed to delete cloud filter, err %pe\n",
+ ERR_PTR(err));
return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
}
@@ -8717,6 +9009,27 @@ int i40e_open(struct net_device *netdev)
}
/**
+ * i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
+ * @vsi: vsi structure
+ *
+ * This updates netdev's number of tx/rx queues
+ *
+ * Returns status of setting tx/rx queues
+ **/
+static int i40e_netif_set_realnum_tx_rx_queues(struct i40e_vsi *vsi)
+{
+ int ret;
+
+ ret = netif_set_real_num_rx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (ret)
+ return ret;
+
+ return netif_set_real_num_tx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+}
+
+/**
* i40e_vsi_open -
* @vsi: the VSI to open
*
@@ -8752,13 +9065,7 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
goto err_setup_rx;
/* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(vsi->netdev,
- vsi->num_queue_pairs);
- if (err)
- goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(vsi->netdev,
- vsi->num_queue_pairs);
+ err = i40e_netif_set_realnum_tx_rx_queues(vsi);
if (err)
goto err_set_queues;
@@ -9144,8 +9451,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
} else {
dev_info(&pf->pdev->dev,
- "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@@ -9593,8 +9900,8 @@ static void i40e_link_event(struct i40e_pf *pf)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
u8 new_link_speed, old_link_speed;
- i40e_status status;
bool new_link, old_link;
+ int status;
#ifdef CONFIG_I40E_DCB
int err;
#endif /* CONFIG_I40E_DCB */
@@ -9805,9 +10112,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
struct i40e_arq_event_info event;
struct i40e_hw *hw = &pf->hw;
u16 pending, i = 0;
- i40e_status ret;
u16 opcode;
u32 oldval;
+ int ret;
u32 val;
/* Do not run clean AQ when PF reset fails */
@@ -9971,8 +10278,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
@@ -9983,8 +10290,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi switch failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi switch failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@@ -10007,8 +10314,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
@@ -10019,8 +10326,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi switch failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi switch failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@@ -10164,8 +10471,8 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
buf_len = data_size;
} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
dev_info(&pf->pdev->dev,
- "capability discovery failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "capability discovery failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENODEV;
@@ -10286,7 +10593,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
struct i40e_cloud_filter *cfilter;
struct i40e_pf *pf = vsi->back;
struct hlist_node *node;
- i40e_status ret;
+ int ret;
/* Add cloud filters back if they exist */
hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
@@ -10302,8 +10609,8 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
if (ret) {
dev_dbg(&pf->pdev->dev,
- "Failed to rebuild cloud filter, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed to rebuild cloud filter, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
@@ -10321,7 +10628,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
static int i40e_rebuild_channels(struct i40e_vsi *vsi)
{
struct i40e_channel *ch, *ch_tmp;
- i40e_status ret;
+ int ret;
if (list_empty(&vsi->ch_list))
return 0;
@@ -10374,6 +10681,21 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
}
/**
+ * i40e_clean_xps_state - clean xps state for every tx_ring
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_clean_xps_state(struct i40e_vsi *vsi)
+{
+ int i;
+
+ if (vsi->tx_rings)
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->tx_rings[i])
+ clear_bit(__I40E_TX_XPS_INIT_DONE,
+ vsi->tx_rings[i]->state);
+}
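
i40e_clean_xps_state() exists because XPS is programmed only once per Tx ring, guarded by the __I40E_TX_XPS_INIT_DONE ring state bit; clearing the bit before a reset lets the rebuilt rings configure XPS again. A hedged sketch of the assumed counterpart on the setup side (the exact mask computation in i40e_txrx.c may differ):

	/* assumed shape of the per-ring XPS setup that this bit gates */
	int cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);

	if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
		netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
				    ring->queue_index);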
+
+/**
* i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
*
@@ -10382,7 +10704,7 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret = 0;
+ int ret = 0;
u32 v;
clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
@@ -10397,8 +10719,10 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
i40e_pf_quiesce_all_vsi(pf);
for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v])
+ if (pf->vsi[v]) {
+ i40e_clean_xps_state(pf->vsi[v]);
pf->vsi[v]->seid = 0;
+ }
}
i40e_shutdown_adminq(&pf->hw);
@@ -10429,7 +10753,7 @@ static void i40e_send_version(struct i40e_pf *pf)
dv.minor_version = 0xff;
dv.build_version = 0xff;
dv.subbuild_version = 0;
- strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
+ strscpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}
@@ -10485,7 +10809,7 @@ static void i40e_get_oem_version(struct i40e_hw *hw)
static int i40e_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ int ret;
ret = i40e_pf_reset(hw);
if (ret) {
@@ -10507,43 +10831,35 @@ static int i40e_reset(struct i40e_pf *pf)
**/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
- int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
+ const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ int ret;
u32 val;
int v;
if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
- i40e_check_recovery_mode(pf)) {
+ is_recovery_mode_reported)
i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
- }
if (test_bit(__I40E_DOWN, pf->state) &&
- !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
- !old_recovery_mode_bit)
+ !test_bit(__I40E_RECOVERY_MODE, pf->state))
goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
ret = i40e_init_adminq(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto clear_recovery;
}
i40e_get_oem_version(&pf->hw);
- if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
- ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
- hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
- /* The following delay is necessary for 4.33 firmware and older
- * to recover after EMP reset. 200 ms should suffice but we
- * put here 300 ms to be sure that FW is ready to operate
- * after reset.
- */
- mdelay(300);
+ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
+ /* The following delay is necessary for firmware update. */
+ mdelay(1000);
}
/* re-verify the eeprom if we just had an EMP reset */
@@ -10554,13 +10870,12 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
* accordingly with regard to resources initialization
* and deinitialization
*/
- if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
- old_recovery_mode_bit) {
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
if (i40e_get_capabilities(pf,
i40e_aqc_opc_list_func_capabilities))
goto end_unlock;
- if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ if (is_recovery_mode_reported) {
/* we're staying in recovery mode so we'll reinitialize
* misc vector here
*/
@@ -10610,7 +10925,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
* unless I40E_FLAG_TC_MQPRIO was enabled or DCB
* is not supported with new link speed
*/
- if (pf->flags & I40E_FLAG_TC_MQPRIO) {
+ if (i40e_is_tc_mqprio_enabled(pf)) {
i40e_aq_set_dcb_parameters(hw, false, NULL);
} else {
if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
@@ -10647,8 +10962,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (ret)
- dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Rebuild the VSIs and VEBs that existed before reset.
@@ -10705,10 +11020,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
if (vsi->mqprio_qopt.max_rate[0]) {
- u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 max_tx_rate = i40e_bw_bytes_to_mbits(vsi,
+ vsi->mqprio_qopt.max_rate[0]);
u64 credits = 0;
- do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
if (ret)
goto end_unlock;
@@ -10751,14 +11066,17 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
msleep(75);
ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (ret)
- dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
/* reinit the misc interrupt */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
ret = i40e_setup_misc_vector(pf);
+ if (ret)
+ goto end_unlock;
+ }
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -10780,9 +11098,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
ret = i40e_set_promiscuous(pf, pf->cur_promisc);
if (ret)
dev_warn(&pf->pdev->dev,
- "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
+ "Failed to restore promiscuous setting: %s, err %pe aq_err %s\n",
pf->cur_promisc ? "on" : "off",
- i40e_stat_str(&pf->hw, ret),
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
i40e_reset_all_vfs(pf, true);
@@ -10814,6 +11132,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
bool lock_acquired)
{
int ret;
+
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return;
/* Now we wait for GRST to settle out.
* We don't have to delete the VEBs or VSIs from the hw switch
* because the reset will make them disappear.
@@ -11659,8 +11980,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi,
- i40e_napi_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll);
/* tie q_vector and vsi together */
vsi->q_vectors[v_idx] = q_vector;
@@ -11753,7 +12073,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
return -ENOMEM;
pf->irq_pile->num_entries = vectors;
- pf->irq_pile->search_hint = 0;
/* track first vector for misc interrupts, ignore return */
(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
@@ -11915,8 +12234,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
(struct i40e_aqc_get_set_rss_key_data *)seed);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot get RSS key, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot get RSS key, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
@@ -11929,8 +12248,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot get RSS lut, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot get RSS lut, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
@@ -12174,6 +12493,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
vsi->req_queue_pairs = queue_count;
i40e_prep_for_reset(pf);
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return pf->alloc_rss_size;
pf->alloc_rss_size = new_rss_size;
@@ -12203,11 +12524,11 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
* i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
+int i40e_get_partition_bw_setting(struct i40e_pf *pf)
{
- i40e_status status;
bool min_valid, max_valid;
u32 max_bw, min_bw;
+ int status;
status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
&min_valid, &max_valid);
@@ -12226,10 +12547,10 @@ i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
* i40e_set_partition_bw_setting - Set BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
+int i40e_set_partition_bw_setting(struct i40e_pf *pf)
{
struct i40e_aqc_configure_partition_bw_data bw_data;
- i40e_status status;
+ int status;
memset(&bw_data, 0, sizeof(bw_data));
@@ -12248,12 +12569,12 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
* i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
/* Commit temporary BW setting to permanent NVM image */
enum i40e_admin_queue_err last_aq_status;
- i40e_status ret;
u16 nvm_word;
+ int ret;
if (pf->hw.partition_id != 1) {
dev_info(&pf->pdev->dev,
@@ -12268,8 +12589,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for read access, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot acquire NVM for read access, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -12285,8 +12606,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
i40e_release_nvm(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -12299,8 +12620,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for write access, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot acquire NVM for write access, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -12319,8 +12640,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
i40e_release_nvm(&pf->hw);
if (ret)
dev_info(&pf->pdev->dev,
- "BW settings NOT SAVED, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "BW settings NOT SAVED, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:
@@ -12341,7 +12662,7 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
- i40e_status read_status = I40E_SUCCESS;
+ int read_status = I40E_SUCCESS;
u16 sr_emp_sr_settings_ptr = 0;
u16 features_enable = 0;
u16 link_behavior = 0;
@@ -12374,8 +12695,8 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
err_nvm:
dev_warn(&pf->pdev->dev,
- "total-port-shutdown feature is off due to read nvm error: %s\n",
- i40e_stat_str(&pf->hw, read_status));
+ "total-port-shutdown feature is off due to read nvm error: %pe\n",
+ ERR_PTR(read_status));
return ret;
}
@@ -12556,7 +12877,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
goto sw_init_done;
}
pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
- pf->qp_pile->search_hint = 0;
pf->tx_timeout_recovery_level = 1;
@@ -12646,6 +12966,29 @@ static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
}
/**
+ * i40e_set_loopback - turn on/off loopback mode on underlying PF
+ * @vsi: ptr to VSI
+ * @ena: flag to indicate the on/off setting
+ */
+static int i40e_set_loopback(struct i40e_vsi *vsi, bool ena)
+{
+ bool if_running = netif_running(vsi->netdev) &&
+ !test_and_set_bit(__I40E_VSI_DOWN, vsi->state);
+ int ret;
+
+ if (if_running)
+ i40e_down(vsi);
+
+ ret = i40e_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
+ if (ret)
+ netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
+ if (if_running)
+ i40e_up(vsi);
+
+ return ret;
+}
+
+/**
* i40e_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
@@ -12670,7 +13013,8 @@ static int i40e_set_features(struct net_device *netdev,
else
i40e_vlan_stripping_disable(vsi);
- if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
+ if (!(features & NETIF_F_HW_TC) &&
+ (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
dev_err(&pf->pdev->dev,
"Offloaded tc filters active, can't turn hw_tc_offload off");
return -EINVAL;
@@ -12684,6 +13028,9 @@ static int i40e_set_features(struct net_device *netdev,
if (need_reset)
i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
+ if ((features ^ netdev->features) & NETIF_F_LOOPBACK)
+ return i40e_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
+
return 0;
}
@@ -12694,7 +13041,7 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
u8 type, filter_index;
- i40e_status ret;
+ int ret;
type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
I40E_AQC_TUNNEL_TYPE_NGE;
@@ -12702,8 +13049,8 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
NULL);
if (ret) {
- netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@@ -12718,12 +13065,12 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
- i40e_status ret;
+ int ret;
ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
if (ret) {
- netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@@ -12836,6 +13183,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
}
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
__u16 mode;
@@ -12965,7 +13314,7 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
}
/* No need to validate L4LEN as TCP is the only protocol with a
- * a flexible value and we support all possible values supported
+ * flexible value and we support all possible values supported
* by TCP, which is at most 15 dwords
*/
@@ -12983,15 +13332,15 @@ out_err:
static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
- int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ int frame_size = i40e_max_vsi_frame_size(vsi, prog);
struct i40e_pf *pf = vsi->back;
struct bpf_prog *old_prog;
bool need_reset;
int i;
/* Don't allow frames that span over multiple buffers */
- if (frame_size > vsi->rx_buf_len) {
- NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
+ if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags");
return -EINVAL;
}
@@ -13001,15 +13350,29 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
if (need_reset)
i40e_prep_for_reset(pf);
+ /* VSI shall be deleted in a moment, just return EINVAL */
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return -EINVAL;
+
old_prog = xchg(&vsi->xdp_prog, prog);
if (need_reset) {
- if (!prog)
+ if (!prog) {
+ xdp_features_clear_redirect_target(vsi->netdev);
/* Wait until ndo_xsk_wakeup completes. */
synchronize_rcu();
+ }
i40e_reset_and_rebuild(pf, true, true);
}
+ if (!i40e_enabled_xdp_vsi(vsi) && prog) {
+ if (i40e_realloc_rx_bi_zc(vsi, true))
+ return -ENOMEM;
+ } else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
+ if (i40e_realloc_rx_bi_zc(vsi, false))
+ return -ENOMEM;
+ }
+
for (i = 0; i < vsi->num_queue_pairs; i++)
WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
@@ -13019,11 +13382,13 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
/* Kick start the NAPI context if there is an AF_XDP socket open
* on that queue id. This so that receiving will start.
*/
- if (need_reset && prog)
+ if (need_reset && prog) {
for (i = 0; i < vsi->num_queue_pairs; i++)
if (vsi->xdp_rings[i]->xsk_pool)
(void)i40e_xsk_wakeup(vsi->netdev, i,
XDP_WAKEUP_RX);
+ xdp_features_set_redirect_target(vsi->netdev, true);
+ }
return 0;
}
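
The reworked MTU check above compares the requested MTU against the largest frame a single Rx buffer can hold when the XDP program does not support multi-buffer (frag) frames; i40e_max_vsi_frame_size() and I40E_PACKET_HDR_PAD are defined elsewhere in this series and not shown here. A rough sketch of the arithmetic, with the header-pad expression taken as an assumption about the usual Ethernet/VLAN/FCS overhead (ETH_HLEN, ETH_FCS_LEN and VLAN_HLEN come from linux/if_ether.h and linux/if_vlan.h):

/* assumed per-frame overhead surrounding the MTU */
#define PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

/* largest MTU that still fits one Rx buffer for a non-frag XDP prog */
static int max_linear_mtu(int single_buf_frame_size)
{
	return single_buf_frame_size - PACKET_HDR_PAD;
}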
@@ -13242,6 +13607,7 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
i40e_queue_pair_disable_irq(vsi, queue_pair);
err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
+ i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
i40e_queue_pair_clean_rings(vsi, queue_pair);
i40e_queue_pair_reset_stats(vsi, queue_pair);
@@ -13375,8 +13741,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np->vsi = vsi;
hw_enc_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_CSUM |
NETIF_F_HIGHDMA |
NETIF_F_SOFT_FEATURES |
NETIF_F_TSO |
@@ -13407,6 +13772,23 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
/* record features VLANs can make use of */
netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
+#define I40E_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL |
+ I40E_GSO_PARTIAL_FEATURES;
+
+ netdev->mpls_features |= NETIF_F_SG;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->mpls_features |= NETIF_F_TSO;
+ netdev->mpls_features |= NETIF_F_TSO6;
+ netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES;
+
/* enable macvlan offloads */
netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
@@ -13417,11 +13799,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
- netdev->hw_features |= hw_features;
+ netdev->hw_features |= hw_features | NETIF_F_LOOPBACK;
netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+ netdev->features &= ~NETIF_F_HW_TC;
+
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, hw->mac.perm_addr);
@@ -13439,6 +13823,11 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_mac_filter(vsi, mac_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_RX_SG;
} else {
/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
* are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
@@ -13579,8 +13968,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
@@ -13609,8 +13998,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
ret = -ENOENT;
@@ -13629,8 +14018,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
ret = -ENOENT;
@@ -13652,9 +14041,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
* message and continue
*/
dev_info(&pf->pdev->dev,
- "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
+ "failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n",
enabled_tc,
- i40e_stat_str(&pf->hw, ret),
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@@ -13748,8 +14137,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "add vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "add vsi failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
ret = -ENOENT;
@@ -13761,15 +14150,15 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
vsi->id = ctxt.vsi_number;
}
- vsi->active_filters = 0;
- clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
spin_lock_bh(&vsi->mac_filter_hash_lock);
+ vsi->active_filters = 0;
/* If macvlan filters already exist, force them to get loaded */
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
f->state = I40E_FILTER_NEW;
f_count++;
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
@@ -13780,8 +14169,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get vsi bw info, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get vsi bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* VSI is already added so not tearing that up */
ret = 0;
@@ -14151,6 +14540,9 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
ret = i40e_config_netdev(vsi);
if (ret)
goto err_netdev;
+ ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
+ if (ret)
+ goto err_netdev;
ret = register_netdev(vsi->netdev);
if (ret)
goto err_netdev;
@@ -14224,8 +14616,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "query veb bw config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "query veb bw config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@@ -14234,8 +14626,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&ets_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "query veb bw ets config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "query veb bw ets config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@@ -14431,8 +14823,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
/* get a VEB from the hardware */
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't add VEB, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't add VEB, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
@@ -14442,16 +14834,16 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
&veb->stats_idx, NULL, NULL, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get VEB statistics idx, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get VEB statistics idx, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get VEB bw info, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get VEB bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
return -ENOENT;
@@ -14661,8 +15053,8 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
&next_seid, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "get switch config failed err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "get switch config failed err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
kfree(aq_buf);
@@ -14707,8 +15099,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
ret = i40e_fetch_switch_configuration(pf, false);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't fetch switch config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't fetch switch config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return ret;
}
@@ -14734,8 +15126,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
NULL);
if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
- "couldn't set switch config bits, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't set switch config bits, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
@@ -15072,13 +15464,12 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf)
*
* Return 0 on success, negative on failure.
**/
-static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
+static int i40e_pf_loop_reset(struct i40e_pf *pf)
{
/* wait max 10 seconds for PF reset to succeed */
const unsigned long time_end = jiffies + 10 * HZ;
-
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ int ret;
ret = i40e_pf_reset(hw);
while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
@@ -15124,9 +15515,9 @@ static bool i40e_check_fw_empr(struct i40e_pf *pf)
* Return 0 if NIC is healthy or negative value when there are issues
* with resets
**/
-static i40e_status i40e_handle_resets(struct i40e_pf *pf)
+static int i40e_handle_resets(struct i40e_pf *pf)
{
- const i40e_status pfr = i40e_pf_loop_reset(pf);
+ const int pfr = i40e_pf_loop_reset(pf);
const bool is_empr = i40e_check_fw_empr(pf);
if (is_empr || pfr != I40E_SUCCESS)
@@ -15151,6 +15542,7 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
int err;
int v_idx;
+ pci_set_drvdata(pf->pdev, pf);
pci_save_state(pf->pdev);
/* set up periodic task facility */
@@ -15221,10 +15613,9 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
err_switch_setup:
i40e_reset_interrupt_capability(pf);
- del_timer_sync(&pf->service_timer);
+ timer_shutdown_sync(&pf->service_timer);
i40e_shutdown_adminq(hw);
iounmap(hw->hw_addr);
- pci_disable_pcie_error_reporting(pf->pdev);
pci_release_mem_regions(pf->pdev);
pci_disable_device(pf->pdev);
kfree(pf);
@@ -15264,13 +15655,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct i40e_aq_get_phy_abilities_resp abilities;
#ifdef CONFIG_I40E_DCB
enum i40e_get_fw_lldp_status_resp lldp_status;
- i40e_status status;
#endif /* CONFIG_I40E_DCB */
struct i40e_pf *pf;
struct i40e_hw *hw;
static u16 pfs_found;
u16 wol_nvm_bits;
u16 link_status;
+#ifdef CONFIG_I40E_DCB
+ int status;
+#endif /* CONFIG_I40E_DCB */
int err;
u32 val;
u32 i;
@@ -15282,12 +15675,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
/* set up pci connections */
@@ -15298,7 +15688,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
/* Now that we have a PCI connection, we need to do the
@@ -15451,8 +15840,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
- dev_info(&pdev->dev,
- "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
+ dev_dbg(&pdev->dev,
+ "The driver for the device detected a newer version of the NVM image v%u.%u than v%u.%u.\n",
hw->aq.api_maj_ver,
hw->aq.api_min_ver,
I40E_FW_API_VERSION_MAJOR,
@@ -15642,8 +16031,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (err)
- dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Reconfigure hardware for allowing smaller MSS in the case
@@ -15661,8 +16050,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
msleep(75);
err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (err)
- dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@@ -15760,23 +16149,23 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (hw->bus.speed) {
case i40e_bus_speed_8000:
- strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
+ strscpy(speed, "8.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_5000:
- strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
+ strscpy(speed, "5.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_2500:
- strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
+ strscpy(speed, "2.5", PCI_SPEED_SIZE); break;
default:
break;
}
switch (hw->bus.width) {
case i40e_bus_width_pcie_x8:
- strlcpy(width, "8", PCI_WIDTH_SIZE); break;
+ strscpy(width, "8", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x4:
- strlcpy(width, "4", PCI_WIDTH_SIZE); break;
+ strscpy(width, "4", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x2:
- strlcpy(width, "2", PCI_WIDTH_SIZE); break;
+ strscpy(width, "2", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x1:
- strlcpy(width, "1", PCI_WIDTH_SIZE); break;
+ strscpy(width, "1", PCI_WIDTH_SIZE); break;
default:
break;
}
@@ -15794,8 +16183,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get the requested speeds from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
if (err)
- dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
@@ -15805,8 +16194,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get the supported phy types from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
if (err)
- dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* make sure the MFS hasn't been set lower than the default */
@@ -15843,7 +16232,7 @@ err_vsis:
kfree(pf->vsi);
err_switch_setup:
i40e_reset_interrupt_capability(pf);
- del_timer_sync(&pf->service_timer);
+ timer_shutdown_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
(void)i40e_shutdown_lan_hmc(hw);
@@ -15856,7 +16245,6 @@ err_pf_reset:
err_ioremap:
kfree(pf);
err_pf_alloc:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -15877,7 +16265,7 @@ static void i40e_remove(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
- i40e_status ret_code;
+ int ret_code;
int i;
i40e_dbg_pf_exit(pf);
@@ -15888,8 +16276,13 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
- while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ /* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
+ * flags; once they are set, i40e_rebuild should not be called as
+ * i40e_prep_for_reset always returns early.
+ */
+ while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
usleep_range(1000, 2000);
+ set_bit(__I40E_IN_REMOVE, pf->state);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
@@ -15900,7 +16293,7 @@ static void i40e_remove(struct pci_dev *pdev)
set_bit(__I40E_SUSPENDED, pf->state);
set_bit(__I40E_DOWN, pf->state);
if (pf->service_timer.function)
- del_timer_sync(&pf->service_timer);
+ timer_shutdown_sync(&pf->service_timer);
if (pf->service_task.func)
cancel_work_sync(&pf->service_task);
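timer_shutdown_sync() goes one step beyond del_timer_sync(): after it returns, the timer can no longer be re-armed, which matters on teardown paths where a still-running handler or work item might otherwise restart it. A hedged sketch of the teardown ordering, mirroring the hunk above (illustrative only):

	/* stop the periodic timer, forbid re-arming, then flush the work it schedules */
	timer_shutdown_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);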
@@ -15999,7 +16392,6 @@ unmap:
kfree(pf);
pci_release_mem_regions(pdev);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
@@ -16088,6 +16480,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return;
+
i40e_reset_and_rebuild(pf, false, false);
}
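The remove path above relies on test_and_set_bit() being atomic: it sets the bit and returns its previous value in a single step, so the loop simultaneously waits out an in-flight reset and claims the flag, while the new __I40E_IN_REMOVE bit lets the AER reset handler bail out. A generic sketch of the idiom (hypothetical flag names, illustrative only):

	/* spin until the previous value was 0, i.e. until we own the bit */
	while (test_and_set_bit(__EXAMPLE_RESET_PENDING, state))
		usleep_range(1000, 2000);
	set_bit(__EXAMPLE_IN_REMOVE, state);

	/* elsewhere, a rebuild path backs off once remove has started */
	if (test_bit(__EXAMPLE_IN_REMOVE, state))
		return;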
@@ -16117,9 +16512,9 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
u8 mac_addr[6];
u16 flags = 0;
+ int ret;
/* Get current MAC address in case it's an LAA */
if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
@@ -16330,6 +16725,8 @@ static struct pci_driver i40e_driver = {
**/
static int __init i40e_init_module(void)
{
+ int err;
+
pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
@@ -16347,7 +16744,14 @@ static int __init i40e_init_module(void)
}
i40e_dbg_init();
- return pci_register_driver(&i40e_driver);
+ err = pci_register_driver(&i40e_driver);
+ if (err) {
+ destroy_workqueue(i40e_wq);
+ i40e_dbg_exit();
+ return err;
+ }
+
+ return 0;
}
module_init(i40e_init_module);
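The module-init change fixes a leak: the workqueue and debugfs state created earlier were never torn down when pci_register_driver() failed. A generic sketch of the pattern (hypothetical example_wq/example_driver names, illustrative only):

static int __init example_init(void)
{
	int err;

	example_wq = create_singlethread_workqueue("example");
	if (!example_wq)
		return -ENOMEM;

	err = pci_register_driver(&example_driver);
	if (err)
		destroy_workqueue(example_wq);	/* undo what init already set up */

	return err;
}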
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index fe6dca846028..9da0c87f0328 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -13,10 +13,10 @@
* in this file) as an equivalent of the FLASH part mapped into the SR.
* We always access the FLASH through the Shadow RAM.
**/
-i40e_status i40e_init_nvm(struct i40e_hw *hw)
+int i40e_init_nvm(struct i40e_hw *hw)
{
struct i40e_nvm_info *nvm = &hw->nvm;
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 fla, gens;
u8 sr_size;
@@ -52,12 +52,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
* This function will request NVM ownership for reading
* via the proper Admin Command.
**/
-i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
- enum i40e_aq_resource_access_type access)
+int i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access)
{
- i40e_status ret_code = 0;
u64 gtime, timeout;
u64 time_left = 0;
+ int ret_code = 0;
if (hw->nvm.blank_nvm_mode)
goto i40e_i40e_acquire_nvm_exit;
@@ -111,7 +111,7 @@ i40e_i40e_acquire_nvm_exit:
**/
void i40e_release_nvm(struct i40e_hw *hw)
{
- i40e_status ret_code = I40E_SUCCESS;
+ int ret_code = I40E_SUCCESS;
u32 total_delay = 0;
if (hw->nvm.blank_nvm_mode)
@@ -138,9 +138,9 @@ void i40e_release_nvm(struct i40e_hw *hw)
*
* Polls the SRCTL Shadow RAM register done bit.
**/
-static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
- i40e_status ret_code = I40E_ERR_TIMEOUT;
+ int ret_code = I40E_ERR_TIMEOUT;
u32 srctl, wait_cnt;
/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
@@ -165,10 +165,10 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
*
* Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
-static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
- u16 *data)
+static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
- i40e_status ret_code = I40E_ERR_TIMEOUT;
+ int ret_code = I40E_ERR_TIMEOUT;
u32 sr_reg;
if (offset >= hw->nvm.sr_size) {
@@ -216,13 +216,13 @@ read_nvm_exit:
*
* Reads a 16 bit words buffer from the Shadow RAM using the admin command.
**/
-static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
- u8 module_pointer, u32 offset,
- u16 words, void *data,
- bool last_command)
+static int i40e_read_nvm_aq(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data,
+ bool last_command)
{
- i40e_status ret_code = I40E_ERR_NVM;
struct i40e_asq_cmd_details cmd_details;
+ int ret_code = I40E_ERR_NVM;
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -264,10 +264,10 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
*
* Reads one 16 bit word from the Shadow RAM using the AdminQ
**/
-static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
- u16 *data)
+static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
- i40e_status ret_code = I40E_ERR_TIMEOUT;
+ int ret_code = I40E_ERR_TIMEOUT;
ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
*data = le16_to_cpu(*(__le16 *)data);
@@ -286,8 +286,8 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
* Do not use this function except in cases where the nvm lock is already
* taken via i40e_acquire_nvm().
**/
-static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
- u16 offset, u16 *data)
+static int __i40e_read_nvm_word(struct i40e_hw *hw,
+ u16 offset, u16 *data)
{
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
return i40e_read_nvm_word_aq(hw, offset, data);
@@ -303,10 +303,10 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
*
* Reads one 16 bit word from the Shadow RAM.
**/
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data)
+int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
@@ -330,17 +330,17 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
* @words_data_size: Words to read from NVM
* @data_ptr: Pointer to memory location where resulting buffer will be stored
**/
-enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr,
- u16 module_offset,
- u16 data_offset,
- u16 words_data_size,
- u16 *data_ptr)
+int i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr)
{
- i40e_status status;
u16 specific_ptr = 0;
u16 ptr_value = 0;
u32 offset = 0;
+ int status;
if (module_ptr != 0) {
status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
@@ -406,10 +406,10 @@ enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+static int i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u16 index, word;
/* Loop thru the selected region */
@@ -437,13 +437,13 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+static int i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
- i40e_status ret_code;
- u16 read_size;
bool last_cmd = false;
u16 words_read = 0;
+ u16 read_size;
+ int ret_code;
u16 i = 0;
do {
@@ -493,9 +493,9 @@ read_nvm_buffer_aq_exit:
* Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
* method.
**/
-static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
- u16 offset, u16 *words,
- u16 *data)
+static int __i40e_read_nvm_buffer(struct i40e_hw *hw,
+ u16 offset, u16 *words,
+ u16 *data)
{
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
return i40e_read_nvm_buffer_aq(hw, offset, words, data);
@@ -514,10 +514,10 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
@@ -544,12 +544,12 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
*
* Writes a 16 bit words buffer to the Shadow RAM using the admin command.
**/
-static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 words, void *data,
- bool last_command)
+static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
{
- i40e_status ret_code = I40E_ERR_NVM;
struct i40e_asq_cmd_details cmd_details;
+ int ret_code = I40E_ERR_NVM;
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -594,14 +594,14 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
* is customer specific and unknown. Therefore, this function skips all maximum
* possible size of VPD (1kB).
**/
-static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
- u16 *checksum)
+static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
{
- i40e_status ret_code;
struct i40e_virt_mem vmem;
u16 pcie_alt_module = 0;
u16 checksum_local = 0;
u16 vpd_module = 0;
+ int ret_code;
u16 *data;
u16 i = 0;
@@ -675,17 +675,18 @@ i40e_calc_nvm_checksum_exit:
* on ARQ completion event reception by caller.
* This function will commit SR to NVM.
**/
-i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
+int i40e_update_nvm_checksum(struct i40e_hw *hw)
{
- i40e_status ret_code;
- u16 checksum;
__le16 le_sum;
+ int ret_code;
+ u16 checksum;
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
- le_sum = cpu_to_le16(checksum);
- if (!ret_code)
+ if (!ret_code) {
+ le_sum = cpu_to_le16(checksum);
ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
1, &le_sum, true);
+ }
return ret_code;
}
@@ -698,12 +699,12 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
* Performs checksum calculation and validates the NVM SW checksum. If the
* caller does not need checksum, the value can be NULL.
**/
-i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
- u16 *checksum)
+int i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
{
- i40e_status ret_code = 0;
- u16 checksum_sr = 0;
u16 checksum_local = 0;
+ u16 checksum_sr = 0;
+ int ret_code = 0;
/* We must acquire the NVM lock in order to correctly synchronize the
* NVM accesses across multiple PFs. Without doing so it is possible
@@ -732,36 +733,36 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
return ret_code;
}
-static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
+static int i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *errno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
int *perrno);
-static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno);
-static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
+static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno);
+static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
static inline u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@@ -806,12 +807,12 @@ static const char * const i40e_nvm_update_state_str[] = {
*
* Dispatches command depending on what update state is current
**/
-i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+int i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status;
enum i40e_nvmupd_cmd upd_cmd;
+ int status;
/* assume success */
*perrno = 0;
@@ -922,12 +923,12 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
* Process legitimate commands of the Init state and conditionally set next
* state. Reject all other commands.
**/
-static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
+ int status = 0;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
@@ -1061,12 +1062,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands.
**/
-static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
+ int status = 0;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
@@ -1103,13 +1104,13 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands
**/
-static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
bool retry_attempt = false;
+ int status = 0;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
@@ -1186,8 +1187,8 @@ retry:
*/
if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
!retry_attempt) {
- i40e_status old_status = status;
u32 old_asq_status = hw->aq.asq_last_status;
+ int old_status = status;
u32 gtime;
gtime = rd32(hw, I40E_GLVFGEN_TIMER);
@@ -1369,17 +1370,17 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
struct i40e_asq_cmd_details cmd_details;
- i40e_status status;
struct i40e_aq_desc *aq_desc;
u32 buff_size = 0;
u8 *buff = NULL;
u32 aq_desc_len;
u32 aq_data_len;
+ int status;
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
if (cmd->offset == 0xffff)
@@ -1428,8 +1429,8 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
buff_size, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_exec_aq err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "%s err %pe aq_err %s\n",
+ __func__, ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
return status;
@@ -1453,9 +1454,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
u32 aq_total_len;
u32 aq_desc_len;
@@ -1522,9 +1523,9 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
u32 aq_total_len;
u32 aq_desc_len;
@@ -1556,13 +1557,13 @@ static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
struct i40e_asq_cmd_details cmd_details;
- i40e_status status;
u8 module, transaction;
+ int status;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
@@ -1595,13 +1596,13 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
*
* module, offset, data_size and data are in cmd structure
**/
-static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno)
+static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno)
{
- i40e_status status = 0;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
+ int status = 0;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
@@ -1635,14 +1636,14 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
*
* module, offset, data_size and data are in cmd structure
**/
-static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
u8 preservation_flags;
+ int status = 0;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 2f6815b2f8df..2bd4de03dafa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -56,5 +56,4 @@ do { \
(h)->bus.func, ##__VA_ARGS__); \
} while (0)
-typedef enum i40e_status_code i40e_status;
#endif /* _I40E_OSDEP_H_ */
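With the i40e_status typedef gone, every former i40e_status return type becomes a plain int; the negative I40E_ERR_* enum values themselves are unchanged, so callers keep checking "if (ret)" as before. A minimal sketch of the resulting convention (illustrative, not from this patch):

int example_check(struct i40e_hw *hw)
{
	if (!hw)
		return I40E_ERR_PARAM;	/* still the negative enum value (-5) */

	return 0;			/* I40E_SUCCESS */
}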
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index aaea297640e0..fe845987d99a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -16,17 +16,36 @@
*/
/* adminq functions */
-i40e_status i40e_init_adminq(struct i40e_hw *hw);
+int i40e_init_adminq(struct i40e_hw *hw);
void i40e_shutdown_adminq(struct i40e_hw *hw);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
-i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
- struct i40e_arq_event_info *e,
- u16 *events_pending);
-i40e_status i40e_asq_send_command(struct i40e_hw *hw,
+int i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+int
+i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_asq_send_command_v2(struct i40e_hw *hw,
+ struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
+int
+i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context);
+int
+i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details);
+ struct i40e_asq_cmd_details *cmd_details,
+ bool is_atomic_context,
+ enum i40e_admin_queue_err *aq_status);
/* debug function for adminq */
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
@@ -34,314 +53,332 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
-const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
-i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
- bool pf_lut, u8 *lut, u16 lut_size);
-i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
- bool pf_lut, u8 *lut, u16 lut_size);
-i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_get_set_rss_key_data *key);
-i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_get_set_rss_key_data *key);
+int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+int i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+int i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
u32 i40e_led_get(struct i40e_hw *hw);
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
-i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
- u16 led_addr, u32 mode);
-i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
- u16 *val);
-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
+int i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode);
+int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val);
+int i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
/* admin send queue commands */
-i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
- u16 *fw_major_version, u16 *fw_minor_version,
- u32 *fw_build,
- u16 *api_major_version, u16 *api_minor_version,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
- u32 reg_addr, u64 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+int i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_debug_read_register(struct i40e_hw *hw,
u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
- bool qualified_modules, bool report_init,
- struct i40e_aq_get_phy_abilities_resp *abilities,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
- struct i40e_aq_set_phy_config *config,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
- bool atomic_reset);
-i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
- bool enable_link,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
- bool enable_lse, struct i40e_link_status *link,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
- u64 advt_reg,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_reset);
+int i40e_aq_set_mac_loopback(struct i40e_hw *hw,
+ bool ena_lpbk,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
- u16 vsi_id, bool set_filter,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
- bool rx_only_promisc);
-i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable, u16 vid,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
- u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *pveb_seid,
- bool enable_stats,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
- u16 veb_seid, u16 *switch_id, bool *floating,
- u16 *statistic_index, u16 *vebs_used,
- u16 *vebs_free,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+int i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 vsi_id, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc);
+int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *pveb_seid,
+ bool enable_stats,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id, bool *floating,
+ u16 *statistic_index, u16 *vebs_used,
+ u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_remove_macvlan_element_data *mv_list,
- u16 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free);
-i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free);
+int
+i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
+int i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details,
+ enum i40e_admin_queue_err *aq_status);
+int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free);
+int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free);
-i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
- u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
- struct i40e_aqc_get_switch_config_resp *buf,
- u16 buf_size, u16 *start_seid,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
- u16 flags,
- u16 valid_flags, u8 mode,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- enum i40e_aq_resource_access_type access,
- u8 sdp_number, u64 *timeout,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- u8 sdp_number,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, bool last_command,
+int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
- void *buff, u16 buff_size, u16 *data_size,
- enum i40e_admin_queue_opc list_type_opc,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command, u8 preservation_flags,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
+int i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags, u8 mode,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
- u8 mib_type, void *buff, u16 buff_size,
- u16 *local_len, u16 *remote_len,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
+int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 preservation_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details);
+int
i40e_aq_set_lldp_mib(struct i40e_hw *hw,
u8 mib_type, void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
- bool enable_update,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
+int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details);
+int
i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
- bool persist,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
- bool dcb_enable,
- struct i40e_asq_cmd_details
- *cmd_details);
-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+ bool dcb_enable,
+ struct i40e_asq_cmd_details
+ *cmd_details);
+int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
- void *buff, u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 protocol_index,
- u8 *filter_index,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
- u16 flags, u8 *mac_addr,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
- u16 seid, u16 credit, u8 max_bw,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+int i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
- enum i40e_admin_queue_opc opcode,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+int i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_port_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
+int i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+int
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count);
-enum i40e_status_code
+int
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count);
-enum i40e_status_code
+int
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count);
-enum i40e_status_code
+int
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count);
-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg);
-enum i40e_status_code
+int i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg);
+int
i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details);
/* i40e_common */
-i40e_status i40e_init_shared_code(struct i40e_hw *hw);
-i40e_status i40e_pf_reset(struct i40e_hw *hw);
+int i40e_init_shared_code(struct i40e_hw *hw);
+int i40e_pf_reset(struct i40e_hw *hw);
void i40e_clear_hw(struct i40e_hw *hw);
void i40e_clear_pxe_mode(struct i40e_hw *hw);
-i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
-i40e_status i40e_update_link_info(struct i40e_hw *hw);
-i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
- u32 *max_bw, u32 *min_bw, bool *min_valid,
- bool *max_valid);
-i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
- struct i40e_aqc_configure_partition_bw_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
- u32 pba_num_size);
-i40e_status i40e_validate_mac_addr(u8 *mac_addr);
+int i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+int i40e_update_link_info(struct i40e_hw *hw);
+int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw, bool *min_valid,
+ bool *max_valid);
+int
+i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+int i40e_validate_mac_addr(u8 *mac_addr);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
/* prototype for functions used for NVM access */
-i40e_status i40e_init_nvm(struct i40e_hw *hw);
-i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
- enum i40e_aq_resource_access_type access);
+int i40e_init_nvm(struct i40e_hw *hw);
+int i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access);
void i40e_release_nvm(struct i40e_hw *hw);
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data);
-enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr,
- u16 module_offset,
- u16 data_offset,
- u16 words_data_size,
- u16 *data_ptr);
-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data);
-i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
-i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
- u16 *checksum);
-i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *);
+int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+int i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr);
+int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+int i40e_update_nvm_checksum(struct i40e_hw *hw);
+int i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum);
+int i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *errno);
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
struct i40e_aq_desc *desc);
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
-i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+int i40e_set_mac_type(struct i40e_hw *hw);
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
@@ -390,41 +427,41 @@ i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct virtchnl_vf_resource *msg);
-i40e_status i40e_vf_reset(struct i40e_hw *hw);
-i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum virtchnl_ops v_opcode,
- i40e_status v_retval,
- u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_set_filter_control(struct i40e_hw *hw,
- struct i40e_filter_control_settings *settings);
-i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
- u8 *mac_addr, u16 ethtype, u16 flags,
- u16 vsi_seid, u16 queue, bool is_add,
- struct i40e_control_filter_stats *stats,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
- u8 table_id, u32 start_index, u16 buff_size,
- void *buff, u16 *ret_buff_size,
- u8 *ret_next_table, u32 *ret_next_index,
- struct i40e_asq_cmd_details *cmd_details);
+int i40e_vf_reset(struct i40e_hw *hw);
+int i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum virtchnl_ops v_opcode,
+ int v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details);
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 vsi_seid);
-i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
-i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-enum i40e_status_code
+int
i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
u8 phy_select, u8 dev_addr, bool page_change,
bool set_mdio, u8 mdio_num,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
+int
i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
u8 phy_select, u8 dev_addr, bool page_change,
bool set_mdio, u8 mdio_num,
@@ -437,43 +474,43 @@ i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \
i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
-i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 value);
-i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value);
-i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 value);
+int i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value);
+int i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value);
+int i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+int i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 *value);
+int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
-i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
- u16 buff_size, u32 track_id,
- u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *
- cmd_details);
-i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
- u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *
- cmd_details);
+int i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
+int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *
+ cmd_details);
+int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *
+ cmd_details);
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
struct i40e_profile_section_header *
i40e_find_section_in_profile(u32 section_type,
struct i40e_profile_segment *profile);
-enum i40e_status_code
+int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
-enum i40e_status_code
+int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
-enum i40e_status_code
+int
i40e_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 09b1d5aed1c9..c37abbb3cd06 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -27,7 +27,6 @@
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_SUBDEV_ID_25G_PTP_PIN 0xB
-#define to_dev(obj) container_of(obj, struct device, kobj)
enum i40e_ptp_pin {
SDP3_2 = 0,
@@ -335,43 +334,25 @@ static void i40e_ptp_convert_to_hwtstamp(struct skb_shared_hwtstamps *hwtstamps,
}
/**
- * i40e_ptp_adjfreq - Adjust the PHC frequency
+ * i40e_ptp_adjfine - Adjust the PHC frequency
* @ptp: The PTP clock structure
- * @ppb: Parts per billion adjustment from the base
+ * @scaled_ppm: Scaled parts per million adjustment from base
*
- * Adjust the frequency of the PHC by the indicated parts per billion from the
- * base frequency.
+ * Adjust the frequency of the PHC by the indicated delta from the base
+ * frequency.
+ *
+ * Scaled parts per million is ppm with a 16 bit binary fractional field.
**/
-static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int i40e_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
struct i40e_hw *hw = &pf->hw;
- u64 adj, freq, diff;
- int neg_adj = 0;
-
- if (ppb < 0) {
- neg_adj = 1;
- ppb = -ppb;
- }
-
- freq = I40E_PTP_40GB_INCVAL;
- freq *= ppb;
- diff = div_u64(freq, 1000000000ULL);
+ u64 adj, base_adj;
- if (neg_adj)
- adj = I40E_PTP_40GB_INCVAL - diff;
- else
- adj = I40E_PTP_40GB_INCVAL + diff;
-
- /* At some link speeds, the base incval is so large that directly
- * multiplying by ppb would result in arithmetic overflow even when
- * using a u64. Avoid this by instead calculating the new incval
- * always in terms of the 40GbE clock rate and then multiplying by the
- * link speed factor afterwards. This does result in slightly lower
- * precision at lower link speeds, but it is fairly minor.
- */
smp_mb(); /* Force any pending update before accessing. */
- adj *= READ_ONCE(pf->ptp_adj_mult);
+ base_adj = I40E_PTP_40GB_INCVAL * READ_ONCE(pf->ptp_adj_mult);
+
+ adj = adjust_by_scaled_ppm(base_adj, scaled_ppm);
wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
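For scale: scaled_ppm carries a 16-bit binary fraction, so 65536 scaled_ppm equals 1 ppm, and adjust_by_scaled_ppm() applies that factor to a base increment without intermediate overflow. An illustrative use, assuming the u64/long helper signature from ptp_clock_kernel.h:

	long scaled_ppm = 10 * 65536;	/* request a +10 ppm frequency offset */
	u64 adj;

	/* adj = base_adj * (1 + 10 / 1000000), rounded */
	adj = adjust_by_scaled_ppm(base_adj, scaled_ppm);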
@@ -1205,10 +1186,6 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
INIT_WORK(&pf->ptp_extts0_work, i40e_ptp_extts0_work);
- /* Reserved for future extensions. */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
pf->ptp_tx = false;
@@ -1402,11 +1379,11 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
if (!IS_ERR_OR_NULL(pf->ptp_clock))
return 0;
- strlcpy(pf->ptp_caps.name, i40e_driver_name,
+ strscpy(pf->ptp_caps.name, i40e_driver_name,
sizeof(pf->ptp_caps.name) - 1);
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
- pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
+ pf->ptp_caps.adjfine = i40e_ptp_adjfine;
pf->ptp_caps.adjtime = i40e_ptp_adjtime;
pf->ptp_caps.gettimex64 = i40e_ptp_gettimex;
pf->ptp_caps.settime64 = i40e_ptp_settime;
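
The i40e_ptp_adjfine() change above relies on the scaled_ppm encoding described in its kernel-doc: parts per million with a 16-bit binary fractional field, i.e. scaled_ppm = ppm * 2^16. As a rough, stand-alone illustration of the arithmetic (this is not the in-kernel adjust_by_scaled_ppm() helper, which handles rounding and overflow far more carefully; the base value below is a placeholder, not I40E_PTP_40GB_INCVAL):

#include <stdint.h>
#include <stdio.h>

/* Sketch only: the adjustment is base * scaled_ppm / (1000000 * 2^16). */
static uint64_t adjust_by_scaled_ppm_sketch(uint64_t base, long scaled_ppm)
{
        int neg = scaled_ppm < 0;
        uint64_t abs_sppm = neg ? (uint64_t)-scaled_ppm : (uint64_t)scaled_ppm;
        uint64_t delta = ((base / 1000000) * abs_sppm) >> 16;

        return neg ? base - delta : base + delta;
}

int main(void)
{
        uint64_t base = 1ULL << 40;     /* placeholder base increment */

        /* +1 ppm expressed as scaled_ppm is 1 << 16 = 65536 */
        printf("%llu -> %llu\n", (unsigned long long)base,
               (unsigned long long)adjust_by_scaled_ppm_sketch(base, 65536));
        return 0;
}
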
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 8d0588a27a05..7339003aa17c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -211,6 +211,11 @@
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
@@ -413,6 +418,9 @@
#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
@@ -640,6 +648,14 @@
#define I40E_VFQF_HKEY1_MAX_INDEX 12
#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
#define I40E_VFQF_HLUT1_MAX_INDEX 15
+#define I40E_GL_RXERR1H(_i) (0x00318004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1H_MAX_INDEX 143
+#define I40E_GL_RXERR1H_RXERR1H_SHIFT 0
+#define I40E_GL_RXERR1H_RXERR1H_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1H_RXERR1H_SHIFT)
+#define I40E_GL_RXERR1L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1L_MAX_INDEX 143
+#define I40E_GL_RXERR1L_RXERR1L_SHIFT 0
+#define I40E_GL_RXERR1L_RXERR1L_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1L_RXERR1L_SHIFT)
#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
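
The new register definitions above follow the driver's usual SHIFT/MASK pattern. Purely as a hedged illustration of how such a pair is decoded (a stand-alone sketch with local copies of the macros, assuming I40E_MASK(m, s) expands to m << s as the values above imply; the sample register value is made up):

#include <stdint.h>
#include <stdio.h>

#define PCIPFCNT_SHIFT 0
#define PCIPFCNT_MASK  (0x1Fu << PCIPFCNT_SHIFT)
#define PCIVFCNT_SHIFT 16
#define PCIVFCNT_MASK  (0xFFu << PCIVFCNT_SHIFT)

int main(void)
{
        uint32_t val = 0x00200004;      /* made-up I40E_GLGEN_PCIFCNCNT value */
        uint32_t pfs = (val & PCIPFCNT_MASK) >> PCIPFCNT_SHIFT;
        uint32_t vfs = (val & PCIVFCNT_MASK) >> PCIVFCNT_SHIFT;

        printf("PFs in use: %u, VFs in use: %u\n", pfs, vfs);
        return 0;
}
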
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index 77be0702d07c..4d2782e76038 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -9,65 +9,30 @@ enum i40e_status_code {
I40E_SUCCESS = 0,
I40E_ERR_NVM = -1,
I40E_ERR_NVM_CHECKSUM = -2,
- I40E_ERR_PHY = -3,
I40E_ERR_CONFIG = -4,
I40E_ERR_PARAM = -5,
- I40E_ERR_MAC_TYPE = -6,
I40E_ERR_UNKNOWN_PHY = -7,
- I40E_ERR_LINK_SETUP = -8,
- I40E_ERR_ADAPTER_STOPPED = -9,
I40E_ERR_INVALID_MAC_ADDR = -10,
I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
- I40E_ERR_MASTER_REQUESTS_PENDING = -12,
- I40E_ERR_INVALID_LINK_SETTINGS = -13,
- I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
I40E_ERR_RESET_FAILED = -15,
- I40E_ERR_SWFW_SYNC = -16,
I40E_ERR_NO_AVAILABLE_VSI = -17,
I40E_ERR_NO_MEMORY = -18,
I40E_ERR_BAD_PTR = -19,
- I40E_ERR_RING_FULL = -20,
- I40E_ERR_INVALID_PD_ID = -21,
- I40E_ERR_INVALID_QP_ID = -22,
- I40E_ERR_INVALID_CQ_ID = -23,
- I40E_ERR_INVALID_CEQ_ID = -24,
- I40E_ERR_INVALID_AEQ_ID = -25,
I40E_ERR_INVALID_SIZE = -26,
- I40E_ERR_INVALID_ARP_INDEX = -27,
- I40E_ERR_INVALID_FPM_FUNC_ID = -28,
- I40E_ERR_QP_INVALID_MSG_SIZE = -29,
- I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
- I40E_ERR_INVALID_FRAG_COUNT = -31,
I40E_ERR_QUEUE_EMPTY = -32,
- I40E_ERR_INVALID_ALIGNMENT = -33,
- I40E_ERR_FLUSHED_QUEUE = -34,
- I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
- I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
I40E_ERR_TIMEOUT = -37,
- I40E_ERR_OPCODE_MISMATCH = -38,
- I40E_ERR_CQP_COMPL_ERROR = -39,
- I40E_ERR_INVALID_VF_ID = -40,
- I40E_ERR_INVALID_HMCFN_ID = -41,
- I40E_ERR_BACKING_PAGE_ERROR = -42,
- I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
- I40E_ERR_INVALID_PBLE_INDEX = -44,
I40E_ERR_INVALID_SD_INDEX = -45,
I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
I40E_ERR_INVALID_SD_TYPE = -47,
- I40E_ERR_MEMCPY_FAILED = -48,
I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
- I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
- I40E_ERR_SRQ_ENABLED = -52,
I40E_ERR_ADMIN_QUEUE_ERROR = -53,
I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
I40E_ERR_BUF_TOO_SHORT = -55,
I40E_ERR_ADMIN_QUEUE_FULL = -56,
I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
- I40E_ERR_BAD_IWARP_CQE = -58,
I40E_ERR_NVM_BLANK_MODE = -59,
I40E_ERR_NOT_IMPLEMENTED = -60,
- I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
I40E_ERR_DIAG_TEST_FAILED = -62,
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index b5b12299931f..33b4e30f5e00 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -55,6 +55,55 @@
* being built from shared code.
*/
+#define NO_DEV "(i40e no_device)"
+
+TRACE_EVENT(i40e_napi_poll,
+
+ TP_PROTO(struct napi_struct *napi, struct i40e_q_vector *q, int budget,
+ int budget_per_ring, unsigned int rx_cleaned, unsigned int tx_cleaned,
+ bool rx_clean_complete, bool tx_clean_complete),
+
+ TP_ARGS(napi, q, budget, budget_per_ring, rx_cleaned, tx_cleaned,
+ rx_clean_complete, tx_clean_complete),
+
+ TP_STRUCT__entry(
+ __field(int, budget)
+ __field(int, budget_per_ring)
+ __field(unsigned int, rx_cleaned)
+ __field(unsigned int, tx_cleaned)
+ __field(int, rx_clean_complete)
+ __field(int, tx_clean_complete)
+ __field(int, irq_num)
+ __field(int, curr_cpu)
+ __string(qname, q->name)
+ __string(dev_name, napi->dev ? napi->dev->name : NO_DEV)
+ __bitmask(irq_affinity, nr_cpumask_bits)
+ ),
+
+ TP_fast_assign(
+ __entry->budget = budget;
+ __entry->budget_per_ring = budget_per_ring;
+ __entry->rx_cleaned = rx_cleaned;
+ __entry->tx_cleaned = tx_cleaned;
+ __entry->rx_clean_complete = rx_clean_complete;
+ __entry->tx_clean_complete = tx_clean_complete;
+ __entry->irq_num = q->irq_num;
+ __entry->curr_cpu = get_cpu();
+ __assign_str(qname, q->name);
+ __assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV);
+ __assign_bitmask(irq_affinity, cpumask_bits(&q->affinity_mask),
+ nr_cpumask_bits);
+ ),
+
+ TP_printk("i40e_napi_poll on dev %s q %s irq %d irq_mask %s curr_cpu %d "
+ "budget %d bpr %d rx_cleaned %u tx_cleaned %u "
+ "rx_clean_complete %d tx_clean_complete %d",
+ __get_str(dev_name), __get_str(qname), __entry->irq_num,
+ __get_bitmask(irq_affinity), __entry->curr_cpu, __entry->budget,
+ __entry->budget_per_ring, __entry->rx_cleaned, __entry->tx_cleaned,
+ __entry->rx_clean_complete, __entry->tx_clean_complete)
+);
+
/* Events related to a vsi & ring */
DECLARE_EVENT_CLASS(
i40e_tx_template,
@@ -113,45 +162,45 @@ DECLARE_EVENT_CLASS(
TP_PROTO(struct i40e_ring *ring,
union i40e_16byte_rx_desc *desc,
- struct sk_buff *skb),
+ struct xdp_buff *xdp),
- TP_ARGS(ring, desc, skb),
+ TP_ARGS(ring, desc, xdp),
TP_STRUCT__entry(
__field(void*, ring)
__field(void*, desc)
- __field(void*, skb)
+ __field(void*, xdp)
__string(devname, ring->netdev->name)
),
TP_fast_assign(
__entry->ring = ring;
__entry->desc = desc;
- __entry->skb = skb;
+ __entry->xdp = xdp;
__assign_str(devname, ring->netdev->name);
),
TP_printk(
- "netdev: %s ring: %p desc: %p skb %p",
+ "netdev: %s ring: %p desc: %p xdp %p",
__get_str(devname), __entry->ring,
- __entry->desc, __entry->skb)
+ __entry->desc, __entry->xdp)
);
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq,
TP_PROTO(struct i40e_ring *ring,
union i40e_16byte_rx_desc *desc,
- struct sk_buff *skb),
+ struct xdp_buff *xdp),
- TP_ARGS(ring, desc, skb));
+ TP_ARGS(ring, desc, xdp));
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq_rx,
TP_PROTO(struct i40e_ring *ring,
union i40e_16byte_rx_desc *desc,
- struct sk_buff *skb),
+ struct xdp_buff *xdp),
- TP_ARGS(ring, desc, skb));
+ TP_ARGS(ring, desc, xdp));
DECLARE_EVENT_CLASS(
i40e_xmit_template,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 10a83e5385c7..8b8bf4880faa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3,6 +3,7 @@
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
+#include <net/mpls.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
@@ -170,10 +171,10 @@ static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
struct i40e_fdir_filter *data)
{
bool is_vlan = !!data->vlan_tag;
- struct vlan_hdr vlan;
- struct ipv6hdr ipv6;
- struct ethhdr eth;
- struct iphdr ip;
+ struct vlan_hdr vlan = {};
+ struct ipv6hdr ipv6 = {};
+ struct ethhdr eth = {};
+ struct iphdr ip = {};
u8 *tmp;
if (ipv4) {
@@ -371,7 +372,6 @@ static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num,
}
}
-#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
#define I40E_UDPIP6_DUMMY_PACKET_LEN 62
/**
@@ -830,8 +830,6 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
i40e_clean_tx_ring(tx_ring);
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;
- kfree(tx_ring->xsk_descs);
- tx_ring->xsk_descs = NULL;
if (tx_ring->desc) {
dma_free_coherent(tx_ring->dev, tx_ring->size,
@@ -925,11 +923,13 @@ void i40e_detect_recover_hung(struct i40e_vsi *vsi)
* @vsi: the VSI we care about
* @tx_ring: Tx ring to clean
* @napi_budget: Used to determine if we are in netpoll
+ * @tx_cleaned: Out parameter set to the number of Tx packets cleaned
*
* Returns true if there's any budget left (e.g. the clean is finished)
**/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
- struct i40e_ring *tx_ring, int napi_budget)
+ struct i40e_ring *tx_ring, int napi_budget,
+ unsigned int *tx_cleaned)
{
int i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
@@ -1050,6 +1050,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
}
}
+ *tx_cleaned = total_packets;
return !!budget;
}
@@ -1382,8 +1383,6 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
new_buff->page_offset = old_buff->page_offset;
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
- rx_ring->rx_stats.page_reuse_count++;
-
/* clear contents of buffer_info */
old_buff->page = NULL;
}
@@ -1433,13 +1432,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
if (!tx_ring->tx_bi)
goto err;
- if (ring_is_xdp(tx_ring)) {
- tx_ring->xsk_descs = kcalloc(I40E_MAX_NUM_DESCRIPTORS, sizeof(*tx_ring->xsk_descs),
- GFP_KERNEL);
- if (!tx_ring->xsk_descs)
- goto err;
- }
-
u64_stats_init(&tx_ring->syncp);
/* round up to nearest 4K */
@@ -1463,21 +1455,11 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
return 0;
err:
- kfree(tx_ring->xsk_descs);
- tx_ring->xsk_descs = NULL;
kfree(tx_ring->tx_bi);
tx_ring->tx_bi = NULL;
return -ENOMEM;
}
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
-{
- unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;
-
- rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
- return rx_ring->rx_bi ? 0 : -ENOMEM;
-}
-
static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
{
memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
@@ -1495,11 +1477,6 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
- if (rx_ring->skb) {
- dev_kfree_skb(rx_ring->skb);
- rx_ring->skb = NULL;
- }
-
if (rx_ring->xsk_pool) {
i40e_xsk_clean_rx_ring(rx_ring);
goto skip_free;
@@ -1544,6 +1521,7 @@ skip_free:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
+ rx_ring->next_to_process = 0;
rx_ring->next_to_use = 0;
}
@@ -1596,6 +1574,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
+ rx_ring->next_to_process = 0;
rx_ring->next_to_use = 0;
/* XDP RX-queue info only needed for RX rings exposed to XDP */
@@ -1608,6 +1587,11 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
+ rx_ring->rx_bi =
+ kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
+ if (!rx_ring->rx_bi)
+ return -ENOMEM;
+
return 0;
}
@@ -1632,21 +1616,19 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
writel(val, rx_ring->tail);
}
+#if (PAGE_SIZE >= 8192)
static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
unsigned int size)
{
unsigned int truesize;
-#if (PAGE_SIZE < 8192)
- truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
-#else
truesize = rx_ring->rx_offset ?
SKB_DATA_ALIGN(size + rx_ring->rx_offset) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
SKB_DATA_ALIGN(size);
-#endif
return truesize;
}
+#endif
/**
* i40e_alloc_mapped_page - recycle or make a new page
@@ -1675,6 +1657,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
return false;
}
+ rx_ring->rx_stats.page_alloc_count++;
+
/* map page for use */
dma = dma_map_page_attrs(rx_ring->dev, page, 0,
i40e_rx_pg_size(rx_ring),
@@ -1982,32 +1966,41 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
/**
* i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buffer: buffer containing the page
- * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
+ * @rx_stats: rx stats structure for the rx ring
*
* If page is reusable, we have a green light for calling i40e_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
* pointing to; otherwise, the DMA mapping needs to be destroyed and
- * page freed
+ * page freed.
+ *
+ * rx_stats will be updated to indicate whether the page was waived
+ * or busy if it could not be reused.
*/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
- int rx_buffer_pgcnt)
+ struct i40e_rx_queue_stats *rx_stats)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
/* Is any reuse possible? */
- if (!dev_page_is_reusable(page))
+ if (!dev_page_is_reusable(page)) {
+ rx_stats->page_waive_count++;
return false;
+ }
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
+ if (unlikely((rx_buffer->page_count - pagecnt_bias) > 1)) {
+ rx_stats->page_busy_count++;
return false;
+ }
#else
#define I40E_LAST_OFFSET \
(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
- if (rx_buffer->page_offset > I40E_LAST_OFFSET)
+ if (rx_buffer->page_offset > I40E_LAST_OFFSET) {
+ rx_stats->page_busy_count++;
return false;
+ }
#endif
/* If we have drained the page fragment pool we need to update
@@ -2023,33 +2016,14 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
}
/**
- * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: buffer containing page to add
- * @skb: sk_buff to place the data into
- * @size: packet length from rx_desc
- *
- * This function will add the data contained in rx_buffer->page to the skb.
- * It will just attach the page as a frag to the skb.
- *
- * The function will then update the page offset.
+ * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
+ * @rx_buffer: Rx buffer to adjust
+ * @truesize: Size of adjustment
**/
-static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
- struct sk_buff *skb,
- unsigned int size)
+static void i40e_rx_buffer_flip(struct i40e_rx_buffer *rx_buffer,
+ unsigned int truesize)
{
#if (PAGE_SIZE < 8192)
- unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
-#endif
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
- rx_buffer->page_offset, size, truesize);
-
- /* page is being used so we must update the page offset */
-#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
#else
rx_buffer->page_offset += truesize;
@@ -2060,19 +2034,17 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
* i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
* @rx_ring: rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
- * @rx_buffer_pgcnt: buffer page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
- const unsigned int size,
- int *rx_buffer_pgcnt)
+ const unsigned int size)
{
struct i40e_rx_buffer *rx_buffer;
- rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
- *rx_buffer_pgcnt =
+ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_process);
+ rx_buffer->page_count =
#if (PAGE_SIZE < 8192)
page_count(rx_buffer->page);
#else
@@ -2094,25 +2066,82 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
}
/**
- * i40e_construct_skb - Allocate skb and populate it
+ * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: rx buffer to pull data from
+ *
+ * This function will clean up the contents of the rx_buffer. It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ */
+static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer)
+{
+ if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats)) {
+ /* hand second half of page back to the ring */
+ i40e_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ i40e_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ /* clear contents of buffer_info */
+ rx_buffer->page = NULL;
+ }
+}
+
+/**
+ * i40e_process_rx_buffs - Processing of buffers post XDP prog or on error
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @xdp_res: Result of the XDP program
* @xdp: xdp_buff pointing to the data
+ **/
+static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
+ struct xdp_buff *xdp)
+{
+ u32 next = rx_ring->next_to_clean;
+ struct i40e_rx_buffer *rx_buffer;
+
+ xdp->flags = 0;
+
+ while (1) {
+ rx_buffer = i40e_rx_bi(rx_ring, next);
+ if (++next == rx_ring->count)
+ next = 0;
+
+ if (!rx_buffer->page)
+ continue;
+
+ if (xdp_res == I40E_XDP_CONSUMED)
+ rx_buffer->pagecnt_bias++;
+ else
+ i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
+
+ /* EOP buffer will be put in i40e_clean_rx_irq() */
+ if (next == rx_ring->next_to_process)
+ return;
+
+ i40e_put_rx_buffer(rx_ring, rx_buffer);
+ }
+}
+
+/**
+ * i40e_construct_skb - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @xdp: xdp_buff pointing to the data
+ * @nr_frags: number of buffers for the packet
*
* This function allocates an skb. It then populates it with the page
* data from the current receive descriptor, taking care to set up the
* skb correctly.
*/
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
- struct xdp_buff *xdp)
+ struct xdp_buff *xdp,
+ u32 nr_frags)
{
unsigned int size = xdp->data_end - xdp->data;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
-#endif
+ struct i40e_rx_buffer *rx_buffer;
unsigned int headlen;
struct sk_buff *skb;
@@ -2152,48 +2181,60 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
memcpy(__skb_put(skb, headlen), xdp->data,
ALIGN(headlen, sizeof(long)));
+ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
/* update all of the pointers */
size -= headlen;
if (size) {
+ if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
skb_add_rx_frag(skb, 0, rx_buffer->page,
rx_buffer->page_offset + headlen,
- size, truesize);
-
+ size, xdp->frame_sz);
/* buffer is used by skb, update page_offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
+ i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
} else {
/* buffer is unused, reset bias back to rx_buffer */
rx_buffer->pagecnt_bias++;
}
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
+
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
+ sizeof(skb_frag_t) * nr_frags);
+
+ xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
+
+ /* First buffer has already been processed, so bump ntc */
+ if (++rx_ring->next_to_clean == rx_ring->count)
+ rx_ring->next_to_clean = 0;
+
+ i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp);
+ }
+
return skb;
}
/**
* i40e_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buffer: Rx buffer to pull data from
* @xdp: xdp_buff pointing to the data
+ * @nr_frags: number of buffers for the packet
*
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
- struct xdp_buff *xdp)
+ struct xdp_buff *xdp,
+ u32 nr_frags)
{
unsigned int metasize = xdp->data - xdp->data_meta;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(xdp->data_end -
- xdp->data_hard_start);
-#endif
struct sk_buff *skb;
/* Prefetch first cache line of first page. If xdp->data_meta
@@ -2204,7 +2245,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
if (unlikely(!skb))
return NULL;
@@ -2214,42 +2255,25 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
if (metasize)
skb_metadata_set(skb, metasize);
- /* buffer is used by skb, update page_offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ struct skb_shared_info *sinfo;
- return skb;
-}
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ xdp_update_skb_shared_info(skb, nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
-/**
- * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: rx buffer to pull data from
- * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call
- *
- * This function will clean up the contents of the rx_buffer. It will
- * either recycle the buffer or unmap it and free the associated resources.
- */
-static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
- int rx_buffer_pgcnt)
-{
- if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
- /* hand second half of page back to the ring */
- i40e_reuse_rx_page(rx_ring, rx_buffer);
+ i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp);
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
- i40e_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buffer->page,
- rx_buffer->pagecnt_bias);
- /* clear contents of buffer_info */
- rx_buffer->page = NULL;
+ struct i40e_rx_buffer *rx_buffer;
+
+ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
+ /* buffer is used by skb, update page_offset */
+ i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
}
+
+ return skb;
}
/**
@@ -2290,16 +2314,14 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
* i40e_run_xdp - run an XDP program
* @rx_ring: Rx ring being processed
* @xdp: XDP buffer containing the frame
+ * @xdp_prog: XDP program to run
**/
-static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
{
int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring;
- struct bpf_prog *xdp_prog;
u32 act;
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
-
if (!xdp_prog)
goto xdp_out;
@@ -2322,7 +2344,7 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
result = I40E_XDP_REDIR;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
@@ -2337,25 +2359,6 @@ xdp_out:
}
/**
- * i40e_rx_buffer_flip - adjusted rx_buffer to point to an unused region
- * @rx_ring: Rx ring
- * @rx_buffer: Rx buffer to adjust
- * @size: Size of adjustment
- **/
-static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *rx_buffer,
- unsigned int size)
-{
- unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size);
-
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
-}
-
-/**
* i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
* @xdp_ring: XDP Tx ring
*
@@ -2413,22 +2416,72 @@ void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
}
/**
- * i40e_inc_ntc: Advance the next_to_clean index
+ * i40e_inc_ntp: Advance the next_to_process index
* @rx_ring: Rx ring
**/
-static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+static void i40e_inc_ntp(struct i40e_ring *rx_ring)
+{
+ u32 ntp = rx_ring->next_to_process + 1;
+
+ ntp = (ntp < rx_ring->count) ? ntp : 0;
+ rx_ring->next_to_process = ntp;
+ prefetch(I40E_RX_DESC(rx_ring, ntp));
+}
+
+/**
+ * i40e_add_xdp_frag: Add a frag to xdp_buff
+ * @xdp: xdp_buff pointing to the data
+ * @nr_frags: return number of buffers for the packet
+ * @rx_buffer: rx_buffer holding data of the current frag
+ * @size: size of data of current frag
+ */
+static int i40e_add_xdp_frag(struct xdp_buff *xdp, u32 *nr_frags,
+ struct i40e_rx_buffer *rx_buffer, u32 size)
{
- u32 ntc = rx_ring->next_to_clean + 1;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+
+ if (!xdp_buff_has_frags(xdp)) {
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(xdp);
+ } else if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
+ /* Overflowing packet: All frags need to be dropped */
+ return -ENOMEM;
+ }
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
- prefetch(I40E_RX_DESC(rx_ring, ntc));
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buffer->page,
+ rx_buffer->page_offset, size);
+
+ sinfo->xdp_frags_size += size;
+
+ if (page_is_pfmemalloc(rx_buffer->page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+ *nr_frags = sinfo->nr_frags;
+
+ return 0;
+}
+
+/**
+ * i40e_consume_xdp_buff - Consume all the buffers of the packet and update ntc
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @xdp: xdp_buff pointing to the data
+ * @rx_buffer: rx_buffer of eop desc
+ */
+static void i40e_consume_xdp_buff(struct i40e_ring *rx_ring,
+ struct xdp_buff *xdp,
+ struct i40e_rx_buffer *rx_buffer)
+{
+ i40e_process_rx_buffs(rx_ring, I40E_XDP_CONSUMED, xdp);
+ i40e_put_rx_buffer(rx_ring, rx_buffer);
+ rx_ring->next_to_clean = rx_ring->next_to_process;
+ xdp->data = NULL;
}
/**
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
+ * @rx_cleaned: Out parameter set to the number of packets processed
*
* This function provides a "bounce buffer" approach to Rx interrupt
* processing. The advantage to this is that on systems that have
@@ -2437,37 +2490,39 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
*
* Returns amount of work completed
**/
-static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
+ unsigned int *rx_cleaned)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ u16 clean_threshold = rx_ring->count / 2;
unsigned int offset = rx_ring->rx_offset;
- struct sk_buff *skb = rx_ring->skb;
+ struct xdp_buff *xdp = &rx_ring->xdp;
unsigned int xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
bool failure = false;
- struct xdp_buff xdp;
int xdp_res = 0;
-#if (PAGE_SIZE < 8192)
- frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
-#endif
- xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
while (likely(total_rx_packets < (unsigned int)budget)) {
+ u16 ntp = rx_ring->next_to_process;
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
- int rx_buffer_pgcnt;
+ struct sk_buff *skb;
unsigned int size;
+ u32 nfrags = 0;
+ bool neop;
u64 qword;
/* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
+ if (cleaned_count >= clean_threshold) {
failure = failure ||
i40e_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
- rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ rx_desc = I40E_RX_DESC(rx_ring, ntp);
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
@@ -2486,8 +2541,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
i40e_clean_programming_status(rx_ring,
rx_desc->raw.qword[0],
qword);
- rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
- i40e_inc_ntc(rx_ring);
+ rx_buffer = i40e_rx_bi(rx_ring, ntp);
+ i40e_inc_ntp(rx_ring);
i40e_reuse_rx_page(rx_ring, rx_buffer);
cleaned_count++;
continue;
@@ -2498,78 +2553,89 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
if (!size)
break;
- i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
- rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
-
+ i40e_trace(clean_rx_irq, rx_ring, rx_desc, xdp);
/* retrieve a buffer from the ring */
- if (!skb) {
+ rx_buffer = i40e_get_rx_buffer(rx_ring, size);
+
+ neop = i40e_is_non_eop(rx_ring, rx_desc);
+ i40e_inc_ntp(rx_ring);
+
+ if (!xdp->data) {
unsigned char *hard_start;
hard_start = page_address(rx_buffer->page) +
rx_buffer->page_offset - offset;
- xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ xdp_prepare_buff(xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
- xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
+ xdp->frame_sz = i40e_rx_frame_truesize(rx_ring, size);
#endif
- xdp_res = i40e_run_xdp(rx_ring, &xdp);
+ } else if (i40e_add_xdp_frag(xdp, &nfrags, rx_buffer, size) &&
+ !neop) {
+ /* Overflowing packet: Drop all frags on EOP */
+ i40e_consume_xdp_buff(rx_ring, xdp, rx_buffer);
+ break;
}
+ if (neop)
+ continue;
+
+ xdp_res = i40e_run_xdp(rx_ring, xdp, xdp_prog);
+
if (xdp_res) {
- if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
- xdp_xmit |= xdp_res;
- i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
+ xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ i40e_process_rx_buffs(rx_ring, xdp_res, xdp);
+ size = xdp_get_buff_len(xdp);
+ } else if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+ i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
} else {
rx_buffer->pagecnt_bias++;
}
total_rx_bytes += size;
- total_rx_packets++;
- } else if (skb) {
- i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
- } else if (ring_uses_build_skb(rx_ring)) {
- skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
} else {
- skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
- }
+ if (ring_uses_build_skb(rx_ring))
+ skb = i40e_build_skb(rx_ring, xdp, nfrags);
+ else
+ skb = i40e_construct_skb(rx_ring, xdp, nfrags);
+
+ /* drop if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ i40e_consume_xdp_buff(rx_ring, xdp, rx_buffer);
+ break;
+ }
- /* exit if we failed to retrieve a buffer */
- if (!xdp_res && !skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- rx_buffer->pagecnt_bias++;
- break;
- }
+ if (i40e_cleanup_headers(rx_ring, skb, rx_desc))
+ goto process_next;
- i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
- cleaned_count++;
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
- i40e_inc_ntc(rx_ring);
- if (i40e_is_non_eop(rx_ring, rx_desc))
- continue;
+ /* populate checksum, VLAN, and protocol */
+ i40e_process_skb_fields(rx_ring, rx_desc, skb);
- if (xdp_res || i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
- skb = NULL;
- continue;
+ i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, xdp);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
-
- /* populate checksum, VLAN, and protocol */
- i40e_process_skb_fields(rx_ring, rx_desc, skb);
-
- i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
- napi_gro_receive(&rx_ring->q_vector->napi, skb);
- skb = NULL;
-
/* update budget accounting */
total_rx_packets++;
+process_next:
+ cleaned_count += nfrags + 1;
+ i40e_put_rx_buffer(rx_ring, rx_buffer);
+ rx_ring->next_to_clean = rx_ring->next_to_process;
+
+ xdp->data = NULL;
}
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
- rx_ring->skb = skb;
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
+ *rx_cleaned = total_rx_packets;
+
/* guarantee a trip back through this routine if there was a failure */
return failure ? budget : (int)total_rx_packets;
}
@@ -2692,6 +2758,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
container_of(napi, struct i40e_q_vector, napi);
struct i40e_vsi *vsi = q_vector->vsi;
struct i40e_ring *ring;
+ bool tx_clean_complete = true;
+ bool rx_clean_complete = true;
+ unsigned int tx_cleaned = 0;
+ unsigned int rx_cleaned = 0;
bool clean_complete = true;
bool arm_wb = false;
int budget_per_ring;
@@ -2708,10 +2778,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
i40e_for_each_ring(ring, q_vector->tx) {
bool wd = ring->xsk_pool ?
i40e_clean_xdp_tx_irq(vsi, ring) :
- i40e_clean_tx_irq(vsi, ring, budget);
+ i40e_clean_tx_irq(vsi, ring, budget, &tx_cleaned);
if (!wd) {
- clean_complete = false;
+ clean_complete = tx_clean_complete = false;
continue;
}
arm_wb |= ring->arm_wb;
@@ -2736,14 +2806,18 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
i40e_for_each_ring(ring, q_vector->rx) {
int cleaned = ring->xsk_pool ?
i40e_clean_rx_irq_zc(ring, budget_per_ring) :
- i40e_clean_rx_irq(ring, budget_per_ring);
+ i40e_clean_rx_irq(ring, budget_per_ring, &rx_cleaned);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
if (cleaned >= budget_per_ring)
- clean_complete = false;
+ clean_complete = rx_clean_complete = false;
}
+ if (!i40e_enabled_xdp_vsi(vsi))
+ trace_i40e_napi_poll(napi, q_vector, budget, budget_per_ring, rx_cleaned,
+ tx_cleaned, rx_clean_complete, tx_clean_complete);
+
/* If work not completed, return budget and polling will return */
if (!clean_complete) {
int cpu_id = smp_processor_id();
@@ -2989,7 +3063,7 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
rc = skb_cow_head(skb, 0);
if (rc < 0)
return rc;
- vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr = skb_vlan_eth_hdr(skb);
vhdr->h_vlan_TCI = htons(tx_flags >>
I40E_TX_FLAGS_VLAN_SHIFT);
} else {
@@ -3015,6 +3089,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
{
struct sk_buff *skb = first->skb;
u64 cd_cmd, cd_tso_len, cd_mss;
+ __be16 protocol;
union {
struct iphdr *v4;
struct ipv6hdr *v6;
@@ -3026,7 +3101,7 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
unsigned char *hdr;
} l4;
u32 paylen, l4_offset;
- u16 gso_segs, gso_size;
+ u16 gso_size;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3039,15 +3114,23 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
if (err < 0)
return err;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
ip.v4->tot_len = 0;
ip.v4->check = 0;
+
+ first->tx_flags |= I40E_TX_FLAGS_TSO;
} else {
ip.v6->payload_len = 0;
+ first->tx_flags |= I40E_TX_FLAGS_TSO;
}
if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
@@ -3100,10 +3183,9 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
/* pull values out of skb_shinfo */
gso_size = skb_shinfo(skb)->gso_size;
- gso_segs = skb_shinfo(skb)->gso_segs;
/* update GSO size and bytecount with header size */
- first->gso_segs = gso_segs;
+ first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* find the field values */
@@ -3187,13 +3269,29 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
unsigned char *exthdr;
u32 offset, cmd = 0;
__be16 frag_off;
+ __be16 protocol;
u8 l4_proto = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol)) {
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
+
+ /* Set the tx_flags to indicate the IP protocol type. This is
+ * required so that checksum header computation below is accurate.
+ */
+ if (ip.v4->version == 4)
+ *tx_flags |= I40E_TX_FLAGS_IPV4;
+ else
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
/* compute outer L2 header size */
offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
@@ -3373,6 +3471,8 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
/* Memory barrier before checking head and tail */
smp_mb();
+ ++tx_ring->tx_stats.tx_stopped;
+
/* Check again in a case another CPU has just made room available. */
if (likely(I40E_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
@@ -3662,7 +3762,8 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
u8 prio;
/* is DCB enabled at all? */
- if (vsi->tc_config.numtc == 1)
+ if (vsi->tc_config.numtc == 1 ||
+ i40e_is_tc_mqprio_enabled(vsi->back))
return netdev_pick_tx(netdev, skb, sb_dev);
prio = skb->priority;
@@ -3688,35 +3789,55 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
struct i40e_ring *xdp_ring)
{
- u16 i = xdp_ring->next_to_use;
- struct i40e_tx_buffer *tx_bi;
- struct i40e_tx_desc *tx_desc;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 i = 0, index = xdp_ring->next_to_use;
+ struct i40e_tx_buffer *tx_head = &xdp_ring->tx_bi[index];
+ struct i40e_tx_buffer *tx_bi = tx_head;
+ struct i40e_tx_desc *tx_desc = I40E_TX_DESC(xdp_ring, index);
void *data = xdpf->data;
u32 size = xdpf->len;
- dma_addr_t dma;
- if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
+ if (unlikely(I40E_DESC_UNUSED(xdp_ring) < 1 + nr_frags)) {
xdp_ring->tx_stats.tx_busy++;
return I40E_XDP_CONSUMED;
}
- dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(xdp_ring->dev, dma))
- return I40E_XDP_CONSUMED;
- tx_bi = &xdp_ring->tx_bi[i];
- tx_bi->bytecount = size;
- tx_bi->gso_segs = 1;
- tx_bi->xdpf = xdpf;
+ tx_head->bytecount = xdp_get_frame_len(xdpf);
+ tx_head->gso_segs = 1;
+ tx_head->xdpf = xdpf;
- /* record length, and DMA address */
- dma_unmap_len_set(tx_bi, len, size);
- dma_unmap_addr_set(tx_bi, dma, dma);
+ for (;;) {
+ dma_addr_t dma;
- tx_desc = I40E_TX_DESC(xdp_ring, i);
- tx_desc->buffer_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
- | I40E_TXD_CMD,
- 0, size, 0);
+ dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(xdp_ring->dev, dma))
+ goto unmap;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_bi, len, size);
+ dma_unmap_addr_set(tx_bi, dma, dma);
+
+ tx_desc->buffer_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz =
+ build_ctob(I40E_TX_DESC_CMD_ICRC, 0, size, 0);
+
+ if (++index == xdp_ring->count)
+ index = 0;
+
+ if (i == nr_frags)
+ break;
+
+ tx_bi = &xdp_ring->tx_bi[index];
+ tx_desc = I40E_TX_DESC(xdp_ring, index);
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ size = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
+
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
/* Make certain all of the status bits have been updated
* before next_to_watch is written.
@@ -3724,14 +3845,30 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
smp_wmb();
xdp_ring->xdp_tx_active++;
- i++;
- if (i == xdp_ring->count)
- i = 0;
- tx_bi->next_to_watch = tx_desc;
- xdp_ring->next_to_use = i;
+ tx_head->next_to_watch = tx_desc;
+ xdp_ring->next_to_use = index;
return I40E_XDP_TX;
+
+unmap:
+ for (;;) {
+ tx_bi = &xdp_ring->tx_bi[index];
+ if (dma_unmap_len(tx_bi, len))
+ dma_unmap_page(xdp_ring->dev,
+ dma_unmap_addr(tx_bi, dma),
+ dma_unmap_len(tx_bi, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_bi, len, 0);
+ if (tx_bi == tx_head)
+ break;
+
+ if (!index)
+ index += xdp_ring->count;
+ index--;
+ }
+
+ return I40E_XDP_CONSUMED;
}
/**
@@ -3749,7 +3886,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
struct i40e_tx_buffer *first;
u32 td_offset = 0;
u32 tx_flags = 0;
- __be16 protocol;
u32 td_cmd = 0;
u8 hdr_len = 0;
int tso, count;
@@ -3791,15 +3927,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
- /* obtain protocol of skb */
- protocol = vlan_get_protocol(skb);
-
- /* setup IPv4/IPv6 offloads */
- if (protocol == htons(ETH_P_IP))
- tx_flags |= I40E_TX_FLAGS_IPV4;
- else if (protocol == htons(ETH_P_IPV6))
- tx_flags |= I40E_TX_FLAGS_IPV6;
-
tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index bfc2845c99d1..8c3d24012c54 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -277,6 +277,7 @@ struct i40e_rx_buffer {
struct page *page;
__u32 page_offset;
__u16 pagecnt_bias;
+ __u32 page_count;
};
struct i40e_queue_stats {
@@ -290,6 +291,7 @@ struct i40e_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
+ u64 tx_stopped;
int prev_pkt_ctr;
};
@@ -298,7 +300,9 @@ struct i40e_rx_queue_stats {
u64 alloc_page_failed;
u64 alloc_buff_failed;
u64 page_reuse_count;
- u64 realloc_count;
+ u64 page_alloc_count;
+ u64 page_waive_count;
+ u64 page_busy_count;
};
enum i40e_ring_state_t {
@@ -333,6 +337,17 @@ struct i40e_ring {
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
+ /* Storing the xdp_buff on the ring helps save the state of a partially
+ * built packet when i40e_clean_rx_ring_irq() must return before it sees
+ * EOP, and lets packet building for this ring resume in the next call to
+ * i40e_clean_rx_ring_irq().
+ */
+ struct xdp_buff xdp;
+
+ /* Next descriptor to be processed; next_to_clean is updated only on
+ * processing EOP descriptor
+ */
+ u16 next_to_process;
/* high bit set means dynamic, use accessor routines to read/write.
* hardware only supports 2us resolution for the ITR registers.
* these values always store the USER setting, and must be converted
@@ -377,20 +392,11 @@ struct i40e_ring {
struct rcu_head rcu; /* to avoid race on free */
u16 next_to_alloc;
- struct sk_buff *skb; /* When i40e_clean_rx_ring_irq() must
- * return before it sees the EOP for
- * the current packet, we save that skb
- * here and resume receiving this
- * packet the next time
- * i40e_clean_rx_ring_irq() is called
- * for this ring.
- */
struct i40e_channel *ch;
u16 rx_offset;
struct xdp_rxq_info xdp_rxq;
struct xsk_buff_pool *xsk_pool;
- struct xdp_desc *xsk_descs; /* For storing descriptors in the AF_XDP ZC path */
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
@@ -467,7 +473,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);
/**
* i40e_get_head - Retrieve head from head writeback
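
The xdp_buff and next_to_process fields added above replace the ring's saved skb pointer: next_to_process walks every descriptor of a multi-buffer frame, while next_to_clean only catches up once the EOP descriptor has been handled. A minimal, driver-agnostic sketch of that two-cursor bookkeeping (toy types, not the i40e structures):

#include <stdbool.h>
#include <stdint.h>

struct toy_ring {
        uint16_t count;
        uint16_t next_to_clean;   /* first buffer of the frame being built */
        uint16_t next_to_process; /* next descriptor to look at */
};

static uint16_t toy_inc(const struct toy_ring *r, uint16_t idx)
{
        return (uint16_t)((idx + 1) == r->count ? 0 : idx + 1);
}

/* Called once per received descriptor; returns true when a complete frame
 * (EOP seen) can be handed to the stack and its buffers released.
 */
static bool toy_consume_desc(struct toy_ring *r, bool eop)
{
        r->next_to_process = toy_inc(r, r->next_to_process);
        if (!eop)
                return false;   /* partial frame, keep state across calls */

        r->next_to_clean = r->next_to_process;  /* frame complete */
        return true;
}

int main(void)
{
        struct toy_ring r = { .count = 8 };
        bool done;

        done = toy_consume_desc(&r, false);     /* frag 1 */
        done = toy_consume_desc(&r, false);     /* frag 2 */
        done = toy_consume_desc(&r, true);      /* EOP    */
        return (done && r.next_to_clean == 3) ? 0 : 1;
}
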
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 19da3b22160f..8c5118c8baaf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -20,6 +20,7 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
#define I40E_XDP_CONSUMED BIT(0)
#define I40E_XDP_TX BIT(1)
#define I40E_XDP_REDIR BIT(2)
+#define I40E_XDP_EXIT BIT(3)
/*
* build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 36a4ca1ffb1a..388c3d36d96a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1172,6 +1172,7 @@ struct i40e_eth_stats {
u64 tx_broadcast; /* bptc */
u64 tx_discards; /* tdpc */
u64 tx_errors; /* tepc */
+ u64 rx_discards_other; /* rxerr1 */
};
/* Statistics collected per VEB per TC */
@@ -1403,6 +1404,10 @@ struct i40e_lldp_variables {
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_X722_L3_SRC_SHIFT 49
+#define I40E_X722_L3_SRC_MASK (0x3ULL << I40E_X722_L3_SRC_SHIFT)
+#define I40E_X722_L3_DST_SHIFT 41
+#define I40E_X722_L3_DST_MASK (0x3ULL << I40E_X722_L3_DST_SHIFT)
#define I40E_L3_SRC_SHIFT 47
#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
#define I40E_L3_V6_SRC_SHIFT 43
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 2ea4deb8fc44..be59ba3774e1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -17,7 +17,7 @@
**/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
enum virtchnl_ops v_opcode,
- i40e_status v_retval, u8 *msg,
+ int v_retval, u8 *msg,
u16 msglen)
{
struct i40e_hw *hw = &pf->hw;
@@ -441,14 +441,14 @@ irq_list_done:
}
/**
- * i40e_release_iwarp_qvlist
+ * i40e_release_rdma_qvlist
* @vf: pointer to the VF.
*
**/
-static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
+static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
{
struct i40e_pf *pf = vf->pf;
- struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
+ struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info;
u32 msix_vf;
u32 i;
@@ -457,7 +457,7 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
for (i = 0; i < qvlist_info->num_vectors; i++) {
- struct virtchnl_iwarp_qv_info *qv_info;
+ struct virtchnl_rdma_qv_info *qv_info;
u32 next_q_index, next_q_type;
struct i40e_hw *hw = &pf->hw;
u32 v_idx, reg_idx, reg;
@@ -491,18 +491,19 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
}
/**
- * i40e_config_iwarp_qvlist
+ * i40e_config_rdma_qvlist
* @vf: pointer to the VF info
* @qvlist_info: queue and vector list
*
* Return 0 on success or < 0 on error
**/
-static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
- struct virtchnl_iwarp_qvlist_info *qvlist_info)
+static int
+i40e_config_rdma_qvlist(struct i40e_vf *vf,
+ struct virtchnl_rdma_qvlist_info *qvlist_info)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- struct virtchnl_iwarp_qv_info *qv_info;
+ struct virtchnl_rdma_qv_info *qv_info;
u32 v_idx, i, reg_idx, reg;
u32 next_q_idx, next_q_type;
u32 msix_vf;
@@ -1246,13 +1247,13 @@ err:
* @vl: List of VLANs - apply filter for given VLANs
* @num_vlans: Number of elements in @vl
**/
-static i40e_status
+static int
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
bool unicast_enable, s16 *vl, u16 num_vlans)
{
- i40e_status aq_ret, aq_tmp = 0;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
+ int aq_ret, aq_tmp = 0;
int i;
/* No VLAN to set promisc on, set on VSI */
@@ -1264,9 +1265,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
- "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
return aq_ret;
@@ -1280,9 +1281,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
- "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
}
@@ -1297,9 +1298,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
- "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
if (!aq_tmp)
@@ -1313,9 +1314,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
- "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
if (!aq_tmp)
@@ -1339,13 +1340,13 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
* Called from the VF to configure the promiscuous mode of
* VF vsis and from the VF reset path to reset promiscuous mode.
**/
-static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
- u16 vsi_id,
- bool allmulti,
- bool alluni)
+static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+ u16 vsi_id,
+ bool allmulti,
+ bool alluni)
{
- i40e_status aq_ret = I40E_SUCCESS;
struct i40e_pf *pf = vf->pf;
+ int aq_ret = I40E_SUCCESS;
struct i40e_vsi *vsi;
u16 num_vlans;
s16 *vl;
@@ -1377,6 +1378,32 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
}
/**
+ * i40e_sync_vfr_reset
+ * @hw: pointer to hw struct
+ * @vf_id: VF identifier
+ *
+ * Before triggering a hardware reset, we need to make sure that no other
+ * process has reserved the hardware for any reset operations. This check is done by
+ * examining the status of the RSTAT1 register used to signal the reset.
+ **/
+static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
+ reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (reg)
+ return 0;
+
+ usleep_range(100, 200);
+ }
+
+ return -EAGAIN;
+}
+
+/**
* i40e_trigger_vf_reset
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
@@ -1390,9 +1417,11 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
u32 reg, reg_idx, bit_idx;
+ bool vf_active;
+ u32 radq;
/* warn the VF */
- clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
/* Disable VF's configuration API during reset. The flag is re-enabled
* in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
@@ -1406,7 +1435,19 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
* just need to clean up, so don't hit the VFRTRIG register.
*/
if (!flr) {
- /* reset VF using VPGEN_VFRTRIG reg */
+ /* Sync VFR reset before triggering the next one */
+ radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (vf_active && !radq)
+ /* wait for the virtual driver to finish the reset */
+ if (i40e_sync_vfr_reset(hw, vf->vf_id))
+ dev_info(&pf->pdev->dev,
+ "Reset VF %d never finished\n",
+ vf->vf_id);
+
+ /* Reset VF using VPGEN_VFRTRIG reg. This also sets the
+ * reset-in-progress state in the rstat1 register.
+ */
reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
@@ -1496,10 +1537,12 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
return true;
- /* If the VFs have been disabled, this means something else is
- * resetting the VF, so we shouldn't continue.
- */
- if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
+ /* Bail out if VFs are disabled. */
+ if (test_bit(__I40E_VF_DISABLE, pf->state))
+ return true;
+
+ /* If VF is being reset already we don't need to continue. */
+ if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
return true;
i40e_trigger_vf_reset(vf, flr);
@@ -1536,7 +1579,8 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
i40e_cleanup_reset_vf(vf);
i40e_flush(hw);
- clear_bit(__I40E_VF_DISABLE, pf->state);
+ usleep_range(20000, 40000);
+ clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
return true;
}
@@ -1569,8 +1613,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
return false;
/* Begin reset on all VFs at once */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- i40e_trigger_vf_reset(&pf->vf[v], flr);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ vf = &pf->vf[v];
+ /* If VF is being reset no need to trigger reset again */
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ i40e_trigger_vf_reset(&pf->vf[v], flr);
+ }
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -1586,9 +1634,11 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
*/
while (v < pf->num_alloc_vfs) {
vf = &pf->vf[v];
- reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
- if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
- break;
+ if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
+ reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
+ if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
+ break;
+ }
/* If the current VF has finished resetting, move on
* to the next VF in sequence.
@@ -1616,6 +1666,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
if (pf->vf[v].lan_vsi_idx == 0)
continue;
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ continue;
+
i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
}
@@ -1627,6 +1681,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
if (pf->vf[v].lan_vsi_idx == 0)
continue;
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ continue;
+
i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
}
@@ -1636,10 +1694,16 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
mdelay(50);
/* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++)
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ /* If VF is reset in another thread just continue */
+ if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
+ continue;
+
i40e_cleanup_reset_vf(&pf->vf[v]);
+ }
i40e_flush(hw);
+ usleep_range(20000, 40000);
clear_bit(__I40E_VF_DISABLE, pf->state);
return true;
@@ -1892,7 +1956,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
struct i40e_pf *pf;
struct i40e_hw *hw;
int abs_vf_id;
- i40e_status aq_ret;
+ int aq_ret;
/* validate the request */
if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
@@ -1902,25 +1966,6 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
hw = &pf->hw;
abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
- /* single place to detect unsuccessful return values */
- if (v_retval) {
- vf->num_invalid_msgs++;
- dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
- vf->vf_id, v_opcode, v_retval);
- if (vf->num_invalid_msgs >
- I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
- dev_err(&pf->pdev->dev,
- "Number of invalid messages exceeded for VF %d\n",
- vf->vf_id);
- dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
- set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
- }
- } else {
- vf->num_valid_msgs++;
- /* reset the invalid counter, if a valid message is received. */
- vf->num_invalid_msgs = 0;
- }
-
aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret) {
@@ -1943,7 +1988,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
**/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
enum virtchnl_ops opcode,
- i40e_status retval)
+ int retval)
{
return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
@@ -2018,6 +2063,25 @@ static void i40e_del_qch(struct i40e_vf *vf)
}
/**
+ * i40e_vc_get_max_frame_size
+ * @vf: pointer to the VF
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN, so the PF needs to account for this both in max frame
+ * size checks and when sending the max frame size to the VF.
+ **/
+static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
+{
+ u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;
+
+ if (vf->port_vlan_id)
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
+
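Worked example (the port value here is only an assumption for illustration; the real value comes from hw.phy.link_info.max_frame_size): VLAN_HLEN is 4 bytes, so a VF behind a port VLAN on a port whose max frame size is 9728 bytes is reported max_mtu = 9728 - 4 = 9724, while a VF without a port VLAN is reported the full 9728.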
+/**
* i40e_vc_get_vf_resources_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2028,9 +2092,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_vf_resource *vfres = NULL;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
int num_vsis = 1;
+ int aq_ret = 0;
size_t len = 0;
int ret;
@@ -2060,11 +2124,11 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
if (i40e_vf_client_capable(pf, vf->vf_id) &&
- (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
- set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
+ (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) {
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA;
+ set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
} else {
- clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
}
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
@@ -2118,6 +2182,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+ vfres->max_mtu = i40e_vc_get_max_frame_size(vf);
if (vf->lan_vsi_idx) {
vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
@@ -2126,6 +2191,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
/* VFs only use TC 0 */
vfres->vsi_res[0].qset_handle
= le16_to_cpu(vsi->info.qs_handle[0]);
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) && !vf->pf_set_mac) {
+ i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
+ eth_zero_addr(vf->default_lan_addr.addr);
+ }
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->default_lan_addr.addr);
}
@@ -2153,9 +2222,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
bool allmulti = false;
bool alluni = false;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2240,10 +2309,10 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_pair_info *qpi;
u16 vsi_id, vsi_queue_id = 0;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
int i, j = 0, idx = 0;
struct i40e_vsi *vsi;
u16 num_qps_all = 0;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2261,7 +2330,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
}
if (vf->adq_enabled) {
- for (i = 0; i < I40E_MAX_VF_VSI; i++)
+ for (i = 0; i < vf->num_tc; i++)
num_qps_all += vf->ch[i].num_qps;
if (num_qps_all != qci->num_queue_pairs) {
aq_ret = I40E_ERR_PARAM;
@@ -2390,8 +2459,8 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_irq_map_info *irqmap_info =
(struct virtchnl_irq_map_info *)msg;
struct virtchnl_vector_map *map;
+ int aq_ret = 0;
u16 vsi_id;
- i40e_status aq_ret = 0;
int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -2506,7 +2575,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
@@ -2564,7 +2633,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2599,6 +2668,59 @@ error_param:
}
/**
+ * i40e_check_enough_queue - check whether enough queues are available
+ * @vf: pointer to the VF info
+ * @needed: the number of items needed
+ *
+ * Returns the base item index of the queue, or negative for error
+ **/
+static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
+{
+ unsigned int i, cur_queues, more, pool_size;
+ struct i40e_lump_tracking *pile;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ cur_queues = vsi->alloc_queue_pairs;
+
+ /* if the currently allocated queues are enough for the request */
+ if (cur_queues >= needed)
+ return vsi->base_queue;
+
+ pile = pf->qp_pile;
+ if (cur_queues > 0) {
+ /* if some queues are already allocated, check whether there
+ * are enough free queues available directly behind the
+ * allocated block.
+ */
+ more = needed - cur_queues;
+ for (i = vsi->base_queue + cur_queues;
+ i < pile->num_entries; i++) {
+ if (pile->list[i] & I40E_PILE_VALID_BIT)
+ break;
+
+ if (more-- == 1)
+ /* there is enough */
+ return vsi->base_queue;
+ }
+ }
+
+ pool_size = 0;
+ for (i = 0; i < pile->num_entries; i++) {
+ if (pile->list[i] & I40E_PILE_VALID_BIT) {
+ pool_size = 0;
+ continue;
+ }
+ if (needed <= ++pool_size)
+ /* there is enough */
+ return i;
+ }
+
+ return -ENOMEM;
+}
+
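For reference, a minimal standalone sketch of the fallback scan above, walking a qp-pile-style list for any contiguous run of free entries. Only the list/valid-bit semantics come from the hunk; the helper name and returning the base index are illustrative (the driver helper only needs a non-negative value to signal success):

	/* Illustrative sketch only: find the first run of 'needed' consecutive
	 * free entries in a validity-flagged list, restarting the count whenever
	 * an in-use entry breaks the run. Returns the base index of the run,
	 * or -1 when no run is long enough.
	 */
	static int find_free_run(const u16 *list, unsigned int num_entries,
				 u16 needed, u16 valid_bit)
	{
		unsigned int i, pool_size = 0;

		for (i = 0; i < num_entries; i++) {
			if (list[i] & valid_bit) {	/* entry already in use */
				pool_size = 0;		/* run broken, start over */
				continue;
			}
			if (++pool_size >= needed)
				return i - needed + 1;	/* base of the free run */
		}
		return -1;
	}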
+/**
* i40e_vc_request_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2632,6 +2754,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
req_pairs - cur_pairs,
pf->queues_left);
vfres->num_queue_pairs = pf->queues_left + cur_pairs;
+ } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d requested %d more queues, but there is not enough for it.\n",
+ vf->vf_id,
+ req_pairs - cur_pairs);
+ vfres->num_queue_pairs = cur_pairs;
} else {
/* successful request */
vf->num_req_queues = req_pairs;
@@ -2656,7 +2784,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_eth_stats stats;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
struct i40e_vsi *vsi;
memset(&stats, 0, sizeof(struct i40e_eth_stats));
@@ -2685,12 +2813,21 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
+#define I40E_MAX_MACVLAN_PER_HW 3072
+#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
+ (num_ports))
/* If the VF is not trusted restrict the number of MAC/VLAN it can program
* MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
*/
#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
#define I40E_VC_MAX_VLAN_PER_VF 16
+#define I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(vf_num, num_ports) \
+({ typeof(vf_num) vf_num_ = (vf_num); \
+ typeof(num_ports) num_ports_ = (num_ports); \
+ ((I40E_MAX_MACVLAN_PER_PF(num_ports_) - vf_num_ * \
+ I40E_VC_MAX_MAC_ADDR_PER_VF) / vf_num_) + \
+ I40E_VC_MAX_MAC_ADDR_PER_VF; })
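Worked example (port and VF counts are assumptions for illustration; the 3072 and the 16 + 1 + 1 reservation come from the defines above): on a 4-port device the per-PF pool is 3072 / 4 = 768 filters. With 8 VFs allocated and 18 filters reserved per VF, a trusted VF may program up to (768 - 8 * 18) / 8 + 18 = 78 + 18 = 96 MAC/VLAN filters.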
/**
* i40e_check_vf_permission
* @vf: pointer to the VF info
@@ -2713,6 +2850,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
{
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
+ struct i40e_hw *hw = &pf->hw;
int mac2add_cnt = 0;
int i;
@@ -2752,17 +2890,97 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
* number of addresses. Check to make sure that the additions do not
* push us over the limit.
*/
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
- (i40e_count_filters(vsi) + mac2add_cnt) >
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ if ((i40e_count_filters(vsi) + mac2add_cnt) >
I40E_VC_MAX_MAC_ADDR_PER_VF) {
- dev_err(&pf->pdev->dev,
- "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
- return -EPERM;
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+ return -EPERM;
+ }
+ /* If this VF is trusted, it can use more resources than untrusted.
+ * However, to ensure that every trusted VF gets an appropriate number of
+ * resources, divide the whole pool of resources per port and then across
+ * all VFs.
+ */
+ } else {
+ if ((i40e_count_filters(vsi) + mac2add_cnt) >
+ I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
+ hw->num_ports)) {
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
+ return -EPERM;
+ }
}
return 0;
}
/**
+ * i40e_vc_ether_addr_type - get type of virtchnl_ether_addr
+ * @vc_ether_addr: used to extract the type
+ **/
+static u8
+i40e_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
+{
+ return vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK;
+}
+
+/**
+ * i40e_is_vc_addr_legacy
+ * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
+ *
+ * check if the MAC address is from an older VF
+ **/
+static bool
+i40e_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
+{
+ return i40e_vc_ether_addr_type(vc_ether_addr) ==
+ VIRTCHNL_ETHER_ADDR_LEGACY;
+}
+
+/**
+ * i40e_is_vc_addr_primary
+ * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
+ *
+ * check if the MAC address is the VF's primary MAC
+ * This function should only be called when the MAC address in
+ * virtchnl_ether_addr is a valid unicast MAC
+ **/
+static bool
+i40e_is_vc_addr_primary(struct virtchnl_ether_addr *vc_ether_addr)
+{
+ return i40e_vc_ether_addr_type(vc_ether_addr) ==
+ VIRTCHNL_ETHER_ADDR_PRIMARY;
+}
+
+/**
+ * i40e_update_vf_mac_addr
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to add
+ *
+ * update the VF's cached hardware MAC if allowed
+ **/
+static void
+i40e_update_vf_mac_addr(struct i40e_vf *vf,
+ struct virtchnl_ether_addr *vc_ether_addr)
+{
+ u8 *mac_addr = vc_ether_addr->addr;
+
+ if (!is_valid_ether_addr(mac_addr))
+ return;
+
+ /* If the request to add a MAC filter is a primary request, update the
+ * VF's default MAC address with the requested one. If it is a legacy
+ * request, only update the default MAC when the current default is empty.
+ */
+ if (i40e_is_vc_addr_primary(vc_ether_addr)) {
+ ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
+ } else if (i40e_is_vc_addr_legacy(vc_ether_addr)) {
+ if (is_zero_ether_addr(vf->default_lan_addr.addr))
+ ether_addr_copy(vf->default_lan_addr.addr, mac_addr);
+ }
+}
+
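For example: if a legacy (pre-type-aware) VF adds a unicast address while the cached default is still zero, that address becomes the default; a later VIRTCHNL_ETHER_ADDR_PRIMARY request always overwrites the cached default, while further legacy adds leave an already-populated default untouched.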
+/**
* i40e_vc_add_mac_addr_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2775,7 +2993,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status ret = 0;
+ int ret = 0;
int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -2813,11 +3031,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
spin_unlock_bh(&vsi->mac_filter_hash_lock);
goto error_param;
}
- if (is_valid_ether_addr(al->list[i].addr) &&
- is_zero_ether_addr(vf->default_lan_addr.addr))
- ether_addr_copy(vf->default_lan_addr.addr,
- al->list[i].addr);
}
+ i40e_update_vf_mac_addr(vf, &al->list[i]);
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -2829,8 +3044,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
- ret);
+ return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ ret, NULL, 0);
}
/**
@@ -2847,7 +3062,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
bool was_unimac_deleted = false;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status ret = 0;
+ int ret = 0;
int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -2880,6 +3095,9 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ if (was_unimac_deleted)
+ eth_zero_addr(vf->default_lan_addr.addr);
+
/* program the updated filter list */
ret = i40e_sync_vsi_filters(vsi);
if (ret)
@@ -2920,7 +3138,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i;
if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
@@ -2991,7 +3209,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -3036,21 +3254,21 @@ error_param:
}
/**
- * i40e_vc_iwarp_msg
+ * i40e_vc_rdma_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
* called from the VF for the iwarp msgs
**/
-static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
struct i40e_pf *pf = vf->pf;
int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
+ !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -3060,42 +3278,42 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA,
aq_ret);
}
/**
- * i40e_vc_iwarp_qvmap_msg
+ * i40e_vc_rdma_qvmap_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @config: config qvmap or release it
*
* called from the VF for the iwarp msgs
**/
-static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
+static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
{
- struct virtchnl_iwarp_qvlist_info *qvlist_info =
- (struct virtchnl_iwarp_qvlist_info *)msg;
- i40e_status aq_ret = 0;
+ struct virtchnl_rdma_qvlist_info *qvlist_info =
+ (struct virtchnl_rdma_qvlist_info *)msg;
+ int aq_ret = 0;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
+ !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
if (config) {
- if (i40e_config_iwarp_qvlist(vf, qvlist_info))
+ if (i40e_config_rdma_qvlist(vf, qvlist_info))
aq_ret = I40E_ERR_PARAM;
} else {
- i40e_release_iwarp_qvlist(vf);
+ i40e_release_rdma_qvlist(vf);
}
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
- config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
- VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP :
+ VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
aq_ret);
}
@@ -3112,7 +3330,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_rss_key *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
@@ -3142,7 +3360,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_rss_lut *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
u16 i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -3177,7 +3395,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_rss_hena *vrh = NULL;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int len = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3214,7 +3432,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_rss_hena *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -3238,8 +3456,8 @@ err:
**/
static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
- i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -3264,8 +3482,8 @@ err:
**/
static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
- i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -3464,8 +3682,8 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
if (ret)
dev_err(&pf->pdev->dev,
- "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
- vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
+ vf->vf_id, ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
@@ -3491,7 +3709,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
struct hlist_node *node;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i, ret;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3567,8 +3785,8 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
if (ret) {
dev_err(&pf->pdev->dev,
- "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
- vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
+ vf->vf_id, ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto err;
}
@@ -3622,7 +3840,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
struct i40e_cloud_filter *cfilter = NULL;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i, ret;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3701,8 +3919,8 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
if (ret) {
dev_err(&pf->pdev->dev,
- "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
- vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ "VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
+ vf->vf_id, ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto err_free;
}
@@ -3731,7 +3949,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
struct i40e_link_status *ls = &pf->hw.phy.link_info;
int i, adq_request_qps = 0;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
u64 speed = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3843,7 +4061,7 @@ err:
static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
{
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -3961,14 +4179,14 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
case VIRTCHNL_OP_GET_STATS:
ret = i40e_vc_get_stats_msg(vf, msg);
break;
- case VIRTCHNL_OP_IWARP:
- ret = i40e_vc_iwarp_msg(vf, msg, msglen);
+ case VIRTCHNL_OP_RDMA:
+ ret = i40e_vc_rdma_msg(vf, msg, msglen);
break;
- case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
- ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
+ case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
+ ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);
break;
- case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
- ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
+ case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
+ ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
ret = i40e_vc_config_rss_key(vf, msg);
@@ -4245,6 +4463,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
/* duplicate request, so just return success */
goto error_pvid;
+ i40e_vlan_stripping_enable(vsi);
i40e_vc_reset_vf(vf, true);
/* During reset the VF got a new VSI, so refresh a pointer. */
vsi = pf->vsi[vf->lan_vsi_idx];
@@ -4260,7 +4479,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
* MAC addresses deleted.
*/
if ((!(vlan_id || qos) ||
- vlanprio != le16_to_cpu(vsi->info.pvid)) &&
+ vlanprio != le16_to_cpu(vsi->info.pvid)) &&
vsi->info.pvid) {
ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
if (ret) {
@@ -4623,6 +4842,11 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
goto out;
vf->trusted = setting;
+
+ /* request PF to sync mac/vlan filters for the VF */
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
+ pf->vsi[vf->lan_vsi_idx]->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+
i40e_vc_reset_vf(vf, true);
dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
vf_id, setting ? "" : "un");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 49575a640a84..895b8feb2567 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -10,8 +10,6 @@
#define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
-#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
-
#define I40E_VLAN_PRIORITY_SHIFT 13
#define I40E_VLAN_MASK 0xFFF
#define I40E_PRIORITY_MASK 0xE000
@@ -19,6 +17,7 @@
#define I40E_MAX_VF_PROMISC_FLAGS 3
#define I40E_VF_STATE_WAIT_COUNT 20
+#define I40E_VFR_WAIT_COUNT 100
/* Various queue ctrls */
enum i40e_queue_ctrl {
@@ -35,18 +34,19 @@ enum i40e_queue_ctrl {
enum i40e_vf_states {
I40E_VF_STATE_INIT = 0,
I40E_VF_STATE_ACTIVE,
- I40E_VF_STATE_IWARPENA,
+ I40E_VF_STATE_RDMAENA,
I40E_VF_STATE_DISABLED,
I40E_VF_STATE_MC_PROMISC,
I40E_VF_STATE_UC_PROMISC,
I40E_VF_STATE_PRE_ENABLE,
+ I40E_VF_STATE_RESETTING
};
/* VF capabilities */
enum i40e_vf_capabilities {
I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
I40E_VIRTCHNL_VF_CAP_L2,
- I40E_VIRTCHNL_VF_CAP_IWARP,
+ I40E_VIRTCHNL_VF_CAP_RDMA,
};
/* In ADq, max 4 VSI's can be allocated per VF including primary VF VSI.
@@ -91,9 +91,6 @@ struct i40e_vf {
u8 num_queue_pairs; /* num of qps assigned to VF vsis */
u8 num_req_queues; /* num of requested qps */
u64 num_mdd_events; /* num of mdd events detected */
- /* num of continuous malformed or invalid msgs detected */
- u64 num_invalid_msgs;
- u64 num_valid_msgs; /* num of valid msgs detected */
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
@@ -111,7 +108,7 @@ struct i40e_vf {
u16 num_cloud_filters;
/* RDMA Client */
- struct virtchnl_iwarp_qvlist_info *qvlist_info;
+ struct virtchnl_rdma_qvlist_info *qvlist_info;
};
void i40e_free_vfs(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index ea06e957393e..cd7b52fb6b46 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -10,14 +10,6 @@
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
-{
- unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;
-
- rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
- return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
-}
-
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
memset(rx_ring->rx_bi_zc, 0,
@@ -30,6 +22,58 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
}
/**
+ * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
+ * @rx_ring: Current rx ring
+ * @pool_present: is pool for XSK present
+ *
+ * Try to allocate new memory and return -ENOMEM if the allocation fails.
+ * If the allocation succeeds, substitute the buffer with the newly allocated one.
+ * Returns 0 on success, negative on failure
+ */
+static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
+{
+ size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
+ sizeof(*rx_ring->rx_bi);
+ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+ if (!sw_ring)
+ return -ENOMEM;
+
+ if (pool_present) {
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+ rx_ring->rx_bi_zc = sw_ring;
+ } else {
+ kfree(rx_ring->rx_bi_zc);
+ rx_ring->rx_bi_zc = NULL;
+ rx_ring->rx_bi = sw_ring;
+ }
+ return 0;
+}
+
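Note the ordering in the helper above: the replacement SW ring is allocated first and the old one is freed only after the allocation succeeds, so on -ENOMEM the ring keeps its current, still valid buffer array. The reallocation is needed because the two paths use different element types (struct xdp_buff pointers for zero copy versus the regular rx_bi buffer entries), so the existing array cannot simply be reused when a pool is attached or detached.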
+/**
+ * i40e_realloc_rx_bi_zc - reallocate rx SW rings
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate buffer for rx_rings that might be used by XSK.
+ * XDP requires more memory than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
+{
+ struct i40e_ring *rx_ring;
+ unsigned long q;
+
+ for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
+ rx_ring = vsi->rx_rings[q];
+ if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
* i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
* certain ring/qid
* @vsi: Current VSI
@@ -69,6 +113,10 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
if (err)
return err;
+ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
+ if (err)
+ return err;
+
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
@@ -113,6 +161,9 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
if (if_running) {
+ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
+ if (err)
+ return err;
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
@@ -143,27 +194,28 @@ int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
* i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
+ * @xdp_prog: XDP program to run
*
* Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
**/
-static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
{
int err, result = I40E_XDP_PASS;
struct i40e_ring *xdp_ring;
- struct bpf_prog *xdp_prog;
u32 act;
- /* NB! xdp_prog will always be !NULL, due to the fact that
- * this path is enabled by setting an XDP program.
- */
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
- goto out_failure;
- return I40E_XDP_REDIR;
+ if (!err)
+ return I40E_XDP_REDIR;
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = I40E_XDP_EXIT;
+ else
+ result = I40E_XDP_CONSUMED;
+ goto out_failure;
}
switch (act) {
@@ -175,16 +227,16 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
if (result == I40E_XDP_CONSUMED)
goto out_failure;
break;
+ case XDP_DROP:
+ result = I40E_XDP_CONSUMED;
+ break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
+ result = I40E_XDP_CONSUMED;
out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- fallthrough; /* handle aborts by dropping packet */
- case XDP_DROP:
- result = I40E_XDP_CONSUMED;
- break;
}
return result;
}
@@ -218,7 +270,6 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
ntu += nb_buffs;
if (ntu == rx_ring->count) {
rx_desc = I40E_RX_DESC(rx_ring, 0);
- xdp = i40e_rx_bi(rx_ring, 0);
ntu = 0;
}
@@ -241,21 +292,25 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
struct xdp_buff *xdp)
{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
struct sk_buff *skb;
+ net_prefetch(xdp->data_meta);
+
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- xdp->data_end - xdp->data_hard_start,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
goto out;
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
- if (metasize)
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
out:
xsk_buff_free(xdp);
@@ -268,7 +323,8 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
unsigned int *rx_packets,
unsigned int *rx_bytes,
unsigned int size,
- unsigned int xdp_res)
+ unsigned int xdp_res,
+ bool *failure)
{
struct sk_buff *skb;
@@ -278,11 +334,15 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
return;
+ if (xdp_res == I40E_XDP_EXIT) {
+ *failure = true;
+ return;
+ }
+
if (xdp_res == I40E_XDP_CONSUMED) {
xsk_buff_free(xdp_buff);
return;
}
-
if (xdp_res == I40E_XDP_PASS) {
/* NB! We are not checking for errors using
* i40e_test_staterr with
@@ -324,11 +384,17 @@ static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
u16 next_to_clean = rx_ring->next_to_clean;
u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
bool failure = false;
+ u16 cleaned_count;
+
+ /* NB! xdp_prog will always be !NULL, due to the fact that
+ * this path is enabled by setting an XDP program.
+ */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
@@ -366,9 +432,11 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
xsk_buff_set_size(bi, size);
xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
- xdp_res = i40e_run_xdp_zc(rx_ring, bi);
+ xdp_res = i40e_run_xdp_zc(rx_ring, bi, xdp_prog);
i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
- &rx_bytes, size, xdp_res);
+ &rx_bytes, size, xdp_res, &failure);
+ if (failure)
+ break;
total_rx_packets += rx_packets;
total_rx_bytes += rx_bytes;
xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
@@ -379,7 +447,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
if (cleaned_count >= I40E_RX_BUFFER_WRITE)
- failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+ failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
@@ -467,11 +535,11 @@ static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
**/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
- struct xdp_desc *descs = xdp_ring->xsk_descs;
+ struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
u32 nb_pkts, nb_processed = 0;
unsigned int total_bytes = 0;
- nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
+ nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
if (!nb_pkts)
return true;
@@ -591,13 +659,13 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
return -ENETDOWN;
if (!i40e_enabled_xdp_vsi(vsi))
- return -ENXIO;
+ return -EINVAL;
if (queue_id >= vsi->num_queue_pairs)
- return -ENXIO;
+ return -EINVAL;
if (!vsi->xdp_rings[queue_id]->xsk_pool)
- return -ENXIO;
+ return -EINVAL;
ring = vsi->xdp_rings[queue_id];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index ea88f4597a07..821df248f8be 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -22,7 +22,6 @@
struct i40e_vsi;
struct xsk_buff_pool;
-struct zero_copy_allocator;
int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
@@ -33,7 +32,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
+int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc);
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
#endif /* _I40E_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 3789269ce741..9abaff1f2aff 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -6,7 +6,6 @@
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/aer.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
@@ -30,6 +29,7 @@
#include <linux/jiffies.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
@@ -44,6 +44,9 @@
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "iavf: "
+int iavf_status_to_errno(enum iavf_status status);
+int virtchnl_status_to_errno(enum virtchnl_status_code v_status);
+
/* VSI state flags shared with common code */
enum iavf_vsi_state_t {
__IAVF_VSI_DOWN,
@@ -55,12 +58,10 @@ enum iavf_vsi_state_t {
struct iavf_vsi {
struct iavf_adapter *back;
struct net_device *netdev;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 seid;
u16 id;
DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
int base_vector;
- u16 work_limit;
u16 qs_handle;
void *priv; /* client driver data reference. */
};
@@ -89,6 +90,7 @@ struct iavf_vsi {
#define IAVF_HKEY_ARRAY_SIZE ((IAVF_VFQF_HKEY_MAX_INDEX + 1) * 4)
#define IAVF_HLUT_ARRAY_SIZE ((IAVF_VFQF_HLUT_MAX_INDEX + 1) * 4)
#define IAVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */
+#define IAVF_MBPS_QUANTA 50
#define IAVF_VIRTCHNL_VF_RESOURCE_SIZE (sizeof(struct virtchnl_vf_resource) + \
(IAVF_MAX_VF_VSI * \
@@ -137,16 +139,36 @@ struct iavf_q_vector {
struct iavf_mac_filter {
struct list_head list;
u8 macaddr[ETH_ALEN];
- bool is_new_mac; /* filter is new, wait for PF decision */
- bool remove; /* filter needs to be removed */
- bool add; /* filter needs to be added */
+ struct {
+ u8 is_new_mac:1; /* filter is new, wait for PF decision */
+ u8 remove:1; /* filter needs to be removed */
+ u8 add:1; /* filter needs to be added */
+ u8 is_primary:1; /* filter is a default VF MAC */
+ u8 add_handled:1; /* received response for filter add */
+ u8 padding:3;
+ };
+};
+
+#define IAVF_VLAN(vid, tpid) ((struct iavf_vlan){ vid, tpid })
+struct iavf_vlan {
+ u16 vid;
+ u16 tpid;
+};
+
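The IAVF_VLAN() helper is a compound-literal constructor, so a VLAN key can be built in one expression. A hypothetical caller (the locals 'f' and 'match' are illustrative only; ETH_P_8021Q is the standard C-tag TPID from if_ether.h):

	struct iavf_vlan key = IAVF_VLAN(100, ETH_P_8021Q);	/* VID 100, C-tag */

	if (f->vlan.vid == key.vid && f->vlan.tpid == key.tpid)
		match = true;	/* matching filter found */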
+enum iavf_vlan_state_t {
+ IAVF_VLAN_INVALID,
+ IAVF_VLAN_ADD, /* filter needs to be added */
+ IAVF_VLAN_IS_NEW, /* filter is new, wait for PF answer */
+ IAVF_VLAN_ACTIVE, /* filter is accepted by PF */
+ IAVF_VLAN_DISABLE, /* filter needs to be deleted by PF, then marked INACTIVE */
+ IAVF_VLAN_INACTIVE, /* filter is inactive, we are in IFF_DOWN */
+ IAVF_VLAN_REMOVE, /* filter needs to be removed from list */
};
struct iavf_vlan_filter {
struct list_head list;
- u16 vlan;
- bool remove; /* filter needs to be removed */
- bool add; /* filter needs to be added */
+ struct iavf_vlan vlan;
+ enum iavf_vlan_state_t state;
};
#define IAVF_MAX_TRAFFIC_CLASS 4
@@ -177,6 +199,8 @@ enum iavf_state_t {
__IAVF_REMOVE, /* driver is being unloaded */
__IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
__IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
+ __IAVF_INIT_EXTENDED_CAPS, /* process extended caps which require aq msg exchange */
+ __IAVF_INIT_CONFIG_ADAPTER,
__IAVF_INIT_SW, /* got resources, setting up structs */
__IAVF_INIT_FAILED, /* init failed, restarting procedure */
__IAVF_RESETTING, /* in reset */
@@ -188,6 +212,10 @@ enum iavf_state_t {
__IAVF_RUNNING, /* opened, working */
};
+enum iavf_critical_section_t {
+ __IAVF_IN_REMOVE_TASK, /* device being removed */
+};
+
#define IAVF_CLOUD_FIELD_OMAC 0x01
#define IAVF_CLOUD_FIELD_IMAC 0x02
#define IAVF_CLOUD_FIELD_IVLAN 0x04
@@ -224,16 +252,18 @@ struct iavf_cloud_filter {
/* board specific private data structure */
struct iavf_adapter {
+ struct workqueue_struct *wq;
struct work_struct reset_task;
struct work_struct adminq_task;
struct delayed_work client_task;
wait_queue_head_t down_waitqueue;
+ wait_queue_head_t vc_waitqueue;
struct iavf_q_vector *q_vectors;
struct list_head vlan_filter_list;
+ int num_vlan_filters;
struct list_head mac_filter_list;
struct mutex crit_lock;
struct mutex client_lock;
- struct mutex remove_lock;
/* Lock to protect accesses to MAC and VLAN lists */
spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
@@ -250,8 +280,8 @@ struct iavf_adapter {
u64 hw_csum_rx_error;
u32 rx_desc_count;
int num_msix_vectors;
- int num_iwarp_msix;
- int iwarp_base_vector;
+ int num_rdma_msix;
+ int rdma_base_vector;
u32 client_pending;
struct iavf_client_instance *cinst;
struct msix_entry *msix_entries;
@@ -271,41 +301,67 @@ struct iavf_adapter {
#define IAVF_FLAG_LEGACY_RX BIT(15)
#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
+#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
+#define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20)
/* duplicates for common code */
#define IAVF_FLAG_DCB_ENABLED 0
/* flags for admin queue service task */
- u32 aq_required;
-#define IAVF_FLAG_AQ_ENABLE_QUEUES BIT(0)
-#define IAVF_FLAG_AQ_DISABLE_QUEUES BIT(1)
-#define IAVF_FLAG_AQ_ADD_MAC_FILTER BIT(2)
-#define IAVF_FLAG_AQ_ADD_VLAN_FILTER BIT(3)
-#define IAVF_FLAG_AQ_DEL_MAC_FILTER BIT(4)
-#define IAVF_FLAG_AQ_DEL_VLAN_FILTER BIT(5)
-#define IAVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
-#define IAVF_FLAG_AQ_MAP_VECTORS BIT(7)
-#define IAVF_FLAG_AQ_HANDLE_RESET BIT(8)
-#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */
-#define IAVF_FLAG_AQ_GET_CONFIG BIT(10)
+ u64 aq_required;
+#define IAVF_FLAG_AQ_ENABLE_QUEUES BIT_ULL(0)
+#define IAVF_FLAG_AQ_DISABLE_QUEUES BIT_ULL(1)
+#define IAVF_FLAG_AQ_ADD_MAC_FILTER BIT_ULL(2)
+#define IAVF_FLAG_AQ_ADD_VLAN_FILTER BIT_ULL(3)
+#define IAVF_FLAG_AQ_DEL_MAC_FILTER BIT_ULL(4)
+#define IAVF_FLAG_AQ_DEL_VLAN_FILTER BIT_ULL(5)
+#define IAVF_FLAG_AQ_CONFIGURE_QUEUES BIT_ULL(6)
+#define IAVF_FLAG_AQ_MAP_VECTORS BIT_ULL(7)
+#define IAVF_FLAG_AQ_HANDLE_RESET BIT_ULL(8)
+#define IAVF_FLAG_AQ_CONFIGURE_RSS BIT_ULL(9) /* direct AQ config */
+#define IAVF_FLAG_AQ_GET_CONFIG BIT_ULL(10)
/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
-#define IAVF_FLAG_AQ_GET_HENA BIT(11)
-#define IAVF_FLAG_AQ_SET_HENA BIT(12)
-#define IAVF_FLAG_AQ_SET_RSS_KEY BIT(13)
-#define IAVF_FLAG_AQ_SET_RSS_LUT BIT(14)
-#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT(15)
-#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT(16)
-#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17)
-#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18)
-#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19)
-#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20)
-#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT(21)
-#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT(22)
-#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23)
-#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24)
-#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT(25)
-#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26)
-#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT(27)
-#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT(28)
-#define IAVF_FLAG_AQ_REQUEST_STATS BIT(29)
+#define IAVF_FLAG_AQ_GET_HENA BIT_ULL(11)
+#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
+#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
+#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
+#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15)
+#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16)
+#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17)
+#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18)
+#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19)
+#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20)
+#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21)
+#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT_ULL(22)
+#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT_ULL(23)
+#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT_ULL(24)
+#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT_ULL(25)
+#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT_ULL(26)
+#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT_ULL(27)
+#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT_ULL(28)
+#define IAVF_FLAG_AQ_REQUEST_STATS BIT_ULL(29)
+#define IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS BIT_ULL(30)
+#define IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING BIT_ULL(31)
+#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING BIT_ULL(32)
+#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING BIT_ULL(33)
+#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING BIT_ULL(34)
+#define IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION BIT_ULL(35)
+#define IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION BIT_ULL(36)
+#define IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION BIT_ULL(37)
+#define IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION BIT_ULL(38)
+
+ /* flags for processing extended capability messages during
+ * __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires
+ * both a SEND and a RECV step, which must be processed in sequence.
+ *
+ * During the __IAVF_INIT_EXTENDED_CAPS state, the driver will
+ * process one flag at a time during each state loop.
+ */
+ u64 extended_caps;
+#define IAVF_EXTENDED_CAP_SEND_VLAN_V2 BIT_ULL(0)
+#define IAVF_EXTENDED_CAP_RECV_VLAN_V2 BIT_ULL(1)
+
+#define IAVF_EXTENDED_CAPS \
+ (IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
+ IAVF_EXTENDED_CAP_RECV_VLAN_V2)
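A minimal sketch of a one-flag-per-pass handler for __IAVF_INIT_EXTENDED_CAPS, assuming the two-step SEND/RECV flow described in the comment above; the function name and the actions in its comments are hypothetical, only the flag bits come from this header:

	/* Hypothetical sketch: consume at most one SEND/RECV flag per call, so
	 * each capability exchange spans two passes of the init state machine.
	 */
	static void iavf_process_one_extended_cap(struct iavf_adapter *adapter)
	{
		if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
			/* send the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS request here */
			adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
			return;
		}

		if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
			/* parse the PF reply into adapter->vlan_v2_caps here */
			adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
			return;
		}

		/* no flags left: all extended capability exchanges are complete */
	}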
/* OS defined structs */
struct net_device *netdev;
@@ -332,7 +388,7 @@ struct iavf_adapter {
enum virtchnl_ops current_op;
#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
(_a)->vf_res->vf_cap_flags & \
- VIRTCHNL_VF_OFFLOAD_IWARP : \
+ VIRTCHNL_VF_OFFLOAD_RDMA : \
0)
#define CLIENT_ENABLED(_a) ((_a)->cinst)
/* RSS by the PF should be preferred over RSS via other methods. */
@@ -345,6 +401,14 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_RSS_PF)))
#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_VLAN)
+#define VLAN_V2_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_VLAN_V2)
+#define VLAN_V2_FILTERING_ALLOWED(_a) \
+ (VLAN_V2_ALLOWED((_a)) && \
+ ((_a)->vlan_v2_caps.filtering.filtering_support.outer || \
+ (_a)->vlan_v2_caps.filtering.filtering_support.inner))
+#define VLAN_FILTERING_ALLOWED(_a) \
+ (VLAN_ALLOWED((_a)) || VLAN_V2_FILTERING_ALLOWED((_a)))
#define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
#define FDIR_FLTR_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
@@ -356,6 +420,7 @@ struct iavf_adapter {
struct virtchnl_version_info pf_version;
#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
((_a)->pf_version.minor == 1))
+ struct virtchnl_vlan_caps vlan_v2_caps;
u16 msg_enable;
struct iavf_eth_stats current_stats;
struct iavf_vsi vsi;
@@ -373,6 +438,11 @@ struct iavf_adapter {
/* lock to protect access to the cloud filter list */
spinlock_t cloud_filter_list_lock;
u16 num_cloud_filters;
+ /* snapshot of "num_active_queues" before setup_tc for qdisc add
+ * is invoked. This information is useful during qdisc del flow,
+ * to restore correct number of queues
+ */
+ int orig_num_active_queues;
#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */
u16 fdir_active_fltr;
@@ -394,7 +464,6 @@ struct iavf_device {
/* needed by iavf_ethtool.c */
extern char iavf_driver_name[];
-extern struct workqueue_struct *iavf_wq;
static inline const char *iavf_state_str(enum iavf_state_t state)
{
@@ -407,6 +476,10 @@ static inline const char *iavf_state_str(enum iavf_state_t state)
return "__IAVF_INIT_VERSION_CHECK";
case __IAVF_INIT_GET_RESOURCES:
return "__IAVF_INIT_GET_RESOURCES";
+ case __IAVF_INIT_EXTENDED_CAPS:
+ return "__IAVF_INIT_EXTENDED_CAPS";
+ case __IAVF_INIT_CONFIG_ADAPTER:
+ return "__IAVF_INIT_CONFIG_ADAPTER";
case __IAVF_INIT_SW:
return "__IAVF_INIT_SW";
case __IAVF_INIT_FAILED:
@@ -444,6 +517,7 @@ static inline void iavf_change_state(struct iavf_adapter *adapter,
int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
+int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
void iavf_schedule_reset(struct iavf_adapter *adapter);
void iavf_schedule_request_stats(struct iavf_adapter *adapter);
void iavf_reset(struct iavf_adapter *adapter);
@@ -462,6 +536,10 @@ int iavf_send_api_ver(struct iavf_adapter *adapter);
int iavf_verify_api_ver(struct iavf_adapter *adapter);
int iavf_send_vf_config_msg(struct iavf_adapter *adapter);
int iavf_get_vf_config(struct iavf_adapter *adapter);
+int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter);
+int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter);
+void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter);
+u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter);
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush);
void iavf_configure_queues(struct iavf_adapter *adapter);
void iavf_deconfigure_queues(struct iavf_adapter *adapter);
@@ -475,7 +553,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter);
void iavf_del_vlans(struct iavf_adapter *adapter);
void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
void iavf_request_stats(struct iavf_adapter *adapter);
-void iavf_request_reset(struct iavf_adapter *adapter);
+int iavf_request_reset(struct iavf_adapter *adapter);
void iavf_get_hena(struct iavf_adapter *adapter);
void iavf_set_hena(struct iavf_adapter *adapter);
void iavf_set_rss_key(struct iavf_adapter *adapter);
@@ -497,6 +575,16 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
+void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
+void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid);
+void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
+void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid);
+int iavf_replace_primary_mac(struct iavf_adapter *adapter,
+ const u8 *new_mac);
+void
+iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
+ netdev_features_t prev_features,
+ netdev_features_t features);
void iavf_add_fdir_filter(struct iavf_adapter *adapter);
void iavf_del_fdir_filter(struct iavf_adapter *adapter);
void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.c b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
index 9fa3fa99b4c2..9ffbd24d83cb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
@@ -324,6 +324,7 @@ static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
@@ -354,12 +355,17 @@ static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_asq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_asq_bufs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
goto init_adminq_exit;
+init_free_asq_bufs:
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
init_adminq_free_rings:
iavf_free_adminq_asq(hw);
@@ -383,6 +389,7 @@ init_adminq_exit:
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
enum iavf_status ret_code = 0;
+ int i;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
@@ -413,12 +420,16 @@ static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
/* initialize base registers */
ret_code = iavf_config_arq_regs(hw);
if (ret_code)
- goto init_adminq_free_rings;
+ goto init_free_arq_bufs;
/* success! */
hw->aq.arq.count = hw->aq.num_arq_entries;
goto init_adminq_exit;
+init_free_arq_bufs:
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
init_adminq_free_rings:
iavf_free_adminq_arq(hw);
@@ -551,15 +562,13 @@ init_adminq_exit:
**/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
- enum iavf_status ret_code = 0;
-
if (iavf_check_asq_alive(hw))
iavf_aq_queue_shutdown(hw, true);
iavf_shutdown_asq(hw);
iavf_shutdown_arq(hw);
- return ret_code;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c
index 0c77e4171808..93c903c02c64 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_client.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_client.c
@@ -127,7 +127,7 @@ void iavf_notify_client_open(struct iavf_vsi *vsi)
}
/**
- * iavf_client_release_qvlist - send a message to the PF to release iwarp qv map
+ * iavf_client_release_qvlist - send a message to the PF to release rdma qv map
* @ldev: pointer to L2 context.
*
* Return 0 on success or < 0 on error
@@ -141,12 +141,12 @@ static int iavf_client_release_qvlist(struct iavf_info *ldev)
return -EAGAIN;
err = iavf_aq_send_msg_to_pf(&adapter->hw,
- VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
IAVF_SUCCESS, NULL, 0, NULL);
if (err)
dev_err(&adapter->pdev->dev,
- "Unable to send iWarp vector release message to PF, error %d, aq status %d\n",
+ "Unable to send RDMA vector release message to PF, error %d, aq status %d\n",
err, adapter->hw.aq.asq_last_status);
return err;
@@ -215,9 +215,9 @@ iavf_client_add_instance(struct iavf_adapter *adapter)
cinst->lan_info.params = params;
set_bit(__IAVF_CLIENT_INSTANCE_NONE, &cinst->state);
- cinst->lan_info.msix_count = adapter->num_iwarp_msix;
+ cinst->lan_info.msix_count = adapter->num_rdma_msix;
cinst->lan_info.msix_entries =
- &adapter->msix_entries[adapter->iwarp_base_vector];
+ &adapter->msix_entries[adapter->rdma_base_vector];
mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
struct netdev_hw_addr, list);
@@ -425,17 +425,17 @@ static u32 iavf_client_virtchnl_send(struct iavf_info *ldev,
if (adapter->aq_required)
return -EAGAIN;
- err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP,
+ err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_RDMA,
IAVF_SUCCESS, msg, len, NULL);
if (err)
- dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
+ dev_err(&adapter->pdev->dev, "Unable to send RDMA message to PF, error %d, aq status %d\n",
err, adapter->hw.aq.asq_last_status);
return err;
}
/**
- * iavf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
+ * iavf_client_setup_qvlist - send a message to the PF to setup rdma qv map
* @ldev: pointer to L2 context.
* @client: Client pointer.
* @qvlist_info: queue and vector list
@@ -446,7 +446,7 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev,
struct iavf_client *client,
struct iavf_qvlist_info *qvlist_info)
{
- struct virtchnl_iwarp_qvlist_info *v_qvlist_info;
+ struct virtchnl_rdma_qvlist_info *v_qvlist_info;
struct iavf_adapter *adapter = ldev->vf;
struct iavf_qv_info *qv_info;
enum iavf_status err;
@@ -463,23 +463,23 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev,
continue;
v_idx = qv_info->v_idx;
if ((v_idx >=
- (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) ||
- (v_idx < adapter->iwarp_base_vector))
+ (adapter->rdma_base_vector + adapter->num_rdma_msix)) ||
+ (v_idx < adapter->rdma_base_vector))
return -EINVAL;
}
- v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info;
+ v_qvlist_info = (struct virtchnl_rdma_qvlist_info *)qvlist_info;
msg_size = struct_size(v_qvlist_info, qv_info,
v_qvlist_info->num_vectors - 1);
- adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
+ adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP);
err = iavf_aq_send_msg_to_pf(&adapter->hw,
- VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, IAVF_SUCCESS,
+ VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP, IAVF_SUCCESS,
(u8 *)v_qvlist_info, msg_size, NULL);
if (err) {
dev_err(&adapter->pdev->dev,
- "Unable to send iWarp vector config message to PF, error %d, aq status %d\n",
+ "Unable to send RDMA vector config message to PF, error %d, aq status %d\n",
err, adapter->hw.aq.asq_last_status);
goto out;
}
@@ -488,7 +488,7 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev,
for (i = 0; i < 5; i++) {
msleep(100);
if (!(adapter->client_pending &
- BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
+ BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP))) {
err = 0;
break;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h
index 9a7cf39ea75a..c5d51d7dc7cc 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_client.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_client.h
@@ -159,7 +159,7 @@ struct iavf_client {
#define IAVF_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
#define IAVF_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
u8 type;
-#define IAVF_CLIENT_IWARP 0
+#define IAVF_CLIENT_RDMA 0
struct iavf_client_ops *ops; /* client ops provided by the client */
};
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index e9cc7f6ddc46..dd11dbbd5551 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -131,8 +131,8 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
return "IAVF_ERR_INVALID_MAC_ADDR";
case IAVF_ERR_DEVICE_NOT_SUPPORTED:
return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
- case IAVF_ERR_MASTER_REQUESTS_PENDING:
- return "IAVF_ERR_MASTER_REQUESTS_PENDING";
+ case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
+ return "IAVF_ERR_PRIMARY_REQUESTS_PENDING";
case IAVF_ERR_INVALID_LINK_SETTINGS:
return "IAVF_ERR_INVALID_LINK_SETTINGS";
case IAVF_ERR_AUTONEG_NOT_COMPLETE:
@@ -223,8 +223,8 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
return "IAVF_ERR_ADMIN_QUEUE_FULL";
case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
return "IAVF_ERR_ADMIN_QUEUE_NO_WORK";
- case IAVF_ERR_BAD_IWARP_CQE:
- return "IAVF_ERR_BAD_IWARP_CQE";
+ case IAVF_ERR_BAD_RDMA_CQE:
+ return "IAVF_ERR_BAD_RDMA_CQE";
case IAVF_ERR_NVM_BLANK_MODE:
return "IAVF_ERR_NVM_BLANK_MODE";
case IAVF_ERR_NOT_IMPLEMENTED:
@@ -661,7 +661,7 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = {
/* Non Tunneled IPv6 */
IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
IAVF_PTT_UNUSED_ENTRY(91),
IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 461f5237a2f8..6f171d1d85b7 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -147,7 +147,7 @@ __iavf_add_ethtool_stats(u64 **data, void *pointer,
* @ring: the ring to copy
*
* Queue statistics must be copied while protected by
- * u64_stats_fetch_begin_irq, so we can't directly use iavf_add_ethtool_stats.
+ * u64_stats_fetch_begin, so we can't directly use iavf_add_ethtool_stats.
* Assumes that queue stats are defined in iavf_gstrings_queue_stats. If the
* ring pointer is null, zero out the queue stat values and update the data
* pointer. Otherwise safely copy the stats from the ring into the supplied
@@ -165,14 +165,14 @@ iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)
/* To avoid invalid statistics values, ensure that we keep retrying
* the copy until we get a consistent value according to
- * u64_stats_fetch_retry_irq. But first, make sure our ring is
+ * u64_stats_fetch_retry. But first, make sure our ring is
* non-null before attempting to access its syncp.
*/
do {
- start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
+ start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
for (i = 0; i < size; i++)
iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]);
- } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (ring && u64_stats_fetch_retry(&ring->syncp, start));
/* Once we successfully copy the stats in, update the data pointer */
*data += size;
@@ -331,9 +331,16 @@ static int iavf_get_link_ksettings(struct net_device *netdev,
**/
static int iavf_get_sset_count(struct net_device *netdev, int sset)
{
+ /* Report the maximum number of queues, even if not every queue is
+ * currently configured. Since allocation of queues is in pairs,
+ * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set
+ * at device creation and never changes.
+ */
+
if (sset == ETH_SS_STATS)
return IAVF_STATS_LEN +
- (IAVF_QUEUE_STATS_LEN * 2 * IAVF_MAX_REQ_QUEUES);
+ (IAVF_QUEUE_STATS_LEN * 2 *
+ netdev->real_num_tx_queues);
else if (sset == ETH_SS_PRIV_FLAGS)
return IAVF_PRIV_FLAGS_STR_LEN;
else
@@ -360,17 +367,18 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
rcu_read_lock();
- for (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) {
+ /* As num_active_queues describes both tx and rx queues, we can use
+ * it to iterate over rings' stats.
+ */
+ for (i = 0; i < adapter->num_active_queues; i++) {
struct iavf_ring *ring;
- /* Avoid accessing un-allocated queues */
- ring = (i < adapter->num_active_queues ?
- &adapter->tx_rings[i] : NULL);
+ /* Tx rings stats */
+ ring = &adapter->tx_rings[i];
iavf_add_queue_stats(&data, ring);
- /* Avoid accessing un-allocated queues */
- ring = (i < adapter->num_active_queues ?
- &adapter->rx_rings[i] : NULL);
+ /* Rx rings stats */
+ ring = &adapter->rx_rings[i];
iavf_add_queue_stats(&data, ring);
}
rcu_read_unlock();
@@ -407,10 +415,10 @@ static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
iavf_add_stat_strings(&data, iavf_gstrings_stats);
- /* Queues are always allocated in pairs, so we just use num_tx_queues
- * for both Tx and Rx queues.
+ /* Queues are always allocated in pairs, so we just use
+ * real_num_tx_queues for both Tx and Rx queues.
*/
- for (i = 0; i < netdev->num_tx_queues; i++) {
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
"tx", i);
iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
@@ -524,7 +532,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
if (changed_flags & IAVF_FLAG_LEGACY_RX) {
if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
}
}
@@ -573,9 +581,9 @@ static void iavf_get_drvinfo(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, iavf_driver_name, 32);
- strlcpy(drvinfo->fw_version, "N/A", 4);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strscpy(drvinfo->driver, iavf_driver_name, 32);
+ strscpy(drvinfo->fw_version, "N/A", 4);
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
}
@@ -583,12 +591,16 @@ static void iavf_get_drvinfo(struct net_device *netdev,
* iavf_get_ringparam - Get ring parameters
* @netdev: network interface device structure
* @ring: ethtool ringparam structure
+ * @kernel_ring: ethtool external ringparam structure
+ * @extack: netlink extended ACK report struct
*
* Returns current ring parameters. TX and RX rings are reported separately,
* but the number of rings is not reported.
**/
static void iavf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
@@ -602,12 +614,16 @@ static void iavf_get_ringparam(struct net_device *netdev,
* iavf_set_ringparam - Set ring parameters
* @netdev: network interface device structure
* @ring: ethtool ringparam structure
+ * @kernel_ring: ethtool external ringparam structure
+ * @extack: netlink extended ACK report struct
*
* Sets ring parameters. TX and RX rings are controlled separately, but the
* number of rings is not specified, so all rings get the same settings.
**/
static int iavf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
@@ -656,7 +672,7 @@ static int iavf_set_ringparam(struct net_device *netdev,
if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
}
return 0;
@@ -676,12 +692,8 @@ static int __iavf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec, int queue)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- struct iavf_vsi *vsi = &adapter->vsi;
struct iavf_ring *rx_ring, *tx_ring;
- ec->tx_max_coalesced_frames = vsi->work_limit;
- ec->rx_max_coalesced_frames = vsi->work_limit;
-
/* Rx and Tx usecs per queue value. If user doesn't specify the
* queue, return queue 0's value to represent.
*/
@@ -809,12 +821,8 @@ static int __iavf_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec, int queue)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- struct iavf_vsi *vsi = &adapter->vsi;
int i;
- if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
- vsi->work_limit = ec->tx_max_coalesced_frames_irq;
-
if (ec->rx_coalesce_usecs == 0) {
if (ec->use_adaptive_rx_coalesce)
netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
@@ -1425,7 +1433,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
spin_unlock_bh(&adapter->fdir_fltr_lock);
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
ret:
if (err && fltr)
@@ -1466,7 +1474,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
return err;
}
@@ -1650,7 +1658,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter,
spin_unlock_bh(&adapter->adv_rss_lock);
if (!err)
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
mutex_unlock(&adapter->crit_lock);
@@ -1923,7 +1931,7 @@ static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
* @key: hash key
* @hfunc: hash function to use
*
- * Returns -EINVAL if the table specifies an inavlid queue id, otherwise
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
**/
static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
@@ -1932,27 +1940,27 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
struct iavf_adapter *adapter = netdev_priv(netdev);
u16 i;
- /* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ /* Only support toeplitz hash function */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!indir)
+
+ if (!key && !indir)
return 0;
if (key)
memcpy(adapter->rss_key, key, adapter->rss_key_size);
- /* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < adapter->rss_lut_size; i++)
- adapter->rss_lut[i] = (u8)(indir[i]);
+ if (indir) {
+ /* Each 32 bits pointed by 'indir' is stored with a lut entry */
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ adapter->rss_lut[i] = (u8)(indir[i]);
+ }
return iavf_config_rss(adapter);
}
static const struct ethtool_ops iavf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES |
- ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
ETHTOOL_COALESCE_USE_ADAPTIVE,
.get_drvinfo = iavf_get_drvinfo,
.get_link = ethtool_op_get_link,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 4e7c04047f91..2de4baff4c20 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -49,7 +49,113 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver")
MODULE_LICENSE("GPL v2");
static const struct net_device_ops iavf_netdev_ops;
-struct workqueue_struct *iavf_wq;
+
+int iavf_status_to_errno(enum iavf_status status)
+{
+ switch (status) {
+ case IAVF_SUCCESS:
+ return 0;
+ case IAVF_ERR_PARAM:
+ case IAVF_ERR_MAC_TYPE:
+ case IAVF_ERR_INVALID_MAC_ADDR:
+ case IAVF_ERR_INVALID_LINK_SETTINGS:
+ case IAVF_ERR_INVALID_PD_ID:
+ case IAVF_ERR_INVALID_QP_ID:
+ case IAVF_ERR_INVALID_CQ_ID:
+ case IAVF_ERR_INVALID_CEQ_ID:
+ case IAVF_ERR_INVALID_AEQ_ID:
+ case IAVF_ERR_INVALID_SIZE:
+ case IAVF_ERR_INVALID_ARP_INDEX:
+ case IAVF_ERR_INVALID_FPM_FUNC_ID:
+ case IAVF_ERR_QP_INVALID_MSG_SIZE:
+ case IAVF_ERR_INVALID_FRAG_COUNT:
+ case IAVF_ERR_INVALID_ALIGNMENT:
+ case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
+ case IAVF_ERR_INVALID_IMM_DATA_SIZE:
+ case IAVF_ERR_INVALID_VF_ID:
+ case IAVF_ERR_INVALID_HMCFN_ID:
+ case IAVF_ERR_INVALID_PBLE_INDEX:
+ case IAVF_ERR_INVALID_SD_INDEX:
+ case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
+ case IAVF_ERR_INVALID_SD_TYPE:
+ case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
+ case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
+ case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
+ return -EINVAL;
+ case IAVF_ERR_NVM:
+ case IAVF_ERR_NVM_CHECKSUM:
+ case IAVF_ERR_PHY:
+ case IAVF_ERR_CONFIG:
+ case IAVF_ERR_UNKNOWN_PHY:
+ case IAVF_ERR_LINK_SETUP:
+ case IAVF_ERR_ADAPTER_STOPPED:
+ case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
+ case IAVF_ERR_AUTONEG_NOT_COMPLETE:
+ case IAVF_ERR_RESET_FAILED:
+ case IAVF_ERR_BAD_PTR:
+ case IAVF_ERR_SWFW_SYNC:
+ case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
+ case IAVF_ERR_QUEUE_EMPTY:
+ case IAVF_ERR_FLUSHED_QUEUE:
+ case IAVF_ERR_OPCODE_MISMATCH:
+ case IAVF_ERR_CQP_COMPL_ERROR:
+ case IAVF_ERR_BACKING_PAGE_ERROR:
+ case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
+ case IAVF_ERR_MEMCPY_FAILED:
+ case IAVF_ERR_SRQ_ENABLED:
+ case IAVF_ERR_ADMIN_QUEUE_ERROR:
+ case IAVF_ERR_ADMIN_QUEUE_FULL:
+ case IAVF_ERR_BAD_RDMA_CQE:
+ case IAVF_ERR_NVM_BLANK_MODE:
+ case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
+ case IAVF_ERR_DIAG_TEST_FAILED:
+ case IAVF_ERR_FIRMWARE_API_VERSION:
+ case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
+ return -EIO;
+ case IAVF_ERR_DEVICE_NOT_SUPPORTED:
+ return -ENODEV;
+ case IAVF_ERR_NO_AVAILABLE_VSI:
+ case IAVF_ERR_RING_FULL:
+ return -ENOSPC;
+ case IAVF_ERR_NO_MEMORY:
+ return -ENOMEM;
+ case IAVF_ERR_TIMEOUT:
+ case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
+ return -ETIMEDOUT;
+ case IAVF_ERR_NOT_IMPLEMENTED:
+ case IAVF_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
+ return -EALREADY;
+ case IAVF_ERR_NOT_READY:
+ return -EBUSY;
+ case IAVF_ERR_BUF_TOO_SHORT:
+ return -EMSGSIZE;
+ }
+
+ return -EIO;
+}
+
+int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
+{
+ switch (v_status) {
+ case VIRTCHNL_STATUS_SUCCESS:
+ return 0;
+ case VIRTCHNL_STATUS_ERR_PARAM:
+ case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
+ return -EINVAL;
+ case VIRTCHNL_STATUS_ERR_NO_MEMORY:
+ return -ENOMEM;
+ case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
+ case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
+ case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
+ return -EIO;
+ case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ }
+
+ return -EIO;
+}
/**
* iavf_pdev_to_adapter - go from pci_dev to adapter
@@ -170,7 +276,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
if (!(adapter->flags &
(IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
}
}
@@ -184,7 +290,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
void iavf_schedule_request_stats(struct iavf_adapter *adapter)
{
adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
/**
@@ -302,8 +408,9 @@ static irqreturn_t iavf_msix_aq(int irq, void *data)
rd32(hw, IAVF_VFINT_ICR01);
rd32(hw, IAVF_VFINT_ICR0_ENA1);
- /* schedule work on the private workqueue */
- queue_work(iavf_wq, &adapter->adminq_task);
+ if (adapter->state != __IAVF_REMOVE)
+ /* schedule work on the private workqueue */
+ queue_work(adapter->wq, &adapter->adminq_task);
return IRQ_HANDLED;
}
@@ -463,14 +570,14 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
- "iavf-%s-TxRx-%d", basename, rx_int_idx++);
+ "iavf-%s-TxRx-%u", basename, rx_int_idx++);
tx_int_idx++;
} else if (q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
- "iavf-%s-rx-%d", basename, rx_int_idx++);
+ "iavf-%s-rx-%u", basename, rx_int_idx++);
} else if (q_vector->tx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name),
- "iavf-%s-tx-%d", basename, tx_int_idx++);
+ "iavf-%s-tx-%u", basename, tx_int_idx++);
} else {
/* skip this unused q_vector */
continue;
@@ -492,10 +599,10 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
/* Spread the IRQ affinity hints across online CPUs. Note that
* get_cpu_mask returns a mask with a permanent lifetime so
- * it's safe to use as a hint for irq_set_affinity_hint.
+ * it's safe to use as a hint for irq_update_affinity_hint.
*/
cpu = cpumask_local_spread(q_vector->v_idx, -1);
- irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+ irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}
return 0;
@@ -505,7 +612,7 @@ free_queue_irqs:
vector--;
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
return err;
@@ -557,7 +664,7 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
for (vector = 0; vector < q_vectors; vector++) {
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
}
@@ -646,14 +753,17 @@ static void iavf_configure_rx(struct iavf_adapter *adapter)
* mac_vlan_list_lock.
**/
static struct
-iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
+iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
+ struct iavf_vlan vlan)
{
struct iavf_vlan_filter *f;
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (vlan == f->vlan)
+ if (f->vlan.vid == vlan.vid &&
+ f->vlan.tpid == vlan.tpid)
return f;
}
+
return NULL;
}
@@ -665,7 +775,8 @@ iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
* Returns ptr to the filter object or NULL when no memory available.
**/
static struct
-iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
+iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
+ struct iavf_vlan vlan)
{
struct iavf_vlan_filter *f = NULL;
@@ -680,7 +791,8 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
f->vlan = vlan;
list_add_tail(&f->list, &adapter->vlan_filter_list);
- f->add = true;
+ f->state = IAVF_VLAN_ADD;
+ adapter->num_vlan_filters++;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
}
@@ -694,7 +806,7 @@ clearout:
* @adapter: board private structure
* @vlan: VLAN tag
**/
-static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
+static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
{
struct iavf_vlan_filter *f;
@@ -702,7 +814,7 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
f = iavf_find_vlan(adapter, vlan);
if (f) {
- f->remove = true;
+ f->state = IAVF_VLAN_REMOVE;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
}
@@ -717,11 +829,61 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
**/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
- u16 vid;
+ struct iavf_vlan_filter *f;
/* re-add all VLAN filters */
- for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
- iavf_add_vlan(adapter, vid);
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->state == IAVF_VLAN_INACTIVE)
+ f->state = IAVF_VLAN_ADD;
+ }
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+}
+
+/**
+ * iavf_get_num_vlans_added - get number of VLANs added
+ * @adapter: board private structure
+ */
+u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
+{
+ return adapter->num_vlan_filters;
+}
+
+/**
+ * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
+ * @adapter: board private structure
+ *
+ * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
+ * do not impose a limit as that maintains current behavior and for
+ * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum allowed sent from the PF.
+ **/
+static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
+{
+ /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
+ * never been a limit on the VF driver side
+ */
+ if (VLAN_ALLOWED(adapter))
+ return VLAN_N_VID;
+ else if (VLAN_V2_ALLOWED(adapter))
+ return adapter->vlan_v2_caps.filtering.max_filters;
+
+ return 0;
+}
+
+/**
+ * iavf_max_vlans_added - check if maximum VLANs allowed already exist
+ * @adapter: board private structure
+ **/
+static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
+{
+ if (iavf_get_num_vlans_added(adapter) <
+ iavf_get_max_vlans_allowed(adapter))
+ return false;
+
+ return true;
}
/**
@@ -735,13 +897,22 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- if (!VLAN_ALLOWED(adapter))
+ /* Do not track VLAN 0 filter, always added by the PF on VF init */
+ if (!vid)
+ return 0;
+
+ if (!VLAN_FILTERING_ALLOWED(adapter))
+ return -EIO;
+
+ if (iavf_max_vlans_added(adapter)) {
+ netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
+ iavf_get_max_vlans_allowed(adapter));
return -EIO;
+ }
- if (iavf_add_vlan(adapter, vid) == NULL)
+ if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
return -ENOMEM;
- set_bit(vid, adapter->vsi.active_vlans);
return 0;
}
@@ -756,9 +927,11 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- iavf_del_vlan(adapter, vid);
- clear_bit(vid, adapter->vsi.active_vlans);
+ /* We do not track VLAN 0 filter */
+ if (!vid)
+ return 0;
+ iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
return 0;
}
@@ -811,7 +984,9 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
+ f->add_handled = false;
f->is_new_mac = true;
+ f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
} else {
f->remove = false;
@@ -821,42 +996,123 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
}
/**
- * iavf_set_mac - NDO callback to set port mac address
- * @netdev: network interface device structure
- * @p: pointer to an address structure
+ * iavf_replace_primary_mac - Replace current primary address
+ * @adapter: board private structure
+ * @new_mac: new MAC address to be applied
*
- * Returns 0 on success, negative on failure
+ * Replace current dev_addr and send request to PF for removal of previous
+ * primary MAC address filter and addition of new primary MAC filter.
+ * Return 0 for success, -ENOMEM for failure.
+ *
+ * Do not call this with mac_vlan_list_lock!
**/
-static int iavf_set_mac(struct net_device *netdev, void *p)
+int iavf_replace_primary_mac(struct iavf_adapter *adapter,
+ const u8 *new_mac)
{
- struct iavf_adapter *adapter = netdev_priv(netdev);
struct iavf_hw *hw = &adapter->hw;
struct iavf_mac_filter *f;
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
- return 0;
spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ f->is_primary = false;
+ }
+
f = iavf_find_filter(adapter, hw->mac.addr);
if (f) {
f->remove = true;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
}
- f = iavf_add_filter(adapter, addr->sa_data);
+ f = iavf_add_filter(adapter, new_mac);
+
+ if (f) {
+ /* Always send the request to add if changing primary MAC
+ * even if filter is already present on the list
+ */
+ f->is_primary = true;
+ f->add = true;
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
+ ether_addr_copy(hw->mac.addr, new_mac);
+ }
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ /* schedule the watchdog task to immediately process the request */
if (f) {
- ether_addr_copy(hw->mac.addr, addr->sa_data);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ return 0;
}
+ return -ENOMEM;
+}
+
+/**
+ * iavf_is_mac_set_handled - wait for a response to set MAC from PF
+ * @netdev: network interface device structure
+ * @macaddr: MAC address to set
+ *
+ * Returns true on success, false on failure
+ */
+static bool iavf_is_mac_set_handled(struct net_device *netdev,
+ const u8 *macaddr)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_mac_filter *f;
+ bool ret = false;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+ f = iavf_find_filter(adapter, macaddr);
+
+ if (!f || (!f->add && f->add_handled))
+ ret = true;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
- return (f == NULL) ? -ENOMEM : 0;
+ return ret;
+}
+
+/**
+ * iavf_set_mac - NDO callback to set port MAC address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int iavf_set_mac(struct net_device *netdev, void *p)
+{
+ struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+ int ret;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ret = iavf_replace_primary_mac(adapter, addr->sa_data);
+
+ if (ret)
+ return ret;
+
+ ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
+ iavf_is_mac_set_handled(netdev, addr->sa_data),
+ msecs_to_jiffies(2500));
+
+ /* If ret < 0 then it means wait was interrupted.
+ * If ret == 0 then it means we got a timeout.
+ * else it means we got response for set MAC from PF,
+ * check if netdev MAC was updated to requested MAC,
+ * if yes then set MAC succeeded, otherwise it failed and we return -EACCES
+ */
+ if (ret < 0)
+ return ret;
+
+ if (!ret)
+ return -EAGAIN;
+
+ if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
+ return -EACCES;
+
+ return 0;
}
/**
@@ -1007,88 +1263,159 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
if (CLIENT_ENABLED(adapter))
adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
/**
- * iavf_down - Shutdown the connection processing
+ * iavf_clear_mac_vlan_filters - Remove mac and vlan filters not sent to PF
+ * yet and mark the others to be removed.
* @adapter: board private structure
- *
- * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
**/
-void iavf_down(struct iavf_adapter *adapter)
+static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
- struct iavf_vlan_filter *vlf;
- struct iavf_cloud_filter *cf;
- struct iavf_fdir_fltr *fdir;
- struct iavf_mac_filter *f;
- struct iavf_adv_rss *rss;
-
- if (adapter->state <= __IAVF_DOWN_PENDING)
- return;
-
- netif_carrier_off(netdev);
- netif_tx_disable(netdev);
- adapter->link_up = false;
- iavf_napi_disable_all(adapter);
- iavf_irq_disable(adapter);
+ struct iavf_vlan_filter *vlf, *vlftmp;
+ struct iavf_mac_filter *f, *ftmp;
spin_lock_bh(&adapter->mac_vlan_list_lock);
-
/* clear the sync flag on all filters */
__dev_uc_unsync(adapter->netdev, NULL);
__dev_mc_unsync(adapter->netdev, NULL);
/* remove all MAC filters */
- list_for_each_entry(f, &adapter->mac_filter_list, list) {
- f->remove = true;
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
+ list) {
+ if (f->add) {
+ list_del(&f->list);
+ kfree(f);
+ } else {
+ f->remove = true;
+ }
}
- /* remove all VLAN filters */
- list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
- vlf->remove = true;
- }
+ /* disable all VLAN filters */
+ list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
+ list)
+ vlf->state = IAVF_VLAN_DISABLE;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
+ * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
+ * mark the others to be removed.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_cloud_filter *cf, *cftmp;
/* remove all cloud filters */
spin_lock_bh(&adapter->cloud_filter_list_lock);
- list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
- cf->del = true;
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
+ list) {
+ if (cf->add) {
+ list_del(&cf->list);
+ kfree(cf);
+ adapter->num_cloud_filters--;
+ } else {
+ cf->del = true;
+ }
}
spin_unlock_bh(&adapter->cloud_filter_list_lock);
+}
+
+/**
+ * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
+ * the others to be removed.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *fdir, *fdirtmp;
/* remove all Flow Director filters */
spin_lock_bh(&adapter->fdir_fltr_lock);
- list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
- fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ } else {
+ fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ }
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
+}
+
+/**
+ * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
+ * the others to be removed.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
+{
+ struct iavf_adv_rss *rss, *rsstmp;
/* remove all advance RSS configuration */
spin_lock_bh(&adapter->adv_rss_lock);
- list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
- rss->state = IAVF_ADV_RSS_DEL_REQUEST;
+ list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
+ list) {
+ if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
+ list_del(&rss->list);
+ kfree(rss);
+ } else {
+ rss->state = IAVF_ADV_RSS_DEL_REQUEST;
+ }
+ }
spin_unlock_bh(&adapter->adv_rss_lock);
+}
+
+/**
+ * iavf_down - Shutdown the connection processing
+ * @adapter: board private structure
+ *
+ * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
+ **/
+void iavf_down(struct iavf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
- if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
- adapter->state != __IAVF_RESETTING) {
+ if (adapter->state <= __IAVF_DOWN_PENDING)
+ return;
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+ adapter->link_up = false;
+ iavf_napi_disable_all(adapter);
+ iavf_irq_disable(adapter);
+
+ iavf_clear_mac_vlan_filters(adapter);
+ iavf_clear_cloud_filters(adapter);
+ iavf_clear_fdir_filters(adapter);
+ iavf_clear_adv_rss_conf(adapter);
+
+ if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
/* cancel any current operation */
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
/* Schedule operations to close down the HW. Don't wait
* here for this to complete. The watchdog is still running
* and it will take care of this.
*/
- adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
+ if (!list_empty(&adapter->mac_filter_list))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
+ if (!list_empty(&adapter->vlan_filter_list))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+ if (!list_empty(&adapter->cloud_filter_list))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
+ if (!list_empty(&adapter->fdir_list_head))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ if (!list_empty(&adapter->adv_rss_list_head))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
/**
@@ -1152,6 +1479,86 @@ static void iavf_free_queues(struct iavf_adapter *adapter)
}
/**
+ * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
+ * @adapter: board private structure
+ *
+ * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
+ * stripped in certain descriptor fields. Instead of checking the offload
+ * capability bits in the hot path, cache the location in the ring-specific
+ * flags.
+ */
+void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_active_queues; i++) {
+ struct iavf_ring *tx_ring = &adapter->tx_rings[i];
+ struct iavf_ring *rx_ring = &adapter->rx_rings[i];
+
+ /* prevent multiple L2TAG bits being set after VFR */
+ tx_ring->flags &=
+ ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
+ IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
+ rx_ring->flags &=
+ ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
+ IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);
+
+ if (VLAN_ALLOWED(adapter)) {
+ tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ } else if (VLAN_V2_ALLOWED(adapter)) {
+ struct virtchnl_vlan_supported_caps *stripping_support;
+ struct virtchnl_vlan_supported_caps *insertion_support;
+
+ stripping_support =
+ &adapter->vlan_v2_caps.offloads.stripping_support;
+ insertion_support =
+ &adapter->vlan_v2_caps.offloads.insertion_support;
+
+ if (stripping_support->outer) {
+ if (stripping_support->outer &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+ rx_ring->flags |=
+ IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ else if (stripping_support->outer &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
+ rx_ring->flags |=
+ IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
+ } else if (stripping_support->inner) {
+ if (stripping_support->inner &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+ rx_ring->flags |=
+ IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ else if (stripping_support->inner &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
+ rx_ring->flags |=
+ IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
+ }
+
+ if (insertion_support->outer) {
+ if (insertion_support->outer &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+ tx_ring->flags |=
+ IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ else if (insertion_support->outer &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
+ tx_ring->flags |=
+ IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
+ } else if (insertion_support->inner) {
+ if (insertion_support->inner &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
+ tx_ring->flags |=
+ IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
+ else if (insertion_support->inner &
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
+ tx_ring->flags |=
+ IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
+ }
+ }
+ }
+}
+
+/**
* iavf_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
*
@@ -1212,6 +1619,8 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter)
adapter->num_active_queues = num_active_queues;
+ iavf_set_queue_vlan_tag_loc(adapter);
+
return 0;
err_out:
@@ -1275,7 +1684,7 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
struct iavf_aqc_get_set_rss_key_data *rss_key =
(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
struct iavf_hw *hw = &adapter->hw;
- int ret = 0;
+ enum iavf_status status;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -1284,24 +1693,25 @@ static int iavf_config_rss_aq(struct iavf_adapter *adapter)
return -EBUSY;
}
- ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
- if (ret) {
+ status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
+ if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
- iavf_stat_str(hw, ret),
+ iavf_stat_str(hw, status),
iavf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
+ return iavf_status_to_errno(status);
}
- ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
- adapter->rss_lut, adapter->rss_lut_size);
- if (ret) {
+ status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
+ adapter->rss_lut, adapter->rss_lut_size);
+ if (status) {
dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
- iavf_stat_str(hw, ret),
+ iavf_stat_str(hw, status),
iavf_aq_str(hw, hw->aq.asq_last_status));
+ return iavf_status_to_errno(status);
}
- return ret;
+ return 0;
}
@@ -1371,7 +1781,6 @@ static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
static int iavf_init_rss(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
- int ret;
if (!RSS_PF(adapter)) {
/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
@@ -1387,9 +1796,8 @@ static int iavf_init_rss(struct iavf_adapter *adapter)
iavf_fill_rss_lut(adapter);
netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
- ret = iavf_config_rss(adapter);
- return ret;
+ return iavf_config_rss(adapter);
}
/**
@@ -1418,7 +1826,7 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
netif_napi_add(adapter->netdev, &q_vector->napi,
- iavf_napi_poll, NAPI_POLL_WEIGHT);
+ iavf_napi_poll);
}
return 0;
@@ -1584,6 +1992,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
{
if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
return iavf_send_vf_config_msg(adapter);
+ if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
+ return iavf_send_vf_offload_vlan_v2_msg(adapter);
if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
iavf_disable_queues(adapter);
return 0;
@@ -1717,6 +2127,39 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_del_adv_rss_cfg(adapter);
return 0;
}
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
+ iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
+ iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
+ iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
+ iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
+ iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
+ iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
+ iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
+ return 0;
+ }
+ if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
+ iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
+ return 0;
+ }
+
if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
iavf_request_stats(adapter);
return 0;
@@ -1726,6 +2169,91 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
}
/**
+ * iavf_set_vlan_offload_features - set VLAN offload configuration
+ * @adapter: board private structure
+ * @prev_features: previous features used for comparison
+ * @features: updated features used for configuration
+ *
+ * Set the aq_required bit(s) based on the requested features passed in to
+ * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
+ * the watchdog if any changes are requested to expedite the request via
+ * virtchnl.
+ **/
+void
+iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
+ netdev_features_t prev_features,
+ netdev_features_t features)
+{
+ bool enable_stripping = true, enable_insertion = true;
+ u16 vlan_ethertype = 0;
+ u64 aq_required = 0;
+
+ /* keep cases separate because one ethertype for offloads can be
+ * disabled at the same time as another is disabled, so check for an
+ * enabled ethertype first, then check for disabled. Default to
+ * ETH_P_8021Q so an ethertype is specified if disabling insertion and
+ * stripping.
+ */
+ if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
+ vlan_ethertype = ETH_P_8021AD;
+ else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
+ vlan_ethertype = ETH_P_8021Q;
+ else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
+ vlan_ethertype = ETH_P_8021AD;
+ else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
+ vlan_ethertype = ETH_P_8021Q;
+ else
+ vlan_ethertype = ETH_P_8021Q;
+
+ if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
+ enable_stripping = false;
+ if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
+ enable_insertion = false;
+
+ if (VLAN_ALLOWED(adapter)) {
+ /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
+ * stripping via virtchnl. VLAN insertion can be toggled on the
+ * netdev, but it doesn't require a virtchnl message
+ */
+ if (enable_stripping)
+ aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
+ else
+ aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
+
+ } else if (VLAN_V2_ALLOWED(adapter)) {
+ switch (vlan_ethertype) {
+ case ETH_P_8021Q:
+ if (enable_stripping)
+ aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
+ else
+ aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
+
+ if (enable_insertion)
+ aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
+ else
+ aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
+ break;
+ case ETH_P_8021AD:
+ if (enable_stripping)
+ aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
+ else
+ aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
+
+ if (enable_insertion)
+ aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
+ else
+ aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
+ break;
+ }
+ }
+
+ if (aq_required) {
+ adapter->aq_required |= aq_required;
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
+ }
+}
+
+/**
* iavf_startup - first step of driver startup
* @adapter: board private structure
*
@@ -1737,23 +2265,24 @@ static void iavf_startup(struct iavf_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
- int err;
+ enum iavf_status status;
+ int ret;
WARN_ON(adapter->state != __IAVF_STARTUP);
/* driver loaded, probe complete */
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
- err = iavf_set_mac_type(hw);
- if (err) {
- dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
+ status = iavf_set_mac_type(hw);
+ if (status) {
+ dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
goto err;
}
- err = iavf_check_reset_complete(hw);
- if (err) {
+ ret = iavf_check_reset_complete(hw);
+ if (ret) {
dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
- err);
+ ret);
goto err;
}
hw->aq.num_arq_entries = IAVF_AQ_LEN;
@@ -1761,14 +2290,15 @@ static void iavf_startup(struct iavf_adapter *adapter)
hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
- err = iavf_init_adminq(hw);
- if (err) {
- dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
+ status = iavf_init_adminq(hw);
+ if (status) {
+ dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+ status);
goto err;
}
- err = iavf_send_api_ver(adapter);
- if (err) {
- dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
+ ret = iavf_send_api_ver(adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
iavf_shutdown_adminq(hw);
goto err;
}
@@ -1804,7 +2334,7 @@ static void iavf_init_version_check(struct iavf_adapter *adapter)
/* aq msg sent, awaiting reply */
err = iavf_verify_api_ver(adapter);
if (err) {
- if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
+ if (err == -EALREADY)
err = iavf_send_api_ver(adapter);
else
dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
@@ -1827,6 +2357,58 @@ err:
}
/**
+ * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
+ * @adapter: board private structure
+ */
+int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
+{
+ int i, num_req_queues = adapter->num_req_queues;
+ struct iavf_vsi *vsi = &adapter->vsi;
+
+ for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+ if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
+ adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+ }
+ if (!adapter->vsi_res) {
+ dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
+ return -ENODEV;
+ }
+
+ if (num_req_queues &&
+ num_req_queues > adapter->vsi_res->num_queue_pairs) {
+ /* Problem. The PF gave us fewer queues than what we had
+ * negotiated in our request. Need a reset to see if we can't
+ * get back to a working state.
+ */
+ dev_err(&adapter->pdev->dev,
+ "Requested %d queues, but PF only gave us %d.\n",
+ num_req_queues,
+ adapter->vsi_res->num_queue_pairs);
+ adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
+ adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
+ iavf_schedule_reset(adapter);
+
+ return -EAGAIN;
+ }
+ adapter->num_req_queues = 0;
+ adapter->vsi.id = adapter->vsi_res->vsi_id;
+
+ adapter->vsi.back = adapter;
+ adapter->vsi.base_vector = 1;
+ vsi->netdev = adapter->netdev;
+ vsi->qs_handle = adapter->vsi_res->qset_handle;
+ if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ adapter->rss_key_size = adapter->vf_res->rss_key_size;
+ adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
+ } else {
+ adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
+ adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
+ }
+
+ return 0;
+}
+
+/**
* iavf_init_get_resources - third step of driver startup
* @adapter: board private structure
*
@@ -1837,7 +2419,6 @@ err:
**/
static void iavf_init_get_resources(struct iavf_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
int err;
@@ -1853,11 +2434,11 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
}
}
err = iavf_get_vf_config(adapter);
- if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
+ if (err == -EALREADY) {
err = iavf_send_vf_config_msg(adapter);
goto err;
- } else if (err == IAVF_ERR_PARAM) {
- /* We only get ERR_PARAM if the device is in a very bad
+ } else if (err == -EINVAL) {
+ /* We only get -EINVAL if the device is in a very bad
* state or if we've been disabled for previous bad
* behavior. Either way, we're done now.
*/
@@ -1870,9 +2451,133 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
goto err_alloc;
}
- err = iavf_process_config(adapter);
- if (err)
+ err = iavf_parse_vf_resource_msg(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
+ err);
goto err_alloc;
+ }
+ /* Some features require additional messages to negotiate extended
+ * capabilities. These are processed in sequence by the
+ * __IAVF_INIT_EXTENDED_CAPS driver state.
+ */
+ adapter->extended_caps = IAVF_EXTENDED_CAPS;
+
+ iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
+ return;
+
+err_alloc:
+ kfree(adapter->vf_res);
+ adapter->vf_res = NULL;
+err:
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
+ * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
+ * @adapter: board private structure
+ *
+ * Function processes send of the extended VLAN V2 capability message to the
+ * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
+ * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
+ */
+static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
+
+ ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
+ if (ret && ret == -EOPNOTSUPP) {
+ /* PF does not support VIRTCHNL_VF_OFFLOAD_VLAN_V2. In this case,
+ * we did not send the capability exchange message and do not
+ * expect a response.
+ */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
+ }
+
+ /* We sent the message, so move on to the next step */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
+}
+
+/**
+ * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
+ * @adapter: board private structure
+ *
+ * Function processes receipt of the extended VLAN V2 capability message from
+ * the PF.
+ **/
+static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
+{
+ int ret;
+
+ WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
+
+ memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
+
+ ret = iavf_get_vf_vlan_v2_caps(adapter);
+ if (ret)
+ goto err;
+
+ /* We've processed receipt of the VLAN V2 caps message */
+ adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
+ return;
+err:
+ /* We didn't receive a reply. Make sure we try sending again when
+ * __IAVF_INIT_FAILED attempts to recover.
+ */
+ adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+}
+
+/**
+ * iavf_init_process_extended_caps - Part of driver startup
+ * @adapter: board private structure
+ *
+ * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
+ * handles negotiating capabilities for features which require an additional
+ * message.
+ *
+ * Once all extended capabilities exchanges are finished, the driver will
+ * transition into __IAVF_INIT_CONFIG_ADAPTER.
+ */
+static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
+{
+ WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
+
+ /* Process capability exchange for VLAN V2 */
+ if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
+ iavf_init_send_offload_vlan_v2_caps(adapter);
+ return;
+ } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
+ iavf_init_recv_offload_vlan_v2_caps(adapter);
+ return;
+ }
+
+ /* When we reach here, no further extended capabilities exchanges are
+ * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
+ */
+ iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
+}
+
+/**
+ * iavf_init_config_adapter - last part of driver startup
+ * @adapter: board private structure
+ *
+ * After all the supported capabilities are negotiated, then the
+ * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
+ */
+static void iavf_init_config_adapter(struct iavf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);
+
+ if (iavf_process_config(adapter))
+ goto err;
+
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
@@ -1955,6 +2660,10 @@ static void iavf_init_get_resources(struct iavf_adapter *adapter)
else
iavf_init_rss(adapter);
+ if (VLAN_V2_ALLOWED(adapter))
+ /* request initial VLAN offload settings */
+ iavf_set_vlan_offload_features(adapter, 0, netdev->features);
+
return;
err_mem:
iavf_free_rss(adapter);
@@ -1962,9 +2671,6 @@ err_register:
iavf_free_misc_irq(adapter);
err_sw_init:
iavf_reset_interrupt_capability(adapter);
-err_alloc:
- kfree(adapter->vf_res);
- adapter->vf_res = NULL;
err:
iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
@@ -1981,55 +2687,101 @@ static void iavf_watchdog_task(struct work_struct *work)
struct iavf_hw *hw = &adapter->hw;
u32 reg_val;
- if (!mutex_trylock(&adapter->crit_lock))
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state == __IAVF_REMOVE)
+ return;
+
goto restart_watchdog;
+ }
+
+ if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
+ adapter->netdev_registered &&
+ !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
+ rtnl_trylock()) {
+ netdev_update_features(adapter->netdev);
+ rtnl_unlock();
+ adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
+ }
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
iavf_change_state(adapter, __IAVF_COMM_FAILED);
- if (adapter->flags & IAVF_FLAG_RESET_NEEDED &&
- adapter->state != __IAVF_RESETTING) {
- iavf_change_state(adapter, __IAVF_RESETTING);
+ if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ mutex_unlock(&adapter->crit_lock);
+ queue_work(adapter->wq, &adapter->reset_task);
+ return;
}
switch (adapter->state) {
case __IAVF_STARTUP:
iavf_startup(adapter);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(30));
return;
case __IAVF_INIT_VERSION_CHECK:
iavf_init_version_check(adapter);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(30));
return;
case __IAVF_INIT_GET_RESOURCES:
iavf_init_get_resources(adapter);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(1));
+ return;
+ case __IAVF_INIT_EXTENDED_CAPS:
+ iavf_init_process_extended_caps(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(1));
+ return;
+ case __IAVF_INIT_CONFIG_ADAPTER:
+ iavf_init_config_adapter(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(1));
return;
case __IAVF_INIT_FAILED:
+ if (test_bit(__IAVF_IN_REMOVE_TASK,
+ &adapter->crit_section)) {
+ /* Do not update the state and do not reschedule
+ * watchdog task, iavf_remove should handle this state
+ * as it can loop forever
+ */
+ mutex_unlock(&adapter->crit_lock);
+ return;
+ }
if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
dev_err(&adapter->pdev->dev,
"Failed to communicate with PF; waiting before retry\n");
adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
iavf_shutdown_adminq(hw);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq,
+ queue_delayed_work(adapter->wq,
&adapter->watchdog_task, (5 * HZ));
return;
}
/* Try again from failed step*/
iavf_change_state(adapter, adapter->last_state);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
return;
case __IAVF_COMM_FAILED:
+ if (test_bit(__IAVF_IN_REMOVE_TASK,
+ &adapter->crit_section)) {
+ /* Set state to __IAVF_INIT_FAILED and perform remove
+ * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
+ * doesn't bring the state back to __IAVF_COMM_FAILED.
+ */
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
+ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
+ mutex_unlock(&adapter->crit_lock);
+ return;
+ }
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
@@ -2047,13 +2799,14 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq,
+ queue_delayed_work(adapter->wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
return;
case __IAVF_RESETTING:
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ HZ * 2);
return;
case __IAVF_DOWN:
case __IAVF_DOWN_PENDING:
@@ -2066,10 +2819,13 @@ static void iavf_watchdog_task(struct work_struct *work)
iavf_send_api_ver(adapter);
}
} else {
+ int ret = iavf_process_aq_command(adapter);
+
/* An error will be returned if no commands were
* processed; use this opportunity to update stats
+ * if the error isn't -EOPNOTSUPP
*/
- if (iavf_process_aq_command(adapter) &&
+ if (ret && ret != -EOPNOTSUPP &&
adapter->state == __IAVF_RUNNING)
iavf_request_stats(adapter);
}
@@ -2089,9 +2845,9 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
mutex_unlock(&adapter->crit_lock);
- queue_delayed_work(iavf_wq,
+ queue_delayed_work(adapter->wq,
&adapter->watchdog_task, HZ * 2);
return;
}
@@ -2099,14 +2855,23 @@ static void iavf_watchdog_task(struct work_struct *work)
schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
mutex_unlock(&adapter->crit_lock);
restart_watchdog:
- queue_work(iavf_wq, &adapter->adminq_task);
+ if (adapter->state >= __IAVF_DOWN)
+ queue_work(adapter->wq, &adapter->adminq_task);
if (adapter->aq_required)
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(20));
else
- queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ HZ * 2);
}
+/**
+ * iavf_disable_vf - disable VF
+ * @adapter: board private structure
+ *
+ * Set communication failed flag and free all resources.
+ * NOTE: This function is expected to be called with crit_lock being held.
+ **/
static void iavf_disable_vf(struct iavf_adapter *adapter)
{
struct iavf_mac_filter *f, *ftmp;
@@ -2143,6 +2908,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
list_del(&fv->list);
kfree(fv);
}
+ adapter->num_vlan_filters = 0;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
@@ -2160,8 +2926,6 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
iavf_free_queues(adapter);
memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
iavf_shutdown_adminq(&adapter->hw);
- adapter->netdev->flags &= ~IFF_UP;
- mutex_unlock(&adapter->crit_lock);
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
@@ -2186,20 +2950,26 @@ static void iavf_reset_task(struct work_struct *work)
struct iavf_hw *hw = &adapter->hw;
struct iavf_mac_filter *f, *ftmp;
struct iavf_cloud_filter *cf;
+ enum iavf_status status;
u32 reg_val;
int i = 0, err;
bool running;
+ /* Detach interface to avoid subsequent NDO callbacks */
+ rtnl_lock();
+ netif_device_detach(netdev);
+ rtnl_unlock();
+
/* When device is being removed it doesn't make sense to run the reset
* task, just return in such a case.
*/
- if (mutex_is_locked(&adapter->remove_lock))
- return;
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state != __IAVF_REMOVE)
+ queue_work(adapter->wq, &adapter->reset_task);
- if (iavf_lock_timeout(&adapter->crit_lock, 200)) {
- schedule_work(&adapter->reset_task);
- return;
+ goto reset_finish;
}
+
while (!mutex_trylock(&adapter->client_lock))
usleep_range(500, 1000);
if (CLIENT_ENABLED(adapter)) {
@@ -2254,6 +3024,12 @@ static void iavf_reset_task(struct work_struct *work)
reg_val);
iavf_disable_vf(adapter);
mutex_unlock(&adapter->client_lock);
+ mutex_unlock(&adapter->crit_lock);
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ dev_close(netdev);
+ rtnl_unlock();
+ }
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -2262,11 +3038,9 @@ continue_reset:
* ndo_open() returning, so we can't assume it means all our open
* tasks have finished, since we're not holding the rtnl_lock here.
*/
- running = ((adapter->state == __IAVF_RUNNING) ||
- (adapter->state == __IAVF_RESETTING));
+ running = adapter->state == __IAVF_RUNNING;
if (running) {
- netdev->flags &= ~IFF_UP;
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
@@ -2287,13 +3061,16 @@ continue_reset:
/* kill and reinit the admin queue */
iavf_shutdown_adminq(hw);
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
- err = iavf_init_adminq(hw);
- if (err)
+ status = iavf_init_adminq(hw);
+ if (status) {
dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
- err);
+ status);
+ goto reset_err;
+ }
adapter->aq_required = 0;
- if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
+ if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
+ (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
err = iavf_reinit_interrupt_scheme(adapter);
if (err)
goto reset_err;
@@ -2308,6 +3085,13 @@ continue_reset:
}
adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
+ /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
+ * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here,
+ * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
+ * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
+ * been successfully sent and negotiated
+ */
+ adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
spin_lock_bh(&adapter->mac_vlan_list_lock);
@@ -2342,7 +3126,7 @@ continue_reset:
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
iavf_misc_irq_enable(adapter);
- mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
+ mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
/* We were running when the reset started, so we need to restore some
* state here.
@@ -2358,12 +3142,13 @@ continue_reset:
if (err)
goto reset_err;
- if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
+ if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
+ (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
err = iavf_request_traffic_irqs(adapter, netdev->name);
if (err)
goto reset_err;
- adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
}
iavf_configure(adapter);
@@ -2372,25 +3157,43 @@ continue_reset:
* to __IAVF_RUNNING
*/
iavf_up_complete(adapter);
- netdev->flags |= IFF_UP;
+
iavf_irq_enable(adapter, true);
} else {
iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
}
+
+ adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- return;
+ goto reset_finish;
reset_err:
+ if (running) {
+ set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
+ iavf_free_traffic_irqs(adapter);
+ }
+ iavf_disable_vf(adapter);
+
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- if (running) {
- iavf_change_state(adapter, __IAVF_RUNNING);
- netdev->flags |= IFF_UP;
+
+ if (netif_running(netdev)) {
+ /* Close device to ensure that Tx queues will not be started
+ * during netif_device_attach() at the end of the reset task.
+ */
+ rtnl_lock();
+ dev_close(netdev);
+ rtnl_unlock();
}
+
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
- iavf_close(netdev);
+reset_finish:
+ rtnl_lock();
+ netif_device_attach(netdev);
+ rtnl_unlock();
}
/**
@@ -2411,13 +3214,19 @@ static void iavf_adminq_task(struct work_struct *work)
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
goto out;
+ if (!mutex_trylock(&adapter->crit_lock)) {
+ if (adapter->state == __IAVF_REMOVE)
+ return;
+
+ queue_work(adapter->wq, &adapter->adminq_task);
+ goto out;
+ }
+
event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
if (!event.msg_buf)
goto out;
- if (iavf_lock_timeout(&adapter->crit_lock, 200))
- goto freedom;
do {
ret = iavf_clean_arq_element(hw, &event, &pending);
v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
@@ -2685,6 +3494,7 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
struct tc_mqprio_qopt_offload *mqprio_qopt)
{
u64 total_max_rate = 0;
+ u32 tx_rate_rem = 0;
int i, num_qps = 0;
u64 tx_rate = 0;
int ret = 0;
@@ -2699,17 +3509,40 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
return -EINVAL;
if (mqprio_qopt->min_rate[i]) {
dev_err(&adapter->pdev->dev,
- "Invalid min tx rate (greater than 0) specified\n");
+ "Invalid min tx rate (greater than 0) specified for TC%d\n",
+ i);
return -EINVAL;
}
- /*convert to Mbps */
+
+ /* convert to Mbps */
tx_rate = div_u64(mqprio_qopt->max_rate[i],
IAVF_MBPS_DIVISOR);
+
+ if (mqprio_qopt->max_rate[i] &&
+ tx_rate < IAVF_MBPS_QUANTA) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid max tx rate for TC%d, minimum %dMbps\n",
+ i, IAVF_MBPS_QUANTA);
+ return -EINVAL;
+ }
+
+ (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
+
+ if (tx_rate_rem != 0) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid max tx rate for TC%d, not divisible by %d\n",
+ i, IAVF_MBPS_QUANTA);
+ return -EINVAL;
+ }
+
total_max_rate += tx_rate;
num_qps += mqprio_qopt->qopt.count[i];
}
- if (num_qps > IAVF_MAX_REQ_QUEUES)
+ if (num_qps > adapter->num_active_queues) {
+ dev_err(&adapter->pdev->dev,
+ "Cannot support requested number of queues\n");
return -EINVAL;
+ }
ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
return ret;
@@ -2768,6 +3601,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
netif_tx_disable(netdev);
iavf_del_all_cloud_filters(adapter);
adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
+ total_qps = adapter->orig_num_active_queues;
goto exit;
} else {
return -EINVAL;
@@ -2811,7 +3645,21 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
adapter->ch_config.ch_info[i].offset = 0;
}
}
+
+ /* Take a snapshot of the original configuration, such as
+ * "num_active_queues". It is used later when the delete-ADQ flow is
+ * exercised, so that once that flow completes, the VF goes back to
+ * its original queue configuration
+ */
+
+ adapter->orig_num_active_queues = adapter->num_active_queues;
+
+ /* Store queue info based on TC so that the VF gets configured with
+ * the correct number of queues when it completes the ADQ config
+ * flow
+ */
adapter->ch_config.total_qps = total_qps;
+
netif_tx_stop_all_queues(netdev);
netif_tx_disable(netdev);
adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
@@ -2828,6 +3676,12 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
}
}
exit:
+ if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
+ return 0;
+
+ netif_set_real_num_rx_queues(netdev, total_qps);
+ netif_set_real_num_tx_queues(netdev, total_qps);
+
return ret;
}
@@ -2910,7 +3764,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
match.mask->dst);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -2920,7 +3774,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
match.mask->src);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -2955,7 +3809,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
match.mask->vlan_id);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
@@ -2979,7 +3833,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
be32_to_cpu(match.mask->dst));
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -2988,14 +3842,14 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
field_flags |= IAVF_CLOUD_FIELD_IIP;
} else {
dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
- be32_to_cpu(match.mask->dst));
- return IAVF_ERR_CONFIG;
+ be32_to_cpu(match.mask->src));
+ return -EINVAL;
}
}
if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
if (match.key->dst) {
vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
@@ -3016,7 +3870,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
if (ipv6_addr_any(&match.mask->dst)) {
dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
IPV6_ADDR_ANY);
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
/* src and dest IPv6 address should not be LOOPBACK
@@ -3026,7 +3880,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
ipv6_addr_loopback(&match.key->src)) {
dev_err(&adapter->pdev->dev,
"ipv6 addr should not be loopback\n");
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
if (!ipv6_addr_any(&match.mask->dst) ||
!ipv6_addr_any(&match.mask->src))
@@ -3051,7 +3905,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
be16_to_cpu(match.mask->src));
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
@@ -3061,7 +3915,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
} else {
dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
be16_to_cpu(match.mask->dst));
- return IAVF_ERR_CONFIG;
+ return -EINVAL;
}
}
if (match.key->dst) {
@@ -3104,6 +3958,29 @@ static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
}
/**
+ * iavf_find_cf - Find the cloud filter in the list
+ * @adapter: Board private structure
+ * @cookie: filter specific cookie
+ *
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * cloud_filter_list_lock.
+ */
+static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
+ unsigned long *cookie)
+{
+ struct iavf_cloud_filter *filter = NULL;
+
+ if (!cookie)
+ return NULL;
+
+ list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
+ if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
+ return filter;
+ }
+ return NULL;
+}
+
+/**
* iavf_configure_clsflower - Add tc flower filters
* @adapter: board private structure
* @cls_flower: Pointer to struct flow_cls_offload
@@ -3134,6 +4011,15 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
filter->cookie = cls_flower->cookie;
+ /* bail out here if filter already exists */
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ if (iavf_find_cf(adapter, &cls_flower->cookie)) {
+ dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
+ err = -EEXIST;
+ goto spin_unlock;
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
/* set the mask to all zeroes to begin with */
memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
/* start out with flow type and eth type IPv4 to begin with */
@@ -3152,6 +4038,7 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter,
adapter->num_cloud_filters++;
filter->add = true;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
+spin_unlock:
spin_unlock_bh(&adapter->cloud_filter_list_lock);
err:
if (err)
@@ -3161,28 +4048,6 @@ err:
return err;
}
-/* iavf_find_cf - Find the cloud filter in the list
- * @adapter: Board private structure
- * @cookie: filter specific cookie
- *
- * Returns ptr to the filter object or NULL. Must be called while holding the
- * cloud_filter_list_lock.
- */
-static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
- unsigned long *cookie)
-{
- struct iavf_cloud_filter *filter = NULL;
-
- if (!cookie)
- return NULL;
-
- list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
- if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
- return filter;
- }
- return NULL;
-}
-
/**
* iavf_delete_clsflower - Remove tc flower filters
* @adapter: board private structure
@@ -3304,8 +4169,17 @@ static int iavf_open(struct net_device *netdev)
return -EIO;
}
- while (!mutex_trylock(&adapter->crit_lock))
+ while (!mutex_trylock(&adapter->crit_lock)) {
+ /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
+ * is already taken and iavf_open is called from an upper
+ * device's notifier reacting to the NETDEV_REGISTER event.
+ * We have to bail out here to avoid a deadlock.
+ */
+ if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
+ return -EBUSY;
+
usleep_range(500, 1000);
+ }
if (adapter->state != __IAVF_DOWN) {
err = -EBUSY;
@@ -3380,17 +4254,42 @@ err_unlock:
static int iavf_close(struct net_device *netdev)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
+ u64 aq_to_restore;
int status;
- if (adapter->state <= __IAVF_DOWN_PENDING)
- return 0;
+ mutex_lock(&adapter->crit_lock);
- while (!mutex_trylock(&adapter->crit_lock))
- usleep_range(500, 1000);
+ if (adapter->state <= __IAVF_DOWN_PENDING) {
+ mutex_unlock(&adapter->crit_lock);
+ return 0;
+ }
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter))
adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
+ /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
+ * IAVF_FLAG_AQ_DISABLE_QUEUES because in such a case there is an rtnl
+ * deadlock with adminq_task() until iavf_close times out. We must send
+ * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES so that
+ * disabling the queues stays possible for the VF. Give only the
+ * necessary flags to iavf_down and save the rest to be set right before
+ * iavf_close() returns, when IAVF_FLAG_AQ_DISABLE_QUEUES has already
+ * been sent and iavf is in the DOWN state.
+ */
+ aq_to_restore = adapter->aq_required;
+ adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
+
+ /* Remove flags which we do not want to send after close or we want to
+ * send before disable queues.
+ */
+ aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG |
+ IAVF_FLAG_AQ_ENABLE_QUEUES |
+ IAVF_FLAG_AQ_CONFIGURE_QUEUES |
+ IAVF_FLAG_AQ_ADD_VLAN_FILTER |
+ IAVF_FLAG_AQ_ADD_MAC_FILTER |
+ IAVF_FLAG_AQ_ADD_CLOUD_FILTER |
+ IAVF_FLAG_AQ_ADD_FDIR_FILTER |
+ IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
iavf_down(adapter);
iavf_change_state(adapter, __IAVF_DOWN_PENDING);
@@ -3414,6 +4313,10 @@ static int iavf_close(struct net_device *netdev)
msecs_to_jiffies(500));
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
+
+ mutex_lock(&adapter->crit_lock);
+ adapter->aq_required |= aq_to_restore;
+ mutex_unlock(&adapter->crit_lock);
return 0;
}
@@ -3428,17 +4331,27 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
+ netdev_dbg(netdev, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
if (CLIENT_ENABLED(adapter)) {
iavf_notify_client_l2_params(&adapter->vsi);
adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
}
- adapter->flags |= IAVF_FLAG_RESET_NEEDED;
- queue_work(iavf_wq, &adapter->reset_task);
+
+ if (netif_running(netdev)) {
+ adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+ queue_work(adapter->wq, &adapter->reset_task);
+ }
return 0;
}
+#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_RX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
/**
* iavf_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
@@ -3450,25 +4363,11 @@ static int iavf_set_features(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- /* Don't allow enabling VLAN features when adapter is not capable
- * of VLAN offload/filtering
- */
- if (!VLAN_ALLOWED(adapter)) {
- netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_FILTER);
- if (features & (NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_FILTER))
- return -EINVAL;
- } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- adapter->aq_required |=
- IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
- else
- adapter->aq_required |=
- IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
- }
+ /* trigger update on any VLAN feature change */
+ if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
+ (features & NETIF_VLAN_OFFLOAD_FEATURES))
+ iavf_set_vlan_offload_features(adapter, netdev->features,
+ features);
return 0;
}
@@ -3522,7 +4421,7 @@ static netdev_features_t iavf_features_check(struct sk_buff *skb,
}
/* No need to validate L4LEN as TCP is the only protocol with a
- * a flexible value and we support all possible values supported
+ * flexible value and we support all possible values supported
* by TCP, which is at most 15 dwords
*/
@@ -3532,6 +4431,228 @@ out_err:
}
/**
+ * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can be toggled on/off
+ * @adapter: board private structure
+ *
+ * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
+ * was negotiated, determine the VLAN features that can be toggled on and off.
+ **/
+static netdev_features_t
+iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
+{
+ netdev_features_t hw_features = 0;
+
+ if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
+ return hw_features;
+
+ /* Enable VLAN features if supported */
+ if (VLAN_ALLOWED(adapter)) {
+ hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX);
+ } else if (VLAN_V2_ALLOWED(adapter)) {
+ struct virtchnl_vlan_caps *vlan_v2_caps =
+ &adapter->vlan_v2_caps;
+ struct virtchnl_vlan_supported_caps *stripping_support =
+ &vlan_v2_caps->offloads.stripping_support;
+ struct virtchnl_vlan_supported_caps *insertion_support =
+ &vlan_v2_caps->offloads.insertion_support;
+
+ if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
+ stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
+ if (stripping_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_8100)
+ hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ if (stripping_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+ } else if (stripping_support->inner !=
+ VIRTCHNL_VLAN_UNSUPPORTED &&
+ stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
+ if (stripping_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_8100)
+ hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
+ insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
+ if (insertion_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_8100)
+ hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ if (insertion_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ } else if (insertion_support->inner &&
+ insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
+ if (insertion_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_8100)
+ hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ }
+ }
+
+ return hw_features;
+}
+
+/**
+ * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
+ * @adapter: board private structure
+ *
+ * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
+ * was negotiated, determine the VLAN features that are enabled by default.
+ **/
+static netdev_features_t
+iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
+{
+ netdev_features_t features = 0;
+
+ if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
+ return features;
+
+ if (VLAN_ALLOWED(adapter)) {
+ features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
+ } else if (VLAN_V2_ALLOWED(adapter)) {
+ struct virtchnl_vlan_caps *vlan_v2_caps =
+ &adapter->vlan_v2_caps;
+ struct virtchnl_vlan_supported_caps *filtering_support =
+ &vlan_v2_caps->filtering.filtering_support;
+ struct virtchnl_vlan_supported_caps *stripping_support =
+ &vlan_v2_caps->offloads.stripping_support;
+ struct virtchnl_vlan_supported_caps *insertion_support =
+ &vlan_v2_caps->offloads.insertion_support;
+ u32 ethertype_init;
+
+ /* give priority to outer stripping and don't support both outer
+ * and inner stripping
+ */
+ ethertype_init = vlan_v2_caps->offloads.ethertype_init;
+ if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (stripping_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_RX;
+ else if (stripping_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ features |= NETIF_F_HW_VLAN_STAG_RX;
+ } else if (stripping_support->inner !=
+ VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (stripping_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_RX;
+ }
+
+ /* give priority to outer insertion and don't support both outer
+ * and inner insertion
+ */
+ if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (insertion_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ else if (insertion_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ features |= NETIF_F_HW_VLAN_STAG_TX;
+ } else if (insertion_support->inner !=
+ VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (insertion_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ }
+
+ /* give priority to outer filtering and don't bother if both
+ * outer and inner filtering are enabled
+ */
+ ethertype_init = vlan_v2_caps->filtering.ethertype_init;
+ if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (filtering_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (filtering_support->outer &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ } else if (filtering_support->inner !=
+ VIRTCHNL_VLAN_UNSUPPORTED) {
+ if (filtering_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_8100 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
+ features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (filtering_support->inner &
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
+ ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
+ features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ }
+ }
+
+ return features;
+}
+
+#define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
+ (!(((requested) & (feature_bit)) && \
+ !((allowed) & (feature_bit))))
+
+/**
+ * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
+ * @adapter: board private structure
+ * @requested_features: stack requested NETDEV features
+ **/
+static netdev_features_t
+iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
+ netdev_features_t requested_features)
+{
+ netdev_features_t allowed_features;
+
+ allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
+ iavf_get_netdev_vlan_features(adapter);
+
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_CTAG_TX))
+ requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_CTAG_RX))
+ requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_STAG_TX))
+ requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_STAG_RX))
+ requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_CTAG_FILTER))
+ requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
+ allowed_features,
+ NETIF_F_HW_VLAN_STAG_FILTER))
+ requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
+
+ if ((requested_features &
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
+ (requested_features &
+ (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
+ adapter->vlan_v2_caps.offloads.ethertype_match ==
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
+ netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
+ requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX);
+ }
+
+ return requested_features;
+}
+
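The IAVF_NETDEV_VLAN_FEATURE_ALLOWED() macro above is a double negation: it evaluates false only when a feature bit is requested but missing from the allowed set, which is exactly the case iavf_fix_netdev_vlan_features() clears. A minimal illustration (the requested/allowed values below are made up for the example):

/* requested = NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
 * allowed   = NETIF_F_HW_VLAN_CTAG_TX;
 *
 * IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, NETIF_F_HW_VLAN_CTAG_TX) == true  -> bit kept
 * IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, NETIF_F_HW_VLAN_CTAG_RX) == false -> bit cleared
 * IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, NETIF_F_HW_VLAN_STAG_RX) == true  -> not requested, left alone
 */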
+/**
* iavf_fix_features - fix up the netdev feature bits
* @netdev: our net device
* @features: desired feature bits
@@ -3543,13 +4664,7 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- if (adapter->vf_res &&
- !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
- features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER);
-
- return features;
+ return iavf_fix_netdev_vlan_features(adapter, features);
}
static const struct net_device_ops iavf_netdev_ops = {
@@ -3601,39 +4716,11 @@ static int iavf_check_reset_complete(struct iavf_hw *hw)
int iavf_process_config(struct iavf_adapter *adapter)
{
struct virtchnl_vf_resource *vfres = adapter->vf_res;
- int i, num_req_queues = adapter->num_req_queues;
+ netdev_features_t hw_vlan_features, vlan_features;
struct net_device *netdev = adapter->netdev;
- struct iavf_vsi *vsi = &adapter->vsi;
netdev_features_t hw_enc_features;
netdev_features_t hw_features;
- /* got VF config message back from PF, now we can parse it */
- for (i = 0; i < vfres->num_vsis; i++) {
- if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
- adapter->vsi_res = &vfres->vsi_res[i];
- }
- if (!adapter->vsi_res) {
- dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
- return -ENODEV;
- }
-
- if (num_req_queues &&
- num_req_queues > adapter->vsi_res->num_queue_pairs) {
- /* Problem. The PF gave us fewer queues than what we had
- * negotiated in our request. Need a reset to see if we can't
- * get back to a working state.
- */
- dev_err(&adapter->pdev->dev,
- "Requested %d queues, but PF only gave us %d.\n",
- num_req_queues,
- adapter->vsi_res->num_queue_pairs);
- adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
- adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
- iavf_schedule_reset(adapter);
- return -ENODEV;
- }
- adapter->num_req_queues = 0;
-
hw_enc_features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
@@ -3677,19 +4764,19 @@ int iavf_process_config(struct iavf_adapter *adapter)
*/
hw_features = hw_enc_features;
- /* Enable VLAN features if supported */
- if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
- hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX);
+ /* get HW VLAN features that can be toggled */
+ hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
+
/* Enable cloud filter if ADQ is supported */
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
hw_features |= NETIF_F_HW_TC;
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
hw_features |= NETIF_F_GSO_UDP_L4;
- netdev->hw_features |= hw_features;
+ netdev->hw_features |= hw_features | hw_vlan_features;
+ vlan_features = iavf_get_netdev_vlan_features(adapter);
- netdev->features |= hw_features;
+ netdev->features |= hw_features | vlan_features;
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -3714,21 +4801,6 @@ int iavf_process_config(struct iavf_adapter *adapter)
netdev->features &= ~NETIF_F_GSO;
}
- adapter->vsi.id = adapter->vsi_res->vsi_id;
-
- adapter->vsi.back = adapter;
- adapter->vsi.base_vector = 1;
- adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
- vsi->netdev = adapter->netdev;
- vsi->qs_handle = adapter->vsi_res->qset_handle;
- if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
- adapter->rss_key_size = vfres->rss_key_size;
- adapter->rss_lut_size = vfres->rss_lut_size;
- } else {
- adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
- adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
- }
-
return 0;
}
@@ -3747,7 +4819,7 @@ static void iavf_shutdown(struct pci_dev *pdev)
iavf_close(netdev);
if (iavf_lock_timeout(&adapter->crit_lock, 5000))
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
+ dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__);
/* Prevent the watchdog from running. */
iavf_change_state(adapter, __IAVF_REMOVE);
adapter->aq_required = 0;
@@ -3784,12 +4856,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "DMA configuration failed: 0x%x\n", err);
- goto err_dma;
- }
+ dev_err(&pdev->dev,
+ "DMA configuration failed: 0x%x\n", err);
+ goto err_dma;
}
err = pci_request_regions(pdev, iavf_driver_name);
@@ -3799,8 +4868,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
@@ -3821,6 +4888,13 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw = &adapter->hw;
hw->back = adapter;
+ adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ iavf_driver_name);
+ if (!adapter->wq) {
+ err = -ENOMEM;
+ goto err_alloc_wq;
+ }
+
adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
iavf_change_state(adapter, __IAVF_STARTUP);
@@ -3847,7 +4921,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
mutex_init(&adapter->crit_lock);
mutex_init(&adapter->client_lock);
- mutex_init(&adapter->remove_lock);
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
@@ -3866,18 +4939,22 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
- queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Setup the wait queue for indicating transition to down status */
init_waitqueue_head(&adapter->down_waitqueue);
+ /* Setup the wait queue for indicating virtchannel events */
+ init_waitqueue_head(&adapter->vc_waitqueue);
+
return 0;
err_ioremap:
+ destroy_workqueue(adapter->wq);
+err_alloc_wq:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_pci_reg:
err_dma:
@@ -3944,7 +5021,7 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
return err;
}
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
netif_device_attach(adapter->netdev);
@@ -3963,23 +5040,48 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
static void iavf_remove(struct pci_dev *pdev)
{
struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
- enum iavf_state_t prev_state = adapter->last_state;
- struct net_device *netdev = adapter->netdev;
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
+ struct iavf_cloud_filter *cf, *cftmp;
struct iavf_adv_rss *rss, *rsstmp;
struct iavf_mac_filter *f, *ftmp;
- struct iavf_cloud_filter *cf, *cftmp;
- struct iavf_hw *hw = &adapter->hw;
+ struct net_device *netdev;
+ struct iavf_hw *hw;
int err;
- /* Indicate we are in remove and not to run reset_task */
- mutex_lock(&adapter->remove_lock);
- cancel_work_sync(&adapter->reset_task);
+
+ netdev = adapter->netdev;
+ hw = &adapter->hw;
+
+ if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
+ return;
+
+ /* Wait until port initialization is complete.
+ * There are flows where register/unregister netdev may race.
+ */
+ while (1) {
+ mutex_lock(&adapter->crit_lock);
+ if (adapter->state == __IAVF_RUNNING ||
+ adapter->state == __IAVF_DOWN ||
+ adapter->state == __IAVF_INIT_FAILED) {
+ mutex_unlock(&adapter->crit_lock);
+ break;
+ }
+ /* Simply return if we already went through iavf_shutdown */
+ if (adapter->state == __IAVF_REMOVE) {
+ mutex_unlock(&adapter->crit_lock);
+ return;
+ }
+
+ mutex_unlock(&adapter->crit_lock);
+ usleep_range(500, 1000);
+ }
cancel_delayed_work_sync(&adapter->watchdog_task);
- cancel_delayed_work_sync(&adapter->client_task);
+
if (adapter->netdev_registered) {
- unregister_netdev(netdev);
+ rtnl_lock();
+ unregister_netdevice(netdev);
adapter->netdev_registered = false;
+ rtnl_unlock();
}
if (CLIENT_ALLOWED(adapter)) {
err = iavf_lan_del_device(adapter);
@@ -3988,6 +5090,10 @@ static void iavf_remove(struct pci_dev *pdev)
err);
}
+ mutex_lock(&adapter->crit_lock);
+ dev_info(&adapter->pdev->dev, "Removing device\n");
+ iavf_change_state(adapter, __IAVF_REMOVE);
+
iavf_request_reset(adapter);
msleep(50);
/* If the FW isn't responding, kick it once, but only once. */
@@ -3995,36 +5101,24 @@ static void iavf_remove(struct pci_dev *pdev)
iavf_request_reset(adapter);
msleep(50);
}
- if (iavf_lock_timeout(&adapter->crit_lock, 5000))
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
+ iavf_misc_irq_disable(adapter);
/* Shut down all the garbage mashers on the detention level */
- iavf_change_state(adapter, __IAVF_REMOVE);
+ cancel_work_sync(&adapter->reset_task);
+ cancel_delayed_work_sync(&adapter->watchdog_task);
+ cancel_work_sync(&adapter->adminq_task);
+ cancel_delayed_work_sync(&adapter->client_task);
+
adapter->aq_required = 0;
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
iavf_free_all_tx_resources(adapter);
iavf_free_all_rx_resources(adapter);
- iavf_misc_irq_disable(adapter);
iavf_free_misc_irq(adapter);
- /* In case we enter iavf_remove from erroneous state, free traffic irqs
- * here, so as to not cause a kernel crash, when calling
- * iavf_reset_interrupt_capability.
- */
- if ((adapter->last_state == __IAVF_RESETTING &&
- prev_state != __IAVF_DOWN) ||
- (adapter->last_state == __IAVF_RUNNING &&
- !(netdev->flags & IFF_UP)))
- iavf_free_traffic_irqs(adapter);
-
iavf_reset_interrupt_capability(adapter);
iavf_free_q_vectors(adapter);
- cancel_delayed_work_sync(&adapter->watchdog_task);
-
- cancel_work_sync(&adapter->adminq_task);
-
iavf_free_rss(adapter);
if (hw->aq.asq.count)
@@ -4036,8 +5130,6 @@ static void iavf_remove(struct pci_dev *pdev)
mutex_destroy(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
mutex_destroy(&adapter->crit_lock);
- mutex_unlock(&adapter->remove_lock);
- mutex_destroy(&adapter->remove_lock);
iounmap(hw->hw_addr);
pci_release_regions(pdev);
@@ -4081,9 +5173,9 @@ static void iavf_remove(struct pci_dev *pdev)
}
spin_unlock_bh(&adapter->adv_rss_lock);
- free_netdev(netdev);
+ destroy_workqueue(adapter->wq);
- pci_disable_pcie_error_reporting(pdev);
+ free_netdev(netdev);
pci_disable_device(pdev);
}
@@ -4107,20 +5199,11 @@ static struct pci_driver iavf_driver = {
**/
static int __init iavf_init_module(void)
{
- int ret;
-
pr_info("iavf: %s\n", iavf_driver_string);
pr_info("%s\n", iavf_copyright);
- iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
- iavf_driver_name);
- if (!iavf_wq) {
- pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
- return -ENOMEM;
- }
- ret = pci_register_driver(&iavf_driver);
- return ret;
+ return pci_register_driver(&iavf_driver);
}
module_init(iavf_init_module);
@@ -4134,7 +5217,6 @@ module_init(iavf_init_module);
static void __exit iavf_exit_module(void)
{
pci_unregister_driver(&iavf_driver);
- destroy_workqueue(iavf_wq);
}
module_exit(iavf_exit_module);
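Taken together, the probe/remove and init/exit hunks above move the driver from a single module-global workqueue to one ordered workqueue per adapter. A minimal sketch of that ownership pattern, using hypothetical foo_* names rather than the driver's own symbols:

#include <linux/workqueue.h>

struct foo_adapter {
	struct workqueue_struct *wq;	/* per-device queue replacing a module global */
	struct work_struct reset_task;
};

static void foo_reset_task(struct work_struct *work)
{
	/* reset handling would go here */
}

static int foo_probe(struct foo_adapter *ad, const char *name)
{
	/* ordered queue: at most one work item runs at a time, preserving the
	 * ordering the old single-threaded global queue provided;
	 * WQ_MEM_RECLAIM because the work may run on the memory-reclaim path
	 */
	ad->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
	if (!ad->wq)
		return -ENOMEM;
	INIT_WORK(&ad->reset_task, foo_reset_task);
	return 0;
}

static void foo_remove(struct foo_adapter *ad)
{
	cancel_work_sync(&ad->reset_task);	/* nothing may requeue after this */
	destroy_workqueue(ad->wq);
}

Queueing then always goes through the adapter, e.g. queue_work(ad->wq, &ad->reset_task), which is the shape of the iavf_wq -> adapter->wq substitutions throughout this patch.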
diff --git a/drivers/net/ethernet/intel/iavf/iavf_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h
index 46e3d1f6b604..0e493ee9e9d1 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_status.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_status.h
@@ -18,7 +18,7 @@ enum iavf_status {
IAVF_ERR_ADAPTER_STOPPED = -9,
IAVF_ERR_INVALID_MAC_ADDR = -10,
IAVF_ERR_DEVICE_NOT_SUPPORTED = -11,
- IAVF_ERR_MASTER_REQUESTS_PENDING = -12,
+ IAVF_ERR_PRIMARY_REQUESTS_PENDING = -12,
IAVF_ERR_INVALID_LINK_SETTINGS = -13,
IAVF_ERR_AUTONEG_NOT_COMPLETE = -14,
IAVF_ERR_RESET_FAILED = -15,
@@ -64,7 +64,7 @@ enum iavf_status {
IAVF_ERR_BUF_TOO_SHORT = -55,
IAVF_ERR_ADMIN_QUEUE_FULL = -56,
IAVF_ERR_ADMIN_QUEUE_NO_WORK = -57,
- IAVF_ERR_BAD_IWARP_CQE = -58,
+ IAVF_ERR_BAD_RDMA_CQE = -58,
IAVF_ERR_NVM_BLANK_MODE = -59,
IAVF_ERR_NOT_IMPLEMENTED = -60,
IAVF_ERR_PE_DOORBELL_NOT_ENABLED = -61,
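The virtchnl hunks further down convert enum iavf_status return values into standard negative errno codes via iavf_status_to_errno(), whose body is not part of the excerpt shown here. A hedged sketch of what such a conversion looks like, with the individual mappings chosen purely for illustration:

#include <linux/errno.h>
/* enum iavf_status is the one declared in iavf_status.h above */

static int foo_status_to_errno(enum iavf_status status)
{
	switch (status) {
	case IAVF_SUCCESS:
		return 0;
	case IAVF_ERR_NOT_IMPLEMENTED:
		return -EOPNOTSUPP;	/* illustrative mapping */
	case IAVF_ERR_ADMIN_QUEUE_FULL:
		return -ENOSPC;		/* illustrative mapping */
	default:
		return -EIO;		/* catch-all for codes not mapped here */
	}
}

Callers such as iavf_send_pf_msg() can then hand the admin-queue layer's status to the rest of the stack as a conventional errno.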
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 3525eab8e9f9..e989feda133c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -114,8 +114,11 @@ u32 iavf_get_tx_pending(struct iavf_ring *ring, bool in_sw)
{
u32 head, tail;
+ /* underlying hardware might not allow access and/or always return
+ * 0 for the head/tail registers so just use the cached values
+ */
head = ring->next_to_clean;
- tail = readl(ring->tail);
+ tail = ring->next_to_use;
if (head != tail)
return (head < tail) ?
@@ -194,7 +197,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
struct iavf_tx_buffer *tx_buf;
struct iavf_tx_desc *tx_desc;
unsigned int total_bytes = 0, total_packets = 0;
- unsigned int budget = vsi->work_limit;
+ unsigned int budget = IAVF_DEFAULT_IRQ_WORK;
tx_buf = &tx_ring->tx_bi[i];
tx_desc = IAVF_TX_DESC(tx_ring, i);
@@ -374,29 +377,60 @@ static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
return &q_vector->rx == rc;
}
-static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)
+#define IAVF_AIM_MULTIPLIER_100G 2560
+#define IAVF_AIM_MULTIPLIER_50G 1280
+#define IAVF_AIM_MULTIPLIER_40G 1024
+#define IAVF_AIM_MULTIPLIER_20G 512
+#define IAVF_AIM_MULTIPLIER_10G 256
+#define IAVF_AIM_MULTIPLIER_1G 32
+
+static unsigned int iavf_mbps_itr_multiplier(u32 speed_mbps)
{
- unsigned int divisor;
+ switch (speed_mbps) {
+ case SPEED_100000:
+ return IAVF_AIM_MULTIPLIER_100G;
+ case SPEED_50000:
+ return IAVF_AIM_MULTIPLIER_50G;
+ case SPEED_40000:
+ return IAVF_AIM_MULTIPLIER_40G;
+ case SPEED_25000:
+ case SPEED_20000:
+ return IAVF_AIM_MULTIPLIER_20G;
+ case SPEED_10000:
+ default:
+ return IAVF_AIM_MULTIPLIER_10G;
+ case SPEED_1000:
+ case SPEED_100:
+ return IAVF_AIM_MULTIPLIER_1G;
+ }
+}
- switch (q_vector->adapter->link_speed) {
+static unsigned int
+iavf_virtchnl_itr_multiplier(enum virtchnl_link_speed speed_virtchnl)
+{
+ switch (speed_virtchnl) {
case VIRTCHNL_LINK_SPEED_40GB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;
- break;
+ return IAVF_AIM_MULTIPLIER_40G;
case VIRTCHNL_LINK_SPEED_25GB:
case VIRTCHNL_LINK_SPEED_20GB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;
- break;
- default:
+ return IAVF_AIM_MULTIPLIER_20G;
case VIRTCHNL_LINK_SPEED_10GB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;
- break;
+ default:
+ return IAVF_AIM_MULTIPLIER_10G;
case VIRTCHNL_LINK_SPEED_1GB:
case VIRTCHNL_LINK_SPEED_100MB:
- divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;
- break;
+ return IAVF_AIM_MULTIPLIER_1G;
}
+}
- return divisor;
+static unsigned int iavf_itr_divisor(struct iavf_adapter *adapter)
+{
+ if (ADV_LINK_SUPPORT(adapter))
+ return IAVF_ITR_ADAPTIVE_MIN_INC *
+ iavf_mbps_itr_multiplier(adapter->link_speed_mbps);
+ else
+ return IAVF_ITR_ADAPTIVE_MIN_INC *
+ iavf_virtchnl_itr_multiplier(adapter->link_speed);
}
/**
@@ -586,8 +620,9 @@ adjust_by_size:
* Use addition as we have already recorded the new latency flag
* for the ITR value.
*/
- itr += DIV_ROUND_UP(avg_wire_size, iavf_itr_divisor(q_vector)) *
- IAVF_ITR_ADAPTIVE_MIN_INC;
+ itr += DIV_ROUND_UP(avg_wire_size,
+ iavf_itr_divisor(q_vector->adapter)) *
+ IAVF_ITR_ADAPTIVE_MIN_INC;
if ((itr & IAVF_ITR_MASK) > IAVF_ITR_ADAPTIVE_MAX_USECS) {
itr &= IAVF_ITR_ADAPTIVE_LATENCY;
@@ -865,6 +900,9 @@ static void iavf_receive_skb(struct iavf_ring *rx_ring,
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) &&
+ vlan_tag & VLAN_VID_MASK)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
napi_gro_receive(&q_vector->napi, skb);
}
@@ -1058,7 +1096,7 @@ static inline void iavf_rx_hash(struct iavf_ring *ring,
cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (ring->netdev->features & NETIF_F_RXHASH)
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
@@ -1250,11 +1288,10 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
{
struct iavf_rx_buffer *rx_buffer;
- if (!size)
- return NULL;
-
rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
prefetchw(rx_buffer->page);
+ if (!size)
+ return rx_buffer;
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
@@ -1356,14 +1393,14 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
#endif
struct sk_buff *skb;
- if (!rx_buffer)
+ if (!rx_buffer || !size)
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
net_prefetch(va);
/* build an skb around the page buffer */
- skb = build_skb(va - IAVF_SKB_PAD, truesize);
+ skb = napi_build_skb(va - IAVF_SKB_PAD, truesize);
if (unlikely(!skb))
return NULL;
@@ -1468,7 +1505,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
struct iavf_rx_buffer *rx_buffer;
union iavf_rx_desc *rx_desc;
unsigned int size;
- u16 vlan_tag;
+ u16 vlan_tag = 0;
u8 rx_ptype;
u64 qword;
@@ -1514,7 +1551,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
- if (rx_buffer)
+ if (rx_buffer && size)
rx_buffer->pagecnt_bias++;
break;
}
@@ -1551,9 +1588,13 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
/* populate checksum, VLAN, and protocol */
iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
- vlan_tag = (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
- le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+ if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) &&
+ rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
+ vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);
+ if (rx_desc->wb.qword2.ext_status &
+ cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) &&
+ rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2)
+ vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2);
iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
iavf_receive_skb(rx_ring, skb, vlan_tag);
@@ -1766,7 +1807,7 @@ tx_only:
if (likely(napi_complete_done(napi, work_done)))
iavf_update_enable_itr(vsi, q_vector);
- return min(work_done, budget - 1);
+ return min_t(int, work_done, budget - 1);
}
/**
@@ -1781,46 +1822,29 @@ tx_only:
 * Returns an error code to indicate the frame should be dropped upon error and
 * otherwise returns 0 to indicate the flags have been set properly.
**/
-static inline int iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct iavf_ring *tx_ring,
- u32 *flags)
+static void iavf_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct iavf_ring *tx_ring, u32 *flags)
{
- __be16 protocol = skb->protocol;
u32 tx_flags = 0;
- if (protocol == htons(ETH_P_8021Q) &&
- !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
- /* When HW VLAN acceleration is turned off by the user the
- * stack sets the protocol to 8021q so that the driver
- * can take any steps required to support the SW only
- * VLAN handling. In our case the driver doesn't need
- * to take any further steps so just set the protocol
- * to the encapsulated ethertype.
- */
- skb->protocol = vlan_get_protocol(skb);
- goto out;
- }
-
- /* if we have a HW VLAN tag being added, default to the HW one */
- if (skb_vlan_tag_present(skb)) {
- tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
- tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
- /* else if it is a SW VLAN, check the next protocol and store the tag */
- } else if (protocol == htons(ETH_P_8021Q)) {
- struct vlan_hdr *vhdr, _vhdr;
- vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
- if (!vhdr)
- return -EINVAL;
+ /* stack will only request hardware VLAN insertion offload for protocols
+ * that the driver supports and has enabled
+ */
+ if (!skb_vlan_tag_present(skb))
+ return;
- protocol = vhdr->h_vlan_encapsulated_proto;
- tx_flags |= ntohs(vhdr->h_vlan_TCI) << IAVF_TX_FLAGS_VLAN_SHIFT;
- tx_flags |= IAVF_TX_FLAGS_SW_VLAN;
+ tx_flags |= skb_vlan_tag_get(skb) << IAVF_TX_FLAGS_VLAN_SHIFT;
+ if (tx_ring->flags & IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2) {
+ tx_flags |= IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
+ } else if (tx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) {
+ tx_flags |= IAVF_TX_FLAGS_HW_VLAN;
+ } else {
+ dev_dbg(tx_ring->dev, "Unsupported Tx VLAN tag location requested\n");
+ return;
}
-out:
*flags = tx_flags;
- return 0;
}
/**
@@ -2440,8 +2464,13 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
first->gso_segs = 1;
/* prepare the xmit flags */
- if (iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
- goto out_drop;
+ iavf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags);
+ if (tx_flags & IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
+ cd_type_cmd_tso_mss |= IAVF_TX_CTX_DESC_IL2TAG2 <<
+ IAVF_TXD_CTX_QW1_CMD_SHIFT;
+ cd_l2tag2 = (tx_flags & IAVF_TX_FLAGS_VLAN_MASK) >>
+ IAVF_TX_FLAGS_VLAN_SHIFT;
+ }
/* obtain protocol of skb */
protocol = vlan_get_protocol(skb);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index e5b9ba42dd00..2624bf6d009e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -243,19 +243,20 @@ static inline unsigned int iavf_txd_use_count(unsigned int size)
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)
#define IAVF_MIN_DESC_PENDING 4
-#define IAVF_TX_FLAGS_HW_VLAN BIT(1)
-#define IAVF_TX_FLAGS_SW_VLAN BIT(2)
-#define IAVF_TX_FLAGS_TSO BIT(3)
-#define IAVF_TX_FLAGS_IPV4 BIT(4)
-#define IAVF_TX_FLAGS_IPV6 BIT(5)
-#define IAVF_TX_FLAGS_FCCRC BIT(6)
-#define IAVF_TX_FLAGS_FSO BIT(7)
-#define IAVF_TX_FLAGS_FD_SB BIT(9)
-#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10)
-#define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000
-#define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
-#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29
-#define IAVF_TX_FLAGS_VLAN_SHIFT 16
+#define IAVF_TX_FLAGS_HW_VLAN BIT(1)
+#define IAVF_TX_FLAGS_SW_VLAN BIT(2)
+#define IAVF_TX_FLAGS_TSO BIT(3)
+#define IAVF_TX_FLAGS_IPV4 BIT(4)
+#define IAVF_TX_FLAGS_IPV6 BIT(5)
+#define IAVF_TX_FLAGS_FCCRC BIT(6)
+#define IAVF_TX_FLAGS_FSO BIT(7)
+#define IAVF_TX_FLAGS_FD_SB BIT(9)
+#define IAVF_TX_FLAGS_VXLAN_TUNNEL BIT(10)
+#define IAVF_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(11)
+#define IAVF_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IAVF_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
+#define IAVF_TX_FLAGS_VLAN_PRIO_SHIFT 29
+#define IAVF_TX_FLAGS_VLAN_SHIFT 16
struct iavf_tx_buffer {
struct iavf_tx_desc *next_to_watch;
@@ -362,6 +363,9 @@ struct iavf_ring {
u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0)
#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
+#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3)
+#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4)
+#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5)
/* stats structs */
struct iavf_queue_stats stats;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index d60bf7c21200..9afbbdac3590 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -5,10 +5,6 @@
#include "iavf_prototype.h"
#include "iavf_client.h"
-/* busy wait delay in msec */
-#define IAVF_BUSY_WAIT_DELAY 10
-#define IAVF_BUSY_WAIT_COUNT 50
-
/**
* iavf_send_pf_msg
* @adapter: adapter structure
@@ -22,17 +18,17 @@ static int iavf_send_pf_msg(struct iavf_adapter *adapter,
enum virtchnl_ops op, u8 *msg, u16 len)
{
struct iavf_hw *hw = &adapter->hw;
- enum iavf_status err;
+ enum iavf_status status;
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
return 0; /* nothing to see here, move along */
- err = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
- if (err)
- dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
- op, iavf_stat_str(hw, err),
+ status = iavf_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
+ if (status)
+ dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, status %s, aq_err %s\n",
+ op, iavf_stat_str(hw, status),
iavf_aq_str(hw, hw->aq.asq_last_status));
- return err;
+ return iavf_status_to_errno(status);
}
/**
@@ -55,6 +51,41 @@ int iavf_send_api_ver(struct iavf_adapter *adapter)
}
/**
+ * iavf_poll_virtchnl_msg
+ * @hw: HW configuration structure
+ * @event: event to populate on success
+ * @op_to_poll: requested virtchnl op to poll for
+ *
+ * Initialize polling for a virtchnl msg matching the requested op. Returns 0
+ * if a message of the correct opcode is in the queue, or an error code
+ * if no message matching the opcode is waiting, or on other failures.
+ */
+static int
+iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
+ enum virtchnl_ops op_to_poll)
+{
+ enum virtchnl_ops received_op;
+ enum iavf_status status;
+ u32 v_retval;
+
+ while (1) {
+ /* When the AQ is empty, iavf_clean_arq_element will return
+ * nonzero and this loop will terminate.
+ */
+ status = iavf_clean_arq_element(hw, event, NULL);
+ if (status != IAVF_SUCCESS)
+ return iavf_status_to_errno(status);
+ received_op =
+ (enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
+ if (op_to_poll == received_op)
+ break;
+ }
+
+ v_retval = le32_to_cpu(event->desc.cookie_low);
+ return virtchnl_status_to_errno((enum virtchnl_status_code)v_retval);
+}
+
+/**
* iavf_verify_api_ver
* @adapter: adapter structure
*
@@ -65,55 +96,28 @@ int iavf_send_api_ver(struct iavf_adapter *adapter)
**/
int iavf_verify_api_ver(struct iavf_adapter *adapter)
{
- struct virtchnl_version_info *pf_vvi;
- struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
- enum virtchnl_ops op;
- enum iavf_status err;
+ int err;
event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
- event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
- if (!event.msg_buf) {
- err = -ENOMEM;
- goto out;
- }
-
- while (1) {
- err = iavf_clean_arq_element(hw, &event, NULL);
- /* When the AQ is empty, iavf_clean_arq_element will return
- * nonzero and this loop will terminate.
- */
- if (err)
- goto out_alloc;
- op =
- (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == VIRTCHNL_OP_VERSION)
- break;
- }
+ event.msg_buf = kzalloc(IAVF_MAX_AQ_BUF_SIZE, GFP_KERNEL);
+ if (!event.msg_buf)
+ return -ENOMEM;
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event, VIRTCHNL_OP_VERSION);
+ if (!err) {
+ struct virtchnl_version_info *pf_vvi =
+ (struct virtchnl_version_info *)event.msg_buf;
+ adapter->pf_version = *pf_vvi;
- err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
- if (err)
- goto out_alloc;
-
- if (op != VIRTCHNL_OP_VERSION) {
- dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
- op);
- err = -EIO;
- goto out_alloc;
+ if (pf_vvi->major > VIRTCHNL_VERSION_MAJOR ||
+ (pf_vvi->major == VIRTCHNL_VERSION_MAJOR &&
+ pf_vvi->minor > VIRTCHNL_VERSION_MINOR))
+ err = -EIO;
}
- pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
- adapter->pf_version = *pf_vvi;
-
- if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
- ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
- (pf_vvi->minor > VIRTCHNL_VERSION_MINOR)))
- err = -EIO;
-
-out_alloc:
kfree(event.msg_buf);
-out:
+
return err;
}
@@ -137,6 +141,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP |
+ VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
VIRTCHNL_VF_OFFLOAD_ADQ |
@@ -155,6 +160,19 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
NULL, 0);
}
+int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter)
+{
+ adapter->aq_required &= ~IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
+
+ if (!VLAN_V2_ALLOWED(adapter))
+ return -EOPNOTSUPP;
+
+ adapter->current_op = VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS;
+
+ return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
+ NULL, 0);
+}
+
/**
* iavf_validate_num_queues
* @adapter: adapter structure
@@ -194,33 +212,17 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
{
struct iavf_hw *hw = &adapter->hw;
struct iavf_arq_event_info event;
- enum virtchnl_ops op;
- enum iavf_status err;
u16 len;
+ int err;
- len = sizeof(struct virtchnl_vf_resource) +
+ len = sizeof(struct virtchnl_vf_resource) +
IAVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
event.buf_len = len;
- event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
- if (!event.msg_buf) {
- err = -ENOMEM;
- goto out;
- }
+ event.msg_buf = kzalloc(len, GFP_KERNEL);
+ if (!event.msg_buf)
+ return -ENOMEM;
- while (1) {
- /* When the AQ is empty, iavf_clean_arq_element will return
- * nonzero and this loop will terminate.
- */
- err = iavf_clean_arq_element(hw, &event, NULL);
- if (err)
- goto out_alloc;
- op =
- (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
- if (op == VIRTCHNL_OP_GET_VF_RESOURCES)
- break;
- }
-
- err = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
+ err = iavf_poll_virtchnl_msg(hw, &event, VIRTCHNL_OP_GET_VF_RESOURCES);
memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len));
/* some PFs send more queues than we should have so validate that
@@ -229,9 +231,32 @@ int iavf_get_vf_config(struct iavf_adapter *adapter)
if (!err)
iavf_validate_num_queues(adapter);
iavf_vf_parse_hw_config(hw, adapter->vf_res);
-out_alloc:
+
kfree(event.msg_buf);
-out:
+
+ return err;
+}
+
+int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
+{
+ struct iavf_arq_event_info event;
+ int err;
+ u16 len;
+
+ len = sizeof(struct virtchnl_vlan_caps);
+ event.buf_len = len;
+ event.msg_buf = kzalloc(len, GFP_KERNEL);
+ if (!event.msg_buf)
+ return -ENOMEM;
+
+ err = iavf_poll_virtchnl_msg(&adapter->hw, &event,
+ VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS);
+ if (!err)
+ memcpy(&adapter->vlan_v2_caps, event.msg_buf,
+ min(event.msg_len, len));
+
+ kfree(event.msg_buf);
+
return err;
}
@@ -244,11 +269,14 @@ out:
void iavf_configure_queues(struct iavf_adapter *adapter)
{
struct virtchnl_vsi_queue_config_info *vqci;
- struct virtchnl_queue_pair_info *vqpi;
+ int i, max_frame = adapter->vf_res->max_mtu;
int pairs = adapter->num_active_queues;
- int i, max_frame = IAVF_MAX_RXBUFFER;
+ struct virtchnl_queue_pair_info *vqpi;
size_t len;
+ if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
+ max_frame = IAVF_MAX_RXBUFFER;
+
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure queues, command %d pending\n",
@@ -401,6 +429,20 @@ void iavf_map_queues(struct iavf_adapter *adapter)
}
/**
+ * iavf_set_mac_addr_type - Set the correct request type from the filter type
+ * @virtchnl_ether_addr: pointer to requested list element
+ * @filter: pointer to requested filter
+ **/
+static void
+iavf_set_mac_addr_type(struct virtchnl_ether_addr *virtchnl_ether_addr,
+ const struct iavf_mac_filter *filter)
+{
+ virtchnl_ether_addr->type = filter->is_primary ?
+ VIRTCHNL_ETHER_ADDR_PRIMARY :
+ VIRTCHNL_ETHER_ADDR_EXTRA;
+}
+
+/**
* iavf_add_ether_addrs
* @adapter: adapter structure
*
@@ -455,6 +497,7 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter)
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->add) {
ether_addr_copy(veal->list[i].addr, f->macaddr);
+ iavf_set_mac_addr_type(&veal->list[i], f);
i++;
f->add = false;
if (i == count)
@@ -524,6 +567,7 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
if (f->remove) {
ether_addr_copy(veal->list[i].addr, f->macaddr);
+ iavf_set_mac_addr_type(&veal->list[i], f);
i++;
list_del(&f->list);
kfree(f);
@@ -553,6 +597,8 @@ static void iavf_mac_add_ok(struct iavf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
f->is_new_mac = false;
+ if (!f->add && !f->add_handled)
+ f->add_handled = true;
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
@@ -573,6 +619,9 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter)
if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
f->remove = false;
+ if (!f->add && !f->add_handled)
+ f->add_handled = true;
+
if (f->is_new_mac) {
list_del(&f->list);
kfree(f);
@@ -582,6 +631,27 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter)
}
/**
+ * iavf_vlan_add_reject
+ * @adapter: adapter structure
+ *
+ * Remove VLAN filters from list based on PF response.
+ **/
+static void iavf_vlan_add_reject(struct iavf_adapter *adapter)
+{
+ struct iavf_vlan_filter *f, *ftmp;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ if (f->state == IAVF_VLAN_IS_NEW) {
+ list_del(&f->list);
+ kfree(f);
+ adapter->num_vlan_filters--;
+ }
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
* iavf_add_vlans
* @adapter: adapter structure
*
@@ -589,7 +659,6 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter)
**/
void iavf_add_vlans(struct iavf_adapter *adapter)
{
- struct virtchnl_vlan_filter_list *vvfl;
int len, i = 0, count = 0;
struct iavf_vlan_filter *f;
bool more = false;
@@ -604,51 +673,117 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (f->add)
+ if (f->state == IAVF_VLAN_ADD)
count++;
}
- if (!count || !VLAN_ALLOWED(adapter)) {
+ if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
- adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
- len = sizeof(struct virtchnl_vlan_filter_list) +
- (count * sizeof(u16));
- if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
- count = (IAVF_MAX_AQ_BUF_SIZE -
- sizeof(struct virtchnl_vlan_filter_list)) /
- sizeof(u16);
- len = sizeof(struct virtchnl_vlan_filter_list) +
- (count * sizeof(u16));
- more = true;
- }
- vvfl = kzalloc(len, GFP_ATOMIC);
- if (!vvfl) {
+ if (VLAN_ALLOWED(adapter)) {
+ struct virtchnl_vlan_filter_list *vvfl;
+
+ adapter->current_op = VIRTCHNL_OP_ADD_VLAN;
+
+ len = sizeof(*vvfl) + (count * sizeof(u16));
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
+ sizeof(u16);
+ len = sizeof(*vvfl) + (count * sizeof(u16));
+ more = true;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ return;
+ }
+
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->state == IAVF_VLAN_ADD) {
+ vvfl->vlan_id[i] = f->vlan.vid;
+ i++;
+ f->state = IAVF_VLAN_IS_NEW;
+ if (i == count)
+ break;
+ }
+ }
+ if (!more)
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- return;
- }
- vvfl->vsi_id = adapter->vsi_res->vsi_id;
- vvfl->num_elements = count;
- list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (f->add) {
- vvfl->vlan_id[i] = f->vlan;
- i++;
- f->add = false;
- if (i == count)
- break;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ } else {
+ u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters;
+ u16 current_vlans = iavf_get_num_vlans_added(adapter);
+ struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
+
+ adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;
+
+ if ((count + current_vlans) > max_vlans &&
+ current_vlans < max_vlans) {
+ count = max_vlans - iavf_get_num_vlans_added(adapter);
+ more = true;
}
- }
- if (!more)
- adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
- spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ len = sizeof(*vvfl_v2) + ((count - 1) *
+ sizeof(struct virtchnl_vlan_filter));
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "Too many add VLAN changes in one request\n");
+ count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl_v2)) /
+ sizeof(struct virtchnl_vlan_filter);
+ len = sizeof(*vvfl_v2) +
+ ((count - 1) *
+ sizeof(struct virtchnl_vlan_filter));
+ more = true;
+ }
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
- kfree(vvfl);
+ vvfl_v2 = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl_v2) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ return;
+ }
+
+ vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
+ vvfl_v2->num_elements = count;
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->state == IAVF_VLAN_ADD) {
+ struct virtchnl_vlan_supported_caps *filtering_support =
+ &adapter->vlan_v2_caps.filtering.filtering_support;
+ struct virtchnl_vlan *vlan;
+
+ if (i == count)
+ break;
+
+ /* give priority to the outer filter if it's enabled */
+ if (filtering_support->outer)
+ vlan = &vvfl_v2->filters[i].outer;
+ else
+ vlan = &vvfl_v2->filters[i].inner;
+
+ vlan->tci = f->vlan.vid;
+ vlan->tpid = f->vlan.tpid;
+
+ i++;
+ f->state = IAVF_VLAN_IS_NEW;
+ }
+ }
+
+ if (!more)
+ adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN_V2,
+ (u8 *)vvfl_v2, len);
+ kfree(vvfl_v2);
+ }
}
/**
@@ -659,7 +794,6 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
**/
void iavf_del_vlans(struct iavf_adapter *adapter)
{
- struct virtchnl_vlan_filter_list *vvfl;
struct iavf_vlan_filter *f, *ftmp;
int len, i = 0, count = 0;
bool more = false;
@@ -680,56 +814,135 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
* filters marked for removal to enable bailing out before
* sending a virtchnl message
*/
- if (f->remove && !VLAN_ALLOWED(adapter)) {
+ if (f->state == IAVF_VLAN_REMOVE &&
+ !VLAN_FILTERING_ALLOWED(adapter)) {
list_del(&f->list);
kfree(f);
- } else if (f->remove) {
+ adapter->num_vlan_filters--;
+ } else if (f->state == IAVF_VLAN_DISABLE &&
+ !VLAN_FILTERING_ALLOWED(adapter)) {
+ f->state = IAVF_VLAN_INACTIVE;
+ } else if (f->state == IAVF_VLAN_REMOVE ||
+ f->state == IAVF_VLAN_DISABLE) {
count++;
}
}
- if (!count) {
+ if (!count || !VLAN_FILTERING_ALLOWED(adapter)) {
adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
}
- adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
- len = sizeof(struct virtchnl_vlan_filter_list) +
- (count * sizeof(u16));
- if (len > IAVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
- count = (IAVF_MAX_AQ_BUF_SIZE -
- sizeof(struct virtchnl_vlan_filter_list)) /
- sizeof(u16);
- len = sizeof(struct virtchnl_vlan_filter_list) +
- (count * sizeof(u16));
- more = true;
- }
- vvfl = kzalloc(len, GFP_ATOMIC);
- if (!vvfl) {
+ if (VLAN_ALLOWED(adapter)) {
+ struct virtchnl_vlan_filter_list *vvfl;
+
+ adapter->current_op = VIRTCHNL_OP_DEL_VLAN;
+
+ len = sizeof(*vvfl) + (count * sizeof(u16));
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
+ count = (IAVF_MAX_AQ_BUF_SIZE - sizeof(*vvfl)) /
+ sizeof(u16);
+ len = sizeof(*vvfl) + (count * sizeof(u16));
+ more = true;
+ }
+ vvfl = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ return;
+ }
+
+ vvfl->vsi_id = adapter->vsi_res->vsi_id;
+ vvfl->num_elements = count;
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ if (f->state == IAVF_VLAN_DISABLE) {
+ vvfl->vlan_id[i] = f->vlan.vid;
+ f->state = IAVF_VLAN_INACTIVE;
+ i++;
+ if (i == count)
+ break;
+ } else if (f->state == IAVF_VLAN_REMOVE) {
+ vvfl->vlan_id[i] = f->vlan.vid;
+ list_del(&f->list);
+ kfree(f);
+ adapter->num_vlan_filters--;
+ i++;
+ if (i == count)
+ break;
+ }
+ }
+
+ if (!more)
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- return;
- }
- vvfl->vsi_id = adapter->vsi_res->vsi_id;
- vvfl->num_elements = count;
- list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
- if (f->remove) {
- vvfl->vlan_id[i] = f->vlan;
- i++;
- list_del(&f->list);
- kfree(f);
- if (i == count)
- break;
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+ kfree(vvfl);
+ } else {
+ struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
+
+ adapter->current_op = VIRTCHNL_OP_DEL_VLAN_V2;
+
+ len = sizeof(*vvfl_v2) +
+ ((count - 1) * sizeof(struct virtchnl_vlan_filter));
+ if (len > IAVF_MAX_AQ_BUF_SIZE) {
+ dev_warn(&adapter->pdev->dev, "Too many delete VLAN changes in one request\n");
+ count = (IAVF_MAX_AQ_BUF_SIZE -
+ sizeof(*vvfl_v2)) /
+ sizeof(struct virtchnl_vlan_filter);
+ len = sizeof(*vvfl_v2) +
+ ((count - 1) *
+ sizeof(struct virtchnl_vlan_filter));
+ more = true;
}
- }
- if (!more)
- adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
- spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ vvfl_v2 = kzalloc(len, GFP_ATOMIC);
+ if (!vvfl_v2) {
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ return;
+ }
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
- kfree(vvfl);
+ vvfl_v2->vport_id = adapter->vsi_res->vsi_id;
+ vvfl_v2->num_elements = count;
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ if (f->state == IAVF_VLAN_DISABLE ||
+ f->state == IAVF_VLAN_REMOVE) {
+ struct virtchnl_vlan_supported_caps *filtering_support =
+ &adapter->vlan_v2_caps.filtering.filtering_support;
+ struct virtchnl_vlan *vlan;
+
+ /* give priority to the outer filter if it's enabled */
+ if (filtering_support->outer)
+ vlan = &vvfl_v2->filters[i].outer;
+ else
+ vlan = &vvfl_v2->filters[i].inner;
+
+ vlan->tci = f->vlan.vid;
+ vlan->tpid = f->vlan.tpid;
+
+ if (f->state == IAVF_VLAN_DISABLE) {
+ f->state = IAVF_VLAN_INACTIVE;
+ } else {
+ list_del(&f->list);
+ kfree(f);
+ adapter->num_vlan_filters--;
+ }
+ i++;
+ if (i == count)
+ break;
+ }
+ }
+
+ if (!more)
+ adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN_V2,
+ (u8 *)vvfl_v2, len);
+ kfree(vvfl_v2);
+ }
}
/**
@@ -762,15 +975,23 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
if (flags & FLAG_VF_MULTICAST_PROMISC) {
adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
- dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
+ dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
+ adapter->netdev->name);
}
if (!flags) {
- adapter->flags &= ~(IAVF_FLAG_PROMISC_ON |
- IAVF_FLAG_ALLMULTI_ON);
- adapter->aq_required &= ~(IAVF_FLAG_AQ_RELEASE_PROMISC |
- IAVF_FLAG_AQ_RELEASE_ALLMULTI);
- dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+ if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
+ adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
+ dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+ }
+
+ if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
+ adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
+ adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
+ dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
+ adapter->netdev->name);
+ }
}
adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
@@ -948,6 +1169,204 @@ void iavf_disable_vlan_stripping(struct iavf_adapter *adapter)
iavf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, NULL, 0);
}
+/**
+ * iavf_tpid_to_vc_ethertype - transform from VLAN TPID to virtchnl ethertype
+ * @tpid: VLAN TPID (e.g. 0x8100, 0x88a8, etc.)
+ */
+static u32 iavf_tpid_to_vc_ethertype(u16 tpid)
+{
+ switch (tpid) {
+ case ETH_P_8021Q:
+ return VIRTCHNL_VLAN_ETHERTYPE_8100;
+ case ETH_P_8021AD:
+ return VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_set_vc_offload_ethertype - set virtchnl ethertype for offload message
+ * @adapter: adapter structure
+ * @msg: message structure used for updating offloads over virtchnl to update
+ * @tpid: VLAN TPID (e.g. 0x8100, 0x88a8, etc.)
+ * @offload_op: opcode used to determine which support structure to check
+ */
+static int
+iavf_set_vc_offload_ethertype(struct iavf_adapter *adapter,
+ struct virtchnl_vlan_setting *msg, u16 tpid,
+ enum virtchnl_ops offload_op)
+{
+ struct virtchnl_vlan_supported_caps *offload_support;
+ u16 vc_ethertype = iavf_tpid_to_vc_ethertype(tpid);
+
+ /* reference the correct offload support structure */
+ switch (offload_op) {
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+ offload_support =
+ &adapter->vlan_v2_caps.offloads.stripping_support;
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+ case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+ offload_support =
+ &adapter->vlan_v2_caps.offloads.insertion_support;
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Invalid opcode %d for setting virtchnl ethertype to enable/disable VLAN offloads\n",
+ offload_op);
+ return -EINVAL;
+ }
+
+ /* make sure ethertype is supported */
+ if (offload_support->outer & vc_ethertype &&
+ offload_support->outer & VIRTCHNL_VLAN_TOGGLE) {
+ msg->outer_ethertype_setting = vc_ethertype;
+ } else if (offload_support->inner & vc_ethertype &&
+ offload_support->inner & VIRTCHNL_VLAN_TOGGLE) {
+ msg->inner_ethertype_setting = vc_ethertype;
+ } else {
+ dev_dbg(&adapter->pdev->dev, "opcode %d unsupported for VLAN TPID 0x%04x\n",
+ offload_op, tpid);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * iavf_clear_offload_v2_aq_required - clear AQ required bit for offload request
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID
+ * @offload_op: opcode used to determine which AQ required bit to clear
+ */
+static void
+iavf_clear_offload_v2_aq_required(struct iavf_adapter *adapter, u16 tpid,
+ enum virtchnl_ops offload_op)
+{
+ switch (offload_op) {
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+ if (tpid == ETH_P_8021Q)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
+ else if (tpid == ETH_P_8021AD)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+ if (tpid == ETH_P_8021Q)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
+ else if (tpid == ETH_P_8021AD)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+ if (tpid == ETH_P_8021Q)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
+ else if (tpid == ETH_P_8021AD)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+ if (tpid == ETH_P_8021Q)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
+ else if (tpid == ETH_P_8021AD)
+ adapter->aq_required &=
+ ~IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Unsupported opcode %d specified for clearing aq_required bits for VIRTCHNL_VF_OFFLOAD_VLAN_V2 offload request\n",
+ offload_op);
+ }
+}
+
+/**
+ * iavf_send_vlan_offload_v2 - send offload enable/disable over virtchnl
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID used for the command (e.g. 0x8100 or 0x88a8)
+ * @offload_op: offload_op used to make the request over virtchnl
+ */
+static void
+iavf_send_vlan_offload_v2(struct iavf_adapter *adapter, u16 tpid,
+ enum virtchnl_ops offload_op)
+{
+ struct virtchnl_vlan_setting *msg;
+ int len = sizeof(*msg);
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot send %d, command %d pending\n",
+ offload_op, adapter->current_op);
+ return;
+ }
+
+ adapter->current_op = offload_op;
+
+ msg = kzalloc(len, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ msg->vport_id = adapter->vsi_res->vsi_id;
+
+ /* always clear to prevent unsupported and endless requests */
+ iavf_clear_offload_v2_aq_required(adapter, tpid, offload_op);
+
+ /* only send valid offload requests */
+ if (!iavf_set_vc_offload_ethertype(adapter, msg, tpid, offload_op))
+ iavf_send_pf_msg(adapter, offload_op, (u8 *)msg, len);
+ else
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+
+ kfree(msg);
+}
+
+/**
+ * iavf_enable_vlan_stripping_v2 - enable VLAN stripping
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID used to enable VLAN stripping
+ */
+void iavf_enable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
+{
+ iavf_send_vlan_offload_v2(adapter, tpid,
+ VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2);
+}
+
+/**
+ * iavf_disable_vlan_stripping_v2 - disable VLAN stripping
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID used to disable VLAN stripping
+ */
+void iavf_disable_vlan_stripping_v2(struct iavf_adapter *adapter, u16 tpid)
+{
+ iavf_send_vlan_offload_v2(adapter, tpid,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2);
+}
+
+/**
+ * iavf_enable_vlan_insertion_v2 - enable VLAN insertion
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID used to enable VLAN insertion
+ */
+void iavf_enable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
+{
+ iavf_send_vlan_offload_v2(adapter, tpid,
+ VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2);
+}
+
+/**
+ * iavf_disable_vlan_insertion_v2 - disable VLAN insertion
+ * @adapter: adapter structure
+ * @tpid: VLAN TPID used to disable VLAN insertion
+ */
+void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
+{
+ iavf_send_vlan_offload_v2(adapter, tpid,
+ VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
+}
+
#define IAVF_MAX_SPEED_STRLEN 13
/**
@@ -1017,7 +1436,7 @@ print_link_msg:
} else if (link_speed_mbps == SPEED_UNKNOWN) {
snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps");
} else {
- snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%u %s",
+ snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
link_speed_mbps, "Mbps");
}
@@ -1453,11 +1872,29 @@ void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter)
*
* Request that the PF reset this VF. No response is expected.
**/
-void iavf_request_reset(struct iavf_adapter *adapter)
+int iavf_request_reset(struct iavf_adapter *adapter)
{
+ int err;
/* Don't check CURRENT_OP - this is always higher priority */
- iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
+ err = iavf_send_pf_msg(adapter, VIRTCHNL_OP_RESET_VF, NULL, 0);
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ return err;
+}
+
+/**
+ * iavf_netdev_features_vlan_strip_set - update vlan strip status
+ * @netdev: ptr to netdev being adjusted
+ * @enable: enable or disable vlan strip
+ *
+ * Helper function to change vlan strip status in netdev->features.
+ */
+static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev,
+ const bool enable)
+{
+ if (enable)
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+ else
+ netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
/**
@@ -1522,11 +1959,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_print_link_message(adapter);
break;
case VIRTCHNL_EVENT_RESET_IMPENDING:
- dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
+ dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) {
adapter->flags |= IAVF_FLAG_RESET_PENDING;
dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
- queue_work(iavf_wq, &adapter->reset_task);
+ queue_work(adapter->wq, &adapter->reset_task);
}
break;
default:
@@ -1548,6 +1985,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_mac_add_reject(adapter);
/* restore administratively set MAC address */
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
+ wake_up(&adapter->vc_waitqueue);
break;
case VIRTCHNL_OP_DEL_VLAN:
dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
@@ -1683,8 +2121,23 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
+ /* Vlan stripping could not be enabled by ethtool.
+ * Disable it in netdev->features.
+ */
+ iavf_netdev_features_vlan_strip_set(netdev, false);
+ break;
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
+ /* Vlan stripping could not be disabled by ethtool.
+ * Enable it in netdev->features.
+ */
+ iavf_netdev_features_vlan_strip_set(netdev, true);
+ break;
+ case VIRTCHNL_OP_ADD_VLAN_V2:
+ iavf_vlan_add_reject(adapter);
+ dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n",
+ iavf_stat_str(&adapter->hw, v_retval));
break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
@@ -1697,7 +2150,13 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (!v_retval)
iavf_mac_add_ok(adapter);
if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
- eth_hw_addr_set(netdev, adapter->hw.mac.addr);
+ if (!ether_addr_equal(netdev->dev_addr,
+ adapter->hw.mac.addr)) {
+ netif_addr_lock_bh(netdev);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
+ netif_addr_unlock_bh(netdev);
+ }
+ wake_up(&adapter->vc_waitqueue);
break;
case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats =
@@ -1727,10 +2186,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
/* restore current mac address */
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
} else {
+ netif_addr_lock_bh(netdev);
/* refresh current mac address if changed */
- eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr,
adapter->hw.mac.addr);
+ netif_addr_unlock_bh(netdev);
}
spin_lock_bh(&adapter->mac_vlan_list_lock);
iavf_add_filter(adapter, adapter->hw.mac.addr);
@@ -1743,7 +2203,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
list_for_each_entry(vlf,
&adapter->vlan_filter_list,
list)
- vlf->add = true;
+ vlf->state = IAVF_VLAN_ADD;
adapter->aq_required |=
IAVF_FLAG_AQ_ADD_VLAN_FILTER;
@@ -1751,20 +2211,80 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- iavf_process_config(adapter);
- /* unlock crit_lock before acquiring rtnl_lock as other
- * processes holding rtnl_lock could be waiting for the same
- * crit_lock
+ iavf_parse_vf_resource_msg(adapter);
+
+ /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the
+ * response to VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS to finish
+ * configuration
+ */
+ if (VLAN_V2_ALLOWED(adapter))
+ break;
+ /* fallthrough and finish config if VIRTCHNL_VF_OFFLOAD_VLAN_V2
+ * wasn't successfully negotiated with the PF
*/
- mutex_unlock(&adapter->crit_lock);
- rtnl_lock();
- netdev_update_features(adapter->netdev);
- rtnl_unlock();
- if (iavf_lock_timeout(&adapter->crit_lock, 10000))
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
- __FUNCTION__);
+ }
+ fallthrough;
+ case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS: {
+ struct iavf_mac_filter *f;
+ bool was_mac_changed;
+ u64 aq_required = 0;
+
+ if (v_opcode == VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS)
+ memcpy(&adapter->vlan_v2_caps, msg,
+ min_t(u16, msglen,
+ sizeof(adapter->vlan_v2_caps)));
+
+ iavf_process_config(adapter);
+ adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
+
+ /* Request VLAN offload settings */
+ if (VLAN_V2_ALLOWED(adapter))
+ iavf_set_vlan_offload_features(adapter, 0,
+ netdev->features);
+
+ iavf_set_queue_vlan_tag_loc(adapter);
+
+ was_mac_changed = !ether_addr_equal(netdev->dev_addr,
+ adapter->hw.mac.addr);
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
+ /* re-add all MAC filters */
+ list_for_each_entry(f, &adapter->mac_filter_list, list) {
+ if (was_mac_changed &&
+ ether_addr_equal(netdev->dev_addr, f->macaddr))
+ ether_addr_copy(f->macaddr,
+ adapter->hw.mac.addr);
+
+ f->is_new_mac = true;
+ f->add = true;
+ f->add_handled = false;
+ f->remove = false;
+ }
+
+ /* re-add all VLAN filters */
+ if (VLAN_FILTERING_ALLOWED(adapter)) {
+ struct iavf_vlan_filter *vlf;
+
+ if (!list_empty(&adapter->vlan_filter_list)) {
+ list_for_each_entry(vlf,
+ &adapter->vlan_filter_list,
+ list)
+ vlf->state = IAVF_VLAN_ADD;
+
+ aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ }
+ }
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
+ netif_addr_lock_bh(netdev);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
+ netif_addr_unlock_bh(netdev);
+ adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER |
+ aq_required;
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
@@ -1789,7 +2309,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
- case VIRTCHNL_OP_IWARP:
+ case VIRTCHNL_OP_RDMA:
/* Gobble zero-length replies from the PF. They indicate that
* a previous message was received OK, and the client doesn't
* care about that.
@@ -1798,9 +2318,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_notify_client_message(&adapter->vsi, msg, msglen);
break;
- case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
adapter->client_pending &=
- ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
+ ~(BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP));
break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
@@ -1930,6 +2450,31 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
spin_unlock_bh(&adapter->adv_rss_lock);
}
break;
+ case VIRTCHNL_OP_ADD_VLAN_V2: {
+ struct iavf_vlan_filter *f;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ if (f->state == IAVF_VLAN_IS_NEW)
+ f->state = IAVF_VLAN_ACTIVE;
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ }
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ /* PF enabled vlan strip on this VF.
+ * Update netdev->features if needed to be in sync with ethtool.
+ */
+ if (!v_retval)
+ iavf_netdev_features_vlan_strip_set(netdev, true);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ /* PF disabled vlan strip on this VF.
+ * Update netdev->features if needed to be in sync with ethtool.
+ */
+ if (!v_retval)
+ iavf_netdev_features_vlan_strip_set(netdev, false);
+ break;
default:
if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index c36faa7d1471..5d89392f969b 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -18,21 +18,33 @@ ice-y := ice_main.o \
ice_txrx_lib.o \
ice_txrx.o \
ice_fltr.o \
+ ice_pf_vsi_vlan_ops.o \
+ ice_vsi_vlan_ops.o \
+ ice_vsi_vlan_lib.o \
ice_fdir.o \
ice_ethtool_fdir.o \
+ ice_vlan_mode.o \
ice_flex_pipe.o \
ice_flow.o \
ice_idc.o \
ice_devlink.o \
+ ice_ddp.o \
ice_fw_update.o \
ice_lag.o \
ice_ethtool.o \
ice_repr.o \
ice_tc_lib.o
-ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o
-ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o
+ice-$(CONFIG_PCI_IOV) += \
+ ice_sriov.o \
+ ice_virtchnl.o \
+ ice_virtchnl_allowlist.o \
+ ice_virtchnl_fdir.o \
+ ice_vf_mbx.o \
+ ice_vf_vsi_vlan_ops.o \
+ ice_vf_lib.o
ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o
+ice-$(CONFIG_GNSS) += ice_gnss.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index b2db39ee5f85..aa32111afd6e 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -20,7 +20,6 @@
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
-#include <linux/aer.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
@@ -39,7 +38,9 @@
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
+#include <linux/gnss.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_gact.h>
#include <net/ip.h>
@@ -51,9 +52,8 @@
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
-#if IS_ENABLED(CONFIG_DCB)
-#include <scsi/iscsi_proto.h>
-#endif /* CONFIG_DCB */
+#include <net/gtp.h>
+#include <linux/ppp_defs.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
@@ -63,8 +63,8 @@
#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
-#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
+#include "ice_vf_mbx.h"
#include "ice_ptp.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
@@ -72,6 +72,8 @@
#include "ice_repr.h"
#include "ice_eswitch.h"
#include "ice_lag.h"
+#include "ice_vsi_vlan_ops.h"
+#include "ice_gnss.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -107,12 +109,10 @@
/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1)
#define ICE_INVAL_Q_INDEX 0xffff
-#define ICE_INVAL_VFID 256
#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */
#define ICE_CHNL_START_TC 1
-#define ICE_CHNL_MAX_TC 16
#define ICE_MAX_RESET_WAIT 20
@@ -122,6 +122,8 @@
#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)
+#define ICE_MAX_TSO_SIZE 131072
+
#define ICE_UP_TABLE_TRANSLATE(val, i) \
(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
ICE_AQ_VSI_UP_TABLE_UP##i##_M)
@@ -138,6 +140,21 @@
*/
#define ICE_BW_KBPS_DIVISOR 125
+/* Default recipes have priority 4 and below, hence priority values between 5..7
+ * can be used as filter priority for advanced switch filter (advanced switch
+ * filters need new recipe to be created for specified extraction sequence
+ * because default recipe extraction sequence does not represent custom
+ * extraction)
+ */
+#define ICE_SWITCH_FLTR_PRIO_QUEUE 7
+/* prio 6 is reserved for future use (e.g. switch filter with L3 fields +
+ * (Optional: IP TOS/TTL) + L4 fields + (optionally: TCP fields such as
+ * SYN/FIN/RST))
+ */
+#define ICE_SWITCH_FLTR_PRIO_RSVD 6
+#define ICE_SWITCH_FLTR_PRIO_VSI 5
+#define ICE_SWITCH_FLTR_PRIO_QGRP ICE_SWITCH_FLTR_PRIO_VSI
+
/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
@@ -183,7 +200,9 @@
enum ice_feature {
ICE_F_DSCP,
+ ICE_F_PTP_EXTTS,
ICE_F_SMA_CTRL,
+ ICE_F_GNSS,
ICE_F_MAX
};
@@ -201,6 +220,7 @@ struct ice_channel {
struct ice_aqc_vsi_props info;
u64 max_tx_rate;
u64 min_tx_rate;
+ atomic_t num_sb_fltr;
struct ice_vsi *ch_vsi;
};
@@ -246,8 +266,6 @@ struct ice_sw {
struct ice_pf *pf;
u16 sw_id; /* switch ID for this switch */
u16 bridge_mode; /* VEB/VEPA/Port Virtualizer */
- struct ice_vsi *dflt_vsi; /* default VSI for this switch */
- u8 dflt_vsi_ena:1; /* true if above dflt_vsi is enabled */
};
enum ice_pf_state {
@@ -280,7 +298,6 @@ enum ice_pf_state {
ICE_VFLR_EVENT_PENDING,
ICE_FLTR_OVERFLOW_PROMISC,
ICE_VF_DIS,
- ICE_VF_DEINIT_IN_PROGRESS,
ICE_CFG_BUSY,
ICE_SERVICE_SCHED,
ICE_SERVICE_DIS,
@@ -291,6 +308,7 @@ enum ice_pf_state {
ICE_LINK_DEFAULT_OVERRIDE_PENDING,
ICE_PHY_INIT_COMPLETE,
ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */
+ ICE_AUX_ERR_PENDING,
ICE_STATE_NBITS /* must be last */
};
@@ -301,11 +319,15 @@ enum ice_vsi_state {
ICE_VSI_NETDEV_REGISTERED,
ICE_VSI_UMAC_FLTR_CHANGED,
ICE_VSI_MMAC_FLTR_CHANGED,
- ICE_VSI_VLAN_FLTR_CHANGED,
ICE_VSI_PROMISC_CHANGED,
ICE_VSI_STATE_NBITS /* must be last */
};
+struct ice_vsi_stats {
+ struct ice_ring_stats **tx_ring_stats; /* Tx ring stats array */
+ struct ice_ring_stats **rx_ring_stats; /* Rx ring stats array */
+};
+
/* struct that defines a VSI, associated with a dev */
struct ice_vsi {
struct net_device *netdev;
@@ -331,9 +353,8 @@ struct ice_vsi {
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
- s16 vf_id; /* VF ID for SR-IOV VSIs */
+ struct ice_vf *vf; /* VF associated with this VSI */
- u16 ethtype; /* Ethernet protocol for pause frame */
u16 num_gfltr;
u16 num_bfltr;
@@ -359,6 +380,7 @@ struct ice_vsi {
/* VSI stats */
struct rtnl_link_stats64 net_stats;
+ struct rtnl_link_stats64 net_stats_prev;
struct ice_eth_stats eth_stats;
struct ice_eth_stats eth_stats_prev;
@@ -368,6 +390,8 @@ struct ice_vsi {
u8 irqs_ready:1;
u8 current_isup:1; /* Sync 'link up' logging */
u8 stat_offsets_loaded:1;
+ struct ice_vsi_vlan_ops inner_vlan_ops;
+ struct ice_vsi_vlan_ops outer_vlan_ops;
u16 num_vlan;
/* queue information */
@@ -468,7 +492,6 @@ enum ice_pf_flags {
ICE_FLAG_FD_ENA,
ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */
ICE_FLAG_PTP, /* PTP is enabled by software */
- ICE_FLAG_AUX_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */
ICE_FLAG_CLS_FLOWER,
@@ -482,7 +505,12 @@ enum ice_pf_flags {
ICE_FLAG_LEGACY_RX,
ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
+ ICE_FLAG_VF_VLAN_PRUNING,
ICE_FLAG_LINK_LENIENT_MODE_ENA,
+ ICE_FLAG_PLUG_AUX_DEV,
+ ICE_FLAG_UNPLUG_AUX_DEV,
+ ICE_FLAG_MTU_CHANGED,
+ ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -503,6 +531,7 @@ struct ice_pf {
struct pci_dev *pdev;
struct devlink_region *nvm_region;
+ struct devlink_region *sram_region;
struct devlink_region *devcaps_region;
/* devlink port data */
@@ -520,17 +549,10 @@ struct ice_pf {
u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
struct ice_vsi **vsi; /* VSIs created by the driver */
+ struct ice_vsi_stats **vsi_stats;
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
- /* Virtchnl/SR-IOV config info */
- struct ice_vf *vf;
- u16 num_alloc_vfs; /* actual number of VFs allocated */
- u16 num_vfs_supported; /* num VFs supported for this PF */
- u16 num_qps_per_vf;
- u16 num_msix_per_vf;
- /* used to ratelimit the MDD event logging */
- unsigned long last_printed_mdd_jiffies;
- DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT);
+ struct ice_vfs vfs;
DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
@@ -543,8 +565,11 @@ struct ice_pf {
struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
+ struct mutex adev_mutex; /* lock to protect aux device access */
u32 msg_enable;
struct ice_ptp ptp;
+ struct gnss_serial *gnss_serial;
+ struct gnss_device *gnss_dev;
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
u16 rdma_base_vector;
@@ -552,10 +577,12 @@ struct ice_pf {
spinlock_t aq_wait_lock;
struct hlist_head aq_wait_list;
wait_queue_head_t aq_wait_queue;
+ bool fw_emp_reset_disabled;
wait_queue_head_t reset_wait_queue;
u32 hw_csum_rx_error;
+ u32 oicr_err_reg;
u16 oicr_idx; /* Other interrupt cause MSIX vector index */
u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
u16 max_pf_txqs; /* Total Tx queues PF wide */
@@ -576,6 +603,7 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded:1; /* has previous stats been loaded */
+ u8 rdma_mode;
u16 dcbx_cap;
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
@@ -590,6 +618,8 @@ struct ice_pf {
u16 num_dmac_chnl_fltrs;
struct hlist_head tc_flower_fltr_list;
+ u64 supported_rxdids;
+
__le64 nvm_phy_type_lo; /* NVM PHY type low */
__le64 nvm_phy_type_hi; /* NVM PHY type high */
struct ice_link_default_override_tlv link_dflt_override;
@@ -668,7 +698,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
{
- return !!vsi->xdp_prog;
+ return !!READ_ONCE(vsi->xdp_prog);
}
static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
@@ -680,8 +710,8 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
* ice_xsk_pool - get XSK buffer pool bound to a ring
* @ring: Rx ring to use
*
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise.
+ * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
+ * present, NULL otherwise.
*/
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
@@ -695,23 +725,33 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
}
/**
- * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: Tx ring to use
+ * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
+ *
+ * Sets XSK buff pool pointer on XDP ring.
*
- * Returns a pointer to xdp_umem structure if there is a buffer pool present,
- * NULL otherwise. Tx equivalent of ice_xsk_pool.
+ * XDP ring is picked from Rx ring, whereas Rx ring is picked based on provided
+ * queue id. Reason for doing so is that queue vectors might have assigned more
+ * than one XDP ring, e.g. when user reduced the queue count on netdev; Rx ring
+ * carries a pointer to one of these XDP rings for its own purposes, such as
+ * handling XDP_TX action, therefore we can piggyback here on the
+ * rx_ring->xdp_ring assignment that was done during XDP rings initialization.
*/
-static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
{
- struct ice_vsi *vsi = ring->vsi;
- u16 qid;
+ struct ice_tx_ring *ring;
- qid = ring->q_index - vsi->num_xdp_txq;
+ ring = vsi->rx_rings[qid]->xdp_ring;
+ if (!ring)
+ return;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
- return NULL;
+ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
+ ring->xsk_pool = NULL;
+ return;
+ }
- return xsk_get_pool_from_qid(vsi->netdev, qid);
+ ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
}
/**
@@ -755,6 +795,21 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
}
/**
+ * ice_find_vsi - Find the VSI from VSI ID
+ * @pf: The PF pointer to search in
+ * @vsi_num: The VSI ID to search for
+ */
+static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
+{
+ int i;
+
+ ice_for_each_vsi(pf, i)
+ if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
+ return pf->vsi[i];
+ return NULL;
+}
+
+/**
* ice_is_switchdev_running - check if switchdev is configured
* @pf: pointer to PF structure
*
@@ -789,6 +844,9 @@ static inline void ice_clear_sriov_cap(struct ice_pf *pf)
#define ICE_FD_STAT_PF_IDX(base_idx) \
((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
+#define ICE_FD_STAT_CH 1
+#define ICE_FD_CH_STAT_IDX(base_idx) \
+ (ICE_FD_STAT_PF_IDX(base_idx) + ICE_FD_STAT_CH)
/**
* ice_is_adq_active - any active ADQs
@@ -824,12 +882,16 @@ void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
-int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
+void
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
+ struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
-int ice_vsi_cfg(struct ice_vsi *vsi);
+int ice_down_up(struct ice_vsi *vsi);
+int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
@@ -847,9 +909,10 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
-const char *ice_stat_str(enum ice_status stat_err);
+void ice_deinit_rdma(struct ice_pf *pf);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
+void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
bool is_tun);
@@ -860,6 +923,7 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd);
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
u32 *rule_locs);
+void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx);
void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf);
@@ -870,6 +934,8 @@ int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
+int ice_load(struct ice_pf *pf);
+void ice_unload(struct ice_pf *pf);
/**
* ice_set_rdma_cap - enable RDMA support
@@ -879,8 +945,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
{
if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- set_bit(ICE_FLAG_AUX_ENA, pf->flags);
- ice_plug_aux_dev(pf);
+ set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
}
}
@@ -890,8 +955,11 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
*/
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
- ice_unplug_aux_dev(pf);
+ /* defer unplug to service task to avoid RTNL lock and
+ * clear PLUG bit so that pending plugs don't interfere
+ */
+ clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
+ set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags);
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
}
#endif /* _ICE_H_ */
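
The ice.h hunk above reworks ice_tx_xsk_pool() from a lookup that returned a pool into a setter that resolves the XDP ring through vsi->rx_rings[qid]->xdp_ring and writes ring->xsk_pool (or NULL) in place. Below is a compilable sketch of that assignment pattern using stand-in types; the structure names, the pools[] array standing in for xsk_get_pool_from_qid(), and the boolean flags are placeholders, not the driver's real definitions.

#include <stdbool.h>
#include <stdio.h>

struct xsk_pool { int id; };

struct tx_ring { struct xsk_pool *xsk_pool; };

struct rx_ring { struct tx_ring *xdp_ring; };

struct vsi {
	struct rx_ring **rx_rings;
	bool xdp_enabled;		/* stand-in for ice_is_xdp_ena_vsi() */
	bool *af_xdp_zc_qps;		/* per-queue zero-copy flag */
	struct xsk_pool **pools;	/* stand-in for xsk_get_pool_from_qid() */
};

/* mirrors the control flow of ice_tx_xsk_pool() in the hunk above */
static void tx_xsk_pool(struct vsi *vsi, unsigned short qid)
{
	struct tx_ring *ring = vsi->rx_rings[qid]->xdp_ring;

	if (!ring)
		return;

	if (!vsi->xdp_enabled || !vsi->af_xdp_zc_qps[qid]) {
		ring->xsk_pool = NULL;
		return;
	}

	ring->xsk_pool = vsi->pools[qid];
}

int main(void)
{
	struct xsk_pool pool = { .id = 7 };
	struct tx_ring xdp = { .xsk_pool = NULL };
	struct rx_ring rx = { .xdp_ring = &xdp };
	struct rx_ring *rx_rings[1] = { &rx };
	bool zc[1] = { true };
	struct xsk_pool *pools[1] = { &pool };
	struct vsi vsi = {
		.rx_rings = rx_rings,
		.xdp_enabled = true,
		.af_xdp_zc_qps = zc,
		.pools = pools,
	};

	tx_xsk_pool(&vsi, 0);
	printf("xdp ring pool id: %d\n", xdp.xsk_pool ? xdp.xsk_pool->id : -1);
	return 0;
}

The indirection matches the reasoning in the new kernel-doc: a queue vector can own more than one XDP ring, so the rx_ring->xdp_ring pointer established at XDP init is the reliable way back to the ring whose pool needs updating.
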
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 4eef3488d86f..838d9b274d68 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -117,6 +117,8 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_NET_VER 0x004C
#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
#define ICE_AQC_CAPS_RDMA 0x0051
+#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
+#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
u8 major_ver;
@@ -224,6 +226,15 @@ struct ice_aqc_get_sw_cfg_resp_elem {
#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15)
};
+/* Set Port parameters, (direct, 0x0203) */
+struct ice_aqc_set_port_params {
+ __le16 cmd_flags;
+#define ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA BIT(2)
+ __le16 bad_frame_vsi;
+ __le16 swid;
+ u8 reserved[10];
+};
+
/* These resource type defines are used for all switch resource
* commands where a resource type is required, such as:
* Get Resource Allocation command (indirect 0x0204)
@@ -281,6 +292,40 @@ struct ice_aqc_alloc_free_res_elem {
struct ice_aqc_res_elem elem[];
};
+/* Request buffer for Set VLAN Mode AQ command (indirect 0x020C) */
+struct ice_aqc_set_vlan_mode {
+ u8 reserved;
+ u8 l2tag_prio_tagging;
+#define ICE_AQ_VLAN_PRIO_TAG_S 0
+#define ICE_AQ_VLAN_PRIO_TAG_M (0x7 << ICE_AQ_VLAN_PRIO_TAG_S)
+#define ICE_AQ_VLAN_PRIO_TAG_NOT_SUPPORTED 0x0
+#define ICE_AQ_VLAN_PRIO_TAG_STAG 0x1
+#define ICE_AQ_VLAN_PRIO_TAG_OUTER_CTAG 0x2
+#define ICE_AQ_VLAN_PRIO_TAG_OUTER_VLAN 0x3
+#define ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG 0x4
+#define ICE_AQ_VLAN_PRIO_TAG_MAX 0x4
+#define ICE_AQ_VLAN_PRIO_TAG_ERROR 0x7
+ u8 l2tag_reserved[64];
+ u8 rdma_packet;
+#define ICE_AQ_VLAN_RDMA_TAG_S 0
+#define ICE_AQ_VLAN_RDMA_TAG_M (0x3F << ICE_AQ_VLAN_RDMA_TAG_S)
+#define ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING 0x10
+#define ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING 0x1A
+ u8 rdma_reserved[2];
+ u8 mng_vlan_prot_id;
+#define ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER 0x10
+#define ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER 0x11
+ u8 prot_id_reserved[30];
+};
+
+/* Response buffer for Get VLAN Mode AQ command (indirect 0x020D) */
+struct ice_aqc_get_vlan_mode {
+ u8 vlan_mode;
+#define ICE_AQ_VLAN_MODE_DVM_ENA BIT(0)
+ u8 l2tag_prio_tagging;
+ u8 reserved[98];
+};
+
/* Add VSI (indirect 0x0210)
* Update VSI (indirect 0x0211)
* Get VSI (indirect 0x0212)
@@ -341,108 +386,113 @@ struct ice_aqc_vsi_props {
#define ICE_AQ_VSI_SW_FLAG_SRC_PRUNE BIT(7)
u8 sw_flags2;
#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S 0
-#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M \
- (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)
+#define ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M (0xF << ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_S)
#define ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA BIT(0)
#define ICE_AQ_VSI_SW_FLAG_LAN_ENA BIT(4)
u8 veb_stat_id;
#define ICE_AQ_VSI_SW_VEB_STAT_ID_S 0
-#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S)
+#define ICE_AQ_VSI_SW_VEB_STAT_ID_M (0x1F << ICE_AQ_VSI_SW_VEB_STAT_ID_S)
#define ICE_AQ_VSI_SW_VEB_STAT_ID_VALID BIT(5)
/* security section */
u8 sec_flags;
#define ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD BIT(0)
#define ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF BIT(2)
-#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4
-#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)
+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S 4
+#define ICE_AQ_VSI_SEC_TX_PRUNE_ENA_M (0xF << ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S)
#define ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA BIT(0)
u8 sec_reserved;
/* VLAN section */
- __le16 pvid; /* VLANS include priority bits */
- u8 pvlan_reserved[2];
- u8 vlan_flags;
-#define ICE_AQ_VSI_VLAN_MODE_S 0
-#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S)
-#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED 0x1
-#define ICE_AQ_VSI_VLAN_MODE_TAGGED 0x2
-#define ICE_AQ_VSI_VLAN_MODE_ALL 0x3
-#define ICE_AQ_VSI_PVLAN_INSERT_PVID BIT(2)
-#define ICE_AQ_VSI_VLAN_EMOD_S 3
-#define ICE_AQ_VSI_VLAN_EMOD_M (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_STR_UP (0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_STR (0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
-#define ICE_AQ_VSI_VLAN_EMOD_NOTHING (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
- u8 pvlan_reserved2[3];
+ __le16 port_based_inner_vlan; /* VLANS include priority bits */
+ u8 inner_vlan_reserved[2];
+ u8 inner_vlan_flags;
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_S 0
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_TX_MODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTTAGGED 0x2
+#define ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL 0x3
+#define ICE_AQ_VSI_INNER_VLAN_INSERT_PVID BIT(2)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_S 3
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH (0x0 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR_UP (0x1 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_STR (0x2 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING (0x3 << ICE_AQ_VSI_INNER_VLAN_EMODE_S)
+ u8 inner_vlan_reserved2[3];
/* ingress egress up sections */
__le32 ingress_table; /* bitmap, 3 bits per up */
-#define ICE_AQ_VSI_UP_TABLE_UP0_S 0
-#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S)
-#define ICE_AQ_VSI_UP_TABLE_UP1_S 3
-#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S)
-#define ICE_AQ_VSI_UP_TABLE_UP2_S 6
-#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S)
-#define ICE_AQ_VSI_UP_TABLE_UP3_S 9
-#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S)
-#define ICE_AQ_VSI_UP_TABLE_UP4_S 12
-#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S)
-#define ICE_AQ_VSI_UP_TABLE_UP5_S 15
-#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S)
-#define ICE_AQ_VSI_UP_TABLE_UP6_S 18
-#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S)
-#define ICE_AQ_VSI_UP_TABLE_UP7_S 21
-#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S)
+#define ICE_AQ_VSI_UP_TABLE_UP0_S 0
+#define ICE_AQ_VSI_UP_TABLE_UP0_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP0_S)
+#define ICE_AQ_VSI_UP_TABLE_UP1_S 3
+#define ICE_AQ_VSI_UP_TABLE_UP1_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP1_S)
+#define ICE_AQ_VSI_UP_TABLE_UP2_S 6
+#define ICE_AQ_VSI_UP_TABLE_UP2_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP2_S)
+#define ICE_AQ_VSI_UP_TABLE_UP3_S 9
+#define ICE_AQ_VSI_UP_TABLE_UP3_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP3_S)
+#define ICE_AQ_VSI_UP_TABLE_UP4_S 12
+#define ICE_AQ_VSI_UP_TABLE_UP4_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP4_S)
+#define ICE_AQ_VSI_UP_TABLE_UP5_S 15
+#define ICE_AQ_VSI_UP_TABLE_UP5_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP5_S)
+#define ICE_AQ_VSI_UP_TABLE_UP6_S 18
+#define ICE_AQ_VSI_UP_TABLE_UP6_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP6_S)
+#define ICE_AQ_VSI_UP_TABLE_UP7_S 21
+#define ICE_AQ_VSI_UP_TABLE_UP7_M (0x7 << ICE_AQ_VSI_UP_TABLE_UP7_S)
__le32 egress_table; /* same defines as for ingress table */
/* outer tags section */
- __le16 outer_tag;
- u8 outer_tag_flags;
-#define ICE_AQ_VSI_OUTER_TAG_MODE_S 0
-#define ICE_AQ_VSI_OUTER_TAG_MODE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_MODE_S)
-#define ICE_AQ_VSI_OUTER_TAG_NOTHING 0x0
-#define ICE_AQ_VSI_OUTER_TAG_REMOVE 0x1
-#define ICE_AQ_VSI_OUTER_TAG_COPY 0x2
-#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2
-#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S)
-#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0
-#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1
-#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2
-#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3
-#define ICE_AQ_VSI_OUTER_TAG_INSERT BIT(4)
-#define ICE_AQ_VSI_OUTER_TAG_ACCEPT_HOST BIT(6)
- u8 outer_tag_reserved;
+ __le16 port_based_outer_vlan;
+ u8 outer_vlan_flags;
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_S 0
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_EMODE_S)
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH 0x0
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_UP 0x1
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW 0x2
+#define ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING 0x3
+#define ICE_AQ_VSI_OUTER_TAG_TYPE_S 2
+#define ICE_AQ_VSI_OUTER_TAG_TYPE_M (0x3 << ICE_AQ_VSI_OUTER_TAG_TYPE_S)
+#define ICE_AQ_VSI_OUTER_TAG_NONE 0x0
+#define ICE_AQ_VSI_OUTER_TAG_STAG 0x1
+#define ICE_AQ_VSI_OUTER_TAG_VLAN_8100 0x2
+#define ICE_AQ_VSI_OUTER_TAG_VLAN_9100 0x3
+#define ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT BIT(4)
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S 5
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M (0x3 << ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S)
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED 0x1
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTTAGGED 0x2
+#define ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL 0x3
+#define ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC BIT(7)
+ u8 outer_vlan_reserved;
/* queue mapping section */
__le16 mapping_flags;
-#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0
-#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0)
+#define ICE_AQ_VSI_Q_MAP_CONTIG 0x0
+#define ICE_AQ_VSI_Q_MAP_NONCONTIG BIT(0)
__le16 q_mapping[16];
-#define ICE_AQ_VSI_Q_S 0
-#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S)
+#define ICE_AQ_VSI_Q_S 0
+#define ICE_AQ_VSI_Q_M (0x7FF << ICE_AQ_VSI_Q_S)
__le16 tc_mapping[8];
-#define ICE_AQ_VSI_TC_Q_OFFSET_S 0
-#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S)
-#define ICE_AQ_VSI_TC_Q_NUM_S 11
-#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S)
+#define ICE_AQ_VSI_TC_Q_OFFSET_S 0
+#define ICE_AQ_VSI_TC_Q_OFFSET_M (0x7FF << ICE_AQ_VSI_TC_Q_OFFSET_S)
+#define ICE_AQ_VSI_TC_Q_NUM_S 11
+#define ICE_AQ_VSI_TC_Q_NUM_M (0xF << ICE_AQ_VSI_TC_Q_NUM_S)
/* queueing option section */
u8 q_opt_rss;
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2
-#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3
-#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
-#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
-#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
-#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_S 0
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_LUT_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI 0x0
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_PF 0x2
+#define ICE_AQ_VSI_Q_OPT_RSS_LUT_GBL 0x3
+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S 2
+#define ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M (0xF << ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_S 6
+#define ICE_AQ_VSI_Q_OPT_RSS_HASH_M (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_TPLZ (0x0 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_SYM_TPLZ (0x1 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_XOR (0x2 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
+#define ICE_AQ_VSI_Q_OPT_RSS_JHASH (0x3 << ICE_AQ_VSI_Q_OPT_RSS_HASH_S)
u8 q_opt_tc;
-#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
-#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
-#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7)
+#define ICE_AQ_VSI_Q_OPT_TC_OVR_S 0
+#define ICE_AQ_VSI_Q_OPT_TC_OVR_M (0x1F << ICE_AQ_VSI_Q_OPT_TC_OVR_S)
+#define ICE_AQ_VSI_Q_OPT_PROF_TC_OVR BIT(7)
u8 q_opt_flags;
-#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0)
+#define ICE_AQ_VSI_Q_OPT_PE_FLTR_EN BIT(0)
u8 q_opt_reserved[3];
/* outer up section */
__le32 outer_up_table; /* same structure and defines as ingress tbl */
@@ -450,27 +500,27 @@ struct ice_aqc_vsi_props {
__le16 sect_10_reserved;
/* flow director section */
__le16 fd_options;
-#define ICE_AQ_VSI_FD_ENABLE BIT(0)
-#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1)
-#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3)
+#define ICE_AQ_VSI_FD_ENABLE BIT(0)
+#define ICE_AQ_VSI_FD_TX_AUTO_ENABLE BIT(1)
+#define ICE_AQ_VSI_FD_PROG_ENABLE BIT(3)
__le16 max_fd_fltr_dedicated;
__le16 max_fd_fltr_shared;
__le16 fd_def_q;
-#define ICE_AQ_VSI_FD_DEF_Q_S 0
-#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S)
-#define ICE_AQ_VSI_FD_DEF_GRP_S 12
-#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S)
+#define ICE_AQ_VSI_FD_DEF_Q_S 0
+#define ICE_AQ_VSI_FD_DEF_Q_M (0x7FF << ICE_AQ_VSI_FD_DEF_Q_S)
+#define ICE_AQ_VSI_FD_DEF_GRP_S 12
+#define ICE_AQ_VSI_FD_DEF_GRP_M (0x7 << ICE_AQ_VSI_FD_DEF_GRP_S)
__le16 fd_report_opt;
-#define ICE_AQ_VSI_FD_REPORT_Q_S 0
-#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S)
-#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12
-#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S)
-#define ICE_AQ_VSI_FD_DEF_DROP BIT(15)
+#define ICE_AQ_VSI_FD_REPORT_Q_S 0
+#define ICE_AQ_VSI_FD_REPORT_Q_M (0x7FF << ICE_AQ_VSI_FD_REPORT_Q_S)
+#define ICE_AQ_VSI_FD_DEF_PRIORITY_S 12
+#define ICE_AQ_VSI_FD_DEF_PRIORITY_M (0x7 << ICE_AQ_VSI_FD_DEF_PRIORITY_S)
+#define ICE_AQ_VSI_FD_DEF_DROP BIT(15)
/* PASID section */
__le32 pasid_id;
-#define ICE_AQ_VSI_PASID_ID_S 0
-#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S)
-#define ICE_AQ_VSI_PASID_ID_VALID BIT(31)
+#define ICE_AQ_VSI_PASID_ID_S 0
+#define ICE_AQ_VSI_PASID_ID_M (0xFFFFF << ICE_AQ_VSI_PASID_ID_S)
+#define ICE_AQ_VSI_PASID_ID_VALID BIT(31)
u8 reserved[24];
};
@@ -487,9 +537,13 @@ struct ice_aqc_add_get_recipe {
struct ice_aqc_recipe_content {
u8 rid;
+#define ICE_AQ_RECIPE_ID_S 0
+#define ICE_AQ_RECIPE_ID_M (0x3F << ICE_AQ_RECIPE_ID_S)
#define ICE_AQ_RECIPE_ID_IS_ROOT BIT(7)
#define ICE_AQ_SW_ID_LKUP_IDX 0
u8 lkup_indx[5];
+#define ICE_AQ_RECIPE_LKUP_DATA_S 0
+#define ICE_AQ_RECIPE_LKUP_DATA_M (0x3F << ICE_AQ_RECIPE_LKUP_DATA_S)
#define ICE_AQ_RECIPE_LKUP_IGNORE BIT(7)
#define ICE_AQ_SW_ID_LKUP_MASK 0x00FF
__le16 mask[5];
@@ -500,15 +554,25 @@ struct ice_aqc_recipe_content {
u8 rsvd0[3];
u8 act_ctrl_join_priority;
u8 act_ctrl_fwd_priority;
+#define ICE_AQ_RECIPE_FWD_PRIORITY_S 0
+#define ICE_AQ_RECIPE_FWD_PRIORITY_M (0xF << ICE_AQ_RECIPE_FWD_PRIORITY_S)
u8 act_ctrl;
+#define ICE_AQ_RECIPE_ACT_NEED_PASS_L2 BIT(0)
+#define ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2 BIT(1)
#define ICE_AQ_RECIPE_ACT_INV_ACT BIT(2)
+#define ICE_AQ_RECIPE_ACT_PRUNE_INDX_S 4
+#define ICE_AQ_RECIPE_ACT_PRUNE_INDX_M (0x3 << ICE_AQ_RECIPE_ACT_PRUNE_INDX_S)
u8 rsvd1;
__le32 dflt_act;
+#define ICE_AQ_RECIPE_DFLT_ACT_S 0
+#define ICE_AQ_RECIPE_DFLT_ACT_M (0x7FFFF << ICE_AQ_RECIPE_DFLT_ACT_S)
+#define ICE_AQ_RECIPE_DFLT_ACT_VALID BIT(31)
};
struct ice_aqc_recipe_data_elem {
u8 recipe_indx;
u8 resp_bits;
+#define ICE_AQ_RECIPE_WAS_UPDATED BIT(0)
u8 rsvd0[2];
u8 recipe_bitmap[8];
u8 rsvd1[4];
@@ -537,12 +601,30 @@ struct ice_aqc_sw_rules {
__le32 addr_low;
};
+/* Add switch rule response:
+ * Content of return buffer is same as the input buffer. The status field and
+ * LUT index are updated as part of the response
+ */
+struct ice_aqc_sw_rules_elem_hdr {
+ __le16 type; /* Switch rule type, one of T_... */
+#define ICE_AQC_SW_RULES_T_LKUP_RX 0x0
+#define ICE_AQC_SW_RULES_T_LKUP_TX 0x1
+#define ICE_AQC_SW_RULES_T_LG_ACT 0x2
+#define ICE_AQC_SW_RULES_T_VSI_LIST_SET 0x3
+#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR 0x4
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET 0x5
+#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR 0x6
+ __le16 status;
+} __packed __aligned(sizeof(__le16));
+
/* Add/Update/Get/Remove lookup Rx/Tx command/response entry
 * This structure describes the lookup rules and associated actions. "index"
* is returned as part of a response to a successful Add command, and can be
* used to identify the rule for Update/Get/Remove commands.
*/
struct ice_sw_rule_lkup_rx_tx {
+ struct ice_aqc_sw_rules_elem_hdr hdr;
+
__le16 recipe_id;
#define ICE_SW_RECIPE_LOGICAL_PORT_FWD 10
/* Source port for LOOKUP_RX and source VSI in case of LOOKUP_TX */
@@ -619,14 +701,16 @@ struct ice_sw_rule_lkup_rx_tx {
* lookup-type
*/
__le16 hdr_len;
- u8 hdr[];
-};
+ u8 hdr_data[];
+} __packed __aligned(sizeof(__le16));
/* Add/Update/Remove large action command/response entry
* "index" is returned as part of a response to a successful Add command, and
* can be used to identify the action for Update/Get/Remove commands.
*/
struct ice_sw_rule_lg_act {
+ struct ice_aqc_sw_rules_elem_hdr hdr;
+
__le16 index; /* Index in large action table */
__le16 size;
/* Max number of large actions */
@@ -680,45 +764,19 @@ struct ice_sw_rule_lg_act {
#define ICE_LG_ACT_STAT_COUNT_S 3
#define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S)
__le32 act[]; /* array of size for actions */
-};
+} __packed __aligned(sizeof(__le16));
/* Add/Update/Remove VSI list command/response entry
* "index" is returned as part of a response to a successful Add command, and
* can be used to identify the VSI list for Update/Get/Remove commands.
*/
struct ice_sw_rule_vsi_list {
+ struct ice_aqc_sw_rules_elem_hdr hdr;
+
__le16 index; /* Index of VSI/Prune list */
__le16 number_vsi;
__le16 vsi[]; /* Array of number_vsi VSI numbers */
-};
-
-/* Query VSI list command/response entry */
-struct ice_sw_rule_vsi_list_query {
- __le16 index;
- DECLARE_BITMAP(vsi_list, ICE_MAX_VSI);
-} __packed;
-
-/* Add switch rule response:
- * Content of return buffer is same as the input buffer. The status field and
- * LUT index are updated as part of the response
- */
-struct ice_aqc_sw_rules_elem {
- __le16 type; /* Switch rule type, one of T_... */
-#define ICE_AQC_SW_RULES_T_LKUP_RX 0x0
-#define ICE_AQC_SW_RULES_T_LKUP_TX 0x1
-#define ICE_AQC_SW_RULES_T_LG_ACT 0x2
-#define ICE_AQC_SW_RULES_T_VSI_LIST_SET 0x3
-#define ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR 0x4
-#define ICE_AQC_SW_RULES_T_PRUNE_LIST_SET 0x5
-#define ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR 0x6
- __le16 status;
- union {
- struct ice_sw_rule_lkup_rx_tx lkup_tx_rx;
- struct ice_sw_rule_lg_act lg_act;
- struct ice_sw_rule_vsi_list vsi_list;
- struct ice_sw_rule_vsi_list_query vsi_list_query;
- } __packed pdata;
-};
+} __packed __aligned(sizeof(__le16));
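/* Editor's note (illustrative sketch, not part of the patch): with the
 * per-type ice_aqc_sw_rules_elem_hdr now embedded in each rule struct and the
 * old ice_aqc_sw_rules_elem union gone, a caller can size a rule directly
 * from its flexible array member. Only fields visible in this hunk are used;
 * the remaining members (source, action, index) are omitted and the recipe id
 * below is a made-up placeholder.
 */
static struct ice_sw_rule_lkup_rx_tx *
ice_alloc_lkup_rule_example(const u8 *pkt, u16 pkt_len, u16 *rule_sz)
{
	struct ice_sw_rule_lkup_rx_tx *rule;

	*rule_sz = struct_size(rule, hdr_data, pkt_len);
	rule = kzalloc(*rule_sz, GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
	rule->recipe_id = cpu_to_le16(0);	/* placeholder recipe */
	rule->hdr_len = cpu_to_le16(pkt_len);
	memcpy(rule->hdr_data, pkt, pkt_len);	/* training packet/header */

	/* caller hands rule and *rule_sz to the driver's sw-rules AQ helper */
	return rule;
}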
/* Query PFC Mode (direct 0x0302)
* Set PFC Mode (direct 0x0303)
@@ -790,9 +848,9 @@ struct ice_aqc_txsched_elem {
u8 generic;
#define ICE_AQC_ELEM_GENERIC_MODE_M 0x1
#define ICE_AQC_ELEM_GENERIC_PRIO_S 0x1
-#define ICE_AQC_ELEM_GENERIC_PRIO_M (0x7 << ICE_AQC_ELEM_GENERIC_PRIO_S)
+#define ICE_AQC_ELEM_GENERIC_PRIO_M GENMASK(3, 1)
#define ICE_AQC_ELEM_GENERIC_SP_S 0x4
-#define ICE_AQC_ELEM_GENERIC_SP_M (0x1 << ICE_AQC_ELEM_GENERIC_SP_S)
+#define ICE_AQC_ELEM_GENERIC_SP_M GENMASK(4, 4)
#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S 0x5
#define ICE_AQC_ELEM_GENERIC_ADJUST_VAL_M \
(0x3 << ICE_AQC_ELEM_GENERIC_ADJUST_VAL_S)
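/* Editor's note (illustrative, not part of the patch): expressing the masks
 * as GENMASK() lets callers use FIELD_GET()/FIELD_PREP() from
 * <linux/bitfield.h> instead of open-coded shift-and-mask pairs.
 */
static inline u8
ice_sched_elem_prio_example(const struct ice_aqc_txsched_elem *elem)
{
	return FIELD_GET(ICE_AQC_ELEM_GENERIC_PRIO_M, elem->generic);
}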
@@ -1337,6 +1395,24 @@ struct ice_aqc_get_link_topo {
u8 rsvd[9];
};
+/* Read/Write I2C (direct, 0x06E2/0x06E3) */
+struct ice_aqc_i2c {
+ struct ice_aqc_link_topo_addr topo_addr;
+ __le16 i2c_addr;
+ u8 i2c_params;
+#define ICE_AQC_I2C_DATA_SIZE_M GENMASK(3, 0)
+#define ICE_AQC_I2C_USE_REPEATED_START BIT(7)
+
+ u8 rsvd;
+ __le16 i2c_bus_addr;
+ u8 i2c_data[4]; /* Used only by write command, reserved in read. */
+};
+
+/* Read I2C Response (direct, 0x06E2) */
+struct ice_aqc_read_i2c_resp {
+ u8 i2c_data[16];
+};
+
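/* Editor's note (hedged sketch, not part of the patch): a read through the
 * new I2C opcode would roughly follow the usual direct-command pattern below.
 * The wrapper itself and how topo_addr is obtained are assumptions; only the
 * structs and the opcode come from this patch.
 */
static int ice_read_i2c_example(struct ice_hw *hw,
				struct ice_aqc_link_topo_addr topo_addr,
				u16 bus_addr, u16 addr, u8 len, u8 *data)
{
	struct ice_aqc_read_i2c_resp *resp;
	struct ice_aqc_i2c *cmd;
	struct ice_aq_desc desc;
	int err;

	if (!data || len > sizeof(resp->i2c_data))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;
	cmd->topo_addr = topo_addr;
	cmd->i2c_addr = cpu_to_le16(addr);
	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->i2c_params = len & ICE_AQC_I2C_DATA_SIZE_M;

	err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (err)
		return err;

	/* for the direct read command the data comes back in the descriptor */
	resp = &desc.params.read_i2c_resp;
	memcpy(data, resp->i2c_data, len);
	return 0;
}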
/* Set Port Identification LED (direct, 0x06E9) */
struct ice_aqc_set_port_id_led {
u8 lport_num;
@@ -1347,6 +1423,56 @@ struct ice_aqc_set_port_id_led {
u8 rsvd[13];
};
+/* Get Port Options (indirect, 0x06EA) */
+struct ice_aqc_get_port_options {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 port_options_count;
+#define ICE_AQC_PORT_OPT_COUNT_M GENMASK(3, 0)
+#define ICE_AQC_PORT_OPT_MAX 16
+
+ u8 innermost_phy_index;
+ u8 port_options;
+#define ICE_AQC_PORT_OPT_ACTIVE_M GENMASK(3, 0)
+#define ICE_AQC_PORT_OPT_VALID BIT(7)
+
+ u8 pending_port_option_status;
+#define ICE_AQC_PENDING_PORT_OPT_IDX_M GENMASK(3, 0)
+#define ICE_AQC_PENDING_PORT_OPT_VALID BIT(7)
+
+ u8 rsvd[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_get_port_options_elem {
+ u8 pmd;
+#define ICE_AQC_PORT_OPT_PMD_COUNT_M GENMASK(3, 0)
+
+ u8 max_lane_speed;
+#define ICE_AQC_PORT_OPT_MAX_LANE_M GENMASK(3, 0)
+#define ICE_AQC_PORT_OPT_MAX_LANE_100M 0
+#define ICE_AQC_PORT_OPT_MAX_LANE_1G 1
+#define ICE_AQC_PORT_OPT_MAX_LANE_2500M 2
+#define ICE_AQC_PORT_OPT_MAX_LANE_5G 3
+#define ICE_AQC_PORT_OPT_MAX_LANE_10G 4
+#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
+#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
+#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+
+ u8 global_scid[2];
+ u8 phy_scid[2];
+ u8 pf2port_cid[2];
+};
+
+/* Set Port Option (direct, 0x06EB) */
+struct ice_aqc_set_port_option {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 selected_port_option;
+ u8 rsvd[13];
+};
+
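/* Editor's note (illustrative decode, not part of the patch): mapping the
 * _MAX_LANE_* codes above to a speed in Mbps. The lookup table is an
 * editorial aid; only the codes and the mask come from the structure.
 */
static u32
ice_port_opt_max_speed_example(const struct ice_aqc_get_port_options_elem *elem)
{
	static const u32 speed_mbps[] = {
		[ICE_AQC_PORT_OPT_MAX_LANE_100M]  = 100,
		[ICE_AQC_PORT_OPT_MAX_LANE_1G]    = 1000,
		[ICE_AQC_PORT_OPT_MAX_LANE_2500M] = 2500,
		[ICE_AQC_PORT_OPT_MAX_LANE_5G]    = 5000,
		[ICE_AQC_PORT_OPT_MAX_LANE_10G]   = 10000,
		[ICE_AQC_PORT_OPT_MAX_LANE_25G]   = 25000,
		[ICE_AQC_PORT_OPT_MAX_LANE_50G]   = 50000,
		[ICE_AQC_PORT_OPT_MAX_LANE_100G]  = 100000,
	};
	u8 code = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M, elem->max_lane_speed);

	return code < ARRAY_SIZE(speed_mbps) ? speed_mbps[code] : 0;
}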
/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
struct ice_aqc_gpio {
__le16 gpio_ctrl_handle;
@@ -1408,6 +1534,17 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
#define ICE_AQC_NVM_ACTIV_SEL_MASK ICE_M(0x7, 3)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
+#define ICE_AQC_NVM_RESET_LVL_M ICE_M(0x3, 0) /* Write reply only */
+#define ICE_AQC_NVM_POR_FLAG 0
+#define ICE_AQC_NVM_PERST_FLAG 1
+#define ICE_AQC_NVM_EMPR_FLAG 2
+#define ICE_AQC_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+ /* For Write Activate, several flags are sent as part of a separate
+ * flags2 field using a separate byte. For simplicity of the software
+ * interface, we pass the flags as a 16 bit value so these flags are
+ * all offset by 8 bits
+ */
+#define ICE_AQC_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
__le16 module_typeid;
__le16 length;
#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
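/* Editor's note (sketch, not part of the patch): the comment above means a
 * driver-side 16-bit flags value is split at the call site -- low byte into
 * the command's flags byte, high byte into the separate flags2 byte. The
 * names of those command fields are assumptions here.
 */
static void ice_nvm_split_write_activate_flags_example(u16 cmd_flags,
							u8 *flags, u8 *flags2)
{
	*flags = cmd_flags & 0xFF;	/* e.g. ICE_AQC_NVM_FLASH_ONLY */
	*flags2 = cmd_flags >> 8;	/* e.g. ICE_AQC_NVM_ACTIV_REQ_EMPR >> 8 */
}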
@@ -1522,14 +1659,24 @@ struct ice_aqc_lldp_get_mib {
#define ICE_AQ_LLDP_TX_ACTIVE 0
#define ICE_AQ_LLDP_TX_SUSPENDED 1
#define ICE_AQ_LLDP_TX_FLUSHED 3
+/* DCBX mode */
+#define ICE_AQ_LLDP_DCBX_M GENMASK(7, 6)
+#define ICE_AQ_LLDP_DCBX_NA 0
+#define ICE_AQ_LLDP_DCBX_CEE 1
+#define ICE_AQ_LLDP_DCBX_IEEE 2
+
+ u8 state;
+#define ICE_AQ_LLDP_MIB_CHANGE_STATE_M BIT(0)
+#define ICE_AQ_LLDP_MIB_CHANGE_EXECUTED 0
+#define ICE_AQ_LLDP_MIB_CHANGE_PENDING 1
+
/* The following bytes are reserved for the Get LLDP MIB command (0x0A00)
* and in the LLDP MIB Change Event (0x0A01). They are valid for the
* Get LLDP MIB (0x0A00) response only.
*/
- u8 reserved1;
__le16 local_len;
__le16 remote_len;
- u8 reserved2[2];
+ u8 reserved[2];
__le32 addr_high;
__le32 addr_low;
};
@@ -1540,6 +1687,9 @@ struct ice_aqc_lldp_set_mib_change {
u8 command;
#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1
+#define ICE_AQ_LLDP_MIB_PENDING_M BIT(1)
+#define ICE_AQ_LLDP_MIB_PENDING_DISABLE 0
+#define ICE_AQ_LLDP_MIB_PENDING_ENABLE 1
u8 reserved[15];
};
@@ -1876,7 +2026,7 @@ struct ice_aqc_get_clear_fw_log {
};
/* Download Package (indirect 0x0C40) */
-/* Also used for Update Package (indirect 0x0C42) */
+/* Also used for Upload Section (indirect 0x0C41) and Update Package (0x0C42) */
struct ice_aqc_download_pkg {
u8 flags;
#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01
@@ -2001,7 +2151,10 @@ struct ice_aq_desc {
struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
+ struct ice_aqc_get_port_options get_port_options;
+ struct ice_aqc_set_port_option set_port_option;
struct ice_aqc_get_sw_cfg get_sw_conf;
+ struct ice_aqc_set_port_params set_port_params;
struct ice_aqc_sw_rules sw_rules;
struct ice_aqc_add_get_recipe add_get_recipe;
struct ice_aqc_recipe_to_profile recipe_to_profile;
@@ -2042,6 +2195,8 @@ struct ice_aq_desc {
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
+ struct ice_aqc_i2c read_write_i2c;
+ struct ice_aqc_read_i2c_resp read_i2c_resp;
} params;
};
@@ -2103,10 +2258,13 @@ enum ice_adminq_opc {
/* internal switch commands */
ice_aqc_opc_get_sw_cfg = 0x0200,
+ ice_aqc_opc_set_port_params = 0x0203,
/* Alloc/Free/Get Resources */
ice_aqc_opc_alloc_res = 0x0208,
ice_aqc_opc_free_res = 0x0209,
+ ice_aqc_opc_set_vlan_mode_parameters = 0x020C,
+ ice_aqc_opc_get_vlan_mode_parameters = 0x020D,
/* VSI commands */
ice_aqc_opc_add_vsi = 0x0210,
@@ -2153,7 +2311,11 @@ enum ice_adminq_opc {
ice_aqc_opc_set_event_mask = 0x0613,
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_get_link_topo = 0x06E0,
+ ice_aqc_opc_read_i2c = 0x06E2,
+ ice_aqc_opc_write_i2c = 0x06E3,
ice_aqc_opc_set_port_id_led = 0x06E9,
+ ice_aqc_opc_get_port_options = 0x06EA,
+ ice_aqc_opc_set_port_option = 0x06EB,
ice_aqc_opc_set_gpio = 0x06EC,
ice_aqc_opc_get_gpio = 0x06ED,
ice_aqc_opc_sff_eeprom = 0x06EE,
@@ -2180,6 +2342,7 @@ enum ice_adminq_opc {
ice_aqc_opc_lldp_set_local_mib = 0x0A08,
ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
ice_aqc_opc_lldp_filter_ctrl = 0x0A0A,
+ ice_aqc_opc_lldp_execute_pending_mib = 0x0A0B,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
@@ -2197,6 +2360,7 @@ enum ice_adminq_opc {
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
+ ice_aqc_opc_upload_section = 0x0C41,
ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 5daade32ea62..fba178e07600 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -577,7 +577,7 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
{
struct net_device *netdev;
- if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
+ if (!vsi || vsi->type != ICE_VSI_PF)
return;
netdev = vsi->netdev;
@@ -599,7 +599,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
int base_idx, i;
if (!vsi || vsi->type != ICE_VSI_PF)
- return -EINVAL;
+ return 0;
pf = vsi->back;
netdev = vsi->netdev;
@@ -636,7 +636,6 @@ void ice_remove_arfs(struct ice_pf *pf)
if (!pf_vsi)
return;
- ice_free_cpu_rx_rmap(pf_vsi);
ice_clear_arfs(pf_vsi);
}
@@ -653,9 +652,5 @@ void ice_rebuild_arfs(struct ice_pf *pf)
return;
ice_remove_arfs(pf);
- if (ice_set_cpu_rx_rmap(pf_vsi)) {
- dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
- return;
- }
ice_init_arfs(pf_vsi);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
index 80ed76f0cace..9669ad9bf7b5 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.h
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -3,6 +3,9 @@
#ifndef _ICE_ARFS_H_
#define _ICE_ARFS_H_
+
+#include "ice_fdir.h"
+
enum ice_arfs_fltr_state {
ICE_ARFS_INACTIVE,
ICE_ARFS_ACTIVE,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 1efc635cc0f5..1911d644dfa8 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -5,6 +5,7 @@
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+#include "ice_sriov.h"
/**
* __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
@@ -129,8 +130,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
* handler here (i.e. resume, reset/rebuild, etc.)
*/
if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);
out:
/* tie q_vector and VSI together */
@@ -310,7 +310,7 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
break;
case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF ID */
- tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
+ tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
case ICE_VSI_SWITCHDEV_CTRL:
@@ -355,9 +355,6 @@ static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
if (ice_ring_uses_build_skb(rx_ring))
return ICE_SKB_PAD;
- else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
- return XDP_PACKET_HEADROOM;
-
return 0;
}
@@ -389,7 +386,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
* Indicates the starting address of the descriptor queue defined in
* 128 Byte units.
*/
- rlan_ctx.base = ring->dma >> 7;
+ rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
rlan_ctx.qlen = ring->count;
@@ -404,10 +401,24 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Strip the Ethernet CRC bytes before the packet is posted to host
* memory.
*/
- rlan_ctx.crcstrip = 1;
-
- /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
- rlan_ctx.l2tsel = 1;
+ rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS);
+
+ /* L2TSEL flag defines the reported L2 Tags in the receive descriptor
+ * and it needs to remain 1 for non-DVM capable configurations to not
+ * break backward compatibility for VF drivers. Setting this field to 0
+ * will cause the single/outer VLAN tag to be stripped to the L2TAG2_2ND
+ * field in the Rx descriptor. Setting it to 1 allows the VLAN tag to
+ * be stripped in L2TAG1 of the Rx descriptor, which is where VFs will
+ * check for the tag
+ */
+ if (ice_is_dvm_ena(hw))
+ if (vsi->type == ICE_VSI_VF &&
+ ice_vf_is_port_vlan_ena(vsi->vf))
+ rlan_ctx.l2tsel = 1;
+ else
+ rlan_ctx.l2tsel = 0;
+ else
+ rlan_ctx.l2tsel = 1;
rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
@@ -481,7 +492,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
- u16 num_bufs = ICE_DESC_UNUSED(ring);
+ u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
int err;
ring->rx_buf_len = ring->vsi->rx_buf_len;
@@ -489,8 +500,10 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
if (ring->vsi->type == ICE_VSI_PF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
- xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
- ring->q_index, ring->q_vector->napi.napi_id);
+ __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->vsi->rx_buf_len);
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
@@ -510,9 +523,11 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
} else {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
- xdp_rxq_info_reg(&ring->xdp_rxq,
- ring->netdev,
- ring->q_index, ring->q_vector->napi.napi_id);
+ __xdp_rxq_info_reg(&ring->xdp_rxq,
+ ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->vsi->rx_buf_len);
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
@@ -522,6 +537,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
}
}
+ xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
+ ring->xdp.data = NULL;
err = ice_setup_rx_ctx(ring);
if (err) {
dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
@@ -759,7 +776,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_channel *ch = ring->ch;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
+ int status;
u16 pf_q;
u8 tc;
@@ -804,9 +821,9 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
ring->q_handle, 1, qg_buf, buf_len,
NULL);
if (status) {
- dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
- ice_stat_str(status));
- return -ENODEV;
+ dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ return status;
}
/* Add Tx Queue TEID into the VSI Tx ring from the
@@ -929,7 +946,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
struct ice_pf *pf = vsi->back;
struct ice_q_vector *q_vector;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
+ int status;
u32 val;
/* clear cause_ena bit for disabled queues */
@@ -944,7 +961,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* associated to the queue to schedule NAPI handler
*/
q_vector = ring->q_vector;
- if (q_vector)
+ if (q_vector && !(vsi->vf && ice_is_vf_disabled(vsi->vf)))
ice_trigger_sw_intr(hw, q_vector);
status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
@@ -953,18 +970,18 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
rel_vmvf_num, NULL);
/* if the disable queue command was exercised during an
- * active reset flow, ICE_ERR_RESET_ONGOING is returned.
+ * active reset flow, -EBUSY is returned.
* This is not an error as the reset operation disables
* queues at the hardware level anyway.
*/
- if (status == ICE_ERR_RESET_ONGOING) {
+ if (status == -EBUSY) {
dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
- } else if (status == ICE_ERR_DOES_NOT_EXIST) {
+ } else if (status == -ENOENT) {
dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
} else if (status) {
- dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n",
- ice_stat_str(status));
- return -ENODEV;
+ dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
+ status);
+ return status;
}
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
new file mode 100644
index 000000000000..57abd52386d0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_CGU_REGS_H_
+#define _ICE_CGU_REGS_H_
+
+#define NAC_CGU_DWORD9 0x24
+union nac_cgu_dword9 {
+ struct {
+ u32 time_ref_freq_sel : 3;
+ u32 clk_eref1_en : 1;
+ u32 clk_eref0_en : 1;
+ u32 time_ref_en : 1;
+ u32 time_sync_en : 1;
+ u32 one_pps_out_en : 1;
+ u32 clk_ref_synce_en : 1;
+ u32 clk_synce1_en : 1;
+ u32 clk_synce0_en : 1;
+ u32 net_clk_ref1_en : 1;
+ u32 net_clk_ref0_en : 1;
+ u32 clk_synce1_amp : 2;
+ u32 misc6 : 1;
+ u32 clk_synce0_amp : 2;
+ u32 one_pps_out_amp : 2;
+ u32 misc24 : 12;
+ } field;
+ u32 val;
+};
+
+#define NAC_CGU_DWORD19 0x4c
+union nac_cgu_dword19 {
+ struct {
+ u32 tspll_fbdiv_intgr : 8;
+ u32 fdpll_ulck_thr : 5;
+ u32 misc15 : 3;
+ u32 tspll_ndivratio : 4;
+ u32 tspll_iref_ndivratio : 3;
+ u32 misc19 : 1;
+ u32 japll_ndivratio : 4;
+ u32 japll_iref_ndivratio : 3;
+ u32 misc27 : 1;
+ } field;
+ u32 val;
+};
+
+#define NAC_CGU_DWORD22 0x58
+union nac_cgu_dword22 {
+ struct {
+ u32 fdpll_frac_div_out_nc : 2;
+ u32 fdpll_lock_int_for : 1;
+ u32 synce_hdov_int_for : 1;
+ u32 synce_lock_int_for : 1;
+ u32 fdpll_phlead_slip_nc : 1;
+ u32 fdpll_acc1_ovfl_nc : 1;
+ u32 fdpll_acc2_ovfl_nc : 1;
+ u32 synce_status_nc : 6;
+ u32 fdpll_acc1f_ovfl : 1;
+ u32 misc18 : 1;
+ u32 fdpllclk_div : 4;
+ u32 time1588clk_div : 4;
+ u32 synceclk_div : 4;
+ u32 synceclk_sel_div2 : 1;
+ u32 fdpllclk_sel_div2 : 1;
+ u32 time1588clk_sel_div2 : 1;
+ u32 misc3 : 1;
+ } field;
+ u32 val;
+};
+
+#define NAC_CGU_DWORD24 0x60
+union nac_cgu_dword24 {
+ struct {
+ u32 tspll_fbdiv_frac : 22;
+ u32 misc20 : 2;
+ u32 ts_pll_enable : 1;
+ u32 time_sync_tspll_align_sel : 1;
+ u32 ext_synce_sel : 1;
+ u32 ref1588_ck_div : 4;
+ u32 time_ref_sel : 1;
+ } field;
+ u32 val;
+};
+
+#define TSPLL_CNTR_BIST_SETTINGS 0x344
+union tspll_cntr_bist_settings {
+ struct {
+ u32 i_irefgen_settling_time_cntr_7_0 : 8;
+ u32 i_irefgen_settling_time_ro_standby_1_0 : 2;
+ u32 reserved195 : 5;
+ u32 i_plllock_sel_0 : 1;
+ u32 i_plllock_sel_1 : 1;
+ u32 i_plllock_cnt_6_0 : 7;
+ u32 i_plllock_cnt_10_7 : 4;
+ u32 reserved200 : 4;
+ } field;
+ u32 val;
+};
+
+#define TSPLL_RO_BWM_LF 0x370
+union tspll_ro_bwm_lf {
+ struct {
+ u32 bw_freqov_high_cri_7_0 : 8;
+ u32 bw_freqov_high_cri_9_8 : 2;
+ u32 biascaldone_cri : 1;
+ u32 plllock_gain_tran_cri : 1;
+ u32 plllock_true_lock_cri : 1;
+ u32 pllunlock_flag_cri : 1;
+ u32 afcerr_cri : 1;
+ u32 afcdone_cri : 1;
+ u32 feedfwrdgain_cal_cri_7_0 : 8;
+ u32 m2fbdivmod_cri_7_0 : 8;
+ } field;
+ u32 val;
+};
+
+#endif /* _ICE_CGU_REGS_H_ */
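/* Editor's note (hedged sketch, not part of the patch): the unions above are
 * intended for read-modify-write of whole CGU dwords. How these registers are
 * actually reached (direct BAR access vs. a sideband queue) is not shown in
 * this patch, so the rd32()/wr32() accessors here are an assumption.
 */
static void ice_cgu_enable_1pps_out_example(struct ice_hw *hw)
{
	union nac_cgu_dword9 dw9;

	dw9.val = rd32(hw, NAC_CGU_DWORD9);	/* read the raw dword */
	dw9.field.one_pps_out_en = 1;		/* flip only the 1PPS enable bit */
	wr32(hw, NAC_CGU_DWORD9, dw9.val);	/* write the dword back */
}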
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index b3066d0fea8b..0157f6e98d3e 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2,13 +2,114 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice_common.h"
-#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#define ICE_PF_RESET_WAIT_COUNT 300
+static const char * const ice_link_mode_str_low[] = {
+ [0] = "100BASE_TX",
+ [1] = "100M_SGMII",
+ [2] = "1000BASE_T",
+ [3] = "1000BASE_SX",
+ [4] = "1000BASE_LX",
+ [5] = "1000BASE_KX",
+ [6] = "1G_SGMII",
+ [7] = "2500BASE_T",
+ [8] = "2500BASE_X",
+ [9] = "2500BASE_KX",
+ [10] = "5GBASE_T",
+ [11] = "5GBASE_KR",
+ [12] = "10GBASE_T",
+ [13] = "10G_SFI_DA",
+ [14] = "10GBASE_SR",
+ [15] = "10GBASE_LR",
+ [16] = "10GBASE_KR_CR1",
+ [17] = "10G_SFI_AOC_ACC",
+ [18] = "10G_SFI_C2C",
+ [19] = "25GBASE_T",
+ [20] = "25GBASE_CR",
+ [21] = "25GBASE_CR_S",
+ [22] = "25GBASE_CR1",
+ [23] = "25GBASE_SR",
+ [24] = "25GBASE_LR",
+ [25] = "25GBASE_KR",
+ [26] = "25GBASE_KR_S",
+ [27] = "25GBASE_KR1",
+ [28] = "25G_AUI_AOC_ACC",
+ [29] = "25G_AUI_C2C",
+ [30] = "40GBASE_CR4",
+ [31] = "40GBASE_SR4",
+ [32] = "40GBASE_LR4",
+ [33] = "40GBASE_KR4",
+ [34] = "40G_XLAUI_AOC_ACC",
+ [35] = "40G_XLAUI",
+ [36] = "50GBASE_CR2",
+ [37] = "50GBASE_SR2",
+ [38] = "50GBASE_LR2",
+ [39] = "50GBASE_KR2",
+ [40] = "50G_LAUI2_AOC_ACC",
+ [41] = "50G_LAUI2",
+ [42] = "50G_AUI2_AOC_ACC",
+ [43] = "50G_AUI2",
+ [44] = "50GBASE_CP",
+ [45] = "50GBASE_SR",
+ [46] = "50GBASE_FR",
+ [47] = "50GBASE_LR",
+ [48] = "50GBASE_KR_PAM4",
+ [49] = "50G_AUI1_AOC_ACC",
+ [50] = "50G_AUI1",
+ [51] = "100GBASE_CR4",
+ [52] = "100GBASE_SR4",
+ [53] = "100GBASE_LR4",
+ [54] = "100GBASE_KR4",
+ [55] = "100G_CAUI4_AOC_ACC",
+ [56] = "100G_CAUI4",
+ [57] = "100G_AUI4_AOC_ACC",
+ [58] = "100G_AUI4",
+ [59] = "100GBASE_CR_PAM4",
+ [60] = "100GBASE_KR_PAM4",
+ [61] = "100GBASE_CP2",
+ [62] = "100GBASE_SR2",
+ [63] = "100GBASE_DR",
+};
+
+static const char * const ice_link_mode_str_high[] = {
+ [0] = "100GBASE_KR2_PAM4",
+ [1] = "100G_CAUI2_AOC_ACC",
+ [2] = "100G_CAUI2",
+ [3] = "100G_AUI2_AOC_ACC",
+ [4] = "100G_AUI2",
+};
+
+/**
+ * ice_dump_phy_type - helper function to dump phy_type
+ * @hw: pointer to the HW structure
+ * @low: 64 bit value for phy_type_low
+ * @high: 64 bit value for phy_type_high
+ * @prefix: prefix string to differentiate multiple dumps
+ */
+static void
+ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
+{
+ ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);
+
+ for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
+ if (low & BIT_ULL(i))
+ ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
+ prefix, i, ice_link_mode_str_low[i]);
+ }
+
+ ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);
+
+ for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
+ if (high & BIT_ULL(i))
+ ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
+ prefix, i, ice_link_mode_str_high[i]);
+ }
+}
+
/**
* ice_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
@@ -16,10 +117,10 @@
* This function sets the MAC type of the adapter based on the
* vendor ID and device ID stored in the HW structure.
*/
-static enum ice_status ice_set_mac_type(struct ice_hw *hw)
+static int ice_set_mac_type(struct ice_hw *hw)
{
if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
- return ICE_ERR_DEVICE_NOT_SUPPORTED;
+ return -ENODEV;
switch (hw->device_id) {
case ICE_DEV_ID_E810C_BACKPLANE:
@@ -81,9 +182,23 @@ bool ice_is_e810t(struct ice_hw *hw)
{
switch (hw->device_id) {
case ICE_DEV_ID_E810C_SFP:
- if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
- hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
+ switch (hw->subsystem_device_id) {
+ case ICE_SUBDEV_ID_E810T:
+ case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T3:
+ case ICE_SUBDEV_ID_E810T4:
+ case ICE_SUBDEV_ID_E810T6:
+ case ICE_SUBDEV_ID_E810T7:
+ return true;
+ }
+ break;
+ case ICE_DEV_ID_E810C_QSFP:
+ switch (hw->subsystem_device_id) {
+ case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T3:
+ case ICE_SUBDEV_ID_E810T5:
return true;
+ }
break;
default:
break;
@@ -93,13 +208,38 @@ bool ice_is_e810t(struct ice_hw *hw)
}
/**
+ * ice_is_e823
+ * @hw: pointer to the hardware structure
+ *
+ * returns true if the device is E823-L or E823-C based, false if not.
+ */
+bool ice_is_e823(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
* Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
* configuration, flow director filters, etc.).
*/
-enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
+int ice_clear_pf_cfg(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -123,21 +263,21 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
* ice_discover_dev_caps is expected to be called before this function is
* called.
*/
-static enum ice_status
+static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_manage_mac_read_resp *resp;
struct ice_aqc_manage_mac_read *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u16 flags;
u8 i;
cmd = &desc.params.mac_read;
if (buf_size < sizeof(*resp))
- return ICE_ERR_BUF_TOO_SHORT;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
@@ -150,7 +290,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
- return ICE_ERR_CFG;
+ return -EIO;
}
/* A single port can report up to two (LAN and WoL) addresses */
@@ -176,7 +316,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
*
* Returns the various PHY capabilities supported on the Port (0x0600)
*/
-enum ice_status
+int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *pcaps,
struct ice_sq_cd *cd)
@@ -184,18 +324,19 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps);
struct ice_aq_desc desc;
- enum ice_status status;
+ const char *prefix;
struct ice_hw *hw;
+ int status;
cmd = &desc.params.get_phy;
if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
!ice_fw_supports_report_dflt_cfg(hw))
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
@@ -205,29 +346,48 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
cmd->param0 |= cpu_to_le16(report_mode);
status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
- ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
- report_mode);
- ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
- (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
- ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
- (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
- ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
- ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");
+
+ switch (report_mode) {
+ case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
+ prefix = "phy_caps_media";
+ break;
+ case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
+ prefix = "phy_caps_no_media";
+ break;
+ case ICE_AQC_REPORT_ACTIVE_CFG:
+ prefix = "phy_caps_active";
+ break;
+ case ICE_AQC_REPORT_DFLT_CFG:
+ prefix = "phy_caps_default";
+ break;
+ default:
+ prefix = "phy_caps_invalid";
+ }
+
+ ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
+ le64_to_cpu(pcaps->phy_type_high), prefix);
+
+ ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
+ prefix, report_mode);
+ ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
+ ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
pcaps->low_power_ctrl_an);
- ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
- ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
+ pcaps->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
pcaps->eeer_value);
- ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
pcaps->link_fec_options);
- ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
- pcaps->module_compliance_enforcement);
- ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
- pcaps->extended_compliance_code);
- ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
+ prefix, pcaps->module_compliance_enforcement);
+ ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
+ prefix, pcaps->extended_compliance_code);
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
pcaps->module_type[0]);
- ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
pcaps->module_type[1]);
- ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
pcaps->module_type[2]);
if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
@@ -252,7 +412,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
* returns error (ENOENT), then no cage present. If no cage present, then
* connection type is backplane or BASE-T.
*/
-static enum ice_status
+static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
struct ice_sq_cd *cd)
{
@@ -418,7 +578,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
*
* Get Link Status (0x607). Returns the link status of the adapter.
*/
-enum ice_status
+int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
@@ -429,12 +589,12 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_fc_info *hw_fc_info;
bool tx_pause, rx_pause;
struct ice_aq_desc desc;
- enum ice_status status;
struct ice_hw *hw;
u16 cmd_flags;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
li_old = &pi->phy.link_info_old;
hw_media_type = &pi->phy.media_type;
@@ -556,7 +716,7 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
*
* Set MAC configuration (0x0603)
*/
-enum ice_status
+int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_cfg *cmd;
@@ -565,7 +725,7 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
cmd = &desc.params.set_mac_cfg;
if (max_frame_size == 0)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
@@ -580,17 +740,17 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
* @hw: pointer to the HW struct
*/
-static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
+static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
struct ice_switch_info *sw;
- enum ice_status status;
+ int status;
hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*hw->switch_info), GFP_KERNEL);
sw = hw->switch_info;
if (!sw)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
INIT_LIST_HEAD(&sw->vsi_list_map_head);
sw->prof_res_bm_init = 0;
@@ -666,17 +826,17 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
* ice_get_fw_log_cfg - get FW logging configuration
* @hw: pointer to the HW struct
*/
-static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
+static int ice_get_fw_log_cfg(struct ice_hw *hw)
{
struct ice_aq_desc desc;
- enum ice_status status;
__le16 *config;
+ int status;
u16 size;
size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
if (!config)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
@@ -738,15 +898,15 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
* messages from FW to SW. Interrupts are typically disabled during the device's
* initialization phase.
*/
-static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
+static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
struct ice_aqc_fw_logging *cmd;
- enum ice_status status = 0;
u16 i, chgs = 0, len = 0;
struct ice_aq_desc desc;
__le16 *data = NULL;
u8 actv_evnts = 0;
void *buf = NULL;
+ int status = 0;
if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
return 0;
@@ -790,7 +950,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
sizeof(*data),
GFP_KERNEL);
if (!data)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
val = i << ICE_AQC_FW_LOG_ID_S;
@@ -904,12 +1064,12 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_init_hw(struct ice_hw *hw)
+int ice_init_hw(struct ice_hw *hw)
{
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
u16 mac_buf_len;
void *mac_buf;
+ int status;
/* Set MAC type based on DeviceID */
status = ice_set_mac_type(hw);
@@ -953,10 +1113,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
- hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*hw->port_info), GFP_KERNEL);
+ if (!hw->port_info)
+ hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*hw->port_info),
+ GFP_KERNEL);
if (!hw->port_info) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll_cqinit;
}
@@ -970,6 +1132,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
hw->evb_veb = true;
+ /* init xarray for identifying scheduling nodes uniquely */
+ xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);
+
/* Query the allocated resources for Tx scheduler */
status = ice_sched_query_res_alloc(hw);
if (status) {
@@ -985,7 +1150,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
if (!pcaps) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll_sched;
}
@@ -1006,7 +1171,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
/* need a valid SW entry point to build a Tx tree */
if (!hw->sw_entry_point_layer) {
ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_unroll_sched;
}
INIT_LIST_HEAD(&hw->agg_list);
@@ -1026,7 +1191,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
if (!mac_buf) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll_fltr_mgmt_struct;
}
@@ -1079,11 +1244,6 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_free_hw_tbls(hw);
mutex_destroy(&hw->tnl_lock);
- if (hw->port_info) {
- devm_kfree(ice_hw_to_dev(hw), hw->port_info);
- hw->port_info = NULL;
- }
-
/* Attempt to disable FW logging before shutting down control queues */
ice_cfg_fw_log(hw, false);
ice_destroy_all_ctrlq(hw);
@@ -1096,7 +1256,7 @@ void ice_deinit_hw(struct ice_hw *hw)
* ice_check_reset - Check to see if a global reset is complete
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_check_reset(struct ice_hw *hw)
+int ice_check_reset(struct ice_hw *hw)
{
u32 cnt, reg = 0, grst_timeout, uld_mask;
@@ -1116,7 +1276,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
if (cnt == grst_timeout) {
ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
}
#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
@@ -1143,7 +1303,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
reg);
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
}
return 0;
@@ -1156,7 +1316,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
* If a global reset has been triggered, this function checks
* for its completion and then issues the PF reset
*/
-static enum ice_status ice_pf_reset(struct ice_hw *hw)
+static int ice_pf_reset(struct ice_hw *hw)
{
u32 cnt, reg;
@@ -1169,7 +1329,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
(rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
/* poll on global reset currently in progress until done */
if (ice_check_reset(hw))
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
return 0;
}
@@ -1194,7 +1354,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
if (cnt == ICE_PF_RESET_WAIT_COUNT) {
ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
- return ICE_ERR_RESET_FAILED;
+ return -EIO;
}
return 0;
@@ -1212,7 +1372,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
* This has to be cleared using ice_clear_pxe_mode again, once the AQ
* interface has been restored in the rebuild flow.
*/
-enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
+int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
u32 val = 0;
@@ -1228,7 +1388,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
val = GLGEN_RTRIG_GLOBR_M;
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
val |= rd32(hw, GLGEN_RTRIG);
@@ -1247,16 +1407,16 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
*
* Copies rxq context from dense structure to HW register space
*/
-static enum ice_status
+static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
u8 i;
if (!ice_rxq_ctx)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
if (rxq_index > QRX_CTRL_MAX_INDEX)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Copy each dword separately to HW */
for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
@@ -1306,14 +1466,14 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* it to HW register space and enables the hardware to prefetch descriptors
* instead of only fetching them on demand
*/
-enum ice_status
+int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
if (!rlan_ctx)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
rlan_ctx->prefena = 1;
@@ -1369,9 +1529,8 @@ static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
- return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
- (struct ice_aq_desc *)desc,
- buf, buf_size, cd));
+ return ice_sq_send_cmd(hw, ice_get_sbq(hw),
+ (struct ice_aq_desc *)desc, buf, buf_size, cd);
}
/**
@@ -1453,28 +1612,24 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode)
* Retry sending the FW Admin Queue command, multiple times, to the FW Admin
* Queue if the EBUSY AQ error is returned.
*/
-static enum ice_status
+static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc_cpy;
- enum ice_status status;
bool is_cmd_for_retry;
- u8 *buf_cpy = NULL;
u8 idx = 0;
u16 opcode;
+ int status;
opcode = le16_to_cpu(desc->opcode);
is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
memset(&desc_cpy, 0, sizeof(desc_cpy));
if (is_cmd_for_retry) {
- if (buf) {
- buf_cpy = kzalloc(buf_size, GFP_KERNEL);
- if (!buf_cpy)
- return ICE_ERR_NO_MEMORY;
- }
+ /* All retryable cmds are direct, without buf. */
+ WARN_ON(buf);
memcpy(&desc_cpy, desc, sizeof(desc_cpy));
}
@@ -1486,17 +1641,12 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
break;
- if (buf_cpy)
- memcpy(buf, buf_cpy, buf_size);
-
memcpy(desc, &desc_cpy, sizeof(desc_cpy));
- mdelay(ICE_SQ_SEND_DELAY_TIME_MS);
+ msleep(ICE_SQ_SEND_DELAY_TIME_MS);
} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
- kfree(buf_cpy);
-
return status;
}
@@ -1510,26 +1660,37 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
*
* Helper function to send FW Admin Queue commands to the FW Admin Queue.
*/
-enum ice_status
+int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
struct ice_aqc_req_res *cmd = &desc->params.res_owner;
bool lock_acquired = false;
- enum ice_status status;
+ int status;
/* When a package download is in process (i.e. when the firmware's
* Global Configuration Lock resource is held), only the Download
- * Package, Get Version, Get Package Info List and Release Resource
- * (with resource ID set to Global Config Lock) AdminQ commands are
- * allowed; all others must block until the package download completes
- * and the Global Config Lock is released. See also
- * ice_acquire_global_cfg_lock().
+ * Package, Get Version, Get Package Info List, Upload Section,
+ * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters,
+ * Add Recipe, Set Recipes to Profile Association, Get Recipe, and Get
+ * Recipes to Profile Association, and Release Resource (with resource
+ * ID set to Global Config Lock) AdminQ commands are allowed; all others
+ * must block until the package download completes and the Global Config
+ * Lock is released. See also ice_acquire_global_cfg_lock().
*/
switch (le16_to_cpu(desc->opcode)) {
case ice_aqc_opc_download_pkg:
case ice_aqc_opc_get_pkg_info_list:
case ice_aqc_opc_get_ver:
+ case ice_aqc_opc_upload_section:
+ case ice_aqc_opc_update_pkg:
+ case ice_aqc_opc_set_port_params:
+ case ice_aqc_opc_get_vlan_mode_parameters:
+ case ice_aqc_opc_set_vlan_mode_parameters:
+ case ice_aqc_opc_add_recipe:
+ case ice_aqc_opc_recipe_to_profile:
+ case ice_aqc_opc_get_recipe:
+ case ice_aqc_opc_get_recipe_to_profile:
break;
case ice_aqc_opc_release_res:
if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
@@ -1555,11 +1716,11 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
*
* Get the firmware version (0x0001) from the admin queue commands
*/
-enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
+int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
struct ice_aqc_get_ver *resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
resp = &desc.params.get_ver;
@@ -1590,7 +1751,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
*
* Send the driver version (0x0002) to the firmware
*/
-enum ice_status
+int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd)
{
@@ -1601,7 +1762,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
cmd = &desc.params.driver_ver;
if (!dv)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
@@ -1627,7 +1788,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well (0x0003).
*/
-enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
+int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
struct ice_aqc_q_shutdown *cmd;
struct ice_aq_desc desc;
@@ -1654,12 +1815,12 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* Requests common resource using the admin queue commands (0x0008).
* When attempting to acquire the Global Config Lock, the driver can
* learn of three states:
- * 1) ICE_SUCCESS - acquired lock, and can perform download package
- * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
- * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
- * successfully downloaded the package; the driver does
- * not have to download the package and can continue
- * loading
+ * 1) 0 - acquired lock, and can perform download package
+ * 2) -EIO - did not get lock, driver should fail to load
+ * 3) -EALREADY - did not get lock, but another driver has
+ * successfully downloaded the package; the driver does
+ * not have to download the package and can continue
+ * loading
*
* Note that if the caller is in an acquire lock, perform action, release lock
* phase of operation, it is possible that the FW may detect a timeout and issue
@@ -1668,14 +1829,14 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* will likely get an error propagated back to it indicating the Download
* Package, Update Package or the Release Resource AQ commands timed out.
*/
-static enum ice_status
+static int
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
struct ice_sq_cd *cd)
{
struct ice_aqc_req_res *cmd_resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd_resp = &desc.params.res_owner;
@@ -1707,15 +1868,15 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
} else if (le16_to_cpu(cmd_resp->status) ==
ICE_AQ_RES_GLBL_IN_PROG) {
*timeout = le32_to_cpu(cmd_resp->timeout);
- return ICE_ERR_AQ_ERROR;
+ return -EIO;
} else if (le16_to_cpu(cmd_resp->status) ==
ICE_AQ_RES_GLBL_DONE) {
- return ICE_ERR_AQ_NO_WORK;
+ return -EALREADY;
}
/* invalid FW response, force a timeout immediately */
*timeout = 0;
- return ICE_ERR_AQ_ERROR;
+ return -EIO;
}
/* If the resource is held by some other driver, the command completes
@@ -1737,7 +1898,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
*
* release common resource using the admin queue commands (0x0009)
*/
-static enum ice_status
+static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
struct ice_sq_cd *cd)
{
@@ -1763,23 +1924,23 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
*
* This function will attempt to acquire the ownership of a resource.
*/
-enum ice_status
+int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS 10
u32 delay = ICE_RES_POLLING_DELAY_MS;
u32 time_left = timeout;
- enum ice_status status;
+ int status;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
- /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
+ /* A return code of -EALREADY means that another driver has
* previously acquired the resource and performed any necessary updates;
* in this case the caller does not obtain the resource and has no
* further work to do.
*/
- if (status == ICE_ERR_AQ_NO_WORK)
+ if (status == -EALREADY)
goto ice_acquire_res_exit;
if (status)
@@ -1792,7 +1953,7 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
timeout = (timeout > delay) ? timeout - delay : 0;
status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
- if (status == ICE_ERR_AQ_NO_WORK)
+ if (status == -EALREADY)
/* lock free, but no work to do */
break;
@@ -1800,15 +1961,15 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
/* lock acquired */
break;
}
- if (status && status != ICE_ERR_AQ_NO_WORK)
+ if (status && status != -EALREADY)
ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
ice_acquire_res_exit:
- if (status == ICE_ERR_AQ_NO_WORK) {
+ if (status == -EALREADY) {
if (access == ICE_RES_WRITE)
ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
else
- ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
+ ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n");
}
return status;
}
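/* Editor's note (hedged usage sketch, not part of the patch): how a caller is
 * expected to treat the -EALREADY return documented above when taking the
 * Global Config Lock; the timeout constant is assumed from ice_type.h.
 */
static int ice_try_download_pkg_example(struct ice_hw *hw)
{
	int err;

	err = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
			      ICE_GLOBAL_CFG_LOCK_TIMEOUT);
	if (err == -EALREADY)
		return 0;	/* another PF already downloaded the package */
	if (err)
		return err;	/* -EIO and friends: fail the load */

	/* ... issue the Download Package AQ commands here ... */

	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
	return 0;
}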
@@ -1822,20 +1983,19 @@ ice_acquire_res_exit:
*/
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
- enum ice_status status;
- u32 total_delay = 0;
-
- status = ice_aq_release_res(hw, res, 0, NULL);
+ unsigned long timeout;
+ int status;
/* there are some rare cases when trying to release the resource
* results in an admin queue timeout, so handle them correctly
*/
- while ((status == ICE_ERR_AQ_TIMEOUT) &&
- (total_delay < hw->adminq.sq_cmd_timeout)) {
- mdelay(1);
+ timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT;
+ do {
status = ice_aq_release_res(hw, res, 0, NULL);
- total_delay++;
- }
+ if (status != -EIO)
+ break;
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, timeout));
}
/**
@@ -1849,7 +2009,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
*
* Helper function to allocate/free resources using the admin queue commands
*/
-enum ice_status
+int
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
@@ -1860,10 +2020,10 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
cmd = &desc.params.sw_res_ctrl;
if (!buf)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (buf_size < flex_array_size(buf, elem, num_entries))
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
@@ -1882,17 +2042,17 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
* @btm: allocate from bottom
* @res: pointer to array that will receive the resources
*/
-enum ice_status
+int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(buf, elem, num);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Prepare buffer to allocate resource. */
buf->num_elems = cpu_to_le16(num);
@@ -1920,16 +2080,16 @@ ice_alloc_res_exit:
* @num: number of resources
* @res: pointer to array that contains the resources to free
*/
-enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(buf, elem, num);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Prepare buffer to free resource. */
buf->num_elems = cpu_to_le16(num);
@@ -2071,6 +2231,18 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
prefix, caps->max_mtu);
break;
+ case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
+ caps->pcie_reset_avoidance = (number > 0);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: pcie_reset_avoidance = %d\n", prefix,
+ caps->pcie_reset_avoidance);
+ break;
+ case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
+ caps->reset_restrict_support = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT,
+ "%s: reset_restrict_support = %d\n", prefix,
+ caps->reset_restrict_support);
+ break;
default:
/* Not one of the recognized common capabilities */
found = false;
@@ -2180,6 +2352,18 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+ if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
+ info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
+ } else {
+ /* Unknown clock frequency, so assume a (probably incorrect)
+ * default to avoid out-of-bounds look ups of frequency
+ * related information.
+ */
+ ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
+ info->clk_freq);
+ info->time_ref = ICE_TIME_REF_FREQ_25_000;
+ }
+
ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
func_p->common_cap.ieee_1588);
ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
@@ -2365,6 +2549,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
+ info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
+
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
@@ -2382,6 +2568,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr1_owned);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
info->tmr1_ena);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
+ info->ts_ll_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2486,19 +2674,19 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
* buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
* firmware could return) to avoid this.
*/
-enum ice_status
+int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aqc_list_caps *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.get_cap;
if (opc != ice_aqc_opc_list_func_caps &&
opc != ice_aqc_opc_list_dev_caps)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -2517,16 +2705,16 @@ ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
* Read the device capabilities and extract them into the dev_caps structure
* for later use.
*/
-enum ice_status
+int
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
- enum ice_status status;
u32 cap_count = 0;
void *cbuf;
+ int status;
cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!cbuf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Although the driver doesn't know the number of capabilities the
* device will return, we can simply send a 4KB buffer, the maximum
@@ -2551,16 +2739,16 @@ ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
* Read the function capabilities and extract them into the func_caps structure
* for later use.
*/
-static enum ice_status
+static int
ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
{
- enum ice_status status;
u32 cap_count = 0;
void *cbuf;
+ int status;
cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!cbuf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Although the driver doesn't know the number of capabilities the
* device will return, we can simply send a 4KB buffer, the maximum
@@ -2650,9 +2838,9 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
* ice_get_caps - get info about the HW
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_get_caps(struct ice_hw *hw)
+int ice_get_caps(struct ice_hw *hw)
{
- enum ice_status status;
+ int status;
status = ice_discover_dev_caps(hw, &hw->dev_caps);
if (status)
@@ -2670,7 +2858,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
*
* This function is used to write MAC address to the NVM (0x0108).
*/
-enum ice_status
+int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd)
{
@@ -2692,7 +2880,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
*
* Tell the firmware that the driver is taking over from PXE (0x0110).
*/
-static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
+static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -2716,6 +2904,54 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
}
/**
+ * ice_aq_set_port_params - set physical port parameters.
+ * @pi: pointer to the port info struct
+ * @double_vlan: if set double VLAN is enabled
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set Physical port parameters (0x0203)
+ */
+int
+ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
+ struct ice_sq_cd *cd)
+
+{
+ struct ice_aqc_set_port_params *cmd;
+ struct ice_hw *hw = pi->hw;
+ struct ice_aq_desc desc;
+ u16 cmd_flags = 0;
+
+ cmd = &desc.params.set_port_params;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
+ if (double_vlan)
+ cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
+ cmd->cmd_flags = cpu_to_le16(cmd_flags);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
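/* Editor's note (assumed call site, not part of the patch): enabling double
 * VLAN (QinQ) handling for the port once the VLAN mode requires it.
 */
static int ice_enable_dvm_port_example(struct ice_hw *hw)
{
	return ice_aq_set_port_params(hw->port_info, true, NULL);
}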
+/**
+ * ice_is_100m_speed_supported
+ * @hw: pointer to the HW struct
+ *
+ * returns true if 100M speeds are supported by the device,
+ * false otherwise.
+ */
+bool ice_is_100m_speed_supported(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822L_SGMII:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_get_link_speed_based_on_phy_type - returns link speed
* @phy_type_low: lower part of phy_type
* @phy_type_high: higher part of phy_type
@@ -2725,8 +2961,8 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
* Note: In the structure of [phy_type_low, phy_type_high], there should
* be one bit set, as this function will convert one PHY type to its
* speed.
- * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
- * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
+ * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
+ * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
*/
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
@@ -2903,15 +3139,15 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
* mode as the PF may not have the privilege to set some of the PHY Config
* parameters. This status will be indicated by the command response (0x0601).
*/
-enum ice_status
+int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!cfg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Ensure that only valid bits of cfg->caps can be turned on. */
if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
@@ -2952,13 +3188,13 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
* ice_update_link_info - update status of the HW network link
* @pi: port info structure of the interested logical port
*/
-enum ice_status ice_update_link_info(struct ice_port_info *pi)
+int ice_update_link_info(struct ice_port_info *pi)
{
struct ice_link_status *li;
- enum ice_status status;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
li = &pi->phy.link_info;
@@ -2974,7 +3210,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
GFP_KERNEL);
if (!pcaps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL);
@@ -3070,7 +3306,7 @@ enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
* @cfg: PHY configuration data to set FC mode
* @req_mode: FC mode to configure
*/
-enum ice_status
+int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fc_mode req_mode)
{
@@ -3078,7 +3314,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
u8 pause_mask = 0x0;
if (!pi || !cfg)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
switch (req_mode) {
case ICE_FC_FULL:
@@ -3117,23 +3353,23 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
*
* Set the requested flow control mode.
*/
-enum ice_status
+int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || !aq_failures)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
*aq_failures = 0;
hw = pi->hw;
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Get the current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
@@ -3258,22 +3494,22 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
* @cfg: PHY configuration data to set FEC mode
* @fec: FEC mode to configure
*/
-enum ice_status
+int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec)
{
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || !cfg)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
hw = pi->hw;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false,
(ice_fw_supports_report_dflt_cfg(hw) ?
@@ -3313,15 +3549,16 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
cfg->link_fec_opt |= pcaps->link_fec_options;
break;
default:
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
break;
}
if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
!ice_fw_supports_report_dflt_cfg(hw)) {
- struct ice_link_default_override_tlv tlv;
+ struct ice_link_default_override_tlv tlv = { 0 };
- if (ice_get_link_default_override(&tlv, pi))
+ status = ice_get_link_default_override(&tlv, pi);
+ if (status)
goto out;
if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
@@ -3344,13 +3581,13 @@ out:
* The variable link_up is invalid if status is non zero. As a
* result of this call, link status reporting becomes enabled
*/
-enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
+int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
struct ice_phy_info *phy_info;
- enum ice_status status = 0;
+ int status = 0;
if (!pi || !link_up)
- return ICE_ERR_PARAM;
+ return -EINVAL;
phy_info = &pi->phy;
@@ -3375,7 +3612,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
*
* Sets up the link and restarts the Auto-Negotiation over the link.
*/
-enum ice_status
+int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd)
{
@@ -3405,7 +3642,7 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
*
* Set event mask (0x0613)
*/
-enum ice_status
+int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd)
{
@@ -3430,7 +3667,7 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
*
* Enable/disable loopback on a given port
*/
-enum ice_status
+int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_lb *cmd;
@@ -3453,7 +3690,7 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
*
* Set LED value for the given port (0x06e9)
*/
-enum ice_status
+int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd)
{
@@ -3474,6 +3711,121 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
}
/**
+ * ice_aq_get_port_options
+ * @hw: pointer to the HW struct
+ * @options: buffer for the resultant port options
+ * @option_count: input - size of the buffer in port options structures,
+ * output - number of returned port options
+ * @lport: logical port to call the command with (optional)
+ * @lport_valid: when false, FW uses the port owned by the PF instead of
+ * lport; when the PF owns more than one port this must be true
+ * @active_option_idx: index of active port option in returned buffer
+ * @active_option_valid: active option in returned buffer is valid
+ * @pending_option_idx: index of pending port option in returned buffer
+ * @pending_option_valid: pending option in returned buffer is valid
+ *
+ * Calls Get Port Options AQC (0x06ea) and verifies result.
+ */
+int
+ice_aq_get_port_options(struct ice_hw *hw,
+ struct ice_aqc_get_port_options_elem *options,
+ u8 *option_count, u8 lport, bool lport_valid,
+ u8 *active_option_idx, bool *active_option_valid,
+ u8 *pending_option_idx, bool *pending_option_valid)
+{
+ struct ice_aqc_get_port_options *cmd;
+ struct ice_aq_desc desc;
+ int status;
+ u8 i;
+
+ /* options buffer shall be able to hold max returned options */
+ if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
+ return -EINVAL;
+
+ cmd = &desc.params.get_port_options;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
+
+ if (lport_valid)
+ cmd->lport_num = lport;
+ cmd->lport_num_valid = lport_valid;
+
+ status = ice_aq_send_cmd(hw, &desc, options,
+ *option_count * sizeof(*options), NULL);
+ if (status)
+ return status;
+
+ /* verify direct FW response & set output parameters */
+ *option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
+ cmd->port_options_count);
+ ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
+ *active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
+ cmd->port_options);
+ if (*active_option_valid) {
+ *active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
+ cmd->port_options);
+ if (*active_option_idx > (*option_count - 1))
+ return -EIO;
+ ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
+ *active_option_idx);
+ }
+
+ *pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
+ cmd->pending_port_option_status);
+ if (*pending_option_valid) {
+ *pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
+ cmd->pending_port_option_status);
+ if (*pending_option_idx > (*option_count - 1))
+ return -EIO;
+ ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
+ *pending_option_idx);
+ }
+
+ /* mask output options fields */
+ for (i = 0; i < *option_count; i++) {
+ options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
+ options[i].pmd);
+ options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
+ options[i].max_lane_speed);
+ ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
+ options[i].pmd, options[i].max_lane_speed);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_aq_set_port_option
+ * @hw: pointer to the HW struct
+ * @lport: logical port to call the command with
+ * @lport_valid: when false, FW uses the port owned by the PF instead of
+ * lport; when the PF owns more than one port this must be true
+ * @new_option: new port option to be written
+ *
+ * Calls Set Port Options AQC (0x06eb).
+ */
+int
+ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
+ u8 new_option)
+{
+ struct ice_aqc_set_port_option *cmd;
+ struct ice_aq_desc desc;
+
+ if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
+ return -EINVAL;
+
+ cmd = &desc.params.set_port_option;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
+
+ if (lport_valid)
+ cmd->lport_num = lport;
+
+ cmd->lport_num_valid = lport_valid;
+ cmd->selected_port_option = new_option;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
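A hedged usage sketch tying the two port-option wrappers together: list the options, then request a different one. EXAMPLE_PORT_OPT_MAX and the function name are illustrative; only the identifiers visible above are taken from the driver.

/* Hedged sketch: query port options and select 'wanted' if it exists.
 * EXAMPLE_PORT_OPT_MAX is an illustrative bound chosen to satisfy the
 * ICE_AQC_PORT_OPT_COUNT_M size check in ice_aq_get_port_options().
 */
#define EXAMPLE_PORT_OPT_MAX	16

static int example_select_port_option(struct ice_hw *hw, u8 wanted)
{
	struct ice_aqc_get_port_options_elem options[EXAMPLE_PORT_OPT_MAX] = {};
	bool active_valid = false, pending_valid = false;
	u8 option_count = EXAMPLE_PORT_OPT_MAX;
	u8 active_idx = 0, pending_idx = 0;
	int err;

	/* lport 0 with lport_valid false: FW resolves the PF's own port */
	err = ice_aq_get_port_options(hw, options, &option_count, 0, false,
				      &active_idx, &active_valid,
				      &pending_idx, &pending_valid);
	if (err)
		return err;

	if (wanted >= option_count)
		return -EINVAL;

	if (active_valid && active_idx == wanted)
		return 0;	/* already the active option */

	return ice_aq_set_port_option(hw, 0, false, wanted);
}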
+
+/**
* ice_aq_sff_eeprom
* @hw: pointer to the HW struct
* @lport: bits [7:0] = logical port, bit [8] = logical port valid
@@ -3488,17 +3840,17 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
*
* Read/Write SFF EEPROM (0x06EE)
*/
-enum ice_status
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd)
{
struct ice_aqc_sff_eeprom *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!data || (mem_addr & 0xff00))
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
cmd = &desc.params.read_write_sff_param;
@@ -3527,23 +3879,23 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
*
* Internal function to get (0x0B05) or set (0x0B03) RSS look up table
*/
-static enum ice_status
+static int
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
struct ice_aqc_get_set_rss_lut *cmd_resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u8 *lut;
if (!params)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_handle = params->vsi_handle;
lut = params->lut;
if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
- return ICE_ERR_PARAM;
+ return -EINVAL;
lut_size = params->lut_size;
lut_type = params->lut_type;
@@ -3572,7 +3924,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
break;
default:
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ice_aq_get_set_rss_lut_exit;
}
@@ -3607,7 +3959,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
}
fallthrough;
default:
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ice_aq_get_set_rss_lut_exit;
}
@@ -3626,7 +3978,7 @@ ice_aq_get_set_rss_lut_exit:
*
* get the RSS lookup table, PF or VSI type
*/
-enum ice_status
+int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
return __ice_aq_get_set_rss_lut(hw, get_params, false);
@@ -3639,7 +3991,7 @@ ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_
*
* set the RSS lookup table, PF or VSI type
*/
-enum ice_status
+int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
return __ice_aq_get_set_rss_lut(hw, set_params, true);
@@ -3654,10 +4006,9 @@ ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_
*
* get (0x0B04) or set (0x0B02) the RSS key per VSI
*/
-static enum
-ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
- struct ice_aqc_get_set_rss_keys *key,
- bool set)
+static int
+__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+ struct ice_aqc_get_set_rss_keys *key, bool set)
{
struct ice_aqc_get_set_rss_key *cmd_resp;
u16 key_size = sizeof(*key);
@@ -3688,12 +4039,12 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
*
* get the RSS key per VSI
*/
-enum ice_status
+int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *key)
{
if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
- return ICE_ERR_PARAM;
+ return -EINVAL;
return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
key, false);
@@ -3707,12 +4058,12 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
*
* set the RSS key per VSI
*/
-enum ice_status
+int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys)
{
if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
- return ICE_ERR_PARAM;
+ return -EINVAL;
return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
keys, true);
@@ -3739,7 +4090,7 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
* Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
* flow.
*/
-static enum ice_status
+static int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
struct ice_sq_cd *cd)
@@ -3754,10 +4105,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
if (!qg_list)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
for (i = 0, list = qg_list; i < num_qgrps; i++) {
sum_size += struct_size(list, txqs, list->num_txqs);
@@ -3766,7 +4117,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
}
if (buf_size != sum_size)
- return ICE_ERR_PARAM;
+ return -EINVAL;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -3787,7 +4138,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
*
* Disable LAN Tx queue (0x0C31)
*/
-static enum ice_status
+static int
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
@@ -3796,18 +4147,18 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
u16 i, sz = 0;
+ int status;
cmd = &desc.params.dis_txqs;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
/* qg_list can be NULL only in VM/VF reset flow */
if (!qg_list && !rst_src)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd->num_entries = num_qgrps;
@@ -3856,7 +4207,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
}
if (buf_size != sz)
- return ICE_ERR_PARAM;
+ return -EINVAL;
do_aq:
status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
@@ -3914,8 +4265,7 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
cmd->num_qset_grps = num_qset_grps;
- return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list,
- buf_size, cd));
+ return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}
/* End of FW Admin Queue command wrappers */
@@ -4111,7 +4461,7 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @dest_ctx: pointer to memory for the packed structure
* @ce_info: a description of the structure to be transformed
*/
-enum ice_status
+int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info)
{
@@ -4141,7 +4491,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
break;
default:
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
}
}
@@ -4185,7 +4535,7 @@ ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
*
* This function adds one LAN queue
*/
-enum ice_status
+int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -4193,19 +4543,19 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_sched_node *parent;
struct ice_q_ctx *q_ctx;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
- return ICE_ERR_CFG;
+ return -EIO;
if (num_qgrps > 1 || buf->num_txqs > 1)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
hw = pi->hw;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&pi->sched_lock);
@@ -4213,7 +4563,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
if (!q_ctx) {
ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
q_handle);
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ena_txq_exit;
}
@@ -4221,7 +4571,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
ICE_SCHED_NODE_OWNER_LAN);
if (!parent) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ena_txq_exit;
}
@@ -4266,7 +4616,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
q_ctx->q_teid = le32_to_cpu(node.node_teid);
/* add a leaf node into scheduler tree queue layer */
- status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
+ status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
if (!status)
status = ice_sched_replay_q_bw(pi, q_ctx);
@@ -4290,20 +4640,20 @@ ena_txq_exit:
*
* This function removes queues and their corresponding nodes in SW DB
*/
-enum ice_status
+int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handles, u16 *q_ids, u32 *q_teids,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
- enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_aqc_dis_txq_item *qg_list;
struct ice_q_ctx *q_ctx;
+ int status = -ENOENT;
struct ice_hw *hw;
u16 i, buf_size;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
- return ICE_ERR_CFG;
+ return -EIO;
hw = pi->hw;
@@ -4315,13 +4665,13 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
if (rst_src)
return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
vmvf_num, NULL);
- return ICE_ERR_CFG;
+ return -EIO;
}
buf_size = struct_size(qg_list, q_id, 1);
qg_list = kzalloc(buf_size, GFP_KERNEL);
if (!qg_list)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
mutex_lock(&pi->sched_lock);
@@ -4368,18 +4718,18 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
*
* This function adds/updates the VSI queues per TC.
*/
-static enum ice_status
+static int
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *maxqs, u8 owner)
{
- enum ice_status status = 0;
+ int status = 0;
u8 i;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
- return ICE_ERR_CFG;
+ return -EIO;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&pi->sched_lock);
@@ -4407,7 +4757,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
*
* This function adds/updates the VSI LAN queues per TC.
*/
-enum ice_status
+int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs)
{
@@ -4428,9 +4778,8 @@ int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_rdmaqs)
{
- return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap,
- max_rdmaqs,
- ICE_SCHED_NODE_OWNER_RDMA));
+ return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
+ ICE_SCHED_NODE_OWNER_RDMA);
}
/**
@@ -4451,7 +4800,6 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_aqc_add_rdma_qset_data *buf;
struct ice_sched_node *parent;
- enum ice_status status;
struct ice_hw *hw;
u16 i, buf_size;
int ret;
@@ -4502,12 +4850,10 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
for (i = 0; i < num_qsets; i++) {
node.node_teid = buf->rdma_qsets[i].qset_teid;
- status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
- &node);
- if (status) {
- ret = ice_status_to_errno(status);
+ ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
+ &node, NULL);
+ if (ret)
break;
- }
qset_teid[i] = le32_to_cpu(node.node_teid);
}
rdma_error_exit:
@@ -4528,8 +4874,8 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
struct ice_aqc_dis_txq_item *qg_list;
- enum ice_status status = 0;
struct ice_hw *hw;
+ int status = 0;
u16 qg_size;
int i;
@@ -4568,7 +4914,7 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
mutex_unlock(&pi->sched_lock);
kfree(qg_list);
- return ice_status_to_errno(status);
+ return status;
}
/**
@@ -4577,7 +4923,7 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
*
* Initializes required config data for VSI, FD, ACL, and RSS before replay.
*/
-static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
+static int ice_replay_pre_init(struct ice_hw *hw)
{
struct ice_switch_info *sw = hw->switch_info;
u8 i;
@@ -4588,7 +4934,7 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
* will allow adding rules entries back to filt_rules list,
* which is operational list.
*/
- for (i = 0; i < ICE_SW_LKUP_LAST; i++)
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
list_replace_init(&sw->recp_list[i].filt_rules,
&sw->recp_list[i].filt_replay_rules);
ice_sched_replay_agg_vsi_preinit(hw);
@@ -4604,12 +4950,12 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
* Restore all VSI configuration after reset. It is required to call this
* function with main VSI first.
*/
-enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Replay pre-initialization if there is any */
if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
@@ -4725,12 +5071,12 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
*
* This function queries HW element information
*/
-enum ice_status
+int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf)
{
u16 buf_size, num_elem_ret = 0;
- enum ice_status status;
+ int status;
buf_size = sizeof(*buf);
memset(buf, 0, buf_size);
@@ -4743,6 +5089,104 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
}
/**
+ * ice_aq_read_i2c
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [7] - Repeated start,
+ * bits [6:5] - data offset size,
+ * bit [4] - I2C address type,
+ * bits [3:0] - data size to read (0-16 bytes)
+ * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
+ * @cd: pointer to command details structure or NULL
+ *
+ * Read I2C (0x06E2)
+ */
+int
+ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc = { 0 };
+ struct ice_aqc_i2c *cmd;
+ u8 data_size;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ if (!data)
+ return -EINVAL;
+
+ data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
+
+ cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (!status) {
+ struct ice_aqc_read_i2c_resp *resp;
+ u8 i;
+
+ resp = &desc.params.read_i2c_resp;
+ for (i = 0; i < data_size; i++) {
+ *data = resp->i2c_data[i];
+ data++;
+ }
+ }
+
+ return status;
+}
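A sketch of how the @params byte described above might be composed at a call site; the 0x50 bus address, zero offset and example_* name are illustrative, and only ICE_AQC_I2C_DATA_SIZE_M (the same mask the function uses to unpack the size) is taken from the driver headers.

/* Hedged sketch: read two bytes from a module at I2C address 0x50, offset 0.
 * 'out' must have room for at least two bytes.
 */
static int example_i2c_read_two_bytes(struct ice_hw *hw,
				      struct ice_aqc_link_topo_addr topo,
				      u8 *out)
{
	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, 2);

	return ice_aq_read_i2c(hw, topo, 0x50, cpu_to_le16(0), params,
			       out, NULL);
}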
+
+/**
+ * ice_aq_write_i2c
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
+ * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
+ * @cd: pointer to command details structure or NULL
+ *
+ * Write I2C (0x06E3)
+ *
+ * Return:
+ * * 0 - Successful write to the i2c device
+ * * -EINVAL - Data size greater than 4 bytes
+ * * -EIO - FW error
+ */
+int
+ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc = { 0 };
+ struct ice_aqc_i2c *cmd;
+ u8 data_size;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
+
+ /* data_size limited to 4 */
+ if (data_size > 4)
+ return -EINVAL;
+
+ cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ memcpy(cmd->i2c_data, data, data_size);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
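The write path mirrors the read path; as a hedged sketch with the same illustrative addressing, the data size again rides in the ICE_AQC_I2C_DATA_SIZE_M field and must stay within the 4-byte limit enforced above.

/* Hedged sketch: write a single byte to the device at offset 0. */
static int example_i2c_write_byte(struct ice_hw *hw,
				  struct ice_aqc_link_topo_addr topo,
				  u8 val)
{
	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, 1);

	return ice_aq_write_i2c(hw, topo, 0x50, cpu_to_le16(0), params,
				&val, NULL);
}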
+
+/**
* ice_aq_set_driver_param - Set driver parameter to share via firmware
* @hw: pointer to the HW struct
* @idx: parameter index to set
@@ -4775,7 +5219,7 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
cmd->param_indx = idx;
cmd->param_val = cpu_to_le32(value);
- return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
@@ -4796,7 +5240,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
{
struct ice_aqc_driver_shared_params *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
return -EIO;
@@ -4810,7 +5254,7 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
- return ice_status_to_errno(status);
+ return status;
*value = le32_to_cpu(cmd->param_val);
@@ -4840,7 +5284,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
cmd->gpio_num = pin_idx;
cmd->gpio_val = value ? 1 : 0;
- return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
@@ -4860,7 +5304,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
{
struct ice_aqc_gpio *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
cmd = &desc.params.read_write_gpio;
@@ -4869,27 +5313,29 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
- return ice_status_to_errno(status);
+ return status;
*value = !!cmd->gpio_val;
return 0;
}
/**
- * ice_fw_supports_link_override
+ * ice_is_fw_api_min_ver
* @hw: pointer to the hardware structure
+ * @maj: major version
+ * @min: minor version
+ * @patch: patch version
*
- * Checks if the firmware supports link override
+ * Checks if the firmware API is minimum version
*/
-bool ice_fw_supports_link_override(struct ice_hw *hw)
+static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
- if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
+ if (hw->api_maj_ver == maj) {
+ if (hw->api_min_ver > min)
return true;
- if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
- hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
+ if (hw->api_min_ver == min && hw->api_patch >= patch)
return true;
- } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
+ } else if (hw->api_maj_ver > maj) {
return true;
}
@@ -4897,19 +5343,32 @@ bool ice_fw_supports_link_override(struct ice_hw *hw)
}
/**
+ * ice_fw_supports_link_override
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the firmware supports link override
+ */
+bool ice_fw_supports_link_override(struct ice_hw *hw)
+{
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
+ ICE_FW_API_LINK_OVERRIDE_MIN,
+ ICE_FW_API_LINK_OVERRIDE_PATCH);
+}
+
+/**
* ice_get_link_default_override
* @ldo: pointer to the link default override struct
* @pi: pointer to the port info struct
*
* Gets the link default override for a port
*/
-enum ice_status
+int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi)
{
u16 i, tlv, tlv_len, tlv_start, buf, offset;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
@@ -4994,7 +5453,7 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
*
* Set the LLDP MIB. (0x0A08)
*/
-enum ice_status
+int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
@@ -5004,7 +5463,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
cmd = &desc.params.lldp_set_mib;
if (buf_size == 0 || !buf)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
@@ -5026,16 +5485,9 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
if (hw->mac_type != ICE_MAC_E810)
return false;
- if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
- return true;
- if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
- hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
- return true;
- } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
- return true;
- }
- return false;
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
+ ICE_FW_API_LLDP_FLTR_MIN,
+ ICE_FW_API_LLDP_FLTR_PATCH);
}
/**
@@ -5044,7 +5496,7 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
* @vsi_num: absolute HW index for VSI
* @add: boolean for if adding or removing a filter
*/
-enum ice_status
+int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
struct ice_aqc_lldp_filter_ctrl *cmd;
@@ -5065,6 +5517,19 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
}
/**
+ * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
+ * @hw: pointer to HW struct
+ */
+int ice_lldp_execute_pending_mib(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
* ice_fw_supports_report_dflt_cfg
* @hw: pointer to the hardware structure
*
@@ -5072,14 +5537,43 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
*/
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
- if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
- return true;
- if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
- hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
- return true;
- } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
- return true;
- }
- return false;
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
+ ICE_FW_API_REPORT_DFLT_CFG_MIN,
+ ICE_FW_API_REPORT_DFLT_CFG_PATCH);
+}
+
+/* Each index in the following array corresponds to one AQ link speed bit,
+ * covering the range ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB and
+ * excluding ICE_AQ_LINK_SPEED_UNKNOWN, which is BIT(15). The link_speed
+ * returned by the firmware is a 16-bit value, and the array is indexed by
+ * [fls(speed) - 1].
+ */
+static const u32 ice_aq_to_link_speed[] = {
+ SPEED_10, /* BIT(0) */
+ SPEED_100,
+ SPEED_1000,
+ SPEED_2500,
+ SPEED_5000,
+ SPEED_10000,
+ SPEED_20000,
+ SPEED_25000,
+ SPEED_40000,
+ SPEED_50000,
+ SPEED_100000, /* BIT(10) */
+};
+
+/**
+ * ice_get_link_speed - get integer speed from table
+ * @index: array index from fls(aq speed) - 1
+ *
+ * Returns: u32 value containing integer speed
+ */
+u32 ice_get_link_speed(u16 index)
+{
+ if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
+ return 0;
+
+ return ice_aq_to_link_speed[index];
}
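A worked example of the fls()-based indexing described in the comment above; the helper name is illustrative, but the bit positions follow directly from the table.

/* Hedged sketch: convert an AQ link_speed bitmask to Mb/s.
 * ICE_AQ_LINK_SPEED_25GB (BIT(7) in the AQ encoding) gives fls() == 8,
 * so index 7 selects SPEED_25000 from ice_aq_to_link_speed[].
 */
static u32 example_aq_speed_to_mbps(u16 aq_link_speed)
{
	if (!aq_link_speed)
		return 0;

	return ice_get_link_speed(fls(aq_link_speed) - 1);
}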
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 65c1b3244264..8ba5f935a092 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -4,120 +4,125 @@
#ifndef _ICE_COMMON_H_
#define _ICE_COMMON_H_
-#include "ice.h"
+#include <linux/bitfield.h>
+
#include "ice_type.h"
#include "ice_nvm.h"
#include "ice_flex_pipe.h"
-#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
+#include "ice_switch.h"
+#include "ice_fdir.h"
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
-enum ice_status ice_init_hw(struct ice_hw *hw);
+int ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
-enum ice_status ice_check_reset(struct ice_hw *hw);
-enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
-enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
-enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
+int ice_check_reset(struct ice_hw *hw);
+int ice_reset(struct ice_hw *hw, enum ice_reset_req req);
+int ice_create_all_ctrlq(struct ice_hw *hw);
+int ice_init_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw);
void ice_destroy_all_ctrlq(struct ice_hw *hw);
-enum ice_status
+int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending);
-enum ice_status
+int
ice_get_link_status(struct ice_port_info *pi, bool *link_up);
-enum ice_status ice_update_link_info(struct ice_port_info *pi);
-enum ice_status
+int ice_update_link_info(struct ice_port_info *pi);
+int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
-enum ice_status
+int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
-enum ice_status
+int
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
-enum ice_status
+int
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
bool ice_is_sbq_supported(struct ice_hw *hw);
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
-enum ice_status
+int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
-enum ice_status ice_get_caps(struct ice_hw *hw);
+int ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
-enum ice_status
+int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
-enum ice_status
+int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
-enum ice_status
+int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params);
-enum ice_status
+int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
-enum ice_status
+int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
-enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
+int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-enum ice_status
+int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info);
extern struct mutex ice_global_cfg_lock_sw;
-enum ice_status
+int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
-enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
+int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd);
-enum ice_status
+int
+ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
+ struct ice_sq_cd *cd);
+int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps);
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap);
-enum ice_status
+int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
bool ice_is_e810(struct ice_hw *hw);
-enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
-enum ice_status
+int ice_clear_pf_cfg(struct ice_hw *hw);
+int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
bool ice_fw_supports_link_override(struct ice_hw *hw);
-enum ice_status
+int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
-enum ice_status
+int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
bool ena_auto_link_update);
-enum ice_status
+int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
- enum ice_fc_mode fc);
+ enum ice_fc_mode req_mode);
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg);
@@ -125,30 +130,40 @@ void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg);
-enum ice_status
+int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec);
-enum ice_status
+int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
-enum ice_status
+int
+ice_aq_get_port_options(struct ice_hw *hw,
+ struct ice_aqc_get_port_options_elem *options,
+ u8 *option_count, u8 lport, bool lport_valid,
+ u8 *active_option_idx, bool *active_option_valid,
+ u8 *pending_option_idx, bool *pending_option_valid);
+int
+ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
+ u8 new_option);
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
+u32 ice_get_link_speed(u16 index);
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
@@ -159,19 +174,19 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id);
-enum ice_status
+int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handle, u16 *q_ids, u32 *q_teids,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs);
-enum ice_status
+int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
+int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
struct ice_q_ctx *
@@ -184,7 +199,8 @@ void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
bool ice_is_e810t(struct ice_hw *hw);
-enum ice_status
+bool ice_is_e823(struct ice_hw *hw);
+int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
int
@@ -199,11 +215,21 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd);
-enum ice_status
+bool ice_is_100m_speed_supported(struct ice_hw *hw);
+int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
-enum ice_status
+int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
+int ice_lldp_execute_pending_mib(struct ice_hw *hw);
+int
+ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd);
+int
+ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data,
+ struct ice_sq_cd *cd);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 03bdb125be36..d2faf1baad2f 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -87,7 +87,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
@@ -96,7 +96,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
&cq->sq.desc_buf.pa,
GFP_KERNEL | __GFP_ZERO);
if (!cq->sq.desc_buf.va)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->sq.desc_buf.size = size;
cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
@@ -107,7 +107,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->sq.desc_buf.va = NULL;
cq->sq.desc_buf.pa = 0;
cq->sq.desc_buf.size = 0;
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
return 0;
@@ -118,7 +118,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
@@ -127,7 +127,7 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
&cq->rq.desc_buf.pa,
GFP_KERNEL | __GFP_ZERO);
if (!cq->rq.desc_buf.va)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->rq.desc_buf.size = size;
return 0;
}
@@ -154,7 +154,7 @@ static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
int i;
@@ -165,7 +165,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
sizeof(cq->rq.desc_buf), GFP_KERNEL);
if (!cq->rq.dma_head)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;
/* allocate the mapped buffers */
@@ -218,7 +218,7 @@ unwind_alloc_rq_bufs:
devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
cq->rq.dma_head = NULL;
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -226,7 +226,7 @@ unwind_alloc_rq_bufs:
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
int i;
@@ -235,7 +235,7 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
sizeof(cq->sq.desc_buf), GFP_KERNEL);
if (!cq->sq.dma_head)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
/* allocate the mapped buffers */
@@ -266,10 +266,10 @@ unwind_alloc_sq_bufs:
devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
cq->sq.dma_head = NULL;
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
-static enum ice_status
+static int
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
/* Clear Head and Tail */
@@ -283,7 +283,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
/* Check one register to verify that config was applied */
if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
- return ICE_ERR_AQ_ERROR;
+ return -EIO;
return 0;
}
@@ -295,8 +295,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
*
* Configure base address and length registers for the transmit queue
*/
-static enum ice_status
-ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}
@@ -308,10 +307,9 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
*
* Configure base address and length registers for the receive (event queue)
*/
-static enum ice_status
-ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status status;
+ int status;
status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
if (status)
@@ -361,19 +359,19 @@ do { \
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
*/
-static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code;
+ int ret_code;
if (cq->sq.count > 0) {
/* queue already initialized */
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto init_ctrlq_exit;
}
/* verify input for valid configuration */
if (!cq->num_sq_entries || !cq->sq_buf_size) {
- ret_code = ICE_ERR_CFG;
+ ret_code = -EIO;
goto init_ctrlq_exit;
}
@@ -421,19 +419,19 @@ init_ctrlq_exit:
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
*/
-static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code;
+ int ret_code;
if (cq->rq.count > 0) {
/* queue already initialized */
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto init_ctrlq_exit;
}
/* verify input for valid configuration */
if (!cq->num_rq_entries || !cq->rq_buf_size) {
- ret_code = ICE_ERR_CFG;
+ ret_code = -EIO;
goto init_ctrlq_exit;
}
@@ -474,15 +472,14 @@ init_ctrlq_exit:
*
* The main shutdown routine for the Control Transmit Queue
*/
-static enum ice_status
-ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code = 0;
+ int ret_code = 0;
mutex_lock(&cq->sq_lock);
if (!cq->sq.count) {
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto shutdown_sq_out;
}
@@ -541,15 +538,14 @@ static bool ice_aq_ver_check(struct ice_hw *hw)
*
* The main shutdown routine for the Control Receive Queue
*/
-static enum ice_status
-ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code = 0;
+ int ret_code = 0;
mutex_lock(&cq->rq_lock);
if (!cq->rq.count) {
- ret_code = ICE_ERR_NOT_READY;
+ ret_code = -EBUSY;
goto shutdown_rq_out;
}
@@ -576,17 +572,17 @@ shutdown_rq_out:
* ice_init_check_adminq - Check version for Admin Queue to know if its alive
* @hw: pointer to the hardware structure
*/
-static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
+static int ice_init_check_adminq(struct ice_hw *hw)
{
struct ice_ctl_q_info *cq = &hw->adminq;
- enum ice_status status;
+ int status;
status = ice_aq_get_fw_ver(hw, NULL);
if (status)
goto init_ctrlq_free_rq;
if (!ice_aq_ver_check(hw)) {
- status = ICE_ERR_FW_API_VER;
+ status = -EIO;
goto init_ctrlq_free_rq;
}
@@ -612,10 +608,10 @@ init_ctrlq_free_rq:
*
* NOTE: this function does not initialize the controlq locks
*/
-static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
struct ice_ctl_q_info *cq;
- enum ice_status ret_code;
+ int ret_code;
switch (q_type) {
case ICE_CTL_Q_ADMIN:
@@ -631,19 +627,16 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
cq = &hw->mailboxq;
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
cq->qtype = q_type;
/* verify input for valid configuration */
if (!cq->num_rq_entries || !cq->num_sq_entries ||
!cq->rq_buf_size || !cq->sq_buf_size) {
- return ICE_ERR_CFG;
+ return -EIO;
}
- /* setup SQ command write back timeout */
- cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;
-
/* allocate the ATQ */
ret_code = ice_init_sq(hw, cq);
if (ret_code)
@@ -751,10 +744,10 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
*
* NOTE: this function does not initialize the controlq locks.
*/
-enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
+int ice_init_all_ctrlq(struct ice_hw *hw)
{
- enum ice_status status;
u32 retry = 0;
+ int status;
/* Init FW admin queue */
do {
@@ -763,7 +756,7 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
return status;
status = ice_init_check_adminq(hw);
- if (status != ICE_ERR_AQ_FW_CRITICAL)
+ if (status != -EIO)
break;
ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
@@ -814,7 +807,7 @@ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
* driver needs to re-initialize control queues at run time it should call
* ice_init_all_ctrlq instead.
*/
-enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
+int ice_create_all_ctrlq(struct ice_hw *hw)
{
ice_init_ctrlq_locks(&hw->adminq);
if (ice_is_sbq_supported(hw))
@@ -962,7 +955,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* This is the main send command routine for the ATQ. It runs the queue,
* cleans the queue, etc.
*/
-enum ice_status
+int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -970,27 +963,27 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_dma_mem *dma_buf = NULL;
struct ice_aq_desc *desc_on_ring;
bool cmd_completed = false;
- enum ice_status status = 0;
struct ice_sq_cd *details;
- u32 total_delay = 0;
+ unsigned long timeout;
+ int status = 0;
u16 retval = 0;
u32 val = 0;
/* if reset is in progress return a soft error */
if (hw->reset_ongoing)
- return ICE_ERR_RESET_ONGOING;
+ return -EBUSY;
mutex_lock(&cq->sq_lock);
cq->sq_last_status = ICE_AQ_RC_OK;
if (!cq->sq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
- status = ICE_ERR_AQ_EMPTY;
+ status = -EIO;
goto sq_send_command_error;
}
if ((buf && !buf_size) || (!buf && buf_size)) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto sq_send_command_error;
}
@@ -998,7 +991,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (buf_size > cq->sq_buf_size) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
buf_size);
- status = ICE_ERR_INVAL_SIZE;
+ status = -EINVAL;
goto sq_send_command_error;
}
@@ -1011,7 +1004,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (val >= cq->num_sq_entries) {
ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
val);
- status = ICE_ERR_AQ_EMPTY;
+ status = -EIO;
goto sq_send_command_error;
}
@@ -1028,7 +1021,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
*/
if (ice_clean_sq(hw, cq) == 0) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
- status = ICE_ERR_AQ_FULL;
+ status = -ENOSPC;
goto sq_send_command_error;
}
@@ -1064,13 +1057,14 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
cq->sq.next_to_use = 0;
wr32(hw, cq->sq.tail, cq->sq.next_to_use);
+ timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;
do {
if (ice_sq_done(hw, cq))
break;
- udelay(ICE_CTL_Q_SQ_CMD_USEC);
- total_delay++;
- } while (total_delay < cq->sq_cmd_timeout);
+ usleep_range(ICE_CTL_Q_SQ_CMD_USEC,
+ ICE_CTL_Q_SQ_CMD_USEC * 3 / 2);
+ } while (time_before(jiffies, timeout));
/* if ready, copy the desc back to temp */
if (ice_sq_done(hw, cq)) {
@@ -1082,7 +1076,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (copy_size > buf_size) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
copy_size, buf_size);
- status = ICE_ERR_AQ_ERROR;
+ status = -EIO;
} else {
memcpy(buf, dma_buf->va, copy_size);
}
@@ -1098,7 +1092,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
cmd_completed = true;
if (!status && retval != ICE_AQ_RC_OK)
- status = ICE_ERR_AQ_ERROR;
+ status = -EIO;
cq->sq_last_status = (enum ice_aq_err)retval;
}
@@ -1116,10 +1110,10 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
- status = ICE_ERR_AQ_FW_CRITICAL;
+ status = -EIO;
} else {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
- status = ICE_ERR_AQ_TIMEOUT;
+ status = -EIO;
}
}
@@ -1154,15 +1148,15 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
* the contents through e. It can also return how many events are
* left to process through 'pending'.
*/
-enum ice_status
+int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending)
{
u16 ntc = cq->rq.next_to_clean;
enum ice_aq_err rq_last_status;
- enum ice_status ret_code = 0;
struct ice_aq_desc *desc;
struct ice_dma_mem *bi;
+ int ret_code = 0;
u16 desc_idx;
u16 datalen;
u16 flags;
@@ -1176,7 +1170,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (!cq->rq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
- ret_code = ICE_ERR_AQ_EMPTY;
+ ret_code = -EIO;
goto clean_rq_elem_err;
}
@@ -1185,7 +1179,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (ntu == ntc) {
/* nothing to do - shouldn't need to update ring's values */
- ret_code = ICE_ERR_AQ_NO_WORK;
+ ret_code = -EALREADY;
goto clean_rq_elem_out;
}
@@ -1196,7 +1190,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
if (flags & ICE_AQ_FLAG_ERR) {
- ret_code = ICE_ERR_AQ_ERROR;
+ ret_code = -EIO;
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
le16_to_cpu(desc->opcode), rq_last_status);
}
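With the ice_status removal, ice_clean_rq_elem() now reports "nothing to do" as -EALREADY and hard failures as -EIO. A minimal drain loop, as an illustrative sketch only (in the real service routine the caller allocates event->msg_buf itself and sizes it to cq->rq_buf_size):

/* Illustrative drain loop for the control receive queue. */
static void example_drain_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
			     struct ice_rq_event_info *event)
{
	u16 pending = 0;
	int ret;

	do {
		ret = ice_clean_rq_elem(hw, cq, event, &pending);
		if (ret == -EALREADY)
			break;		/* queue is empty, nothing to process */
		if (ret)
			break;		/* -EIO: descriptor carried an error */
		/* handle event->desc and event->msg_buf here */
	} while (pending);
}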
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index c07e9cc9fc6e..950b7f4a7a05 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -34,7 +34,7 @@ enum ice_ctl_q {
};
/* Control Queue timeout settings - max delay 1s */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT 10000 /* Count 10000 times */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT HZ /* Wait max 1s */
#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
#define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */
#define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */
@@ -87,7 +87,6 @@ struct ice_ctl_q_info {
enum ice_ctl_q qtype;
struct ice_ctl_q_ring rq; /* receive queue */
struct ice_ctl_q_ring sq; /* send queue */
- u32 sq_cmd_timeout; /* send queue cmd write back timeout */
u16 num_rq_entries; /* receive queue depth */
u16 num_sq_entries; /* send queue depth */
u16 rq_buf_size; /* receive queue buffer size */
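The per-queue sq_cmd_timeout counter is dropped because the wait is now bounded by wall-clock time: usleep_range() lets the scheduler sleep between polls instead of busy-waiting with udelay(), and time_before() caps the overall wait at one second regardless of per-poll latency. A condensed sketch of the resulting pattern, using the names from this patch:

unsigned long timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;	/* HZ, i.e. 1 second */

do {
	if (ice_sq_done(hw, cq))			/* poll the queue head */
		break;
	usleep_range(ICE_CTL_Q_SQ_CMD_USEC,		/* 100 usec lower bound */
		     ICE_CTL_Q_SQ_CMD_USEC * 3 / 2);	/* relaxed upper bound */
} while (time_before(jiffies, timeout));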
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 241427cd9bc0..396e555023aa 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_common.h"
-#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_dcb.h"
@@ -19,19 +18,19 @@
*
* Requests the complete LLDP MIB (entire packet). (0x0A00)
*/
-static enum ice_status
+static int
ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
u16 buf_size, u16 *local_len, u16 *remote_len,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_get_mib *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.lldp_get_mib;
if (buf_size == 0 || !buf)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib);
@@ -61,7 +60,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
* Enable or Disable posting of an event on ARQ when LLDP MIB
* associated with the interface changes (0x0A01)
*/
-static enum ice_status
+static int
ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
struct ice_sq_cd *cd)
{
@@ -74,6 +73,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
if (!ena_update)
cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS;
+ else
+ cmd->command |= FIELD_PREP(ICE_AQ_LLDP_MIB_PENDING_M,
+ ICE_AQ_LLDP_MIB_PENDING_ENABLE);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -89,7 +91,7 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
*
* Stop or Shutdown the embedded LLDP Agent (0x0A05)
*/
-enum ice_status
+int
ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd)
{
@@ -117,8 +119,7 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
*
* Start the embedded LLDP Agent on all ports. (0x0A06)
*/
-enum ice_status
-ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
+int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_start *cmd;
struct ice_aq_desc desc;
@@ -568,7 +569,7 @@ ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
* @tlv: Organization specific TLV
* @dcbcfg: Local store to update ETS REC data
*
- * Currently only IEEE 802.1Qaz TLV is supported, all others
+ * Currently IEEE 802.1Qaz and CEE DCBX TLV are supported, others
* will be returned
*/
static void
@@ -587,7 +588,7 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
ice_parse_cee_tlv(tlv, dcbcfg);
break;
default:
- break;
+ break; /* Other OUIs not supported */
}
}
@@ -598,18 +599,17 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
*
* Parse DCB configuration from the LLDPDU
*/
-static enum ice_status
-ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
+static int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
{
struct ice_lldp_org_tlv *tlv;
- enum ice_status ret = 0;
u16 offset = 0;
+ int ret = 0;
u16 typelen;
u16 type;
u16 len;
if (!lldpmib || !dcbcfg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* set to the start of LLDPDU */
lldpmib += ETH_HLEN;
@@ -649,17 +649,17 @@ ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
*
* Query DCB configuration from the firmware
*/
-enum ice_status
+int
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg)
{
- enum ice_status ret;
u8 *lldpmib;
+ int ret;
/* Allocate the LLDPDU */
lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib,
ICE_LLDPDU_SIZE, NULL, NULL, NULL);
@@ -684,17 +684,17 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
* @cd: pointer to command details structure or NULL
*
* Start/Stop the embedded dcbx Agent. In case that this wrapper function
- * returns ICE_SUCCESS, caller will need to check if FW returns back the same
+ * returns 0, caller will need to check if FW returns back the same
* value as stated in dcbx_agent_status, and react accordingly. (0x0A09)
*/
-enum ice_status
+int
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop_start_specific_agent *cmd;
- enum ice_status status;
struct ice_aq_desc desc;
u16 opcode;
+ int status;
cmd = &desc.params.lldp_agent_ctrl;
@@ -724,7 +724,7 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
*
* Get CEE DCBX mode operational configuration from firmware (0x0A07)
*/
-static enum ice_status
+static int
ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
struct ice_aqc_get_cee_dcb_cfg_resp *buff,
struct ice_sq_cd *cd)
@@ -749,7 +749,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
{
struct ice_aqc_set_query_pfc_mode *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
return -EINVAL;
@@ -762,7 +762,7 @@ int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
if (status)
- return ice_status_to_errno(status);
+ return status;
/* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is
* disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has
@@ -903,14 +903,13 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
*
* Get IEEE or CEE mode DCB configuration from the Firmware
*/
-static enum ice_status
-ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
+static int ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
{
struct ice_dcbx_cfg *dcbx_cfg = NULL;
- enum ice_status ret;
+ int ret;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (dcbx_mode == ICE_DCBX_MODE_IEEE)
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
@@ -943,14 +942,14 @@ out:
*
* Get DCB configuration from the Firmware
*/
-enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
+int ice_get_dcb_cfg(struct ice_port_info *pi)
{
struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg;
struct ice_dcbx_cfg *dcbx_cfg;
- enum ice_status ret;
+ int ret;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
if (!ret) {
@@ -968,19 +967,55 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
}
/**
+ * ice_get_dcb_cfg_from_mib_change
+ * @pi: port information structure
+ * @event: pointer to the admin queue receive event
+ *
+ * Set DCB configuration from received MIB Change event
+ */
+void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
+ struct ice_rq_event_info *event)
+{
+ struct ice_dcbx_cfg *dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
+ struct ice_aqc_lldp_get_mib *mib;
+ u8 change_type, dcbx_mode;
+
+ mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+
+ change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
+ if (change_type == ICE_AQ_LLDP_MIB_REMOTE)
+ dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
+
+ dcbx_mode = FIELD_GET(ICE_AQ_LLDP_DCBX_M, mib->type);
+
+ switch (dcbx_mode) {
+ case ICE_AQ_LLDP_DCBX_IEEE:
+ dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
+ ice_lldp_to_dcb_cfg(event->msg_buf, dcbx_cfg);
+ break;
+
+ case ICE_AQ_LLDP_DCBX_CEE:
+ pi->qos_cfg.desired_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg;
+ ice_cee_to_dcb_cfg((struct ice_aqc_get_cee_dcb_cfg_resp *)
+ event->msg_buf, pi);
+ break;
+ }
+}
+
+/**
* ice_init_dcb
* @hw: pointer to the HW struct
* @enable_mib_change: enable MIB change event
*
* Update DCB configuration from the Firmware
*/
-enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
+int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
{
struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
- enum ice_status ret = 0;
+ int ret = 0;
if (!hw->func_caps.common_cap.dcb)
- return ICE_ERR_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
qos_cfg->is_sw_lldp = true;
@@ -996,7 +1031,7 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
return ret;
qos_cfg->is_sw_lldp = false;
} else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) {
- return ICE_ERR_NOT_READY;
+ return -EBUSY;
}
/* Configure the LLDP MIB change event */
@@ -1016,19 +1051,19 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
*
* Configure (disable/enable) MIB
*/
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
+int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
{
struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
- enum ice_status ret;
+ int ret;
if (!hw->func_caps.common_cap.dcb)
- return ICE_ERR_NOT_SUPPORTED;
+ return -EOPNOTSUPP;
/* Get DCBX status */
qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS)
- return ICE_ERR_NOT_READY;
+ return -EBUSY;
ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
if (!ret)
@@ -1376,7 +1411,7 @@ ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
tlv->ouisubtype = htonl(ouisubtype);
buf[0] = dcbcfg->pfc.pfccap & 0xF;
- buf[1] = dcbcfg->pfc.pfcena & 0xF;
+ buf[1] = dcbcfg->pfc.pfcena;
}
/**
@@ -1469,16 +1504,16 @@ ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
*
* Set DCB configuration to the Firmware
*/
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
+int ice_set_dcb_cfg(struct ice_port_info *pi)
{
u8 mib_type, *lldpmib = NULL;
struct ice_dcbx_cfg *dcbcfg;
- enum ice_status ret;
struct ice_hw *hw;
u16 miblen;
+ int ret;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
@@ -1487,7 +1522,7 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
/* Allocate the LLDPDU */
lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
@@ -1511,17 +1546,17 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
*
* query current port ETS configuration
*/
-static enum ice_status
+static int
ice_aq_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_query_port_ets *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.port_ets;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
cmd->port_teid = pi->root->info.node_teid;
@@ -1537,18 +1572,18 @@ ice_aq_query_port_ets(struct ice_port_info *pi,
*
* update the SW DB with the new TC changes
*/
-static enum ice_status
+static int
ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf)
{
struct ice_sched_node *node, *tc_node;
struct ice_aqc_txsched_elem_data elem;
- enum ice_status status = 0;
u32 teid1, teid2;
+ int status = 0;
u8 i, j;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* suspend the missing TC nodes */
for (i = 0; i < pi->root->num_children; i++) {
teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid);
@@ -1584,7 +1619,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
/* new TC */
status = ice_sched_query_elem(pi->hw, teid2, &elem);
if (!status)
- status = ice_sched_add_node(pi, 1, &elem);
+ status = ice_sched_add_node(pi, 1, &elem, NULL);
if (status)
break;
/* update the TC number */
@@ -1605,12 +1640,12 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
* query current port ETS configuration and update the
* SW DB with the TC changes
*/
-enum ice_status
+int
ice_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
status = ice_aq_query_port_ets(pi, buf, buf_size, cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index 9b6f87a889a6..be34650a77d5 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -5,6 +5,7 @@
#define _ICE_DCB_H_
#include "ice_type.h"
+#include <scsi/iscsi_proto.h>
#define ICE_DCBX_STATUS_NOT_STARTED 0
#define ICE_DCBX_STATUS_IN_PROGRESS 1
@@ -138,28 +139,29 @@ struct ice_cee_app_prio {
} __packed;
int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
-enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
-enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
-enum ice_status
+int ice_get_dcb_cfg(struct ice_port_info *pi);
+int ice_set_dcb_cfg(struct ice_port_info *pi);
+void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
+ struct ice_rq_event_info *event);
+int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
+int
ice_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cmd_details);
#ifdef CONFIG_DCB
-enum ice_status
+int
ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd);
-enum ice_status
-ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
-enum ice_status
+int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
+int
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd);
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
+int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
#else /* CONFIG_DCB */
-static inline enum ice_status
+static inline int
ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
bool __always_unused shutdown_lldp_agent,
bool __always_unused persist,
@@ -168,7 +170,7 @@ ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
return 0;
}
-static inline enum ice_status
+static inline int
ice_aq_start_lldp(struct ice_hw __always_unused *hw,
bool __always_unused persist,
struct ice_sq_cd __always_unused *cd)
@@ -176,7 +178,7 @@ ice_aq_start_lldp(struct ice_hw __always_unused *hw,
return 0;
}
-static inline enum ice_status
+static inline int
ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw,
bool __always_unused start_dcbx_agent,
bool *dcbx_agent_status,
@@ -187,7 +189,7 @@ ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw,
return 0;
}
-static inline enum ice_status
+static inline int
ice_cfg_lldp_mib_change(struct ice_hw __always_unused *hw,
bool __always_unused ena_mib)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a72e18320a22..c6d4926f0fcf 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -3,6 +3,7 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
+#include "ice_devlink.h"
/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
@@ -364,6 +365,12 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
/* Enable DCB tagging only when more than one TC */
if (ice_dcb_get_num_tc(new_cfg) > 1) {
dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
+ if (pf->hw.port_info->is_custom_tx_enabled) {
+ dev_err(dev, "Custom Tx scheduler feature enabled, can't configure DCB\n");
+ return -EBUSY;
+ }
+ ice_tear_down_devlink_rate_tree(pf);
+
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
} else {
dev_dbg(dev, "DCB tagging disabled (num TC = 1)\n");
@@ -434,7 +441,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
goto out;
}
- ice_pf_dcb_recfg(pf);
+ ice_pf_dcb_recfg(pf, false);
out:
/* enable previously downed VSIs */
@@ -528,7 +535,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
struct ice_aqc_port_ets_elem buf = { 0 };
struct device *dev = ice_pf_to_dev(pf);
struct ice_dcbx_cfg *err_cfg;
- enum ice_status ret;
+ int ret;
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
@@ -724,12 +731,13 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
/**
* ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
* @pf: pointer to the PF struct
+ * @locked: is adev device lock held
*
* Assumed caller has already disabled all VSIs before
* calling this function. Reconfiguring DCB based on
* local_dcbx_cfg.
*/
-void ice_pf_dcb_recfg(struct ice_pf *pf)
+void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
struct iidc_event *event;
@@ -776,14 +784,16 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
if (vsi->type == ICE_VSI_PF)
ice_dcbnl_set_all(vsi);
}
- /* Notify the AUX drivers that TC change is finished */
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- return;
+ if (!locked) {
+ /* Notify the AUX drivers that TC change is finished */
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return;
- set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
- ice_send_event_to_aux(pf, event);
- kfree(event);
+ set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
}
/**
@@ -852,7 +862,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
if (err)
goto dcb_init_err;
- return err;
+ return 0;
dcb_init_err:
dev_err(dev, "DCB init failed\n");
@@ -874,6 +884,9 @@ void ice_update_dcb_stats(struct ice_pf *pf)
prev_ps = &pf->stats_prev;
cur_ps = &pf->stats;
+ if (ice_is_reset_in_progress(pf->state))
+ pf->stat_prev_loaded = false;
+
for (i = 0; i < 8; i++) {
ice_stat_update32(hw, GLPRT_PXOFFRXC(port, i),
pf->stat_prev_loaded,
@@ -916,7 +929,8 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
return;
/* Insert 802.1p priority into VLAN header */
- if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN) ||
+ if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN ||
+ first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) ||
skb->priority != TC_PRIO_CONTROL) {
first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
/* Mask the lower 3 bits to set the 802.1p priority */
@@ -925,11 +939,24 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
/* if this is not already set it means a VLAN 0 + priority needs
* to be offloaded
*/
- first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
+ if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
+ first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
+ else
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
}
}
/**
+ * ice_dcb_is_mib_change_pending - Check if MIB change is pending
+ * @state: MIB change state
+ */
+static bool ice_dcb_is_mib_change_pending(u8 state)
+{
+ return ICE_AQ_LLDP_MIB_CHANGE_PENDING ==
+ FIELD_GET(ICE_AQ_LLDP_MIB_CHANGE_STATE_M, state);
+}
+
+/**
* ice_dcb_process_lldp_set_mib_change - Process MIB change
* @pf: ptr to ice_pf
* @event: pointer to the admin queue receive event
@@ -942,6 +969,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct device *dev = ice_pf_to_dev(pf);
struct ice_aqc_lldp_get_mib *mib;
struct ice_dcbx_cfg tmp_dcbx_cfg;
+ bool pending_handled = true;
bool need_reconfig = false;
struct ice_port_info *pi;
u8 mib_type;
@@ -958,41 +986,58 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
pi = pf->hw.port_info;
mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+
/* Ignore if event is not for Nearest Bridge */
- mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
- ICE_AQ_LLDP_BRID_TYPE_M);
+ mib_type = FIELD_GET(ICE_AQ_LLDP_BRID_TYPE_M, mib->type);
dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type);
if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
return;
+ /* A pending change event contains accurate config information, and
+ * the FW setting has not been updated yet, so detect if change is
+ * pending to determine where to pull config information from
+ * (FW vs event)
+ */
+ if (ice_dcb_is_mib_change_pending(mib->state))
+ pending_handled = false;
+
/* Check MIB Type and return if event for Remote MIB update */
- mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
+ mib_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local");
if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) {
/* Update the remote cached instance and return */
- ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
- ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
- &pi->qos_cfg.remote_dcbx_cfg);
- if (ret) {
- dev_err(dev, "Failed to get remote DCB config\n");
- return;
+ if (!pending_handled) {
+ ice_get_dcb_cfg_from_mib_change(pi, event);
+ } else {
+ ret =
+ ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
+ ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
+ &pi->qos_cfg.remote_dcbx_cfg);
+ if (ret)
+ dev_dbg(dev, "Failed to get remote DCB config\n");
}
+ return;
}
+ /* That a DCB change has happened is now determined */
mutex_lock(&pf->tc_mutex);
/* store the old configuration */
- tmp_dcbx_cfg = pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ tmp_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg;
/* Reset the old DCBX configuration data */
memset(&pi->qos_cfg.local_dcbx_cfg, 0,
sizeof(pi->qos_cfg.local_dcbx_cfg));
/* Get updated DCBX data from firmware */
- ret = ice_get_dcb_cfg(pf->hw.port_info);
- if (ret) {
- dev_err(dev, "Failed to get DCB config\n");
- goto out;
+ if (!pending_handled) {
+ ice_get_dcb_cfg_from_mib_change(pi, event);
+ } else {
+ ret = ice_get_dcb_cfg(pi);
+ if (ret) {
+ dev_err(dev, "Failed to get DCB config\n");
+ goto out;
+ }
}
/* No change detected in DCBX configs */
@@ -1019,18 +1064,24 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
+ /* Send Execute Pending MIB Change event if it is a Pending event */
+ if (!pending_handled) {
+ ice_lldp_execute_pending_mib(&pf->hw);
+ pending_handled = true;
+ }
+
rtnl_lock();
/* disable VSIs affected by DCB changes */
ice_dcb_ena_dis_vsi(pf, false, true);
- ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+ ret = ice_query_port_ets(pi, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(dev, "Query Port ETS failed\n");
goto unlock_rtnl;
}
/* changes in configuration update VSI */
- ice_pf_dcb_recfg(pf);
+ ice_pf_dcb_recfg(pf, false);
/* enable previously downed VSIs */
ice_dcb_ena_dis_vsi(pf, true, true);
@@ -1038,4 +1089,8 @@ unlock_rtnl:
rtnl_unlock();
out:
mutex_unlock(&pf->tc_mutex);
+
+ /* Send Execute Pending MIB Change event if it is a Pending event */
+ if (!pending_handled)
+ ice_lldp_execute_pending_mib(&pf->hw);
}
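Condensing the handler changes above: a pending MIB change carries the new configuration in the event itself, so the driver parses the event buffer and later issues an Execute Pending LLDP MIB Change command; a non-pending change is still read back from firmware. Illustrative only, using the names introduced in this patch (the actual handler interleaves these steps with VSI disable/enable and locking):

if (ice_dcb_is_mib_change_pending(mib->state)) {
	ice_get_dcb_cfg_from_mib_change(pi, event);	/* config from event payload */
	ice_lldp_execute_pending_mib(&pf->hw);		/* ask FW to apply it */
} else {
	ret = ice_get_dcb_cfg(pi);			/* config already applied in FW */
}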
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 4c421c842a13..800879a88c5e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -23,7 +23,7 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
int
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg);
-void ice_pf_dcb_recfg(struct ice_pf *pf);
+void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
@@ -128,7 +128,7 @@ static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
return 0;
}
-static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { }
+static inline void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { }
static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }
static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
new file mode 100644
index 000000000000..d71ed210f9c4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -0,0 +1,1897 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice.h"
+#include "ice_ddp.h"
+
+/* For supporting double VLAN mode, it is necessary to enable or disable certain
+ * boost TCAM entries. The metadata label names that match the following
+ * prefixes will be saved to allow enabling double VLAN mode.
+ */
+#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
+#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
+
+/* To support tunneling entries by PF, the package will append the PF number to
+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
+ */
+#define ICE_TNL_PRE "TNL_"
+static const struct ice_tunnel_type_scan tnls[] = {
+ { TNL_VXLAN, "TNL_VXLAN_PF" },
+ { TNL_GENEVE, "TNL_GENEVE_PF" },
+ { TNL_LAST, "" }
+};
+
+/**
+ * ice_verify_pkg - verify package
+ * @pkg: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * Verifies various attributes of the package file, including length, format
+ * version, and the requirement of at least one segment.
+ */
+enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+{
+ u32 seg_count;
+ u32 i;
+
+ if (len < struct_size(pkg, seg_offset, 1))
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+ pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+ pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
+ pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ /* pkg must have at least one segment */
+ seg_count = le32_to_cpu(pkg->seg_count);
+ if (seg_count < 1)
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ /* make sure segment array fits in package length */
+ if (len < struct_size(pkg, seg_offset, seg_count))
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ /* all segments must fit within length */
+ for (i = 0; i < seg_count; i++) {
+ u32 off = le32_to_cpu(pkg->seg_offset[i]);
+ struct ice_generic_seg_hdr *seg;
+
+ /* segment header must fit */
+ if (len < off + sizeof(*seg))
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
+
+ /* segment body must fit */
+ if (len < off + le32_to_cpu(seg->seg_size))
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ return ICE_DDP_PKG_SUCCESS;
+}
+
+/**
+ * ice_free_seg - free package segment pointer
+ * @hw: pointer to the hardware structure
+ *
+ * Frees the package segment pointer in the proper manner, depending on if the
+ * segment was allocated or just the passed in pointer was stored.
+ */
+void ice_free_seg(struct ice_hw *hw)
+{
+ if (hw->pkg_copy) {
+ devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
+ hw->pkg_copy = NULL;
+ hw->pkg_size = 0;
+ }
+ hw->seg = NULL;
+}
+
+/**
+ * ice_chk_pkg_version - check package version for compatibility with driver
+ * @pkg_ver: pointer to a version structure to check
+ *
+ * Check to make sure that the package about to be downloaded is compatible with
+ * the driver. To be compatible, the major and minor components of the package
+ * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
+ * definitions.
+ */
+static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
+{
+ if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
+ else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
+
+ return ICE_DDP_PKG_SUCCESS;
+}
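A sketch of how these two checks are typically chained by the package-load path before any download is attempted (the real caller is outside this hunk; pkg_hdr and pkg_len are assumed to describe the firmware image):

enum ice_ddp_state state;

state = ice_verify_pkg(pkg_hdr, pkg_len);
if (state != ICE_DDP_PKG_SUCCESS)
	return state;			/* malformed or truncated file */

state = ice_chk_pkg_version(&hw->pkg_ver);
if (state != ICE_DDP_PKG_SUCCESS)
	return state;			/* file version unsupported by this driver */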
+
+/**
+ * ice_pkg_val_buf
+ * @buf: pointer to the ice buffer
+ *
+ * This helper function validates a buffer's header.
+ */
+struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
+{
+ struct ice_buf_hdr *hdr;
+ u16 section_count;
+ u16 data_end;
+
+ hdr = (struct ice_buf_hdr *)buf->buf;
+ /* verify data */
+ section_count = le16_to_cpu(hdr->section_count);
+ if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
+ return NULL;
+
+ data_end = le16_to_cpu(hdr->data_end);
+ if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ return hdr;
+}
+
+/**
+ * ice_find_buf_table
+ * @ice_seg: pointer to the ice segment
+ *
+ * Returns the address of the buffer table within the ice segment.
+ */
+static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
+{
+ struct ice_nvm_table *nvms = (struct ice_nvm_table *)
+ (ice_seg->device_table + le32_to_cpu(ice_seg->device_table_count));
+
+ return (__force struct ice_buf_table *)(nvms->vers +
+ le32_to_cpu(nvms->table_count));
+}
+
+/**
+ * ice_pkg_enum_buf
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This function will enumerate all the buffers in the ice segment. The first
+ * call is made with the ice_seg parameter non-NULL; on subsequent calls,
+ * ice_seg is set to NULL which continues the enumeration. When the function
+ * returns a NULL pointer, then the end of the buffers has been reached, or an
+ * unexpected value has been detected (for example an invalid section count or
+ * an invalid buffer end value).
+ */
+static struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg,
+ struct ice_pkg_enum *state)
+{
+ if (ice_seg) {
+ state->buf_table = ice_find_buf_table(ice_seg);
+ if (!state->buf_table)
+ return NULL;
+
+ state->buf_idx = 0;
+ return ice_pkg_val_buf(state->buf_table->buf_array);
+ }
+
+ if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
+ return ice_pkg_val_buf(state->buf_table->buf_array +
+ state->buf_idx);
+ else
+ return NULL;
+}
+
+/**
+ * ice_pkg_advance_sect
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This helper function will advance the section within the ice segment,
+ * also advancing the buffer if needed.
+ */
+static bool ice_pkg_advance_sect(struct ice_seg *ice_seg,
+ struct ice_pkg_enum *state)
+{
+ if (!ice_seg && !state->buf)
+ return false;
+
+ if (!ice_seg && state->buf)
+ if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
+ return true;
+
+ state->buf = ice_pkg_enum_buf(ice_seg, state);
+ if (!state->buf)
+ return false;
+
+ /* start of new buffer, reset section index */
+ state->sect_idx = 0;
+ return true;
+}
+
+/**
+ * ice_pkg_enum_section
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ *
+ * This function will enumerate all the sections of a particular type in the
+ * ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the matching
+ * sections has been reached.
+ */
+void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type)
+{
+ u16 offset, size;
+
+ if (ice_seg)
+ state->type = sect_type;
+
+ if (!ice_pkg_advance_sect(ice_seg, state))
+ return NULL;
+
+ /* scan for next matching section */
+ while (state->buf->section_entry[state->sect_idx].type !=
+ cpu_to_le32(state->type))
+ if (!ice_pkg_advance_sect(NULL, state))
+ return NULL;
+
+ /* validate section */
+ offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
+ if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
+ return NULL;
+
+ size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
+ if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
+ return NULL;
+
+ /* make sure the section fits in the buffer */
+ if (offset + size > ICE_PKG_BUF_SIZE)
+ return NULL;
+
+ state->sect_type =
+ le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
+
+ /* calc pointer to this section */
+ state->sect =
+ ((u8 *)state->buf) +
+ le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
+
+ return state->sect;
+}
+
+/**
+ * ice_pkg_enum_entry
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ * @offset: pointer to variable that receives the offset in the table (optional)
+ * @handler: function that handles access to the entries into the section type
+ *
+ * This function will enumerate all the entries in particular section type in
+ * the ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the entries has
+ * been reached.
+ *
+ * Since each section may have a different header and entry size, the handler
+ * function is needed to determine the number and location of entries in each
+ * section.
+ *
+ * The offset parameter is optional, but should be used for sections that
+ * contain an offset for each section table. For such cases, the section handler
+ * function must return the appropriate offset + index to give the absolute
+ * offset for each entry. For example, if the base for a section's header
+ * indicates a base offset of 10, and the index for the entry is 2, then
+ * the section handler function should set the offset to 10 + 2 = 12.
+ */
+static void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
+ struct ice_pkg_enum *state, u32 sect_type,
+ u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset))
+{
+ void *entry;
+
+ if (ice_seg) {
+ if (!handler)
+ return NULL;
+
+ if (!ice_pkg_enum_section(ice_seg, state, sect_type))
+ return NULL;
+
+ state->entry_idx = 0;
+ state->handler = handler;
+ } else {
+ state->entry_idx++;
+ }
+
+ if (!state->handler)
+ return NULL;
+
+ /* get entry */
+ entry = state->handler(state->sect_type, state->sect, state->entry_idx,
+ offset);
+ if (!entry) {
+ /* end of a section, look for another section of this type */
+ if (!ice_pkg_enum_section(NULL, state, 0))
+ return NULL;
+
+ state->entry_idx = 0;
+ entry = state->handler(state->sect_type, state->sect,
+ state->entry_idx, offset);
+ }
+
+ return entry;
+}
+
+/**
+ * ice_sw_fv_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * This function treats the given section as of type ice_sw_fv_section and
+ * enumerates offset field. "offset" is an index into the field vector table.
+ */
+static void *ice_sw_fv_handler(u32 sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_sw_fv_section *fv_section = section;
+
+ if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+ return NULL;
+ if (index >= le16_to_cpu(fv_section->count))
+ return NULL;
+ if (offset)
+ /* "index" passed in to this function is relative to a given
+ * 4k block. To get to the true index into the field vector
+ * table, add the relative index to the base_offset
+ * field of this section
+ */
+ *offset = le16_to_cpu(fv_section->base_offset) + index;
+ return fv_section->fv + index;
+}
+
+/**
+ * ice_get_prof_index_max - get the max profile index for used profile
+ * @hw: pointer to the HW struct
+ *
+ * Calling this function will get the max profile index for used profile
+ * and store the index number in struct ice_switch_info *switch_info
+ * in HW for following use.
+ */
+static int ice_get_prof_index_max(struct ice_hw *hw)
+{
+ u16 prof_index = 0, j, max_prof_index = 0;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ bool flag = false;
+ struct ice_fv *fv;
+ u32 offset;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!hw->seg)
+ return -EINVAL;
+
+ ice_seg = hw->seg;
+
+ do {
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* in profiles that are not used, the prot_id is set to 0xff
+ * and the off is set to 0x1ff for all the field vectors.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
+ fv->ew[j].off != ICE_FV_OFFSET_INVAL)
+ flag = true;
+ if (flag && prof_index > max_prof_index)
+ max_prof_index = prof_index;
+
+ prof_index++;
+ flag = false;
+ } while (fv);
+
+ hw->switch_info->max_used_prof_index = max_prof_index;
+
+ return 0;
+}
+
+/**
+ * ice_get_ddp_pkg_state - get DDP pkg state after download
+ * @hw: pointer to the HW struct
+ * @already_loaded: indicates if pkg was already loaded onto the device
+ */
+static enum ice_ddp_state ice_get_ddp_pkg_state(struct ice_hw *hw,
+ bool already_loaded)
+{
+ if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
+ hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
+ hw->pkg_ver.update == hw->active_pkg_ver.update &&
+ hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
+ !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
+ if (already_loaded)
+ return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
+ else
+ return ICE_DDP_PKG_SUCCESS;
+ } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
+ hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
+ } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
+ hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
+ } else {
+ return ICE_DDP_PKG_ERR;
+ }
+}
+
+/**
+ * ice_init_pkg_regs - initialize additional package registers
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_pkg_regs(struct ice_hw *hw)
+{
+#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
+#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
+#define ICE_SW_BLK_IDX 0
+
+ /* setup Switch block input mask, which is 48-bits in two parts */
+ wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
+ wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
+}
+
+/**
+ * ice_marker_ptype_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the Marker PType TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual Marker PType TCAM entries.
+ */
+static void *ice_marker_ptype_tcam_handler(u32 sect_type, void *section,
+ u32 index, u32 *offset)
+{
+ struct ice_marker_ptype_tcam_section *marker_ptype;
+
+ if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
+ return NULL;
+
+ if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ marker_ptype = section;
+ if (index >= le16_to_cpu(marker_ptype->count))
+ return NULL;
+
+ return marker_ptype->tcam + index;
+}
+
+/**
+ * ice_add_dvm_hint
+ * @hw: pointer to the HW structure
+ * @val: value of the boost entry
+ * @enable: true if entry needs to be enabled, or false if needs to be disabled
+ */
+static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
+{
+ if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
+ hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
+ hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
+ hw->dvm_upd.count++;
+ }
+}
+
+/**
+ * ice_add_tunnel_hint
+ * @hw: pointer to the HW structure
+ * @label_name: label text
+ * @val: value of the tunnel port boost entry
+ */
+static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
+{
+ if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+ u16 i;
+
+ for (i = 0; tnls[i].type != TNL_LAST; i++) {
+ size_t len = strlen(tnls[i].label_prefix);
+
+ /* Look for matching label start, before continuing */
+ if (strncmp(label_name, tnls[i].label_prefix, len))
+ continue;
+
+ /* Make sure this label matches our PF. Note that the PF
+ * character ('0' - '7') will be located where our
+ * prefix string's null terminator is located.
+ */
+ if ((label_name[len] - '0') == hw->pf_id) {
+ hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+ hw->tnl.tbl[hw->tnl.count].valid = false;
+ hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+ hw->tnl.tbl[hw->tnl.count].port = 0;
+ hw->tnl.count++;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * ice_label_enum_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the label entry to be returned
+ * @offset: pointer to receive absolute offset, always zero for label sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual label entries.
+ */
+static void *ice_label_enum_handler(u32 __always_unused sect_type,
+ void *section, u32 index, u32 *offset)
+{
+ struct ice_label_section *labels;
+
+ if (!section)
+ return NULL;
+
+ if (index > ICE_MAX_LABELS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ labels = section;
+ if (index >= le16_to_cpu(labels->count))
+ return NULL;
+
+ return labels->label + index;
+}
+
+/**
+ * ice_enum_labels
+ * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
+ * @type: the section type that will contain the label (0 on subsequent calls)
+ * @state: ice_pkg_enum structure that will hold the state of the enumeration
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Enumerates a list of labels in the package. The caller will call
+ * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
+ * ice_enum_labels(NULL, 0, ...) to continue. When the function returns NULL,
+ * the end of the list has been reached.
+ */
+static char *ice_enum_labels(struct ice_seg *ice_seg, u32 type,
+ struct ice_pkg_enum *state, u16 *value)
+{
+ struct ice_label *label;
+
+ /* Check for valid label section on first call */
+ if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
+ return NULL;
+
+ label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
+ ice_label_enum_handler);
+ if (!label)
+ return NULL;
+
+ *value = le16_to_cpu(label->value);
+ return label->name;
+}
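A minimal walk over the labels, following the calling convention described above; ICE_SID_LBL_RXPARSER_TMEM is assumed to be the label section of interest, as for the tunnel/DVM hints:

struct ice_pkg_enum state = {};
u16 val;
char *name;

name = ice_enum_labels(hw->seg, ICE_SID_LBL_RXPARSER_TMEM, &state, &val);
while (name) {
	/* e.g. match ICE_TNL_PRE / ICE_DVM_PRE prefixes and record val */
	name = ice_enum_labels(NULL, 0, &state, &val);
}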
+
+/**
+ * ice_boost_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the boost TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual boost TCAM entries.
+ */
+static void *ice_boost_tcam_handler(u32 sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_boost_tcam_section *boost;
+
+ if (!section)
+ return NULL;
+
+ if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
+ return NULL;
+
+ if (index > ICE_MAX_BST_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ boost = section;
+ if (index >= le16_to_cpu(boost->count))
+ return NULL;
+
+ return boost->tcam + index;
+}
+
+/**
+ * ice_find_boost_entry
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @addr: Boost TCAM address of entry to search for
+ * @entry: returns pointer to the entry
+ *
+ * Finds a particular Boost TCAM entry and returns a pointer to that entry
+ * if it is found. The ice_seg parameter must not be NULL since the first call
+ * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
+ */
+static int ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
+ struct ice_boost_tcam_entry **entry)
+{
+ struct ice_boost_tcam_entry *tcam;
+ struct ice_pkg_enum state;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!ice_seg)
+ return -EINVAL;
+
+ do {
+ tcam = ice_pkg_enum_entry(ice_seg, &state,
+ ICE_SID_RXPARSER_BOOST_TCAM, NULL,
+ ice_boost_tcam_handler);
+ if (tcam && le16_to_cpu(tcam->addr) == addr) {
+ *entry = tcam;
+ return 0;
+ }
+
+ ice_seg = NULL;
+ } while (tcam);
+
+ *entry = NULL;
+ return -EIO;
+}
+
+/**
+ * ice_is_init_pkg_successful - check if DDP init was successful
+ * @state: state of the DDP pkg after download
+ */
+bool ice_is_init_pkg_successful(enum ice_ddp_state state)
+{
+ switch (state) {
+ case ICE_DDP_PKG_SUCCESS:
+ case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
+ case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
+ return true;
+ default:
+ return false;
+ }
+}
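An illustrative caller-side check, assuming the usual load entry point and that dev and fw are in scope (the download path itself is not part of this hunk):

enum ice_ddp_state state = ice_copy_and_init_pkg(hw, fw->data, fw->size);

if (!ice_is_init_pkg_successful(state))
	dev_err(dev, "DDP package could not be loaded, state %d\n", state);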
+
+/**
+ * ice_pkg_buf_alloc
+ * @hw: pointer to the HW structure
+ *
+ * Allocates a package buffer and returns a pointer to the buffer header.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
+{
+ struct ice_buf_build *bld;
+ struct ice_buf_hdr *buf;
+
+ bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
+ if (!bld)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)bld;
+ buf->data_end =
+ cpu_to_le16(offsetof(struct ice_buf_hdr, section_entry));
+ return bld;
+}
+
+static bool ice_is_gtp_u_profile(u16 prof_idx)
+{
+ return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
+ prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) ||
+ prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
+}
+
+static bool ice_is_gtp_c_profile(u16 prof_idx)
+{
+ switch (prof_idx) {
+ case ICE_PROFID_IPV4_GTPC_TEID:
+ case ICE_PROFID_IPV4_GTPC_NO_TEID:
+ case ICE_PROFID_IPV6_GTPC_TEID:
+ case ICE_PROFID_IPV6_GTPC_NO_TEID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_get_sw_prof_type - determine switch profile type
+ * @hw: pointer to the HW structure
+ * @fv: pointer to the switch field vector
+ * @prof_idx: profile index to check
+ */
+static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw,
+ struct ice_fv *fv, u32 prof_idx)
+{
+ u16 i;
+
+ if (ice_is_gtp_c_profile(prof_idx))
+ return ICE_PROF_TUN_GTPC;
+
+ if (ice_is_gtp_u_profile(prof_idx))
+ return ICE_PROF_TUN_GTPU;
+
+ for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
+ /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
+ fv->ew[i].off == ICE_VNI_OFFSET)
+ return ICE_PROF_TUN_UDP;
+
+ /* GRE tunnel will have GRE protocol */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
+ return ICE_PROF_TUN_GRE;
+ }
+
+ return ICE_PROF_NON_TUN;
+}
+
+/**
+ * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
+ * @hw: pointer to hardware structure
+ * @req_profs: type of profiles requested
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+void ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
+ unsigned long *bm)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ if (req_profs == ICE_PROF_ALL) {
+ bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
+ return;
+ }
+
+ memset(&state, 0, sizeof(state));
+ bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
+ ice_seg = hw->seg;
+ do {
+ enum ice_prof_type prof_type;
+ u32 offset;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ ice_seg = NULL;
+
+ if (fv) {
+ /* Determine field vector type */
+ prof_type = ice_get_sw_prof_type(hw, fv, offset);
+
+ if (req_profs & prof_type)
+ set_bit((u16)offset, bm);
+ }
+ } while (fv);
+}
+
+/**
+ * ice_get_sw_fv_list
+ * @hw: pointer to the HW structure
+ * @lkups: list of protocol types
+ * @bm: bitmap of field vectors to consider
+ * @fv_list: Head of a list
+ *
+ * Finds all the field vector entries from switch block that contain
+ * a given protocol ID and offset and returns a list of structures of type
+ * "ice_sw_fv_list_entry". Every structure in the list has a field vector
+ * definition and profile ID information
+ * NOTE: The caller of the function is responsible for freeing the memory
+ * allocated for every list entry.
+ */
+int ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
+ unsigned long *bm, struct list_head *fv_list)
+{
+ struct ice_sw_fv_list_entry *fvl;
+ struct ice_sw_fv_list_entry *tmp;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+ u32 offset;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!lkups->n_val_words || !hw->seg)
+ return -EINVAL;
+
+ ice_seg = hw->seg;
+ do {
+ u16 i;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* If field vector is not in the bitmap list, then skip this
+ * profile.
+ */
+ if (!test_bit((u16)offset, bm))
+ continue;
+
+ for (i = 0; i < lkups->n_val_words; i++) {
+ int j;
+
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id ==
+ lkups->fv_words[i].prot_id &&
+ fv->ew[j].off == lkups->fv_words[i].off)
+ break;
+ if (j >= hw->blk[ICE_BLK_SW].es.fvw)
+ break;
+ if (i + 1 == lkups->n_val_words) {
+ fvl = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*fvl), GFP_KERNEL);
+ if (!fvl)
+ goto err;
+ fvl->fv_ptr = fv;
+ fvl->profile_id = offset;
+ list_add(&fvl->list_entry, fv_list);
+ break;
+ }
+ }
+ } while (fv);
+ if (list_empty(fv_list)) {
+ dev_warn(ice_hw_to_dev(hw),
+ "Required profiles not found in currently loaded DDP package");
+ return -EIO;
+ }
+
+ return 0;
+
+err:
+ list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
+ list_del(&fvl->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fvl);
+ }
+
+ return -ENOMEM;
+}
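Illustrative pairing of the two lookups above: build a bitmap of candidate profiles first, then collect the matching field vectors for a set of protocol/offset words (lkups is assumed to be a populated struct ice_prot_lkup_ext):

DECLARE_BITMAP(bm, ICE_MAX_NUM_PROFILES);
LIST_HEAD(fv_list);

ice_get_sw_fv_bitmap(hw, ICE_PROF_TUN_UDP, bm);
if (!ice_get_sw_fv_list(hw, lkups, bm, &fv_list)) {
	struct ice_sw_fv_list_entry *fvl;

	list_for_each_entry(fvl, &fv_list, list_entry) {
		/* fvl->profile_id and fvl->fv_ptr describe one match;
		 * the caller must free each entry (see NOTE above)
		 */
	}
}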
+
+/**
+ * ice_init_prof_result_bm - Initialize the profile result index bitmap
+ * @hw: pointer to hardware structure
+ */
+void ice_init_prof_result_bm(struct ice_hw *hw)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!hw->seg)
+ return;
+
+ ice_seg = hw->seg;
+ do {
+ u32 off;
+ u16 i;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &off, ice_sw_fv_handler);
+ ice_seg = NULL;
+ if (!fv)
+ break;
+
+ bitmap_zero(hw->switch_info->prof_res_bm[off],
+ ICE_MAX_FV_WORDS);
+
+ /* Determine empty field vector indices, these can be
+ * used for recipe results. Skip index 0, since it is
+ * always used for Switch ID.
+ */
+ for (i = 1; i < ICE_MAX_FV_WORDS; i++)
+ if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
+ fv->ew[i].off == ICE_FV_OFFSET_INVAL)
+ set_bit(i, hw->switch_info->prof_res_bm[off]);
+ } while (fv);
+}
+
+/**
+ * ice_pkg_buf_free
+ * @hw: pointer to the HW structure
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Frees a package buffer
+ */
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+{
+ devm_kfree(ice_hw_to_dev(hw), bld);
+}
+
+/**
+ * ice_pkg_buf_reserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to reserve
+ *
+ * Reserves one or more section table entries in a package buffer. This routine
+ * can be called multiple times as long as they are made before calling
+ * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
+ * is called once, the number of sections that can be allocated will not be able
+ * to be increased; not using all reserved sections is fine, but this will
+ * result in some wasted space in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
+{
+ struct ice_buf_hdr *buf;
+ u16 section_count;
+ u16 data_end;
+
+ if (!bld)
+ return -EINVAL;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* already an active section, can't increase table size */
+ section_count = le16_to_cpu(buf->section_count);
+ if (section_count > 0)
+ return -EIO;
+
+ if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
+ return -EIO;
+ bld->reserved_section_table_entries += count;
+
+ data_end = le16_to_cpu(buf->data_end) +
+ flex_array_size(buf, section_entry, count);
+ buf->data_end = cpu_to_le16(data_end);
+
+ return 0;
+}
+
+/**
+ * ice_pkg_buf_alloc_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ *
+ * Reserves memory in the buffer for a section's content and updates the
+ * buffers' status accordingly. This routine returns a pointer to the first
+ * byte of the section start within the buffer, which is used to fill in the
+ * section contents.
+ * Note: all package contents must be in Little Endian form.
+ */
+void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
+{
+ struct ice_buf_hdr *buf;
+ u16 sect_count;
+ u16 data_end;
+
+ if (!bld || !type || !size)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* check for enough space left in buffer */
+ data_end = le16_to_cpu(buf->data_end);
+
+ /* section start must align on 4 byte boundary */
+ data_end = ALIGN(data_end, 4);
+
+ if ((data_end + size) > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ /* check for more available section table entries */
+ sect_count = le16_to_cpu(buf->section_count);
+ if (sect_count < bld->reserved_section_table_entries) {
+ void *section_ptr = ((u8 *)buf) + data_end;
+
+ buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
+ buf->section_entry[sect_count].size = cpu_to_le16(size);
+ buf->section_entry[sect_count].type = cpu_to_le32(type);
+
+ data_end += size;
+ buf->data_end = cpu_to_le16(data_end);
+
+ buf->section_count = cpu_to_le16(sect_count + 1);
+ return section_ptr;
+ }
+
+ /* no free section table entries */
+ return NULL;
+}
+
+/**
+ * ice_pkg_buf_alloc_single_section
+ * @hw: pointer to the HW structure
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ * @section: returns pointer to the section
+ *
+ * Allocates a package buffer with a single section.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *ice_pkg_buf_alloc_single_section(struct ice_hw *hw,
+ u32 type, u16 size,
+ void **section)
+{
+ struct ice_buf_build *buf;
+
+ if (!section)
+ return NULL;
+
+ buf = ice_pkg_buf_alloc(hw);
+ if (!buf)
+ return NULL;
+
+ if (ice_pkg_buf_reserve_section(buf, 1))
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ *section = ice_pkg_buf_alloc_section(buf, type, size);
+ if (!*section)
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ return buf;
+
+ice_pkg_buf_alloc_single_section_err:
+ ice_pkg_buf_free(hw, buf);
+ return NULL;
+}
+
+/**
+ * ice_pkg_buf_get_active_sections
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of active sections. Before using the package buffer
+ * in an update package command, the caller should make sure that there is at
+ * least one active section - otherwise, the buffer is not legal and should
+ * not be used.
+ * Note: all package contents must be in Little Endian form.
+ */
+u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
+{
+ struct ice_buf_hdr *buf;
+
+ if (!bld)
+ return 0;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+ return le16_to_cpu(buf->section_count);
+}
+
+/**
+ * ice_pkg_buf
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Return a pointer to the buffer's header
+ */
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+{
+ if (!bld)
+ return NULL;
+
+ return &bld->buf;
+}
+
+static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
+{
+ switch (aq_err) {
+ case ICE_AQ_RC_ENOSEC:
+ case ICE_AQ_RC_EBADSIG:
+ return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
+ case ICE_AQ_RC_ESVN:
+ return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
+ case ICE_AQ_RC_EBADMAN:
+ case ICE_AQ_RC_EBADBUF:
+ return ICE_DDP_PKG_LOAD_ERROR;
+ default:
+ return ICE_DDP_PKG_ERR;
+ }
+}
+
+/**
+ * ice_acquire_global_cfg_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the global config lock for reading
+ * or writing of the package. When attempting to obtain write access, the
+ * caller must check for the following two return values:
+ *
+ * 0 - Means the caller has acquired the global config lock
+ * and can perform writing of the package.
+ * -EALREADY - Indicates another driver has already written the
+ * package or has found that no update was necessary; in
+ * this case, the caller can just skip performing any
+ * update of the package.
+ */
+static int ice_acquire_global_cfg_lock(struct ice_hw *hw,
+ enum ice_aq_res_access_type access)
+{
+ int status;
+
+ status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
+ ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+
+ if (!status)
+ mutex_lock(&ice_global_cfg_lock_sw);
+ else if (status == -EALREADY)
+ ice_debug(hw, ICE_DBG_PKG,
+ "Global config lock: No work to do\n");
+
+ return status;
+}
+
+/**
+ * ice_release_global_cfg_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the global config lock.
+ */
+static void ice_release_global_cfg_lock(struct ice_hw *hw)
+{
+ mutex_unlock(&ice_global_cfg_lock_sw);
+ ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
+}
+
+/**
+ * ice_dwnld_cfg_bufs
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains global config lock and downloads the package configuration buffers
+ * to the firmware. Metadata buffers are skipped, and the first metadata buffer
+ * found indicates that the rest of the buffers are all metadata buffers.
+ */
+static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw,
+ struct ice_buf *bufs, u32 count)
+{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
+ struct ice_buf_hdr *bh;
+ enum ice_aq_err err;
+ u32 offset, info, i;
+ int status;
+
+ if (!bufs || !count)
+ return ICE_DDP_PKG_ERR;
+
+ /* If the first buffer's first section has its metadata bit set
+ * then there are no buffers to be downloaded, and the operation is
+ * considered a success.
+ */
+ bh = (struct ice_buf_hdr *)bufs;
+ if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ return ICE_DDP_PKG_SUCCESS;
+
+ status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ if (status) {
+ if (status == -EALREADY)
+ return ICE_DDP_PKG_ALREADY_LOADED;
+ return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
+ }
+
+ for (i = 0; i < count; i++) {
+ bool last = ((i + 1) == count);
+
+ if (!last) {
+ /* check next buffer for metadata flag */
+ bh = (struct ice_buf_hdr *)(bufs + i + 1);
+
+ /* A set metadata flag in the next buffer will signal
+ * that the current buffer will be the last buffer
+ * downloaded
+ */
+ if (le16_to_cpu(bh->section_count))
+ if (le32_to_cpu(bh->section_entry[0].type) &
+ ICE_METADATA_BUF)
+ last = true;
+ }
+
+ bh = (struct ice_buf_hdr *)(bufs + i);
+
+ status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
+ &offset, &info, NULL);
+
+ /* Save AQ status from download package */
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "Pkg download failed: err %d off %d inf %d\n",
+ status, offset, info);
+ err = hw->adminq.sq_last_status;
+ state = ice_map_aq_err_to_ddp_state(err);
+ break;
+ }
+
+ if (last)
+ break;
+ }
+
+ if (!status) {
+ status = ice_set_vlan_mode(hw);
+ if (status)
+ ice_debug(hw, ICE_DBG_PKG,
+ "Failed to set VLAN mode: err %d\n", status);
+ }
+
+ ice_release_global_cfg_lock(hw);
+
+ return state;
+}
+
+/**
+ * ice_aq_get_pkg_info_list
+ * @hw: pointer to the hardware structure
+ * @pkg_info: the buffer which will receive the information list
+ * @buf_size: the size of the pkg_info information buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get Package Info List (0x0C43)
+ */
+static int ice_aq_get_pkg_info_list(struct ice_hw *hw,
+ struct ice_aqc_get_pkg_info_resp *pkg_info,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
+}
+
+/**
+ * ice_download_pkg
+ * @hw: pointer to the hardware structure
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package.
+ */
+static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw,
+ struct ice_seg *ice_seg)
+{
+ struct ice_buf_table *ice_buf_tbl;
+ int status;
+
+ ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
+ ice_seg->hdr.seg_format_ver.major,
+ ice_seg->hdr.seg_format_ver.minor,
+ ice_seg->hdr.seg_format_ver.update,
+ ice_seg->hdr.seg_format_ver.draft);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
+ le32_to_cpu(ice_seg->hdr.seg_type),
+ le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
+
+ ice_buf_tbl = ice_find_buf_table(ice_seg);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
+ le32_to_cpu(ice_buf_tbl->buf_count));
+
+ status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+ le32_to_cpu(ice_buf_tbl->buf_count));
+
+ ice_post_pkg_dwnld_vlan_mode_cfg(hw);
+
+ return status;
+}
+
+/**
+ * ice_aq_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer to transfer
+ * @buf_size: the size of the package buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Download Package (0x0C40)
+ */
+int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == -EIO) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_upload_section
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer which will receive the section
+ * @buf_size: the size of the package buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Upload Section (0x0C41)
+ */
+int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+}
+
+/**
+ * ice_aq_update_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package cmd buffer
+ * @buf_size: the size of the package cmd buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update Package (0x0C42)
+ */
+static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == -EIO) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_update_pkg_no_lock
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ */
+int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ int status = 0;
+ u32 i;
+
+ for (i = 0; i < count; i++) {
+ struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
+ bool last = ((i + 1) == count);
+ u32 offset, info;
+
+ status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
+ last, &offset, &info, NULL);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "Update pkg failed: err %d off %d inf %d\n",
+ status, offset, info);
+ break;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ int status;
+
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ice_update_pkg_no_lock(hw, bufs, count);
+
+ ice_release_change_lock(hw);
+
+ return status;
+}
+
+/**
+ * ice_find_seg_in_pkg
+ * @hw: pointer to the hardware structure
+ * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_ICE)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ */
+struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+ struct ice_pkg_hdr *pkg_hdr)
+{
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
+ pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
+ pkg_hdr->pkg_format_ver.update,
+ pkg_hdr->pkg_format_ver.draft);
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
+ struct ice_generic_seg_hdr *seg;
+
+		seg = (struct ice_generic_seg_hdr *)
+			((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));
+
+ if (le32_to_cpu(seg->seg_type) == seg_type)
+ return seg;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_init_pkg_info
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to the driver's package hdr
+ *
+ * Saves off the package details into the HW structure.
+ */
+static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
+ struct ice_pkg_hdr *pkg_hdr)
+{
+ struct ice_generic_seg_hdr *seg_hdr;
+
+ if (!pkg_hdr)
+ return ICE_DDP_PKG_ERR;
+
+ seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
+ if (seg_hdr) {
+ struct ice_meta_sect *meta;
+ struct ice_pkg_enum state;
+
+ memset(&state, 0, sizeof(state));
+
+ /* Get package information from the Metadata Section */
+ meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
+ ICE_SID_METADATA);
+ if (!meta) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Did not find ice metadata section in package\n");
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ hw->pkg_ver = meta->ver;
+ memcpy(hw->pkg_name, meta->name, sizeof(meta->name));
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
+ meta->ver.major, meta->ver.minor, meta->ver.update,
+ meta->ver.draft, meta->name);
+
+ hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
+ memcpy(hw->ice_seg_id, seg_hdr->seg_id, sizeof(hw->ice_seg_id));
+
+ ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
+ seg_hdr->seg_format_ver.major,
+ seg_hdr->seg_format_ver.minor,
+ seg_hdr->seg_format_ver.update,
+ seg_hdr->seg_format_ver.draft, seg_hdr->seg_id);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Did not find ice segment in driver package\n");
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ return ICE_DDP_PKG_SUCCESS;
+}
+
+/**
+ * ice_get_pkg_info
+ * @hw: pointer to the hardware structure
+ *
+ * Store details of the package currently loaded in HW into the HW structure.
+ */
+static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
+{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
+ struct ice_aqc_get_pkg_info_resp *pkg_info;
+ u16 size;
+ u32 i;
+
+ size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
+ pkg_info = kzalloc(size, GFP_KERNEL);
+ if (!pkg_info)
+ return ICE_DDP_PKG_ERR;
+
+ if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
+ state = ICE_DDP_PKG_ERR;
+ goto init_pkg_free_alloc;
+ }
+
+ for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
+#define ICE_PKG_FLAG_COUNT 4
+ char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
+ u8 place = 0;
+
+ if (pkg_info->pkg_info[i].is_active) {
+ flags[place++] = 'A';
+ hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
+ hw->active_track_id =
+ le32_to_cpu(pkg_info->pkg_info[i].track_id);
+ memcpy(hw->active_pkg_name, pkg_info->pkg_info[i].name,
+ sizeof(pkg_info->pkg_info[i].name));
+ hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
+ }
+ if (pkg_info->pkg_info[i].is_active_at_boot)
+ flags[place++] = 'B';
+ if (pkg_info->pkg_info[i].is_modified)
+ flags[place++] = 'M';
+ if (pkg_info->pkg_info[i].is_in_nvm)
+ flags[place++] = 'N';
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", i,
+ pkg_info->pkg_info[i].ver.major,
+ pkg_info->pkg_info[i].ver.minor,
+ pkg_info->pkg_info[i].ver.update,
+ pkg_info->pkg_info[i].ver.draft,
+ pkg_info->pkg_info[i].name, flags);
+ }
+
+init_pkg_free_alloc:
+ kfree(pkg_info);
+
+ return state;
+}
+
+/**
+ * ice_chk_pkg_compat
+ * @hw: pointer to the hardware structure
+ * @ospkg: pointer to the package hdr
+ * @seg: pointer to the package segment hdr
+ *
+ * This function checks the package version compatibility with driver and NVM
+ */
+static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
+ struct ice_pkg_hdr *ospkg,
+ struct ice_seg **seg)
+{
+ struct ice_aqc_get_pkg_info_resp *pkg;
+ enum ice_ddp_state state;
+ u16 size;
+ u32 i;
+
+ /* Check package version compatibility */
+ state = ice_chk_pkg_version(&hw->pkg_ver);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
+ return state;
+ }
+
+ /* find ICE segment in given package */
+ *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
+ ospkg);
+ if (!*seg) {
+ ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ /* Check if FW is compatible with the OS package */
+ size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
+ pkg = kzalloc(size, GFP_KERNEL);
+ if (!pkg)
+ return ICE_DDP_PKG_ERR;
+
+ if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
+ state = ICE_DDP_PKG_LOAD_ERROR;
+ goto fw_ddp_compat_free_alloc;
+ }
+
+ for (i = 0; i < le32_to_cpu(pkg->count); i++) {
+ /* loop till we find the NVM package */
+ if (!pkg->pkg_info[i].is_in_nvm)
+ continue;
+ if ((*seg)->hdr.seg_format_ver.major !=
+ pkg->pkg_info[i].ver.major ||
+ (*seg)->hdr.seg_format_ver.minor >
+ pkg->pkg_info[i].ver.minor) {
+ state = ICE_DDP_PKG_FW_MISMATCH;
+ ice_debug(hw, ICE_DBG_INIT,
+ "OS package is not compatible with NVM.\n");
+ }
+ /* done processing NVM package so break */
+ break;
+ }
+fw_ddp_compat_free_alloc:
+ kfree(pkg);
+ return state;
+}
+
+/**
+ * ice_init_pkg_hints
+ * @hw: pointer to the HW structure
+ * @ice_seg: pointer to the segment of the package scan (non-NULL)
+ *
+ * This function will scan the package and save off relevant information
+ * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
+ * since the first call to ice_enum_labels requires a pointer to an actual
+ * ice_seg structure.
+ */
+static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_pkg_enum state;
+ char *label_name;
+ u16 val;
+ int i;
+
+ memset(&hw->tnl, 0, sizeof(hw->tnl));
+ memset(&state, 0, sizeof(state));
+
+ if (!ice_seg)
+ return;
+
+ label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
+ &val);
+
+ while (label_name) {
+ if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
+ /* check for a tunnel entry */
+ ice_add_tunnel_hint(hw, label_name, val);
+
+ /* check for a dvm mode entry */
+ else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
+ ice_add_dvm_hint(hw, val, true);
+
+ /* check for a svm mode entry */
+ else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
+ ice_add_dvm_hint(hw, val, false);
+
+ label_name = ice_enum_labels(NULL, 0, &state, &val);
+ }
+
+ /* Cache the appropriate boost TCAM entry pointers for tunnels */
+ for (i = 0; i < hw->tnl.count; i++) {
+ ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
+ &hw->tnl.tbl[i].boost_entry);
+ if (hw->tnl.tbl[i].boost_entry) {
+ hw->tnl.tbl[i].valid = true;
+ if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
+ hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
+ }
+ }
+
+ /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
+ for (i = 0; i < hw->dvm_upd.count; i++)
+ ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
+ &hw->dvm_upd.tbl[i].boost_entry);
+}
+
+/**
+ * ice_fill_hw_ptype - fill the enabled PTYPE bit information
+ * @hw: pointer to the HW structure
+ */
+static void ice_fill_hw_ptype(struct ice_hw *hw)
+{
+ struct ice_marker_ptype_tcam_entry *tcam;
+ struct ice_seg *seg = hw->seg;
+ struct ice_pkg_enum state;
+
+ bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
+ if (!seg)
+ return;
+
+ memset(&state, 0, sizeof(state));
+
+ do {
+ tcam = ice_pkg_enum_entry(seg, &state,
+ ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
+ ice_marker_ptype_tcam_handler);
+ if (tcam &&
+ le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
+ le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
+ set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype);
+
+ seg = NULL;
+ } while (tcam);
+}
+
+/**
+ * ice_init_pkg - initialize/download package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function initializes a package. The package contains HW tables
+ * required to do packet processing. First, the function extracts package
+ * information such as version. Then it finds the ice configuration segment
+ * within the package; this function then saves a copy of the segment pointer
+ * within the supplied package buffer. Next, the function will cache any hints
+ * from the package, followed by downloading the package itself. Note that if
+ * a previous PF driver has already downloaded the package successfully, then
+ * the current driver will not have to download the package again.
+ *
+ * The local package contents will be used to query default behavior and to
+ * update specific sections of the HW's version of the package (e.g. to update
+ * the parse graph to understand new protocols).
+ *
+ * This function stores a pointer to the package buffer memory, and it is
+ * expected that the supplied buffer will not be freed immediately. If the
+ * package buffer needs to be freed, such as when read from a file, use
+ * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
+ * case.
+ */
+enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+{
+ bool already_loaded = false;
+ enum ice_ddp_state state;
+ struct ice_pkg_hdr *pkg;
+ struct ice_seg *seg;
+
+ if (!buf || !len)
+ return ICE_DDP_PKG_ERR;
+
+ pkg = (struct ice_pkg_hdr *)buf;
+ state = ice_verify_pkg(pkg, len);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
+ state);
+ return state;
+ }
+
+ /* initialize package info */
+ state = ice_init_pkg_info(hw, pkg);
+ if (state)
+ return state;
+
+ /* before downloading the package, check package version for
+ * compatibility with driver
+ */
+ state = ice_chk_pkg_compat(hw, pkg, &seg);
+ if (state)
+ return state;
+
+ /* initialize package hints and then download package */
+ ice_init_pkg_hints(hw, seg);
+ state = ice_download_pkg(hw, seg);
+ if (state == ICE_DDP_PKG_ALREADY_LOADED) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "package previously loaded - no work.\n");
+ already_loaded = true;
+ }
+
+ /* Get information on the package currently loaded in HW, then make sure
+ * the driver is compatible with this version.
+ */
+ if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
+ state = ice_get_pkg_info(hw);
+ if (!state)
+ state = ice_get_ddp_pkg_state(hw, already_loaded);
+ }
+
+ if (ice_is_init_pkg_successful(state)) {
+ hw->seg = seg;
+ /* on successful package download update other required
+ * registers to support the package and fill HW tables
+ * with package content.
+ */
+ ice_init_pkg_regs(hw);
+ ice_fill_blk_tbls(hw);
+ ice_fill_hw_ptype(hw);
+ ice_get_prof_index_max(hw);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", state);
+ }
+
+ return state;
+}
+
+/**
+ * ice_copy_and_init_pkg - initialize/download a copy of the package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function copies the package buffer and then calls ice_init_pkg() to
+ * initialize the copied package contents.
+ *
+ * The copy is necessary when the supplied package buffer is constant or when
+ * the memory may disappear shortly after this function is called, such as
+ * when the buffer was read from a file. If the package buffer resides in the
+ * data segment and can be modified, the caller is free to use ice_init_pkg()
+ * directly instead.
+ *
+ * The caller may destroy the original package buffer immediately after the
+ * call, as the new copy is managed by this function and related routines.
+ */
+enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
+ u32 len)
+{
+ enum ice_ddp_state state;
+ u8 *buf_copy;
+
+ if (!buf || !len)
+ return ICE_DDP_PKG_ERR;
+
+ buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
+
+ state = ice_init_pkg(hw, buf_copy, len);
+ if (!ice_is_init_pkg_successful(state)) {
+ /* Free the copy, since we failed to initialize the package */
+ devm_kfree(ice_hw_to_dev(hw), buf_copy);
+ } else {
+ /* Track the copied pkg so we can free it later */
+ hw->pkg_copy = buf_copy;
+ hw->pkg_size = len;
+ }
+
+ return state;
+}
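+
+/* A minimal caller sketch, assuming the DDP package is read from a firmware
+ * file whose buffer is released right after the call (the file name below is
+ * illustrative):
+ *
+ *	const struct firmware *fw;
+ *	enum ice_ddp_state state;
+ *
+ *	if (request_firmware(&fw, "intel/ice/ddp/ice.pkg", ice_hw_to_dev(hw)))
+ *		return;
+ *	state = ice_copy_and_init_pkg(hw, fw->data, fw->size);
+ *	release_firmware(fw);
+ *	if (!ice_is_init_pkg_successful(state))
+ *		ice_debug(hw, ICE_DBG_INIT, "DDP package load failed: %d\n",
+ *			  state);
+ */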
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
new file mode 100644
index 000000000000..37eadb3d27a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2022, Intel Corporation. */
+
+#ifndef _ICE_DDP_H_
+#define _ICE_DDP_H_
+
+#include "ice_type.h"
+
+/* Package minimal version supported */
+#define ICE_PKG_SUPP_VER_MAJ 1
+#define ICE_PKG_SUPP_VER_MNR 3
+
+/* Package format version */
+#define ICE_PKG_FMT_VER_MAJ 1
+#define ICE_PKG_FMT_VER_MNR 0
+#define ICE_PKG_FMT_VER_UPD 0
+#define ICE_PKG_FMT_VER_DFT 0
+
+#define ICE_PKG_CNT 4
+
+#define ICE_FV_OFFSET_INVAL 0x1FF
+
+/* Extraction Sequence (Field Vector) Table */
+struct ice_fv_word {
+ u8 prot_id;
+ u16 off; /* Offset within the protocol header */
+ u8 resvrd;
+} __packed;
+
+#define ICE_MAX_NUM_PROFILES 256
+
+#define ICE_MAX_FV_WORDS 48
+struct ice_fv {
+ struct ice_fv_word ew[ICE_MAX_FV_WORDS];
+};
+
+enum ice_ddp_state {
+ /* Indicates that this call to ice_init_pkg
+ * successfully loaded the requested DDP package
+ */
+ ICE_DDP_PKG_SUCCESS = 0,
+
+	/* Generic error for already-loaded cases; it is mapped later to
+	 * one of the three more specific values below
+	 */
+ ICE_DDP_PKG_ALREADY_LOADED = -1,
+
+ /* Indicates that a DDP package of the same version has already been
+ * loaded onto the device by a previous call or by another PF
+ */
+ ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
+
+ /* The device has a DDP package that is not supported by the driver */
+ ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
+
+ /* The device has a compatible package
+ * (but different from the request) already loaded
+ */
+ ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
+
+ /* The firmware loaded on the device is not compatible with
+ * the DDP package loaded
+ */
+ ICE_DDP_PKG_FW_MISMATCH = -5,
+
+ /* The DDP package file is invalid */
+ ICE_DDP_PKG_INVALID_FILE = -6,
+
+ /* The version of the DDP package provided is higher than
+ * the driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
+
+ /* The version of the DDP package provided is lower than the
+ * driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
+
+ /* The signature of the DDP package file provided is invalid */
+ ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9,
+
+ /* The DDP package file security revision is too low and not
+ * supported by firmware
+ */
+ ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10,
+
+ /* An error occurred in firmware while loading the DDP package */
+ ICE_DDP_PKG_LOAD_ERROR = -11,
+
+ /* Other errors */
+ ICE_DDP_PKG_ERR = -12
+};
+
+/* Package and segment headers and tables */
+struct ice_pkg_hdr {
+ struct ice_pkg_ver pkg_format_ver;
+ __le32 seg_count;
+ __le32 seg_offset[];
+};
+
+/* generic segment */
+struct ice_generic_seg_hdr {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_ICE 0x00000010
+ __le32 seg_type;
+ struct ice_pkg_ver seg_format_ver;
+ __le32 seg_size;
+ char seg_id[ICE_PKG_NAME_SIZE];
+};
+
+/* ice specific segment */
+
+union ice_device_id {
+ struct {
+ __le16 device_id;
+ __le16 vendor_id;
+ } dev_vend_id;
+ __le32 id;
+};
+
+struct ice_device_id_entry {
+ union ice_device_id device;
+ union ice_device_id sub_device;
+};
+
+struct ice_seg {
+ struct ice_generic_seg_hdr hdr;
+ __le32 device_table_count;
+ struct ice_device_id_entry device_table[];
+};
+
+struct ice_nvm_table {
+ __le32 table_count;
+ __le32 vers[];
+};
+
+struct ice_buf {
+#define ICE_PKG_BUF_SIZE 4096
+ u8 buf[ICE_PKG_BUF_SIZE];
+};
+
+struct ice_buf_table {
+ __le32 buf_count;
+ struct ice_buf buf_array[];
+};
+
+struct ice_run_time_cfg_seg {
+ struct ice_generic_seg_hdr hdr;
+ u8 rsvd[8];
+ struct ice_buf_table buf_table;
+};
+
+/* global metadata specific segment */
+struct ice_global_metadata_seg {
+ struct ice_generic_seg_hdr hdr;
+ struct ice_pkg_ver pkg_ver;
+ __le32 rsvd;
+ char pkg_name[ICE_PKG_NAME_SIZE];
+};
+
+#define ICE_MIN_S_OFF 12
+#define ICE_MAX_S_OFF 4095
+#define ICE_MIN_S_SZ 1
+#define ICE_MAX_S_SZ 4084
+
+/* section information */
+struct ice_section_entry {
+ __le32 type;
+ __le16 offset;
+ __le16 size;
+};
+
+#define ICE_MIN_S_COUNT 1
+#define ICE_MAX_S_COUNT 511
+#define ICE_MIN_S_DATA_END 12
+#define ICE_MAX_S_DATA_END 4096
+
+#define ICE_METADATA_BUF 0x80000000
+
+struct ice_buf_hdr {
+ __le16 section_count;
+ __le16 data_end;
+ struct ice_section_entry section_entry[];
+};
+
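+/* Number of entries of size 'ent_sz' that fit in one ICE_PKG_BUF_SIZE buffer
+ * after subtracting the ice_buf_hdr (sized for one section entry) and a
+ * section header of size 'hd_sz'. See ICE_MAX_LABELS_IN_BUF below for an
+ * example instantiation.
+ */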
+#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) \
+ ((ICE_PKG_BUF_SIZE - \
+ struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) / \
+ (ent_sz))
+
+/* ice package section IDs */
+#define ICE_SID_METADATA 1
+#define ICE_SID_XLT0_SW 10
+#define ICE_SID_XLT_KEY_BUILDER_SW 11
+#define ICE_SID_XLT1_SW 12
+#define ICE_SID_XLT2_SW 13
+#define ICE_SID_PROFID_TCAM_SW 14
+#define ICE_SID_PROFID_REDIR_SW 15
+#define ICE_SID_FLD_VEC_SW 16
+#define ICE_SID_CDID_KEY_BUILDER_SW 17
+
+struct ice_meta_sect {
+ struct ice_pkg_ver ver;
+#define ICE_META_SECT_NAME_SIZE 28
+ char name[ICE_META_SECT_NAME_SIZE];
+ __le32 track_id;
+};
+
+#define ICE_SID_CDID_REDIR_SW 18
+
+#define ICE_SID_XLT0_ACL 20
+#define ICE_SID_XLT_KEY_BUILDER_ACL 21
+#define ICE_SID_XLT1_ACL 22
+#define ICE_SID_XLT2_ACL 23
+#define ICE_SID_PROFID_TCAM_ACL 24
+#define ICE_SID_PROFID_REDIR_ACL 25
+#define ICE_SID_FLD_VEC_ACL 26
+#define ICE_SID_CDID_KEY_BUILDER_ACL 27
+#define ICE_SID_CDID_REDIR_ACL 28
+
+#define ICE_SID_XLT0_FD 30
+#define ICE_SID_XLT_KEY_BUILDER_FD 31
+#define ICE_SID_XLT1_FD 32
+#define ICE_SID_XLT2_FD 33
+#define ICE_SID_PROFID_TCAM_FD 34
+#define ICE_SID_PROFID_REDIR_FD 35
+#define ICE_SID_FLD_VEC_FD 36
+#define ICE_SID_CDID_KEY_BUILDER_FD 37
+#define ICE_SID_CDID_REDIR_FD 38
+
+#define ICE_SID_XLT0_RSS 40
+#define ICE_SID_XLT_KEY_BUILDER_RSS 41
+#define ICE_SID_XLT1_RSS 42
+#define ICE_SID_XLT2_RSS 43
+#define ICE_SID_PROFID_TCAM_RSS 44
+#define ICE_SID_PROFID_REDIR_RSS 45
+#define ICE_SID_FLD_VEC_RSS 46
+#define ICE_SID_CDID_KEY_BUILDER_RSS 47
+#define ICE_SID_CDID_REDIR_RSS 48
+
+#define ICE_SID_RXPARSER_MARKER_PTYPE 55
+#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_RXPARSER_METADATA_INIT 58
+#define ICE_SID_TXPARSER_BOOST_TCAM 66
+
+#define ICE_SID_XLT0_PE 80
+#define ICE_SID_XLT_KEY_BUILDER_PE 81
+#define ICE_SID_XLT1_PE 82
+#define ICE_SID_XLT2_PE 83
+#define ICE_SID_PROFID_TCAM_PE 84
+#define ICE_SID_PROFID_REDIR_PE 85
+#define ICE_SID_FLD_VEC_PE 86
+#define ICE_SID_CDID_KEY_BUILDER_PE 87
+#define ICE_SID_CDID_REDIR_PE 88
+
+/* Label Metadata section IDs */
+#define ICE_SID_LBL_FIRST 0x80000010
+#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
+/* The following define MUST be updated to reflect the last label section ID */
+#define ICE_SID_LBL_LAST 0x80000038
+
+/* Label ICE runtime configuration section IDs */
+#define ICE_SID_TX_5_LAYER_TOPO 0x10
+
+enum ice_block {
+ ICE_BLK_SW = 0,
+ ICE_BLK_ACL,
+ ICE_BLK_FD,
+ ICE_BLK_RSS,
+ ICE_BLK_PE,
+ ICE_BLK_COUNT
+};
+
+enum ice_sect {
+ ICE_XLT0 = 0,
+ ICE_XLT_KB,
+ ICE_XLT1,
+ ICE_XLT2,
+ ICE_PROF_TCAM,
+ ICE_PROF_REDIR,
+ ICE_VEC_TBL,
+ ICE_CDID_KB,
+ ICE_CDID_REDIR,
+ ICE_SECT_COUNT
+};
+
+/* package labels */
+struct ice_label {
+ __le16 value;
+#define ICE_PKG_LABEL_SIZE 64
+ char name[ICE_PKG_LABEL_SIZE];
+};
+
+struct ice_label_section {
+ __le16 count;
+ struct ice_label label[];
+};
+
+#define ICE_MAX_LABELS_IN_BUF \
+ ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_label_section *)0, \
+ label, 1) - \
+ sizeof(struct ice_label), \
+ sizeof(struct ice_label))
+
+struct ice_sw_fv_section {
+ __le16 count;
+ __le16 base_offset;
+ struct ice_fv fv[];
+};
+
+struct ice_sw_fv_list_entry {
+ struct list_head list_entry;
+ u32 profile_id;
+ struct ice_fv *fv_ptr;
+};
+
+/* The BOOST TCAM stores the match packet header in reverse order, meaning
+ * the fields are reversed; in addition, this means that the normally big endian
+ * fields of the packet are now little endian.
+ */
+struct ice_boost_key_value {
+#define ICE_BOOST_REMAINING_HV_KEY 15
+ u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
+ __le16 hv_dst_port_key;
+ __le16 hv_src_port_key;
+ u8 tcam_search_key;
+} __packed;
+
+struct ice_boost_key {
+ struct ice_boost_key_value key;
+ struct ice_boost_key_value key2;
+};
+
+/* package Boost TCAM entry */
+struct ice_boost_tcam_entry {
+ __le16 addr;
+ __le16 reserved;
+ /* break up the 40 bytes of key into different fields */
+ struct ice_boost_key key;
+ u8 boost_hit_index_group;
+ /* The following contains bitfields which are not on byte boundaries.
+ * These fields are currently unused by driver software.
+ */
+#define ICE_BOOST_BIT_FIELDS 43
+ u8 bit_fields[ICE_BOOST_BIT_FIELDS];
+};
+
+struct ice_boost_tcam_section {
+ __le16 count;
+ __le16 reserved;
+ struct ice_boost_tcam_entry tcam[];
+};
+
+#define ICE_MAX_BST_TCAMS_IN_BUF \
+ ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_boost_tcam_section *)0, \
+ tcam, 1) - \
+ sizeof(struct ice_boost_tcam_entry), \
+ sizeof(struct ice_boost_tcam_entry))
+
+/* package Marker Ptype TCAM entry */
+struct ice_marker_ptype_tcam_entry {
+#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
+ __le16 addr;
+ __le16 ptype;
+ u8 keys[20];
+};
+
+struct ice_marker_ptype_tcam_section {
+ __le16 count;
+ __le16 reserved;
+ struct ice_marker_ptype_tcam_entry tcam[];
+};
+
+#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \
+ ICE_MAX_ENTRIES_IN_BUF( \
+ struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, \
+ 1) - \
+ sizeof(struct ice_marker_ptype_tcam_entry), \
+ sizeof(struct ice_marker_ptype_tcam_entry))
+
+struct ice_xlt1_section {
+ __le16 count;
+ __le16 offset;
+ u8 value[];
+};
+
+struct ice_xlt2_section {
+ __le16 count;
+ __le16 offset;
+ __le16 value[];
+};
+
+struct ice_prof_redir_section {
+ __le16 count;
+ __le16 offset;
+ u8 redir_value[];
+};
+
+/* package buffer building */
+
+struct ice_buf_build {
+ struct ice_buf buf;
+ u16 reserved_section_table_entries;
+};
+
+struct ice_pkg_enum {
+ struct ice_buf_table *buf_table;
+ u32 buf_idx;
+
+ u32 type;
+ struct ice_buf_hdr *buf;
+ u32 sect_idx;
+ void *sect;
+ u32 sect_type;
+
+ u32 entry_idx;
+ void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
+};
+
+int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd);
+int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd);
+
+void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
+
+enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len);
+
+struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
+
+struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+ struct ice_pkg_hdr *pkg_hdr);
+
+int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
+int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
+
+int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
+u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
+void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type);
+
+struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf);
+
+#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 61dd2f18dee8..6d560d1c74a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -5,6 +5,7 @@
#define _ICE_DEVIDS_H_
/* Device IDs */
+#define ICE_DEV_ID_E822_SI_DFLT 0x1888
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
@@ -23,6 +24,11 @@
#define ICE_DEV_ID_E810C_SFP 0x1593
#define ICE_SUBDEV_ID_E810T 0x000E
#define ICE_SUBDEV_ID_E810T2 0x000F
+#define ICE_SUBDEV_ID_E810T3 0x0010
+#define ICE_SUBDEV_ID_E810T4 0x0011
+#define ICE_SUBDEV_ID_E810T5 0x0012
+#define ICE_SUBDEV_ID_E810T6 0x02E9
+#define ICE_SUBDEV_ID_E810T7 0x02EA
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index b9bd9f9472f6..bc44cc220818 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -1,11 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */
+#include <linux/vmalloc.h>
+
#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
+#include "ice_dcb_lib.h"
+
+static int ice_active_port_option = -1;
/* context for devlink info version reporting */
struct ice_info_ctx {
@@ -39,13 +44,13 @@ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
+ int status;
status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
if (status)
/* We failed to locate the PBA, so just skip this entry */
- dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n",
- ice_stat_str(status));
+ dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %d\n",
+ status);
}
static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
@@ -251,7 +256,6 @@ static int ice_devlink_info_get(struct devlink *devlink,
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
struct ice_info_ctx *ctx;
- enum ice_status status;
size_t i;
int err;
@@ -266,20 +270,19 @@ static int ice_devlink_info_get(struct devlink *devlink,
return -ENOMEM;
/* discover capabilities first */
- status = ice_discover_dev_caps(hw, &ctx->dev_caps);
- if (status) {
- dev_dbg(dev, "Failed to discover device capabilities, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_discover_dev_caps(hw, &ctx->dev_caps);
+ if (err) {
+ dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
- err = -EIO;
goto out_free_ctx;
}
if (ctx->dev_caps.common_cap.nvm_update_pending_orom) {
- status = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
- if (status) {
- dev_dbg(dev, "Unable to read inactive Option ROM version data, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
+ if (err) {
+ dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
@@ -287,10 +290,10 @@ static int ice_devlink_info_get(struct devlink *devlink,
}
if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) {
- status = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
- if (status) {
- dev_dbg(dev, "Unable to read inactive NVM version data, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
+ if (err) {
+ dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
@@ -298,22 +301,16 @@ static int ice_devlink_info_get(struct devlink *devlink,
}
if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) {
- status = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
- if (status) {
- dev_dbg(dev, "Unable to read inactive Netlist version data, status %s aq_err %s\n",
- ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
+ if (err) {
+ dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
}
}
- err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to set driver name");
- goto out_free_ctx;
- }
-
ice_info_get_dsn(pf, ctx);
err = devlink_info_serial_number_put(req, ctx->buf);
@@ -373,61 +370,1028 @@ out_free_ctx:
}
/**
- * ice_devlink_flash_update - Update firmware stored in flash on the device
- * @devlink: pointer to devlink associated with device to update
- * @params: flash update parameters
+ * ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
+ * @pf: pointer to the pf instance
* @extack: netlink extended ACK structure
*
- * Perform a device flash update. The bulk of the update logic is contained
- * within the ice_flash_pldm_image function.
+ * Allow user to activate new Embedded Management Processor firmware by
+ * issuing device specific EMP reset. Called in response to
+ * a DEVLINK_CMD_RELOAD with the DEVLINK_RELOAD_ACTION_FW_ACTIVATE.
*
- * Returns: zero on success, or an error code on failure.
+ * Note that teardown and rebuild of the driver state happens automatically as
+ * part of an interrupt and watchdog task. This is because all physical
+ * functions on the device must be able to reset when an EMP reset occurs from
+ * any source.
*/
static int
-ice_devlink_flash_update(struct devlink *devlink,
- struct devlink_flash_update_params *params,
- struct netlink_ext_ack *extack)
+ice_devlink_reload_empr_start(struct ice_pf *pf,
+ struct netlink_ext_ack *extack)
{
- struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- u8 preservation;
+ u8 pending;
int err;
- if (!params->overwrite_mask) {
- /* preserve all settings and identifiers */
- preservation = ICE_AQC_NVM_PRESERVE_ALL;
- } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) {
- /* overwrite settings, but preserve the vital device identifiers */
- preservation = ICE_AQC_NVM_PRESERVE_SELECTED;
- } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS |
- DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) {
- /* overwrite both settings and identifiers, preserve nothing */
- preservation = ICE_AQC_NVM_NO_PRESERVATION;
- } else {
- NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported");
- return -EOPNOTSUPP;
+ err = ice_get_pending_updates(pf, &pending, extack);
+ if (err)
+ return err;
+
+ /* pending is a bitmask of which flash banks have a pending update,
+ * including the main NVM bank, the Option ROM bank, and the netlist
+ * bank. If any of these bits are set, then there is a pending update
+ * waiting to be activated.
+ */
+ if (!pending) {
+ NL_SET_ERR_MSG_MOD(extack, "No pending firmware update");
+ return -ECANCELED;
+ }
+
+ if (pf->fw_emp_reset_disabled) {
+ NL_SET_ERR_MSG_MOD(extack, "EMP reset is not available. To activate firmware, a reboot or power cycle is needed");
+ return -ECANCELED;
+ }
+
+ dev_dbg(dev, "Issuing device EMP reset to activate firmware\n");
+
+ err = ice_aq_nvm_update_empr(hw);
+ if (err) {
+ dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
+ return err;
}
- if (!hw->dev_caps.common_cap.nvm_unified_update) {
- NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
+ return 0;
+}
+
+/**
+ * ice_devlink_reload_down - prepare for reload
+ * @devlink: pointer to the devlink instance to reload
+ * @netns_change: if true, the network namespace is changing
+ * @action: the action to perform
+ * @limit: limits on what reload should do, such as not resetting
+ * @extack: netlink extended ACK structure
+ */
+static int
+ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ NL_SET_ERR_MSG_MOD(extack,
+					   "Go to legacy mode before doing reinit");
+ return -EOPNOTSUPP;
+ }
+ if (ice_is_adq_active(pf)) {
+ NL_SET_ERR_MSG_MOD(extack,
+					   "Turn off ADQ before doing reinit");
+ return -EOPNOTSUPP;
+ }
+ if (ice_has_vfs(pf)) {
+ NL_SET_ERR_MSG_MOD(extack,
+					   "Remove all VFs before doing reinit");
+ return -EOPNOTSUPP;
+ }
+ ice_unload(pf);
+ return 0;
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
+ return ice_devlink_reload_empr_start(pf, extack);
+ default:
+ WARN_ON(1);
return -EOPNOTSUPP;
}
+}
- err = ice_check_for_pending_update(pf, NULL, extack);
- if (err)
+/**
+ * ice_devlink_reload_empr_finish - Wait for EMP reset to finish
+ * @pf: pointer to the pf instance
+ * @extack: netlink extended ACK structure
+ *
+ * Wait for driver to finish rebuilding after EMP reset is completed. This
+ * includes the time for the actual device reset as well as the time for the
+ * driver's rebuild to complete.
+ */
+static int
+ice_devlink_reload_empr_finish(struct ice_pf *pf,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = ice_wait_for_reset(pf, 60 * HZ);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");
return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_opt_speed_str - convert speed to a string
+ * @speed: speed value
+ */
+static const char *ice_devlink_port_opt_speed_str(u8 speed)
+{
+ switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
+ case ICE_AQC_PORT_OPT_MAX_LANE_100M:
+ return "0.1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_1G:
+ return "1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
+ return "2.5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_5G:
+ return "5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_10G:
+ return "10";
+ case ICE_AQC_PORT_OPT_MAX_LANE_25G:
+ return "25";
+ case ICE_AQC_PORT_OPT_MAX_LANE_50G:
+ return "50";
+ case ICE_AQC_PORT_OPT_MAX_LANE_100G:
+ return "100";
+ }
+
+ return "-";
+}
+
+#define ICE_PORT_OPT_DESC_LEN 50
+/**
+ * ice_devlink_port_options_print - Print available port split options
+ * @pf: the PF to print split port options
+ *
+ * Prints a table with available port split options and max port speeds
+ */
+static void ice_devlink_port_options_print(struct ice_pf *pf)
+{
+ u8 i, j, options_count, cnt, speed, pending_idx, active_idx;
+ struct ice_aqc_get_port_options_elem *options, *opt;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ char desc[ICE_PORT_OPT_DESC_LEN];
+ const char *str;
+ int status;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV,
+ sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return;
+
+ for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) {
+ opt = options + i * ICE_AQC_PORT_OPT_MAX;
+ options_count = ICE_AQC_PORT_OPT_MAX;
+ active_valid = 0;
+
+ status = ice_aq_get_port_options(&pf->hw, opt, &options_count,
+ i, true, &active_idx,
+ &active_valid, &pending_idx,
+ &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port option for port %d, err %d\n",
+ i, status);
+ goto err;
+ }
+ }
+
+ dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n");
+ dev_dbg(dev, "Status Split Quad 0 Quad 1\n");
+ dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n");
+
+ for (i = 0; i < options_count; i++) {
+ cnt = 0;
+
+ if (i == ice_active_port_option)
+ str = "Active";
+ else if ((i == pending_idx) && pending_valid)
+ str = "Pending";
+ else
+ str = "";
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-8s", str);
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-6u", options[i].pmd);
+
+ for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) {
+ speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed;
+ str = ice_devlink_port_opt_speed_str(speed);
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%3s ", str);
+ }
+
+ dev_dbg(dev, "%s\n", desc);
+ }
+
+err:
+ kfree(options);
+}
+
+/**
+ * ice_devlink_aq_set_port_option - Send set port option admin queue command
+ * @pf: the PF to print split port options
+ * @option_idx: selected port option
+ * @extack: extended netdev ack structure
+ *
+ * Sends set port option admin queue command with selected port option and
+ * calls NVM write activate.
+ */
+static int
+ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int status;
+
+ status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx);
+ if (status) {
+ dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed");
+ return -EIO;
+ }
+
+ status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE);
+ if (status) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ return -EIO;
+ }
+
+ status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL);
+ if (status) {
+ dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data");
+ ice_release_nvm(&pf->hw);
+ return -EIO;
+ }
+
+ ice_release_nvm(&pf->hw);
+
+ NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split");
+ return 0;
+}
+
+/**
+ * ice_devlink_port_split - .port_split devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @count: number of ports to split to
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_split operation.
+ *
+ * Unfortunately, the devlink expression of available options is limited
+ * to just a number, so search for an FW port option which supports
+ * the specified number. As there could be multiple FW port options with
+ * the same port split count, allow switching between them. When the same
+ * port split count request is issued again, switch to the next FW port
+ * option with the same port split count.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
+ unsigned int count, struct netlink_ext_ack *extack)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, j, active_idx, pending_idx, new_option;
+ struct ice_pf *pf = devlink_priv(devlink);
+ u8 option_count = ICE_AQC_PORT_OPT_MAX;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port split options, err = %d\n",
+ status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options");
+ return -EIO;
+ }
+
+ new_option = ICE_AQC_PORT_OPT_MAX;
+ active_idx = pending_valid ? pending_idx : active_idx;
+ for (i = 1; i <= option_count; i++) {
+ /* In order to allow switching between FW port options with
+ * the same port split count, search for a new option starting
+ * from the active/pending option (with array wrap around).
+ */
+ j = (active_idx + i) % option_count;
+
+ if (count == options[j].pmd) {
+ new_option = j;
+ break;
+ }
+ }
+
+ if (new_option == active_idx) {
+ dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n",
+ count);
+ NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ if (new_option == ICE_AQC_PORT_OPT_MAX) {
+ dev_dbg(dev, "request to split: count: %u not found\n", count);
+ NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ status = ice_devlink_aq_set_port_option(pf, new_option, extack);
+ if (status)
+ return status;
+
+ ice_devlink_port_options_print(pf);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_unsplit - .port_unsplit devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_unsplit operation.
+ * Calls ice_devlink_port_split with split count set to 1.
+ * Note that an FW option with split count 1 might not be available.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ return ice_devlink_port_split(devlink, port, 1, extack);
+}
+
+/**
+ * ice_tear_down_devlink_rate_tree - removes devlink-rate exported tree
+ * @pf: pf struct
+ *
+ * This function tears down the tree exported during VF creation.
+ */
+void ice_tear_down_devlink_rate_tree(struct ice_pf *pf)
+{
+ struct devlink *devlink;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ devlink = priv_to_devlink(pf);
+
+ devl_lock(devlink);
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ if (vf->devlink_port.devlink_rate)
+ devl_rate_leaf_destroy(&vf->devlink_port);
+ }
+ mutex_unlock(&pf->vfs.table_lock);
+
+ devl_rate_nodes_destroy(devlink);
+ devl_unlock(devlink);
+}
+
+/**
+ * ice_enable_custom_tx - try to enable custom Tx feature
+ * @pf: pf struct
+ *
+ * This function tries to enable the custom Tx feature;
+ * it is not possible to enable it if DCB or ADQ is active.
+ */
+static bool ice_enable_custom_tx(struct ice_pf *pf)
+{
+ struct ice_port_info *pi = ice_get_main_vsi(pf)->port_info;
+ struct device *dev = ice_pf_to_dev(pf);
+
+ if (pi->is_custom_tx_enabled)
+ /* already enabled, return true */
+ return true;
+
+ if (ice_is_adq_active(pf)) {
+ dev_err(dev, "ADQ active, can't modify Tx scheduler tree\n");
+ return false;
+ }
+
+ if (ice_is_dcb_active(pf)) {
+ dev_err(dev, "DCB active, can't modify Tx scheduler tree\n");
+ return false;
+ }
+
+ pi->is_custom_tx_enabled = true;
+
+ return true;
+}
+
+/**
+ * ice_traverse_tx_tree - traverse Tx scheduler tree
+ * @devlink: devlink struct
+ * @node: current node, used for recursion
+ * @tc_node: tc_node struct, that is treated as a root
+ * @pf: pf struct
+ *
+ * This function traverses the Tx scheduler tree and exports the
+ * entire structure to devlink-rate.
+ */
+static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node *node,
+ struct ice_sched_node *tc_node, struct ice_pf *pf)
+{
+ struct devlink_rate *rate_node = NULL;
+ struct ice_vf *vf;
+ int i;
+
+ if (node->parent == tc_node) {
+ /* create root node */
+ rate_node = devl_rate_node_create(devlink, node, node->name, NULL);
+ } else if (node->vsi_handle &&
+ pf->vsi[node->vsi_handle]->vf) {
+ vf = pf->vsi[node->vsi_handle]->vf;
+ if (!vf->devlink_port.devlink_rate)
+			/* leaf nodes don't have children,
+			 * so we don't set rate_node
+			 */
+ devl_rate_leaf_create(&vf->devlink_port, node,
+ node->parent->rate_node);
+ } else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF &&
+ node->parent->rate_node) {
+ rate_node = devl_rate_node_create(devlink, node, node->name,
+ node->parent->rate_node);
+ }
+
+ if (rate_node && !IS_ERR(rate_node))
+ node->rate_node = rate_node;
+
+ for (i = 0; i < node->num_children; i++)
+ ice_traverse_tx_tree(devlink, node->children[i], tc_node, pf);
+}
+
+/**
+ * ice_devlink_rate_init_tx_topology - export Tx scheduler tree to devlink rate
+ * @devlink: devlink struct
+ * @vsi: main vsi struct
+ *
+ * This function finds a root node, then calls ice_traverse_tx_tree(), which
+ * traverses the tree and exports its contents to devlink rate.
+ */
+int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi)
+{
+ struct ice_port_info *pi = vsi->port_info;
+ struct ice_sched_node *tc_node;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ tc_node = pi->root->children[0];
+ mutex_lock(&pi->sched_lock);
+ devl_lock(devlink);
+ for (i = 0; i < tc_node->num_children; i++)
+ ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
+ devl_unlock(devlink);
+ mutex_unlock(&pi->sched_lock);
+
+ return 0;
+}
+
+/**
+ * ice_set_object_tx_share - sets node scheduling parameter
+ * @pi: port info struct instance
+ * @node: node struct instance
+ * @bw: bandwidth in bytes per second
+ * @extack: extended netdev ack structure
+ *
+ * This function sets ICE_MIN_BW scheduling BW limit.
+ */
+static int ice_set_object_tx_share(struct ice_port_info *pi, struct ice_sched_node *node,
+ u64 bw, struct netlink_ext_ack *extack)
+{
+ int status;
+
+ mutex_lock(&pi->sched_lock);
+	/* convert bytes per second to kilobits per second: 1 kbit/s = 125 B/s */
+ node->tx_share = div_u64(bw, 125);
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, node->tx_share);
+ mutex_unlock(&pi->sched_lock);
+
+ if (status)
+ NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_share");
+
+ return status;
+}
+
+/**
+ * ice_set_object_tx_max - sets node scheduling parameter
+ * @pi: port info struct instance
+ * @node: node struct instance
+ * @bw: bandwidth in bytes per second
+ * @extack: extended netdev ack structure
+ *
+ * This function sets ICE_MAX_BW scheduling BW limit.
+ */
+static int ice_set_object_tx_max(struct ice_port_info *pi, struct ice_sched_node *node,
+ u64 bw, struct netlink_ext_ack *extack)
+{
+ int status;
+
+ mutex_lock(&pi->sched_lock);
+	/* convert bytes per second to kilobits per second: 1 kbit/s = 125 B/s */
+ node->tx_max = div_u64(bw, 125);
+ status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, node->tx_max);
+ mutex_unlock(&pi->sched_lock);
+
+ if (status)
+ NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_max");
+
+ return status;
+}
+
+/**
+ * ice_set_object_tx_priority - sets node scheduling parameter
+ * @pi: port info struct instance
+ * @node: node struct instance
+ * @priority: value representing priority for strict priority arbitration
+ * @extack: extended netdev ack structure
+ *
+ * This function sets priority of node among siblings.
+ */
+static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched_node *node,
+ u32 priority, struct netlink_ext_ack *extack)
+{
+ int status;
+
+ if (priority >= 8) {
+ NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8");
+ return -EINVAL;
+ }
+
+ mutex_lock(&pi->sched_lock);
+ node->tx_priority = priority;
+ status = ice_sched_set_node_priority(pi, node, node->tx_priority);
+ mutex_unlock(&pi->sched_lock);
+
+ if (status)
+ NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_priority");
+
+ return status;
+}
+
+/**
+ * ice_set_object_tx_weight - sets node scheduling parameter
+ * @pi: port information struct instance
+ * @node: node struct instance
+ * @weight: value representing relative weight for WFQ arbitration
+ * @extack: extended netdev ack structure
+ *
+ * This function sets node weight for WFQ algorithm.
+ */
+static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_node *node,
+ u32 weight, struct netlink_ext_ack *extack)
+{
+ int status;
+
+ if (weight > 200 || weight < 1) {
+ NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200");
+ return -EINVAL;
+ }
+
+ mutex_lock(&pi->sched_lock);
+ node->tx_weight = weight;
+ status = ice_sched_set_node_weight(pi, node, node->tx_weight);
+ mutex_unlock(&pi->sched_lock);
+
+ if (status)
+ NL_SET_ERR_MSG_MOD(extack, "Can't set scheduling node tx_weight");
+
+ return status;
+}
+
+/**
+ * ice_get_pi_from_dev_rate - get port info from devlink_rate
+ * @rate_node: devlink_rate struct instance
+ *
+ * This function returns the port_info struct corresponding to the devlink_rate node.
+ */
+static struct ice_port_info *ice_get_pi_from_dev_rate(struct devlink_rate *rate_node)
+{
+ struct ice_pf *pf = devlink_priv(rate_node->devlink);
+
+ return ice_get_main_vsi(pf)->port_info;
+}
+
+static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node;
+ struct ice_port_info *pi;
+
+ pi = ice_get_pi_from_dev_rate(rate_node);
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
+ return -EBUSY;
- devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0);
+ /* preallocate memory for ice_sched_node */
+ node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
+ *priv = node;
- return ice_flash_pldm_image(pf, params->fw, preservation, extack);
+ return 0;
+}
+
+static int ice_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node, *tc_node;
+ struct ice_port_info *pi;
+
+ pi = ice_get_pi_from_dev_rate(rate_node);
+ tc_node = pi->root->children[0];
+ node = priv;
+
+ if (!rate_node->parent || !node || tc_node == node || !extack)
+ return 0;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
+ return -EBUSY;
+
+ /* cannot delete a node that has children */
+ if (node->num_children)
+ return -EINVAL;
+
+ mutex_lock(&pi->sched_lock);
+ ice_free_sched_node(pi, node);
+ mutex_unlock(&pi->sched_lock);
+
+ return 0;
+}
+
+static int ice_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_leaf),
+ node, tx_max, extack);
+}
+
+static int ice_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_leaf), node,
+ tx_share, extack);
+}
+
+static int ice_devlink_rate_leaf_tx_priority_set(struct devlink_rate *rate_leaf, void *priv,
+ u32 tx_priority, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_leaf), node,
+ tx_priority, extack);
+}
+
+static int ice_devlink_rate_leaf_tx_weight_set(struct devlink_rate *rate_leaf, void *priv,
+ u32 tx_weight, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_leaf->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_leaf), node,
+ tx_weight, extack);
+}
+
+static int ice_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_max(ice_get_pi_from_dev_rate(rate_node),
+ node, tx_max, extack);
+}
+
+static int ice_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_share(ice_get_pi_from_dev_rate(rate_node),
+ node, tx_share, extack);
+}
+
+static int ice_devlink_rate_node_tx_priority_set(struct devlink_rate *rate_node, void *priv,
+ u32 tx_priority, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_priority(ice_get_pi_from_dev_rate(rate_node),
+ node, tx_priority, extack);
+}
+
+static int ice_devlink_rate_node_tx_weight_set(struct devlink_rate *rate_node, void *priv,
+ u32 tx_weight, struct netlink_ext_ack *extack)
+{
+ struct ice_sched_node *node = priv;
+
+ if (!ice_enable_custom_tx(devlink_priv(rate_node->devlink)))
+ return -EBUSY;
+
+ if (!node)
+ return 0;
+
+ return ice_set_object_tx_weight(ice_get_pi_from_dev_rate(rate_node),
+ node, tx_weight, extack);
+}
+
+static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
+ struct devlink_rate *parent,
+ void *priv, void *parent_priv,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_port_info *pi = ice_get_pi_from_dev_rate(devlink_rate);
+ struct ice_sched_node *tc_node, *node, *parent_node;
+ u16 num_nodes_added;
+ u32 first_node_teid;
+ u32 node_teid;
+ int status;
+
+ tc_node = pi->root->children[0];
+ node = priv;
+
+ if (!extack)
+ return 0;
+
+ if (!ice_enable_custom_tx(devlink_priv(devlink_rate->devlink)))
+ return -EBUSY;
+
+ if (!parent) {
+ if (!node || tc_node == node || node->num_children)
+ return -EINVAL;
+
+ mutex_lock(&pi->sched_lock);
+ ice_free_sched_node(pi, node);
+ mutex_unlock(&pi->sched_lock);
+
+ return 0;
+ }
+
+ parent_node = parent_priv;
+
+ /* if the node doesn't exist, create it */
+ if (!node->parent) {
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_add_elems(pi, tc_node, parent_node,
+ parent_node->tx_sched_layer + 1,
+ 1, &num_nodes_added, &first_node_teid,
+ &node);
+ mutex_unlock(&pi->sched_lock);
+
+ if (status) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't add a new node");
+ return status;
+ }
+
+ if (devlink_rate->tx_share)
+ ice_set_object_tx_share(pi, node, devlink_rate->tx_share, extack);
+ if (devlink_rate->tx_max)
+ ice_set_object_tx_max(pi, node, devlink_rate->tx_max, extack);
+ if (devlink_rate->tx_priority)
+ ice_set_object_tx_priority(pi, node, devlink_rate->tx_priority, extack);
+ if (devlink_rate->tx_weight)
+ ice_set_object_tx_weight(pi, node, devlink_rate->tx_weight, extack);
+ } else {
+ node_teid = le32_to_cpu(node->info.node_teid);
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_move_nodes(pi, parent_node, 1, &node_teid);
+ mutex_unlock(&pi->sched_lock);
+
+ if (status)
+ NL_SET_ERR_MSG_MOD(extack, "Can't move existing node to a new parent");
+ }
+
+ return status;
+}
+
+/**
+ * ice_devlink_reload_up - do reload up after reinit
+ * @devlink: pointer to the devlink instance reloading
+ * @action: the action requested
+ * @limit: limits imposed by userspace, such as not resetting
+ * @actions_performed: on return, indicates which actions were actually performed
+ * @extack: netlink extended ACK structure
+ */
+static int
+ice_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+ return ice_load(pf);
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
+ return ice_devlink_reload_empr_finish(pf, extack);
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
}
static const struct devlink_ops ice_devlink_ops = {
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_down = ice_devlink_reload_down,
+ .reload_up = ice_devlink_reload_up,
+ .port_split = ice_devlink_port_split,
+ .port_unsplit = ice_devlink_port_unsplit,
.eswitch_mode_get = ice_eswitch_mode_get,
.eswitch_mode_set = ice_eswitch_mode_set,
.info_get = ice_devlink_info_get,
.flash_update = ice_devlink_flash_update,
+
+ .rate_node_new = ice_devlink_rate_node_new,
+ .rate_node_del = ice_devlink_rate_node_del,
+
+ .rate_leaf_tx_max_set = ice_devlink_rate_leaf_tx_max_set,
+ .rate_leaf_tx_share_set = ice_devlink_rate_leaf_tx_share_set,
+ .rate_leaf_tx_priority_set = ice_devlink_rate_leaf_tx_priority_set,
+ .rate_leaf_tx_weight_set = ice_devlink_rate_leaf_tx_weight_set,
+
+ .rate_node_tx_max_set = ice_devlink_rate_node_tx_max_set,
+ .rate_node_tx_share_set = ice_devlink_rate_node_tx_share_set,
+ .rate_node_tx_priority_set = ice_devlink_rate_node_tx_priority_set,
+ .rate_node_tx_weight_set = ice_devlink_rate_node_tx_weight_set,
+
+ .rate_leaf_parent_set = ice_devlink_set_parent,
+ .rate_node_parent_set = ice_devlink_set_parent,
+};
+
+static int
+ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false;
+
+ return 0;
+}
+
+static int
+ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ bool roce_ena = ctx->val.vbool;
+ int ret;
+
+ if (!roce_ena) {
+ ice_unplug_aux_dev(pf);
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ return 0;
+ }
+
+ pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ ret = ice_plug_aux_dev(pf);
+ if (ret)
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+
+ return ret;
+}
+
+static int
+ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) {
+ NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP;
+
+ return 0;
+}
+
+static int
+ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ bool iw_ena = ctx->val.vbool;
+ int ret;
+
+ if (!iw_ena) {
+ ice_unplug_aux_dev(pf);
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ return 0;
+ }
+
+ pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP;
+ ret = ice_plug_aux_dev(pf);
+ if (ret)
+ pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+
+ return ret;
+}
+
+static int
+ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) {
+ NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param ice_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ice_devlink_enable_roce_get,
+ ice_devlink_enable_roce_set,
+ ice_devlink_enable_roce_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ice_devlink_enable_iw_get,
+ ice_devlink_enable_iw_set,
+ ice_devlink_enable_iw_validate),
+
};
static void ice_devlink_free(void *devlink_ptr)
@@ -485,6 +1449,70 @@ void ice_devlink_unregister(struct ice_pf *pf)
}
/**
+ * ice_devlink_set_switch_id - Set unique switch id based on pci dsn
+ * @pf: the PF to set the switch id for
+ * @ppid: struct with switch id information
+ */
+static void
+ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
+{
+ struct pci_dev *pdev = pf->pdev;
+ u64 id;
+
+ id = pci_get_dsn(pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
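
put_unaligned_be64() stores the 64-bit PCI DSN into the ppid buffer in big-endian byte order, so the switch id reads the same regardless of host endianness. A userspace sketch of the equivalent packing (editor's illustration; assumes glibc's htobe64(), and the DSN value is hypothetical):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t dsn = 0x0123456789abcdefULL;	/* hypothetical PCI DSN */
	unsigned char id[8];
	uint64_t be = htobe64(dsn);

	memcpy(id, &be, sizeof(be));		/* same layout put_unaligned_be64() produces */
	for (int i = 0; i < 8; i++)
		printf("%02x", id[i]);		/* prints 0123456789abcdef */
	printf("\n");
	return 0;
}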
+
+int ice_devlink_register_params(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+
+ return devlink_params_register(devlink, ice_devlink_params,
+ ARRAY_SIZE(ice_devlink_params));
+}
+
+void ice_devlink_unregister_params(struct ice_pf *pf)
+{
+ devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params,
+ ARRAY_SIZE(ice_devlink_params));
+}
+
+/**
+ * ice_devlink_set_port_split_options - Set port split options
+ * @pf: the PF to set port split options for
+ * @attrs: devlink attributes
+ *
+ * Sets devlink port split options based on the available FW port options.
+ */
+static void
+ice_devlink_set_port_split_options(struct ice_pf *pf,
+ struct devlink_port_attrs *attrs)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX;
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n",
+ status);
+ return;
+ }
+
+ /* find the biggest available port split count */
+ for (i = 0; i < option_count; i++)
+ attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);
+
+ attrs->splittable = attrs->lanes ? 1 : 0;
+ ice_active_port_option = active_idx;
+}
+
+/**
* ice_devlink_create_pf_port - Create a devlink port for this PF
* @pf: the PF to create a devlink port for
*
@@ -511,6 +1539,15 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = pf->hw.bus.func;
+
+ /* The FW supports port split options only for the whole device,
+ * so set them only on the first PF.
+ */
+ if (pf->hw.pf_id == 0)
+ ice_devlink_set_port_split_options(pf, &attrs);
+
+ ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
devlink_port_attrs_set(devlink_port, &attrs);
devlink = priv_to_devlink(pf);
@@ -532,12 +1569,7 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
*/
void ice_devlink_destroy_pf_port(struct ice_pf *pf)
{
- struct devlink_port *devlink_port;
-
- devlink_port = &pf->devlink_port;
-
- devlink_port_type_clear(devlink_port);
- devlink_port_unregister(devlink_port);
+ devlink_port_unregister(&pf->devlink_port);
}
/**
@@ -560,13 +1592,18 @@ int ice_devlink_create_vf_port(struct ice_vf *vf)
pf = vf->pf;
dev = ice_pf_to_dev(pf);
- vsi = ice_get_vf_vsi(vf);
devlink_port = &vf->devlink_port;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
attrs.pci_vf.pf = pf->hw.bus.func;
attrs.pci_vf.vf = vf->vf_id;
+ ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
devlink_port_attrs_set(devlink_port, &attrs);
devlink = priv_to_devlink(pf);
@@ -588,24 +1625,27 @@ int ice_devlink_create_vf_port(struct ice_vf *vf)
*/
void ice_devlink_destroy_vf_port(struct ice_vf *vf)
{
- struct devlink_port *devlink_port;
+ devl_rate_leaf_destroy(&vf->devlink_port);
+ devlink_port_unregister(&vf->devlink_port);
+}
- devlink_port = &vf->devlink_port;
+#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
- devlink_port_type_clear(devlink_port);
- devlink_port_unregister(devlink_port);
-}
+static const struct devlink_region_ops ice_nvm_region_ops;
+static const struct devlink_region_ops ice_sram_region_ops;
/**
- * ice_devlink_nvm_snapshot - Capture a snapshot of the Shadow RAM contents
+ * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents
* @devlink: the devlink instance
- * @ops: the devlink region being snapshotted
+ * @ops: the devlink region to snapshot
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
- * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
- * the shadow-ram devlink region. It captures a snapshot of the shadow ram
- * contents. This snapshot can later be viewed via the devlink-region
+ * This function is called in response to a DEVLINK_CMD_REGION_NEW for either
+ * the nvm-flash or shadow-ram region.
+ *
+ * It captures a snapshot of the NVM or Shadow RAM flash contents. This
+ * snapshot can then later be viewed via the DEVLINK_CMD_REGION_READ netlink
* interface.
*
* @returns zero on success, and updates the data pointer. Returns a non-zero
@@ -618,38 +1658,134 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
- void *nvm_data;
- u32 nvm_size;
+ bool read_shadow_ram;
+ u8 *nvm_data, *tmp, i;
+ u32 nvm_size, left;
+ s8 num_blks;
+ int status;
+
+ if (ops == &ice_nvm_region_ops) {
+ read_shadow_ram = false;
+ nvm_size = hw->flash.flash_size;
+ } else if (ops == &ice_sram_region_ops) {
+ read_shadow_ram = true;
+ nvm_size = hw->flash.sr_words * 2u;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected region in snapshot function");
+ return -EOPNOTSUPP;
+ }
- nvm_size = hw->flash.flash_size;
nvm_data = vzalloc(nvm_size);
if (!nvm_data)
return -ENOMEM;
+ num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE);
+ tmp = nvm_data;
+ left = nvm_size;
+
+ /* Some systems take longer to read the NVM than others, which causes the
+ * FW to reclaim the NVM lock before the entire NVM has been read. Fix
+ * this by breaking the reads of the NVM into smaller chunks that will
+ * probably not take as long. This has some overhead since we are
+ * increasing the number of AQ commands, but it should always work.
+ */
+ for (i = 0; i < num_blks; i++) {
+ u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left);
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (status) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ status, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ vfree(nvm_data);
+ return -EIO;
+ }
+
+ status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE,
+ &read_sz, tmp, read_shadow_ram);
+ if (status) {
+ dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
+ read_sz, status, hw->adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
+ ice_release_nvm(hw);
+ vfree(nvm_data);
+ return -EIO;
+ }
+ ice_release_nvm(hw);
+
+ tmp += read_sz;
+ left -= read_sz;
+ }
+
+ *data = nvm_data;
+
+ return 0;
+}
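
The loop above caps each admin-queue read at ICE_DEVLINK_READ_BLK_SIZE so the FW does not reclaim the NVM semaphore mid-snapshot; the final chunk is clamped to whatever is left. A standalone sketch of the chunk-size arithmetic (editor's illustration; the flash size is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define BLK_SIZE (1024u * 1024u)	/* mirrors ICE_DEVLINK_READ_BLK_SIZE */

int main(void)
{
	uint32_t nvm_size = 8 * 1024 * 1024 + 4096;	/* hypothetical flash size */
	uint32_t left = nvm_size;
	uint32_t blks = (nvm_size + BLK_SIZE - 1) / BLK_SIZE;	/* DIV_ROUND_UP */

	for (uint32_t i = 0; i < blks; i++) {
		uint32_t read_sz = left < BLK_SIZE ? left : BLK_SIZE;	/* min_t() */

		printf("block %u: offset %u, %u bytes\n",
		       (unsigned)i, (unsigned)(i * BLK_SIZE), (unsigned)read_sz);
		left -= read_sz;
	}
	return 0;
}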
+
+/**
+ * ice_devlink_nvm_read - Read a portion of NVM flash contents
+ * @devlink: the devlink instance
+ * @ops: the devlink region being read
+ * @extack: extended ACK response structure
+ * @offset: the offset to start at
+ * @size: the amount to read
+ * @data: the data buffer to read into
+ *
+ * This function is called in response to DEVLINK_CMD_REGION_READ to directly
+ * read a section of the NVM contents.
+ *
+ * It reads from either the nvm-flash or shadow-ram region contents.
+ *
+ * @returns zero on success, and fills the provided data buffer. Returns a non-zero
+ * error code on failure.
+ */
+static int ice_devlink_nvm_read(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u64 offset, u32 size, u8 *data)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ bool read_shadow_ram;
+ u64 nvm_size;
+ int status;
+
+ if (ops == &ice_nvm_region_ops) {
+ read_shadow_ram = false;
+ nvm_size = hw->flash.flash_size;
+ } else if (ops == &ice_sram_region_ops) {
+ read_shadow_ram = true;
+ nvm_size = hw->flash.sr_words * 2u;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected region in snapshot function");
+ return -EOPNOTSUPP;
+ }
+
+ if (offset + size >= nvm_size) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot read beyond the region size");
+ return -ERANGE;
+ }
+
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status) {
dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
- vfree(nvm_data);
return -EIO;
}
- status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false);
+ status = ice_read_flat_nvm(hw, (u32)offset, &size, data,
+ read_shadow_ram);
if (status) {
dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n",
- nvm_size, status, hw->adminq.sq_last_status);
+ size, status, hw->adminq.sq_last_status);
NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents");
ice_release_nvm(hw);
- vfree(nvm_data);
return -EIO;
}
-
ice_release_nvm(hw);
- *data = nvm_data;
-
return 0;
}
@@ -675,8 +1811,8 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink,
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
void *devcaps;
+ int status;
devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
if (!devcaps)
@@ -689,7 +1825,7 @@ ice_devlink_devcaps_snapshot(struct devlink *devlink,
status, hw->adminq.sq_last_status);
NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
vfree(devcaps);
- return -EIO;
+ return status;
}
*data = (u8 *)devcaps;
@@ -701,6 +1837,14 @@ static const struct devlink_region_ops ice_nvm_region_ops = {
.name = "nvm-flash",
.destructor = vfree,
.snapshot = ice_devlink_nvm_snapshot,
+ .read = ice_devlink_nvm_read,
+};
+
+static const struct devlink_region_ops ice_sram_region_ops = {
+ .name = "shadow-ram",
+ .destructor = vfree,
+ .snapshot = ice_devlink_nvm_snapshot,
+ .read = ice_devlink_nvm_read,
};
static const struct devlink_region_ops ice_devcaps_region_ops = {
@@ -720,7 +1864,7 @@ void ice_devlink_init_regions(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
struct device *dev = ice_pf_to_dev(pf);
- u64 nvm_size;
+ u64 nvm_size, sram_size;
nvm_size = pf->hw.flash.flash_size;
pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
@@ -731,6 +1875,15 @@ void ice_devlink_init_regions(struct ice_pf *pf)
pf->nvm_region = NULL;
}
+ sram_size = pf->hw.flash.sr_words * 2u;
+ pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops,
+ 1, sram_size);
+ if (IS_ERR(pf->sram_region)) {
+ dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n",
+ PTR_ERR(pf->sram_region));
+ pf->sram_region = NULL;
+ }
+
pf->devcaps_region = devlink_region_create(devlink,
&ice_devcaps_region_ops, 10,
ICE_AQ_MAX_BUF_LEN);
@@ -751,6 +1904,10 @@ void ice_devlink_destroy_regions(struct ice_pf *pf)
{
if (pf->nvm_region)
devlink_region_destroy(pf->nvm_region);
+
+ if (pf->sram_region)
+ devlink_region_destroy(pf->sram_region);
+
if (pf->devcaps_region)
devlink_region_destroy(pf->devcaps_region);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index b7f9551e4fc4..6ec96779f52e 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -8,6 +8,8 @@ struct ice_pf *ice_allocate_pf(struct device *dev);
void ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
+int ice_devlink_register_params(struct ice_pf *pf);
+void ice_devlink_unregister_params(struct ice_pf *pf);
int ice_devlink_create_pf_port(struct ice_pf *pf);
void ice_devlink_destroy_pf_port(struct ice_pf *pf);
int ice_devlink_create_vf_port(struct ice_vf *vf);
@@ -16,4 +18,7 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf);
void ice_devlink_init_regions(struct ice_pf *pf);
void ice_devlink_destroy_regions(struct ice_pf *pf);
+int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *vsi);
+void ice_tear_down_devlink_rate_tree(struct ice_pf *pf);
+
#endif /* _ICE_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index d1d7389b0bff..f6dd3f8fd936 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -10,6 +10,101 @@
#include "ice_tc_lib.h"
/**
+ * ice_eswitch_add_vf_mac_rule - add adv rule with VF's MAC
+ * @pf: pointer to PF struct
+ * @vf: pointer to VF struct
+ * @mac: VF's MAC address
+ *
+ * This function adds an advanced rule that forwards packets with the
+ * VF's MAC address (src MAC) to the corresponding switchdev ctrl VSI queue.
+ */
+int
+ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf, const u8 *mac)
+{
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_adv_rule_info rule_info = { 0 };
+ struct ice_adv_lkup_elem *list;
+ struct ice_hw *hw = &pf->hw;
+ const u16 lkups_cnt = 1;
+ int err;
+
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ return -ENOMEM;
+
+ list[0].type = ICE_MAC_OFOS;
+ ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
+ eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);
+
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
+ rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
+ rule_info.rx = false;
+ rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
+ ctrl_vsi->rxq_map[vf->vf_id];
+ rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
+ rule_info.flags_info.act_valid = true;
+ rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
+
+ err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
+ vf->repr->mac_rule);
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "Unable to add VF mac rule in switchdev mode for VF %d",
+ vf->vf_id);
+ else
+ vf->repr->rule_added = true;
+
+ kfree(list);
+ return err;
+}
+
+/**
+ * ice_eswitch_replay_vf_mac_rule - replay adv rule with VF's MAC
+ * @vf: pointer to VF struct
+ *
+ * This function replays VF's MAC rule after reset.
+ */
+void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
+{
+ int err;
+
+ if (!ice_is_switchdev_running(vf->pf))
+ return;
+
+ if (is_valid_ether_addr(vf->hw_lan_addr)) {
+ err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
+ vf->hw_lan_addr);
+ if (err) {
+ dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
+ vf->hw_lan_addr, vf->vf_id, err);
+ return;
+ }
+ vf->num_mac++;
+
+ ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
+ }
+}
+
+/**
+ * ice_eswitch_del_vf_mac_rule - delete adv rule with VF's MAC
+ * @vf: pointer to the VF struct
+ *
+ * Delete the advanced rule that was used to forward packets with the VF's MAC
+ * address (src MAC) to the corresponding switchdev ctrl VSI queue.
+ */
+void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf)
+{
+ if (!ice_is_switchdev_running(vf->pf))
+ return;
+
+ if (!vf->repr->rule_added)
+ return;
+
+ ice_rem_adv_rule_by_id(&vf->pf->hw, vf->repr->mac_rule);
+ vf->repr->rule_added = false;
+}
+
+/**
* ice_eswitch_setup_env - configure switchdev HW filters
* @pf: pointer to PF struct
*
@@ -21,10 +116,12 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
struct net_device *uplink_netdev = uplink_vsi->netdev;
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
- struct ice_port_info *pi = pf->hw.port_info;
+ struct ice_vsi_vlan_ops *vlan_ops;
bool rule_added = false;
- ice_vsi_manage_vlan_stripping(ctrl_vsi, false);
+ vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi);
+ if (vlan_ops->dis_stripping(ctrl_vsi))
+ return -ENODEV;
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
@@ -33,40 +130,28 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
__dev_mc_unsync(uplink_netdev, NULL);
netif_addr_unlock_bh(uplink_netdev);
- if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI))
+ if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_def_rx;
- if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
- if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
+ if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
+ if (ice_set_dflt_vsi(uplink_vsi))
goto err_def_rx;
rule_added = true;
}
- if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX))
- goto err_def_tx;
-
if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_uplink;
if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_control;
- if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id,
- ICE_FLTR_TX,
- ICE_SINGLE_ACT_LB_ENABLE))
- goto err_update_action;
-
return 0;
-err_update_action:
- ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
- ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
-err_def_tx:
if (rule_added)
- ice_clear_dflt_vsi(uplink_vsi->vsw);
+ ice_clear_dflt_vsi(uplink_vsi);
err_def_rx:
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
@@ -91,10 +176,20 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
int q_id;
ice_for_each_txq(vsi, q_id) {
- struct ice_repr *repr = pf->vf[q_id].repr;
- struct ice_q_vector *q_vector = repr->q_vector;
- struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
- struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+ struct ice_repr *repr;
+ struct ice_vf *vf;
+
+ vf = ice_get_vf_by_id(pf, q_id);
+ if (WARN_ON(!vf))
+ continue;
+
+ repr = vf->repr;
+ q_vector = repr->q_vector;
+ tx_ring = vsi->tx_rings[q_id];
+ rx_ring = vsi->rx_rings[q_id];
q_vector->vsi = vsi;
q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
@@ -114,6 +209,38 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
rx_ring->q_vector = q_vector;
rx_ring->next = NULL;
rx_ring->netdev = repr->netdev;
+
+ ice_put_vf(vf);
+ }
+}
+
+/**
+ * ice_eswitch_release_reprs - clear PR VSIs configuration
+ * @pf: pointer to PF struct
+ * @ctrl_vsi: pointer to switchdev control VSI
+ */
+static void
+ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ ice_for_each_vf(pf, bkt, vf) {
+ struct ice_vsi *vsi = vf->repr->src_vsi;
+
+ /* Skip VFs that aren't configured */
+ if (!vf->repr->dst)
+ continue;
+
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ metadata_dst_free(vf->repr->dst);
+ vf->repr->dst = NULL;
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
+ ICE_FWD_TO_VSI);
+
+ netif_napi_del(&vf->repr->q_vector->napi);
}
}
@@ -125,35 +252,39 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
{
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
int max_vsi_num = 0;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
- ice_for_each_vf(pf, i) {
- struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
- struct ice_vf *vf = &pf->vf[i];
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ ice_for_each_vf(pf, bkt, vf) {
+ struct ice_vsi *vsi = vf->repr->src_vsi;
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
GFP_KERNEL);
if (!vf->repr->dst) {
ice_fltr_add_mac_and_broadcast(vsi,
- vf->hw_lan_addr.addr,
+ vf->hw_lan_addr,
ICE_FWD_TO_VSI);
goto err;
}
if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
ice_fltr_add_mac_and_broadcast(vsi,
- vf->hw_lan_addr.addr,
+ vf->hw_lan_addr,
ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst);
+ vf->repr->dst = NULL;
goto err;
}
- if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) {
+ if (ice_vsi_add_vlan_zero(vsi)) {
ice_fltr_add_mac_and_broadcast(vsi,
- vf->hw_lan_addr.addr,
+ vf->hw_lan_addr,
ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst);
+ vf->repr->dst = NULL;
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
goto err;
}
@@ -161,27 +292,17 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
if (max_vsi_num < vsi->vsi_num)
max_vsi_num = vsi->vsi_num;
- netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi,
+ ice_napi_poll);
netif_keep_dst(vf->repr->netdev);
}
- kfree(ctrl_vsi->target_netdevs);
-
- ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1,
- sizeof(*ctrl_vsi->target_netdevs),
- GFP_KERNEL);
- if (!ctrl_vsi->target_netdevs)
- goto err;
-
- ice_for_each_vf(pf, i) {
- struct ice_repr *repr = pf->vf[i].repr;
+ ice_for_each_vf(pf, bkt, vf) {
+ struct ice_repr *repr = vf->repr;
struct ice_vsi *vsi = repr->src_vsi;
struct metadata_dst *dst;
- ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev;
-
dst = repr->dst;
dst->u.port_info.port_id = vsi->vsi_num;
dst->u.port_info.lower_dev = repr->netdev;
@@ -191,44 +312,12 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
return 0;
err:
- for (i = i - 1; i >= 0; i--) {
- struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
- struct ice_vf *vf = &pf->vf[i];
-
- ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
- metadata_dst_free(vf->repr->dst);
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
- ICE_FWD_TO_VSI);
- }
+ ice_eswitch_release_reprs(pf, ctrl_vsi);
return -ENODEV;
}
/**
- * ice_eswitch_release_reprs - clear PR VSIs configuration
- * @pf: poiner to PF struct
- * @ctrl_vsi: pointer to switchdev control VSI
- */
-static void
-ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
-{
- int i;
-
- kfree(ctrl_vsi->target_netdevs);
- ice_for_each_vf(pf, i) {
- struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
- struct ice_vf *vf = &pf->vf[i];
-
- ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
- metadata_dst_free(vf->repr->dst);
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
- ICE_FWD_TO_VSI);
-
- netif_napi_del(&vf->repr->q_vector->napi);
- }
-}
-
-/**
* ice_eswitch_update_repr - reconfigure VF port representor
* @vsi: VF VSI for which port representor is configured
*/
@@ -242,15 +331,16 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
if (!ice_is_switchdev_running(pf))
return;
- vf = &pf->vf[vsi->vf_id];
+ vf = vsi->vf;
repr = vf->repr;
repr->src_vsi = vsi;
repr->dst->u.port_info.port_id = vsi->vsi_num;
ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
if (ret) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
- dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
+ dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
+ vsi->vf->vf_id);
}
}
@@ -271,7 +361,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
np = netdev_priv(netdev);
vsi = np->vsi;
- if (ice_is_reset_in_progress(vsi->back->state))
+ if (ice_is_reset_in_progress(vsi->back->state) ||
+ test_bit(ICE_VF_DIS, vsi->back->state))
return NETDEV_TX_BUSY;
repr = ice_netdev_to_repr(netdev);
@@ -320,8 +411,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
- ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
- ice_clear_dflt_vsi(uplink_vsi->vsw);
+ ice_clear_dflt_vsi(uplink_vsi);
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
@@ -335,7 +425,13 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_SWITCHDEV_CTRL;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
/**
@@ -344,10 +440,13 @@ ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
*/
static void ice_eswitch_napi_del(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
- ice_for_each_vf(pf, i)
- netif_napi_del(&pf->vf[i].repr->q_vector->napi);
+ ice_for_each_vf(pf, bkt, vf)
+ netif_napi_del(&vf->repr->q_vector->napi);
}
/**
@@ -356,10 +455,13 @@ static void ice_eswitch_napi_del(struct ice_pf *pf)
*/
static void ice_eswitch_napi_enable(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
- ice_for_each_vf(pf, i)
- napi_enable(&pf->vf[i].repr->q_vector->napi);
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ ice_for_each_vf(pf, bkt, vf)
+ napi_enable(&vf->repr->q_vector->napi);
}
/**
@@ -368,28 +470,13 @@ static void ice_eswitch_napi_enable(struct ice_pf *pf)
*/
static void ice_eswitch_napi_disable(struct ice_pf *pf)
{
- int i;
-
- ice_for_each_vf(pf, i)
- napi_disable(&pf->vf[i].repr->q_vector->napi);
-}
-
-/**
- * ice_eswitch_set_rxdid - configure rxdid on all Rx queues from VSI
- * @vsi: VSI to setup rxdid on
- * @rxdid: flex descriptor id
- */
-static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid)
-{
- struct ice_hw *hw = &vsi->back->hw;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
- ice_for_each_rxq(vsi, i) {
- struct ice_rx_ring *ring = vsi->rx_rings[i];
- u16 pf_q = vsi->rxq_map[ring->q_index];
+ lockdep_assert_held(&pf->vfs.table_lock);
- ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
- }
+ ice_for_each_vf(pf, bkt, vf)
+ napi_disable(&vf->repr->q_vector->napi);
}
/**
@@ -425,8 +512,6 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
ice_eswitch_napi_enable(pf);
- ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
-
return 0;
err_setup_reprs:
@@ -448,6 +533,7 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
ice_eswitch_napi_disable(pf);
ice_eswitch_release_env(pf);
+ ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx);
ice_eswitch_release_reprs(pf, ctrl_vsi);
ice_vsi_release(ctrl_vsi);
ice_repr_rem_from_all_vfs(pf);
@@ -468,7 +554,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (pf->eswitch_mode == mode)
return 0;
- if (pf->num_alloc_vfs) {
+ if (ice_has_vfs(pf)) {
dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is allowed only if there is no VFs created");
NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is allowed only if there is no VFs created");
return -EOPNOTSUPP;
@@ -497,34 +583,6 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
}
/**
- * ice_eswitch_get_target_netdev - return port representor netdev
- * @rx_ring: pointer to Rx ring
- * @rx_desc: pointer to Rx descriptor
- *
- * When working in switchdev mode context (when control VSI is used), this
- * function returns netdev of appropriate port representor. For non-switchdev
- * context, regular netdev associated with Rx ring is returned.
- */
-struct net_device *
-ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc)
-{
- struct ice_32b_rx_flex_desc_nic_2 *desc;
- struct ice_vsi *vsi = rx_ring->vsi;
- struct ice_vsi *control_vsi;
- u16 target_vsi_id;
-
- control_vsi = vsi->back->switchdev.control_vsi;
- if (vsi != control_vsi)
- return rx_ring->netdev;
-
- desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
- target_vsi_id = le16_to_cpu(desc->src_vsi);
-
- return vsi->target_netdevs[target_vsi_id];
-}
-
-/**
* ice_eswitch_mode_get - get current eswitch mode
* @devlink: pointer to devlink structure
* @mode: output parameter for current eswitch mode
@@ -587,16 +645,17 @@ int ice_eswitch_configure(struct ice_pf *pf)
*/
static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
{
- struct ice_repr *repr;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
if (test_bit(ICE_DOWN, pf->state))
return;
- ice_for_each_vf(pf, i) {
- repr = pf->vf[i].repr;
- if (repr)
- ice_repr_start_tx_queues(repr);
+ ice_for_each_vf(pf, bkt, vf) {
+ if (vf->repr)
+ ice_repr_start_tx_queues(vf->repr);
}
}
@@ -606,16 +665,17 @@ static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
*/
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
{
- struct ice_repr *repr;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
if (test_bit(ICE_DOWN, pf->state))
return;
- ice_for_each_vf(pf, i) {
- repr = pf->vf[i].repr;
- if (repr)
- ice_repr_stop_tx_queues(repr);
+ ice_for_each_vf(pf, bkt, vf) {
+ if (vf->repr)
+ ice_repr_stop_tx_queues(vf->repr);
}
}
@@ -648,7 +708,6 @@ int ice_eswitch_rebuild(struct ice_pf *pf)
return status;
ice_eswitch_napi_enable(pf);
- ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
ice_eswitch_start_all_tx_queues(pf);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index 364cd2a79c37..6a413331572b 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -20,10 +20,11 @@ bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
void ice_eswitch_update_repr(struct ice_vsi *vsi);
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
-
-struct net_device *
-ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc);
+int
+ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf,
+ const u8 *mac);
+void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf);
+void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf);
void ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off);
@@ -33,6 +34,15 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
static inline void ice_eswitch_release(struct ice_pf *pf) { }
static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
+static inline void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf) { }
+static inline void ice_eswitch_del_vf_mac_rule(struct ice_vf *vf) { }
+
+static inline int
+ice_eswitch_add_vf_mac_rule(struct ice_pf *pf, struct ice_vf *vf,
+ const u8 *mac)
+{
+ return -EOPNOTSUPP;
+}
static inline void
ice_eswitch_set_target_vsi(struct sk_buff *skb,
@@ -42,7 +52,7 @@ static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
static inline int ice_eswitch_configure(struct ice_pf *pf)
{
- return -EOPNOTSUPP;
+ return 0;
}
static inline int ice_eswitch_rebuild(struct ice_pf *pf)
@@ -67,13 +77,6 @@ static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
return false;
}
-static inline struct net_device *
-ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc)
-{
- return rx_ring->netdev;
-}
-
static inline netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 572519e402f4..f86e814354a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -136,6 +136,11 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
ICE_PF_STAT("fdir_sb_match.nic", stats.fd_sb_match),
ICE_PF_STAT("fdir_sb_status.nic", stats.fd_sb_status),
+ ICE_PF_STAT("tx_hwtstamp_skipped", ptp.tx_hwtstamp_skipped),
+ ICE_PF_STAT("tx_hwtstamp_timeouts", ptp.tx_hwtstamp_timeouts),
+ ICE_PF_STAT("tx_hwtstamp_flushed", ptp.tx_hwtstamp_flushed),
+ ICE_PF_STAT("tx_hwtstamp_discarded", ptp.tx_hwtstamp_discarded),
+ ICE_PF_STAT("late_cached_phc_updates", ptp.late_cached_phc_updates),
};
static const u32 ice_regs_dump_list[] = {
@@ -146,6 +151,175 @@ static const u32 ice_regs_dump_list[] = {
QINT_RQCTL(0),
PFINT_OICR_ENA,
QRX_ITR(0),
+#define GLDCB_TLPM_PCI_DM 0x000A0180
+ GLDCB_TLPM_PCI_DM,
+#define GLDCB_TLPM_TC2PFC 0x000A0194
+ GLDCB_TLPM_TC2PFC,
+#define TCDCB_TLPM_WAIT_DM(_i) (0x000A0080 + ((_i) * 4))
+ TCDCB_TLPM_WAIT_DM(0),
+ TCDCB_TLPM_WAIT_DM(1),
+ TCDCB_TLPM_WAIT_DM(2),
+ TCDCB_TLPM_WAIT_DM(3),
+ TCDCB_TLPM_WAIT_DM(4),
+ TCDCB_TLPM_WAIT_DM(5),
+ TCDCB_TLPM_WAIT_DM(6),
+ TCDCB_TLPM_WAIT_DM(7),
+ TCDCB_TLPM_WAIT_DM(8),
+ TCDCB_TLPM_WAIT_DM(9),
+ TCDCB_TLPM_WAIT_DM(10),
+ TCDCB_TLPM_WAIT_DM(11),
+ TCDCB_TLPM_WAIT_DM(12),
+ TCDCB_TLPM_WAIT_DM(13),
+ TCDCB_TLPM_WAIT_DM(14),
+ TCDCB_TLPM_WAIT_DM(15),
+ TCDCB_TLPM_WAIT_DM(16),
+ TCDCB_TLPM_WAIT_DM(17),
+ TCDCB_TLPM_WAIT_DM(18),
+ TCDCB_TLPM_WAIT_DM(19),
+ TCDCB_TLPM_WAIT_DM(20),
+ TCDCB_TLPM_WAIT_DM(21),
+ TCDCB_TLPM_WAIT_DM(22),
+ TCDCB_TLPM_WAIT_DM(23),
+ TCDCB_TLPM_WAIT_DM(24),
+ TCDCB_TLPM_WAIT_DM(25),
+ TCDCB_TLPM_WAIT_DM(26),
+ TCDCB_TLPM_WAIT_DM(27),
+ TCDCB_TLPM_WAIT_DM(28),
+ TCDCB_TLPM_WAIT_DM(29),
+ TCDCB_TLPM_WAIT_DM(30),
+ TCDCB_TLPM_WAIT_DM(31),
+#define GLPCI_WATMK_CLNT_PIPEMON 0x000BFD90
+ GLPCI_WATMK_CLNT_PIPEMON,
+#define GLPCI_CUR_CLNT_COMMON 0x000BFD84
+ GLPCI_CUR_CLNT_COMMON,
+#define GLPCI_CUR_CLNT_PIPEMON 0x000BFD88
+ GLPCI_CUR_CLNT_PIPEMON,
+#define GLPCI_PCIERR 0x0009DEB0
+ GLPCI_PCIERR,
+#define GLPSM_DEBUG_CTL_STATUS 0x000B0600
+ GLPSM_DEBUG_CTL_STATUS,
+#define GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT 0x000B0680
+ GLPSM0_DEBUG_FIFO_OVERFLOW_DETECT,
+#define GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT 0x000B0684
+ GLPSM0_DEBUG_FIFO_UNDERFLOW_DETECT,
+#define GLPSM0_DEBUG_DT_OUT_OF_WINDOW 0x000B0688
+ GLPSM0_DEBUG_DT_OUT_OF_WINDOW,
+#define GLPSM0_DEBUG_INTF_HW_ERROR_DETECT 0x000B069C
+ GLPSM0_DEBUG_INTF_HW_ERROR_DETECT,
+#define GLPSM0_DEBUG_MISC_HW_ERROR_DETECT 0x000B06A0
+ GLPSM0_DEBUG_MISC_HW_ERROR_DETECT,
+#define GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT 0x000B0E80
+ GLPSM1_DEBUG_FIFO_OVERFLOW_DETECT,
+#define GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT 0x000B0E84
+ GLPSM1_DEBUG_FIFO_UNDERFLOW_DETECT,
+#define GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT 0x000B0E88
+ GLPSM1_DEBUG_SRL_FIFO_OVERFLOW_DETECT,
+#define GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT 0x000B0E8C
+ GLPSM1_DEBUG_SRL_FIFO_UNDERFLOW_DETECT,
+#define GLPSM1_DEBUG_MISC_HW_ERROR_DETECT 0x000B0E90
+ GLPSM1_DEBUG_MISC_HW_ERROR_DETECT,
+#define GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT 0x000B1680
+ GLPSM2_DEBUG_FIFO_OVERFLOW_DETECT,
+#define GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT 0x000B1684
+ GLPSM2_DEBUG_FIFO_UNDERFLOW_DETECT,
+#define GLPSM2_DEBUG_MISC_HW_ERROR_DETECT 0x000B1688
+ GLPSM2_DEBUG_MISC_HW_ERROR_DETECT,
+#define GLTDPU_TCLAN_COMP_BOB(_i) (0x00049ADC + ((_i) * 4))
+ GLTDPU_TCLAN_COMP_BOB(1),
+ GLTDPU_TCLAN_COMP_BOB(2),
+ GLTDPU_TCLAN_COMP_BOB(3),
+ GLTDPU_TCLAN_COMP_BOB(4),
+ GLTDPU_TCLAN_COMP_BOB(5),
+ GLTDPU_TCLAN_COMP_BOB(6),
+ GLTDPU_TCLAN_COMP_BOB(7),
+ GLTDPU_TCLAN_COMP_BOB(8),
+#define GLTDPU_TCB_CMD_BOB(_i) (0x0004975C + ((_i) * 4))
+ GLTDPU_TCB_CMD_BOB(1),
+ GLTDPU_TCB_CMD_BOB(2),
+ GLTDPU_TCB_CMD_BOB(3),
+ GLTDPU_TCB_CMD_BOB(4),
+ GLTDPU_TCB_CMD_BOB(5),
+ GLTDPU_TCB_CMD_BOB(6),
+ GLTDPU_TCB_CMD_BOB(7),
+ GLTDPU_TCB_CMD_BOB(8),
+#define GLTDPU_PSM_UPDATE_BOB(_i) (0x00049B5C + ((_i) * 4))
+ GLTDPU_PSM_UPDATE_BOB(1),
+ GLTDPU_PSM_UPDATE_BOB(2),
+ GLTDPU_PSM_UPDATE_BOB(3),
+ GLTDPU_PSM_UPDATE_BOB(4),
+ GLTDPU_PSM_UPDATE_BOB(5),
+ GLTDPU_PSM_UPDATE_BOB(6),
+ GLTDPU_PSM_UPDATE_BOB(7),
+ GLTDPU_PSM_UPDATE_BOB(8),
+#define GLTCB_CMD_IN_BOB(_i) (0x000AE288 + ((_i) * 4))
+ GLTCB_CMD_IN_BOB(1),
+ GLTCB_CMD_IN_BOB(2),
+ GLTCB_CMD_IN_BOB(3),
+ GLTCB_CMD_IN_BOB(4),
+ GLTCB_CMD_IN_BOB(5),
+ GLTCB_CMD_IN_BOB(6),
+ GLTCB_CMD_IN_BOB(7),
+ GLTCB_CMD_IN_BOB(8),
+#define GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(_i) (0x000FC148 + ((_i) * 4))
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(1),
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(2),
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(3),
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(4),
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(5),
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(6),
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(7),
+ GLLAN_TCLAN_FETCH_CTL_FBK_BOB_CTL(8),
+#define GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(_i) (0x000FC248 + ((_i) * 4))
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(1),
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(2),
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(3),
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(4),
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(5),
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(6),
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(7),
+ GLLAN_TCLAN_FETCH_CTL_SCHED_BOB_CTL(8),
+#define GLLAN_TCLAN_CACHE_CTL_BOB_CTL(_i) (0x000FC1C8 + ((_i) * 4))
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(1),
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(2),
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(3),
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(4),
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(5),
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(6),
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(7),
+ GLLAN_TCLAN_CACHE_CTL_BOB_CTL(8),
+#define GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(_i) (0x000FC188 + ((_i) * 4))
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(1),
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(2),
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(3),
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(4),
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(5),
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(6),
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(7),
+ GLLAN_TCLAN_FETCH_CTL_PROC_BOB_CTL(8),
+#define GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(_i) (0x000FC288 + ((_i) * 4))
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(1),
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(2),
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(3),
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(4),
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(5),
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(6),
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(7),
+ GLLAN_TCLAN_FETCH_CTL_PCIE_RD_BOB_CTL(8),
+#define PRTDCB_TCUPM_REG_CM(_i) (0x000BC360 + ((_i) * 4))
+ PRTDCB_TCUPM_REG_CM(0),
+ PRTDCB_TCUPM_REG_CM(1),
+ PRTDCB_TCUPM_REG_CM(2),
+ PRTDCB_TCUPM_REG_CM(3),
+#define PRTDCB_TCUPM_REG_DM(_i) (0x000BC3A0 + ((_i) * 4))
+ PRTDCB_TCUPM_REG_DM(0),
+ PRTDCB_TCUPM_REG_DM(1),
+ PRTDCB_TCUPM_REG_DM(2),
+ PRTDCB_TCUPM_REG_DM(3),
+#define PRTDCB_TLPM_REG_DM(_i) (0x000A0000 + ((_i) * 4))
+ PRTDCB_TLPM_REG_DM(0),
+ PRTDCB_TLPM_REG_DM(1),
+ PRTDCB_TLPM_REG_DM(2),
+ PRTDCB_TLPM_REG_DM(3),
};
struct ice_priv_flag {
@@ -164,6 +338,7 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("vf-true-promisc-support",
ICE_FLAG_VF_TRUE_PROMISC_ENA),
ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
+ ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING),
ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
@@ -189,19 +364,17 @@ __ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo,
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
nvm->eetrack, orom->major, orom->build, orom->patch);
+
+ strscpy(drvinfo->bus_info, pci_name(pf->pdev),
+ sizeof(drvinfo->bus_info));
}
static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
__ice_get_drvinfo(netdev, drvinfo, np->vsi);
-
- strscpy(drvinfo->bus_info, pci_name(pf->pdev),
- sizeof(drvinfo->bus_info));
-
drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}
@@ -270,9 +443,8 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
struct device *dev;
- int ret = 0;
+ int ret;
u8 *buf;
dev = ice_pf_to_dev(pf);
@@ -285,22 +457,18 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
if (!buf)
return -ENOMEM;
- status = ice_acquire_nvm(hw, ICE_RES_READ);
- if (status) {
- dev_err(dev, "ice_acquire_nvm failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (ret) {
+ dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
- status = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
- false);
- if (status) {
- dev_err(dev, "ice_read_flat_nvm failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_read_flat_nvm(hw, eeprom->offset, &eeprom->len, buf,
+ false);
+ if (ret) {
+ dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
goto release;
}
@@ -320,16 +488,20 @@ out:
*/
static bool ice_active_vfs(struct ice_pf *pf)
{
- unsigned int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
+ bool active = false;
+ struct ice_vf *vf;
+ unsigned int bkt;
- if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
- return true;
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ active = true;
+ break;
+ }
}
+ rcu_read_unlock();
- return false;
+ return active;
}
/**
@@ -342,14 +514,14 @@ static bool ice_active_vfs(struct ice_pf *pf)
static u64 ice_link_test(struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- enum ice_status status;
bool link_up = false;
+ int status;
netdev_info(netdev, "link test\n");
status = ice_get_link_status(np->vsi->port_info, &link_up);
if (status) {
- netdev_err(netdev, "link query error, status = %s\n",
- ice_stat_str(status));
+ netdev_err(netdev, "link query error, status = %d\n",
+ status);
return 1;
}
@@ -484,7 +656,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
if (status)
goto err_setup_rx_ring;
- status = ice_vsi_cfg(vsi);
+ status = ice_vsi_cfg_lan(vsi);
if (status)
goto err_setup_rx_ring;
@@ -492,7 +664,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
if (status)
goto err_start_rx_ring;
- return status;
+ return 0;
err_start_rx_ring:
ice_vsi_free_rx_rings(vsi);
@@ -660,7 +832,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
rx_desc = ICE_RX_DESC(rx_ring, i);
if (!(rx_desc->wb.status_error0 &
- cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)))
+ (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
+ cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
continue;
rx_buf = &rx_ring->rx_buf[i];
@@ -1052,8 +1225,7 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
struct ice_link_status *link_info;
struct ice_vsi *vsi = np->vsi;
struct ice_port_info *pi;
- enum ice_status status;
- int err = 0;
+ int err;
pi = vsi->port_info;
@@ -1079,12 +1251,10 @@ ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
if (!caps)
return -ENOMEM;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- caps, NULL);
- if (status) {
- err = -EAGAIN;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ caps, NULL);
+ if (err)
goto done;
- }
/* Set supported/configured FEC modes based on PHY capability */
if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC)
@@ -1203,7 +1373,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) {
if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) {
- enum ice_status status;
+ int status;
/* Disable FW LLDP engine */
status = ice_cfg_lldp_mib_change(&pf->hw, false);
@@ -1232,8 +1402,8 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
pf->dcbx_cap &= ~DCB_CAP_DCBX_LLD_MANAGED;
pf->dcbx_cap |= DCB_CAP_DCBX_HOST;
} else {
- enum ice_status status;
bool dcbx_agent_status;
+ int status;
if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) {
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
@@ -1288,19 +1458,26 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
}
if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
/* down and up VSI so that changes of Rx cfg are reflected. */
- ice_down(vsi);
- ice_up(vsi);
+ ice_down_up(vsi);
}
/* don't allow modification of this flag when a single VF is in
* promiscuous mode because it's not supported
*/
if (test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, change_flags) &&
- ice_is_any_vf_in_promisc(pf)) {
+ ice_is_any_vf_in_unicast_promisc(pf)) {
dev_err(dev, "Changing vf-true-promisc-support flag while VF(s) are in promiscuous mode not supported\n");
/* toggle bit back to previous state */
change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
ret = -EAGAIN;
}
+
+ if (test_bit(ICE_FLAG_VF_VLAN_PRUNING, change_flags) &&
+ ice_has_vfs(pf)) {
+ dev_err(dev, "vf-vlan-pruning: VLAN pruning cannot be changed while VFs are active.\n");
+ /* toggle bit back to previous state */
+ change_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags);
+ ret = -EOPNOTSUPP;
+ }
ethtool_exit:
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
return ret;
@@ -1367,9 +1544,9 @@ __ice_get_ethtool_stats(struct net_device *netdev,
ice_for_each_alloc_txq(vsi, j) {
tx_ring = READ_ONCE(vsi->tx_rings[j]);
- if (tx_ring) {
- data[i++] = tx_ring->stats.pkts;
- data[i++] = tx_ring->stats.bytes;
+ if (tx_ring && tx_ring->ring_stats) {
+ data[i++] = tx_ring->ring_stats->stats.pkts;
+ data[i++] = tx_ring->ring_stats->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
@@ -1378,9 +1555,9 @@ __ice_get_ethtool_stats(struct net_device *netdev,
ice_for_each_alloc_rxq(vsi, j) {
rx_ring = READ_ONCE(vsi->rx_rings[j]);
- if (rx_ring) {
- data[i++] = rx_ring->stats.pkts;
- data[i++] = rx_ring->stats.bytes;
+ if (rx_ring && rx_ring->ring_stats) {
+ data[i++] = rx_ring->ring_stats->stats.pkts;
+ data[i++] = rx_ring->ring_stats->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
@@ -1462,20 +1639,22 @@ ice_get_ethtool_stats(struct net_device *netdev,
/**
* ice_mask_min_supported_speeds
+ * @hw: pointer to the HW structure
* @phy_types_high: PHY type high
* @phy_types_low: PHY type low to apply minimum supported speeds mask
*
* Apply minimum supported speeds mask to PHY type low. These are the speeds
* for ethtool supported link mode.
*/
-static
-void ice_mask_min_supported_speeds(u64 phy_types_high, u64 *phy_types_low)
+static void
+ice_mask_min_supported_speeds(struct ice_hw *hw,
+ u64 phy_types_high, u64 *phy_types_low)
{
/* if QSFP connection with 100G speed, minimum supported speed is 25G */
if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G ||
phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G)
*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G;
- else
+ else if (!ice_is_100m_speed_supported(hw))
*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
}
@@ -1525,7 +1704,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo);
phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi);
- ice_mask_min_supported_speeds(phy_types_high, &phy_types_low);
+ ice_mask_min_supported_speeds(&pf->hw, phy_types_high,
+ &phy_types_low);
/* determine advertised modes based on link override only
* if it's supported and if the FW doesn't abstract the
* driver from having to account for link overrides
@@ -1770,8 +1950,7 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
ICE_PHY_TYPE_LOW_100G_CAUI4 |
ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC |
ICE_PHY_TYPE_LOW_100G_AUI4 |
- ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 |
- ICE_PHY_TYPE_LOW_100GBASE_CP2;
+ ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4;
phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |
ICE_PHY_TYPE_HIGH_100G_CAUI2 |
ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC |
@@ -1784,15 +1963,27 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
100000baseCR4_Full);
}
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_SR4 |
- ICE_PHY_TYPE_LOW_100GBASE_SR2;
- if (phy_types_low & phy_type_mask_lo) {
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseCR2_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseCR2_Full);
+ }
+
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseSR4_Full);
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
100000baseSR4_Full);
}
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseSR2_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseSR2_Full);
+ }
+
phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_LR4 |
ICE_PHY_TYPE_LOW_100GBASE_DR;
if (phy_types_low & phy_type_mask_lo) {
@@ -1804,14 +1995,20 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_KR4 |
ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4;
- phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4;
- if (phy_types_low & phy_type_mask_lo ||
- phy_types_high & phy_type_mask_hi) {
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseKR4_Full);
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
100000baseKR4_Full);
}
+
+ if (phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseKR2_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseKR2_Full);
+ }
+
}
#define TEST_SET_BITS_TIMEOUT 50
@@ -1938,8 +2135,7 @@ ice_get_link_ksettings(struct net_device *netdev,
struct ice_aqc_get_phy_caps_data *caps;
struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
- enum ice_status status;
- int err = 0;
+ int err;
ethtool_link_ksettings_zero_link_mode(ks, supported);
ethtool_link_ksettings_zero_link_mode(ks, advertising);
@@ -1990,12 +2186,10 @@ ice_get_link_ksettings(struct net_device *netdev,
if (!caps)
return -ENOMEM;
- status = ice_aq_get_phy_caps(vsi->port_info, false,
- ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
- if (status) {
- err = -EIO;
+ err = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_ACTIVE_CFG, caps, NULL);
+ if (err)
goto done;
- }
/* Set the advertised flow control based on the PHY capability */
if ((caps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) &&
@@ -2027,12 +2221,10 @@ ice_get_link_ksettings(struct net_device *netdev,
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
- status = ice_aq_get_phy_caps(vsi->port_info, false,
- ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL);
- if (status) {
- err = -EIO;
+ err = ice_aq_get_phy_caps(vsi->port_info, false,
+ ICE_AQC_REPORT_TOPO_CAP_MEDIA, caps, NULL);
+ if (err)
goto done;
- }
/* Set supported FEC modes based on PHY capability */
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
@@ -2067,17 +2259,15 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
100baseT_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_100MB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 1000baseX_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseX_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
1000baseT_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
1000baseKX_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 2500baseT_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 2500baseT_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
2500baseX_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
@@ -2086,9 +2276,8 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseT_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
- 10000baseKR_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseKR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseSR_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseLR_Full))
@@ -2112,9 +2301,8 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
50000baseCR2_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
- 50000baseKR2_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 50000baseKR2_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
50000baseSR2_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
@@ -2124,7 +2312,13 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
ethtool_link_ksettings_test_link_mode(ks, advertising,
100000baseLR4_ER4_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseKR4_Full))
+ 100000baseKR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseCR2_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseSR2_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseKR2_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_100GB;
return adv_link_speed;
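The hunk above is a straight OR-reduction: every advertised ethtool link mode belonging to the same wire speed sets one bit in the speed bitmap, so folding 1000baseX into the same test as 1000baseT/KX (and likewise for 10G/50G/100G) does not change the result, only the shape of the code. A minimal standalone sketch of that reduction, using made-up mode and speed constants rather than the ethtool/ice definitions:

#include <stdint.h>

/* hypothetical stand-ins for ethtool link-mode bits and AQ speed bits */
enum { MODE_1000T = 1 << 0, MODE_1000KX = 1 << 1, MODE_1000X = 1 << 2,
       MODE_10G_T = 1 << 3, MODE_10G_KR = 1 << 4 };
enum { SPEED_1G = 1 << 0, SPEED_10G = 1 << 1 };

static uint16_t modes_to_speed_bitmap(uint32_t adv_modes)
{
	uint16_t speeds = 0;

	/* any 1G variant (T, KX or X) advertises the 1G speed bit */
	if (adv_modes & (MODE_1000T | MODE_1000KX | MODE_1000X))
		speeds |= SPEED_1G;
	/* any 10G variant advertises the 10G speed bit */
	if (adv_modes & (MODE_10G_T | MODE_10G_KR))
		speeds |= SPEED_10G;

	return speeds;
}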
@@ -2190,6 +2384,42 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
}
/**
+ * ice_set_phy_type_from_speed - set phy_types based on speeds
+ * and advertised modes
+ * @ks: ethtool link ksettings struct
+ * @phy_type_low: pointer to the lower part of phy_type
+ * @phy_type_high: pointer to the higher part of phy_type
+ * @adv_link_speed: targeted link speeds bitmap
+ */
+static void
+ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks,
+ u64 *phy_type_low, u64 *phy_type_high,
+ u16 adv_link_speed)
+{
+ /* Handle 1000M speed in a special way because ice_update_phy_type
+ * enables all link modes, but having mixed copper and optical
+ * standards is not supported.
+ */
+ adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseT_Full))
+ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T |
+ ICE_PHY_TYPE_LOW_1G_SGMII;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseKX_Full))
+ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX;
+
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseX_Full))
+ *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX |
+ ICE_PHY_TYPE_LOW_1000BASE_LX;
+
+ ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed);
+}
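A sketch of the special case introduced above: 1G is removed from the bulk speed-to-PHY-type expansion and only the PHY types matching the explicitly advertised 1G modes are set, so copper, backplane and optical standards are not mixed. All constants and names below are illustrative stand-ins, not the driver's definitions:

#include <stdint.h>

enum { ADV_1000T = 1 << 0, ADV_1000KX = 1 << 1, ADV_1000X = 1 << 2 };
enum { PHY_1000_T = 1 << 0, PHY_1G_SGMII = 1 << 1, PHY_1000_KX = 1 << 2,
       PHY_1000_SX = 1 << 3, PHY_1000_LX = 1 << 4 };
#define SPEED_1G (1 << 5)

static uint16_t map_1g_modes(uint32_t adv_modes, uint64_t *phy_lo, uint16_t speeds)
{
	/* 1G is handled per advertised mode, so drop it from the bulk expansion */
	speeds &= ~SPEED_1G;

	if (adv_modes & ADV_1000T)
		*phy_lo |= PHY_1000_T | PHY_1G_SGMII;	/* copper only */
	if (adv_modes & ADV_1000KX)
		*phy_lo |= PHY_1000_KX;			/* backplane only */
	if (adv_modes & ADV_1000X)
		*phy_lo |= PHY_1000_SX | PHY_1000_LX;	/* optical only */

	return speeds;	/* remaining speeds still go through the generic expansion */
}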
+
+/**
* ice_set_link_ksettings - Set Speed and Duplex
* @netdev: network interface device structure
* @ks: ethtool ksettings
@@ -2210,11 +2440,10 @@ ice_set_link_ksettings(struct net_device *netdev,
struct ice_pf *pf = np->vsi->back;
struct ice_port_info *pi;
u8 autoneg_changed = 0;
- enum ice_status status;
u64 phy_type_high = 0;
u64 phy_type_low = 0;
- int err = 0;
bool linkup;
+ int err;
pi = np->vsi->port_info;
@@ -2234,15 +2463,13 @@ ice_set_link_ksettings(struct net_device *netdev,
/* Get the PHY capabilities based on media */
if (ice_fw_supports_report_dflt_cfg(pi->hw))
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
- phy_caps, NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
+ phy_caps, NULL);
else
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- phy_caps, NULL);
- if (status) {
- err = -EIO;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ phy_caps, NULL);
+ if (err)
goto done;
- }
/* save autoneg out of ksettings */
autoneg = copy_ks.base.autoneg;
@@ -2308,13 +2535,11 @@ ice_set_link_ksettings(struct net_device *netdev,
/* Call to get the current link speed */
pi->phy.get_link_info = true;
- status = ice_get_link_status(pi, &linkup);
- if (status) {
- err = -EIO;
+ err = ice_get_link_status(pi, &linkup);
+ if (err)
goto done;
- }
- curr_link_speed = pi->phy.link_info.link_speed;
+ curr_link_speed = pi->phy.curr_user_speed_req;
adv_link_speed = ice_ksettings_find_adv_link_speed(ks);
/* If speed didn't get set, set it to what it currently is.
@@ -2325,7 +2550,8 @@ ice_set_link_ksettings(struct net_device *netdev,
adv_link_speed = curr_link_speed;
/* Convert the advertise link speeds to their corresponded PHY_TYPE */
- ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed);
+ ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high,
+ adv_link_speed);
if (!autoneg_changed && adv_link_speed == curr_link_speed) {
netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -2381,10 +2607,9 @@ ice_set_link_ksettings(struct net_device *netdev,
}
/* make the aq call */
- status = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL);
- if (status) {
+ err = ice_aq_set_phy_cfg(&pf->hw, pi, &config, NULL);
+ if (err) {
netdev_info(netdev, "Set phy config failed,\n");
- err = -EIO;
goto done;
}
@@ -2522,9 +2747,9 @@ static int
ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
u64 hashed_flds;
+ int status;
u32 hdrs;
dev = ice_pf_to_dev(pf);
@@ -2550,9 +2775,9 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
if (status) {
- dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %s\n",
- vsi->vsi_num, ice_stat_str(status));
- return -EINVAL;
+ dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
+ vsi->vsi_num, status);
+ return status;
}
return 0;
@@ -2686,7 +2911,9 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
}
static void
-ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -2704,7 +2931,9 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
}
static int
-ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_tx_ring *xdp_rings = NULL;
@@ -2790,6 +3019,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
tx_rings[i].count = new_tx_cnt;
tx_rings[i].desc = NULL;
tx_rings[i].tx_buf = NULL;
+ tx_rings[i].tx_tstamps = &pf->ptp.port.tx;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
while (i--)
@@ -2846,6 +3076,7 @@ process_rx:
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_cnt;
+ rx_rings[i].cached_phctime = pf->ptp.cached_phc_time;
rx_rings[i].desc = NULL;
rx_rings[i].rx_buf = NULL;
/* this is to allow wr32 to have something to write to
@@ -2859,7 +3090,7 @@ process_rx:
/* allocate Rx buffers */
err = ice_alloc_rx_bufs(&rx_rings[i],
- ICE_DESC_UNUSED(&rx_rings[i]));
+ ICE_RX_DESC_UNUSED(&rx_rings[i]));
rx_unwind:
if (err) {
while (i) {
@@ -2949,7 +3180,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_port_info *pi = np->vsi->port_info;
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_dcbx_cfg *dcbx_cfg;
- enum ice_status status;
+ int status;
/* Initialize pause params */
pause->rx_pause = 0;
@@ -2999,11 +3230,10 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_vsi *vsi = np->vsi;
struct ice_hw *hw = &pf->hw;
struct ice_port_info *pi;
- enum ice_status status;
u8 aq_failures;
bool link_up;
- int err = 0;
u32 is_an;
+ int err;
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
@@ -3029,11 +3259,11 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return -ENOMEM;
/* Get current PHY config */
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
- NULL);
- if (status) {
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
+ NULL);
+ if (err) {
kfree(pcaps);
- return -EIO;
+ return err;
}
is_an = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
@@ -3069,22 +3299,19 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
return -EINVAL;
/* Set the FC mode and only restart AN if link is up */
- status = ice_set_fc(pi, &aq_failures, link_up);
+ err = ice_set_fc(pi, &aq_failures, link_up);
if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
- netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
}
@@ -3115,36 +3342,47 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
return np->vsi->rss_table_size;
}
-/**
- * ice_get_rxfh - get the Rx flow hash indirection table
- * @netdev: network interface device structure
- * @indir: indirection table
- * @key: hash key
- * @hfunc: hash function
- *
- * Reads the indirection table directly from the hardware.
- */
static int
-ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
+ u8 *key, u8 *hfunc, u32 rss_context)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- int err, i;
+ u16 qcount, offset;
+ int err, num_tc, i;
u8 *lut;
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ netdev_warn(netdev, "RSS is not supported on this VSI!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (rss_context && !ice_is_adq_active(pf)) {
+ netdev_err(netdev, "RSS context cannot be non-zero when ADQ is not configured.\n");
+ return -EINVAL;
+ }
+
+ qcount = vsi->mqprio_qopt.qopt.count[rss_context];
+ offset = vsi->mqprio_qopt.qopt.offset[rss_context];
+
+ if (rss_context && ice_is_adq_active(pf)) {
+ num_tc = vsi->mqprio_qopt.qopt.num_tc;
+ if (rss_context >= num_tc) {
+ netdev_err(netdev, "RSS context:%d > num_tc:%d\n",
+ rss_context, num_tc);
+ return -EINVAL;
+ }
+ /* Use channel VSI of given TC */
+ vsi = vsi->tc_map_vsi[rss_context];
+ }
+
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (!indir)
return 0;
- if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- /* RSS not supported return error here */
- netdev_warn(netdev, "RSS is not configured on this VSI!\n");
- return -EIO;
- }
-
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
return -ENOMEM;
@@ -3157,8 +3395,14 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
if (err)
goto out;
+ if (ice_is_adq_active(pf)) {
+ for (i = 0; i < vsi->rss_table_size; i++)
+ indir[i] = offset + lut[i] % qcount;
+ goto out;
+ }
+
for (i = 0; i < vsi->rss_table_size; i++)
- indir[i] = (u32)(lut[i]);
+ indir[i] = lut[i];
out:
kfree(lut);
@@ -3166,6 +3410,21 @@ out:
}
/**
+ * ice_get_rxfh - get the Rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function
+ *
+ * Reads the indirection table directly from the hardware.
+ */
+static int
+ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+{
+ return ice_get_rxfh_context(netdev, indir, key, hfunc, 0);
+}
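The ADQ branch above rewrites the LUT into the traffic class's absolute queue range: each LUT value is folded into the TC's queue count and shifted by the TC's queue offset. A self-contained sketch of that transform on plain arrays (names are illustrative, not the driver's):

#include <stdint.h>
#include <stddef.h>

/* map a per-VSI RSS LUT into the absolute queue range of one traffic class;
 * tc_qcount must be non-zero
 */
static void remap_lut_for_tc(uint32_t *indir, const uint8_t *lut, size_t n,
			     uint16_t tc_offset, uint16_t tc_qcount)
{
	for (size_t i = 0; i < n; i++)
		indir[i] = tc_offset + lut[i] % tc_qcount;
}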
+
+/**
* ice_set_rxfh - set the Rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
@@ -3399,7 +3658,9 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int new_rx = 0, new_tx = 0;
+ bool locked = false;
u32 curr_combined;
+ int ret = 0;
/* do not support changing channels in Safe Mode */
if (ice_is_safe_mode(pf)) {
@@ -3442,6 +3703,16 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
new_rx = ch->combined_count + ch->rx_count;
new_tx = ch->combined_count + ch->tx_count;
+ if (new_rx < vsi->tc_cfg.numtc) {
+ netdev_err(dev, "Cannot set less Rx channels, than Traffic Classes you have (%u)\n",
+ vsi->tc_cfg.numtc);
+ return -EINVAL;
+ }
+ if (new_tx < vsi->tc_cfg.numtc) {
+ netdev_err(dev, "Cannot set less Tx channels, than Traffic Classes you have (%u)\n",
+ vsi->tc_cfg.numtc);
+ return -EINVAL;
+ }
if (new_rx > ice_get_max_rxq(pf)) {
netdev_err(dev, "Maximum allowed Rx channels is %d\n",
ice_get_max_rxq(pf));
@@ -3453,15 +3724,33 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EINVAL;
}
- ice_vsi_recfg_qs(vsi, new_rx, new_tx);
+ if (pf->adev) {
+ mutex_lock(&pf->adev_mutex);
+ device_lock(&pf->adev->dev);
+ locked = true;
+ if (pf->adev->dev.driver) {
+ netdev_err(dev, "Cannot change channels when RDMA is active\n");
+ ret = -EBUSY;
+ goto adev_unlock;
+ }
+ }
+
+ ice_vsi_recfg_qs(vsi, new_rx, new_tx, locked);
- if (!netif_is_rxfh_configured(dev))
- return ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+ if (!netif_is_rxfh_configured(dev)) {
+ ret = ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+ goto adev_unlock;
+ }
/* Update rss_size due to change in Rx queues */
vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx);
- return 0;
+adev_unlock:
+ if (locked) {
+ device_unlock(&pf->adev->dev);
+ mutex_unlock(&pf->adev_mutex);
+ }
+ return ret;
}
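The channel-change path above takes the auxiliary-device locks only when an RDMA aux device exists, records that in `locked`, and funnels every exit through a single unlock label. A small pthread-based sketch of the same pattern, unrelated to the actual ice locks:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t aux_lock = PTHREAD_MUTEX_INITIALIZER;

static int reconfigure(bool have_aux_device, bool aux_driver_bound)
{
	bool locked = false;
	int ret = 0;

	if (have_aux_device) {
		pthread_mutex_lock(&aux_lock);
		locked = true;
		if (aux_driver_bound) {
			ret = -1;	/* cannot reconfigure while the aux driver is active */
			goto unlock;
		}
	}

	/* ... perform the reconfiguration here ... */

unlock:
	if (locked)
		pthread_mutex_unlock(&aux_lock);
	return ret;
}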
/**
@@ -3924,16 +4213,16 @@ ice_get_module_info(struct net_device *netdev,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u8 sff8472_comp = 0;
u8 sff8472_swap = 0;
u8 sff8636_rev = 0;
u8 value = 0;
+ int status;
status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR, 0x00, 0x00,
0, &value, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
switch (value) {
case ICE_MODULE_TYPE_SFP:
@@ -3941,12 +4230,12 @@ ice_get_module_info(struct net_device *netdev,
ICE_MODULE_SFF_8472_COMP, 0x00, 0,
&sff8472_comp, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
status = ice_aq_sff_eeprom(hw, 0, ICE_I2C_EEPROM_DEV_ADDR,
ICE_MODULE_SFF_8472_SWAP, 0x00, 0,
&sff8472_swap, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
if (sff8472_swap & ICE_MODULE_SFF_ADDR_MODE) {
modinfo->type = ETH_MODULE_SFF_8079;
@@ -3966,7 +4255,7 @@ ice_get_module_info(struct net_device *netdev,
ICE_MODULE_REVISION_ADDR, 0x00, 0,
&sff8636_rev, 1, 0, NULL);
if (status)
- return -EIO;
+ return status;
/* Check revision compliance */
if (sff8636_rev > 0x02) {
/* Module is SFF-8636 compliant */
@@ -4001,11 +4290,11 @@ ice_get_module_eeprom(struct net_device *netdev,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
bool is_sfp = false;
unsigned int i, j;
u16 offset = 0;
u8 page = 0;
+ int status;
if (!ee || !ee->len || !data)
return -EINVAL;
@@ -4013,7 +4302,7 @@ ice_get_module_eeprom(struct net_device *netdev,
status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, 0, value, 1, 0,
NULL);
if (status)
- return -EIO;
+ return status;
if (value[0] == ICE_MODULE_TYPE_SFP)
is_sfp = true;
@@ -4042,6 +4331,8 @@ ice_get_module_eeprom(struct net_device *netdev,
* SFP modules only ever use page 0.
*/
if (page == 0 || !(data[0x2] & 0x4)) {
+ u32 copy_len;
+
/* If i2c bus is busy due to slow page change or
* link management access, call can fail. This is normal.
* So we retry this a few times.
@@ -4065,8 +4356,8 @@ ice_get_module_eeprom(struct net_device *netdev,
}
/* Make sure we have enough room for the new block */
- if ((i + SFF_READ_BLOCK_SIZE) < ee->len)
- memcpy(data + i, value, SFF_READ_BLOCK_SIZE);
+ copy_len = min_t(u32, SFF_READ_BLOCK_SIZE, ee->len - i);
+ memcpy(data + i, value, copy_len);
}
}
return 0;
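The min_t() change above clamps the final block so the copy never runs past the caller's buffer: instead of silently skipping a block that only partially fits, just the bytes that still fit are copied. Equivalent logic as a standalone sketch (BLOCK_SIZE and the names are placeholders):

#include <string.h>
#include <stddef.h>

#define BLOCK_SIZE 8

/* copy one BLOCK_SIZE chunk at offset i, clamped to the destination length;
 * the caller guarantees i < dst_len
 */
static void copy_block(unsigned char *dst, size_t dst_len, size_t i,
		       const unsigned char *block)
{
	size_t copy_len = BLOCK_SIZE;

	if (dst_len - i < copy_len)
		copy_len = dst_len - i;	/* partial last block */
	memcpy(dst + i, block, copy_len);
}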
@@ -4106,6 +4397,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_pauseparam = ice_set_pauseparam,
.get_rxfh_key_size = ice_get_rxfh_key_size,
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
+ .get_rxfh_context = ice_get_rxfh_context,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
.get_channels = ice_get_channels,
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index b6e7f47c8c78..ead6d50fc0ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -5,6 +5,7 @@
#include "ice.h"
#include "ice_lib.h"
+#include "ice_fdir.h"
#include "ice_flow.h"
static struct in6_addr full_ipv6_addr_mask = {
@@ -205,7 +206,7 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
if (rule->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT)
fsp->ring_cookie = RX_CLS_FLOW_DISC;
else
- fsp->ring_cookie = rule->q_index;
+ fsp->ring_cookie = rule->orig_q_index;
idx = ice_ethtool_flow_to_fltr(fsp->flow_type);
if (idx == ICE_FLTR_PTYPE_NONF_NONE) {
@@ -257,6 +258,80 @@ release_lock:
}
/**
+ * ice_fdir_remap_entries - update the FDir entries in profile
+ * @prof: FDir structure pointer
+ * @tun: tunneled or non-tunneled packet
+ * @idx: FDir entry index
+ */
+static void
+ice_fdir_remap_entries(struct ice_fd_hw_prof *prof, int tun, int idx)
+{
+ if (idx != prof->cnt && tun < ICE_FD_HW_SEG_MAX) {
+ int i;
+
+ for (i = idx; i < (prof->cnt - 1); i++) {
+ u64 old_entry_h;
+
+ old_entry_h = prof->entry_h[i + 1][tun];
+ prof->entry_h[i][tun] = old_entry_h;
+ prof->vsi_h[i] = prof->vsi_h[i + 1];
+ }
+
+ prof->entry_h[i][tun] = 0;
+ prof->vsi_h[i] = 0;
+ }
+}
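ice_fdir_remap_entries() above closes the hole left by a removed entry by sliding everything after it down one slot and zeroing the freed tail. The same compaction on plain parallel arrays, purely as an illustration:

#include <stdint.h>

/* remove slot idx from parallel arrays of length cnt by shifting the tail down */
static void compact_entries(uint64_t *entry_h, uint16_t *vsi_h, int cnt, int idx)
{
	int i;

	if (idx >= cnt)
		return;

	for (i = idx; i < cnt - 1; i++) {
		entry_h[i] = entry_h[i + 1];
		vsi_h[i] = vsi_h[i + 1];
	}
	entry_h[i] = 0;	/* clear the now-unused last slot */
	vsi_h[i] = 0;
}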
+
+/**
+ * ice_fdir_rem_adq_chnl - remove an ADQ channel from HW filter rules
+ * @hw: hardware structure containing filter list
+ * @vsi_idx: VSI handle
+ */
+void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx)
+{
+ int status, flow;
+
+ if (!hw->fdir_prof)
+ return;
+
+ for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
+ struct ice_fd_hw_prof *prof = hw->fdir_prof[flow];
+ int tun, i;
+
+ if (!prof || !prof->cnt)
+ continue;
+
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ u64 prof_id;
+
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+
+ for (i = 0; i < prof->cnt; i++) {
+ if (prof->vsi_h[i] != vsi_idx)
+ continue;
+
+ prof->entry_h[i][tun] = 0;
+ prof->vsi_h[i] = 0;
+ break;
+ }
+
+ /* after clearing FDir entries update the remaining */
+ ice_fdir_remap_entries(prof, tun, i);
+
+ /* find flow profile corresponding to prof_id and clear
+ * vsi_idx from bitmap.
+ */
+ status = ice_flow_rem_vsi_prof(hw, vsi_idx, prof_id);
+ if (status) {
+ dev_err(ice_hw_to_dev(hw), "ice_flow_rem_vsi_prof() failed status=%d\n",
+ status);
+ }
+ }
+ prof->cnt--;
+ }
+}
+
+/**
* ice_fdir_get_hw_prof - return the ice_fd_hw_proc associated with a flow
* @hw: hardware structure containing the filter list
* @blk: hardware block
@@ -514,6 +589,28 @@ ice_fdir_alloc_flow_prof(struct ice_hw *hw, enum ice_fltr_ptype flow)
}
/**
+ * ice_fdir_prof_vsi_idx - find or insert a vsi_idx in structure
+ * @prof: pointer to flow director HW profile
+ * @vsi_idx: vsi_idx to locate
+ *
+ * Return the index of vsi_idx. If vsi_idx is not found, insert it
+ * into the vsi_h table.
+ */
+static u16
+ice_fdir_prof_vsi_idx(struct ice_fd_hw_prof *prof, int vsi_idx)
+{
+ u16 idx = 0;
+
+ for (idx = 0; idx < prof->cnt; idx++)
+ if (prof->vsi_h[idx] == vsi_idx)
+ return idx;
+
+ if (idx == prof->cnt)
+ prof->vsi_h[prof->cnt++] = vsi_idx;
+ return idx;
+}
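ice_fdir_prof_vsi_idx() above is a linear find-or-append over a small table. A standalone version of the same idea (the caller must ensure there is room for the appended element, as the driver does implicitly):

#include <stdint.h>

/* return the slot holding val, appending it if not present; *cnt grows by one
 * on insert
 */
static uint16_t find_or_insert(int *table, uint16_t *cnt, int val)
{
	uint16_t idx;

	for (idx = 0; idx < *cnt; idx++)
		if (table[idx] == val)
			return idx;

	table[(*cnt)++] = val;	/* idx == old *cnt, i.e. the new slot */
	return idx;
}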
+
+/**
* ice_fdir_set_hw_fltr_rule - Configure HW tables to generate a FDir rule
* @pf: pointer to the PF structure
* @seg: protocol header description pointer
@@ -530,11 +627,12 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
struct ice_flow_prof *prof = NULL;
struct ice_fd_hw_prof *hw_prof;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u64 entry1_h = 0;
u64 entry2_h = 0;
+ bool del_last;
u64 prof_id;
int err;
+ int idx;
main_vsi = ice_get_main_vsi(pf);
if (!main_vsi)
@@ -581,24 +679,20 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
* actions (NULL) and zero actions 0.
*/
prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
- status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- TNL_SEG_CNT(tun), &prof);
- if (status)
- return ice_status_to_errno(status);
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
- main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry1_h);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+ TNL_SEG_CNT(tun), &prof);
+ if (err)
+ return err;
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ main_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry1_h);
+ if (err)
goto err_prof;
- }
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
- ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry2_h);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, main_vsi->idx,
+ ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry2_h);
+ if (err)
goto err_entry;
- }
hw_prof->fdir_seg[tun] = seg;
hw_prof->entry_h[0][tun] = entry1_h;
@@ -608,8 +702,60 @@ ice_fdir_set_hw_fltr_rule(struct ice_pf *pf, struct ice_flow_seg_info *seg,
if (!hw_prof->cnt)
hw_prof->cnt = 2;
+ for (idx = 1; idx < ICE_CHNL_MAX_TC; idx++) {
+ u16 vsi_idx;
+ u16 vsi_h;
+
+ if (!ice_is_adq_active(pf) || !main_vsi->tc_map_vsi[idx])
+ continue;
+
+ entry1_h = 0;
+ vsi_h = main_vsi->tc_map_vsi[idx]->idx;
+ err = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+ main_vsi->idx, vsi_h,
+ ICE_FLOW_PRIO_NORMAL, seg,
+ &entry1_h);
+ if (err) {
+ dev_err(dev, "Could not add Channel VSI %d to flow group\n",
+ idx);
+ goto err_unroll;
+ }
+
+ vsi_idx = ice_fdir_prof_vsi_idx(hw_prof,
+ main_vsi->tc_map_vsi[idx]->idx);
+ hw_prof->entry_h[vsi_idx][tun] = entry1_h;
+ }
+
return 0;
+err_unroll:
+ entry1_h = 0;
+ hw_prof->fdir_seg[tun] = NULL;
+
+ /* The variable del_last will be used to determine when to clean up
+ * the VSI group data. The VSI data is not needed if there are no
+ * segments.
+ */
+ del_last = true;
+ for (idx = 0; idx < ICE_FD_HW_SEG_MAX; idx++)
+ if (hw_prof->fdir_seg[idx]) {
+ del_last = false;
+ break;
+ }
+
+ for (idx = 0; idx < hw_prof->cnt; idx++) {
+ u16 vsi_num = ice_get_hw_vsi_num(hw, hw_prof->vsi_h[idx]);
+
+ if (!hw_prof->entry_h[idx][tun])
+ continue;
+ ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
+ ice_flow_rem_entry(hw, ICE_BLK_FD, hw_prof->entry_h[idx][tun]);
+ hw_prof->entry_h[idx][tun] = 0;
+ if (del_last)
+ hw_prof->vsi_h[idx] = 0;
+ }
+ if (del_last)
+ hw_prof->cnt = 0;
err_entry:
ice_rem_prof_id_flow(hw, ICE_BLK_FD,
ice_get_hw_vsi_num(hw, main_vsi->idx), prof_id);
@@ -706,7 +852,7 @@ ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
if (!seg)
return -ENOMEM;
- tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
@@ -1068,7 +1214,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
if (!seg)
return -ENOMEM;
- tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, ICE_FD_HW_SEG_MAX, sizeof(*tun_seg),
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
@@ -1174,6 +1320,31 @@ err_exit:
}
/**
+ * ice_update_per_q_fltr
+ * @vsi: ptr to VSI
+ * @q_index: queue index
+ * @inc: true to increment or false to decrement per queue filter count
+ *
+ * This function is used to keep track of per queue sideband filters
+ */
+static void ice_update_per_q_fltr(struct ice_vsi *vsi, u32 q_index, bool inc)
+{
+ struct ice_rx_ring *rx_ring;
+
+ if (!vsi->num_rxq || q_index >= vsi->num_rxq)
+ return;
+
+ rx_ring = vsi->rx_rings[q_index];
+ if (!rx_ring || !rx_ring->ch)
+ return;
+
+ if (inc)
+ atomic_inc(&rx_ring->ch->num_sb_fltr);
+ else
+ atomic_dec_if_positive(&rx_ring->ch->num_sb_fltr);
+}
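The per-queue sideband filter counter above is bumped with atomic_inc() and lowered with atomic_dec_if_positive() so it can never go negative under concurrent removals. A userspace sketch of dec-if-positive using a C11 compare-and-swap loop, purely illustrative and not the kernel primitive:

#include <stdatomic.h>

static void fltr_inc(atomic_int *cnt)
{
	atomic_fetch_add(cnt, 1);
}

/* decrement only if the counter is currently positive; returns the new value,
 * or -1 if nothing was decremented
 */
static int fltr_dec_if_positive(atomic_int *cnt)
{
	int old = atomic_load(cnt);

	while (old > 0) {
		if (atomic_compare_exchange_weak(cnt, &old, old - 1))
			return old - 1;
		/* old was reloaded by the failed CAS; retry */
	}
	return -1;
}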
+
+/**
* ice_fdir_write_fltr - send a flow director filter to the hardware
* @pf: PF data structure
* @input: filter structure
@@ -1190,7 +1361,6 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
struct ice_hw *hw = &pf->hw;
struct ice_fltr_desc desc;
struct ice_vsi *ctrl_vsi;
- enum ice_status status;
u8 *pkt, *frag_pkt;
bool has_frag;
int err;
@@ -1209,11 +1379,9 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
}
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
+ if (err)
goto err_free_all;
- }
err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
if (err)
goto err_free_all;
@@ -1223,12 +1391,10 @@ ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
if (has_frag) {
/* does not return error */
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- status = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
- is_tun);
- if (status) {
- err = ice_status_to_errno(status);
+ err = ice_fdir_get_gen_prgm_pkt(hw, input, frag_pkt, true,
+ is_tun);
+ if (err)
goto err_frag;
- }
err = ice_prgm_fdir_fltr(ctrl_vsi, &desc, frag_pkt);
if (err)
goto err_frag;
@@ -1324,13 +1490,32 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf)
}
/**
+ * ice_fdir_del_all_fltrs - Delete all flow director filters
+ * @vsi: the VSI being changed
+ *
+ * This function needs to be called while holding hw->fdir_fltr_lock
+ */
+void ice_fdir_del_all_fltrs(struct ice_vsi *vsi)
+{
+ struct ice_fdir_fltr *f_rule, *tmp;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+
+ list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
+ ice_fdir_write_all_fltr(pf, f_rule, false);
+ ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
+ list_del(&f_rule->fltr_node);
+ devm_kfree(ice_pf_to_dev(pf), f_rule);
+ }
+}
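ice_fdir_del_all_fltrs() walks the filter list with the _safe iterator so each node can be unlinked and freed while iterating. The same pattern on a plain singly linked list, for illustration only:

#include <stdlib.h>

struct fltr {
	struct fltr *next;
	/* ... rule data ... */
};

/* delete every node: save the next pointer before freeing the current node */
static void del_all_fltrs(struct fltr **head)
{
	struct fltr *cur = *head, *next;

	while (cur) {
		next = cur->next;
		free(cur);
		cur = next;
	}
	*head = NULL;
}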
+
+/**
* ice_vsi_manage_fdir - turn on/off flow director
* @vsi: the VSI being changed
* @ena: boolean value indicating if this is an enable or disable request
*/
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
{
- struct ice_fdir_fltr *f_rule, *tmp;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_fltr_ptype flow;
@@ -1344,13 +1529,8 @@ void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena)
mutex_lock(&hw->fdir_fltr_lock);
if (!test_and_clear_bit(ICE_FLAG_FD_ENA, pf->flags))
goto release_lock;
- list_for_each_entry_safe(f_rule, tmp, &hw->fdir_list_head, fltr_node) {
- /* ignore return value */
- ice_fdir_write_all_fltr(pf, f_rule, false);
- ice_fdir_update_cntrs(hw, f_rule->flow_type, false);
- list_del(&f_rule->fltr_node);
- devm_kfree(ice_hw_to_dev(hw), f_rule);
- }
+
+ ice_fdir_del_all_fltrs(vsi);
if (hw->fdir_prof)
for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX;
@@ -1401,18 +1581,25 @@ ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
{
struct ice_fdir_fltr *old_fltr;
struct ice_hw *hw = &pf->hw;
+ struct ice_vsi *vsi;
int err = -ENOENT;
/* Do not update filters during reset */
if (ice_is_reset_in_progress(pf->state))
return -EBUSY;
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return -EINVAL;
+
old_fltr = ice_fdir_find_fltr_by_idx(hw, fltr_idx);
if (old_fltr) {
err = ice_fdir_write_all_fltr(pf, old_fltr, false);
if (err)
return err;
ice_fdir_update_cntrs(hw, old_fltr->flow_type, false);
+ /* update sb-filters count, specific to ring->channel */
+ ice_update_per_q_fltr(vsi, old_fltr->orig_q_index, false);
if (!input && !hw->fdir_fltr_cnt[old_fltr->flow_type])
/* we just deleted the last filter of flow_type so we
* should also delete the HW filter info.
@@ -1424,6 +1611,8 @@ ice_fdir_update_list_entry(struct ice_pf *pf, struct ice_fdir_fltr *input,
if (!input)
return err;
ice_fdir_list_add_fltr(hw, input);
+ /* update sb-filters count, specific to ring->channel */
+ ice_update_per_q_fltr(vsi, input->orig_q_index, true);
ice_fdir_update_cntrs(hw, input->flow_type, true);
return 0;
}
@@ -1463,6 +1652,39 @@ int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
}
/**
+ * ice_update_ring_dest_vsi - update dest ring and dest VSI
+ * @vsi: pointer to target VSI
+ * @dest_vsi: ptr to dest VSI index
+ * @ring: ptr to dest ring
+ *
+ * This function updates the destination VSI and queue if the user specifies
+ * a target queue that falls within a channel's (aka ADQ) queue region
+ */
+static void
+ice_update_ring_dest_vsi(struct ice_vsi *vsi, u16 *dest_vsi, u32 *ring)
+{
+ struct ice_channel *ch;
+
+ list_for_each_entry(ch, &vsi->ch_list, list) {
+ if (!ch->ch_vsi)
+ continue;
+
+ /* make sure to locate corresponding channel based on "queue"
+ * specified
+ */
+ if ((*ring < ch->base_q) ||
+ (*ring >= (ch->base_q + ch->num_rxq)))
+ continue;
+
+ /* update the dest_vsi based on channel */
+ *dest_vsi = ch->ch_vsi->idx;
+
+ /* update the "ring" to be correct based on channel */
+ *ring -= ch->base_q;
+ }
+}
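ice_update_ring_dest_vsi() above finds the channel whose queue region contains the user-supplied ring and rewrites both the destination VSI and the ring index so it becomes relative to that channel. A compact standalone version over an array of (base, count) regions, with made-up types:

#include <stdint.h>
#include <stddef.h>

struct region {
	uint32_t base_q;	/* first absolute queue of the channel */
	uint32_t num_q;		/* number of queues owned by the channel */
	uint16_t vsi_idx;	/* VSI backing the channel */
};

/* if *ring falls inside a region, point dest_vsi at it and make *ring relative */
static void remap_ring(const struct region *regions, size_t n,
		       uint16_t *dest_vsi, uint32_t *ring)
{
	for (size_t i = 0; i < n; i++) {
		if (*ring < regions[i].base_q ||
		    *ring >= regions[i].base_q + regions[i].num_q)
			continue;
		*dest_vsi = regions[i].vsi_idx;
		*ring -= regions[i].base_q;
		return;
	}
}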
+
+/**
* ice_set_fdir_input_set - Set the input set for Flow Director
* @vsi: pointer to target VSI
* @fsp: pointer to ethtool Rx flow specification
@@ -1473,6 +1695,7 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
struct ice_fdir_fltr *input)
{
u16 dest_vsi, q_index = 0;
+ u16 orig_q_index = 0;
struct ice_pf *pf;
struct ice_hw *hw;
int flow_type;
@@ -1499,6 +1722,8 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
if (ring >= vsi->num_rxq)
return -EINVAL;
+ orig_q_index = ring;
+ ice_update_ring_dest_vsi(vsi, &dest_vsi, &ring);
dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
q_index = ring;
}
@@ -1507,6 +1732,11 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
input->q_index = q_index;
flow_type = fsp->flow_type & ~FLOW_EXT;
+ /* Record the original queue index as specified by user.
+ * with channel configuration 'q_index' becomes relative
+ * to TC (channel).
+ */
+ input->orig_q_index = orig_q_index;
input->dest_vsi = dest_vsi;
input->dest_ctl = dest_ctl;
input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
@@ -1694,6 +1924,8 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
remove_sw_rule:
ice_fdir_update_cntrs(hw, input->flow_type, false);
+ /* update sb-filters count, specific to ring->channel */
+ ice_update_per_q_fltr(vsi, input->orig_q_index, false);
list_del(&input->fltr_node);
release_lock:
mutex_unlock(&hw->fdir_fltr_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 4dca009bdd50..ae089d32ee9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -712,7 +712,7 @@ ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
* @hw: pointer to the hardware structure
* @cntr_id: returns counter index
*/
-enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id)
+int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id)
{
return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id);
@@ -723,7 +723,7 @@ enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id)
* @hw: pointer to the hardware structure
* @cntr_id: counter index to be freed
*/
-enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id)
+int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id)
{
return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1, cntr_id);
@@ -735,8 +735,7 @@ enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id)
* @cntr_id: returns counter index
* @num_fltr: number of filter entries to be allocated
*/
-enum ice_status
-ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
+int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
{
return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
@@ -749,8 +748,7 @@ ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
* @cntr_id: returns counter index
* @num_fltr: number of filter entries to be allocated
*/
-enum ice_status
-ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
+int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr)
{
return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, num_fltr,
@@ -872,7 +870,7 @@ static void ice_pkt_insert_mac_addr(u8 *pkt, u8 *addr)
* @frag: generate a fragment packet
* @tun: true implies generate a tunnel packet
*/
-enum ice_status
+int
ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
u8 *pkt, bool frag, bool tun)
{
@@ -919,15 +917,15 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
if (ice_fdir_pkt[idx].flow == flow)
break;
if (idx == ICE_FDIR_NUM_PKT)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!tun) {
memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
loc = pkt;
} else {
if (!ice_get_open_tunnel_port(hw, &tnl_port, TNL_ALL))
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
if (!ice_fdir_pkt[idx].tun_pkt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
memcpy(pkt, ice_fdir_pkt[idx].tun_pkt,
ice_fdir_pkt[idx].tun_pkt_len);
ice_pkt_insert_u16(pkt, ICE_IPV4_UDP_DST_PORT_OFFSET,
@@ -1111,7 +1109,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
if (input->flex_fltr)
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index da4163856f4c..1b9b84490689 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -182,6 +182,7 @@ struct ice_fdir_fltr {
/* filter control */
u16 q_index;
+ u16 orig_q_index;
u16 dest_vsi;
u8 dest_ctl;
u8 cnt_ena;
@@ -201,16 +202,14 @@ struct ice_fdir_base_pkt {
const u8 *tun_pkt;
};
-enum ice_status ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
-enum ice_status ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
-enum ice_status
-ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
-enum ice_status
-ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
+int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
+int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
+int ice_alloc_fd_shrd_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
void
ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
struct ice_fltr_desc *fdesc, bool add);
-enum ice_status
+int
ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
u8 *pkt, bool frag, bool tun);
int ice_get_fdir_cnt_all(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 6ad1c2559724..5ce413965930 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -4,15 +4,7 @@
#include "ice_common.h"
#include "ice_flex_pipe.h"
#include "ice_flow.h"
-
-/* To support tunneling entries by PF, the package will append the PF number to
- * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
- */
-static const struct ice_tunnel_type_scan tnls[] = {
- { TNL_VXLAN, "TNL_VXLAN_PF" },
- { TNL_GENEVE, "TNL_GENEVE_PF" },
- { TNL_LAST, "" }
-};
+#include "ice.h"
static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
/* SWITCH */
@@ -95,422 +87,14 @@ static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
}
/**
- * ice_pkg_val_buf
- * @buf: pointer to the ice buffer
- *
- * This helper function validates a buffer's header.
- */
-static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
-{
- struct ice_buf_hdr *hdr;
- u16 section_count;
- u16 data_end;
-
- hdr = (struct ice_buf_hdr *)buf->buf;
- /* verify data */
- section_count = le16_to_cpu(hdr->section_count);
- if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
- return NULL;
-
- data_end = le16_to_cpu(hdr->data_end);
- if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
- return NULL;
-
- return hdr;
-}
-
-/**
- * ice_find_buf_table
- * @ice_seg: pointer to the ice segment
- *
- * Returns the address of the buffer table within the ice segment.
- */
-static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
-{
- struct ice_nvm_table *nvms;
-
- nvms = (struct ice_nvm_table *)
- (ice_seg->device_table +
- le32_to_cpu(ice_seg->device_table_count));
-
- return (__force struct ice_buf_table *)
- (nvms->vers + le32_to_cpu(nvms->table_count));
-}
-
-/**
- * ice_pkg_enum_buf
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- *
- * This function will enumerate all the buffers in the ice segment. The first
- * call is made with the ice_seg parameter non-NULL; on subsequent calls,
- * ice_seg is set to NULL which continues the enumeration. When the function
- * returns a NULL pointer, then the end of the buffers has been reached, or an
- * unexpected value has been detected (for example an invalid section count or
- * an invalid buffer end value).
- */
-static struct ice_buf_hdr *
-ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
-{
- if (ice_seg) {
- state->buf_table = ice_find_buf_table(ice_seg);
- if (!state->buf_table)
- return NULL;
-
- state->buf_idx = 0;
- return ice_pkg_val_buf(state->buf_table->buf_array);
- }
-
- if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
- return ice_pkg_val_buf(state->buf_table->buf_array +
- state->buf_idx);
- else
- return NULL;
-}
-
-/**
- * ice_pkg_advance_sect
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- *
- * This helper function will advance the section within the ice segment,
- * also advancing the buffer if needed.
- */
-static bool
-ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
-{
- if (!ice_seg && !state->buf)
- return false;
-
- if (!ice_seg && state->buf)
- if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
- return true;
-
- state->buf = ice_pkg_enum_buf(ice_seg, state);
- if (!state->buf)
- return false;
-
- /* start of new buffer, reset section index */
- state->sect_idx = 0;
- return true;
-}
-
-/**
- * ice_pkg_enum_section
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- * @sect_type: section type to enumerate
- *
- * This function will enumerate all the sections of a particular type in the
- * ice segment. The first call is made with the ice_seg parameter non-NULL;
- * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
- * When the function returns a NULL pointer, then the end of the matching
- * sections has been reached.
- */
-static void *
-ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
- u32 sect_type)
-{
- u16 offset, size;
-
- if (ice_seg)
- state->type = sect_type;
-
- if (!ice_pkg_advance_sect(ice_seg, state))
- return NULL;
-
- /* scan for next matching section */
- while (state->buf->section_entry[state->sect_idx].type !=
- cpu_to_le32(state->type))
- if (!ice_pkg_advance_sect(NULL, state))
- return NULL;
-
- /* validate section */
- offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
- if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
- return NULL;
-
- size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
- if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
- return NULL;
-
- /* make sure the section fits in the buffer */
- if (offset + size > ICE_PKG_BUF_SIZE)
- return NULL;
-
- state->sect_type =
- le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
-
- /* calc pointer to this section */
- state->sect = ((u8 *)state->buf) +
- le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
-
- return state->sect;
-}
-
-/**
- * ice_pkg_enum_entry
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- * @sect_type: section type to enumerate
- * @offset: pointer to variable that receives the offset in the table (optional)
- * @handler: function that handles access to the entries into the section type
- *
- * This function will enumerate all the entries in particular section type in
- * the ice segment. The first call is made with the ice_seg parameter non-NULL;
- * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
- * When the function returns a NULL pointer, then the end of the entries has
- * been reached.
- *
- * Since each section may have a different header and entry size, the handler
- * function is needed to determine the number and location entries in each
- * section.
- *
- * The offset parameter is optional, but should be used for sections that
- * contain an offset for each section table. For such cases, the section handler
- * function must return the appropriate offset + index to give the absolution
- * offset for each entry. For example, if the base for a section's header
- * indicates a base offset of 10, and the index for the entry is 2, then
- * section handler function should set the offset to 10 + 2 = 12.
- */
-static void *
-ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
- u32 sect_type, u32 *offset,
- void *(*handler)(u32 sect_type, void *section,
- u32 index, u32 *offset))
-{
- void *entry;
-
- if (ice_seg) {
- if (!handler)
- return NULL;
-
- if (!ice_pkg_enum_section(ice_seg, state, sect_type))
- return NULL;
-
- state->entry_idx = 0;
- state->handler = handler;
- } else {
- state->entry_idx++;
- }
-
- if (!state->handler)
- return NULL;
-
- /* get entry */
- entry = state->handler(state->sect_type, state->sect, state->entry_idx,
- offset);
- if (!entry) {
- /* end of a section, look for another section of this type */
- if (!ice_pkg_enum_section(NULL, state, 0))
- return NULL;
-
- state->entry_idx = 0;
- entry = state->handler(state->sect_type, state->sect,
- state->entry_idx, offset);
- }
-
- return entry;
-}
-
-/**
- * ice_boost_tcam_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the boost TCAM entry to be returned
- * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * Handles enumeration of individual boost TCAM entries.
- */
-static void *
-ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
-{
- struct ice_boost_tcam_section *boost;
-
- if (!section)
- return NULL;
-
- if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
- return NULL;
-
- /* cppcheck-suppress nullPointer */
- if (index > ICE_MAX_BST_TCAMS_IN_BUF)
- return NULL;
-
- if (offset)
- *offset = 0;
-
- boost = section;
- if (index >= le16_to_cpu(boost->count))
- return NULL;
-
- return boost->tcam + index;
-}
-
-/**
- * ice_find_boost_entry
- * @ice_seg: pointer to the ice segment (non-NULL)
- * @addr: Boost TCAM address of entry to search for
- * @entry: returns pointer to the entry
- *
- * Finds a particular Boost TCAM entry and returns a pointer to that entry
- * if it is found. The ice_seg parameter must not be NULL since the first call
- * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
- */
-static enum ice_status
-ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
- struct ice_boost_tcam_entry **entry)
-{
- struct ice_boost_tcam_entry *tcam;
- struct ice_pkg_enum state;
-
- memset(&state, 0, sizeof(state));
-
- if (!ice_seg)
- return ICE_ERR_PARAM;
-
- do {
- tcam = ice_pkg_enum_entry(ice_seg, &state,
- ICE_SID_RXPARSER_BOOST_TCAM, NULL,
- ice_boost_tcam_handler);
- if (tcam && le16_to_cpu(tcam->addr) == addr) {
- *entry = tcam;
- return 0;
- }
-
- ice_seg = NULL;
- } while (tcam);
-
- *entry = NULL;
- return ICE_ERR_CFG;
-}
-
-/**
- * ice_label_enum_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the label entry to be returned
- * @offset: pointer to receive absolute offset, always zero for label sections
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * Handles enumeration of individual label entries.
- */
-static void *
-ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
- u32 *offset)
-{
- struct ice_label_section *labels;
-
- if (!section)
- return NULL;
-
- /* cppcheck-suppress nullPointer */
- if (index > ICE_MAX_LABELS_IN_BUF)
- return NULL;
-
- if (offset)
- *offset = 0;
-
- labels = section;
- if (index >= le16_to_cpu(labels->count))
- return NULL;
-
- return labels->label + index;
-}
-
-/**
- * ice_enum_labels
- * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
- * @type: the section type that will contain the label (0 on subsequent calls)
- * @state: ice_pkg_enum structure that will hold the state of the enumeration
- * @value: pointer to a value that will return the label's value if found
- *
- * Enumerates a list of labels in the package. The caller will call
- * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
- * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
- * the end of the list has been reached.
- */
-static char *
-ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
- u16 *value)
-{
- struct ice_label *label;
-
- /* Check for valid label section on first call */
- if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
- return NULL;
-
- label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
- ice_label_enum_handler);
- if (!label)
- return NULL;
-
- *value = le16_to_cpu(label->value);
- return label->name;
-}
-
-/**
- * ice_init_pkg_hints
+ * ice_hw_ptype_ena - check if the PTYPE is enabled or not
* @hw: pointer to the HW structure
- * @ice_seg: pointer to the segment of the package scan (non-NULL)
- *
- * This function will scan the package and save off relevant information
- * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
- * since the first call to ice_enum_labels requires a pointer to an actual
- * ice_seg structure.
+ * @ptype: the hardware PTYPE
*/
-static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
{
- struct ice_pkg_enum state;
- char *label_name;
- u16 val;
- int i;
-
- memset(&hw->tnl, 0, sizeof(hw->tnl));
- memset(&state, 0, sizeof(state));
-
- if (!ice_seg)
- return;
-
- label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
- &val);
-
- while (label_name && hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
- for (i = 0; tnls[i].type != TNL_LAST; i++) {
- size_t len = strlen(tnls[i].label_prefix);
-
- /* Look for matching label start, before continuing */
- if (strncmp(label_name, tnls[i].label_prefix, len))
- continue;
-
- /* Make sure this label matches our PF. Note that the PF
- * character ('0' - '7') will be located where our
- * prefix string's null terminator is located.
- */
- if ((label_name[len] - '0') == hw->pf_id) {
- hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
- hw->tnl.tbl[hw->tnl.count].valid = false;
- hw->tnl.tbl[hw->tnl.count].boost_addr = val;
- hw->tnl.tbl[hw->tnl.count].port = 0;
- hw->tnl.count++;
- break;
- }
- }
-
- label_name = ice_enum_labels(NULL, 0, &state, &val);
- }
-
- /* Cache the appropriate boost TCAM entry pointers */
- for (i = 0; i < hw->tnl.count; i++) {
- ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
- &hw->tnl.tbl[i].boost_entry);
- if (hw->tnl.tbl[i].boost_entry) {
- hw->tnl.tbl[i].valid = true;
- if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
- hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
- }
- }
+ return ptype < ICE_FLOW_PTYPE_MAX &&
+ test_bit(ptype, hw->hw_ptype);
}
/* Key creation */
@@ -549,7 +133,7 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
* ------------------------------
* Result: key: b01 10 11 11 00 00
*/
-static enum ice_status
+static int
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
u8 *key_inv)
{
@@ -558,7 +142,7 @@ ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
/* 'dont_care' and 'nvr_mtch' masks cannot overlap */
if ((dont_care ^ nvr_mtch) != (dont_care | nvr_mtch))
- return ICE_ERR_CFG;
+ return -EIO;
*key = 0;
*key_inv = 0;
@@ -651,7 +235,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
* dc == NULL --> dc mask is all 0's (no don't care bits)
* nm == NULL --> nm mask is all 0's (no never match bits)
*/
-static enum ice_status
+static int
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
u16 len)
{
@@ -660,11 +244,11 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
/* size must be a multiple of 2 bytes. */
if (size % 2)
- return ICE_ERR_CFG;
+ return -EIO;
half_size = size / 2;
if (off + len > half_size)
- return ICE_ERR_CFG;
+ return -EIO;
/* Make sure at most one bit is set in the never match mask. Having more
* than one never match mask bit set will cause HW to consume excessive
@@ -672,70 +256,25 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
*/
#define ICE_NVR_MTCH_BITS_MAX 1
if (nm && !ice_bits_max_set(nm, len, ICE_NVR_MTCH_BITS_MAX))
- return ICE_ERR_CFG;
+ return -EIO;
for (i = 0; i < len; i++)
if (ice_gen_key_word(val[i], upd ? upd[i] : 0xff,
dc ? dc[i] : 0, nm ? nm[i] : 0,
key + off + i, key + half_size + off + i))
- return ICE_ERR_CFG;
+ return -EIO;
return 0;
}
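Illustration only (not part of this patch): with ice_set_key() now returning 0 or -EIO, a caller can propagate the errno directly; the buffer size, mask values, and helper name below are assumptions made for the sketch.

static int ice_example_build_key(u8 *key /* 4 bytes: 2 key + 2 inverse */)
{
	u8 val[2] = { 0xAB, 0x00 };
	u8 dc[2] = { 0x00, 0xFF };	/* second byte is all don't-care */

	/* upd == NULL: update every bit; nm == NULL: no never-match bits */
	return ice_set_key(key, 4, val, NULL, dc, NULL, 0, 2);
}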
/**
- * ice_acquire_global_cfg_lock
- * @hw: pointer to the HW structure
- * @access: access type (read or write)
- *
- * This function will request ownership of the global config lock for reading
- * or writing of the package. When attempting to obtain write access, the
- * caller must check for the following two return values:
- *
- * ICE_SUCCESS - Means the caller has acquired the global config lock
- * and can perform writing of the package.
- * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
- * package or has found that no update was necessary; in
- * this case, the caller can just skip performing any
- * update of the package.
- */
-static enum ice_status
-ice_acquire_global_cfg_lock(struct ice_hw *hw,
- enum ice_aq_res_access_type access)
-{
- enum ice_status status;
-
- status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
- ICE_GLOBAL_CFG_LOCK_TIMEOUT);
-
- if (!status)
- mutex_lock(&ice_global_cfg_lock_sw);
- else if (status == ICE_ERR_AQ_NO_WORK)
- ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
-
- return status;
-}
-
-/**
- * ice_release_global_cfg_lock
- * @hw: pointer to the HW structure
- *
- * This function will release the global config lock.
- */
-static void ice_release_global_cfg_lock(struct ice_hw *hw)
-{
- mutex_unlock(&ice_global_cfg_lock_sw);
- ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
-}
-
-/**
* ice_acquire_change_lock
* @hw: pointer to the HW structure
* @access: access type (read or write)
*
* This function will request ownership of the change lock.
*/
-enum ice_status
+int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
@@ -754,1176 +293,118 @@ void ice_release_change_lock(struct ice_hw *hw)
}
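Illustration only (not part of this patch): the change lock helpers now speak plain errnos, so the usual take/modify/release pattern needs no status translation; the wrapper below is a hypothetical sketch.

static int ice_example_with_change_lock(struct ice_hw *hw)
{
	int err;

	err = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (err)
		return err;	/* negative errno from ice_acquire_res() */

	/* ... issue update package commands here ... */

	ice_release_change_lock(hw);
	return 0;
}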
/**
- * ice_aq_download_pkg
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package buffer to transfer
- * @buf_size: the size of the package buffer
- * @last_buf: last buffer indicator
- * @error_offset: returns error offset
- * @error_info: returns error information
- * @cd: pointer to command details structure or NULL
- *
- * Download Package (0x0C40)
- */
-static enum ice_status
-ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, bool last_buf, u32 *error_offset,
- u32 *error_info, struct ice_sq_cd *cd)
-{
- struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
- enum ice_status status;
-
- if (error_offset)
- *error_offset = 0;
- if (error_info)
- *error_info = 0;
-
- cmd = &desc.params.download_pkg;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- if (last_buf)
- cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
-
- status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == ICE_ERR_AQ_ERROR) {
- /* Read error from buffer only when the FW returned an error */
- struct ice_aqc_download_pkg_resp *resp;
-
- resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
- if (error_offset)
- *error_offset = le32_to_cpu(resp->error_offset);
- if (error_info)
- *error_info = le32_to_cpu(resp->error_info);
- }
-
- return status;
-}
-
-/**
- * ice_aq_update_pkg
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package cmd buffer
- * @buf_size: the size of the package cmd buffer
- * @last_buf: last buffer indicator
- * @error_offset: returns error offset
- * @error_info: returns error information
- * @cd: pointer to command details structure or NULL
- *
- * Update Package (0x0C42)
- */
-static enum ice_status
-ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
- bool last_buf, u32 *error_offset, u32 *error_info,
- struct ice_sq_cd *cd)
-{
- struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
- enum ice_status status;
-
- if (error_offset)
- *error_offset = 0;
- if (error_info)
- *error_info = 0;
-
- cmd = &desc.params.download_pkg;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- if (last_buf)
- cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
-
- status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == ICE_ERR_AQ_ERROR) {
- /* Read error from buffer only when the FW returned an error */
- struct ice_aqc_download_pkg_resp *resp;
-
- resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
- if (error_offset)
- *error_offset = le32_to_cpu(resp->error_offset);
- if (error_info)
- *error_info = le32_to_cpu(resp->error_info);
- }
-
- return status;
-}
-
-/**
- * ice_find_seg_in_pkg
- * @hw: pointer to the hardware structure
- * @seg_type: the segment type to search for (e.g., SEGMENT_TYPE_CPK)
- * @pkg_hdr: pointer to the package header to be searched
- *
- * This function searches a package file for a particular segment type. On
- * success it returns a pointer to the segment header, otherwise it will
- * return NULL.
- */
-static struct ice_generic_seg_hdr *
-ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
- struct ice_pkg_hdr *pkg_hdr)
-{
- u32 i;
-
- ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
- pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
- pkg_hdr->pkg_format_ver.update,
- pkg_hdr->pkg_format_ver.draft);
-
- /* Search all package segments for the requested segment type */
- for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
- struct ice_generic_seg_hdr *seg;
-
- seg = (struct ice_generic_seg_hdr *)
- ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));
-
- if (le32_to_cpu(seg->seg_type) == seg_type)
- return seg;
- }
-
- return NULL;
-}
-
-/**
- * ice_update_pkg
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
- *
- * Obtains change lock and updates package.
- */
-static enum ice_status
-ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
-{
- enum ice_status status;
- u32 offset, info, i;
-
- status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
- if (status)
- return status;
-
- for (i = 0; i < count; i++) {
- struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
- bool last = ((i + 1) == count);
-
- status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
- last, &offset, &info, NULL);
-
- if (status) {
- ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
- status, offset, info);
- break;
- }
- }
-
- ice_release_change_lock(hw);
-
- return status;
-}
-
-/**
- * ice_dwnld_cfg_bufs
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
- *
- * Obtains global config lock and downloads the package configuration buffers
- * to the firmware. Metadata buffers are skipped, and the first metadata buffer
- * found indicates that the rest of the buffers are all metadata buffers.
+ * ice_get_open_tunnel_port - retrieve an open tunnel port
+ * @hw: pointer to the HW structure
+ * @port: returns open port
+ * @type: type of tunnel, can be TNL_LAST if it doesn't matter
*/
-static enum ice_status
-ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
+ enum ice_tunnel_type type)
{
- enum ice_status status;
- struct ice_buf_hdr *bh;
- u32 offset, info, i;
-
- if (!bufs || !count)
- return ICE_ERR_PARAM;
-
- /* If the first buffer's first section has its metadata bit set
- * then there are no buffers to be downloaded, and the operation is
- * considered a success.
- */
- bh = (struct ice_buf_hdr *)bufs;
- if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
- return 0;
-
- /* reset pkg_dwnld_status in case this function is called in the
- * reset/rebuild flow
- */
- hw->pkg_dwnld_status = ICE_AQ_RC_OK;
-
- status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
- if (status) {
- if (status == ICE_ERR_AQ_NO_WORK)
- hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
- else
- hw->pkg_dwnld_status = hw->adminq.sq_last_status;
- return status;
- }
-
- for (i = 0; i < count; i++) {
- bool last = ((i + 1) == count);
-
- if (!last) {
- /* check next buffer for metadata flag */
- bh = (struct ice_buf_hdr *)(bufs + i + 1);
-
- /* A set metadata flag in the next buffer will signal
- * that the current buffer will be the last buffer
- * downloaded
- */
- if (le16_to_cpu(bh->section_count))
- if (le32_to_cpu(bh->section_entry[0].type) &
- ICE_METADATA_BUF)
- last = true;
- }
-
- bh = (struct ice_buf_hdr *)(bufs + i);
-
- status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
- &offset, &info, NULL);
-
- /* Save AQ status from download package */
- hw->pkg_dwnld_status = hw->adminq.sq_last_status;
- if (status) {
- ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
- status, offset, info);
+ bool res = false;
+ u16 i;
- break;
- }
+ mutex_lock(&hw->tnl_lock);
- if (last)
+ for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port &&
+ (type == TNL_LAST || type == hw->tnl.tbl[i].type)) {
+ *port = hw->tnl.tbl[i].port;
+ res = true;
break;
- }
-
- ice_release_global_cfg_lock(hw);
-
- return status;
-}
-
-/**
- * ice_aq_get_pkg_info_list
- * @hw: pointer to the hardware structure
- * @pkg_info: the buffer which will receive the information list
- * @buf_size: the size of the pkg_info information buffer
- * @cd: pointer to command details structure or NULL
- *
- * Get Package Info List (0x0C43)
- */
-static enum ice_status
-ice_aq_get_pkg_info_list(struct ice_hw *hw,
- struct ice_aqc_get_pkg_info_resp *pkg_info,
- u16 buf_size, struct ice_sq_cd *cd)
-{
- struct ice_aq_desc desc;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
-
- return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
-}
-
-/**
- * ice_download_pkg
- * @hw: pointer to the hardware structure
- * @ice_seg: pointer to the segment of the package to be downloaded
- *
- * Handles the download of a complete package.
- */
-static enum ice_status
-ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
-{
- struct ice_buf_table *ice_buf_tbl;
-
- ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
- ice_seg->hdr.seg_format_ver.major,
- ice_seg->hdr.seg_format_ver.minor,
- ice_seg->hdr.seg_format_ver.update,
- ice_seg->hdr.seg_format_ver.draft);
-
- ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
- le32_to_cpu(ice_seg->hdr.seg_type),
- le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
-
- ice_buf_tbl = ice_find_buf_table(ice_seg);
-
- ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
- le32_to_cpu(ice_buf_tbl->buf_count));
-
- return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
- le32_to_cpu(ice_buf_tbl->buf_count));
-}
-
-/**
- * ice_init_pkg_info
- * @hw: pointer to the hardware structure
- * @pkg_hdr: pointer to the driver's package hdr
- *
- * Saves off the package details into the HW structure.
- */
-static enum ice_status
-ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
-{
- struct ice_generic_seg_hdr *seg_hdr;
-
- if (!pkg_hdr)
- return ICE_ERR_PARAM;
-
- seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
- if (seg_hdr) {
- struct ice_meta_sect *meta;
- struct ice_pkg_enum state;
-
- memset(&state, 0, sizeof(state));
-
- /* Get package information from the Metadata Section */
- meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
- ICE_SID_METADATA);
- if (!meta) {
- ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
- return ICE_ERR_CFG;
}
- hw->pkg_ver = meta->ver;
- memcpy(hw->pkg_name, meta->name, sizeof(meta->name));
-
- ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
- meta->ver.major, meta->ver.minor, meta->ver.update,
- meta->ver.draft, meta->name);
-
- hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
- memcpy(hw->ice_seg_id, seg_hdr->seg_id,
- sizeof(hw->ice_seg_id));
-
- ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
- seg_hdr->seg_format_ver.major,
- seg_hdr->seg_format_ver.minor,
- seg_hdr->seg_format_ver.update,
- seg_hdr->seg_format_ver.draft,
- seg_hdr->seg_id);
- } else {
- ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
- return ICE_ERR_CFG;
- }
-
- return 0;
-}
-
-/**
- * ice_get_pkg_info
- * @hw: pointer to the hardware structure
- *
- * Store details of the package currently loaded in HW into the HW structure.
- */
-static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
-{
- struct ice_aqc_get_pkg_info_resp *pkg_info;
- enum ice_status status;
- u16 size;
- u32 i;
-
- size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
- pkg_info = kzalloc(size, GFP_KERNEL);
- if (!pkg_info)
- return ICE_ERR_NO_MEMORY;
-
- status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
- if (status)
- goto init_pkg_free_alloc;
-
- for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
-#define ICE_PKG_FLAG_COUNT 4
- char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
- u8 place = 0;
-
- if (pkg_info->pkg_info[i].is_active) {
- flags[place++] = 'A';
- hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
- hw->active_track_id =
- le32_to_cpu(pkg_info->pkg_info[i].track_id);
- memcpy(hw->active_pkg_name,
- pkg_info->pkg_info[i].name,
- sizeof(pkg_info->pkg_info[i].name));
- hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
- }
- if (pkg_info->pkg_info[i].is_active_at_boot)
- flags[place++] = 'B';
- if (pkg_info->pkg_info[i].is_modified)
- flags[place++] = 'M';
- if (pkg_info->pkg_info[i].is_in_nvm)
- flags[place++] = 'N';
-
- ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
- i, pkg_info->pkg_info[i].ver.major,
- pkg_info->pkg_info[i].ver.minor,
- pkg_info->pkg_info[i].ver.update,
- pkg_info->pkg_info[i].ver.draft,
- pkg_info->pkg_info[i].name, flags);
- }
-
-init_pkg_free_alloc:
- kfree(pkg_info);
-
- return status;
-}
-
-/**
- * ice_verify_pkg - verify package
- * @pkg: pointer to the package buffer
- * @len: size of the package buffer
- *
- * Verifies various attributes of the package file, including length, format
- * version, and the requirement of at least one segment.
- */
-static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
-{
- u32 seg_count;
- u32 i;
-
- if (len < struct_size(pkg, seg_offset, 1))
- return ICE_ERR_BUF_TOO_SHORT;
-
- if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
- pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
- pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
- pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
- return ICE_ERR_CFG;
-
- /* pkg must have at least one segment */
- seg_count = le32_to_cpu(pkg->seg_count);
- if (seg_count < 1)
- return ICE_ERR_CFG;
-
- /* make sure segment array fits in package length */
- if (len < struct_size(pkg, seg_offset, seg_count))
- return ICE_ERR_BUF_TOO_SHORT;
-
- /* all segments must fit within length */
- for (i = 0; i < seg_count; i++) {
- u32 off = le32_to_cpu(pkg->seg_offset[i]);
- struct ice_generic_seg_hdr *seg;
-
- /* segment header must fit */
- if (len < off + sizeof(*seg))
- return ICE_ERR_BUF_TOO_SHORT;
-
- seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
-
- /* segment body must fit */
- if (len < off + le32_to_cpu(seg->seg_size))
- return ICE_ERR_BUF_TOO_SHORT;
- }
-
- return 0;
-}
-
-/**
- * ice_free_seg - free package segment pointer
- * @hw: pointer to the hardware structure
- *
- * Frees the package segment pointer in the proper manner, depending on if the
- * segment was allocated or just the passed in pointer was stored.
- */
-void ice_free_seg(struct ice_hw *hw)
-{
- if (hw->pkg_copy) {
- devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
- hw->pkg_copy = NULL;
- hw->pkg_size = 0;
- }
- hw->seg = NULL;
-}
-
-/**
- * ice_init_pkg_regs - initialize additional package registers
- * @hw: pointer to the hardware structure
- */
-static void ice_init_pkg_regs(struct ice_hw *hw)
-{
-#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
-#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
-#define ICE_SW_BLK_IDX 0
-
- /* setup Switch block input mask, which is 48-bits in two parts */
- wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
- wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
-}
-
-/**
- * ice_chk_pkg_version - check package version for compatibility with driver
- * @pkg_ver: pointer to a version structure to check
- *
- * Check to make sure that the package about to be downloaded is compatible with
- * the driver. To be compatible, the major and minor components of the package
- * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
- * definitions.
- */
-static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
-{
- if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
- pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
- return ICE_ERR_NOT_SUPPORTED;
-
- return 0;
-}
-
-/**
- * ice_chk_pkg_compat
- * @hw: pointer to the hardware structure
- * @ospkg: pointer to the package hdr
- * @seg: pointer to the package segment hdr
- *
- * This function checks the package version compatibility with driver and NVM
- */
-static enum ice_status
-ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
- struct ice_seg **seg)
-{
- struct ice_aqc_get_pkg_info_resp *pkg;
- enum ice_status status;
- u16 size;
- u32 i;
-
- /* Check package version compatibility */
- status = ice_chk_pkg_version(&hw->pkg_ver);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
- return status;
- }
-
- /* find ICE segment in given package */
- *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
- ospkg);
- if (!*seg) {
- ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
- return ICE_ERR_CFG;
- }
-
- /* Check if FW is compatible with the OS package */
- size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
- pkg = kzalloc(size, GFP_KERNEL);
- if (!pkg)
- return ICE_ERR_NO_MEMORY;
-
- status = ice_aq_get_pkg_info_list(hw, pkg, size, NULL);
- if (status)
- goto fw_ddp_compat_free_alloc;
-
- for (i = 0; i < le32_to_cpu(pkg->count); i++) {
- /* loop till we find the NVM package */
- if (!pkg->pkg_info[i].is_in_nvm)
- continue;
- if ((*seg)->hdr.seg_format_ver.major !=
- pkg->pkg_info[i].ver.major ||
- (*seg)->hdr.seg_format_ver.minor >
- pkg->pkg_info[i].ver.minor) {
- status = ICE_ERR_FW_DDP_MISMATCH;
- ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
- }
- /* done processing NVM package so break */
- break;
- }
-fw_ddp_compat_free_alloc:
- kfree(pkg);
- return status;
-}
-
-/**
- * ice_sw_fv_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the field vector entry to be returned
- * @offset: ptr to variable that receives the offset in the field vector table
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * This function treats the given section as of type ice_sw_fv_section and
- * enumerates offset field. "offset" is an index into the field vector table.
- */
-static void *
-ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
-{
- struct ice_sw_fv_section *fv_section = section;
-
- if (!section || sect_type != ICE_SID_FLD_VEC_SW)
- return NULL;
- if (index >= le16_to_cpu(fv_section->count))
- return NULL;
- if (offset)
- /* "index" passed in to this function is relative to a given
- * 4k block. To get to the true index into the field vector
- * table, we need to add the relative index to the base_offset
- * field of this section
- */
- *offset = le16_to_cpu(fv_section->base_offset) + index;
- return fv_section->fv + index;
-}
-
-/**
- * ice_get_prof_index_max - get the max profile index for used profile
- * @hw: pointer to the HW struct
- *
- * Calling this function will get the max profile index among the used profiles
- * and store the index number in struct ice_switch_info *switch_info
- * in HW for later use.
- */
-static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
-{
- u16 prof_index = 0, j, max_prof_index = 0;
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- bool flag = false;
- struct ice_fv *fv;
- u32 offset;
-
- memset(&state, 0, sizeof(state));
-
- if (!hw->seg)
- return ICE_ERR_PARAM;
-
- ice_seg = hw->seg;
-
- do {
- fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &offset, ice_sw_fv_handler);
- if (!fv)
- break;
- ice_seg = NULL;
-
- /* in a profile that is not used, the prot_id is set to 0xff
- * and the off is set to 0x1ff for all the field vectors.
- */
- for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
- fv->ew[j].off != ICE_FV_OFFSET_INVAL)
- flag = true;
- if (flag && prof_index > max_prof_index)
- max_prof_index = prof_index;
-
- prof_index++;
- flag = false;
- } while (fv);
-
- hw->switch_info->max_used_prof_index = max_prof_index;
+ mutex_unlock(&hw->tnl_lock);
- return 0;
+ return res;
}
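Illustration only (not part of this patch): a minimal sketch of querying for any open tunnel port; pass a concrete enum ice_tunnel_type (e.g. TNL_VXLAN) instead of TNL_LAST to restrict the search.

static void ice_example_log_open_tunnel(struct ice_hw *hw)
{
	u16 port;

	if (ice_get_open_tunnel_port(hw, &port, TNL_LAST))
		ice_debug(hw, ICE_DBG_PKG, "open tunnel port %u\n", port);
}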
/**
- * ice_init_pkg - initialize/download package
- * @hw: pointer to the hardware structure
- * @buf: pointer to the package buffer
- * @len: size of the package buffer
- *
- * This function initializes a package. The package contains HW tables
- * required to do packet processing. First, the function extracts package
- * information such as version. Then it finds the ice configuration segment
- * within the package; this function then saves a copy of the segment pointer
- * within the supplied package buffer. Next, the function will cache any hints
- * from the package, followed by downloading the package itself. Note that if
- * a previous PF driver has already downloaded the package successfully, then
- * the current driver will not have to download the package again.
- *
- * The local package contents will be used to query default behavior and to
- * update specific sections of the HW's version of the package (e.g. to update
- * the parse graph to understand new protocols).
- *
- * This function stores a pointer to the package buffer memory, and it is
- * expected that the supplied buffer will not be freed immediately. If the
- * package buffer needs to be freed, such as when read from a file, use
- * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
- * case.
+ * ice_upd_dvm_boost_entry
+ * @hw: pointer to the HW structure
+ * @entry: pointer to double vlan boost entry info
*/
-enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+static int
+ice_upd_dvm_boost_entry(struct ice_hw *hw, struct ice_dvm_entry *entry)
{
- struct ice_pkg_hdr *pkg;
- enum ice_status status;
- struct ice_seg *seg;
-
- if (!buf || !len)
- return ICE_ERR_PARAM;
-
- pkg = (struct ice_pkg_hdr *)buf;
- status = ice_verify_pkg(pkg, len);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
- status);
- return status;
- }
+ struct ice_boost_tcam_section *sect_rx, *sect_tx;
+ int status = -ENOSPC;
+ struct ice_buf_build *bld;
+ u8 val, dc, nm;
- /* initialize package info */
- status = ice_init_pkg_info(hw, pkg);
- if (status)
- return status;
+ bld = ice_pkg_buf_alloc(hw);
+ if (!bld)
+ return -ENOMEM;
- /* before downloading the package, check package version for
- * compatibility with driver
- */
- status = ice_chk_pkg_compat(hw, pkg, &seg);
- if (status)
- return status;
+ /* allocate 2 sections, one for Rx parser, one for Tx parser */
+ if (ice_pkg_buf_reserve_section(bld, 2))
+ goto ice_upd_dvm_boost_entry_err;
- /* initialize package hints and then download package */
- ice_init_pkg_hints(hw, seg);
- status = ice_download_pkg(hw, seg);
- if (status == ICE_ERR_AQ_NO_WORK) {
- ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
- status = 0;
- }
+ sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
+ struct_size(sect_rx, tcam, 1));
+ if (!sect_rx)
+ goto ice_upd_dvm_boost_entry_err;
+ sect_rx->count = cpu_to_le16(1);
- /* Get information on the package currently loaded in HW, then make sure
- * the driver is compatible with this version.
- */
- if (!status) {
- status = ice_get_pkg_info(hw);
- if (!status)
- status = ice_chk_pkg_version(&hw->active_pkg_ver);
- }
+ sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
+ struct_size(sect_tx, tcam, 1));
+ if (!sect_tx)
+ goto ice_upd_dvm_boost_entry_err;
+ sect_tx->count = cpu_to_le16(1);
- if (!status) {
- hw->seg = seg;
- /* on successful package download update other required
- * registers to support the package and fill HW tables
- * with package content.
- */
- ice_init_pkg_regs(hw);
- ice_fill_blk_tbls(hw);
- ice_get_prof_index_max(hw);
+ /* copy original boost entry to update package buffer */
+ memcpy(sect_rx->tcam, entry->boost_entry, sizeof(*sect_rx->tcam));
+
+ /* re-write the don't care and never match bits accordingly */
+ if (entry->enable) {
+ /* all bits are don't care */
+ val = 0x00;
+ dc = 0xFF;
+ nm = 0x00;
} else {
- ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
- status);
+ /* disable, one never match bit, the rest are don't care */
+ val = 0x00;
+ dc = 0xF7;
+ nm = 0x08;
}
- return status;
-}
-
-/**
- * ice_copy_and_init_pkg - initialize/download a copy of the package
- * @hw: pointer to the hardware structure
- * @buf: pointer to the package buffer
- * @len: size of the package buffer
- *
- * This function copies the package buffer, and then calls ice_init_pkg() to
- * initialize the copied package contents.
- *
- * The copying is necessary if the package buffer supplied is constant, or if
- * the memory may disappear shortly after calling this function.
- *
- * If the package buffer resides in the data segment and can be modified, the
- * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
- *
- * However, if the package buffer needs to be copied first, such as when being
- * read from a file, the caller should use ice_copy_and_init_pkg().
- *
- * This function will first copy the package buffer, before calling
- * ice_init_pkg(). The caller is free to immediately destroy the original
- * package buffer, as the new copy will be managed by this function and
- * related routines.
- */
-enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
-{
- enum ice_status status;
- u8 *buf_copy;
+ ice_set_key((u8 *)&sect_rx->tcam[0].key, sizeof(sect_rx->tcam[0].key),
+ &val, NULL, &dc, &nm, 0, sizeof(u8));
- if (!buf || !len)
- return ICE_ERR_PARAM;
+ /* exact copy of entry to Tx section entry */
+ memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
- buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
+ status = ice_update_pkg_no_lock(hw, ice_pkg_buf(bld), 1);
- status = ice_init_pkg(hw, buf_copy, len);
- if (status) {
- /* Free the copy, since we failed to initialize the package */
- devm_kfree(ice_hw_to_dev(hw), buf_copy);
- } else {
- /* Track the copied pkg so we can free it later */
- hw->pkg_copy = buf_copy;
- hw->pkg_size = len;
- }
+ice_upd_dvm_boost_entry_err:
+ ice_pkg_buf_free(hw, bld);
return status;
}
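Illustration only (not part of this patch): the (val, dc, nm) bytes chosen above encode "enable" as match-anything (the whole byte is don't-care) and "disable" as match-nothing (one never-match bit); the helper below merely restates that choice and is hypothetical.

static void ice_example_dvm_key_masks(bool enable, u8 *val, u8 *dc, u8 *nm)
{
	*val = 0x00;
	*dc = enable ? 0xFF : 0xF7;	/* enable: every bit is don't-care */
	*nm = enable ? 0x00 : 0x08;	/* disable: one never-match bit */
}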
/**
- * ice_pkg_buf_alloc
+ * ice_set_dvm_boost_entries
* @hw: pointer to the HW structure
*
- * Allocates a package buffer and returns a pointer to the buffer header.
- * Note: all package contents must be in Little Endian form.
+ * Enable double VLAN by updating the appropriate boost TCAM entries.
*/
-static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
-{
- struct ice_buf_build *bld;
- struct ice_buf_hdr *buf;
-
- bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
- if (!bld)
- return NULL;
-
- buf = (struct ice_buf_hdr *)bld;
- buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
- section_entry));
- return bld;
-}
-
-/**
- * ice_get_sw_prof_type - determine switch profile type
- * @hw: pointer to the HW structure
- * @fv: pointer to the switch field vector
- */
-static enum ice_prof_type
-ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
+int ice_set_dvm_boost_entries(struct ice_hw *hw)
{
u16 i;
- for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
- /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
- if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
- fv->ew[i].off == ICE_VNI_OFFSET)
- return ICE_PROF_TUN_UDP;
-
- /* GRE tunnel will have GRE protocol */
- if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
- return ICE_PROF_TUN_GRE;
- }
-
- return ICE_PROF_NON_TUN;
-}
-
-/**
- * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
- * @hw: pointer to hardware structure
- * @req_profs: type of profiles requested
- * @bm: pointer to memory for returning the bitmap of field vectors
- */
-void
-ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
- unsigned long *bm)
-{
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- struct ice_fv *fv;
-
- if (req_profs == ICE_PROF_ALL) {
- bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
- return;
- }
-
- memset(&state, 0, sizeof(state));
- bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
- ice_seg = hw->seg;
- do {
- enum ice_prof_type prof_type;
- u32 offset;
-
- fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &offset, ice_sw_fv_handler);
- ice_seg = NULL;
-
- if (fv) {
- /* Determine field vector type */
- prof_type = ice_get_sw_prof_type(hw, fv);
-
- if (req_profs & prof_type)
- set_bit((u16)offset, bm);
- }
- } while (fv);
-}
-
-/**
- * ice_get_sw_fv_list
- * @hw: pointer to the HW structure
- * @prot_ids: field vector to search for with a given protocol ID
- * @ids_cnt: lookup/protocol count
- * @bm: bitmap of field vectors to consider
- * @fv_list: Head of a list
- *
- * Finds all the field vector entries from switch block that contain
- * a given protocol ID and returns a list of structures of type
- * "ice_sw_fv_list_entry". Every structure in the list has a field vector
- * definition and profile ID information
- * NOTE: The caller of the function is responsible for freeing the memory
- * allocated for every list entry.
- */
-enum ice_status
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
- unsigned long *bm, struct list_head *fv_list)
-{
- struct ice_sw_fv_list_entry *fvl;
- struct ice_sw_fv_list_entry *tmp;
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- struct ice_fv *fv;
- u32 offset;
-
- memset(&state, 0, sizeof(state));
-
- if (!ids_cnt || !hw->seg)
- return ICE_ERR_PARAM;
-
- ice_seg = hw->seg;
- do {
- u16 i;
-
- fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &offset, ice_sw_fv_handler);
- if (!fv)
- break;
- ice_seg = NULL;
-
- /* If field vector is not in the bitmap list, then skip this
- * profile.
- */
- if (!test_bit((u16)offset, bm))
- continue;
-
- for (i = 0; i < ids_cnt; i++) {
- int j;
+ for (i = 0; i < hw->dvm_upd.count; i++) {
+ int status;
- /* This code assumes that if a switch field vector line
- * has a matching protocol, then this line will contain
- * the entries necessary to represent every field in
- * that protocol header.
- */
- for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv->ew[j].prot_id == prot_ids[i])
- break;
- if (j >= hw->blk[ICE_BLK_SW].es.fvw)
- break;
- if (i + 1 == ids_cnt) {
- fvl = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*fvl), GFP_KERNEL);
- if (!fvl)
- goto err;
- fvl->fv_ptr = fv;
- fvl->profile_id = offset;
- list_add(&fvl->list_entry, fv_list);
- break;
- }
- }
- } while (fv);
- if (list_empty(fv_list))
- return ICE_ERR_CFG;
- return 0;
-
-err:
- list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
- list_del(&fvl->list_entry);
- devm_kfree(ice_hw_to_dev(hw), fvl);
+ status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]);
+ if (status)
+ return status;
}
- return ICE_ERR_NO_MEMORY;
-}
-
-/**
- * ice_init_prof_result_bm - Initialize the profile result index bitmap
- * @hw: pointer to hardware structure
- */
-void ice_init_prof_result_bm(struct ice_hw *hw)
-{
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- struct ice_fv *fv;
-
- memset(&state, 0, sizeof(state));
-
- if (!hw->seg)
- return;
-
- ice_seg = hw->seg;
- do {
- u32 off;
- u16 i;
-
- fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &off, ice_sw_fv_handler);
- ice_seg = NULL;
- if (!fv)
- break;
-
- bitmap_zero(hw->switch_info->prof_res_bm[off],
- ICE_MAX_FV_WORDS);
-
- /* Determine empty field vector indices, these can be
- * used for recipe results. Skip index 0, since it is
- * always used for Switch ID.
- */
- for (i = 1; i < ICE_MAX_FV_WORDS; i++)
- if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
- fv->ew[i].off == ICE_FV_OFFSET_INVAL)
- set_bit(i, hw->switch_info->prof_res_bm[off]);
- } while (fv);
-}
-
-/**
- * ice_pkg_buf_free
- * @hw: pointer to the HW structure
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Frees a package buffer
- */
-static void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
-{
- devm_kfree(ice_hw_to_dev(hw), bld);
-}
-
-/**
- * ice_pkg_buf_reserve_section
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- * @count: the number of sections to reserve
- *
- * Reserves one or more section table entries in a package buffer. This routine
- * can be called multiple times as long as all calls are made before calling
- * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section() has been
- * called, the number of sections that can be allocated can no longer be
- * increased; not using all reserved sections is fine, but this will
- * result in some wasted space in the buffer.
- * Note: all package contents must be in Little Endian form.
- */
-static enum ice_status
-ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
-{
- struct ice_buf_hdr *buf;
- u16 section_count;
- u16 data_end;
-
- if (!bld)
- return ICE_ERR_PARAM;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
-
- /* already an active section, can't increase table size */
- section_count = le16_to_cpu(buf->section_count);
- if (section_count > 0)
- return ICE_ERR_CFG;
-
- if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
- return ICE_ERR_CFG;
- bld->reserved_section_table_entries += count;
-
- data_end = le16_to_cpu(buf->data_end) +
- flex_array_size(buf, section_entry, count);
- buf->data_end = cpu_to_le16(data_end);
-
return 0;
}
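Illustration only (not part of this patch): enabling double VLAN mode would call the new helper and propagate its errno; the caller name below is hypothetical.

static int ice_example_enable_dvm(struct ice_hw *hw)
{
	int err;

	err = ice_set_dvm_boost_entries(hw);
	if (err)
		ice_debug(hw, ICE_DBG_PKG, "DVM boost entry update failed, err %d\n",
			  err);
	return err;
}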
/**
- * ice_pkg_buf_alloc_section
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- * @type: the section type value
- * @size: the size of the section to reserve (in bytes)
- *
- * Reserves memory in the buffer for a section's content and updates the
- * buffers' status accordingly. This routine returns a pointer to the first
- * byte of the section start within the buffer, which is used to fill in the
- * section contents.
- * Note: all package contents must be in Little Endian form.
- */
-static void *
-ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
-{
- struct ice_buf_hdr *buf;
- u16 sect_count;
- u16 data_end;
-
- if (!bld || !type || !size)
- return NULL;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
-
- /* check for enough space left in buffer */
- data_end = le16_to_cpu(buf->data_end);
-
- /* section start must align on 4 byte boundary */
- data_end = ALIGN(data_end, 4);
-
- if ((data_end + size) > ICE_MAX_S_DATA_END)
- return NULL;
-
- /* check for more available section table entries */
- sect_count = le16_to_cpu(buf->section_count);
- if (sect_count < bld->reserved_section_table_entries) {
- void *section_ptr = ((u8 *)buf) + data_end;
-
- buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
- buf->section_entry[sect_count].size = cpu_to_le16(size);
- buf->section_entry[sect_count].type = cpu_to_le32(type);
-
- data_end += size;
- buf->data_end = cpu_to_le16(data_end);
-
- buf->section_count = cpu_to_le16(sect_count + 1);
- return section_ptr;
- }
-
- /* no free section table entries */
- return NULL;
-}
-
-/**
- * ice_pkg_buf_get_active_sections
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Returns the number of active sections. Before using the package buffer
- * in an update package command, the caller should make sure that there is at
- * least one active section - otherwise, the buffer is not legal and should
- * not be used.
- * Note: all package contents must be in Little Endian form.
- */
-static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
-{
- struct ice_buf_hdr *buf;
-
- if (!bld)
- return 0;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
- return le16_to_cpu(buf->section_count);
-}
-
-/**
- * ice_pkg_buf
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Return a pointer to the buffer's header
- */
-static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
-{
- if (!bld)
- return NULL;
-
- return &bld->buf;
-}
-
-/**
- * ice_get_open_tunnel_port - retrieve an open tunnel port
- * @hw: pointer to the HW structure
- * @port: returns open port
- * @type: type of tunnel, can be TNL_LAST if it doesn't matter
- */
-bool
-ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
- enum ice_tunnel_type type)
-{
- bool res = false;
- u16 i;
-
- mutex_lock(&hw->tnl_lock);
-
- for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port &&
- (type == TNL_LAST || type == hw->tnl.tbl[i].type)) {
- *port = hw->tnl.tbl[i].port;
- res = true;
- break;
- }
-
- mutex_unlock(&hw->tnl_lock);
-
- return res;
-}
-
-/**
* ice_tunnel_idx_to_entry - convert linear index to the sparse one
* @hw: pointer to the HW structure
* @type: type of tunnel
@@ -1959,19 +440,19 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
* creating a package buffer with the tunnel info and issuing an update package
* command.
*/
-static enum ice_status
+static int
ice_create_tunnel(struct ice_hw *hw, u16 index,
enum ice_tunnel_type type, u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
+ int status = -ENOSPC;
mutex_lock(&hw->tnl_lock);
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_create_tunnel_end;
}
@@ -2030,26 +511,26 @@ ice_create_tunnel_end:
* targeting the specific updates requested and then performing an update
* package.
*/
-static enum ice_status
+static int
ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
+ int status = -ENOSPC;
mutex_lock(&hw->tnl_lock);
if (WARN_ON(!hw->tnl.tbl[index].valid ||
hw->tnl.tbl[index].type != type ||
hw->tnl.tbl[index].port != port)) {
- status = ICE_ERR_OUT_OF_RANGE;
+ status = -EIO;
goto ice_destroy_tunnel_end;
}
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_destroy_tunnel_end;
}
@@ -2097,7 +578,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
enum ice_tunnel_type tnl_type;
- enum ice_status status;
+ int status;
u16 index;
tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
@@ -2105,8 +586,8 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
if (status) {
- netdev_err(netdev, "Error adding UDP tunnel - %s\n",
- ice_stat_str(status));
+ netdev_err(netdev, "Error adding UDP tunnel - %d\n",
+ status);
return -EIO;
}
@@ -2121,15 +602,15 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
enum ice_tunnel_type tnl_type;
- enum ice_status status;
+ int status;
tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
ntohs(ti->port));
if (status) {
- netdev_err(netdev, "Error removing UDP tunnel - %s\n",
- ice_stat_str(status));
+ netdev_err(netdev, "Error removing UDP tunnel - %d\n",
+ status);
return -EIO;
}
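Illustration only (not part of this patch): because status is now a plain negative errno in both the add and remove paths, it could also be printed symbolically with the kernel's %pe format; this is an alternative to the %d the patch uses, not what the patch does.

	netdev_err(netdev, "Error removing UDP tunnel - %pe\n",
		   ERR_PTR(status));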
@@ -2145,17 +626,17 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
* @prot: variable to receive the protocol ID
* @off: variable to receive the protocol offset
*/
-enum ice_status
+int
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off)
{
struct ice_fv_word *fv_ext;
if (prof >= hw->blk[blk].es.count)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (fv_idx >= hw->blk[blk].es.fvw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
@@ -2178,11 +659,11 @@ ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
* PTG ID that contains it through the PTG parameter, with the value of
* ICE_DEFAULT_PTG (0) meaning it is part the default PTG.
*/
-static enum ice_status
+static int
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{
if (ptype >= ICE_XLT1_CNT || !ptg)
- return ICE_ERR_PARAM;
+ return -EINVAL;
*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
return 0;
@@ -2212,21 +693,21 @@ static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
* This function will remove the ptype from the specific PTG, and move it to
* the default PTG (ICE_DEFAULT_PTG).
*/
-static enum ice_status
+static int
ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
struct ice_ptg_ptype **ch;
struct ice_ptg_ptype *p;
if (ptype > ICE_XLT1_CNT - 1)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* Should not happen if .in_use is set, bad config */
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
- return ICE_ERR_CFG;
+ return -EIO;
/* find the ptype within this PTG, and bypass the link over it */
p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
@@ -2256,20 +737,20 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
*
* This function will either add or move a ptype to a particular PTG depending
* on if the ptype is already part of another group. Note that using a
- * a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
+ * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
* default PTG.
*/
-static enum ice_status
+static int
ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
- enum ice_status status;
u8 original_ptg;
+ int status;
if (ptype > ICE_XLT1_CNT - 1)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
if (status)
@@ -2371,7 +852,6 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
count++;
list_for_each_entry(tmp2, list2, list)
chk_count++;
- /* cppcheck-suppress knownConditionTrueFalse */
if (!count || count != chk_count)
return false;
@@ -2404,11 +884,11 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
* This function will lookup the VSI entry in the XLT2 list and return
* the VSI group its associated with.
*/
-static enum ice_status
+static int
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
{
if (!vsig || vsi >= ICE_MAX_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* As long as there's a default or valid VSIG associated with the input
* VSI, the function returns success. Any handling of VSIG will be
@@ -2473,7 +953,7 @@ static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
* for, the list must match exactly, including the order in which the
* characteristics are listed.
*/
-static enum ice_status
+static int
ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
struct list_head *chs, u16 *vsig)
{
@@ -2487,7 +967,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
return 0;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -2499,8 +979,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
* The function will remove all VSIs associated with the input VSIG and move
* them to the DEFAULT_VSIG and mark the VSIG available.
*/
-static enum ice_status
-ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+static int ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
struct ice_vsig_prof *dtmp, *del;
struct ice_vsig_vsi *vsi_cur;
@@ -2508,10 +987,10 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
idx = vsig & ICE_VSIG_IDX_M;
if (idx >= ICE_MAX_VSIGS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
hw->blk[blk].xlt2.vsig_tbl[idx].in_use = false;
@@ -2560,7 +1039,7 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
* The function will remove the input VSI from its VSI group and move it
* to the DEFAULT_VSIG.
*/
-static enum ice_status
+static int
ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
@@ -2569,10 +1048,10 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
idx = vsig & ICE_VSIG_IDX_M;
if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* entry already in default VSIG, don't have to remove */
if (idx == ICE_DEFAULT_VSIG)
@@ -2580,7 +1059,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
if (!(*vsi_head))
- return ICE_ERR_CFG;
+ return -EIO;
vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
vsi_cur = (*vsi_head);
@@ -2597,7 +1076,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
/* verify if VSI was removed from group list */
if (!vsi_cur)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
vsi_cur->vsig = ICE_DEFAULT_VSIG;
vsi_cur->changed = 1;
@@ -2618,24 +1097,24 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
* move the entry to the DEFAULT_VSIG, update the original VSIG and
* then move entry to the new VSIG.
*/
-static enum ice_status
+static int
ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_vsig_vsi *tmp;
- enum ice_status status;
u16 orig_vsig, idx;
+ int status;
idx = vsig & ICE_VSIG_IDX_M;
if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* if the VSIG is not in use and is not the default type, this VSIG
* doesn't exist.
*/
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
vsig != ICE_DEFAULT_VSIG)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
if (status)
@@ -2741,7 +1220,7 @@ ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
* @masks: masks for FV
* @prof_id: receives the profile ID
*/
-static enum ice_status
+static int
ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
{
@@ -2752,7 +1231,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
* field vector and mask. This will cause rule interference.
*/
if (blk == ICE_BLK_FD)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
for (i = 0; i < (u8)es->count; i++) {
u16 off = i * es->fvw;
@@ -2768,7 +1247,7 @@ ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
return 0;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -2821,14 +1300,14 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
* This function allocates a new entry in a Profile ID TCAM for a specific
* block.
*/
-static enum ice_status
+static int
ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
u16 *tcam_idx)
{
u16 res_type;
if (!ice_tcam_ent_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
}
@@ -2841,13 +1320,13 @@ ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
*
* This function frees an entry in a Profile ID TCAM for a specific block.
*/
-static enum ice_status
+static int
ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
{
u16 res_type;
if (!ice_tcam_ent_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_free_hw_res(hw, res_type, 1, &tcam_idx);
}
@@ -2861,15 +1340,14 @@ ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
* This function allocates a new profile ID, which also corresponds to a Field
* Vector (Extraction Sequence) entry.
*/
-static enum ice_status
-ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
+static int ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
{
- enum ice_status status;
u16 res_type;
u16 get_prof;
+ int status;
if (!ice_prof_id_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
status = ice_alloc_hw_res(hw, res_type, 1, false, &get_prof);
if (!status)
@@ -2886,14 +1364,13 @@ ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
*
* This function frees a profile ID, which also corresponds to a Field Vector.
*/
-static enum ice_status
-ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+static int ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
u16 tmp_prof_id = (u16)prof_id;
u16 res_type;
if (!ice_prof_id_rsrc_type(blk, &res_type))
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_free_hw_res(hw, res_type, 1, &tmp_prof_id);
}
@@ -2904,11 +1381,10 @@ ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
* @blk: the block from which to free the profile ID
* @prof_id: the profile ID for which to increment the reference count
*/
-static enum ice_status
-ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
+static int ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
if (prof_id > hw->blk[blk].es.count)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw->blk[blk].es.ref_count[prof_id]++;
@@ -3025,17 +1501,17 @@ static void ice_init_all_prof_masks(struct ice_hw *hw)
* @mask: the 16-bit mask
* @mask_idx: variable to receive the mask index
*/
-static enum ice_status
+static int
ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
u16 *mask_idx)
{
bool found_unused = false, found_copy = false;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
u16 unused_idx = 0, copy_idx = 0;
+ int status = -ENOSPC;
u16 i;
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->blk[blk].masks.lock);
@@ -3093,15 +1569,15 @@ err_ice_alloc_prof_mask:
* @blk: hardware block
* @mask_idx: index of mask
*/
-static enum ice_status
+static int
ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
{
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!(mask_idx >= hw->blk[blk].masks.first &&
mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
mutex_lock(&hw->blk[blk].masks.lock);
@@ -3135,14 +1611,14 @@ exit_ice_free_prof_mask:
* @blk: hardware block
* @prof_id: profile ID
*/
-static enum ice_status
+static int
ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
{
u32 mask_bm;
u16 i;
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
- return ICE_ERR_PARAM;
+ return -EINVAL;
mask_bm = hw->blk[blk].es.mask_ena[prof_id];
for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
@@ -3197,7 +1673,7 @@ static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
* @prof_id: profile ID
* @masks: masks
*/
-static enum ice_status
+static int
ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
u16 *masks)
{
@@ -3227,7 +1703,7 @@ ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
if (ena_mask & BIT(i))
ice_free_prof_mask(hw, blk, i);
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
}
/* enable the masks for this profile */
@@ -3269,11 +1745,11 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
* @blk: the block from which to free the profile ID
* @prof_id: the profile ID for which to decrement the reference count
*/
-static enum ice_status
+static int
ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
if (prof_id > hw->blk[blk].es.count)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (hw->blk[blk].es.ref_count[prof_id] > 0) {
if (!--hw->blk[blk].es.ref_count[prof_id]) {
@@ -3711,7 +2187,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
* ice_init_hw_tbls - init hardware table memory
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
+int ice_init_hw_tbls(struct ice_hw *hw)
{
u8 i;
@@ -3830,7 +2306,7 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
err:
ice_free_hw_tbls(hw);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -3846,7 +2322,7 @@ err:
* @nm_msk: never match mask
* @key: output of profile ID key
*/
-static enum ice_status
+static int
ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
@@ -3902,7 +2378,7 @@ ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
* @dc_msk: don't care mask
* @nm_msk: never match mask
*/
-static enum ice_status
+static int
ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
@@ -3910,7 +2386,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
{
struct ice_prof_tcam_entry;
- enum ice_status status;
+ int status;
status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
@@ -3929,7 +2405,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
* @vsig: VSIG to query
* @refs: pointer to variable to receive the reference count
*/
-static enum ice_status
+static int
ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
{
u16 idx = vsig & ICE_VSIG_IDX_M;
@@ -3938,7 +2414,7 @@ ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
*refs = 0;
if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
ptr = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
while (ptr) {
@@ -3979,7 +2455,7 @@ ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
struct ice_buf_build *bld, struct list_head *chgs)
{
@@ -3999,7 +2475,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
sizeof(p->es[0]));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->offset = cpu_to_le16(tmp->prof_id);
@@ -4017,7 +2493,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
struct ice_buf_build *bld, struct list_head *chgs)
{
@@ -4033,7 +2509,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
struct_size(p, entry, 1));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->entry[0].addr = cpu_to_le16(tmp->tcam_idx);
@@ -4053,7 +2529,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
struct list_head *chgs)
{
@@ -4069,7 +2545,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
struct_size(p, value, 1));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->offset = cpu_to_le16(tmp->ptype);
@@ -4085,7 +2561,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
struct list_head *chgs)
{
@@ -4104,7 +2580,7 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
struct_size(p, value, 1));
if (!p)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
p->count = cpu_to_le16(1);
p->offset = cpu_to_le16(tmp->vsi);
@@ -4124,18 +2600,18 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
* @blk: hardware block
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
struct list_head *chgs)
{
struct ice_buf_build *b;
struct ice_chs_chg *tmp;
- enum ice_status status;
u16 pkg_sects;
u16 xlt1 = 0;
u16 xlt2 = 0;
u16 tcam = 0;
u16 es = 0;
+ int status;
u16 sects;
/* count number of sections we need */
@@ -4167,7 +2643,7 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
/* Build update package buffer */
b = ice_pkg_buf_alloc(hw);
if (!b)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_pkg_buf_reserve_section(b, sects);
if (status)
@@ -4204,13 +2680,13 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
*/
pkg_sects = ice_pkg_buf_get_active_sections(b);
if (!pkg_sects || pkg_sects != sects) {
- status = ICE_ERR_INVAL_SIZE;
+ status = -EINVAL;
goto error_tmp;
}
/* update package */
status = ice_update_pkg(hw, ice_pkg_buf(b), 1);
- if (status == ICE_ERR_AQ_ERROR)
+ if (status == -EIO)
ice_debug(hw, ICE_DBG_INIT, "Unable to update HW profile\n");
error_tmp:
@@ -4276,7 +2752,7 @@ static const struct ice_fd_src_dst_pair ice_fd_pairs[] = {
* @prof_id: profile ID
* @es: extraction sequence (length of array is determined by the block)
*/
-static enum ice_status
+static int
ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
{
DECLARE_BITMAP(pair_list, ICE_FD_SRC_DST_PAIR_COUNT);
@@ -4308,7 +2784,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
for (j = 0; j < ICE_FD_SRC_DST_PAIR_COUNT; j++)
if (es[i].prot_id == ice_fd_pairs[j].prot_id &&
es[i].off == ice_fd_pairs[j].off) {
- set_bit(j, pair_list);
+ __set_bit(j, pair_list);
pair_start[j] = i;
}
}
@@ -4331,7 +2807,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
/* check for room */
if (first_free + 1 < (s8)ice_fd_pairs[index].count)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* place in extraction sequence */
for (k = 0; k < ice_fd_pairs[index].count; k++) {
@@ -4341,7 +2817,7 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
ice_fd_pairs[index].off + (k * 2);
if (k > first_free)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* keep track of non-relevant fields */
mask_sel |= BIT(first_free - k);
@@ -4452,7 +2928,7 @@ ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
* @attr: array of attributes that will be considered
* @attr_cnt: number of elements in the attribute array
*/
-static enum ice_status
+static int
ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
const struct ice_ptype_attributes *attr, u16 attr_cnt)
{
@@ -4468,11 +2944,11 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
&prof->attr[prof->ptg_cnt]);
if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
}
if (!found)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
return 0;
}
@@ -4493,7 +2969,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
* it will not be written until the first call to ice_add_flow that specifies
* the ID value used here.
*/
-enum ice_status
+int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks)
@@ -4501,9 +2977,9 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
- enum ice_status status;
u8 byte = 0;
u8 prof_id;
+ int status;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -4541,7 +3017,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
/* add profile info */
prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*prof), GFP_KERNEL);
if (!prof) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_prof;
}
@@ -4578,13 +3054,13 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
if (test_bit(ptg, ptgs_used))
continue;
- set_bit(ptg, ptgs_used);
+ __set_bit(ptg, ptgs_used);
/* Check to see if there are any attributes for
* this PTYPE, and add them if found.
*/
status = ice_add_prof_attrib(prof, ptg, ptype,
attr, attr_cnt);
- if (status == ICE_ERR_MAX_LIMIT)
+ if (status == -ENOSPC)
break;
if (status) {
/* This is simply a PTYPE/PTG with no
@@ -4661,14 +3137,13 @@ ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
* @blk: hardware block
* @idx: the index to release
*/
-static enum ice_status
-ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
+static int ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
{
/* Masks to invoke a never match entry */
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
- enum ice_status status;
+ int status;
/* write the TCAM entry */
status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
@@ -4688,11 +3163,11 @@ ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
* @blk: hardware block
* @prof: pointer to profile structure to remove
*/
-static enum ice_status
+static int
ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
struct ice_vsig_prof *prof)
{
- enum ice_status status;
+ int status;
u16 i;
for (i = 0; i < prof->tcam_count; i++)
@@ -4701,7 +3176,7 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
status = ice_rel_tcam_idx(hw, blk,
prof->tcam[i].tcam_idx);
if (status)
- return ICE_ERR_HW_TABLE;
+ return -EIO;
}
return 0;
@@ -4714,19 +3189,20 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
* @vsig: the VSIG to remove
* @chg: the change list
*/
-static enum ice_status
+static int
ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct list_head *chg)
{
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_vsi *vsi_cur;
struct ice_vsig_prof *d, *t;
- enum ice_status status;
/* remove TCAM entries */
list_for_each_entry_safe(d, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
list) {
+ int status;
+
status = ice_rem_prof_id(hw, blk, d);
if (status)
return status;
@@ -4748,7 +3224,7 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
p->type = ICE_VSIG_REM;
p->orig_vsig = vsig;
@@ -4771,18 +3247,19 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @hdl: profile handle indicating which profile to remove
* @chg: list to receive a record of changes
*/
-static enum ice_status
+static int
ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
struct list_head *chg)
{
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_prof *p, *t;
- enum ice_status status;
list_for_each_entry_safe(p, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
list)
if (p->profile_cookie == hdl) {
+ int status;
+
if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
/* this is the last profile, remove the VSIG */
return ice_rem_vsig(hw, blk, vsig, chg);
@@ -4795,7 +3272,7 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
return status;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -4804,12 +3281,11 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
* @blk: hardware block
* @id: profile tracking ID
*/
-static enum ice_status
-ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
+static int ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_chs_chg *del, *tmp;
- enum ice_status status;
struct list_head chg;
+ int status;
u16 i;
INIT_LIST_HEAD(&chg);
@@ -4845,16 +3321,16 @@ err_ice_rem_flow_all:
* previously created through ice_add_prof. If any existing entries
* are associated with this profile, they will be removed as well.
*/
-enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
+int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *pmap;
- enum ice_status status;
+ int status;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
pmap = ice_search_prof_id(hw, blk, id);
if (!pmap) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_rem_prof;
}
@@ -4881,20 +3357,20 @@ err_ice_rem_prof:
* @hdl: profile handle
* @chg: change list
*/
-static enum ice_status
+static int
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
struct list_head *chg)
{
- enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_chs_chg *p;
+ int status = 0;
u16 i;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
if (!map) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_get_prof;
}
@@ -4904,7 +3380,7 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p),
GFP_KERNEL);
if (!p) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_get_prof;
}
@@ -4936,7 +3412,7 @@ err_ice_get_prof:
*
* This routine makes a copy of the list of profiles in the specified VSIG.
*/
-static enum ice_status
+static int
ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct list_head *lst)
{
@@ -4964,7 +3440,7 @@ err_ice_get_profs_vsig:
devm_kfree(ice_hw_to_dev(hw), ent1);
}
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
/**
@@ -4974,25 +3450,25 @@ err_ice_get_profs_vsig:
* @lst: the list to be added to
* @hdl: profile handle of entry to add
*/
-static enum ice_status
+static int
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
struct list_head *lst, u64 hdl)
{
- enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *p;
+ int status = 0;
u16 i;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
map = ice_search_prof_id(hw, blk, hdl);
if (!map) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_add_prof_to_lst;
}
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_prof_to_lst;
}
@@ -5021,17 +3497,17 @@ err_ice_add_prof_to_lst:
* @vsig: the VSIG to move the VSI to
* @chg: the change list
*/
-static enum ice_status
+static int
ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
struct list_head *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
u16 orig_vsig;
+ int status;
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
if (!status)
@@ -5081,13 +3557,13 @@ ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct list_head *chg)
*
* This function appends an enable or disable TCAM entry in the change log
*/
-static enum ice_status
+static int
ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
u16 vsig, struct ice_tcam_inf *tcam,
struct list_head *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
+ int status;
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
@@ -5120,7 +3596,7 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
/* add TCAM to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
tcam->ptg, vsig, 0, tcam->attr.flags,
@@ -5154,13 +3630,13 @@ err_ice_prof_tcam_ena_dis:
* @vsig: the VSIG for which to adjust profile priorities
* @chg: the change list
*/
-static enum ice_status
+static int
ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct list_head *chg)
{
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_vsig_prof *t;
- enum ice_status status;
+ int status;
u16 idx;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -5209,7 +3685,7 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
}
/* keep track of used ptgs */
- set_bit(t->tcam[i].ptg, ptgs_used);
+ __set_bit(t->tcam[i].ptg, ptgs_used);
}
}
@@ -5225,7 +3701,7 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @rev: true to add entries to the end of the list
* @chg: the change list
*/
-static enum ice_status
+static int
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
bool rev, struct list_head *chg)
{
@@ -5233,26 +3709,26 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
- enum ice_status status = 0;
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
u16 vsig_idx, i;
+ int status = 0;
/* Error, if this VSIG already has this profile */
if (ice_has_prof_vsig(hw, blk, vsig, hdl))
- return ICE_ERR_ALREADY_EXISTS;
+ return -EEXIST;
/* new VSIG profile structure */
t = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*t), GFP_KERNEL);
if (!t)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* Get the details on the profile specified by the handle ID */
map = ice_search_prof_id(hw, blk, hdl);
if (!map) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto err_ice_add_prof_id_vsig;
}
@@ -5267,7 +3743,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
/* add TCAM to change list */
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_prof_id_vsig;
}
@@ -5337,21 +3813,21 @@ err_ice_add_prof_id_vsig:
* @hdl: the profile handle of the profile that will be added to the VSIG
* @chg: the change list
*/
-static enum ice_status
+static int
ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
struct list_head *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
u16 new_vsig;
+ int status;
p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL);
if (!p)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
new_vsig = ice_vsig_alloc(hw, blk);
if (!new_vsig) {
- status = ICE_ERR_HW_TABLE;
+ status = -EIO;
goto err_ice_create_prof_id_vsig;
}
@@ -5387,18 +3863,18 @@ err_ice_create_prof_id_vsig:
* @new_vsig: return of new VSIG
* @chg: the change list
*/
-static enum ice_status
+static int
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
struct list_head *lst, u16 *new_vsig,
struct list_head *chg)
{
struct ice_vsig_prof *t;
- enum ice_status status;
+ int status;
u16 vsig;
vsig = ice_vsig_alloc(hw, blk);
if (!vsig)
- return ICE_ERR_HW_TABLE;
+ return -EIO;
status = ice_move_vsi(hw, blk, vsi, vsig, chg);
if (status)
@@ -5428,8 +3904,8 @@ static bool
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
{
struct ice_vsig_prof *t;
- enum ice_status status;
struct list_head lst;
+ int status;
INIT_LIST_HEAD(&lst);
@@ -5459,14 +3935,14 @@ ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
* profile indicated by the ID parameter for the VSIs specified in the VSI
* array. Once successfully called, the flow will be enabled.
*/
-enum ice_status
+int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
struct ice_vsig_prof *tmp1, *del1;
struct ice_chs_chg *tmp, *del;
struct list_head union_lst;
- enum ice_status status;
struct list_head chg;
+ int status;
u16 vsig;
INIT_LIST_HEAD(&union_lst);
@@ -5492,7 +3968,7 @@ ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
* scenario
*/
if (ice_has_prof_vsig(hw, blk, vsig, hdl)) {
- status = ICE_ERR_ALREADY_EXISTS;
+ status = -EEXIST;
goto err_ice_add_prof_id_flow;
}
@@ -5600,7 +4076,7 @@ err_ice_add_prof_id_flow:
* @lst: list to remove the profile from
* @hdl: the profile handle indicating the profile to remove
*/
-static enum ice_status
+static int
ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
{
struct ice_vsig_prof *ent, *tmp;
@@ -5612,7 +4088,7 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
return 0;
}
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -5626,13 +4102,13 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct list_head *lst, u64 hdl)
* profile indicated by the ID parameter for the VSIs specified in the VSI
* array. Once successfully called, the flow will be disabled.
*/
-enum ice_status
+int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
struct ice_vsig_prof *tmp1, *del1;
struct ice_chs_chg *tmp, *del;
struct list_head chg, copy;
- enum ice_status status;
+ int status;
u16 vsig;
INIT_LIST_HEAD(&copy);
@@ -5727,7 +4203,7 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
}
}
} else {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
}
/* update hardware tables */
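Taken together, the ice_flex_pipe.c hunks above apply one consistent substitution: the driver-private enum ice_status return type becomes a plain int, and each ICE_ERR_* value is replaced by a standard errno. A minimal summary of the pairings that actually appear in these hunks (the ICE_ERR_* names come from the removed enum; no pairings beyond those shown are implied):
	ICE_ERR_NO_MEMORY      -> -ENOMEM
	ICE_ERR_DOES_NOT_EXIST -> -ENOENT
	ICE_ERR_MAX_LIMIT      -> -ENOSPC
	ICE_ERR_ALREADY_EXISTS -> -EEXIST
	ICE_ERR_INVAL_SIZE     -> -EINVAL
	ICE_ERR_HW_TABLE       -> -EIO
	ICE_ERR_AQ_ERROR       -> -EIO
	ICE_ERR_OUT_OF_RANGE   -> -EIO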
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index a2863f38fd1f..7af7c8e9aa4e 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -6,22 +6,10 @@
#include "ice_type.h"
-/* Package minimal version supported */
-#define ICE_PKG_SUPP_VER_MAJ 1
-#define ICE_PKG_SUPP_VER_MNR 3
-
-/* Package format version */
-#define ICE_PKG_FMT_VER_MAJ 1
-#define ICE_PKG_FMT_VER_MNR 0
-#define ICE_PKG_FMT_VER_UPD 0
-#define ICE_PKG_FMT_VER_DFT 0
-
-#define ICE_PKG_CNT 4
-
-enum ice_status
+int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
-enum ice_status
+int
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off);
void
@@ -29,9 +17,15 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
unsigned long *bm);
void
ice_init_prof_result_bm(struct ice_hw *hw);
-enum ice_status
-ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+int
+ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
unsigned long *bm, struct list_head *fv_list);
+int
+ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
+u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
+int
+ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd);
bool
ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port,
enum ice_tunnel_type type);
@@ -39,23 +33,34 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti);
+int ice_set_dvm_boost_entries(struct ice_hw *hw);
-enum ice_status
+/* Rx parser PTYPE functions */
+bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
+
+/* XLT2/VSI group functions */
+int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks);
-enum ice_status
+int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
-enum ice_status
+int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
-enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
-enum ice_status
+enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
+enum ice_ddp_state
ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
-enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
+bool ice_is_init_pkg_successful(enum ice_ddp_state state);
+int ice_init_hw_tbls(struct ice_hw *hw);
void ice_free_seg(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
-enum ice_status
-ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
+int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
+struct ice_buf_build *
+ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
+ void **section);
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
+
#endif /* _ICE_FLEX_PIPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 0f572a36d021..4f42e14ed3ae 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -3,204 +3,26 @@
#ifndef _ICE_FLEX_TYPE_H_
#define _ICE_FLEX_TYPE_H_
-
-#define ICE_FV_OFFSET_INVAL 0x1FF
-
-/* Extraction Sequence (Field Vector) Table */
-struct ice_fv_word {
- u8 prot_id;
- u16 off; /* Offset within the protocol header */
- u8 resvrd;
-} __packed;
-
-#define ICE_MAX_NUM_PROFILES 256
-
-#define ICE_MAX_FV_WORDS 48
-struct ice_fv {
- struct ice_fv_word ew[ICE_MAX_FV_WORDS];
-};
-
-/* Package and segment headers and tables */
-struct ice_pkg_hdr {
- struct ice_pkg_ver pkg_format_ver;
- __le32 seg_count;
- __le32 seg_offset[];
-};
-
-/* generic segment */
-struct ice_generic_seg_hdr {
-#define SEGMENT_TYPE_METADATA 0x00000001
-#define SEGMENT_TYPE_ICE 0x00000010
- __le32 seg_type;
- struct ice_pkg_ver seg_format_ver;
- __le32 seg_size;
- char seg_id[ICE_PKG_NAME_SIZE];
-};
-
-/* ice specific segment */
-
-union ice_device_id {
- struct {
- __le16 device_id;
- __le16 vendor_id;
- } dev_vend_id;
- __le32 id;
-};
-
-struct ice_device_id_entry {
- union ice_device_id device;
- union ice_device_id sub_device;
-};
-
-struct ice_seg {
- struct ice_generic_seg_hdr hdr;
- __le32 device_table_count;
- struct ice_device_id_entry device_table[];
-};
-
-struct ice_nvm_table {
- __le32 table_count;
- __le32 vers[];
-};
-
-struct ice_buf {
-#define ICE_PKG_BUF_SIZE 4096
- u8 buf[ICE_PKG_BUF_SIZE];
-};
-
-struct ice_buf_table {
- __le32 buf_count;
- struct ice_buf buf_array[];
-};
-
-/* global metadata specific segment */
-struct ice_global_metadata_seg {
- struct ice_generic_seg_hdr hdr;
- struct ice_pkg_ver pkg_ver;
- __le32 rsvd;
- char pkg_name[ICE_PKG_NAME_SIZE];
-};
-
-#define ICE_MIN_S_OFF 12
-#define ICE_MAX_S_OFF 4095
-#define ICE_MIN_S_SZ 1
-#define ICE_MAX_S_SZ 4084
-
-/* section information */
-struct ice_section_entry {
- __le32 type;
- __le16 offset;
- __le16 size;
-};
-
-#define ICE_MIN_S_COUNT 1
-#define ICE_MAX_S_COUNT 511
-#define ICE_MIN_S_DATA_END 12
-#define ICE_MAX_S_DATA_END 4096
-
-#define ICE_METADATA_BUF 0x80000000
-
-struct ice_buf_hdr {
- __le16 section_count;
- __le16 data_end;
- struct ice_section_entry section_entry[];
-};
-
-#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
- struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
- (ent_sz))
-
-/* ice package section IDs */
-#define ICE_SID_METADATA 1
-#define ICE_SID_XLT0_SW 10
-#define ICE_SID_XLT_KEY_BUILDER_SW 11
-#define ICE_SID_XLT1_SW 12
-#define ICE_SID_XLT2_SW 13
-#define ICE_SID_PROFID_TCAM_SW 14
-#define ICE_SID_PROFID_REDIR_SW 15
-#define ICE_SID_FLD_VEC_SW 16
-#define ICE_SID_CDID_KEY_BUILDER_SW 17
-
-struct ice_meta_sect {
- struct ice_pkg_ver ver;
-#define ICE_META_SECT_NAME_SIZE 28
- char name[ICE_META_SECT_NAME_SIZE];
- __le32 track_id;
-};
-
-#define ICE_SID_CDID_REDIR_SW 18
-
-#define ICE_SID_XLT0_ACL 20
-#define ICE_SID_XLT_KEY_BUILDER_ACL 21
-#define ICE_SID_XLT1_ACL 22
-#define ICE_SID_XLT2_ACL 23
-#define ICE_SID_PROFID_TCAM_ACL 24
-#define ICE_SID_PROFID_REDIR_ACL 25
-#define ICE_SID_FLD_VEC_ACL 26
-#define ICE_SID_CDID_KEY_BUILDER_ACL 27
-#define ICE_SID_CDID_REDIR_ACL 28
-
-#define ICE_SID_XLT0_FD 30
-#define ICE_SID_XLT_KEY_BUILDER_FD 31
-#define ICE_SID_XLT1_FD 32
-#define ICE_SID_XLT2_FD 33
-#define ICE_SID_PROFID_TCAM_FD 34
-#define ICE_SID_PROFID_REDIR_FD 35
-#define ICE_SID_FLD_VEC_FD 36
-#define ICE_SID_CDID_KEY_BUILDER_FD 37
-#define ICE_SID_CDID_REDIR_FD 38
-
-#define ICE_SID_XLT0_RSS 40
-#define ICE_SID_XLT_KEY_BUILDER_RSS 41
-#define ICE_SID_XLT1_RSS 42
-#define ICE_SID_XLT2_RSS 43
-#define ICE_SID_PROFID_TCAM_RSS 44
-#define ICE_SID_PROFID_REDIR_RSS 45
-#define ICE_SID_FLD_VEC_RSS 46
-#define ICE_SID_CDID_KEY_BUILDER_RSS 47
-#define ICE_SID_CDID_REDIR_RSS 48
-
-#define ICE_SID_RXPARSER_BOOST_TCAM 56
-#define ICE_SID_TXPARSER_BOOST_TCAM 66
-
-#define ICE_SID_XLT0_PE 80
-#define ICE_SID_XLT_KEY_BUILDER_PE 81
-#define ICE_SID_XLT1_PE 82
-#define ICE_SID_XLT2_PE 83
-#define ICE_SID_PROFID_TCAM_PE 84
-#define ICE_SID_PROFID_REDIR_PE 85
-#define ICE_SID_FLD_VEC_PE 86
-#define ICE_SID_CDID_KEY_BUILDER_PE 87
-#define ICE_SID_CDID_REDIR_PE 88
-
-/* Label Metadata section IDs */
-#define ICE_SID_LBL_FIRST 0x80000010
-#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
-/* The following define MUST be updated to reflect the last label section ID */
-#define ICE_SID_LBL_LAST 0x80000038
-
-enum ice_block {
- ICE_BLK_SW = 0,
- ICE_BLK_ACL,
- ICE_BLK_FD,
- ICE_BLK_RSS,
- ICE_BLK_PE,
- ICE_BLK_COUNT
-};
-
-enum ice_sect {
- ICE_XLT0 = 0,
- ICE_XLT_KB,
- ICE_XLT1,
- ICE_XLT2,
- ICE_PROF_TCAM,
- ICE_PROF_REDIR,
- ICE_VEC_TBL,
- ICE_CDID_KB,
- ICE_CDID_REDIR,
- ICE_SECT_COUNT
-};
-
+#include "ice_ddp.h"
+
+/* Packet Type (PTYPE) values */
+#define ICE_PTYPE_MAC_PAY 1
+#define ICE_PTYPE_IPV4_PAY 23
+#define ICE_PTYPE_IPV4_UDP_PAY 24
+#define ICE_PTYPE_IPV4_TCP_PAY 26
+#define ICE_PTYPE_IPV4_SCTP_PAY 27
+#define ICE_PTYPE_IPV6_PAY 89
+#define ICE_PTYPE_IPV6_UDP_PAY 90
+#define ICE_PTYPE_IPV6_TCP_PAY 92
+#define ICE_PTYPE_IPV6_SCTP_PAY 93
+#define ICE_MAC_IPV4_ESP 160
+#define ICE_MAC_IPV6_ESP 161
+#define ICE_MAC_IPV4_AH 162
+#define ICE_MAC_IPV6_AH 163
+#define ICE_MAC_IPV4_NAT_T_ESP 164
+#define ICE_MAC_IPV6_NAT_T_ESP 165
+#define ICE_MAC_IPV4_GTPU 329
+#define ICE_MAC_IPV6_GTPU 330
#define ICE_MAC_IPV4_GTPU_IPV4_FRAG 331
#define ICE_MAC_IPV4_GTPU_IPV4_PAY 332
#define ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY 333
@@ -221,6 +43,10 @@ enum ice_sect {
#define ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY 348
#define ICE_MAC_IPV6_GTPU_IPV6_TCP 349
#define ICE_MAC_IPV6_GTPU_IPV6_ICMPV6 350
+#define ICE_MAC_IPV4_PFCP_SESSION 352
+#define ICE_MAC_IPV6_PFCP_SESSION 354
+#define ICE_MAC_IPV4_L2TPV3 360
+#define ICE_MAC_IPV6_L2TPV3 361
/* Attributes that can modify PTYPE definitions.
*
@@ -259,121 +85,14 @@ struct ice_ptype_attributes {
enum ice_ptype_attrib_type attrib;
};
-/* package labels */
-struct ice_label {
- __le16 value;
-#define ICE_PKG_LABEL_SIZE 64
- char name[ICE_PKG_LABEL_SIZE];
-};
-
-struct ice_label_section {
- __le16 count;
- struct ice_label label[];
-};
-
-#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
- struct_size((struct ice_label_section *)0, label, 1) - \
- sizeof(struct ice_label), sizeof(struct ice_label))
-
-struct ice_sw_fv_section {
- __le16 count;
- __le16 base_offset;
- struct ice_fv fv[];
-};
-
-struct ice_sw_fv_list_entry {
- struct list_head list_entry;
- u32 profile_id;
- struct ice_fv *fv_ptr;
-};
-
-/* The BOOST TCAM stores the match packet header in reverse order, meaning
- * the fields are reversed; in addition, this means that the normally big endian
- * fields of the packet are now little endian.
- */
-struct ice_boost_key_value {
-#define ICE_BOOST_REMAINING_HV_KEY 15
- u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
- __le16 hv_dst_port_key;
- __le16 hv_src_port_key;
- u8 tcam_search_key;
-} __packed;
-
-struct ice_boost_key {
- struct ice_boost_key_value key;
- struct ice_boost_key_value key2;
-};
-
-/* package Boost TCAM entry */
-struct ice_boost_tcam_entry {
- __le16 addr;
- __le16 reserved;
- /* break up the 40 bytes of key into different fields */
- struct ice_boost_key key;
- u8 boost_hit_index_group;
- /* The following contains bitfields which are not on byte boundaries.
- * These fields are currently unused by driver software.
- */
-#define ICE_BOOST_BIT_FIELDS 43
- u8 bit_fields[ICE_BOOST_BIT_FIELDS];
-};
-
-struct ice_boost_tcam_section {
- __le16 count;
- __le16 reserved;
- struct ice_boost_tcam_entry tcam[];
-};
-
-#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
- struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \
- sizeof(struct ice_boost_tcam_entry), \
- sizeof(struct ice_boost_tcam_entry))
-
-struct ice_xlt1_section {
- __le16 count;
- __le16 offset;
- u8 value[];
-};
-
-struct ice_xlt2_section {
- __le16 count;
- __le16 offset;
- __le16 value[];
-};
-
-struct ice_prof_redir_section {
- __le16 count;
- __le16 offset;
- u8 redir_value[];
-};
-
-/* package buffer building */
-
-struct ice_buf_build {
- struct ice_buf buf;
- u16 reserved_section_table_entries;
-};
-
-struct ice_pkg_enum {
- struct ice_buf_table *buf_table;
- u32 buf_idx;
-
- u32 type;
- struct ice_buf_hdr *buf;
- u32 sect_idx;
- void *sect;
- u32 sect_type;
-
- u32 entry_idx;
- void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
-};
-
/* Tunnel enabling */
enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
TNL_GRETAP,
+ TNL_GTPC,
+ TNL_GTPU,
__TNL_TYPE_CNT,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
@@ -400,6 +119,19 @@ struct ice_tunnel_table {
u16 valid_count[__TNL_TYPE_CNT];
};
+struct ice_dvm_entry {
+ u16 boost_addr;
+ u16 enable;
+ struct ice_boost_tcam_entry *boost_entry;
+};
+
+#define ICE_DVM_MAX_ENTRIES 48
+
+struct ice_dvm_table {
+ struct ice_dvm_entry tbl[ICE_DVM_MAX_ENTRIES];
+ u16 count;
+};
+
struct ice_pkg_es {
__le16 count;
__le16 offset;
@@ -617,7 +349,35 @@ enum ice_prof_type {
ICE_PROF_NON_TUN = 0x1,
ICE_PROF_TUN_UDP = 0x2,
ICE_PROF_TUN_GRE = 0x4,
- ICE_PROF_TUN_ALL = 0x6,
+ ICE_PROF_TUN_GTPU = 0x8,
+ ICE_PROF_TUN_GTPC = 0x10,
+ ICE_PROF_TUN_ALL = 0x1E,
ICE_PROF_ALL = 0xFF,
};
+
+/* Number of bits/bytes contained in meta init entry. Note, this should be a
+ * multiple of 32 bits.
+ */
+#define ICE_META_INIT_BITS 192
+#define ICE_META_INIT_DW_CNT (ICE_META_INIT_BITS / (sizeof(__le32) * \
+ BITS_PER_BYTE))
+
+/* The meta init Flag field starts at this bit */
+#define ICE_META_FLAGS_ST 123
+
+/* The entry and bit to check for Double VLAN Mode (DVM) support */
+#define ICE_META_VLAN_MODE_ENTRY 0
+#define ICE_META_FLAG_VLAN_MODE 60
+#define ICE_META_VLAN_MODE_BIT (ICE_META_FLAGS_ST + \
+ ICE_META_FLAG_VLAN_MODE)
+
+struct ice_meta_init_entry {
+ __le32 bm[ICE_META_INIT_DW_CNT];
+};
+
+struct ice_meta_init_section {
+ __le16 count;
+ __le16 offset;
+ struct ice_meta_init_entry entry;
+};
#endif /* _ICE_FLEX_TYPE_H_ */
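As a quick sanity check on the ice_flex_type.h hunk above, the new ICE_PROF_TUN_ALL value is simply the OR of the four tunnel profile bits it now covers: 0x2 | 0x4 | 0x8 | 0x10 = 0x1E, with ICE_PROF_ALL left as the 0xFF catch-all.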
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index f160672448a0..ef103e47a8dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -3,6 +3,7 @@
#include "ice_common.h"
#include "ice_flow.h"
+#include <net/gre.h>
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
@@ -609,8 +610,6 @@ struct ice_flow_prof_params {
ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
ICE_FLOW_SEG_HDR_NAT_T_ESP)
-#define ICE_FLOW_SEG_HDRS_L2_MASK \
- (ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK \
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
@@ -625,8 +624,7 @@ struct ice_flow_prof_params {
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
*/
-static enum ice_status
-ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
+static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
{
u8 i;
@@ -634,12 +632,12 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
/* Multiple L3 headers */
if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK &&
!is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L3_MASK))
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Multiple L4 headers */
if (segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK &&
!is_power_of_2(segs[i].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK))
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
return 0;
@@ -700,8 +698,7 @@ static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
* This function identifies the packet types associated with the protocol
* headers being present in packet segments of the specified flow profile.
*/
-static enum ice_status
-ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
+static int ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
struct ice_flow_prof *prof;
u8 i;
@@ -898,7 +895,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
* field. It then allocates one or more extraction sequence entries for the
* given field, and fills the entries with protocol ID and offset information.
*/
-static enum ice_status
+static int
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg, enum ice_flow_field fld, u64 match)
{
@@ -1035,7 +1032,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
prot_id = ICE_PROT_GRE_OF;
break;
default:
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
}
/* Each extraction sequence entry is a word in size, and extracts a
@@ -1073,7 +1070,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* does not exceed the block's capability
*/
if (params->es_cnt >= fv_words)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse)
@@ -1099,7 +1096,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* @params: information about the flow to be processed
* @seg: index of packet segment whose raw fields are to be extracted
*/
-static enum ice_status
+static int
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg)
{
@@ -1112,12 +1109,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
if (params->prof->segs[seg].raws_cnt >
ARRAY_SIZE(params->prof->segs[seg].raws))
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* Offsets within the segment headers are not supported */
hdrs_sz = ice_flow_calc_seg_sz(params, seg);
if (!hdrs_sz)
- return ICE_ERR_PARAM;
+ return -EINVAL;
fv_words = hw->blk[params->blk].es.fvw;
@@ -1150,7 +1147,7 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
*/
if (params->es_cnt >= hw->blk[params->blk].es.count ||
params->es_cnt >= ICE_MAX_FV_WORDS)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
/* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse)
@@ -1176,12 +1173,12 @@ ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
* This function iterates through all matched fields in the given segments, and
* creates an extraction sequence for the fields.
*/
-static enum ice_status
+static int
ice_flow_create_xtrct_seq(struct ice_hw *hw,
struct ice_flow_prof_params *params)
{
struct ice_flow_prof *prof = params->prof;
- enum ice_status status = 0;
+ int status = 0;
u8 i;
for (i = 0; i < prof->segs_cnt; i++) {
@@ -1210,10 +1207,10 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
*/
-static enum ice_status
+static int
ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
{
- enum ice_status status;
+ int status;
status = ice_flow_proc_seg_hdrs(params);
if (status)
@@ -1229,7 +1226,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
status = 0;
break;
default:
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
}
return status;
@@ -1329,12 +1326,12 @@ ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
* @blk: classification stage
* @entry: flow entry to be removed
*/
-static enum ice_status
+static int
ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
struct ice_flow_entry *entry)
{
if (!entry)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
list_del(&entry->l_entry);
@@ -1355,27 +1352,27 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
*
* Assumption: the caller has acquired the lock to the profile list
*/
-static enum ice_status
+static int
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
enum ice_flow_dir dir, u64 prof_id,
struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof)
{
struct ice_flow_prof_params *params;
- enum ice_status status;
+ int status;
u8 i;
if (!prof)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
params->prof = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*params->prof),
GFP_KERNEL);
if (!params->prof) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto free_params;
}
@@ -1432,11 +1429,11 @@ free_params:
*
* Assumption: the caller has acquired the lock to the profile list
*/
-static enum ice_status
+static int
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof)
{
- enum ice_status status;
+ int status;
/* Remove all remaining flow entries before removing the flow profile */
if (!list_empty(&prof->entries)) {
@@ -1474,11 +1471,11 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+static int
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
- enum ice_status status = 0;
+ int status = 0;
if (!test_bit(vsi_handle, prof->vsis)) {
status = ice_add_prof_id_flow(hw, blk,
@@ -1505,11 +1502,11 @@ ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+static int
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
- enum ice_status status = 0;
+ int status = 0;
if (test_bit(vsi_handle, prof->vsis)) {
status = ice_rem_prof_id_flow(hw, blk,
@@ -1536,21 +1533,21 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
* @segs_cnt: number of packet segments provided
* @prof: stores the returned flow profile added
*/
-enum ice_status
+int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof)
{
- enum ice_status status;
+ int status;
if (segs_cnt > ICE_FLOW_SEG_MAX)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
if (!segs_cnt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (!segs)
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
status = ice_flow_val_hdrs(segs, segs_cnt);
if (status)
@@ -1574,17 +1571,16 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
* @blk: the block for which the flow profile is to be removed
* @prof_id: unique ID of the flow profile to be removed
*/
-enum ice_status
-ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
+int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
struct ice_flow_prof *prof;
- enum ice_status status;
+ int status;
mutex_lock(&hw->fl_profs_locks[blk]);
prof = ice_flow_find_prof_id(hw, blk, prof_id);
if (!prof) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto out;
}
@@ -1608,34 +1604,34 @@ out:
* @data: pointer to a data buffer containing flow entry's match values/masks
* @entry_h: pointer to buffer that receives the new flow entry's handle
*/
-enum ice_status
+int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u64 entry_id, u16 vsi_handle, enum ice_flow_priority prio,
void *data, u64 *entry_h)
{
struct ice_flow_entry *e = NULL;
struct ice_flow_prof *prof;
- enum ice_status status;
+ int status;
/* No flow entry data is expected for RSS */
if (!entry_h || (!data && blk != ICE_BLK_RSS))
- return ICE_ERR_BAD_PTR;
+ return -EINVAL;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->fl_profs_locks[blk]);
prof = ice_flow_find_prof_id(hw, blk, prof_id);
if (!prof) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
} else {
/* Allocate memory for the entry being added and associate
* the VSI to the found flow profile
*/
e = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*e), GFP_KERNEL);
if (!e)
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
else
status = ice_flow_assoc_prof(hw, blk, prof, vsi_handle);
}
@@ -1654,7 +1650,7 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
case ICE_BLK_RSS:
break;
default:
- status = ICE_ERR_NOT_IMPL;
+ status = -EOPNOTSUPP;
goto out;
}
@@ -1680,15 +1676,14 @@ out:
* @blk: classification stage
* @entry_h: handle to the flow entry to be removed
*/
-enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
- u64 entry_h)
+int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h)
{
struct ice_flow_entry *entry;
struct ice_flow_prof *prof;
- enum ice_status status = 0;
+ int status = 0;
if (entry_h == ICE_FLOW_ENTRY_HANDLE_INVAL)
- return ICE_ERR_PARAM;
+ return -EINVAL;
entry = ICE_FLOW_ENTRY_PTR(entry_h);
@@ -1812,6 +1807,57 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
seg->raws_cnt++;
}
+/**
+ * ice_flow_rem_vsi_prof - remove VSI from flow profile
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: software VSI handle
+ * @prof_id: unique ID to identify this flow profile
+ *
+ * This function removes the flow entries associated with the input
+ * VSI handle and disassociates the VSI from the flow profile.
+ */
+int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id)
+{
+ struct ice_flow_prof *prof;
+ int status = 0;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return -EINVAL;
+
+ /* find flow profile pointer with input package block and profile ID */
+ prof = ice_flow_find_prof_id(hw, ICE_BLK_FD, prof_id);
+ if (!prof) {
+ ice_debug(hw, ICE_DBG_PKG, "Cannot find flow profile id=%llu\n",
+ prof_id);
+ return -ENOENT;
+ }
+
+ /* Remove all remaining flow entries before removing the flow profile */
+ if (!list_empty(&prof->entries)) {
+ struct ice_flow_entry *e, *t;
+
+ mutex_lock(&prof->entries_lock);
+ list_for_each_entry_safe(e, t, &prof->entries, l_entry) {
+ if (e->vsi_handle != vsi_handle)
+ continue;
+
+ status = ice_flow_rem_entry_sync(hw, ICE_BLK_FD, e);
+ if (status)
+ break;
+ }
+ mutex_unlock(&prof->entries_lock);
+ }
+ if (status)
+ return status;
+
+ /* disassociate the flow profile from sw VSI handle */
+ status = ice_flow_disassoc_prof(hw, ICE_BLK_FD, prof, vsi_handle);
+ if (status)
+ ice_debug(hw, ICE_DBG_PKG, "ice_flow_disassoc_prof() failed with status=%d\n",
+ status);
+ return status;
+}
+
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
@@ -1836,7 +1882,7 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
* header value to set flow field segment for further use in flow
* profile entry or removal.
*/
-static enum ice_status
+static int
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
u32 flow_hdr)
{
@@ -1853,15 +1899,15 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
- return ICE_ERR_PARAM;
+ return -EINVAL;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
if (val && !is_power_of_2(val))
- return ICE_ERR_CFG;
+ return -EIO;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L4_MASKS);
if (val && !is_power_of_2(val))
- return ICE_ERR_CFG;
+ return -EIO;
return 0;
}
@@ -1899,14 +1945,14 @@ void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
* the VSI from that profile. If the flow profile has no VSIs it will
* be removed.
*/
-enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *p, *t;
- enum ice_status status = 0;
+ int status = 0;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (list_empty(&hw->fl_profs[blk]))
return 0;
@@ -1966,7 +2012,7 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
struct ice_rss_cfg *r, *rss_cfg;
@@ -1981,7 +2027,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
rss_cfg = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rss_cfg),
GFP_KERNEL);
if (!rss_cfg)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
rss_cfg->hashed_flds = prof->segs[prof->segs_cnt - 1].match;
rss_cfg->packet_hdr = prof->segs[prof->segs_cnt - 1].hdrs;
@@ -2022,21 +2068,21 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs, u8 segs_cnt)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *prof = NULL;
struct ice_flow_seg_info *segs;
- enum ice_status status;
+ int status;
if (!segs_cnt || segs_cnt > ICE_FLOW_SEG_MAX)
- return ICE_ERR_PARAM;
+ return -EINVAL;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
@@ -2128,15 +2174,15 @@ exit:
* the input fields to hash on, the flow type and use the VSI number to add
* a flow entry to the profile.
*/
-enum ice_status
+int
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs)
{
- enum ice_status status;
+ int status;
if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->rss_locks);
status = ice_add_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
@@ -2159,18 +2205,18 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs, u8 segs_cnt)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_seg_info *segs;
struct ice_flow_prof *prof;
- enum ice_status status;
+ int status;
segs = kcalloc(segs_cnt, sizeof(*segs), GFP_KERNEL);
if (!segs)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Construct the packet segment info from the hashed fields */
status = ice_flow_set_rss_seg_info(&segs[segs_cnt - 1], hashed_flds,
@@ -2182,7 +2228,7 @@ ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
vsi_handle,
ICE_FLOW_FIND_PROF_CHK_FLDS);
if (!prof) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto out;
}
@@ -2216,15 +2262,15 @@ out:
* removed. Calls are made to underlying flow APIs which will in
* turn build or update buffers for RSS XLT1 section.
*/
-enum ice_status __maybe_unused
+int __maybe_unused
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs)
{
- enum ice_status status;
+ int status;
if (hashed_flds == ICE_HASH_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->rss_locks);
status = ice_rem_rss_cfg_sync(hw, vsi_handle, hashed_flds, addl_hdrs,
@@ -2279,20 +2325,19 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
* message, convert it to ICE-compatible values, and configure RSS flow
* profiles.
*/
-enum ice_status
-ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
{
- enum ice_status status = 0;
+ int status = 0;
u64 hash_flds;
if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Make sure no unsupported bits are specified */
if (avf_hash & ~(ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS |
ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS))
- return ICE_ERR_CFG;
+ return -EIO;
hash_flds = avf_hash;
@@ -2352,7 +2397,7 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
}
if (rss_hash == ICE_HASH_INVALID)
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
status = ice_add_rss_cfg(hw, vsi_handle, rss_hash,
ICE_FLOW_SEG_HDR_NONE);
@@ -2368,13 +2413,13 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
*/
-enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
- enum ice_status status = 0;
struct ice_rss_cfg *r;
+ int status = 0;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&hw->rss_locks);
list_for_each_entry(r, &hw->rss_list_head, l_entry) {
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 2a2d8c1536cb..b465d27d9b80 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -4,6 +4,8 @@
#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_
+#include "ice_flex_type.h"
+
#define ICE_FLOW_ENTRY_HANDLE_INVAL 0
#define ICE_FLOW_FLD_OFF_INVAL 0xffff
@@ -383,33 +385,31 @@ struct ice_rss_cfg {
u32 packet_hdr;
};
-enum ice_status
+int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_prof **prof);
-enum ice_status
-ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
-enum ice_status
+int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id);
+int
ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u64 entry_id, u16 vsi, enum ice_flow_priority prio,
void *data, u64 *entry_h);
-enum ice_status
-ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
+int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h);
void
ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
u16 val_loc, u16 mask_loc, u16 last_loc, bool range);
void
ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
u16 val_loc, u16 mask_loc);
+int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
-ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
-enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
+int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
+int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+int
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
-enum ice_status
+int
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds,
u32 addl_hdrs);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
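With the ice_flow.h prototypes above returning plain int, callers can rely on ordinary kernel error handling rather than translating enum ice_status. A minimal, hypothetical sketch of such a caller follows; only ice_add_rss_cfg(), ice_rem_rss_cfg() and their signatures come from the header above, while the helper name and the ICE_FLOW_HASH_IPV4 field mask are illustrative assumptions:
	/* Hypothetical helper: configure, then remove, an IPv4 address hash
	 * for one VSI using the int-returning RSS flow API.
	 */
	static int example_cfg_ipv4_rss(struct ice_hw *hw, u16 vsi_handle)
	{
		int err;

		/* hash on the IPv4 source/destination addresses */
		err = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
				      ICE_FLOW_SEG_HDR_IPV4);
		if (err)
			return err;

		/* ... later, tear the same configuration down again ... */
		return ice_rem_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
				       ICE_FLOW_SEG_HDR_IPV4);
	}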
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index c2e78eaf4ccb..aff7a141c30d 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -47,12 +47,105 @@ ice_fltr_add_entry_to_list(struct device *dev, struct ice_fltr_info *info,
}
/**
+ * ice_fltr_set_vlan_vsi_promisc
+ * @hw: pointer to the hardware structure
+ * @vsi: the VSI being configured
+ * @promisc_mask: mask of promiscuous config bits
+ *
+ * Set VSI with all associated VLANs to given promiscuous mode(s)
+ */
+int
+ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask)
+{
+ struct ice_pf *pf = hw->back;
+ int result;
+
+ result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+ if (result && result != -EEXIST)
+ dev_err(ice_pf_to_dev(pf),
+ "Error setting promisc mode on VSI %i (rc=%d)\n",
+ vsi->vsi_num, result);
+
+ return result;
+}
+
+/**
+ * ice_fltr_clear_vlan_vsi_promisc
+ * @hw: pointer to the hardware structure
+ * @vsi: the VSI being configured
+ * @promisc_mask: mask of promiscuous config bits
+ *
+ * Clear VSI with all associated VLANs to given promiscuous mode(s)
+ */
+int
+ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask)
+{
+ struct ice_pf *pf = hw->back;
+ int result;
+
+ result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+ if (result && result != -EEXIST)
+ dev_err(ice_pf_to_dev(pf),
+ "Error clearing promisc mode on VSI %i (rc=%d)\n",
+ vsi->vsi_num, result);
+
+ return result;
+}
+
+/**
+ * ice_fltr_clear_vsi_promisc - clear specified promiscuous mode(s)
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to clear mode
+ * @promisc_mask: mask of promiscuous config bits to clear
+ * @vid: VLAN ID to clear VLAN promiscuous
+ */
+int
+ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid)
+{
+ struct ice_pf *pf = hw->back;
+ int result;
+
+ result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+ if (result && result != -EEXIST)
+ dev_err(ice_pf_to_dev(pf),
+ "Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n",
+ ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
+
+ return result;
+}
+
+/**
+ * ice_fltr_set_vsi_promisc - set given VSI to given promiscuous mode(s)
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @vid: VLAN ID to set VLAN promiscuous
+ */
+int
+ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid)
+{
+ struct ice_pf *pf = hw->back;
+ int result;
+
+ result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+ if (result && result != -EEXIST)
+ dev_err(ice_pf_to_dev(pf),
+ "Error setting promisc mode on VSI %i for VID %u (rc=%d)\n",
+ ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
+
+ return result;
+}
+
+/**
* ice_fltr_add_mac_list - add list of MAC filters
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-enum ice_status
-ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
+int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_add_mac(&vsi->back->hw, list);
}
@@ -62,8 +155,7 @@ ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-enum ice_status
-ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
+int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_remove_mac(&vsi->back->hw, list);
}
@@ -73,8 +165,7 @@ ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
-ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
+static int ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_add_vlan(&vsi->back->hw, list);
}
@@ -84,7 +175,7 @@ ice_fltr_add_vlan_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
+static int
ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_remove_vlan(&vsi->back->hw, list);
@@ -95,8 +186,7 @@ ice_fltr_remove_vlan_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
-ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
+static int ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_add_eth_mac(&vsi->back->hw, list);
}
@@ -106,8 +196,7 @@ ice_fltr_add_eth_list(struct ice_vsi *vsi, struct list_head *list)
* @vsi: pointer to VSI struct
* @list: list of filters
*/
-static enum ice_status
-ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
+static int ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
{
return ice_remove_eth_mac(&vsi->back->hw, list);
}
@@ -119,6 +208,11 @@ ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
void ice_fltr_remove_all(struct ice_vsi *vsi)
{
ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);
+ /* sync netdev filters if they exist */
+ if (vsi->netdev) {
+ __dev_uc_unsync(vsi->netdev, NULL);
+ __dev_mc_unsync(vsi->netdev, NULL);
+ }
}
/**
@@ -150,21 +244,22 @@ ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
 * ice_fltr_add_vlan_to_list - add VLAN filter info to existing list
* @vsi: pointer to VSI struct
* @list: list to add filter info to
- * @vlan_id: VLAN ID to add
- * @action: filter action
+ * @vlan: VLAN filter details
*/
static int
ice_fltr_add_vlan_to_list(struct ice_vsi *vsi, struct list_head *list,
- u16 vlan_id, enum ice_sw_fwd_act_type action)
+ struct ice_vlan *vlan)
{
struct ice_fltr_info info = { 0 };
info.flag = ICE_FLTR_TX;
info.src_id = ICE_SRC_ID_VSI;
info.lkup_type = ICE_SW_LKUP_VLAN;
- info.fltr_act = action;
+ info.fltr_act = ICE_FWD_TO_VSI;
info.vsi_handle = vsi->idx;
- info.l_data.vlan.vlan_id = vlan_id;
+ info.l_data.vlan.vlan_id = vlan->vid;
+ info.l_data.vlan.tpid = vlan->tpid;
+ info.l_data.vlan.tpid_valid = true;
return ice_fltr_add_entry_to_list(ice_pf_to_dev(vsi->back), &info,
list);
@@ -207,18 +302,17 @@ ice_fltr_add_eth_to_list(struct ice_vsi *vsi, struct list_head *list,
* @action: action to be performed on filter match
* @mac_action: pointer to add or remove MAC function
*/
-static enum ice_status
+static int
ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action,
- enum ice_status (*mac_action)(struct ice_vsi *,
- struct list_head *))
+ int (*mac_action)(struct ice_vsi *, struct list_head *))
{
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action)) {
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
result = mac_action(vsi, &tmp_list);
@@ -233,21 +327,21 @@ ice_fltr_prepare_mac(struct ice_vsi *vsi, const u8 *mac,
* @action: action to be performed on filter match
* @mac_action: pointer to add or remove MAC function
*/
-static enum ice_status
+static int
ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action,
- enum ice_status(*mac_action)
+ int(*mac_action)
(struct ice_vsi *, struct list_head *))
{
u8 broadcast[ETH_ALEN];
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
eth_broadcast_addr(broadcast);
if (ice_fltr_add_mac_to_list(vsi, &tmp_list, mac, action) ||
ice_fltr_add_mac_to_list(vsi, &tmp_list, broadcast, action)) {
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
result = mac_action(vsi, &tmp_list);
@@ -258,21 +352,18 @@ ice_fltr_prepare_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
/**
* ice_fltr_prepare_vlan - add or remove VLAN filter
* @vsi: pointer to VSI struct
- * @vlan_id: VLAN ID to add
- * @action: action to be performed on filter match
+ * @vlan: VLAN filter details
* @vlan_action: pointer to add or remove VLAN function
*/
-static enum ice_status
-ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action,
- enum ice_status (*vlan_action)(struct ice_vsi *,
- struct list_head *))
+static int
+ice_fltr_prepare_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan,
+ int (*vlan_action)(struct ice_vsi *, struct list_head *))
{
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
- if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan_id, action))
- return ICE_ERR_NO_MEMORY;
+ if (ice_fltr_add_vlan_to_list(vsi, &tmp_list, vlan))
+ return -ENOMEM;
result = vlan_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
@@ -287,17 +378,16 @@ ice_fltr_prepare_vlan(struct ice_vsi *vsi, u16 vlan_id,
* @action: action to be performed on filter match
* @eth_action: pointer to add or remove ethertype function
*/
-static enum ice_status
+static int
ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action,
- enum ice_status (*eth_action)(struct ice_vsi *,
- struct list_head *))
+ int (*eth_action)(struct ice_vsi *, struct list_head *))
{
- enum ice_status result;
LIST_HEAD(tmp_list);
+ int result;
if (ice_fltr_add_eth_to_list(vsi, &tmp_list, ethertype, flag, action))
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
result = eth_action(vsi, &tmp_list);
ice_fltr_free_list(ice_pf_to_dev(vsi->back), &tmp_list);
@@ -310,8 +400,8 @@ ice_fltr_prepare_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
* @mac: MAC to add
* @action: action to be performed on filter match
*/
-enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_add_mac_list);
}
@@ -322,7 +412,7 @@ enum ice_status ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
* @mac: MAC to add
* @action: action to be performed on filter match
*/
-enum ice_status
+int
ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action)
{
@@ -336,8 +426,8 @@ ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
* @mac: filter MAC to remove
* @action: action to remove
*/
-enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_mac(vsi, mac, action, ice_fltr_remove_mac_list);
}
@@ -345,27 +435,21 @@ enum ice_status ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
/**
* ice_fltr_add_vlan - add single VLAN filter
* @vsi: pointer to VSI struct
- * @vlan_id: VLAN ID to add
- * @action: action to be performed on filter match
+ * @vlan: VLAN filter details
*/
-enum ice_status ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
- return ice_fltr_prepare_vlan(vsi, vlan_id, action,
- ice_fltr_add_vlan_list);
+ return ice_fltr_prepare_vlan(vsi, vlan, ice_fltr_add_vlan_list);
}
/**
* ice_fltr_remove_vlan - remove VLAN filter
* @vsi: pointer to VSI struct
- * @vlan_id: filter VLAN to remove
- * @action: action to remove
+ * @vlan: VLAN filter details
*/
-enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
{
- return ice_fltr_prepare_vlan(vsi, vlan_id, action,
- ice_fltr_remove_vlan_list);
+ return ice_fltr_prepare_vlan(vsi, vlan, ice_fltr_remove_vlan_list);
}
/**
@@ -375,8 +459,8 @@ enum ice_status ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vlan_id,
* @flag: direction of packet to be filtered, Tx or Rx
* @action: action to be performed on filter match
*/
-enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
- enum ice_sw_fwd_act_type action)
+int ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
ice_fltr_add_eth_list);
@@ -389,89 +473,9 @@ enum ice_status ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
* @flag: direction of filter
* @action: action to remove
*/
-enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype,
- u16 flag, enum ice_sw_fwd_act_type action)
+int ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
+ enum ice_sw_fwd_act_type action)
{
return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
ice_fltr_remove_eth_list);
}
-
-/**
- * ice_fltr_update_rule_flags - update lan_en/lb_en flags
- * @hw: pointer to hw
- * @rule_id: id of rule being updated
- * @recipe_id: recipe id of rule
- * @act: current action field
- * @type: Rx or Tx
- * @src: source VSI
- * @new_flags: combinations of lb_en and lan_en
- */
-static enum ice_status
-ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id,
- u32 act, u16 type, u16 src, u32 new_flags)
-{
- struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status err;
- u32 flags_mask;
-
- s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
-
- flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
- act &= ~flags_mask;
- act |= (flags_mask & new_flags);
-
- s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(recipe_id);
- s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
- s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
-
- if (type & ICE_FLTR_RX) {
- s_rule->pdata.lkup_tx_rx.src =
- cpu_to_le16(hw->port_info->lport);
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
-
- } else {
- s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(src);
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
- }
-
- err = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
- ice_aqc_opc_update_sw_rules, NULL);
-
- kfree(s_rule);
- return err;
-}
-
-/**
- * ice_fltr_build_action - build action for rule
- * @vsi_id: id of VSI which is use to build action
- */
-static u32 ice_fltr_build_action(u16 vsi_id)
-{
- return ((vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M) |
- ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
-}
-
-/**
- * ice_fltr_update_flags_dflt_rule - update flags on default rule
- * @vsi: pointer to VSI
- * @rule_id: id of rule
- * @direction: Tx or Rx
- * @new_flags: flags to update
- *
- * Function updates flags on default rule with ICE_SW_LKUP_DFLT.
- *
- * Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and
- * ICE_SINGLE_ACT_LAN_ENABLE.
- */
-enum ice_status
-ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
- u32 new_flags)
-{
- u32 action = ice_fltr_build_action(vsi->vsi_num);
- struct ice_hw *hw = &vsi->back->hw;
-
- return ice_fltr_update_rule_flags(hw, rule_id, ICE_SW_LKUP_DFLT, action,
- direction, vsi->vsi_num, new_flags);
-}
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h
index 8eec4febead1..0f3dbc308eec 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.h
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.h
@@ -4,39 +4,48 @@
#ifndef _ICE_FLTR_H_
#define _ICE_FLTR_H_
+#include "ice_vlan.h"
+
void ice_fltr_free_list(struct device *dev, struct list_head *h);
-enum ice_status
+int
+ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask);
+int
+ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
+ u8 promisc_mask);
+int
+ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+int
+ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+int
ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
const u8 *mac, enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_add_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_add_mac_and_broadcast(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
-enum ice_status
-ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
-enum ice_status
+int ice_fltr_add_mac_list(struct ice_vsi *vsi, struct list_head *list);
+int
ice_fltr_remove_mac(struct ice_vsi *vsi, const u8 *mac,
enum ice_sw_fwd_act_type action);
-enum ice_status
-ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
+int ice_fltr_remove_mac_list(struct ice_vsi *vsi, struct list_head *list);
-enum ice_status
-ice_fltr_add_vlan(struct ice_vsi *vsi, u16 vid,
- enum ice_sw_fwd_act_type action);
-enum ice_status
-ice_fltr_remove_vlan(struct ice_vsi *vsi, u16 vid,
- enum ice_sw_fwd_act_type action);
+int ice_fltr_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_fltr_remove_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
-enum ice_status
+int
ice_fltr_add_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
-enum ice_status
+int
ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
void ice_fltr_remove_all(struct ice_vsi *vsi);
-enum ice_status
-ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
- u32 new_flags);
+
+int
+ice_fltr_update_flags(struct ice_vsi *vsi, u16 rule_id, u16 recipe_id,
+ u32 new_flags);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index f8601d5b0b19..3dc5662d62a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -16,6 +16,18 @@ struct ice_fwu_priv {
/* Track which NVM banks to activate at the end of the update */
u8 activate_flags;
+
+ /* Track the firmware response of the required reset to complete the
+ * flash update.
+ *
+ * 0 - ICE_AQC_NVM_POR_FLAG - A full power on is required
+ * 1 - ICE_AQC_NVM_PERST_FLAG - A cold PCIe reset is required
+ * 2 - ICE_AQC_NVM_EMPR_FLAG - An EMP reset is required
+ */
+ u8 reset_level;
+
+ /* Track if EMP reset is available */
+ u8 emp_reset_available;
};
/**
@@ -40,8 +52,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
struct device *dev = context->dev;
struct ice_pf *pf = priv->pf;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u8 *package_data;
+ int status;
dev_dbg(dev, "Sending PLDM record package data to firmware\n");
@@ -54,9 +66,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
kfree(package_data);
if (status) {
- dev_err(dev, "Failed to send record package data to firmware, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(dev, "Failed to send record package data to firmware, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware");
return -EIO;
}
@@ -203,8 +214,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
struct device *dev = context->dev;
struct ice_pf *pf = priv->pf;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
size_t length;
+ int status;
switch (component->identifier) {
case NVM_COMP_ID_OROM:
@@ -240,9 +251,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
kfree(comp_tbl);
if (status) {
- dev_err(dev, "Failed to transfer component table to firmware, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(dev, "Failed to transfer component table to firmware, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware");
return -EIO;
}
@@ -259,6 +269,7 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
* @block_size: size of the block to write, up to 4k
* @block: pointer to block of data to write
* @last_cmd: whether this is the last command
+ * @reset_level: storage for reset level required
* @extack: netlink extended ACK structure
*
* Write a block of data to a flash module, and await for the completion
@@ -266,18 +277,24 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
*
* Note this function assumes the caller has acquired the NVM resource.
*
+ * On successful return, reset level indicates the device reset required to
+ * complete the update.
+ *
+ * 0 - ICE_AQC_NVM_POR_FLAG - A full power on is required
+ * 1 - ICE_AQC_NVM_PERST_FLAG - A cold PCIe reset is required
+ * 2 - ICE_AQC_NVM_EMPR_FLAG - An EMP reset is required
+ *
* Returns: zero on success, or a negative error code on failure.
*/
static int
ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
u16 block_size, u8 *block, bool last_cmd,
- struct netlink_ext_ack *extack)
+ u8 *reset_level, struct netlink_ext_ack *extack)
{
u16 completion_module, completion_retval;
struct device *dev = ice_pf_to_dev(pf);
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u32 completion_offset;
int err;
@@ -286,11 +303,11 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n",
block_size, module, offset);
- status = ice_aq_update_nvm(hw, module, offset, block_size, block,
- last_cmd, 0, NULL);
- if (status) {
- dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %s aq_err %s\n",
- module, block_size, offset, ice_stat_str(status),
+ err = ice_aq_update_nvm(hw, module, offset, block_size, block,
+ last_cmd, 0, NULL);
+ if (err) {
+ dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %d aq_err %s\n",
+ module, block_size, offset, err,
ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
return -EIO;
@@ -338,6 +355,24 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
return -EIO;
}
+ /* For the last command to write the NVM bank, newer versions of
+ * firmware indicate the required level of reset to complete
+ * activation of firmware. If the firmware supports this, cache the
+ * response for indicating to the user later. Otherwise, assume that
+ * a full power cycle is required.
+ */
+ if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) {
+ if (hw->dev_caps.common_cap.pcie_reset_avoidance) {
+ *reset_level = (event.desc.params.nvm.cmd_flags &
+ ICE_AQC_NVM_RESET_LVL_M);
+ dev_dbg(dev, "Firmware reported required reset level as %u\n",
+ *reset_level);
+ } else {
+ *reset_level = ICE_AQC_NVM_POR_FLAG;
+ dev_dbg(dev, "Firmware doesn't support indicating required reset level. Assuming a power cycle is required\n");
+ }
+ }
+
return 0;
}
@@ -348,6 +383,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
* @component: the name of the component being updated
* @image: buffer of image data to write to the NVM
* @length: length of the buffer
+ * @reset_level: storage for reset level required
* @extack: netlink extended ACK structure
*
* Loop over the data for a given NVM module and program it in 4 Kb
@@ -360,7 +396,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
*/
static int
ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
- const u8 *image, u32 length,
+ const u8 *image, u32 length, u8 *reset_level,
struct netlink_ext_ack *extack)
{
struct device *dev = ice_pf_to_dev(pf);
@@ -394,7 +430,8 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
memcpy(block, image + offset, block_size);
err = ice_write_one_nvm_block(pf, module, offset, block_size,
- block, last_cmd, extack);
+ block, last_cmd, reset_level,
+ extack);
if (err)
break;
@@ -445,7 +482,6 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
struct devlink *devlink;
- enum ice_status status;
int err;
dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module);
@@ -456,10 +492,10 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT);
- status = ice_aq_erase_nvm(hw, module, NULL);
- if (status) {
- dev_err(dev, "Failed to erase %s (module 0x%02x), err %s aq_err %s\n",
- component, module, ice_stat_str(status),
+ err = ice_aq_erase_nvm(hw, module, NULL);
+ if (err) {
+ dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n",
+ component, module, err,
ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module");
err = -EIO;
@@ -511,6 +547,7 @@ out_notify_devlink:
* ice_switch_flash_banks - Tell firmware to switch NVM banks
* @pf: Pointer to the PF data structure
* @activate_flags: flags used for the activation command
+ * @emp_reset_available: on return, indicates if EMP reset is available
* @extack: netlink extended ACK structure
*
* Notify firmware to activate the newly written flash banks, and wait for the
@@ -518,27 +555,43 @@ out_notify_devlink:
*
* Returns: zero on success or an error code on failure.
*/
-static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
- struct netlink_ext_ack *extack)
+static int
+ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
+ u8 *emp_reset_available, struct netlink_ext_ack *extack)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_rq_event_info event;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u16 completion_retval;
+ u8 response_flags;
int err;
memset(&event, 0, sizeof(event));
- status = ice_nvm_write_activate(hw, activate_flags);
- if (status) {
- dev_err(dev, "Failed to switch active flash banks, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_nvm_write_activate(hw, activate_flags, &response_flags);
+ if (err) {
+ dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks");
return -EIO;
}
+ /* Newer versions of firmware have support to indicate whether an EMP
+ * reset to reload firmware is available. For older firmware, EMP
+ * reset is always available.
+ */
+ if (emp_reset_available) {
+ if (hw->dev_caps.common_cap.reset_restrict_support) {
+ *emp_reset_available = response_flags & ICE_AQC_NVM_EMPR_ENA;
+ dev_dbg(dev, "Firmware indicated that EMP reset is %s\n",
+ *emp_reset_available ?
+ "available" : "not available");
+ } else {
+ *emp_reset_available = ICE_AQC_NVM_EMPR_ENA;
+ dev_dbg(dev, "Firmware does not support restricting EMP reset availability\n");
+ }
+ }
+
err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, 30 * HZ,
&event);
if (err) {
@@ -579,6 +632,7 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component)
struct netlink_ext_ack *extack = priv->extack;
struct ice_pf *pf = priv->pf;
const char *name;
+ u8 *reset_level;
u16 module;
u8 flag;
int err;
@@ -587,16 +641,19 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component)
case NVM_COMP_ID_OROM:
module = ICE_SR_1ST_OROM_BANK_PTR;
flag = ICE_AQC_NVM_ACTIV_SEL_OROM;
+ reset_level = NULL;
name = "fw.undi";
break;
case NVM_COMP_ID_NVM:
module = ICE_SR_1ST_NVM_BANK_PTR;
flag = ICE_AQC_NVM_ACTIV_SEL_NVM;
+ reset_level = &priv->reset_level;
name = "fw.mgmt";
break;
case NVM_COMP_ID_NETLIST:
module = ICE_SR_NETLIST_BANK_PTR;
flag = ICE_AQC_NVM_ACTIV_SEL_NETLIST;
+ reset_level = NULL;
name = "fw.netlist";
break;
default:
@@ -616,7 +673,8 @@ ice_flash_component(struct pldmfw *context, struct pldmfw_component *component)
return err;
return ice_write_nvm_module(pf, module, name, component->component_data,
- component->component_size, extack);
+ component->component_size, reset_level,
+ extack);
}
/**
@@ -634,110 +692,163 @@ static int ice_finalize_update(struct pldmfw *context)
struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
struct netlink_ext_ack *extack = priv->extack;
struct ice_pf *pf = priv->pf;
+ struct devlink *devlink;
+ int err;
/* Finally, notify firmware to activate the written NVM banks */
- return ice_switch_flash_banks(pf, priv->activate_flags, extack);
-}
+ err = ice_switch_flash_banks(pf, priv->activate_flags,
+ &priv->emp_reset_available, extack);
+ if (err)
+ return err;
-static const struct pldmfw_ops ice_fwu_ops = {
- .match_record = &pldmfw_op_pci_match_record,
- .send_package_data = &ice_send_package_data,
- .send_component_table = &ice_send_component_table,
- .flash_component = &ice_flash_component,
- .finalize_update = &ice_finalize_update,
-};
+ devlink = priv_to_devlink(pf);
-/**
- * ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device
- * @pf: private device driver structure
- * @fw: firmware object pointing to the relevant firmware file
- * @preservation: preservation level to request from firmware
- * @extack: netlink extended ACK structure
- *
- * Parse the data for a given firmware file, verifying that it is a valid PLDM
- * formatted image that matches this device.
- *
- * Extract the device record Package Data and Component Tables and send them
- * to the firmware. Extract and write the flash data for each of the three
- * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify
- * firmware once the data is written to the inactive banks.
- *
- * Returns: zero on success or a negative error code on failure.
- */
-int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
- u8 preservation, struct netlink_ext_ack *extack)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- struct ice_fwu_priv priv;
- enum ice_status status;
- int err;
+ /* If the required reset is EMPR, but EMPR is disabled, report that
+ * a reboot is required instead.
+ */
+ if (priv->reset_level == ICE_AQC_NVM_EMPR_FLAG &&
+ !priv->emp_reset_available) {
+ dev_dbg(ice_pf_to_dev(pf), "Firmware indicated EMP reset as sufficient, but EMP reset is disabled\n");
+ priv->reset_level = ICE_AQC_NVM_PERST_FLAG;
+ }
- switch (preservation) {
- case ICE_AQC_NVM_PRESERVE_ALL:
- case ICE_AQC_NVM_PRESERVE_SELECTED:
- case ICE_AQC_NVM_NO_PRESERVATION:
- case ICE_AQC_NVM_FACTORY_DEFAULT:
+ switch (priv->reset_level) {
+ case ICE_AQC_NVM_EMPR_FLAG:
+ devlink_flash_update_status_notify(devlink,
+ "Activate new firmware by devlink reload",
+ NULL, 0, 0);
break;
+ case ICE_AQC_NVM_PERST_FLAG:
+ devlink_flash_update_status_notify(devlink,
+ "Activate new firmware by rebooting the system",
+ NULL, 0, 0);
+ break;
+ case ICE_AQC_NVM_POR_FLAG:
default:
- WARN(1, "Unexpected preservation level request %u", preservation);
- return -EINVAL;
+ devlink_flash_update_status_notify(devlink,
+ "Activate new firmware by power cycling the system",
+ NULL, 0, 0);
+ break;
}
- memset(&priv, 0, sizeof(priv));
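+ /* Remember whether firmware left EMP reset disabled */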
+ pf->fw_emp_reset_disabled = !priv->emp_reset_available;
- priv.context.ops = &ice_fwu_ops;
- priv.context.dev = dev;
- priv.extack = extack;
- priv.pf = pf;
- priv.activate_flags = preservation;
+ return 0;
+}
- status = ice_acquire_nvm(hw, ICE_RES_WRITE);
- if (status) {
- dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
- return -EIO;
- }
+struct ice_pldm_pci_record_id {
+ u32 vendor;
+ u32 device;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+};
- err = pldmfw_flash_image(&priv.context, fw);
- if (err == -ENOENT) {
- dev_err(dev, "Firmware image has no record matching this device\n");
- NL_SET_ERR_MSG_MOD(extack, "Firmware image has no record matching this device");
- } else if (err) {
- /* Do not set a generic extended ACK message here. A more
- * specific message may already have been set by one of our
- * ops.
+/**
+ * ice_op_pci_match_record - Check if a PCI device matches the record
+ * @context: PLDM fw update structure
+ * @record: list of records extracted from the PLDM image
+ *
+ * Determine if the PCI device associated with this device matches the record
+ * data provided.
+ *
+ * Searches the descriptor TLVs and extracts the relevant descriptor data into
+ * a pldm_pci_record_id. This is then compared against the PCI device ID
+ * information.
+ *
+ * Returns: true if the device matches the record, false otherwise.
+ */
+static bool
+ice_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record)
+{
+ struct pci_dev *pdev = to_pci_dev(context->dev);
+ struct ice_pldm_pci_record_id id = {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subsystem_vendor = PCI_ANY_ID,
+ .subsystem_device = PCI_ANY_ID,
+ };
+ struct pldmfw_desc_tlv *desc;
+
+ list_for_each_entry(desc, &record->descs, entry) {
+ u16 value;
+ int *ptr;
+
+ switch (desc->type) {
+ case PLDM_DESC_ID_PCI_VENDOR_ID:
+ ptr = &id.vendor;
+ break;
+ case PLDM_DESC_ID_PCI_DEVICE_ID:
+ ptr = &id.device;
+ break;
+ case PLDM_DESC_ID_PCI_SUBVENDOR_ID:
+ ptr = &id.subsystem_vendor;
+ break;
+ case PLDM_DESC_ID_PCI_SUBDEV_ID:
+ ptr = &id.subsystem_device;
+ break;
+ default:
+ /* Skip unrelated TLVs */
+ continue;
+ }
+
+ value = get_unaligned_le16(desc->data);
+ /* A value of zero for one of the descriptors is sometimes
+ * used when the record should ignore this field when matching
+ * the device. For example, if the record applies to any subsystem
+ * device or vendor.
*/
- dev_err(dev, "Failed to flash PLDM image, err %d", err);
+ if (value)
+ *ptr = value;
+ else
+ *ptr = PCI_ANY_ID;
}
- ice_release_nvm(hw);
-
- return err;
+ /* the E822 device can have a generic device ID so check for that */
+ if ((id.vendor == PCI_ANY_ID || id.vendor == pdev->vendor) &&
+ (id.device == PCI_ANY_ID || id.device == pdev->device ||
+ id.device == ICE_DEV_ID_E822_SI_DFLT) &&
+ (id.subsystem_vendor == PCI_ANY_ID ||
+ id.subsystem_vendor == pdev->subsystem_vendor) &&
+ (id.subsystem_device == PCI_ANY_ID ||
+ id.subsystem_device == pdev->subsystem_device))
+ return true;
+
+ return false;
}
+static const struct pldmfw_ops ice_fwu_ops_e810 = {
+ .match_record = &pldmfw_op_pci_match_record,
+ .send_package_data = &ice_send_package_data,
+ .send_component_table = &ice_send_component_table,
+ .flash_component = &ice_flash_component,
+ .finalize_update = &ice_finalize_update,
+};
+
+static const struct pldmfw_ops ice_fwu_ops_e822 = {
+ .match_record = &ice_op_pci_match_record,
+ .send_package_data = &ice_send_package_data,
+ .send_component_table = &ice_send_component_table,
+ .flash_component = &ice_flash_component,
+ .finalize_update = &ice_finalize_update,
+};
+
/**
- * ice_check_for_pending_update - Check for a pending flash update
+ * ice_get_pending_updates - Check whether the device has pending flash updates
* @pf: the PF driver structure
- * @component: if not NULL, the name of the component being updated
- * @extack: Netlink extended ACK structure
+ * @pending: on return, bitmap of updates pending
+ * @extack: Netlink extended ACK
*
- * Check whether the device already has a pending flash update. If such an
- * update is found, cancel it so that the requested update may proceed.
+ * Check if the device has any pending updates on any flash components.
*
- * Returns: zero on success, or a negative error code on failure.
+ * Returns: zero on success, or a negative error code on failure. On success,
+ * @pending is set to a bitmap of the components with pending updates.
*/
-int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
- struct netlink_ext_ack *extack)
+int ice_get_pending_updates(struct ice_pf *pf, u8 *pending,
+ struct netlink_ext_ack *extack)
{
- struct devlink *devlink = priv_to_devlink(pf);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw_dev_caps *dev_caps;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
- u8 pending = 0;
int err;
dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL);
@@ -749,30 +860,60 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
* may have changed, e.g. if an update was previously completed and
* the system has not yet rebooted.
*/
- status = ice_discover_dev_caps(hw, dev_caps);
- if (status) {
+ err = ice_discover_dev_caps(hw, dev_caps);
+ if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to read device capabilities");
kfree(dev_caps);
- return -EIO;
+ return err;
}
+ *pending = 0;
+
if (dev_caps->common_cap.nvm_update_pending_nvm) {
dev_info(dev, "The fw.mgmt flash component has a pending update\n");
- pending |= ICE_AQC_NVM_ACTIV_SEL_NVM;
+ *pending |= ICE_AQC_NVM_ACTIV_SEL_NVM;
}
if (dev_caps->common_cap.nvm_update_pending_orom) {
dev_info(dev, "The fw.undi flash component has a pending update\n");
- pending |= ICE_AQC_NVM_ACTIV_SEL_OROM;
+ *pending |= ICE_AQC_NVM_ACTIV_SEL_OROM;
}
if (dev_caps->common_cap.nvm_update_pending_netlist) {
dev_info(dev, "The fw.netlist flash component has a pending update\n");
- pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST;
+ *pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST;
}
kfree(dev_caps);
+ return 0;
+}
+
+/**
+ * ice_cancel_pending_update - Cancel any pending update for a component
+ * @pf: the PF driver structure
+ * @component: if not NULL, the name of the component being updated
+ * @extack: Netlink extended ACK structure
+ *
+ * Cancel any pending update for the specified component. If component is
+ * NULL, all device updates will be canceled.
+ *
+ * Returns: zero on success, or a negative error code on failure.
+ */
+static int
+ice_cancel_pending_update(struct ice_pf *pf, const char *component,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ u8 pending;
+ int err;
+
+ err = ice_get_pending_updates(pf, &pending, extack);
+ if (err)
+ return err;
+
/* If the flash_update request is for a specific component, ignore all
* of the other components.
*/
@@ -798,17 +939,111 @@ int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
"Canceling previous pending update",
component, 0, 0);
- status = ice_acquire_nvm(hw, ICE_RES_WRITE);
- if (status) {
- dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ err = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (err) {
+ dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
- return -EIO;
+ return err;
}
pending |= ICE_AQC_NVM_REVERT_LAST_ACTIV;
- err = ice_switch_flash_banks(pf, pending, extack);
+ err = ice_switch_flash_banks(pf, pending, NULL, extack);
+
+ ice_release_nvm(hw);
+
+ /* Since we've canceled the pending update, we no longer know if EMP
+ * reset is restricted.
+ */
+ pf->fw_emp_reset_disabled = false;
+
+ return err;
+}
+
+/**
+ * ice_devlink_flash_update - Write a firmware image to the device
+ * @devlink: pointer to devlink associated with the device to update
+ * @params: devlink flash update parameters
+ * @extack: netlink extended ACK structure
+ *
+ * Parse the data for a given firmware file, verifying that it is a valid PLDM
+ * formatted image that matches this device.
+ *
+ * Extract the device record Package Data and Component Tables and send them
+ * to the firmware. Extract and write the flash data for each of the three
+ * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify
+ * firmware once the data is written to the inactive banks.
+ *
+ * Returns: zero on success or a negative error code on failure.
+ */
+int ice_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_fwu_priv priv;
+ u8 preservation;
+ int err;
+
+ if (!params->overwrite_mask) {
+ /* preserve all settings and identifiers */
+ preservation = ICE_AQC_NVM_PRESERVE_ALL;
+ } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) {
+ /* overwrite settings, but preserve the vital device identifiers */
+ preservation = ICE_AQC_NVM_PRESERVE_SELECTED;
+ } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS |
+ DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) {
+ /* overwrite both settings and identifiers, preserve nothing */
+ preservation = ICE_AQC_NVM_NO_PRESERVATION;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!hw->dev_caps.common_cap.nvm_unified_update) {
+ NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&priv, 0, sizeof(priv));
+
+ /* the E822 device needs a slightly different set of ops */
+ if (hw->mac_type == ICE_MAC_GENERIC)
+ priv.context.ops = &ice_fwu_ops_e822;
+ else
+ priv.context.ops = &ice_fwu_ops_e810;
+ priv.context.dev = dev;
+ priv.extack = extack;
+ priv.pf = pf;
+ priv.activate_flags = preservation;
+
+ devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0);
+
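+ /* Cancel any update left pending from an earlier flash attempt */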
+ err = ice_cancel_pending_update(pf, NULL, extack);
+ if (err)
+ return err;
+
+ err = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (err) {
+ dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
+ return err;
+ }
+
+ err = pldmfw_flash_image(&priv.context, params->fw);
+ if (err == -ENOENT) {
+ dev_err(dev, "Firmware image has no record matching this device\n");
+ NL_SET_ERR_MSG_MOD(extack, "Firmware image has no record matching this device");
+ } else if (err) {
+ /* Do not set a generic extended ACK message here. A more
+ * specific message may already have been set by one of our
+ * ops.
+ */
+ dev_err(dev, "Failed to flash PLDM image, err %d", err);
+ }
ice_release_nvm(hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h
index c6390f6851ff..750574885716 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.h
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h
@@ -4,9 +4,10 @@
#ifndef _ICE_FW_UPDATE_H_
#define _ICE_FW_UPDATE_H_
-int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
- u8 preservation, struct netlink_ext_ack *extack);
-int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
- struct netlink_ext_ack *extack);
+int ice_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack);
+int ice_get_pending_updates(struct ice_pf *pf, u8 *pending,
+ struct netlink_ext_ack *extack);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
new file mode 100644
index 000000000000..2ea8a2b11bcd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021-2022, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+
+/**
+ * ice_gnss_do_write - Write data to internal GNSS receiver
+ * @pf: board private structure
+ * @buf: command buffer
+ * @size: command buffer size
+ *
+ * Write UBX command data to the GNSS receiver
+ *
+ * Return:
+ * * number of bytes written - success
+ * * number of bytes written before the failed I2C transfer - error
+ */
+static unsigned int
+ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ struct ice_hw *hw = &pf->hw;
+ unsigned int offset = 0;
+ int err = 0;
+
+ memset(&link_topo, 0, sizeof(struct ice_aqc_link_topo_addr));
+ link_topo.topo_params.index = ICE_E810T_GNSS_I2C_BUS;
+ link_topo.topo_params.node_type_ctx |=
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE);
+
+ /* It's not possible to write a single byte to u-blox.
+ * Write all bytes in a loop until there are 6 or less bytes left. If
+ * there are exactly 6 bytes left, the last write would be only a byte.
+ * In this case, do 4+2 bytes writes instead of 5+1. Otherwise, do the
+ * last 2 to 5 bytes write.
+ */
+ while (size - offset > ICE_GNSS_UBX_WRITE_BYTES + 1) {
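+ /* The first byte is carried in the I2C address parameter, the rest as data */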
+ err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(buf[offset]),
+ ICE_MAX_I2C_WRITE_BYTES,
+ &buf[offset + 1], NULL);
+ if (err)
+ goto err_out;
+
+ offset += ICE_GNSS_UBX_WRITE_BYTES;
+ }
+
+ /* Single byte would be written. Write 4 bytes instead of 5. */
+ if (size - offset == ICE_GNSS_UBX_WRITE_BYTES + 1) {
+ err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(buf[offset]),
+ ICE_MAX_I2C_WRITE_BYTES - 1,
+ &buf[offset + 1], NULL);
+ if (err)
+ goto err_out;
+
+ offset += ICE_GNSS_UBX_WRITE_BYTES - 1;
+ }
+
+ /* Do the last write, 2 to 5 bytes. */
+ err = ice_aq_write_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(buf[offset]), size - offset - 1,
+ &buf[offset + 1], NULL);
+ if (err)
+ goto err_out;
+
+ return size;
+
+err_out:
+ dev_err(ice_pf_to_dev(pf), "GNSS failed to write, offset=%u, size=%u, err=%d\n",
+ offset, size, err);
+
+ return offset;
+}
+
+/**
+ * ice_gnss_write_pending - Write all pending data to internal GNSS
+ * @work: GNSS write work structure
+ */
+static void ice_gnss_write_pending(struct kthread_work *work)
+{
+ struct gnss_serial *gnss = container_of(work, struct gnss_serial,
+ write_work);
+ struct ice_pf *pf = gnss->back;
+
+ if (!pf)
+ return;
+
+ if (!test_bit(ICE_FLAG_GNSS, pf->flags))
+ return;
+
+ if (!list_empty(&gnss->queue)) {
+ struct gnss_write_buf *write_buf = NULL;
+ unsigned int bytes;
+
+ write_buf = list_first_entry(&gnss->queue,
+ struct gnss_write_buf, queue);
+
+ bytes = ice_gnss_do_write(pf, write_buf->buf, write_buf->size);
+ dev_dbg(ice_pf_to_dev(pf), "%u bytes written to GNSS\n", bytes);
+
+ list_del(&write_buf->queue);
+ kfree(write_buf->buf);
+ kfree(write_buf);
+ }
+}
+
+/**
+ * ice_gnss_read - Read data from internal GNSS module
+ * @work: GNSS read work structure
+ *
+ * Read the data from internal GNSS receiver, write it to gnss_dev.
+ */
+static void ice_gnss_read(struct kthread_work *work)
+{
+ struct gnss_serial *gnss = container_of(work, struct gnss_serial,
+ read_work.work);
+ unsigned long delay = ICE_GNSS_POLL_DATA_DELAY_TIME;
+ unsigned int i, bytes_read, data_len, count;
+ struct ice_aqc_link_topo_addr link_topo;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ __be16 data_len_b;
+ char *buf = NULL;
+ u8 i2c_params;
+ int err = 0;
+
+ pf = gnss->back;
+ if (!pf) {
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (!test_bit(ICE_FLAG_GNSS, pf->flags))
+ return;
+
+ hw = &pf->hw;
+
+ memset(&link_topo, 0, sizeof(struct ice_aqc_link_topo_addr));
+ link_topo.topo_params.index = ICE_E810T_GNSS_I2C_BUS;
+ link_topo.topo_params.node_type_ctx |=
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE);
+
+ i2c_params = ICE_GNSS_UBX_DATA_LEN_WIDTH |
+ ICE_AQC_I2C_USE_REPEATED_START;
+
+ err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(ICE_GNSS_UBX_DATA_LEN_H),
+ i2c_params, (u8 *)&data_len_b, NULL);
+ if (err)
+ goto requeue;
+
+ data_len = be16_to_cpu(data_len_b);
+ if (data_len == 0 || data_len == U16_MAX)
+ goto requeue;
+
+ /* The u-blox has data_len bytes for us to read */
+
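+ /* Cap the read at one page; buf below is a single zeroed page */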
+ data_len = min_t(typeof(data_len), data_len, PAGE_SIZE);
+
+ buf = (char *)get_zeroed_page(GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto requeue;
+ }
+
+ /* Read received data */
+ for (i = 0; i < data_len; i += bytes_read) {
+ unsigned int bytes_left = data_len - i;
+
+ bytes_read = min_t(typeof(bytes_left), bytes_left,
+ ICE_MAX_I2C_DATA_SIZE);
+
+ err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR,
+ cpu_to_le16(ICE_GNSS_UBX_EMPTY_DATA),
+ bytes_read, &buf[i], NULL);
+ if (err)
+ goto free_buf;
+ }
+
+ count = gnss_insert_raw(pf->gnss_dev, buf, i);
+ if (count != i)
+ dev_warn(ice_pf_to_dev(pf),
+ "gnss_insert_raw ret=%d size=%d\n",
+ count, i);
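+ /* Data arrived; switch to the slower per-message polling interval */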
+ delay = ICE_GNSS_TIMER_DELAY_TIME;
+free_buf:
+ free_page((unsigned long)buf);
+requeue:
+ kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, delay);
+exit:
+ if (err)
+ dev_dbg(ice_pf_to_dev(pf), "GNSS failed to read err=%d\n", err);
+}
+
+/**
+ * ice_gnss_struct_init - Initialize GNSS receiver
+ * @pf: Board private structure
+ *
+ * Initialize GNSS structures and workers.
+ *
+ * Return:
+ * * pointer to initialized gnss_serial struct - success
+ * * NULL - error
+ */
+static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct kthread_worker *kworker;
+ struct gnss_serial *gnss;
+
+ gnss = kzalloc(sizeof(*gnss), GFP_KERNEL);
+ if (!gnss)
+ return NULL;
+
+ gnss->back = pf;
+ pf->gnss_serial = gnss;
+
+ kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
+ INIT_LIST_HEAD(&gnss->queue);
+ kthread_init_work(&gnss->write_work, ice_gnss_write_pending);
+ kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
+ if (IS_ERR(kworker)) {
+ kfree(gnss);
+ return NULL;
+ }
+
+ gnss->kworker = kworker;
+
+ return gnss;
+}
+
+/**
+ * ice_gnss_open - Open GNSS device
+ * @gdev: pointer to the gnss device struct
+ *
+ * Open GNSS device and start filling the read buffer for consumer.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - error code
+ */
+static int ice_gnss_open(struct gnss_device *gdev)
+{
+ struct ice_pf *pf = gnss_get_drvdata(gdev);
+ struct gnss_serial *gnss;
+
+ if (!pf)
+ return -EFAULT;
+
+ if (!test_bit(ICE_FLAG_GNSS, pf->flags))
+ return -EFAULT;
+
+ gnss = pf->gnss_serial;
+ if (!gnss)
+ return -ENODEV;
+
+ kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, 0);
+
+ return 0;
+}
+
+/**
+ * ice_gnss_close - Close GNSS device
+ * @gdev: pointer to the gnss device struct
+ *
+ * Close GNSS device, cancel worker, stop filling the read buffer.
+ */
+static void ice_gnss_close(struct gnss_device *gdev)
+{
+ struct ice_pf *pf = gnss_get_drvdata(gdev);
+ struct gnss_serial *gnss;
+
+ if (!pf)
+ return;
+
+ gnss = pf->gnss_serial;
+ if (!gnss)
+ return;
+
+ kthread_cancel_work_sync(&gnss->write_work);
+ kthread_cancel_delayed_work_sync(&gnss->read_work);
+}
+
+/**
+ * ice_gnss_write - Write to GNSS device
+ * @gdev: pointer to the gnss device struct
+ * @buf: pointer to the user data
+ * @count: size of the buffer to be sent to the GNSS device
+ *
+ * Return:
+ * * number of written bytes - success
+ * * negative - error code
+ */
+static int
+ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count)
+{
+ struct ice_pf *pf = gnss_get_drvdata(gdev);
+ struct gnss_write_buf *write_buf;
+ struct gnss_serial *gnss;
+ unsigned char *cmd_buf;
+ int err = count;
+
+ /* We cannot write a single byte using our I2C implementation. */
+ if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF)
+ return -EINVAL;
+
+ if (!pf)
+ return -EFAULT;
+
+ if (!test_bit(ICE_FLAG_GNSS, pf->flags))
+ return -EFAULT;
+
+ gnss = pf->gnss_serial;
+ if (!gnss)
+ return -ENODEV;
+
+ cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);
+ if (!cmd_buf)
+ return -ENOMEM;
+
+ memcpy(cmd_buf, buf, count);
+ write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
+ if (!write_buf) {
+ kfree(cmd_buf);
+ return -ENOMEM;
+ }
+
+ write_buf->buf = cmd_buf;
+ write_buf->size = count;
+ INIT_LIST_HEAD(&write_buf->queue);
+ list_add_tail(&write_buf->queue, &gnss->queue);
+ kthread_queue_work(gnss->kworker, &gnss->write_work);
+
+ return err;
+}
+
+static const struct gnss_operations ice_gnss_ops = {
+ .open = ice_gnss_open,
+ .close = ice_gnss_close,
+ .write_raw = ice_gnss_write,
+};
+
+/**
+ * ice_gnss_register - Register GNSS receiver
+ * @pf: Board private structure
+ *
+ * Allocate and register GNSS receiver in the Linux GNSS subsystem.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - error code
+ */
+static int ice_gnss_register(struct ice_pf *pf)
+{
+ struct gnss_device *gdev;
+ int ret;
+
+ gdev = gnss_allocate_device(ice_pf_to_dev(pf));
+ if (!gdev) {
+ dev_err(ice_pf_to_dev(pf),
+ "gnss_allocate_device returns NULL\n");
+ return -ENOMEM;
+ }
+
+ gdev->ops = &ice_gnss_ops;
+ gdev->type = GNSS_TYPE_UBX;
+ gnss_set_drvdata(gdev, pf);
+ ret = gnss_register_device(gdev);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf), "gnss_register_device err=%d\n",
+ ret);
+ gnss_put_device(gdev);
+ } else {
+ pf->gnss_dev = gdev;
+ }
+
+ return ret;
+}
+
+/**
+ * ice_gnss_deregister - Deregister GNSS receiver
+ * @pf: Board private structure
+ *
+ * Deregister GNSS receiver from the Linux GNSS subsystem,
+ * release its resources.
+ */
+static void ice_gnss_deregister(struct ice_pf *pf)
+{
+ if (pf->gnss_dev) {
+ gnss_deregister_device(pf->gnss_dev);
+ gnss_put_device(pf->gnss_dev);
+ pf->gnss_dev = NULL;
+ }
+}
+
+/**
+ * ice_gnss_init - Initialize GNSS support
+ * @pf: Board private structure
+ */
+void ice_gnss_init(struct ice_pf *pf)
+{
+ int ret;
+
+ pf->gnss_serial = ice_gnss_struct_init(pf);
+ if (!pf->gnss_serial)
+ return;
+
+ ret = ice_gnss_register(pf);
+ if (!ret) {
+ set_bit(ICE_FLAG_GNSS, pf->flags);
+ dev_info(ice_pf_to_dev(pf), "GNSS init successful\n");
+ } else {
+ ice_gnss_exit(pf);
+ dev_err(ice_pf_to_dev(pf), "GNSS init failure\n");
+ }
+}
+
+/**
+ * ice_gnss_exit - Disable GNSS support
+ * @pf: Board private structure
+ */
+void ice_gnss_exit(struct ice_pf *pf)
+{
+ ice_gnss_deregister(pf);
+ clear_bit(ICE_FLAG_GNSS, pf->flags);
+
+ if (pf->gnss_serial) {
+ struct gnss_serial *gnss = pf->gnss_serial;
+
+ kthread_cancel_work_sync(&gnss->write_work);
+ kthread_cancel_delayed_work_sync(&gnss->read_work);
+ kthread_destroy_worker(gnss->kworker);
+ gnss->kworker = NULL;
+
+ kfree(gnss);
+ pf->gnss_serial = NULL;
+ }
+}
+
+/**
+ * ice_gnss_is_gps_present - Check if GPS HW is present
+ * @hw: pointer to HW struct
+ */
+bool ice_gnss_is_gps_present(struct ice_hw *hw)
+{
+ if (!hw->func_caps.ts_func_info.src_tmr_owned)
+ return false;
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+ if (ice_is_e810t(hw)) {
+ int err;
+ u8 data;
+
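+ /* The presence pin is active low; a set bit means no GNSS module */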
+ err = ice_read_pca9575_reg_e810t(hw, ICE_PCA9575_P0_IN, &data);
+ if (err || !!(data & ICE_E810T_P0_GNSS_PRSNT_N))
+ return false;
+ } else {
+ return false;
+ }
+#else
+ if (!ice_is_e810t(hw))
+ return false;
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+
+ return true;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
new file mode 100644
index 000000000000..b8bb8b63d081
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021-2022, Intel Corporation. */
+
+#ifndef _ICE_GNSS_H_
+#define _ICE_GNSS_H_
+
+#define ICE_E810T_GNSS_I2C_BUS 0x2
+#define ICE_GNSS_POLL_DATA_DELAY_TIME (HZ / 50) /* poll every 20 ms */
+#define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */
+#define ICE_GNSS_TTY_WRITE_BUF 250
+#define ICE_MAX_I2C_DATA_SIZE FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M)
+#define ICE_MAX_I2C_WRITE_BYTES 4
+
+/* u-blox ZED-F9T specific definitions */
+#define ICE_GNSS_UBX_I2C_BUS_ADDR 0x42
+/* Data length register is big endian */
+#define ICE_GNSS_UBX_DATA_LEN_H 0xFD
+#define ICE_GNSS_UBX_DATA_LEN_WIDTH 2
+#define ICE_GNSS_UBX_EMPTY_DATA 0xFF
+/* For u-blox, writes are performed without a register address, so the first
+ * byte to write is passed as the I2C addr parameter.
+ */
+#define ICE_GNSS_UBX_WRITE_BYTES (ICE_MAX_I2C_WRITE_BYTES + 1)
+
+struct gnss_write_buf {
+ struct list_head queue;
+ unsigned int size;
+ unsigned char *buf;
+};
+
+/**
+ * struct gnss_serial - data used to initialize the GNSS serial device
+ * @back: back pointer to PF
+ * @kworker: kwork thread for handling periodic work
+ * @read_work: delayed work for handling GNSS reads
+ * @write_work: work for handling GNSS writes
+ * @queue: write buffers queue
+ */
+struct gnss_serial {
+ struct ice_pf *back;
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work read_work;
+ struct kthread_work write_work;
+ struct list_head queue;
+};
+
+#if IS_ENABLED(CONFIG_GNSS)
+void ice_gnss_init(struct ice_pf *pf);
+void ice_gnss_exit(struct ice_pf *pf);
+bool ice_gnss_is_gps_present(struct ice_hw *hw);
+#else
+static inline void ice_gnss_init(struct ice_pf *pf) { }
+static inline void ice_gnss_exit(struct ice_pf *pf) { }
+static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)
+{
+ return false;
+}
+#endif /* IS_ENABLED(CONFIG_GNSS) */
+#endif /* _ICE_GNSS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index a49082485642..a92dc9a16035 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -100,6 +100,7 @@
#define PF_SB_ATQT 0x0022FE00
#define PF_SB_ATQT_ATQT_S 0
#define PF_SB_ATQT_ATQT_M ICE_M(0x3FF, 0)
+#define PF_SB_REM_DEV_CTL 0x002300F0
#define PRTDCB_GENC 0x00083000
#define PRTDCB_GENC_PFCLDA_S 16
#define PRTDCB_GENC_PFCLDA_M ICE_M(0xFFFF, 16)
@@ -109,6 +110,9 @@
#define PRTDCB_TUP2TC 0x001D26C0
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
+#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
+#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, 0)
@@ -440,6 +444,10 @@
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
#define PRTRPB_RDPC 0x000AC260
+#define GLHH_ART_CTL 0x000A41D4
+#define GLHH_ART_CTL_ACTIVE_M BIT(0)
+#define GLHH_ART_TIME_H 0x000A41D8
+#define GLHH_ART_TIME_L 0x000A41DC
#define GLTSYN_AUX_IN_0(_i) (0x000889D8 + ((_i) * 4))
#define GLTSYN_AUX_IN_0_INT_ENA_M BIT(4)
#define GLTSYN_AUX_OUT_0(_i) (0x00088998 + ((_i) * 4))
@@ -452,6 +460,8 @@
#define GLTSYN_ENA_TSYN_ENA_M BIT(0)
#define GLTSYN_EVNT_H_0(_i) (0x00088970 + ((_i) * 4))
#define GLTSYN_EVNT_L_0(_i) (0x00088968 + ((_i) * 4))
+#define GLTSYN_HHTIME_H(_i) (0x00088900 + ((_i) * 4))
+#define GLTSYN_HHTIME_L(_i) (0x000888F8 + ((_i) * 4))
#define GLTSYN_INCVAL_H(_i) (0x00088920 + ((_i) * 4))
#define GLTSYN_INCVAL_L(_i) (0x00088918 + ((_i) * 4))
#define GLTSYN_SHADJ_H(_i) (0x00088910 + ((_i) * 4))
@@ -468,6 +478,8 @@
#define GLTSYN_TGT_L_0(_i) (0x00088928 + ((_i) * 4))
#define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4))
#define GLTSYN_TIME_L(_i) (0x000888D0 + ((_i) * 4))
+#define PFHH_SEM 0x000A4200 /* Reset Source: PFR */
+#define PFHH_SEM_BUSY_M BIT(0)
#define PFTSYN_SEM 0x00088880
#define PFTSYN_SEM_BUSY_M BIT(0)
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index adcc9a251595..e6bc2285071e 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -6,6 +6,8 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+static DEFINE_XARRAY_ALLOC1(ice_aux_id);
+
/**
* ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
* @pf: pointer to PF struct
@@ -34,29 +36,20 @@ void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
struct iidc_auxiliary_drv *iadrv;
- if (!pf->adev)
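+ /* Sleeping locks are taken below, so this must be called from task context */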
+ if (WARN_ON_ONCE(!in_task()))
return;
+ mutex_lock(&pf->adev_mutex);
+ if (!pf->adev)
+ goto finish;
+
device_lock(&pf->adev->dev);
iadrv = ice_get_auxiliary_drv(pf);
if (iadrv && iadrv->event_handler)
iadrv->event_handler(pf, event);
device_unlock(&pf->adev->dev);
-}
-
-/**
- * ice_find_vsi - Find the VSI from VSI ID
- * @pf: The PF pointer to search in
- * @vsi_num: The VSI ID to search for
- */
-static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
-{
- int i;
-
- ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
- return pf->vsi[i];
- return NULL;
+finish:
+ mutex_unlock(&pf->adev_mutex);
}
/**
@@ -79,7 +72,7 @@ int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ if (!ice_is_rdma_ena(pf))
return -EINVAL;
vsi = ice_get_main_vsi(pf);
@@ -227,6 +220,11 @@ void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
+
+ qos->pfc_mode = dcbx_cfg->pfc_mode;
+ if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
+ for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
+ qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);
@@ -236,7 +234,7 @@ EXPORT_SYMBOL_GPL(ice_get_qos_params);
*/
static int ice_reserve_rdma_qvector(struct ice_pf *pf)
{
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ if (ice_is_rdma_ena(pf)) {
int index;
index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
@@ -250,6 +248,17 @@ static int ice_reserve_rdma_qvector(struct ice_pf *pf)
}
/**
+ * ice_free_rdma_qvector - free vector resources reserved for RDMA driver
+ * @pf: board private structure to initialize
+ */
+static void ice_free_rdma_qvector(struct ice_pf *pf)
+{
+ pf->num_avail_sw_msix -= pf->num_rdma_msix;
+ ice_free_res(pf->irq_tracker, pf->rdma_base_vector,
+ ICE_RES_RDMA_VEC_ID);
+}
+
+/**
* ice_adev_release - function to be mapped to AUX dev's release op
* @dev: pointer to device to free
*/
@@ -274,7 +283,7 @@ int ice_plug_aux_dev(struct ice_pf *pf)
/* if this PF doesn't support a technology that requires auxiliary
* devices, then gracefully exit
*/
- if (!ice_is_aux_ena(pf))
+ if (!ice_is_rdma_ena(pf))
return 0;
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
@@ -282,28 +291,29 @@ int ice_plug_aux_dev(struct ice_pf *pf)
return -ENOMEM;
adev = &iadev->adev;
- pf->adev = adev;
iadev->pf = pf;
adev->id = pf->aux_idx;
adev->dev.release = ice_adev_release;
adev->dev.parent = &pf->pdev->dev;
- adev->name = IIDC_RDMA_ROCE_NAME;
+ adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";
ret = auxiliary_device_init(adev);
if (ret) {
- pf->adev = NULL;
kfree(iadev);
return ret;
}
ret = auxiliary_device_add(adev);
if (ret) {
- pf->adev = NULL;
auxiliary_device_uninit(adev);
return ret;
}
+ mutex_lock(&pf->adev_mutex);
+ pf->adev = adev;
+ mutex_unlock(&pf->adev_mutex);
+
return 0;
}
@@ -312,12 +322,17 @@ int ice_plug_aux_dev(struct ice_pf *pf)
*/
void ice_unplug_aux_dev(struct ice_pf *pf)
{
- if (!pf->adev)
- return;
+ struct auxiliary_device *adev;
- auxiliary_device_delete(pf->adev);
- auxiliary_device_uninit(pf->adev);
+ mutex_lock(&pf->adev_mutex);
+ adev = pf->adev;
pf->adev = NULL;
+ mutex_unlock(&pf->adev_mutex);
+
+ if (adev) {
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+ }
}
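A hedged sketch of the idiom used by ice_unplug_aux_dev() above, with hypothetical example_* names: the pointer is snapshotted and cleared under the mutex, and the heavy (possibly sleeping) teardown then runs outside the lock, so the event path that takes the same mutex can never dereference a device that is mid-removal.

#include <linux/mutex.h>
#include <linux/slab.h>

struct example_dev { int id; };

struct example_owner {
	struct mutex lock;		/* protects ->dev */
	struct example_dev *dev;
};

static void example_unplug(struct example_owner *o)
{
	struct example_dev *dev;

	mutex_lock(&o->lock);
	dev = o->dev;		/* private copy of the pointer       */
	o->dev = NULL;		/* lock holders now see "no device"  */
	mutex_unlock(&o->lock);

	if (dev)
		kfree(dev);	/* teardown happens unlocked */
}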
/**
@@ -329,12 +344,48 @@ int ice_init_rdma(struct ice_pf *pf)
struct device *dev = &pf->pdev->dev;
int ret;
+ if (!ice_is_rdma_ena(pf)) {
+ dev_warn(dev, "RDMA is not supported on this device\n");
+ return 0;
+ }
+
+ ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
+ GFP_KERNEL);
+ if (ret) {
+ dev_err(dev, "Failed to allocate device ID for AUX driver\n");
+ return -ENOMEM;
+ }
+
/* Reserve vector resources */
ret = ice_reserve_rdma_qvector(pf);
if (ret < 0) {
dev_err(dev, "failed to reserve vectors for RDMA\n");
- return ret;
+ goto err_reserve_rdma_qvector;
}
+ pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ ret = ice_plug_aux_dev(pf);
+ if (ret)
+ goto err_plug_aux_dev;
+ return 0;
+
+err_plug_aux_dev:
+ ice_free_rdma_qvector(pf);
+err_reserve_rdma_qvector:
+ pf->adev = NULL;
+ xa_erase(&ice_aux_id, pf->aux_idx);
+ return ret;
+}
+
+/**
+ * ice_deinit_rdma - deinitialize RDMA on PF
+ * @pf: ptr to ice_pf
+ */
+void ice_deinit_rdma(struct ice_pf *pf)
+{
+ if (!ice_is_rdma_ena(pf))
+ return;
- return ice_plug_aux_dev(pf);
+ ice_unplug_aux_dev(pf);
+ ice_free_rdma_qvector(pf);
+ xa_erase(&ice_aux_id, pf->aux_idx);
}
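ice_init_rdma()/ice_deinit_rdma() now draw the auxiliary-device id from the driver-local allocating xarray declared above (DEFINE_XARRAY_ALLOC1) rather than a static index. A self-contained sketch of that xa_alloc()/xa_erase() pattern, with hypothetical example_* names:

#include <linux/limits.h>
#include <linux/xarray.h>

/* The _ALLOC1 variant never hands out index 0. */
static DEFINE_XARRAY_ALLOC1(example_ids);

static int example_get_id(u32 *id)
{
	/* NULL entry: only the allocated index matters here */
	return xa_alloc(&example_ids, id, NULL, XA_LIMIT(1, INT_MAX),
			GFP_KERNEL);
}

static void example_put_id(u32 id)
{
	xa_erase(&example_ids, id);
}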
diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h
index b7796b8aecbd..4b0c86757df9 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc_int.h
+++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h
@@ -5,7 +5,6 @@
#define _ICE_IDC_INT_H_
#include <linux/net/intel/iidc.h>
-#include "ice.h"
struct ice_pf;
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index e375ac849aec..ee5b36941ba3 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -61,13 +61,13 @@ static void ice_lag_set_backup(struct ice_lag *lag)
*/
static void ice_display_lag_info(struct ice_lag *lag)
{
- const char *name, *peer, *upper, *role, *bonded, *master;
+ const char *name, *peer, *upper, *role, *bonded, *primary;
struct device *dev = &lag->pf->pdev->dev;
name = lag->netdev ? netdev_name(lag->netdev) : "unset";
peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset";
upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
- master = lag->master ? "TRUE" : "FALSE";
+ primary = lag->primary ? "TRUE" : "FALSE";
bonded = lag->bonded ? "BONDED" : "UNBONDED";
switch (lag->role) {
@@ -87,8 +87,8 @@ static void ice_display_lag_info(struct ice_lag *lag)
role = "ERROR";
}
- dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, master:%s\n", name,
- bonded, peer, upper, role, master);
+ dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, primary:%s\n", name,
+ bonded, peer, upper, role, primary);
}
/**
@@ -119,7 +119,7 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
}
if (strcmp(bonding_info->slave.slave_name, lag_netdev_name)) {
- netdev_dbg(lag->netdev, "Bonding event recv, but slave info not for us\n");
+ netdev_dbg(lag->netdev, "Bonding event recv, but secondary info not for us\n");
goto lag_out;
}
@@ -164,8 +164,8 @@ ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
lag->bonded = true;
lag->role = ICE_LAG_UNSET;
- /* if this is the first element in an LAG mark as master */
- lag->master = !!(peers == 1);
+ /* if this is the first element in an LAG mark as primary */
+ lag->primary = !!(peers == 1);
}
/**
@@ -204,11 +204,7 @@ ice_lag_unlink(struct ice_lag *lag,
lag->upper_netdev = NULL;
}
- if (lag->peer_netdev) {
- dev_put(lag->peer_netdev);
- lag->peer_netdev = NULL;
- }
-
+ lag->peer_netdev = NULL;
ice_set_sriov_cap(pf);
ice_set_rdma_cap(pf);
lag->bonded = false;
@@ -216,6 +212,32 @@ ice_lag_unlink(struct ice_lag *lag,
}
/**
+ * ice_lag_unregister - handle netdev unregister events
+ * @lag: LAG info struct
+ * @netdev: netdev reporting the event
+ */
+static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
+{
+ struct ice_pf *pf = lag->pf;
+
+ /* check to see if this event is for this netdev
+ * check that we are in an aggregate
+ */
+ if (netdev != lag->netdev || !lag->bonded)
+ return;
+
+ if (lag->upper_netdev) {
+ dev_put(lag->upper_netdev);
+ lag->upper_netdev = NULL;
+ ice_set_sriov_cap(pf);
+ ice_set_rdma_cap(pf);
+ }
+ /* perform some cleanup in case we come back */
+ lag->bonded = false;
+ lag->role = ICE_LAG_NONE;
+}
+
+/**
* ice_lag_changeupper_event - handle LAG changeupper event
* @lag: LAG info struct
* @ptr: opaque pointer data
@@ -242,7 +264,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK");
if (!netif_is_lag_master(info->upper_dev)) {
- netdev_dbg(netdev, "changeupper rcvd, but not master. bail\n");
+ netdev_dbg(netdev, "changeupper rcvd, but not primary. bail\n");
return;
}
@@ -307,7 +329,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
ice_lag_info_event(lag, ptr);
break;
case NETDEV_UNREGISTER:
- ice_lag_unlink(lag, ptr);
+ ice_lag_unregister(lag, netdev);
break;
default:
break;
@@ -425,11 +447,9 @@ void ice_deinit_lag(struct ice_pf *pf)
if (lag->pf)
ice_unregister_lag_handler(lag);
- if (lag->upper_netdev)
- dev_put(lag->upper_netdev);
+ dev_put(lag->upper_netdev);
- if (lag->peer_netdev)
- dev_put(lag->peer_netdev);
+ dev_put(lag->peer_netdev);
kfree(lag);
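The explicit NULL checks around dev_put() can be dropped because, on the kernels this series targets (roughly v5.15 and later), dev_put() and dev_hold() are no-ops for a NULL netdev. A minimal sketch illustrating that assumption, with a hypothetical helper name:

#include <linux/netdevice.h>

static void example_drop_peer(struct net_device **peer)
{
	dev_put(*peer);		/* no-op when *peer is NULL on recent kernels */
	*peer = NULL;
}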
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index c2e3688dd8fd..51b5cf467ce2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -24,7 +24,7 @@ struct ice_lag {
struct net_device *upper_netdev; /* upper bonding netdev */
struct notifier_block notif_block;
u8 bonded:1; /* currently bonded */
- u8 master:1; /* this is a master */
+ u8 primary:1; /* this is primary */
u8 handler:1; /* did we register a rx_netdev_handler */
/* each thing blocking bonding will increment this value by one.
* If this value is zero, then bonding is allowed.
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index d981dc6f2323..89f986a75cc8 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -424,6 +424,8 @@ enum ice_rx_flex_desc_status_error_0_bits {
enum ice_rx_flex_desc_status_error_1_bits {
/* Note: These are predefined bit offsets */
ICE_RX_FLEX_DESC_STATUS1_NAT_S = 4,
+ /* [10:5] reserved */
+ ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
@@ -568,6 +570,7 @@ struct ice_tx_ctx_desc {
(0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)
#define ICE_TXD_CTX_QW1_MSS_S 50
+#define ICE_TXD_CTX_MIN_MSS 64
#define ICE_TXD_CTX_QW1_VSI_S 50
#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S)
@@ -905,17 +908,5 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
return ice_ptype_lkup[ptype];
}
-#define ICE_LINK_SPEED_UNKNOWN 0
-#define ICE_LINK_SPEED_10MBPS 10
-#define ICE_LINK_SPEED_100MBPS 100
-#define ICE_LINK_SPEED_1000MBPS 1000
-#define ICE_LINK_SPEED_2500MBPS 2500
-#define ICE_LINK_SPEED_5000MBPS 5000
-#define ICE_LINK_SPEED_10000MBPS 10000
-#define ICE_LINK_SPEED_20000MBPS 20000
-#define ICE_LINK_SPEED_25000MBPS 25000
-#define ICE_LINK_SPEED_40000MBPS 40000
-#define ICE_LINK_SPEED_50000MBPS 50000
-#define ICE_LINK_SPEED_100000MBPS 100000
#endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 09a3297cd63c..450317dfcca7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -8,6 +8,7 @@
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_devlink.h"
+#include "ice_vsi_vlan_ops.h"
/**
* ice_vsi_type_str - maps VSI type enum to string equivalents
@@ -165,21 +166,19 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
- * @vf_id: ID of the VF being configured
*
* Return 0 on success and a negative value on error
*/
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
{
+ enum ice_vsi_type vsi_type = vsi->type;
struct ice_pf *pf = vsi->back;
- struct ice_vf *vf = NULL;
+ struct ice_vf *vf = vsi->vf;
- if (vsi->type == ICE_VSI_VF)
- vsi->vf_id = vf_id;
- else
- vsi->vf_id = ICE_INVAL_VFID;
+ if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
+ return;
- switch (vsi->type) {
+ switch (vsi_type) {
case ICE_VSI_PF:
if (vsi->req_txq) {
vsi->alloc_txq = vsi->req_txq;
@@ -216,22 +215,21 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
/* The number of queues for ctrl VSI is equal to number of VFs.
* Each ring is associated to the corresponding VF_PR netdev.
*/
- vsi->alloc_txq = pf->num_alloc_vfs;
- vsi->alloc_rxq = pf->num_alloc_vfs;
+ vsi->alloc_txq = ice_get_num_vfs(pf);
+ vsi->alloc_rxq = vsi->alloc_txq;
vsi->num_q_vectors = 1;
break;
case ICE_VSI_VF:
- vf = &pf->vf[vsi->vf_id];
if (vf->num_req_qs)
vf->num_vf_qs = vf->num_req_qs;
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs;
- /* pf->num_msix_per_vf includes (VF miscellaneous vector +
+ /* pf->vfs.num_msix_per includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is number
* of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
* original vector count
*/
- vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
+ vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF;
break;
case ICE_VSI_CTRL:
vsi->alloc_txq = 1;
@@ -247,7 +245,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vsi->alloc_rxq = 1;
break;
default:
- dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type);
+ dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
break;
}
@@ -284,29 +282,30 @@ static int ice_get_free_slot(void *array, int size, int curr)
}
/**
- * ice_vsi_delete - delete a VSI from the switch
+ * ice_vsi_delete_from_hw - delete a VSI from the switch
* @vsi: pointer to VSI being removed
*/
-void ice_vsi_delete(struct ice_vsi *vsi)
+static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
+ int status;
+ ice_fltr_remove_all(vsi);
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return;
if (vsi->type == ICE_VSI_VF)
- ctxt->vf_num = vsi->vf_id;
+ ctxt->vf_num = vsi->vf->vf_id;
ctxt->vsi_num = vsi->vsi_num;
memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
if (status)
- dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
+ vsi->vsi_num, status);
kfree(ctxt);
}
@@ -350,48 +349,144 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
}
/**
- * ice_vsi_clear - clean up and deallocate the provided VSI
+ * ice_vsi_free_stats - Free the ring statistics structures
+ * @vsi: VSI pointer
+ */
+static void ice_vsi_free_stats(struct ice_vsi *vsi)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ if (vsi->type == ICE_VSI_CHNL)
+ return;
+ if (!pf->vsi_stats)
+ return;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+ if (!vsi_stat)
+ return;
+
+ ice_for_each_alloc_txq(vsi, i) {
+ if (vsi_stat->tx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
+ }
+ }
+
+ ice_for_each_alloc_rxq(vsi, i) {
+ if (vsi_stat->rx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
+ }
+ }
+
+ kfree(vsi_stat->tx_ring_stats);
+ kfree(vsi_stat->rx_ring_stats);
+ kfree(vsi_stat);
+ pf->vsi_stats[vsi->idx] = NULL;
+}
+
+/**
+ * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
+ * @vsi: VSI which is having stats allocated
+ */
+static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
+{
+ struct ice_ring_stats **tx_ring_stats;
+ struct ice_ring_stats **rx_ring_stats;
+ struct ice_vsi_stats *vsi_stats;
+ struct ice_pf *pf = vsi->back;
+ u16 i;
+
+ vsi_stats = pf->vsi_stats[vsi->idx];
+ tx_ring_stats = vsi_stats->tx_ring_stats;
+ rx_ring_stats = vsi_stats->rx_ring_stats;
+
+ /* Allocate Tx ring stats */
+ ice_for_each_alloc_txq(vsi, i) {
+ struct ice_ring_stats *ring_stats;
+ struct ice_tx_ring *ring;
+
+ ring = vsi->tx_rings[i];
+ ring_stats = tx_ring_stats[i];
+
+ if (!ring_stats) {
+ ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+ if (!ring_stats)
+ goto err_out;
+
+ WRITE_ONCE(tx_ring_stats[i], ring_stats);
+ }
+
+ ring->ring_stats = ring_stats;
+ }
+
+ /* Allocate Rx ring stats */
+ ice_for_each_alloc_rxq(vsi, i) {
+ struct ice_ring_stats *ring_stats;
+ struct ice_rx_ring *ring;
+
+ ring = vsi->rx_rings[i];
+ ring_stats = rx_ring_stats[i];
+
+ if (!ring_stats) {
+ ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+ if (!ring_stats)
+ goto err_out;
+
+ WRITE_ONCE(rx_ring_stats[i], ring_stats);
+ }
+
+ ring->ring_stats = ring_stats;
+ }
+
+ return 0;
+
+err_out:
+ ice_vsi_free_stats(vsi);
+ return -ENOMEM;
+}
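The per-ring stats introduced above are published with WRITE_ONCE() and retired with kfree_rcu(), so lockless readers on the stats-collection paths either keep using the old structure until a grace period elapses or observe NULL. A minimal sketch of that publish/retire pattern, with hypothetical example_* names:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_stats {
	u64 packets;
	struct rcu_head rcu;	/* required by kfree_rcu() */
};

static int example_publish(struct example_stats **slot)
{
	struct example_stats *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;
	WRITE_ONCE(*slot, s);	/* make it visible to lockless readers */
	return 0;
}

static void example_retire(struct example_stats **slot)
{
	struct example_stats *s = *slot;

	if (!s)
		return;
	WRITE_ONCE(*slot, NULL);	/* new readers see NULL...            */
	kfree_rcu(s, rcu);		/* ...old readers drain, then free it */
}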
+
+/**
+ * ice_vsi_free - clean up and deallocate the provided VSI
* @vsi: pointer to VSI being cleared
*
* This deallocates the VSI's queue resources, removes it from the PF's
* VSI array if necessary, and deallocates the VSI
- *
- * Returns 0 on success, negative on failure
*/
-int ice_vsi_clear(struct ice_vsi *vsi)
+static void ice_vsi_free(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
struct device *dev;
- if (!vsi)
- return 0;
-
- if (!vsi->back)
- return -EINVAL;
+ if (!vsi || !vsi->back)
+ return;
pf = vsi->back;
dev = ice_pf_to_dev(pf);
if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
- return -EINVAL;
+ return;
}
mutex_lock(&pf->sw_mutex);
/* updates the PF for this cleared VSI */
pf->vsi[vsi->idx] = NULL;
- if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
- pf->next_vsi = vsi->idx;
- if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL &&
- vsi->vf_id != ICE_INVAL_VFID)
- pf->next_vsi = vsi->idx;
+ pf->next_vsi = vsi->idx;
+ ice_vsi_free_stats(vsi);
ice_vsi_free_arrays(vsi);
mutex_unlock(&pf->sw_mutex);
devm_kfree(dev, vsi);
+}
- return 0;
+void ice_vsi_delete(struct ice_vsi *vsi)
+{
+ ice_vsi_delete_from_hw(vsi);
+ ice_vsi_free(vsi);
}
/**
@@ -436,122 +531,155 @@ static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *d
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
struct ice_pf *pf = q_vector->vsi->back;
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
return IRQ_HANDLED;
- ice_for_each_vf(pf, i)
- napi_schedule(&pf->vf[i].repr->q_vector->napi);
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf)
+ napi_schedule(&vf->repr->q_vector->napi);
+ rcu_read_unlock();
return IRQ_HANDLED;
}
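With VFs now kept in a hashtable rather than a fixed pf->vf[] array, the IRQ handler above walks them under rcu_read_lock() via ice_for_each_vf_rcu(), which stays safe even while VFs are being created or torn down concurrently. A generic sketch of that RCU hashtable walk, with hypothetical example_* names (entries would be added with hash_add_rcu() and removed with hash_del_rcu() plus kfree_rcu()):

#include <linux/hashtable.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

struct example_vf {
	int id;
	struct hlist_node node;	/* linked in with hash_add_rcu() */
};

static DEFINE_HASHTABLE(example_vfs, 4);	/* 2^4 buckets */

static void example_kick_all_vfs(void)
{
	struct example_vf *vf;
	int bkt;

	rcu_read_lock();
	hash_for_each_rcu(example_vfs, bkt, vf, node)
		pr_info("kicking VF %d\n", vf->id);
	rcu_read_unlock();
}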
/**
- * ice_vsi_alloc - Allocates the next available struct VSI in the PF
- * @pf: board private structure
- * @vsi_type: type of VSI
- * @ch: ptr to channel
- * @vf_id: ID of the VF being configured
- *
- * returns a pointer to a VSI on success, NULL on failure.
+ * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
+ * @vsi: VSI pointer
*/
-static struct ice_vsi *
-ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
- struct ice_channel *ch, u16 vf_id)
+static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_vsi *vsi = NULL;
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf = vsi->back;
- /* Need to protect the allocation of the VSIs at the PF level */
- mutex_lock(&pf->sw_mutex);
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
+ if (!pf->vsi_stats)
+ return -ENOENT;
- /* If we have already allocated our maximum number of VSIs,
- * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
- * is available to be populated
- */
- if (pf->next_vsi == ICE_NO_VSI) {
- dev_dbg(dev, "out of VSI slots!\n");
- goto unlock_pf;
- }
+ if (pf->vsi_stats[vsi->idx])
+ /* realloc will happen in rebuild path */
+ return 0;
- vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
- if (!vsi)
- goto unlock_pf;
+ vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
+ if (!vsi_stat)
+ return -ENOMEM;
- vsi->type = vsi_type;
- vsi->back = pf;
- set_bit(ICE_VSI_DOWN, vsi->state);
+ vsi_stat->tx_ring_stats =
+ kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
+ GFP_KERNEL);
+ if (!vsi_stat->tx_ring_stats)
+ goto err_alloc_tx;
- if (vsi_type == ICE_VSI_VF)
- ice_vsi_set_num_qs(vsi, vf_id);
- else if (vsi_type != ICE_VSI_CHNL)
- ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
+ vsi_stat->rx_ring_stats =
+ kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
+ GFP_KERNEL);
+ if (!vsi_stat->rx_ring_stats)
+ goto err_alloc_rx;
- switch (vsi->type) {
- case ICE_VSI_SWITCHDEV_CTRL:
+ pf->vsi_stats[vsi->idx] = vsi_stat;
+
+ return 0;
+
+err_alloc_rx:
+ kfree(vsi_stat->rx_ring_stats);
+err_alloc_tx:
+ kfree(vsi_stat->tx_ring_stats);
+ kfree(vsi_stat);
+ pf->vsi_stats[vsi->idx] = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * ice_vsi_alloc_def - set default values for already allocated VSI
+ * @vsi: ptr to VSI
+ * @ch: ptr to channel
+ */
+static int
+ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
+{
+ if (vsi->type != ICE_VSI_CHNL) {
+ ice_vsi_set_num_qs(vsi);
if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
+ return -ENOMEM;
+ }
+ switch (vsi->type) {
+ case ICE_VSI_SWITCHDEV_CTRL:
/* Setup eswitch MSIX irq handler for VSI */
vsi->irq_handler = ice_eswitch_msix_clean_rings;
break;
case ICE_VSI_PF:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
-
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
case ICE_VSI_CTRL:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
-
/* Setup ctrl VSI MSIX irq handler */
vsi->irq_handler = ice_msix_clean_ctrl_vsi;
break;
- case ICE_VSI_VF:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
- break;
case ICE_VSI_CHNL:
if (!ch)
- goto err_rings;
+ return -EINVAL;
+
vsi->num_rxq = ch->num_rxq;
vsi->num_txq = ch->num_txq;
vsi->next_base_q = ch->base_q;
break;
+ case ICE_VSI_VF:
case ICE_VSI_LB:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
break;
default:
- dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
- goto unlock_pf;
+ ice_vsi_free_arrays(vsi);
+ return -EINVAL;
}
- if (vsi->type == ICE_VSI_CTRL && vf_id == ICE_INVAL_VFID) {
- /* Use the last VSI slot as the index for PF control VSI */
- vsi->idx = pf->num_alloc_vsi - 1;
- pf->ctrl_vsi_idx = vsi->idx;
- pf->vsi[vsi->idx] = vsi;
- } else {
- /* fill slot and make note of the index */
- vsi->idx = pf->next_vsi;
- pf->vsi[pf->next_vsi] = vsi;
+ return 0;
+}
- /* prepare pf->next_vsi for next use */
- pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
- pf->next_vsi);
+/**
+ * ice_vsi_alloc - Allocates the next available struct VSI in the PF
+ * @pf: board private structure
+ *
+ * Reserves a VSI index from the PF and allocates an empty VSI structure
+ * without a type. The VSI structure must later be initialized by calling
+ * ice_vsi_cfg().
+ *
+ * returns a pointer to a VSI on success, NULL on failure.
+ */
+static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *vsi = NULL;
+
+ /* Need to protect the allocation of the VSIs at the PF level */
+ mutex_lock(&pf->sw_mutex);
+
+ /* If we have already allocated our maximum number of VSIs,
+ * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
+ * is available to be populated
+ */
+ if (pf->next_vsi == ICE_NO_VSI) {
+ dev_dbg(dev, "out of VSI slots!\n");
+ goto unlock_pf;
}
- if (vsi->type == ICE_VSI_CTRL && vf_id != ICE_INVAL_VFID)
- pf->vf[vf_id].ctrl_vsi_idx = vsi->idx;
- goto unlock_pf;
+ vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
+ if (!vsi)
+ goto unlock_pf;
+
+ vsi->back = pf;
+ set_bit(ICE_VSI_DOWN, vsi->state);
+
+ /* fill slot and make note of the index */
+ vsi->idx = pf->next_vsi;
+ pf->vsi[pf->next_vsi] = vsi;
+
+ /* prepare pf->next_vsi for next use */
+ pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
+ pf->next_vsi);
-err_rings:
- devm_kfree(dev, vsi);
- vsi = NULL;
unlock_pf:
mutex_unlock(&pf->sw_mutex);
return vsi;
@@ -570,10 +698,16 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
u32 g_val, b_val;
- /* Flow Director filters are only allocated/assigned to the PF VSI which
- * passes the traffic. The CTRL VSI is only used to add/delete filters
- * so we don't allocate resources to it
+ /* Flow Director filters are only allocated/assigned to the PF VSI or
+ * CHNL VSI which passes the traffic. The CTRL VSI is only used to
+ * add/delete filters so resources are not allocated to it
*/
+ if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ return -EPERM;
+
+ if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
+ vsi->type == ICE_VSI_CHNL))
+ return -EPERM;
/* FD filters from guaranteed pool per VSI */
g_val = pf->hw.func_caps.fd_fltr_guar;
@@ -585,19 +719,56 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi)
if (!b_val)
return -EPERM;
- if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF))
- return -EPERM;
+ /* PF main VSI gets only 64 FD resources from guaranteed pool
+ * when ADQ is configured.
+ */
+#define ICE_PF_VSI_GFLTR 64
- if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
- return -EPERM;
+ /* determine FD filter resources per VSI from shared(best effort) and
+ * dedicated pool
+ */
+ if (vsi->type == ICE_VSI_PF) {
+ vsi->num_gfltr = g_val;
+ /* if MQPRIO is configured, main VSI doesn't get all FD
+ * resources from guaranteed pool. PF VSI gets 64 FD resources
+ */
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
+ if (g_val < ICE_PF_VSI_GFLTR)
+ return -EPERM;
+ /* allow bare minimum entries for PF VSI */
+ vsi->num_gfltr = ICE_PF_VSI_GFLTR;
+ }
- vsi->num_gfltr = g_val / pf->num_alloc_vsi;
+ /* each VSI gets same "best_effort" quota */
+ vsi->num_bfltr = b_val;
+ } else if (vsi->type == ICE_VSI_VF) {
+ vsi->num_gfltr = 0;
- /* each VSI gets same "best_effort" quota */
- vsi->num_bfltr = b_val;
+ /* each VSI gets same "best_effort" quota */
+ vsi->num_bfltr = b_val;
+ } else {
+ struct ice_vsi *main_vsi;
+ int numtc;
- if (vsi->type == ICE_VSI_VF) {
- vsi->num_gfltr = 0;
+ main_vsi = ice_get_main_vsi(pf);
+ if (!main_vsi)
+ return -EPERM;
+
+ if (!main_vsi->all_numtc)
+ return -EINVAL;
+
+ /* figure out ADQ numtc */
+ numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;
+
+ /* only one TC but still asking resources for channels,
+ * invalid config
+ */
+ if (numtc < ICE_CHNL_START_TC)
+ return -EPERM;
+
+ g_val -= ICE_PF_VSI_GFLTR;
+ /* channel VSIs gets equal share from guaranteed pool */
+ vsi->num_gfltr = g_val / numtc;
/* each VSI gets same "best_effort" quota */
vsi->num_bfltr = b_val;
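For the channel (ADQ) case above, the guaranteed flow-director pool is split only after reserving ICE_PF_VSI_GFLTR entries for the PF VSI. A hedged worked example, assuming ICE_CHNL_START_TC is 1 and purely hypothetical capability numbers:

/*
 * Hypothetical numbers: fd_fltr_guar = 512, all_numtc = 4
 * (1 default TC + 3 ADQ traffic classes), MQPRIO enabled.
 *
 *   PF VSI:          num_gfltr = ICE_PF_VSI_GFLTR              = 64
 *   ADQ classes:     numtc     = all_numtc - ICE_CHNL_START_TC = 3
 *   remaining pool:  g_val     = 512 - 64                      = 448
 *   per channel VSI: num_gfltr = 448 / 3                       = 149
 *
 * Every VSI additionally receives the same best-effort quota b_val.
 */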
@@ -689,14 +860,14 @@ bool ice_is_safe_mode(struct ice_pf *pf)
}
/**
- * ice_is_aux_ena
+ * ice_is_rdma_ena
* @pf: pointer to the PF struct
*
- * returns true if AUX devices/drivers are supported, false otherwise
+ * returns true if RDMA is currently supported, false otherwise
*/
-bool ice_is_aux_ena(struct ice_pf *pf)
+bool ice_is_rdma_ena(struct ice_pf *pf)
{
- return test_bit(ICE_FLAG_AUX_ENA, pf->flags);
+ return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
/**
@@ -709,15 +880,15 @@ bool ice_is_aux_ena(struct ice_pf *pf)
static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
+ int status;
if (ice_is_safe_mode(pf))
return;
status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
if (status)
- dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
+ vsi->vsi_num, status);
}
/**
@@ -795,11 +966,12 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
/**
* ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
+ * @hw: HW structure used to determine the VLAN mode of the device
* @ctxt: the VSI context being set
*
* This initializes a default VSI context for all sections except the Queues.
*/
-static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
+static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
{
u32 table = 0;
@@ -810,13 +982,30 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
/* Traffic from VSI can be sent to LAN */
ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
- /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
- * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
- * packets untagged/tagged.
+ /* allow all untagged/tagged packets by default on Tx */
+ ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL &
+ ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >>
+ ICE_AQ_VSI_INNER_VLAN_TX_MODE_S);
+ /* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
+ * results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
+ *
+ * DVM - leave inner VLAN in packet by default
*/
- ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
- ICE_AQ_VSI_VLAN_MODE_M) >>
- ICE_AQ_VSI_VLAN_MODE_S);
+ if (ice_is_dvm_ena(hw)) {
+ ctxt->info.inner_vlan_flags |=
+ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
+ ctxt->info.outer_vlan_flags =
+ (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
+ ctxt->info.outer_vlan_flags |=
+ (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
+ ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M;
+ ctxt->info.outer_vlan_flags |=
+ FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
+ }
/* Have 1:1 UP mapping for both ingress/egress tables */
table |= ICE_UP_TABLE_TRANSLATE(0, 0);
table |= ICE_UP_TABLE_TRANSLATE(1, 1);
@@ -838,9 +1027,9 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
* @vsi: the VSI being configured
* @ctxt: VSI context structure
*/
-static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- u16 offset = 0, qmap = 0, tx_count = 0, pow = 0;
+ u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
u16 num_txq_per_tc, num_rxq_per_tc;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
@@ -907,11 +1096,24 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
* at least 1)
*/
if (offset)
- vsi->num_rxq = offset;
+ rx_count = offset;
else
- vsi->num_rxq = num_rxq_per_tc;
+ rx_count = num_rxq_per_tc;
+
+ if (rx_count > vsi->alloc_rxq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
+ rx_count, vsi->alloc_rxq);
+ return -EINVAL;
+ }
+
+ if (tx_count > vsi->alloc_txq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
+ tx_count, vsi->alloc_txq);
+ return -EINVAL;
+ }
vsi->num_txq = tx_count;
+ vsi->num_rxq = rx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
@@ -929,6 +1131,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
*/
ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
+
+ return 0;
}
/**
@@ -942,7 +1146,7 @@ static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
u16 dflt_q, report_q, val;
if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
- vsi->type != ICE_VSI_VF)
+ vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)
return;
val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
@@ -1040,12 +1244,15 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
/**
* ice_vsi_init - Create and initialize a VSI
* @vsi: the VSI being configured
- * @init_vsi: is this call creating a VSI
+ * @vsi_flags: VSI configuration flags
+ *
+ * Set ICE_FLAG_VSI_INIT to initialize a new VSI context, clear it to
+ * reconfigure an existing context.
*
* This initializes a VSI context depending on the VSI type to be added and
* passes it down to the add_vsi aq command to create a new VSI.
*/
-static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
+static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
@@ -1071,7 +1278,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
case ICE_VSI_VF:
ctxt->flags = ICE_AQ_VSI_TYPE_VF;
/* VF number here is the absolute VF number (0-255) */
- ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
+ ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
break;
default:
ret = -ENODEV;
@@ -1093,7 +1300,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
}
- ice_set_dflt_vsi_ctx(ctxt);
+ ice_set_dflt_vsi_ctx(hw, ctxt);
if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
ice_set_fd_vsi_ctx(ctxt, vsi);
/* if the switch is in VEB mode, allow VSI loopback */
@@ -1107,7 +1314,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
/* if updating VSI context, make sure to set valid_section:
* to indicate which section of VSI context being updated
*/
- if (!init_vsi)
+ if (!(vsi_flags & ICE_VSI_FLAG_INIT))
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
}
@@ -1116,8 +1323,12 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
if (vsi->type == ICE_VSI_CHNL) {
ice_chnl_vsi_setup_q_map(vsi, ctxt);
} else {
- ice_vsi_setup_q_map(vsi, ctxt);
- if (!init_vsi) /* means VSI being updated */
+ ret = ice_vsi_setup_q_map(vsi, ctxt);
+ if (ret)
+ goto out;
+
+ if (!(vsi_flags & ICE_VSI_FLAG_INIT))
+ /* means VSI being updated */
/* must to indicate which section of VSI context are
* being modified
*/
@@ -1125,25 +1336,6 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
}
- /* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
- * respectively
- */
- if (vsi->type == ICE_VSI_VF) {
- ctxt->info.valid_sections |=
- cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
- if (pf->vf[vsi->vf_id].spoofchk) {
- ctxt->info.sec_flags |=
- ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
- } else {
- ctxt->info.sec_flags &=
- ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
- }
- }
-
/* Allow control frames out of main VSI */
if (vsi->type == ICE_VSI_PF) {
ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
@@ -1151,7 +1343,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
}
- if (init_vsi) {
+ if (vsi_flags & ICE_VSI_FLAG_INIT) {
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(dev, "Add VSI failed, err %d\n", ret);
@@ -1282,10 +1474,40 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
}
/**
+ * ice_get_vf_ctrl_res - Get VF control VSI resource
+ * @pf: pointer to the PF structure
+ * @vsi: the VSI to allocate a resource for
+ *
+ * Look up whether another VF has already allocated the control VSI resource.
+ * If so, re-use this resource so that we share it among all VFs.
+ *
+ * Otherwise, allocate the resource and return it.
+ */
+static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+ int base;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
+ rcu_read_unlock();
+ return base;
+ }
+ }
+ rcu_read_unlock();
+
+ return ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors,
+ ICE_RES_VF_CTRL_VEC_ID);
+}
+
+/**
* ice_vsi_setup_vector_base - Set up the base vector for the given VSI
* @vsi: ptr to the VSI
*
- * This should only be called after ice_vsi_alloc() which allocates the
+ * This should only be called after ice_vsi_alloc_def() which allocates the
* corresponding SW VSI structure and initializes num_queue_pairs for the
* newly allocated VSI.
*
@@ -1313,20 +1535,8 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
- if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) {
- base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
- break;
- }
- }
- if (i == pf->num_alloc_vfs)
- base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
- ICE_RES_VF_CTRL_VEC_ID);
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+ base = ice_get_vf_ctrl_res(pf, vsi);
} else {
base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
@@ -1388,6 +1598,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
*/
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
+ bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
struct ice_pf *pf = vsi->back;
struct device *dev;
u16 i;
@@ -1409,6 +1620,11 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->tx_tstamps = &pf->ptp.port.tx;
ring->dev = dev;
ring->count = vsi->num_tx_desc;
+ ring->txq_teid = ICE_INVAL_TEID;
+ if (dvm_ena)
+ ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
+ else
+ ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
WRITE_ONCE(vsi->tx_rings[i], ring);
}
@@ -1427,6 +1643,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->netdev = vsi->netdev;
ring->dev = dev;
ring->count = vsi->num_rx_desc;
+ ring->cached_phctime = pf->ptp.cached_phc_time;
WRITE_ONCE(vsi->rx_rings[i], ring);
}
@@ -1467,6 +1684,22 @@ void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
}
/**
+ * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
+ * @vsi: VSI to be configured
+ * @disable: set to true to have FCS / CRC in the frame data
+ */
+void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
+{
+ int i;
+
+ ice_for_each_rxq(vsi, i)
+ if (disable)
+ vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
+ else
+ vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+}
+
+/**
* ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
* @vsi: VSI to be configured
*/
@@ -1545,8 +1778,8 @@ ice_vsi_cfg_rss_exit:
static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
@@ -1557,8 +1790,8 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
if (status)
- dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n",
- vsi->vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
+ vsi->vsi_num, status);
}
/**
@@ -1577,8 +1810,8 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
struct device *dev;
+ int status;
dev = ice_pf_to_dev(pf);
if (ice_is_safe_mode(pf)) {
@@ -1590,57 +1823,63 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for IPv6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for sctp4 with input set IP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
/* configure RSS for sctp6 with input set IPv6 src/dst */
status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
if (status)
- dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n",
- vsi_num, ice_stat_str(status));
+ dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
+ vsi_num, status);
+
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
+ ICE_FLOW_SEG_HDR_ESP);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
+ vsi_num, status);
}
/**
@@ -1675,11 +1914,15 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
{
struct ice_eth_stats *prev_es, *cur_es;
struct ice_hw *hw = &vsi->back->hw;
+ struct ice_pf *pf = vsi->back;
u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
prev_es = &vsi->eth_stats_prev;
cur_es = &vsi->eth_stats;
+ if (ice_is_reset_in_progress(pf->state))
+ vsi->stat_offsets_loaded = false;
+
ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
&prev_es->rx_bytes, &cur_es->rx_bytes);
@@ -1714,71 +1957,14 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
}
/**
- * ice_vsi_add_vlan - Add VSI membership for given VLAN
- * @vsi: the VSI being configured
- * @vid: VLAN ID to be added
- * @action: filter action to be performed on match
- */
-int
-ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- int err = 0;
-
- dev = ice_pf_to_dev(pf);
-
- if (!ice_fltr_add_vlan(vsi, vid, action)) {
- vsi->num_vlan++;
- } else {
- err = -ENODEV;
- dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
- vsi->vsi_num);
- }
-
- return err;
-}
-
-/**
- * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
- * @vsi: the VSI being configured
- * @vid: VLAN ID to be removed
- *
- * Returns 0 on success and negative on failure
- */
-int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
-{
- struct ice_pf *pf = vsi->back;
- enum ice_status status;
- struct device *dev;
- int err = 0;
-
- dev = ice_pf_to_dev(pf);
-
- status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
- if (!status) {
- vsi->num_vlan--;
- } else if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %s\n",
- vid, vsi->vsi_num, ice_stat_str(status));
- } else {
- dev_err(dev, "Error removing VLAN %d on vsi %i error: %s\n",
- vid, vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
- }
-
- return err;
-}
-
-/**
* ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
* @vsi: VSI
*/
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
{
if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
- vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- vsi->rx_buf_len = ICE_RXBUF_2048;
+ vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+ vsi->rx_buf_len = ICE_RXBUF_1664;
#if (PAGE_SIZE < 8192)
} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
(vsi->netdev->mtu <= ETH_DATA_LEN)) {
@@ -1787,11 +1973,7 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
#endif
} else {
vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
-#if (PAGE_SIZE < 8192)
vsi->rx_buf_len = ICE_RXBUF_3072;
-#else
- vsi->rx_buf_len = ICE_RXBUF_2048;
-#endif
}
}
@@ -1942,10 +2124,10 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
if (ret)
return ret;
- ice_for_each_xdp_txq(vsi, i)
- vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
+ ice_for_each_rxq(vsi, i)
+ ice_tx_xsk_pool(vsi, i);
- return ret;
+ return 0;
}
/**
@@ -2098,101 +2280,6 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
}
/**
- * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
- * @vsi: the VSI being changed
- */
-int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
-{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
-
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- /* Here we are configuring the VSI to let the driver add VLAN tags by
- * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
- * insertion happens in the Tx hot path, in ice_tx_map.
- */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
-
- /* Preserve existing VLAN strip setting */
- ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
- ICE_AQ_VSI_VLAN_EMOD_M);
-
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
-
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
- goto out;
- }
-
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
-out:
- kfree(ctxt);
- return ret;
-}
-
-/**
- * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
- * @vsi: the VSI being changed
- * @ena: boolean value indicating if this is a enable or disable request
- */
-int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
-{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
-
- /* do not allow modifying VLAN stripping when a port VLAN is configured
- * on this VSI
- */
- if (vsi->info.pvid)
- return 0;
-
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- /* Here we are configuring what the VSI should do with the VLAN tag in
- * the Rx packet. We can either leave the tag in the packet or put it in
- * the Rx descriptor.
- */
- if (ena)
- /* Strip VLAN tag from Rx packet and put it in the desc */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
- else
- /* Disable stripping. Leave tag in packet */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
-
- /* Allow all packets untagged/tagged */
- ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
-
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
-
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n",
- ena, ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
- goto out;
- }
-
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
-out:
- kfree(ctxt);
- return ret;
-}
-
-/**
* ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
* @vsi: the VSI whose rings are to be enabled
*
@@ -2272,73 +2359,42 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
}
/**
- * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
- * @vsi: VSI to check whether or not VLAN pruning is enabled.
+ * ice_vsi_is_rx_queue_active
+ * @vsi: the VSI being configured
*
- * returns true if Rx VLAN pruning is enabled and false otherwise.
+ * Return true if at least one queue is active.
*/
-bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
+bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
{
- if (!vsi)
- return false;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ int i;
- return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
+ ice_for_each_rxq(vsi, i) {
+ u32 rx_reg;
+ int pf_q;
+
+ pf_q = vsi->rxq_map[i];
+ rx_reg = rd32(hw, QRX_CTRL(pf_q));
+ if (rx_reg & QRX_CTRL_QENA_STAT_M)
+ return true;
+ }
+
+ return false;
}
/**
- * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
- * @vsi: VSI to enable or disable VLAN pruning on
- * @ena: set to true to enable VLAN pruning and false to disable it
+ * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
+ * @vsi: VSI to check whether or not VLAN pruning is enabled.
*
- * returns 0 if VSI is updated, negative otherwise
+ * returns true if Rx VLAN pruning is enabled and false otherwise.
*/
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
{
- struct ice_vsi_ctx *ctxt;
- struct ice_pf *pf;
- int status;
-
if (!vsi)
- return -EINVAL;
-
- /* Don't enable VLAN pruning if the netdev is currently in promiscuous
- * mode. VLAN pruning will be enabled when the interface exits
- * promiscuous mode if any VLAN filters are active.
- */
- if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
- return 0;
-
- pf = vsi->back;
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- ctxt->info = vsi->info;
-
- if (ena)
- ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- else
- ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
-
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
-
- status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
- if (status) {
- netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n",
- ena ? "En" : "Dis", vsi->idx, vsi->vsi_num,
- ice_stat_str(status),
- ice_aq_str(pf->hw.adminq.sq_last_status));
- goto err_out;
- }
-
- vsi->info.sw_flags2 = ctxt->info.sw_flags2;
-
- kfree(ctxt);
- return 0;
+ return false;
-err_out:
- kfree(ctxt);
- return -EIO;
+ return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
}
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
@@ -2375,7 +2431,7 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
}
if (vsi->type == ICE_VSI_VF) {
- struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
+ struct ice_vf *vf = vsi->vf;
q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
} else {
@@ -2405,11 +2461,11 @@ clear_reg_idx:
*/
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
- enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
- enum ice_sw_fwd_act_type act);
+ int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
+ enum ice_sw_fwd_act_type act);
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
dev = ice_pf_to_dev(pf);
eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
@@ -2428,9 +2484,9 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
}
if (status)
- dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n",
+ dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
create ? "adding" : "removing", tx ? "TX" : "RX",
- vsi->vsi_num, ice_stat_str(status));
+ vsi->vsi_num, status);
}
/**
@@ -2450,7 +2506,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
struct ice_port_info *port_info;
struct ice_pf *pf = vsi->back;
u32 agg_node_id_start = 0;
- enum ice_status status;
+ int status;
/* create (as needed) scheduler aggregator node and move VSI into
* corresponding aggregator node
@@ -2529,7 +2585,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
agg_id);
return;
}
- /* aggregator node is created, store the neeeded info */
+ /* aggregator node is created, store the needed info */
agg_node->valid = true;
agg_node->agg_id = agg_id;
}
@@ -2556,58 +2612,99 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
}
/**
- * ice_vsi_setup - Set up a VSI by a given type
- * @pf: board private structure
- * @pi: pointer to the port_info instance
- * @vsi_type: VSI type
- * @vf_id: defines VF ID to which this VSI connects. This field is meant to be
- * used only for ICE_VSI_VF VSI type. For other VSI types, should
- * fill-in ICE_INVAL_VFID as input.
- * @ch: ptr to channel
- *
- * This allocates the sw VSI structure and its queue resources.
+ * ice_free_vf_ctrl_res - Free the VF control VSI resource
+ * @pf: pointer to PF structure
+ * @vsi: the VSI to free resources for
*
- * Returns pointer to the successfully allocated and configured VSI sw struct on
- * success, NULL on failure.
+ * Check if the VF control VSI resource is still in use. If no VF is using it
+ * any more, release the VSI resource. Otherwise, leave it to be cleaned up
+ * once no other VF uses it.
*/
-struct ice_vsi *
-ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch)
+static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+
+ /* No other VFs left that have control VSI. It is now safe to reclaim
+ * SW interrupts back to the common pool.
+ */
+ ice_free_res(pf->irq_tracker, vsi->base_vector,
+ ICE_RES_VF_CTRL_VEC_ID);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+}
+
+static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
- enum ice_status status;
- struct ice_vsi *vsi;
int ret, i;
- if (vsi_type == ICE_VSI_CHNL)
- vsi = ice_vsi_alloc(pf, vsi_type, ch, ICE_INVAL_VFID);
- else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
- vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf_id);
- else
- vsi = ice_vsi_alloc(pf, vsi_type, NULL, ICE_INVAL_VFID);
+ /* configure VSI nodes based on number of queues and TC's */
+ ice_for_each_traffic_class(i) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(i)))
+ continue;
- if (!vsi) {
- dev_err(dev, "could not allocate VSI\n");
- return NULL;
+ if (vsi->type == ICE_VSI_CHNL) {
+ if (!vsi->alloc_txq && vsi->num_txq)
+ max_txqs[i] = vsi->num_txq;
+ else
+ max_txqs[i] = pf->num_lan_tx;
+ } else {
+ max_txqs[i] = vsi->alloc_txq;
+ }
}
- vsi->port_info = pi;
+ dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (ret) {
+ dev_err(dev, "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vsi_cfg_def - configure default VSI based on the type
+ * @vsi: pointer to VSI
+ * @params: the parameters to configure this VSI with
+ */
+static int
+ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+{
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ struct ice_pf *pf = vsi->back;
+ int ret;
+
vsi->vsw = pf->first_sw;
- if (vsi->type == ICE_VSI_PF)
- vsi->ethtype = ETH_P_PAUSE;
- if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_CTRL)
- vsi->vf_id = vf_id;
+ ret = ice_vsi_alloc_def(vsi, params->ch);
+ if (ret)
+ return ret;
+
+ /* allocate memory for Tx/Rx ring stat pointers */
+ ret = ice_vsi_alloc_stat_arrays(vsi);
+ if (ret)
+ goto unroll_vsi_alloc;
ice_alloc_fd_res(vsi);
- if (vsi_type != ICE_VSI_CHNL) {
- if (ice_vsi_get_qs(vsi)) {
- dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
- vsi->idx);
- goto unroll_vsi_alloc;
- }
+ ret = ice_vsi_get_qs(vsi);
+ if (ret) {
+ dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
+ vsi->idx);
+ goto unroll_vsi_alloc_stat;
}
/* set RSS capabilities */
@@ -2617,10 +2714,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
- ret = ice_vsi_init(vsi, true);
+ ret = ice_vsi_init(vsi, params->flags);
if (ret)
goto unroll_get_qs;
+ ice_vsi_init_vlan_ops(vsi);
+
switch (vsi->type) {
case ICE_VSI_CTRL:
case ICE_VSI_SWITCHDEV_CTRL:
@@ -2641,18 +2740,19 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_vector_base;
- /* Always add VLAN ID 0 switch rule by default. This is needed
- * in order to allow all untagged and 0 tagged priority traffic
- * if Rx VLAN pruning is enabled. Also there are cases where we
- * don't get the call to add VLAN 0 via ice_vlan_rx_add_vid()
- * so this handles those cases (i.e. adding the PF to a bridge
- * without the 8021q module loaded).
- */
- ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
+ ret = ice_vsi_alloc_ring_stats(vsi);
if (ret)
- goto unroll_clear_rings;
+ goto unroll_vector_base;
ice_vsi_map_rings_to_vectors(vsi);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ ret = ice_vsi_determine_xdp_res(vsi);
+ if (ret)
+ goto unroll_vector_base;
+ ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+ if (ret)
+ goto unroll_vector_base;
+ }
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
if (vsi->type != ICE_VSI_CTRL)
@@ -2690,6 +2790,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_vector_base;
+ ret = ice_vsi_alloc_ring_stats(vsi);
+ if (ret)
+ goto unroll_vector_base;
/* Do not exit if configuring RSS had an issue, at least
* receive traffic on first queue. Hence no need to capture
* return value
@@ -2703,36 +2806,167 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ret = ice_vsi_alloc_rings(vsi);
if (ret)
goto unroll_vsi_init;
+
+ ret = ice_vsi_alloc_ring_stats(vsi);
+ if (ret)
+ goto unroll_vector_base;
+
break;
default:
/* clean up the resources and exit */
+ ret = -EINVAL;
goto unroll_vsi_init;
}
- /* configure VSI nodes based on number of queues and TC's */
- ice_for_each_traffic_class(i) {
- if (!(vsi->tc_cfg.ena_tc & BIT(i)))
- continue;
+ return 0;
- if (vsi->type == ICE_VSI_CHNL) {
- if (!vsi->alloc_txq && vsi->num_txq)
- max_txqs[i] = vsi->num_txq;
- else
- max_txqs[i] = pf->num_lan_tx;
+unroll_vector_base:
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+unroll_alloc_q_vector:
+ ice_vsi_free_q_vectors(vsi);
+unroll_vsi_init:
+ ice_vsi_delete_from_hw(vsi);
+unroll_get_qs:
+ ice_vsi_put_qs(vsi);
+unroll_vsi_alloc_stat:
+ ice_vsi_free_stats(vsi);
+unroll_vsi_alloc:
+ ice_vsi_free_arrays(vsi);
+ return ret;
+}
+
+/**
+ * ice_vsi_cfg - configure a previously allocated VSI
+ * @vsi: pointer to VSI
+ * @params: parameters used to configure this VSI
+ */
+int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+{
+ struct ice_pf *pf = vsi->back;
+ int ret;
+
+ if (WARN_ON(params->type == ICE_VSI_VF && !params->vf))
+ return -EINVAL;
+
+ vsi->type = params->type;
+ vsi->port_info = params->pi;
+
+ /* For VSIs which don't have a connected VF, this will be NULL */
+ vsi->vf = params->vf;
+
+ ret = ice_vsi_cfg_def(vsi, params);
+ if (ret)
+ return ret;
+
+ ret = ice_vsi_cfg_tc_lan(vsi->back, vsi);
+ if (ret)
+ ice_vsi_decfg(vsi);
+
+ if (vsi->type == ICE_VSI_CTRL) {
+ if (vsi->vf) {
+ WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI);
+ vsi->vf->ctrl_vsi_idx = vsi->idx;
} else {
- max_txqs[i] = vsi->alloc_txq;
+ WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI);
+ pf->ctrl_vsi_idx = vsi->idx;
}
}
- dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
- if (status) {
- dev_err(dev, "VSI %d failed lan queue config, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- goto unroll_clear_rings;
+ return ret;
+}
+
+/**
+ * ice_vsi_decfg - remove all VSI configuration
+ * @vsi: pointer to VSI
+ */
+void ice_vsi_decfg(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ /* The Rx rule only exists (and needs removing) if the LLDP FW
+ * engine is currently stopped
+ */
+ if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
+ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+ ice_cfg_sw_lldp(vsi, false, false);
+
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+ vsi->vsi_num, err);
+
+ if (ice_is_xdp_ena_vsi(vsi))
+ /* return value check can be skipped here, it always returns
+ * 0 if reset is in progress
+ */
+ ice_destroy_xdp_rings(vsi);
+
+ ice_vsi_clear_rings(vsi);
+ ice_vsi_free_q_vectors(vsi);
+ ice_vsi_put_qs(vsi);
+ ice_vsi_free_arrays(vsi);
+
+ /* SR-IOV determines needed MSIX resources all at once instead of per
+ * VSI since when VFs are spawned we know how many VFs there are and how
+ * many interrupts each VF needs. SR-IOV MSIX resources are also
+ * cleared in the same manner.
+ */
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+ ice_free_vf_ctrl_res(pf, vsi);
+ } else if (vsi->type != ICE_VSI_VF) {
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ vsi->base_vector = 0;
}
+ if (vsi->type == ICE_VSI_VF &&
+ vsi->agg_node && vsi->agg_node->valid)
+ vsi->agg_node->num_vsis--;
+ if (vsi->agg_node) {
+ vsi->agg_node->valid = false;
+ vsi->agg_node->agg_id = 0;
+ }
+}
+
+/**
+ * ice_vsi_setup - Set up a VSI by a given type
+ * @pf: board private structure
+ * @params: parameters to use when creating the VSI
+ *
+ * This allocates the sw VSI structure and its queue resources.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct on
+ * success, NULL on failure.
+ */
+struct ice_vsi *
+ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *vsi;
+ int ret;
+
+ /* ice_vsi_setup can only initialize a new VSI, and we must have
+ * a port_info structure for it.
+ */
+ if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
+ WARN_ON(!params->pi))
+ return NULL;
+
+ vsi = ice_vsi_alloc(pf);
+ if (!vsi) {
+ dev_err(dev, "could not allocate VSI\n");
+ return NULL;
+ }
+
+ ret = ice_vsi_cfg(vsi, params);
+ if (ret)
+ goto err_vsi_cfg;
+
/* Add switch rule to drop all Tx Flow Control Frames, of lookup
* type ETHERTYPE from VSIs, and restrict malicious VFs from sending
* out PAUSE or PFC frames. If enabled, FW can still send FC frames.
@@ -2742,33 +2976,21 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* be dropped so that VFs cannot send LLDP packets to reconfig DCB
* settings in the HW.
*/
- if (!ice_is_safe_mode(pf))
- if (vsi->type == ICE_VSI_PF) {
- ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
- ICE_DROP_PACKET);
- ice_cfg_sw_lldp(vsi, true, true);
- }
+ if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
+ ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
+ ICE_DROP_PACKET);
+ ice_cfg_sw_lldp(vsi, true, true);
+ }
if (!vsi->agg_node)
ice_set_agg_vsi(vsi);
+
return vsi;
-unroll_clear_rings:
- ice_vsi_clear_rings(vsi);
-unroll_vector_base:
- /* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
-unroll_alloc_q_vector:
- ice_vsi_free_q_vectors(vsi);
-unroll_vsi_init:
- ice_vsi_delete(vsi);
-unroll_get_qs:
- ice_vsi_put_qs(vsi);
-unroll_vsi_alloc:
- if (vsi_type == ICE_VSI_VF)
+err_vsi_cfg:
+ if (params->type == ICE_VSI_VF)
ice_enable_lag(pf->lag);
- ice_vsi_clear(vsi);
+ ice_vsi_free(vsi);
return NULL;
}
@@ -2828,6 +3050,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
vsi->irqs_ready = false;
+ ice_free_cpu_rx_rmap(vsi);
+
ice_for_each_q_vector(vsi, i) {
u16 vector = i + base;
int irq_num;
@@ -2841,7 +3065,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
continue;
/* clear the affinity notifier in the IRQ descriptor */
- irq_set_affinity_notifier(irq_num, NULL);
+ if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+ irq_set_affinity_notifier(irq_num, NULL);
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(irq_num, NULL);
@@ -3036,7 +3261,6 @@ void ice_napi_del(struct ice_vsi *vsi)
*/
int ice_vsi_release(struct ice_vsi *vsi)
{
- enum ice_status err;
struct ice_pf *pf;
if (!vsi->back)
@@ -3061,60 +3285,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
- /* Disable VSI and free resources */
- if (vsi->type != ICE_VSI_LB)
- ice_vsi_dis_irq(vsi);
ice_vsi_close(vsi);
-
- /* SR-IOV determines needed MSIX resources all at once instead of per
- * VSI since when VFs are spawned we know how many VFs there are and how
- * many interrupts each VF needs. SR-IOV MSIX resources are also
- * cleared in the same manner.
- */
- if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI)
- break;
- }
- if (i == pf->num_alloc_vfs) {
- /* No other VFs left that have control VSI, reclaim SW
- * interrupts back to the common pool
- */
- ice_free_res(pf->irq_tracker, vsi->base_vector,
- ICE_RES_VF_CTRL_VEC_ID);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
- }
- } else if (vsi->type != ICE_VSI_VF) {
- /* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
- }
-
- if (!ice_is_safe_mode(pf)) {
- if (vsi->type == ICE_VSI_PF) {
- ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
- ICE_DROP_PACKET);
- ice_cfg_sw_lldp(vsi, true, false);
- /* The Rx rule will only exist to remove if the LLDP FW
- * engine is currently stopped
- */
- if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, false);
- }
- }
-
- ice_fltr_remove_all(vsi);
- ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
- err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
- if (err)
- dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
- vsi->vsi_num, err);
- ice_vsi_delete(vsi);
- ice_vsi_free_q_vectors(vsi);
+ ice_vsi_decfg(vsi);
if (vsi->netdev) {
if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) {
@@ -3128,19 +3300,12 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
- if (vsi->type == ICE_VSI_VF &&
- vsi->agg_node && vsi->agg_node->valid)
- vsi->agg_node->num_vsis--;
- ice_vsi_clear_rings(vsi);
-
- ice_vsi_put_qs(vsi);
-
/* retain SW VSI data structure since it is needed to unregister and
* free VSI netdev when PF is not in reset recovery pending state,
* for example during rmmod.
*/
if (!ice_is_reset_in_progress(pf->state))
- ice_vsi_clear(vsi);
+ ice_vsi_delete(vsi);
return 0;
}
@@ -3161,8 +3326,8 @@ ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
- coalesce[i].itr_tx = q_vector->tx.itr_setting;
- coalesce[i].itr_rx = q_vector->rx.itr_setting;
+ coalesce[i].itr_tx = q_vector->tx.itr_settings;
+ coalesce[i].itr_rx = q_vector->rx.itr_settings;
coalesce[i].intrl = q_vector->intrl;
if (i < vsi->num_txq)
@@ -3218,21 +3383,21 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
*/
if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
rc = &vsi->q_vectors[i]->rx;
- rc->itr_setting = coalesce[i].itr_rx;
+ rc->itr_settings = coalesce[i].itr_rx;
ice_write_itr(rc, rc->itr_setting);
} else if (i < vsi->alloc_rxq) {
rc = &vsi->q_vectors[i]->rx;
- rc->itr_setting = coalesce[0].itr_rx;
+ rc->itr_settings = coalesce[0].itr_rx;
ice_write_itr(rc, rc->itr_setting);
}
if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
rc = &vsi->q_vectors[i]->tx;
- rc->itr_setting = coalesce[i].itr_tx;
+ rc->itr_settings = coalesce[i].itr_tx;
ice_write_itr(rc, rc->itr_setting);
} else if (i < vsi->alloc_txq) {
rc = &vsi->q_vectors[i]->tx;
- rc->itr_setting = coalesce[0].itr_tx;
+ rc->itr_settings = coalesce[0].itr_tx;
ice_write_itr(rc, rc->itr_setting);
}
@@ -3246,12 +3411,12 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
for (; i < vsi->num_q_vectors; i++) {
/* transmit */
rc = &vsi->q_vectors[i]->tx;
- rc->itr_setting = coalesce[0].itr_tx;
+ rc->itr_settings = coalesce[0].itr_tx;
ice_write_itr(rc, rc->itr_setting);
/* receive */
rc = &vsi->q_vectors[i]->rx;
- rc->itr_setting = coalesce[0].itr_rx;
+ rc->itr_settings = coalesce[0].itr_rx;
ice_write_itr(rc, rc->itr_setting);
vsi->q_vectors[i]->intrl = coalesce[0].intrl;
@@ -3260,30 +3425,71 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
}
/**
+ * ice_vsi_realloc_stat_arrays - Frees unused stat structures
+ * @vsi: VSI pointer
+ * @prev_txq: Number of Tx rings before ring reallocation
+ * @prev_rxq: Number of Rx rings before ring reallocation
+ */
+static void
+ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ if (!prev_txq || !prev_rxq)
+ return;
+ if (vsi->type == ICE_VSI_CHNL)
+ return;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+
+ if (vsi->num_txq < prev_txq) {
+ for (i = vsi->num_txq; i < prev_txq; i++) {
+ if (vsi_stat->tx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
+ }
+ }
+ }
+
+ if (vsi->num_rxq < prev_rxq) {
+ for (i = vsi->num_rxq; i < prev_rxq; i++) {
+ if (vsi_stat->rx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
+ }
+ }
+ }
+}
+
+/**
* ice_vsi_rebuild - Rebuild VSI after reset
* @vsi: VSI to be rebuilt
- * @init_vsi: is this an initialization or a reconfigure of the VSI
+ * @vsi_flags: flags used for VSI rebuild flow
+ *
+ * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or
+ * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.
*
* Returns 0 on success and negative value on failure
*/
-int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
+int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
{
- u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_vsi_cfg_params params = {};
struct ice_coalesce_stored *coalesce;
+ int ret, prev_txq, prev_rxq;
int prev_num_q_vectors = 0;
- struct ice_vf *vf = NULL;
- enum ice_vsi_type vtype;
- enum ice_status status;
struct ice_pf *pf;
- int ret, i;
if (!vsi)
return -EINVAL;
+ params = ice_vsi_to_params(vsi);
+ params.flags = vsi_flags;
+
pf = vsi->back;
- vtype = vsi->type;
- if (vtype == ICE_VSI_VF)
- vf = &pf->vf[vsi->vf_id];
+ if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
+ return -EINVAL;
coalesce = kcalloc(vsi->num_q_vectors,
sizeof(struct ice_coalesce_stored), GFP_KERNEL);
@@ -3292,167 +3498,35 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
- ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
- ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
- if (ret)
- dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
- vsi->vsi_num, ret);
- ice_vsi_free_q_vectors(vsi);
-
- /* SR-IOV determines needed MSIX resources all at once instead of per
- * VSI since when VFs are spawned we know how many VFs there are and how
- * many interrupts each VF needs. SR-IOV MSIX resources are also
- * cleared in the same manner.
- */
- if (vtype != ICE_VSI_VF) {
- /* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
- vsi->base_vector = 0;
- }
+ prev_txq = vsi->num_txq;
+ prev_rxq = vsi->num_rxq;
- if (ice_is_xdp_ena_vsi(vsi))
- /* return value check can be skipped here, it always returns
- * 0 if reset is in progress
- */
- ice_destroy_xdp_rings(vsi);
- ice_vsi_put_qs(vsi);
- ice_vsi_clear_rings(vsi);
- ice_vsi_free_arrays(vsi);
- if (vtype == ICE_VSI_VF)
- ice_vsi_set_num_qs(vsi, vf->vf_id);
- else
- ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
-
- ret = ice_vsi_alloc_arrays(vsi);
- if (ret < 0)
- goto err_vsi;
-
- ice_vsi_get_qs(vsi);
-
- ice_alloc_fd_res(vsi);
- ice_vsi_set_tc_cfg(vsi);
-
- /* Initialize VSI struct elements and create VSI in FW */
- ret = ice_vsi_init(vsi, init_vsi);
- if (ret < 0)
- goto err_vsi;
-
- switch (vtype) {
- case ICE_VSI_CTRL:
- case ICE_VSI_SWITCHDEV_CTRL:
- case ICE_VSI_PF:
- ret = ice_vsi_alloc_q_vectors(vsi);
- if (ret)
- goto err_rings;
-
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_set_q_vectors_reg_idx(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_rings(vsi);
- if (ret)
- goto err_vectors;
-
- ice_vsi_map_rings_to_vectors(vsi);
- if (ice_is_xdp_ena_vsi(vsi)) {
- ret = ice_vsi_determine_xdp_res(vsi);
- if (ret)
- goto err_vectors;
- ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
- if (ret)
- goto err_vectors;
- }
- /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
- if (vtype != ICE_VSI_CTRL)
- /* Do not exit if configuring RSS had an issue, at
- * least receive traffic on first queue. Hence no
- * need to capture return value
- */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
- ice_vsi_cfg_rss_lut_key(vsi);
- break;
- case ICE_VSI_VF:
- ret = ice_vsi_alloc_q_vectors(vsi);
- if (ret)
- goto err_rings;
-
- ret = ice_vsi_set_q_vectors_reg_idx(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_rings(vsi);
- if (ret)
- goto err_vectors;
+ ice_vsi_decfg(vsi);
+ ret = ice_vsi_cfg_def(vsi, &params);
+ if (ret)
+ goto err_vsi_cfg;
- break;
- case ICE_VSI_CHNL:
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- ice_vsi_cfg_rss_lut_key(vsi);
- ice_vsi_set_rss_flow_fld(vsi);
+ ret = ice_vsi_cfg_tc_lan(pf, vsi);
+ if (ret) {
+ if (vsi_flags & ICE_VSI_FLAG_INIT) {
+ ret = -EIO;
+ goto err_vsi_cfg_tc_lan;
}
- break;
- default:
- break;
- }
- /* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++) {
- /* configure VSI nodes based on number of queues and TC's.
- * ADQ creates VSIs for each TC/Channel but doesn't
- * allocate queues instead it reconfigures the PF queues
- * as per the TC command. So max_txqs should point to the
- * PF Tx queues.
- */
- if (vtype == ICE_VSI_CHNL)
- max_txqs[i] = pf->num_lan_tx;
- else
- max_txqs[i] = vsi->alloc_txq;
-
- if (ice_is_xdp_ena_vsi(vsi))
- max_txqs[i] += vsi->num_xdp_txq;
+ kfree(coalesce);
+ return ice_schedule_reset(pf, ICE_RESET_PFR);
}
- if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
- /* If MQPRIO is set, means channel code path, hence for main
- * VSI's, use TC as 1
- */
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
- else
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
- vsi->tc_cfg.ena_tc, max_txqs);
+ ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq);
- if (status) {
- dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- if (init_vsi) {
- ret = -EIO;
- goto err_vectors;
- } else {
- return ice_schedule_reset(pf, ICE_RESET_PFR);
- }
- }
ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
kfree(coalesce);
return 0;
-err_vectors:
- ice_vsi_free_q_vectors(vsi);
-err_rings:
- if (vsi->netdev) {
- vsi->current_netdev_flags = 0;
- unregister_netdev(vsi->netdev);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- }
-err_vsi:
- ice_vsi_clear(vsi);
- set_bit(ICE_RESET_FAILED, pf->state);
+err_vsi_cfg_tc_lan:
+ ice_vsi_decfg(vsi);
+err_vsi_cfg:
kfree(coalesce);
return ret;
}
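
The rebuild path above now takes a flags word instead of a bool. For reference, a minimal sketch of how a caller might pick the flag, assuming only the ICE_VSI_FLAG_INIT/ICE_VSI_FLAG_NO_INIT definitions and the ice_vsi_rebuild() signature introduced in this patch; the wrapper function itself is hypothetical:

/* Hypothetical helper, not part of the patch. */
static int example_vsi_rebuild(struct ice_vsi *vsi, bool fresh_init)
{
	/* ICE_VSI_FLAG_INIT initializes a new VSI; ICE_VSI_FLAG_NO_INIT
	 * rebuilds an existing VSI in hardware (see kernel-doc above).
	 */
	u32 vsi_flags = fresh_init ? ICE_VSI_FLAG_INIT : ICE_VSI_FLAG_NO_INIT;

	return ice_vsi_rebuild(vsi, vsi_flags);
}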
@@ -3582,13 +3656,14 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
*
* Prepares VSI tc_config to have queue configurations based on MQPRIO options.
*/
-static void
+static int
ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
u8 ena_tc)
{
u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
+ u16 new_txq, new_rxq;
u8 netdev_tc = 0;
int i;
@@ -3629,9 +3704,23 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
}
}
+ new_txq = offset + qcount_tx;
+ if (new_txq > vsi->alloc_txq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
+ new_txq, vsi->alloc_txq);
+ return -EINVAL;
+ }
+
+ new_rxq = offset + qcount_rx;
+ if (new_rxq > vsi->alloc_rxq) {
+ dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
+ new_rxq, vsi->alloc_rxq);
+ return -EINVAL;
+ }
+
/* Set actual Tx/Rx queue pairs */
- vsi->num_txq = offset + qcount_tx;
- vsi->num_rxq = offset + qcount_rx;
+ vsi->num_txq = new_txq;
+ vsi->num_rxq = new_rxq;
/* Setup queue TC[0].qmap for given VSI context */
ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
@@ -3649,6 +3738,8 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
+
+ return 0;
}
/**
@@ -3662,8 +3753,8 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
+ struct ice_tc_cfg old_tc_cfg;
struct ice_vsi_ctx *ctx;
- enum ice_status status;
struct device *dev;
int i, ret = 0;
u8 num_tc = 0;
@@ -3671,7 +3762,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
dev = ice_pf_to_dev(pf);
if (vsi->tc_cfg.ena_tc == ena_tc &&
vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
- return ret;
+ return 0;
ice_for_each_traffic_class(i) {
/* build bitmap of enabled TCs */
@@ -3687,6 +3778,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
max_txqs[i] = vsi->num_txq;
}
+ memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
vsi->tc_cfg.ena_tc = ena_tc;
vsi->tc_cfg.numtc = num_tc;
@@ -3699,31 +3791,33 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
if (vsi->type == ICE_VSI_PF &&
test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
- ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
+ ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
else
- ice_vsi_setup_q_map(vsi, ctx);
+ ret = ice_vsi_setup_q_map(vsi, ctx);
+
+ if (ret) {
+ memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
+ goto out;
+ }
/* must indicate which sections of the VSI context are being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
- status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
- if (status) {
+ ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
+ if (ret) {
dev_info(dev, "Failed VSI Update\n");
- ret = -EIO;
goto out;
}
if (vsi->type == ICE_VSI_PF &&
test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1,
- max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
else
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
- vsi->tc_cfg.ena_tc, max_txqs);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
+ vsi->tc_cfg.ena_tc, max_txqs);
- if (status) {
- dev_err(dev, "VSI %d failed TC config, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- ret = -EIO;
+ if (ret) {
+ dev_err(dev, "VSI %d failed TC config, error %d\n",
+ vsi->vsi_num, ret);
goto out;
}
ice_vsi_update_q_map(vsi, ctx);
@@ -3757,9 +3851,9 @@ static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes
*/
void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
{
- u64_stats_update_begin(&tx_ring->syncp);
- ice_update_ring_stats(&tx_ring->stats, pkts, bytes);
- u64_stats_update_end(&tx_ring->syncp);
+ u64_stats_update_begin(&tx_ring->ring_stats->syncp);
+ ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
+ u64_stats_update_end(&tx_ring->ring_stats->syncp);
}
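
The update side above serializes writers with u64_stats_update_begin()/end() on the new per-ring ice_ring_stats. For reference, a matching read-side sketch using the standard u64_stats fetch helpers; the field names (ring_stats->syncp, ring_stats->stats.pkts/.bytes) follow this hunk, while the helper function itself is hypothetical:

/* Hypothetical reader, not part of the patch: snapshot a Tx ring's
 * packet/byte counters consistently against concurrent updates.
 */
static void example_fetch_tx_ring_stats(struct ice_tx_ring *tx_ring,
					u64 *pkts, u64 *bytes)
{
	struct ice_ring_stats *rs = tx_ring->ring_stats;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&rs->syncp);
		*pkts = rs->stats.pkts;
		*bytes = rs->stats.bytes;
	} while (u64_stats_fetch_retry(&rs->syncp, start));
}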
/**
@@ -3770,156 +3864,104 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
*/
void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
{
- u64_stats_update_begin(&rx_ring->syncp);
- ice_update_ring_stats(&rx_ring->stats, pkts, bytes);
- u64_stats_update_end(&rx_ring->syncp);
-}
-
-/**
- * ice_status_to_errno - convert from enum ice_status to Linux errno
- * @err: ice_status value to convert
- */
-int ice_status_to_errno(enum ice_status err)
-{
- switch (err) {
- case ICE_SUCCESS:
- return 0;
- case ICE_ERR_DOES_NOT_EXIST:
- return -ENOENT;
- case ICE_ERR_OUT_OF_RANGE:
- case ICE_ERR_AQ_ERROR:
- case ICE_ERR_AQ_TIMEOUT:
- case ICE_ERR_AQ_EMPTY:
- case ICE_ERR_AQ_FW_CRITICAL:
- return -EIO;
- case ICE_ERR_PARAM:
- case ICE_ERR_INVAL_SIZE:
- return -EINVAL;
- case ICE_ERR_NO_MEMORY:
- return -ENOMEM;
- case ICE_ERR_MAX_LIMIT:
- return -EAGAIN;
- case ICE_ERR_RESET_ONGOING:
- return -EBUSY;
- case ICE_ERR_AQ_FULL:
- return -ENOSPC;
- default:
- return -EINVAL;
- }
+ u64_stats_update_begin(&rx_ring->ring_stats->syncp);
+ ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
+ u64_stats_update_end(&rx_ring->ring_stats->syncp);
}
/**
* ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
- * @sw: switch to check if its default forwarding VSI is free
+ * @pi: port info of the switch with default VSI
*
- * Return true if the default forwarding VSI is already being used, else returns
- * false signalling that it's available to use.
+ * Return true if there is a single VSI in the default forwarding VSI list
*/
-bool ice_is_dflt_vsi_in_use(struct ice_sw *sw)
+bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
{
- return (sw->dflt_vsi && sw->dflt_vsi_ena);
+ bool exists = false;
+
+ ice_check_if_dflt_vsi(pi, 0, &exists);
+ return exists;
}
/**
* ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
- * @sw: switch for the default forwarding VSI to compare against
* @vsi: VSI to compare against default forwarding VSI
*
* If this VSI passed in is the default forwarding VSI then return true, else
* return false
*/
-bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
{
- return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena);
+ return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
}
/**
* ice_set_dflt_vsi - set the default forwarding VSI
- * @sw: switch used to assign the default forwarding VSI
* @vsi: VSI getting set as the default forwarding VSI on the switch
*
* If the VSI passed in is already the default VSI and it's enabled just return
* success.
*
- * If there is already a default VSI on the switch and it's enabled then return
- * -EEXIST since there can only be one default VSI per switch.
- *
- * Otherwise try to set the VSI passed in as the switch's default VSI and
- * return the result.
+ * Otherwise try to set the VSI passed in as the switch's default VSI and
+ * return the result.
*/
-int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
+int ice_set_dflt_vsi(struct ice_vsi *vsi)
{
- enum ice_status status;
struct device *dev;
+ int status;
- if (!sw || !vsi)
+ if (!vsi)
return -EINVAL;
dev = ice_pf_to_dev(vsi->back);
/* the VSI passed in is already the default VSI */
- if (ice_is_vsi_dflt_vsi(sw, vsi)) {
+ if (ice_is_vsi_dflt_vsi(vsi)) {
dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
vsi->vsi_num);
return 0;
}
- /* another VSI is already the default VSI for this switch */
- if (ice_is_dflt_vsi_in_use(sw)) {
- dev_err(dev, "Default forwarding VSI %d already in use, disable it and try again\n",
- sw->dflt_vsi->vsi_num);
- return -EEXIST;
- }
-
- status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
+ status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
if (status) {
- dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- return -EIO;
+ dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
+ vsi->vsi_num, status);
+ return status;
}
- sw->dflt_vsi = vsi;
- sw->dflt_vsi_ena = true;
-
return 0;
}
/**
* ice_clear_dflt_vsi - clear the default forwarding VSI
- * @sw: switch used to clear the default VSI
+ * @vsi: VSI to remove from filter list
*
* If the switch has no default VSI or it's not enabled then return error.
*
* Otherwise try to clear the default VSI and return the result.
*/
-int ice_clear_dflt_vsi(struct ice_sw *sw)
+int ice_clear_dflt_vsi(struct ice_vsi *vsi)
{
- struct ice_vsi *dflt_vsi;
- enum ice_status status;
struct device *dev;
+ int status;
- if (!sw)
+ if (!vsi)
return -EINVAL;
- dev = ice_pf_to_dev(sw->pf);
-
- dflt_vsi = sw->dflt_vsi;
+ dev = ice_pf_to_dev(vsi->back);
/* there is no default VSI configured */
- if (!ice_is_dflt_vsi_in_use(sw))
+ if (!ice_is_dflt_vsi_in_use(vsi->port_info))
return -ENODEV;
- status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
+ status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
ICE_FLTR_RX);
if (status) {
- dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n",
- dflt_vsi->vsi_num, ice_stat_str(status));
+ dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
+ vsi->vsi_num, status);
return -EIO;
}
- sw->dflt_vsi = NULL;
- sw->dflt_vsi_ena = false;
-
return 0;
}
@@ -3931,33 +3973,11 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
*/
int ice_get_link_speed_mbps(struct ice_vsi *vsi)
{
- switch (vsi->port_info->phy.link_info.link_speed) {
- case ICE_AQ_LINK_SPEED_100GB:
- return SPEED_100000;
- case ICE_AQ_LINK_SPEED_50GB:
- return SPEED_50000;
- case ICE_AQ_LINK_SPEED_40GB:
- return SPEED_40000;
- case ICE_AQ_LINK_SPEED_25GB:
- return SPEED_25000;
- case ICE_AQ_LINK_SPEED_20GB:
- return SPEED_20000;
- case ICE_AQ_LINK_SPEED_10GB:
- return SPEED_10000;
- case ICE_AQ_LINK_SPEED_5GB:
- return SPEED_5000;
- case ICE_AQ_LINK_SPEED_2500MB:
- return SPEED_2500;
- case ICE_AQ_LINK_SPEED_1000MB:
- return SPEED_1000;
- case ICE_AQ_LINK_SPEED_100MB:
- return SPEED_100;
- case ICE_AQ_LINK_SPEED_10MB:
- return SPEED_10;
- case ICE_AQ_LINK_SPEED_UNKNOWN:
- default:
- return 0;
- }
+ unsigned int link_speed;
+
+ link_speed = vsi->port_info->phy.link_info.link_speed;
+
+ return (int)ice_get_link_speed(fls(link_speed) - 1);
}
/**
@@ -3987,8 +4007,8 @@ int ice_get_link_speed_kbps(struct ice_vsi *vsi)
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
int speed;
dev = ice_pf_to_dev(pf);
@@ -4014,7 +4034,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
min_tx_rate, ice_vsi_type_str(vsi->type),
vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
@@ -4026,7 +4046,7 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
if (status) {
dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
ice_vsi_type_str(vsi->type), vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
@@ -4048,8 +4068,8 @@ int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
{
struct ice_pf *pf = vsi->back;
- enum ice_status status;
struct device *dev;
+ int status;
int speed;
dev = ice_pf_to_dev(pf);
@@ -4075,7 +4095,7 @@ int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
max_tx_rate, ice_vsi_type_str(vsi->type),
vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
@@ -4087,7 +4107,7 @@ int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
if (status) {
dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
ice_vsi_type_str(vsi->type), vsi->idx);
- return -EIO;
+ return status;
}
dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
@@ -4107,7 +4127,7 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
struct device *dev = ice_pf_to_dev(vsi->back);
struct ice_port_info *pi = vsi->port_info;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
@@ -4119,22 +4139,140 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
* a success code. Return an error if FW returns an error code other
* than ICE_AQ_RC_EMODE
*/
- if (status == ICE_ERR_AQ_ERROR) {
+ if (status == -EIO) {
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
- dev_warn(dev, "can't set link to %s, err %s aq_err %s. not fatal, continuing\n",
- (ena ? "ON" : "OFF"), ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
+ (ena ? "ON" : "OFF"), status,
+ ice_aq_str(hw->adminq.sq_last_status));
} else if (status) {
- dev_err(dev, "can't set link to %s, err %s aq_err %s\n",
- (ena ? "ON" : "OFF"), ice_stat_str(status),
+ dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
+ (ena ? "ON" : "OFF"), status,
ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
+ return status;
}
return 0;
}
/**
+ * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
+ * @vsi: VSI used to add VLAN filters
+ *
+ * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are based
+ * on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x88a8) doesn't
+ * matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
+ * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
+ *
+ * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
+ * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
+ * traffic in SVM, since the VLAN TPID isn't part of filtering.
+ *
+ * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
+ * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
+ * part of filtering.
+ */
+int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct ice_vlan vlan;
+ int err;
+
+ vlan = ICE_VLAN(0, 0, 0);
+ err = vlan_ops->add_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ /* in SVM both VLAN 0 filters are identical */
+ if (!ice_is_dvm_ena(&vsi->back->hw))
+ return 0;
+
+ vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
+ err = vlan_ops->add_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
+ * @vsi: VSI used to delete VLAN filters
+ *
+ * Delete the VLAN 0 filters in the same manner that they were added in
+ * ice_vsi_add_vlan_zero.
+ */
+int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct ice_vlan vlan;
+ int err;
+
+ vlan = ICE_VLAN(0, 0, 0);
+ err = vlan_ops->del_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ /* in SVM both VLAN 0 filters are identical */
+ if (!ice_is_dvm_ena(&vsi->back->hw))
+ return 0;
+
+ vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
+ err = vlan_ops->del_vlan(vsi, &vlan);
+ if (err && err != -EEXIST)
+ return err;
+
+ /* when deleting the last VLAN filter, make sure to disable the VLAN
+ * promisc mode so the filter isn't left by accident
+ */
+ return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, 0);
+}
+
+/**
+ * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
+ * @vsi: VSI used to get the VLAN mode
+ *
+ * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
+ * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details.
+ */
+static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
+{
+#define ICE_DVM_NUM_ZERO_VLAN_FLTRS 2
+#define ICE_SVM_NUM_ZERO_VLAN_FLTRS 1
+ /* no VLAN 0 filter is created when a port VLAN is active */
+ if (vsi->type == ICE_VSI_VF) {
+ if (WARN_ON(!vsi->vf))
+ return 0;
+
+ if (ice_vf_is_port_vlan_ena(vsi->vf))
+ return 0;
+ }
+
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
+ else
+ return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
+}
+
+/**
+ * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
+ * @vsi: VSI used to determine if any non-zero VLANs have been added
+ */
+bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
+{
+ return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
+}
+
+/**
+ * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
+ * @vsi: VSI used to get the number of non-zero VLANs added
+ */
+u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
+{
+ return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
+}
+
+/**
* ice_is_feature_supported
* @pf: pointer to the struct ice_pf instance
* @f: feature enum to be checked
@@ -4188,8 +4326,12 @@ void ice_init_feature_support(struct ice_pf *pf)
case ICE_DEV_ID_E810C_QSFP:
case ICE_DEV_ID_E810C_SFP:
ice_set_feature_support(pf, ICE_F_DSCP);
- if (ice_is_e810t(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_PTP_EXTTS);
+ if (ice_is_e810t(&pf->hw)) {
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
+ if (ice_gnss_is_gps_present(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_GNSS);
+ }
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 6c803407b67d..75221478f2dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -5,6 +5,48 @@
#define _ICE_LIB_H_
#include "ice.h"
+#include "ice_vlan.h"
+
+/* Flags used for VSI configuration and rebuild */
+#define ICE_VSI_FLAG_INIT BIT(0)
+#define ICE_VSI_FLAG_NO_INIT 0
+
+/**
+ * struct ice_vsi_cfg_params - VSI configuration parameters
+ * @pi: pointer to the port_info instance for the VSI
+ * @ch: pointer to the channel structure for the VSI, may be NULL
+ * @vf: pointer to the VF associated with this VSI, may be NULL
+ * @type: the type of VSI to configure
+ * @flags: VSI flags used for rebuild and configuration
+ *
+ * Parameter structure used when configuring a new VSI.
+ */
+struct ice_vsi_cfg_params {
+ struct ice_port_info *pi;
+ struct ice_channel *ch;
+ struct ice_vf *vf;
+ enum ice_vsi_type type;
+ u32 flags;
+};
+
+/**
+ * ice_vsi_to_params - Get parameters for an existing VSI
+ * @vsi: the VSI to get parameters for
+ *
+ * Fill a parameter structure for reconfiguring a VSI with its current
+ * parameters, such as during a rebuild operation.
+ */
+static inline struct ice_vsi_cfg_params ice_vsi_to_params(struct ice_vsi *vsi)
+{
+ struct ice_vsi_cfg_params params = {};
+
+ params.pi = vsi->port_info;
+ params.ch = vsi->ch;
+ params.vf = vsi->vf;
+ params.type = vsi->type;
+
+ return params;
+}
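
The parameter structure and ice_vsi_to_params() above replace the long argument list previously passed to ice_vsi_setup(). A minimal sketch of creating a brand-new PF VSI with the new API: ICE_VSI_FLAG_INIT and a non-NULL port_info are required by the WARN_ON checks in ice_vsi_setup(), while the wrapper function and the pf->hw.port_info access are illustrative assumptions:

/* Hypothetical caller, not part of the patch. */
static struct ice_vsi *example_create_pf_vsi(struct ice_pf *pf)
{
	struct ice_vsi_cfg_params params = {};

	params.type = ICE_VSI_PF;
	params.pi = pf->hw.port_info;		/* assumed port_info location */
	params.flags = ICE_VSI_FLAG_INIT;	/* required for a new VSI */

	return ice_vsi_setup(pf, &params);
}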
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
@@ -22,15 +64,6 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi);
-int
-ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action);
-
-int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
-
-int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi);
-
-int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena);
-
int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi);
int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi);
@@ -45,14 +78,11 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi);
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
-
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
int ice_set_link(struct ice_vsi *vsi, bool ena);
void ice_vsi_delete(struct ice_vsi *vsi);
-int ice_vsi_clear(struct ice_vsi *vsi);
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
@@ -61,8 +91,7 @@ int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi);
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
-ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch);
+ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
void ice_napi_del(struct ice_vsi *vsi);
@@ -72,6 +101,7 @@ void ice_vsi_close(struct ice_vsi *vsi);
int ice_ena_vsi(struct ice_vsi *vsi, bool locked);
+void ice_vsi_decfg(struct ice_vsi *vsi);
void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
@@ -79,7 +109,8 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
-int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi);
+int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags);
+int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params);
bool ice_is_reset_in_progress(unsigned long *state);
int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
@@ -98,30 +129,25 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable);
+
void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
-
-int ice_status_to_errno(enum ice_status err);
-
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
-enum ice_status
-ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
+int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
-bool ice_is_aux_ena(struct ice_pf *pf);
-bool ice_is_dflt_vsi_in_use(struct ice_sw *sw);
-
-bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
-
-int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
-
-int ice_clear_dflt_vsi(struct ice_sw *sw);
+bool ice_is_rdma_ena(struct ice_pf *pf);
+bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi);
+bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi);
+int ice_set_dflt_vsi(struct ice_vsi *vsi);
+int ice_clear_dflt_vsi(struct ice_vsi *vsi);
int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate);
int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate);
int ice_get_link_speed_kbps(struct ice_vsi *vsi);
@@ -136,8 +162,12 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
-
+int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
+int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
+bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi);
+u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi);
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_init_feature_support(struct ice_pf *pf);
+bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 73c61cdb036f..a1f7c8edc22f 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -21,6 +21,8 @@
#include "ice_trace.h"
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
+#include "ice_vsi_vlan_ops.h"
+#include <net/xdp_sock_drv.h>
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -43,10 +45,24 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
-static DEFINE_IDA(ice_aux_ida);
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);
+/**
+ * ice_hw_to_dev - Get device pointer from the hardware structure
+ * @hw: pointer to the device HW structure
+ *
+ * Used to access the device pointer from compilation units which can't easily
+ * include the definition of struct ice_pf without leading to circular header
+ * dependencies.
+ */
+struct device *ice_hw_to_dev(struct ice_hw *hw)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+
+ return &pf->pdev->dev;
+}
+
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
@@ -114,12 +130,17 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
ice_for_each_txq(vsi, i) {
struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
+ struct ice_ring_stats *ring_stats;
if (!tx_ring)
continue;
if (ice_ring_ch_enabled(tx_ring))
continue;
+ ring_stats = tx_ring->ring_stats;
+ if (!ring_stats)
+ continue;
+
if (tx_ring->desc) {
/* If packet counter has not changed the queue is
* likely stalled, so force an interrupt for this
@@ -128,8 +149,8 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
* prev_pkt would be negative if there was no
* pending work.
*/
- packets = tx_ring->stats.pkts & INT_MAX;
- if (tx_ring->tx_stats.prev_pkt == packets) {
+ packets = ring_stats->stats.pkts & INT_MAX;
+ if (ring_stats->tx_stats.prev_pkt == packets) {
/* Trigger sw interrupt to revive the queue */
ice_trigger_sw_intr(hw, tx_ring->q_vector);
continue;
@@ -139,7 +160,7 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
* to ice_get_tx_pending()
*/
smp_rmb();
- tx_ring->tx_stats.prev_pkt =
+ ring_stats->tx_stats.prev_pkt =
ice_get_tx_pending(tx_ring) ? packets : -1;
}
}
@@ -155,7 +176,6 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
*/
static int ice_init_mac_fltr(struct ice_pf *pf)
{
- enum ice_status status;
struct ice_vsi *vsi;
u8 *perm_addr;
@@ -164,11 +184,7 @@ static int ice_init_mac_fltr(struct ice_pf *pf)
return -EINVAL;
perm_addr = vsi->port_info->mac.perm_addr;
- status = ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
- if (status)
- return -EIO;
-
- return 0;
+ return ice_fltr_add_mac_and_broadcast(vsi, perm_addr, ICE_FWD_TO_VSI);
}
/**
@@ -232,44 +248,66 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
{
return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
- test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
- test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
+ test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
}
/**
- * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
+ * ice_set_promisc - Enable promiscuous mode for a given PF
* @vsi: the VSI being configured
* @promisc_m: mask of promiscuous config bits
- * @set_promisc: enable or disable promisc flag request
*
*/
-static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
+static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
{
- struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status = 0;
+ int status;
if (vsi->type != ICE_VSI_PF)
return 0;
- if (vsi->num_vlan > 1) {
- status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
- set_promisc);
+ if (ice_vsi_has_non_zero_vlans(vsi)) {
+ promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
+ status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
+ promisc_m);
} else {
- if (set_promisc)
- status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
- else
- status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
+ status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+ promisc_m, 0);
}
+ if (status && status != -EEXIST)
+ return status;
- if (status)
- return -EIO;
-
+ netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
+ vsi->vsi_num, promisc_m);
return 0;
}
/**
+ * ice_clear_promisc - Disable promiscuous mode for a given PF
+ * @vsi: the VSI being configured
+ * @promisc_m: mask of promiscuous config bits
+ *
+ */
+static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
+{
+ int status;
+
+ if (vsi->type != ICE_VSI_PF)
+ return 0;
+
+ if (ice_vsi_has_non_zero_vlans(vsi)) {
+ promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
+ status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
+ promisc_m);
+ } else {
+ status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ promisc_m, 0);
+ }
+
+ netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
+ vsi->vsi_num, promisc_m);
+ return status;
+}
+
+/**
* ice_vsi_sync_fltr - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
@@ -277,15 +315,14 @@ static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
*/
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
struct device *dev = ice_pf_to_dev(vsi->back);
struct net_device *netdev = vsi->netdev;
bool promisc_forced_on = false;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- enum ice_status status = 0;
u32 changed_flags = 0;
- u8 promisc_m;
- int err = 0;
+ int err;
if (!vsi->netdev)
return -EINVAL;
@@ -302,7 +339,6 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
if (ice_vsi_fltr_changed(vsi)) {
clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
- clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
/* grab the netdev's addr_list_lock */
netif_addr_lock_bh(netdev);
@@ -315,25 +351,23 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
/* Remove MAC addresses in the unsync list */
- status = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
+ err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
ice_fltr_free_list(dev, &vsi->tmp_unsync_list);
- if (status) {
+ if (err) {
netdev_err(netdev, "Failed to delete MAC filters\n");
/* if we failed because of alloc failures, just bail */
- if (status == ICE_ERR_NO_MEMORY) {
- err = -ENOMEM;
+ if (err == -ENOMEM)
goto out;
- }
}
/* Add MAC addresses in the sync list */
- status = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
+ err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
ice_fltr_free_list(dev, &vsi->tmp_sync_list);
/* If the filter is added successfully or already exists, do not go into
* the 'if' condition and report it as an error. Instead continue processing
* the rest of the function.
*/
- if (status && status != ICE_ERR_ALREADY_EXISTS) {
+ if (err && err != -EEXIST) {
netdev_err(netdev, "Failed to add MAC filters\n");
/* If there is no more space for new umac filters, VSI
* should go into promiscuous mode. There should be some
@@ -346,36 +380,22 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
vsi->vsi_num);
} else {
- err = -EIO;
goto out;
}
}
+ err = 0;
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
if (vsi->current_netdev_flags & IFF_ALLMULTI) {
- if (vsi->num_vlan > 1)
- promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_MCAST_PROMISC_BITS;
-
- err = ice_cfg_promisc(vsi, promisc_m, true);
+ err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
if (err) {
- netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
- vsi->vsi_num);
vsi->current_netdev_flags &= ~IFF_ALLMULTI;
goto out_promisc;
}
} else {
/* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
- if (vsi->num_vlan > 1)
- promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_MCAST_PROMISC_BITS;
-
- err = ice_cfg_promisc(vsi, promisc_m, false);
+ err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
if (err) {
- netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
- vsi->vsi_num);
vsi->current_netdev_flags |= IFF_ALLMULTI;
goto out_promisc;
}
@@ -387,8 +407,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
if (vsi->current_netdev_flags & IFF_PROMISC) {
/* Apply Rx filter rule to get traffic from wire */
- if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
- err = ice_set_dflt_vsi(pf->first_sw, vsi);
+ if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {
+ err = ice_set_dflt_vsi(vsi);
if (err && err != -EEXIST) {
netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
err, vsi->vsi_num);
@@ -396,12 +416,23 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
~IFF_PROMISC;
goto out_promisc;
}
- ice_cfg_vlan_pruning(vsi, false);
+ err = 0;
+ vlan_ops->dis_rx_filtering(vsi);
+
+ /* promiscuous mode implies allmulticast so
+ * that VSIs that are in promiscuous mode are
+ * subscribed to multicast packets coming to
+ * the port
+ */
+ err = ice_set_promisc(vsi,
+ ICE_MCAST_PROMISC_BITS);
+ if (err)
+ goto out_promisc;
}
} else {
/* Clear Rx filter to remove traffic from wire */
- if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
- err = ice_clear_dflt_vsi(pf->first_sw);
+ if (ice_is_vsi_dflt_vsi(vsi)) {
+ err = ice_clear_dflt_vsi(vsi);
if (err) {
netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
err, vsi->vsi_num);
@@ -409,8 +440,21 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
IFF_PROMISC;
goto out_promisc;
}
- if (vsi->num_vlan > 1)
- ice_cfg_vlan_pruning(vsi, true);
+ if (vsi->netdev->features &
+ NETIF_F_HW_VLAN_CTAG_FILTER)
+ vlan_ops->ena_rx_filtering(vsi);
+ }
+
+ /* disable allmulti here, but only if allmulti is not
+ * still enabled for the netdev
+ */
+ if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
+ err = ice_clear_promisc(vsi,
+ ICE_MCAST_PROMISC_BITS);
+ if (err) {
+ netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
+ err, vsi->vsi_num);
+ }
}
}
}
@@ -472,6 +516,25 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
}
/**
+ * ice_clear_sw_switch_recipes - clear switch recipes
+ * @pf: board private structure
+ *
+ * Mark switch recipes as not created in sw structures. There are cases where
+ * rules (especially advanced rules) need to be restored, either re-read from
+ * hardware or added again, for example after a reset. The 'recp_created' flag
+ * prevents that and needs to be cleared upfront.
+ */
+static void ice_clear_sw_switch_recipes(struct ice_pf *pf)
+{
+ struct ice_sw_recipe *recp;
+ u8 i;
+
+ recp = pf->hw.switch_info->recp_list;
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
+ recp[i].recp_created = false;
+}
+
+/**
* ice_prepare_for_reset - prep for reset
* @pf: board private structure
* @reset_type: reset type requested
@@ -483,7 +546,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct ice_hw *hw = &pf->hw;
struct ice_vsi *vsi;
- unsigned int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
@@ -498,8 +562,15 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_vc_notify_reset(pf);
/* Disable VFs until reset is completed */
- ice_for_each_vf(pf, i)
- ice_set_vf_state_qs_dis(&pf->vf[i]);
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf)
+ ice_set_vf_state_dis(vf);
+ mutex_unlock(&pf->vfs.table_lock);
+
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ if (reset_type != ICE_RESET_PFR)
+ ice_clear_sw_switch_recipes(pf);
+ }
/* release ADQ specific HW and SW resources */
vsi = ice_get_main_vsi(pf);
@@ -539,7 +610,10 @@ skip:
ice_pf_dis_all_vsi(pf, false);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_release(pf);
+ ice_ptp_prepare_for_reset(pf);
+
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_exit(pf);
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
@@ -586,7 +660,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(ICE_PFR_REQ, pf->state);
wake_up(&pf->reset_wait_queue);
- ice_reset_all_vfs(pf, true);
+ ice_reset_all_vfs(pf);
}
}
@@ -637,7 +711,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
clear_bit(ICE_CORER_REQ, pf->state);
clear_bit(ICE_GLOBR_REQ, pf->state);
wake_up(&pf->reset_wait_queue);
- ice_reset_all_vfs(pf, true);
+ ice_reset_all_vfs(pf);
}
return;
@@ -695,12 +769,12 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
{
struct ice_aqc_get_phy_caps_data *caps;
const char *an_advertised;
- enum ice_status status;
const char *fec_req;
const char *speed;
const char *fec;
const char *fc;
const char *an;
+ int status;
if (!vsi)
return;
@@ -1020,10 +1094,10 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_phy_info *phy_info;
- enum ice_status status;
struct ice_vsi *vsi;
u16 old_link_speed;
bool old_link;
+ int status;
phy_info = &pi->phy;
phy_info->link_info_old = phy_info->link_info;
@@ -1036,8 +1110,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
*/
status = ice_update_link_info(pi);
if (status)
- dev_dbg(dev, "Failed to update link status on port %d, err %s aq_err %s\n",
- pi->lport, ice_stat_str(status),
+ dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
+ pi->lport, status,
ice_aq_str(pi->hw->adminq.sq_last_status));
ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
@@ -1063,6 +1137,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
+ ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
+
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
ice_dcb_rebuild(pf);
@@ -1317,6 +1393,8 @@ static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf)
wake_up(&pf->aq_wait_queue);
}
+#define ICE_MBX_OVERFLOW_WATERMARK 64
+
/**
* __ice_clean_ctrlq - helper function to clean controlq rings
* @pf: ptr to struct ice_pf
@@ -1407,15 +1485,16 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
return 0;
do {
- enum ice_status ret;
+ struct ice_mbx_data data = {};
u16 opcode;
+ int ret;
ret = ice_clean_rq_elem(hw, cq, &event, &pending);
- if (ret == ICE_ERR_AQ_NO_WORK)
+ if (ret == -EALREADY)
break;
if (ret) {
- dev_err(dev, "%s Receive Queue event error %s\n", qtype,
- ice_stat_str(ret));
+ dev_err(dev, "%s Receive Queue event error %d\n", qtype,
+ ret);
break;
}
@@ -1433,8 +1512,12 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_vf_lan_overflow_event(pf, &event);
break;
case ice_mbx_opc_send_msg_to_pf:
- if (!ice_is_malicious_vf(pf, &event, i, pending))
- ice_vc_process_vf_msg(pf, &event);
+ data.num_msg_proc = i;
+ data.num_pending_arq = pending;
+ data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries;
+ data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
+
+ ice_vc_process_vf_msg(pf, &event, &data);
break;
case ice_aqc_opc_fw_logging:
ice_output_fw_log(hw, &event.desc, event.msg_buf);
@@ -1633,7 +1716,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- unsigned int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
u32 reg;
if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {
@@ -1721,47 +1805,46 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
/* Check to see if one of the VFs caused an MDD event, and then
* increment counters and set print pending
*/
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- reg = rd32(hw, VP_MDET_TX_PQM(i));
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
if (reg & VP_MDET_TX_PQM_VALID_M) {
- wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
+ wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
vf->mdd_tx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
- i);
+ vf->vf_id);
}
- reg = rd32(hw, VP_MDET_TX_TCLAN(i));
+ reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
- wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
+ wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
vf->mdd_tx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
- i);
+ vf->vf_id);
}
- reg = rd32(hw, VP_MDET_TX_TDPU(i));
+ reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
- wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
+ wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
vf->mdd_tx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
- i);
+ vf->vf_id);
}
- reg = rd32(hw, VP_MDET_RX(i));
+ reg = rd32(hw, VP_MDET_RX(vf->vf_id));
if (reg & VP_MDET_RX_VALID_M) {
- wr32(hw, VP_MDET_RX(i), 0xFFFF);
+ wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
vf->mdd_rx_events.count++;
set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
- i);
+ vf->vf_id);
/* Since the queue is disabled on VF Rx MDD events, the
* PF can be configured to reset the VF through ethtool
@@ -1772,10 +1855,11 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
* reset, so print the event prior to reset.
*/
ice_print_vf_rx_mdd_event(vf);
- ice_reset_vf(&pf->vf[i], false);
+ ice_reset_vf(vf, ICE_VF_RESET_LOCK);
}
}
}
+ mutex_unlock(&pf->vfs.table_lock);
ice_print_vfs_mdd_events(pf);
}
@@ -1866,19 +1950,17 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi)
{
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_pf *pf = pi->hw->back;
- enum ice_status status;
- int err = 0;
+ int err;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return -ENOMEM;
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA, pcaps,
- NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA,
+ pcaps, NULL);
- if (status) {
+ if (err) {
dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
- err = -EIO;
goto out;
}
@@ -1977,8 +2059,7 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi)
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_phy_info *phy = &pi->phy;
struct ice_pf *pf = pi->hw->back;
- enum ice_status status;
- int err = 0;
+ int err;
if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
return -EIO;
@@ -1988,14 +2069,13 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi)
return -ENOMEM;
if (ice_fw_supports_report_dflt_cfg(pi->hw))
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
- pcaps, NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
+ pcaps, NULL);
else
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- pcaps, NULL);
- if (status) {
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ pcaps, NULL);
+ if (err) {
dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
- err = -EIO;
goto err_out;
}
@@ -2049,8 +2129,7 @@ static int ice_configure_phy(struct ice_vsi *vsi)
struct ice_aqc_set_phy_cfg_data *cfg;
struct ice_phy_info *phy = &pi->phy;
struct ice_pf *pf = vsi->back;
- enum ice_status status;
- int err = 0;
+ int err;
/* Ensure we have media as we cannot configure a medialess port */
if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
@@ -2070,12 +2149,11 @@ static int ice_configure_phy(struct ice_vsi *vsi)
return -ENOMEM;
/* Get current PHY config */
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
- NULL);
- if (status) {
- dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, pcaps,
+ NULL);
+ if (err) {
+ dev_err(dev, "Failed to get PHY configuration, VSI %d error %d\n",
+ vsi->vsi_num, err);
goto done;
}
@@ -2089,15 +2167,14 @@ static int ice_configure_phy(struct ice_vsi *vsi)
/* Use PHY topology as baseline for configuration */
memset(pcaps, 0, sizeof(*pcaps));
if (ice_fw_supports_report_dflt_cfg(pi->hw))
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
- pcaps, NULL);
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_DFLT_CFG,
+ pcaps, NULL);
else
- status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
- pcaps, NULL);
- if (status) {
- dev_err(dev, "Failed to get PHY caps, VSI %d error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
+ err = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
+ pcaps, NULL);
+ if (err) {
+ dev_err(dev, "Failed to get PHY caps, VSI %d error %d\n",
+ vsi->vsi_num, err);
goto done;
}
@@ -2150,12 +2227,10 @@ static int ice_configure_phy(struct ice_vsi *vsi)
/* Enable link and link update */
cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
- status = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
- if (status) {
- dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
- vsi->vsi_num, ice_stat_str(status));
- err = -EIO;
- }
+ err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);
+ if (err)
+ dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
+ vsi->vsi_num, err);
kfree(cfg);
done:
@@ -2235,6 +2310,40 @@ static void ice_service_task(struct work_struct *work)
return;
}
+ if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
+ struct iidc_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event) {
+ set_bit(IIDC_EVENT_CRIT_ERR, event->type);
+ /* report the entire OICR value to AUX driver */
+ swap(event->reg, pf->oicr_err_reg);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
+ }
+
+	/* Unplug the aux dev per request; if an unplug request came in
+	 * while processing a plug request, this will handle it.
+	 */
+ if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))
+ ice_unplug_aux_dev(pf);
+
+ /* Plug aux device per request */
+ if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+ ice_plug_aux_dev(pf);
+
+ if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
+ struct iidc_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event) {
+ set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
+ }
+
ice_clean_adminq_subtask(pf);
ice_check_media_subtask(pf);
ice_check_for_hang_subtask(pf);
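The ICE_AUX_ERR_PENDING block added at the top of this hunk is the process-context half of a split with ice_misc_intr() (see the hunk at -2997 below): the hard IRQ handler only ORs the OICR value into pf->oicr_err_reg and sets the bit, while the service task performs the GFP_KERNEL allocation and aux notification that would be illegal in interrupt context. The swap() both copies the accumulated value into the event and clears pf->oicr_err_reg, so later errors start accumulating into an empty register instead of being reported twice.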
@@ -2310,8 +2419,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
return -EBUSY;
}
- ice_unplug_aux_dev(pf);
-
switch (reset) {
case ICE_RESET_PFR:
set_bit(ICE_PFR_REQ, pf->state);
@@ -2410,7 +2517,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
- if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf)
err = devm_request_irq(dev, irq_num, vsi->irq_handler,
IRQF_SHARED, q_vector->name,
q_vector);
@@ -2437,6 +2544,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
}
+ err = ice_set_cpu_rx_rmap(vsi);
+ if (err) {
+ netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
+ vsi->vsi_num, ERR_PTR(err));
+ goto free_q_irqs;
+ }
+
vsi->irqs_ready = true;
return 0;
@@ -2466,46 +2580,47 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
ice_for_each_xdp_txq(vsi, i) {
u16 xdp_q_idx = vsi->alloc_txq + i;
+ struct ice_ring_stats *ring_stats;
struct ice_tx_ring *xdp_ring;
xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
-
if (!xdp_ring)
goto free_xdp_rings;
+ ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+ if (!ring_stats) {
+ ice_free_tx_ring(xdp_ring);
+ goto free_xdp_rings;
+ }
+
+ xdp_ring->ring_stats = ring_stats;
xdp_ring->q_index = xdp_q_idx;
xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
xdp_ring->vsi = vsi;
xdp_ring->netdev = NULL;
- xdp_ring->next_dd = ICE_TX_THRESH - 1;
- xdp_ring->next_rs = ICE_TX_THRESH - 1;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
spin_lock_init(&xdp_ring->tx_lock);
for (j = 0; j < xdp_ring->count; j++) {
tx_desc = ICE_TX_DESC(xdp_ring, j);
- tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
+ tx_desc->cmd_type_offset_bsz = 0;
}
}
- ice_for_each_rxq(vsi, i) {
- if (static_key_enabled(&ice_xdp_locking_key))
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
- else
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
- }
-
return 0;
free_xdp_rings:
- for (; i >= 0; i--)
- if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
+ for (; i >= 0; i--) {
+ if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
+ kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
+ vsi->xdp_rings[i]->ring_stats = NULL;
ice_free_tx_ring(vsi->xdp_rings[i]);
+ }
+ }
return -ENOMEM;
}
@@ -2549,9 +2664,9 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
.vsi_map_offset = vsi->alloc_txq,
.mapping_mode = ICE_VSI_MAP_CONTIG
};
- enum ice_status status;
struct device *dev;
int i, v_idx;
+ int status;
dev = ice_pf_to_dev(pf);
vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2589,6 +2704,23 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
xdp_rings_rem -= xdp_rings_per_v;
}
+ ice_for_each_rxq(vsi, i) {
+ if (static_key_enabled(&ice_xdp_locking_key)) {
+ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+ } else {
+ struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
+ struct ice_tx_ring *ring;
+
+ ice_for_each_tx_ring(ring, q_vector->tx) {
+ if (ice_ring_is_xdp(ring)) {
+ vsi->rx_rings[i]->xdp_ring = ring;
+ break;
+ }
+ }
+ }
+ ice_tx_xsk_pool(vsi, i);
+ }
+
/* omit the scheduler update if in reset path; XDP queues will be
* taken into account at the end of ice_vsi_rebuild, where
* ice_cfg_vsi_lan is being called
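As a worked example of the Rx-to-XDP ring mapping introduced in this hunk: with, say, 8 Rx rings and 4 XDP Tx rings while the ice_xdp_locking_key static key is enabled, Rx ring 5 shares vsi->xdp_rings[5 % 4], i.e. xdp_rings[1], which is why those rings carry a tx_lock; with the key disabled each Rx ring instead picks the XDP ring already attached to its own q_vector, so the pairing is exclusive and no modulo sharing is needed. ice_tx_xsk_pool(vsi, i) then (re)binds the AF_XDP pool per queue. (Ring counts here are illustrative only.)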
@@ -2605,8 +2737,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(dev, "Failed VSI LAN queue config for XDP, error: %s\n",
- ice_stat_str(status));
+ dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
+ status);
goto clear_xdp_rings;
}
@@ -2685,8 +2817,12 @@ free_qmap:
ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
- if (vsi->xdp_rings[i]->desc)
+ if (vsi->xdp_rings[i]->desc) {
+ synchronize_rcu();
ice_free_tx_ring(vsi->xdp_rings[i]);
+ }
+ kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
+ vsi->xdp_rings[i]->ring_stats = NULL;
kfree_rcu(vsi->xdp_rings[i], rcu);
vsi->xdp_rings[i] = NULL;
}
@@ -2755,6 +2891,18 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
}
/**
+ * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @vsi: Pointer to VSI structure
+ */
+static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
+{
+ if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
+ return ICE_RXBUF_1664;
+ else
+ return ICE_RXBUF_3072;
+}
+
+/**
* ice_xdp_setup_prog - Add or remove XDP eBPF program
* @vsi: VSI to setup XDP for
* @prog: XDP program
@@ -2764,13 +2912,16 @@ static int
ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
- int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
+ unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
bool if_running = netif_running(vsi->netdev);
int ret = 0, xdp_ring_err = 0;
- if (frame_size > vsi->rx_buf_len) {
- NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
- return -EOPNOTSUPP;
+ if (prog && !prog->aux->xdp_has_frags) {
+ if (frame_size > ice_max_xdp_frame_size(vsi)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MTU is too large for linear frames and XDP prog does not support frags");
+ return -EOPNOTSUPP;
+ }
}
/* need to stop netdev while setting up the program for Rx rings */
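For a sense of the reworked check above: frame_size is the netdev MTU plus ICE_ETH_PKT_HDR_PAD, and a program that does not advertise xdp_has_frags is rejected once that total exceeds ice_max_xdp_frame_size() — ICE_RXBUF_3072 on a standard VSI, or ICE_RXBUF_1664 when legacy Rx is enabled. Illustratively, an MTU around 1500 passes either way, while a jumbo MTU of 9000 requires a multi-buffer (frags-capable) program. (The exact padding constant is defined outside this hunk.)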
@@ -2791,10 +2942,20 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
}
+ xdp_features_set_redirect_target(vsi->netdev, true);
+ /* reallocate Rx queues that are used for zero-copy */
+ xdp_ring_err = ice_realloc_zc_buf(vsi, true);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
+ xdp_features_clear_redirect_target(vsi->netdev);
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
+ /* reallocate Rx queues that were used for zero-copy */
+ xdp_ring_err = ice_realloc_zc_buf(vsi, false);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
} else {
/* safe to call even when prog == vsi->xdp_prog as
* dev_xdp_install in net/core/dev.c incremented prog's
@@ -2980,7 +3141,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- ice_ptp_process_ts(pf);
+ if (!hw->reset_ongoing)
+ ret = IRQ_WAKE_THREAD;
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -2997,17 +3159,9 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
if (oicr & ICE_AUX_CRIT_ERR) {
- struct iidc_event *event;
-
+ pf->oicr_err_reg |= oicr;
+ set_bit(ICE_AUX_ERR_PENDING, pf->state);
ena_mask &= ~ICE_AUX_CRIT_ERR;
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (event) {
- set_bit(IIDC_EVENT_CRIT_ERR, event->type);
- /* report the entire OICR value to AUX driver */
- event->reg = oicr;
- ice_send_event_to_aux(pf, event);
- kfree(event);
- }
}
/* Report any remaining unexpected interrupts */
@@ -3023,7 +3177,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
ice_service_task_schedule(pf);
}
}
- ret = IRQ_HANDLED;
+ if (!ret)
+ ret = IRQ_HANDLED;
ice_service_task_schedule(pf);
ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -3032,6 +3187,24 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
/**
+ * ice_misc_intr_thread_fn - misc interrupt thread function
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
+{
+ struct ice_pf *pf = data;
+
+ if (ice_is_reset_in_progress(pf->state))
+ return IRQ_HANDLED;
+
+ while (!ice_ptp_process_ts(pf))
+ usleep_range(50, 100);
+
+ return IRQ_HANDLED;
+}
+
+/**
* ice_dis_ctrlq_interrupts - disable control queue interrupts
* @hw: pointer to HW structure
*/
@@ -3143,10 +3316,12 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
pf->num_avail_sw_msix -= 1;
pf->oicr_idx = (u16)oicr_idx;
- err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
- ice_misc_intr, 0, pf->int_name, pf);
+ err = devm_request_threaded_irq(dev,
+ pf->msix_entries[pf->oicr_idx].vector,
+ ice_misc_intr, ice_misc_intr_thread_fn,
+ 0, pf->int_name, pf);
if (err) {
- dev_err(dev, "devm_request_irq for %s failed: %d\n",
+ dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
pf->int_name, err);
ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
pf->num_avail_sw_msix += 1;
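The switch to devm_request_threaded_irq() above pairs the existing hard handler with ice_misc_intr_thread_fn(): the hard handler now returns IRQ_WAKE_THREAD for Tx-timestamp work instead of calling ice_ptp_process_ts() directly, and the thread function polls it in sleepable context (hence the usleep_range() in its loop). A generic sketch of the split, under the same names used in these hunks:

	/* hard handler: interrupt context, must not sleep */
	static irqreturn_t ice_misc_intr(int irq, void *data)
	{
		/* ... ack hardware, latch state ... */
		return IRQ_WAKE_THREAD;	/* defer the sleepable part */
	}

	/* thread function: process context, sleeping is fine */
	static irqreturn_t ice_misc_intr_thread_fn(int irq, void *data)
	{
		/* ... poll until done, usleep_range() between tries ... */
		return IRQ_HANDLED;
	}

	err = devm_request_threaded_irq(dev, vector, ice_misc_intr,
					ice_misc_intr_thread_fn, 0,
					pf->int_name, pf);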
@@ -3183,15 +3358,16 @@ static void ice_napi_add(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, v_idx)
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
- ice_napi_poll, NAPI_POLL_WEIGHT);
+ ice_napi_poll);
}
/**
* ice_set_ops - set netdev and ethtools ops for the given netdev
- * @netdev: netdev instance
+ * @vsi: the VSI associated with the new netdev
*/
-static void ice_set_ops(struct net_device *netdev)
+static void ice_set_ops(struct ice_vsi *vsi)
{
+ struct net_device *netdev = vsi->netdev;
struct ice_pf *pf = ice_netdev_to_pf(netdev);
if (ice_is_safe_mode(pf)) {
@@ -3203,6 +3379,13 @@ static void ice_set_ops(struct net_device *netdev)
netdev->netdev_ops = &ice_netdev_ops;
netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
ice_set_ethtool_ops(netdev);
+
+ if (vsi->type != ICE_VSI_PF)
+ return;
+
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_RX_SG;
}
/**
@@ -3212,6 +3395,7 @@ static void ice_set_ops(struct net_device *netdev)
static void ice_set_netdev_features(struct net_device *netdev)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ bool is_dvm_ena = ice_is_dvm_ena(&pf->hw);
netdev_features_t csumo_features;
netdev_features_t vlano_features;
netdev_features_t dflt_features;
@@ -3238,6 +3422,10 @@ static void ice_set_netdev_features(struct net_device *netdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
+ /* Enable CTAG/STAG filtering by default in Double VLAN Mode (DVM) */
+ if (is_dvm_ena)
+ vlano_features |= NETIF_F_HW_VLAN_STAG_FILTER;
+
tso_features = NETIF_F_TSO |
NETIF_F_TSO_ECN |
NETIF_F_TSO6 |
@@ -3257,65 +3445,37 @@ static void ice_set_netdev_features(struct net_device *netdev)
vlano_features | tso_features;
/* add support for HW_CSUM on packets with MPLS header */
- netdev->mpls_features = NETIF_F_HW_CSUM;
+ netdev->mpls_features = NETIF_F_HW_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
/* enable features */
netdev->features |= netdev->hw_features;
netdev->hw_features |= NETIF_F_HW_TC;
+ netdev->hw_features |= NETIF_F_LOOPBACK;
/* encap and VLAN devices inherit default, csumo and tso features */
netdev->hw_enc_features |= dflt_features | csumo_features |
tso_features;
netdev->vlan_features |= dflt_features | csumo_features |
tso_features;
-}
-/**
- * ice_cfg_netdev - Allocate, configure and register a netdev
- * @vsi: the VSI associated with the new netdev
- *
- * Returns 0 on success, negative value on failure
- */
-static int ice_cfg_netdev(struct ice_vsi *vsi)
-{
- struct ice_netdev_priv *np;
- struct net_device *netdev;
- u8 mac_addr[ETH_ALEN];
-
- netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
- vsi->alloc_rxq);
- if (!netdev)
- return -ENOMEM;
-
- set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
- vsi->netdev = netdev;
- np = netdev_priv(netdev);
- np->vsi = vsi;
-
- ice_set_netdev_features(netdev);
-
- ice_set_ops(netdev);
-
- if (vsi->type == ICE_VSI_PF) {
- SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
- ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
- eth_hw_addr_set(netdev, mac_addr);
- ether_addr_copy(netdev->perm_addr, mac_addr);
- }
-
- netdev->priv_flags |= IFF_UNICAST_FLT;
-
- /* Setup netdev TC information */
- ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
-
- /* setup watchdog timeout value to be 5 second */
- netdev->watchdog_timeo = 5 * HZ;
+	/* Advertise support but don't enable by default, since only one type
+	 * of VLAN offload can be enabled at a time (i.e. CTAG or STAG). When
+	 * one type is turned on, the other has to be turned off. This is
+	 * enforced by the ice_fix_features() ndo callback.
+	 */
+ if (is_dvm_ena)
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX;
- netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = ICE_MAX_MTU;
+ /* Leave CRC / FCS stripping enabled by default, but allow the value to
+ * be changed at runtime
+ */
+ netdev->hw_features |= NETIF_F_RXFCS;
- return 0;
+ netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
}
/**
@@ -3343,14 +3503,27 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_PF;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
static struct ice_vsi *
ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_channel *ch)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_CHNL;
+ params.pi = pi;
+ params.ch = ch;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
/**
@@ -3364,7 +3537,13 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_CTRL;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
/**
@@ -3378,42 +3557,74 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_LB;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
/**
* ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
* @netdev: network interface to be adjusted
- * @proto: unused protocol
+ * @proto: VLAN TPID
* @vid: VLAN ID to be added
*
* net_device_ops implementation for adding VLAN IDs
*/
static int
-ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
- u16 vid)
+ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi_vlan_ops *vlan_ops;
struct ice_vsi *vsi = np->vsi;
+ struct ice_vlan vlan;
int ret;
/* VLAN 0 is added by default during load/reset */
if (!vid)
return 0;
- /* Enable VLAN pruning when a VLAN other than 0 is added */
- if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
- ret = ice_cfg_vlan_pruning(vsi, true);
+ while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
+ usleep_range(1000, 2000);
+
+ /* Add multicast promisc rule for the VLAN ID to be added if
+ * all-multicast is currently enabled.
+ */
+ if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+ ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS,
+ vid);
if (ret)
- return ret;
+ goto finish;
}
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
/* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
* packets aren't pruned by the device's internal switch on Rx
*/
- ret = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
- if (!ret)
- set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
+ vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
+ ret = vlan_ops->add_vlan(vsi, &vlan);
+ if (ret)
+ goto finish;
+
+	/* If all-multicast is currently enabled and this VLAN ID is the only
+	 * one besides VLAN-0, we have to update the look-up type of the
+	 * multicast promisc rule for VLAN-0 from ICE_SW_LKUP_PROMISC to
+	 * ICE_SW_LKUP_PROMISC_VLAN.
+	 */
+ if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
+ ice_vsi_num_non_zero_vlans(vsi) == 1) {
+ ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_PROMISC_BITS, 0);
+ ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, 0);
+ }
+
+finish:
+ clear_bit(ICE_CFG_BUSY, vsi->state);
return ret;
}
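Both VLAN ndo callbacks now bracket their work with the same lightweight serialization shown above, so filter changes and the all-multicast promiscuous updates cannot interleave. The pattern in isolation (taken from the hunk; ICE_CFG_BUSY acts as a sleeping test-and-set lock on the VSI state):

	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
		usleep_range(1000, 2000);	/* back off until the bit is free */

	/* ... add/remove VLAN filters, adjust promisc rules ... */

	clear_bit(ICE_CFG_BUSY, vsi->state);	/* released at the finish label */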
@@ -3421,35 +3632,69 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
/**
* ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
* @netdev: network interface to be adjusted
- * @proto: unused protocol
+ * @proto: VLAN TPID
* @vid: VLAN ID to be removed
*
* net_device_ops implementation for removing VLAN IDs
*/
static int
-ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
- u16 vid)
+ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi_vlan_ops *vlan_ops;
struct ice_vsi *vsi = np->vsi;
+ struct ice_vlan vlan;
int ret;
/* don't allow removal of VLAN 0 */
if (!vid)
return 0;
- /* Make sure ice_vsi_kill_vlan is successful before updating VLAN
+ while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
+ usleep_range(1000, 2000);
+
+ ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, vid);
+ if (ret) {
+ netdev_err(netdev, "Error clearing multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags |= IFF_ALLMULTI;
+ }
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ /* Make sure VLAN delete is successful before updating VLAN
* information
*/
- ret = ice_vsi_kill_vlan(vsi, vid);
+ vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
+ ret = vlan_ops->del_vlan(vsi, &vlan);
if (ret)
- return ret;
+ goto finish;
+
+ /* Remove multicast promisc rule for the removed VLAN ID if
+ * all-multicast is enabled.
+ */
+ if (vsi->current_netdev_flags & IFF_ALLMULTI)
+ ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS, vid);
+
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ /* Update look-up type of multicast promisc rule for VLAN 0
+ * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
+ * all-multicast is enabled and VLAN 0 is the only VLAN rule.
+ */
+ if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+ ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_VLAN_PROMISC_BITS,
+ 0);
+ ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+ ICE_MCAST_PROMISC_BITS, 0);
+ }
+ }
- /* Disable pruning when VLAN 0 is the only VLAN rule */
- if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
- ret = ice_cfg_vlan_pruning(vsi, false);
+finish:
+ clear_bit(ICE_CFG_BUSY, vsi->state);
- set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
return ret;
}
@@ -3478,20 +3723,6 @@ static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
}
/**
- * ice_tc_indir_block_remove - clean indirect TC block notifications
- * @pf: PF structure
- */
-static void ice_tc_indir_block_remove(struct ice_pf *pf)
-{
- struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
-
- if (!pf_vsi)
- return;
-
- ice_tc_indir_block_unregister(pf_vsi);
-}
-
-/**
* ice_tc_indir_block_register - Register TC indirect block notifications
* @vsi: VSI struct which has the netdev
*
@@ -3511,84 +3742,6 @@ static int ice_tc_indir_block_register(struct ice_vsi *vsi)
}
/**
- * ice_setup_pf_sw - Setup the HW switch on startup or after reset
- * @pf: board private structure
- *
- * Returns 0 on success, negative value on failure
- */
-static int ice_setup_pf_sw(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_vsi *vsi;
- int status = 0;
-
- if (ice_is_reset_in_progress(pf->state))
- return -EBUSY;
-
- vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
- if (!vsi)
- return -ENOMEM;
-
- /* init channel list */
- INIT_LIST_HEAD(&vsi->ch_list);
-
- status = ice_cfg_netdev(vsi);
- if (status) {
- status = -ENODEV;
- goto unroll_vsi_setup;
- }
- /* netdev has to be configured before setting frame size */
- ice_vsi_cfg_frame_size(vsi);
-
- /* init indirect block notifications */
- status = ice_tc_indir_block_register(vsi);
- if (status) {
- dev_err(dev, "Failed to register netdev notifier\n");
- goto unroll_cfg_netdev;
- }
-
- /* Setup DCB netlink interface */
- ice_dcbnl_setup(vsi);
-
- /* registering the NAPI handler requires both the queues and
- * netdev to be created, which are done in ice_pf_vsi_setup()
- * and ice_cfg_netdev() respectively
- */
- ice_napi_add(vsi);
-
- status = ice_set_cpu_rx_rmap(vsi);
- if (status) {
- dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
- vsi->vsi_num, status);
- status = -EINVAL;
- goto unroll_napi_add;
- }
- status = ice_init_mac_fltr(pf);
- if (status)
- goto free_cpu_rx_map;
-
- return status;
-
-free_cpu_rx_map:
- ice_free_cpu_rx_rmap(vsi);
-unroll_napi_add:
- ice_tc_indir_block_unregister(vsi);
-unroll_cfg_netdev:
- if (vsi) {
- ice_napi_del(vsi);
- if (vsi->netdev) {
- clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- }
- }
-
-unroll_vsi_setup:
- ice_vsi_release(vsi);
- return status;
-}
-
-/**
* ice_get_avail_q_count - Get count of queues in use
* @pf_qmap: bitmap to get queue use count from
* @lock: pointer to a mutex that protects access to pf_qmap
@@ -3635,9 +3788,11 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
static void ice_deinit_pf(struct ice_pf *pf)
{
ice_service_task_stop(pf);
+ mutex_destroy(&pf->adev_mutex);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->tc_mutex);
mutex_destroy(&pf->avail_q_mutex);
+ mutex_destroy(&pf->vfs.table_lock);
if (pf->avail_txqs) {
bitmap_free(pf->avail_txqs);
@@ -3662,19 +3817,16 @@ static void ice_set_pf_caps(struct ice_pf *pf)
struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
- if (func_caps->common_cap.rdma) {
+ if (func_caps->common_cap.rdma)
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- set_bit(ICE_FLAG_AUX_ENA, pf->flags);
- }
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
if (func_caps->common_cap.dcb)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
if (func_caps->common_cap.sr_iov_1_1) {
set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
- pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
- ICE_MAX_VF_COUNT);
+ pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs,
+ ICE_MAX_SRIOV_VFS);
}
clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
if (func_caps->common_cap.rss_table_size)
@@ -3715,6 +3867,7 @@ static int ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
+ mutex_init(&pf->adev_mutex);
INIT_HLIST_HEAD(&pf->aq_wait_list);
spin_lock_init(&pf->aq_wait_lock);
@@ -3735,96 +3888,147 @@ static int ice_init_pf(struct ice_pf *pf)
pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
if (!pf->avail_rxqs) {
- devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
+ bitmap_free(pf->avail_txqs);
pf->avail_txqs = NULL;
return -ENOMEM;
}
+ mutex_init(&pf->vfs.table_lock);
+ hash_init(pf->vfs.table);
+ ice_mbx_init_snapshot(&pf->hw);
+
return 0;
}
/**
+ * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
+ * @pf: board private structure
+ * @v_remain: number of remaining MSI-X vectors to be distributed
+ *
+ * Reduce the usage of MSI-X vectors when the entire request cannot be fulfilled.
+ * pf->num_lan_msix and pf->num_rdma_msix values are set based on the number of
+ * remaining vectors.
+ */
+static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
+{
+ int v_rdma;
+
+ if (!ice_is_rdma_ena(pf)) {
+ pf->num_lan_msix = v_remain;
+ return;
+ }
+
+ /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
+ v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
+
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
+ dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
+ clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+
+ pf->num_rdma_msix = 0;
+ pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
+ } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
+ (v_remain - v_rdma < v_rdma)) {
+ /* Support minimum RDMA and give remaining vectors to LAN MSIX */
+ pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
+ pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
+ } else {
+		/* Split remaining MSIX with RDMA after accounting for AEQ MSIX */
+ pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
+ ICE_RDMA_NUM_AEQ_MSIX;
+ pf->num_lan_msix = v_remain - pf->num_rdma_msix;
+ }
+}
+
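A worked example of the split implemented by ice_reduce_msix_usage(), assuming for illustration that ICE_RDMA_NUM_AEQ_MSIX is 4 and the LAN/RDMA minimum constants are small single-digit values, as they are in practice: with RDMA enabled and v_remain = 64, v_rdma = 4 + 1 = 5 and neither shortage branch applies, so num_rdma_msix = (64 - 4) / 2 + 4 = 34 and num_lan_msix = 64 - 34 = 30. When v_remain drops close to v_rdma, the middle branch pins RDMA to ICE_MIN_RDMA_MSIX and gives LAN whatever is left; below the combined minimums, RDMA is disabled entirely.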
+/**
* ice_ena_msix_range - Request a range of MSIX vectors from the OS
* @pf: board private structure
*
- * compute the number of MSIX vectors required (v_budget) and request from
- * the OS. Return the number of vectors reserved or negative on failure
+ * Compute the number of MSIX vectors wanted and request them from the OS. Adjust
+ * device usage if there are not enough vectors. Return the number of vectors
+ * reserved or negative on failure.
*/
static int ice_ena_msix_range(struct ice_pf *pf)
{
- int num_cpus, v_left, v_actual, v_other, v_budget = 0;
+ int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
struct device *dev = ice_pf_to_dev(pf);
- int needed, err, i;
+ int err, i;
- v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
+ hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
num_cpus = num_online_cpus();
- /* reserve for LAN miscellaneous handler */
- needed = ICE_MIN_LAN_OICR_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- v_budget += needed;
- v_left -= needed;
+ /* LAN miscellaneous handler */
+ v_other = ICE_MIN_LAN_OICR_MSIX;
- /* reserve for flow director */
- if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
- needed = ICE_FDIR_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- v_budget += needed;
- v_left -= needed;
- }
-
- /* reserve for switchdev */
- needed = ICE_ESWITCH_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- v_budget += needed;
- v_left -= needed;
-
- /* total used for non-traffic vectors */
- v_other = v_budget;
-
- /* reserve vectors for LAN traffic */
- needed = num_cpus;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- pf->num_lan_msix = needed;
- v_budget += needed;
- v_left -= needed;
-
- /* reserve vectors for RDMA auxiliary driver */
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
- needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- pf->num_rdma_msix = needed;
- v_budget += needed;
- v_left -= needed;
- }
-
- pf->msix_entries = devm_kcalloc(dev, v_budget,
+ /* Flow Director */
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ v_other += ICE_FDIR_MSIX;
+
+ /* switchdev */
+ v_other += ICE_ESWITCH_MSIX;
+
+ v_wanted = v_other;
+
+ /* LAN traffic */
+ pf->num_lan_msix = num_cpus;
+ v_wanted += pf->num_lan_msix;
+
+ /* RDMA auxiliary driver */
+ if (ice_is_rdma_ena(pf)) {
+ pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
+ v_wanted += pf->num_rdma_msix;
+ }
+
+ if (v_wanted > hw_num_msix) {
+ int v_remain;
+
+ dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
+ v_wanted, hw_num_msix);
+
+ if (hw_num_msix < ICE_MIN_MSIX) {
+ err = -ERANGE;
+ goto exit_err;
+ }
+
+ v_remain = hw_num_msix - v_other;
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
+ v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
+ v_remain = ICE_MIN_LAN_TXRX_MSIX;
+ }
+
+ ice_reduce_msix_usage(pf, v_remain);
+ v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
+
+ dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
+ pf->num_lan_msix);
+ if (ice_is_rdma_ena(pf))
+ dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
+ pf->num_rdma_msix);
+ }
+
+ pf->msix_entries = devm_kcalloc(dev, v_wanted,
sizeof(*pf->msix_entries), GFP_KERNEL);
if (!pf->msix_entries) {
err = -ENOMEM;
goto exit_err;
}
- for (i = 0; i < v_budget; i++)
+ for (i = 0; i < v_wanted; i++)
pf->msix_entries[i].entry = i;
/* actually reserve the vectors */
v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
- ICE_MIN_MSIX, v_budget);
+ ICE_MIN_MSIX, v_wanted);
if (v_actual < 0) {
dev_err(dev, "unable to reserve MSI-X vectors\n");
err = v_actual;
goto msix_err;
}
- if (v_actual < v_budget) {
+ if (v_actual < v_wanted) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
- v_budget, v_actual);
+ v_wanted, v_actual);
if (v_actual < ICE_MIN_MSIX) {
/* error if we can't get minimum vectors */
@@ -3833,43 +4037,16 @@ static int ice_ena_msix_range(struct ice_pf *pf)
goto msix_err;
} else {
int v_remain = v_actual - v_other;
- int v_rdma = 0, v_min_rdma = 0;
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
- /* Need at least 1 interrupt in addition to
- * AEQ MSIX
- */
- v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
- v_min_rdma = ICE_MIN_RDMA_MSIX;
- }
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
+ v_remain = ICE_MIN_LAN_TXRX_MSIX;
- if (v_actual == ICE_MIN_MSIX ||
- v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
- dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
- clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
-
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
- } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
- (v_remain - v_rdma < v_rdma)) {
- /* Support minimum RDMA and give remaining
- * vectors to LAN MSIX
- */
- pf->num_rdma_msix = v_min_rdma;
- pf->num_lan_msix = v_remain - v_min_rdma;
- } else {
- /* Split remaining MSIX with RDMA after
- * accounting for AEQ MSIX
- */
- pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
- ICE_RDMA_NUM_AEQ_MSIX;
- pf->num_lan_msix = v_remain - pf->num_rdma_msix;
- }
+ ice_reduce_msix_usage(pf, v_remain);
dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
pf->num_lan_msix);
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ if (ice_is_rdma_ena(pf))
dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
pf->num_rdma_msix);
}
@@ -3879,12 +4056,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
msix_err:
devm_kfree(dev, pf->msix_entries);
- goto exit_err;
-no_hw_vecs_left_err:
- dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
- needed, v_left);
- err = -ERANGE;
exit_err:
pf->num_rdma_msix = 0;
pf->num_lan_msix = 0;
@@ -3971,12 +4143,13 @@ bool ice_is_wol_supported(struct ice_hw *hw)
* @vsi: VSI being changed
* @new_rx: new number of Rx queues
* @new_tx: new number of Tx queues
+ * @locked: is adev device_lock held
*
* Only change the number of queues if new_tx, or new_rx is non-0.
*
* Returns 0 on success.
*/
-int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
{
struct ice_pf *pf = vsi->back;
int err = 0, timeout = 50;
@@ -3998,14 +4171,14 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
- ice_vsi_rebuild(vsi, false);
+ ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
goto done;
}
ice_vsi_close(vsi);
- ice_vsi_rebuild(vsi, false);
- ice_pf_dcb_recfg(pf);
+ ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ ice_pf_dcb_recfg(pf, locked);
ice_vsi_open(vsi);
done:
clear_bit(ICE_CFG_BUSY, pf->state);
@@ -4023,8 +4196,8 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
{
struct ice_vsi *vsi = ice_get_main_vsi(pf);
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!vsi)
return;
@@ -4049,18 +4222,17 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
/* allow all VLANs on Tx and don't strip on Rx */
- ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
- ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL |
+ ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
+ vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
}
kfree(ctxt);
@@ -4069,108 +4241,80 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
/**
* ice_log_pkg_init - log result of DDP package load
* @hw: pointer to hardware info
- * @status: status of package load
+ * @state: state of package load
*/
-static void
-ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
+static void ice_log_pkg_init(struct ice_hw *hw, enum ice_ddp_state state)
{
- struct ice_pf *pf = (struct ice_pf *)hw->back;
- struct device *dev = ice_pf_to_dev(pf);
+ struct ice_pf *pf = hw->back;
+ struct device *dev;
- switch (*status) {
- case ICE_SUCCESS:
- /* The package download AdminQ command returned success because
- * this download succeeded or ICE_ERR_AQ_NO_WORK since there is
- * already a package loaded on the device.
- */
- if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
- hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
- hw->pkg_ver.update == hw->active_pkg_ver.update &&
- hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
- !memcmp(hw->pkg_name, hw->active_pkg_name,
- sizeof(hw->pkg_name))) {
- if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
- dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- hw->active_pkg_ver.update,
- hw->active_pkg_ver.draft);
- else
- dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- hw->active_pkg_ver.update,
- hw->active_pkg_ver.draft);
- } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
- hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
- dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
- *status = ICE_ERR_NOT_SUPPORTED;
- } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
- dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
- hw->active_pkg_name,
- hw->active_pkg_ver.major,
- hw->active_pkg_ver.minor,
- hw->active_pkg_ver.update,
- hw->active_pkg_ver.draft,
- hw->pkg_name,
- hw->pkg_ver.major,
- hw->pkg_ver.minor,
- hw->pkg_ver.update,
- hw->pkg_ver.draft);
- } else {
- dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
- *status = ICE_ERR_NOT_SUPPORTED;
- }
+ dev = ice_pf_to_dev(pf);
+
+ switch (state) {
+ case ICE_DDP_PKG_SUCCESS:
+ dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft);
break;
- case ICE_ERR_FW_DDP_MISMATCH:
+ case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
+ dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft);
+ break;
+ case ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED:
+ dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ break;
+ case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
+ dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft,
+ hw->pkg_name,
+ hw->pkg_ver.major,
+ hw->pkg_ver.minor,
+ hw->pkg_ver.update,
+ hw->pkg_ver.draft);
+ break;
+ case ICE_DDP_PKG_FW_MISMATCH:
dev_err(dev, "The firmware loaded on the device is not compatible with the DDP package. Please update the device's NVM. Entering safe mode.\n");
break;
- case ICE_ERR_BUF_TOO_SHORT:
- case ICE_ERR_CFG:
+ case ICE_DDP_PKG_INVALID_FILE:
dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
break;
- case ICE_ERR_NOT_SUPPORTED:
- /* Package File version not supported */
- if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
- (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
- dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
- else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
- (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
- dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
- ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ case ICE_DDP_PKG_FILE_VERSION_TOO_HIGH:
+ dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
break;
- case ICE_ERR_AQ_ERROR:
- switch (hw->pkg_dwnld_status) {
- case ICE_AQ_RC_ENOSEC:
- case ICE_AQ_RC_EBADSIG:
- dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
- return;
- case ICE_AQ_RC_ESVN:
- dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
- return;
- case ICE_AQ_RC_EBADMAN:
- case ICE_AQ_RC_EBADBUF:
- dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
- /* poll for reset to complete */
- if (ice_check_reset(hw))
- dev_err(dev, "Error resetting device. Please reload the driver\n");
- return;
- default:
- break;
- }
- fallthrough;
+ case ICE_DDP_PKG_FILE_VERSION_TOO_LOW:
+ dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ break;
+ case ICE_DDP_PKG_FILE_SIGNATURE_INVALID:
+ dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
+ break;
+ case ICE_DDP_PKG_FILE_REVISION_TOO_LOW:
+ dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
+ break;
+ case ICE_DDP_PKG_LOAD_ERROR:
+ dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
+ /* poll for reset to complete */
+ if (ice_check_reset(hw))
+ dev_err(dev, "Error resetting device. Please reload the driver\n");
+ break;
+ case ICE_DDP_PKG_ERR:
default:
- dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
- *status);
+ dev_err(dev, "An unknown error occurred when loading the DDP package. Entering Safe Mode.\n");
break;
}
}
@@ -4186,24 +4330,24 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
static void
ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
{
- enum ice_status status = ICE_ERR_PARAM;
+ enum ice_ddp_state state = ICE_DDP_PKG_ERR;
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
/* Load DDP Package */
if (firmware && !hw->pkg_copy) {
- status = ice_copy_and_init_pkg(hw, firmware->data,
- firmware->size);
- ice_log_pkg_init(hw, &status);
+ state = ice_copy_and_init_pkg(hw, firmware->data,
+ firmware->size);
+ ice_log_pkg_init(hw, state);
} else if (!firmware && hw->pkg_copy) {
/* Reload package during rebuild after CORER/GLOBR reset */
- status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
- ice_log_pkg_init(hw, &status);
+ state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
+ ice_log_pkg_init(hw, state);
} else {
dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
}
- if (status) {
+ if (!ice_is_init_pkg_successful(state)) {
/* Safe Mode */
clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
return;
@@ -4234,9 +4378,9 @@ static void ice_verify_cacheline_size(struct ice_pf *pf)
* ice_send_version - update firmware with driver version
* @pf: PF struct
*
- * Returns ICE_SUCCESS on success, else error code
+ * Returns 0 on success, else error code
*/
-static enum ice_status ice_send_version(struct ice_pf *pf)
+static int ice_send_version(struct ice_pf *pf)
{
struct ice_driver_ver dv;
@@ -4296,6 +4440,23 @@ err_vsi_open:
return err;
}
+static void ice_deinit_fdir(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
+
+ if (!vsi)
+ return;
+
+ ice_vsi_manage_fdir(vsi, false);
+ ice_vsi_release(vsi);
+ if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
+ pf->vsi[pf->ctrl_vsi_idx] = NULL;
+ pf->ctrl_vsi_idx = ICE_NO_VSI;
+ }
+
+ mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
+}
+
/**
* ice_get_opt_fw_name - return optional firmware file name or NULL
* @pf: pointer to the PF instance
@@ -4395,126 +4556,178 @@ static void ice_print_wake_reason(struct ice_pf *pf)
}
/**
- * ice_register_netdev - register netdev and devlink port
- * @pf: pointer to the PF struct
+ * ice_register_netdev - register netdev
+ * @vsi: pointer to the VSI struct
*/
-static int ice_register_netdev(struct ice_pf *pf)
+static int ice_register_netdev(struct ice_vsi *vsi)
{
- struct ice_vsi *vsi;
- int err = 0;
+ int err;
- vsi = ice_get_main_vsi(pf);
if (!vsi || !vsi->netdev)
return -EIO;
err = register_netdev(vsi->netdev);
if (err)
- goto err_register_netdev;
+ return err;
set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);
- err = ice_devlink_create_pf_port(pf);
- if (err)
- goto err_devlink_create;
-
- devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
return 0;
-err_devlink_create:
+}
+
+static void ice_unregister_netdev(struct ice_vsi *vsi)
+{
+ if (!vsi || !vsi->netdev)
+ return;
+
unregister_netdev(vsi->netdev);
clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
-err_register_netdev:
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
- return err;
}
/**
- * ice_probe - Device initialization routine
- * @pdev: PCI device information struct
- * @ent: entry in ice_pci_tbl
+ * ice_cfg_netdev - Allocate, configure and register a netdev
+ * @vsi: the VSI associated with the new netdev
*
- * Returns 0 on success, negative on failure
+ * Returns 0 on success, negative value on failure
*/
-static int
-ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
+static int ice_cfg_netdev(struct ice_vsi *vsi)
{
- struct device *dev = &pdev->dev;
- struct ice_pf *pf;
- struct ice_hw *hw;
- int i, err;
+ struct ice_netdev_priv *np;
+ struct net_device *netdev;
+ u8 mac_addr[ETH_ALEN];
- if (pdev->is_virtfn) {
- dev_err(dev, "can't probe a virtual function\n");
- return -EINVAL;
+ netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
+ vsi->alloc_rxq);
+ if (!netdev)
+ return -ENOMEM;
+
+ set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ vsi->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vsi = vsi;
+
+ ice_set_netdev_features(netdev);
+ ice_set_ops(vsi);
+
+ if (vsi->type == ICE_VSI_PF) {
+ SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
+ ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
+ eth_hw_addr_set(netdev, mac_addr);
}
- /* this driver uses devres, see
- * Documentation/driver-api/driver-model/devres.rst
- */
- err = pcim_enable_device(pdev);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Setup netdev TC information */
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+
+ netdev->max_mtu = ICE_MAX_MTU;
+
+ return 0;
+}
+
+static void ice_decfg_netdev(struct ice_vsi *vsi)
+{
+ clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+}
+
+static int ice_start_eth(struct ice_vsi *vsi)
+{
+ int err;
+
+ err = ice_init_mac_fltr(vsi->back);
if (err)
return err;
- err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
- if (err) {
- dev_err(dev, "BAR0 I/O map error %d\n", err);
- return err;
- }
+ rtnl_lock();
+ err = ice_vsi_open(vsi);
+ rtnl_unlock();
- pf = ice_allocate_pf(dev);
- if (!pf)
- return -ENOMEM;
+ return err;
+}
- /* initialize Auxiliary index to invalid value */
- pf->aux_idx = -1;
+static void ice_stop_eth(struct ice_vsi *vsi)
+{
+ ice_fltr_remove_all(vsi);
+ ice_vsi_close(vsi);
+}
- /* set up for high or low DMA */
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+static int ice_init_eth(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ int err;
+
+ if (!vsi)
+ return -EINVAL;
+
+ /* init channel list */
+ INIT_LIST_HEAD(&vsi->ch_list);
+
+ err = ice_cfg_netdev(vsi);
if (err)
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(dev, "DMA configuration failed: 0x%x\n", err);
return err;
- }
+ /* Setup DCB netlink interface */
+ ice_dcbnl_setup(vsi);
- pci_enable_pcie_error_reporting(pdev);
- pci_set_master(pdev);
+ err = ice_init_mac_fltr(pf);
+ if (err)
+ goto err_init_mac_fltr;
- pf->pdev = pdev;
- pci_set_drvdata(pdev, pf);
- set_bit(ICE_DOWN, pf->state);
- /* Disable service task until DOWN bit is cleared */
- set_bit(ICE_SERVICE_DIS, pf->state);
+ err = ice_devlink_create_pf_port(pf);
+ if (err)
+ goto err_devlink_create_pf_port;
- hw = &pf->hw;
- hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
- pci_save_state(pdev);
+ SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
- hw->back = pf;
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_device_id = pdev->subsystem_device;
- hw->bus.device = PCI_SLOT(pdev->devfn);
- hw->bus.func = PCI_FUNC(pdev->devfn);
- ice_set_ctrlq_len(hw);
+ err = ice_register_netdev(vsi);
+ if (err)
+ goto err_register_netdev;
- pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
+ err = ice_tc_indir_block_register(vsi);
+ if (err)
+ goto err_tc_indir_block_register;
-#ifndef CONFIG_DYNAMIC_DEBUG
- if (debug < -1)
- hw->debug_mask = debug;
-#endif
+ ice_napi_add(vsi);
+
+ return 0;
+
+err_tc_indir_block_register:
+ ice_unregister_netdev(vsi);
+err_register_netdev:
+ ice_devlink_destroy_pf_port(pf);
+err_devlink_create_pf_port:
+err_init_mac_fltr:
+ ice_decfg_netdev(vsi);
+ return err;
+}
+
+static void ice_deinit_eth(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+ if (!vsi)
+ return;
+
+ ice_vsi_close(vsi);
+ ice_unregister_netdev(vsi);
+ ice_devlink_destroy_pf_port(pf);
+ ice_tc_indir_block_unregister(vsi);
+ ice_decfg_netdev(vsi);
+}
+
+static int ice_init_dev(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int err;
err = ice_init_hw(hw);
if (err) {
dev_err(dev, "ice_init_hw failed: %d\n", err);
- err = -EIO;
- goto err_exit_unroll;
+ return err;
}
ice_init_feature_support(pf);
@@ -4526,7 +4739,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
* true
*/
if (ice_is_safe_mode(pf)) {
- dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
/* we already got function/device capabilities but these don't
* reflect what the driver needs to do in safe mode. Instead of
* adding conditional logic everywhere to ignore these
@@ -4538,55 +4750,31 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_init_pf(pf);
if (err) {
dev_err(dev, "ice_init_pf failed: %d\n", err);
- goto err_init_pf_unroll;
+ goto err_init_pf;
}
- ice_devlink_init_regions(pf);
-
pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
- i = 0;
if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
- pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.udp_tunnel_nic.tables[0].n_entries =
pf->hw.tnl.valid_count[TNL_VXLAN];
- pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
UDP_TUNNEL_TYPE_VXLAN;
- i++;
}
if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
- pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.udp_tunnel_nic.tables[1].n_entries =
pf->hw.tnl.valid_count[TNL_GENEVE];
- pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
UDP_TUNNEL_TYPE_GENEVE;
- i++;
- }
-
- pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
- if (!pf->num_alloc_vsi) {
- err = -EIO;
- goto err_init_pf_unroll;
- }
- if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
- dev_warn(&pf->pdev->dev,
- "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
- pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
- pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
- }
-
- pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
- GFP_KERNEL);
- if (!pf->vsi) {
- err = -ENOMEM;
- goto err_init_pf_unroll;
}
err = ice_init_interrupt_scheme(pf);
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_vsi_unroll;
+ goto err_init_interrupt_scheme;
}
/* In case of MSIX we are going to setup the misc vector right here
@@ -4597,49 +4785,94 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(dev, "setup of misc vector failed: %d\n", err);
- goto err_init_interrupt_unroll;
+ goto err_req_irq_msix_misc;
}
- /* create switch struct for the switch element created by FW on boot */
- pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
- if (!pf->first_sw) {
- err = -ENOMEM;
- goto err_msix_misc_unroll;
- }
+ return 0;
- if (hw->evb_veb)
- pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
- else
- pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
+err_req_irq_msix_misc:
+ ice_clear_interrupt_scheme(pf);
+err_init_interrupt_scheme:
+ ice_deinit_pf(pf);
+err_init_pf:
+ ice_deinit_hw(hw);
+ return err;
+}
- pf->first_sw->pf = pf;
+static void ice_deinit_dev(struct ice_pf *pf)
+{
+ ice_free_irq_msix_misc(pf);
+ ice_clear_interrupt_scheme(pf);
+ ice_deinit_pf(pf);
+ ice_deinit_hw(&pf->hw);
+}
- /* record the sw_id available for later use */
- pf->first_sw->sw_id = hw->port_info->sw_id;
+static void ice_init_features(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
- err = ice_setup_pf_sw(pf);
- if (err) {
- dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
- goto err_alloc_sw_unroll;
- }
+ if (ice_is_safe_mode(pf))
+ return;
- clear_bit(ICE_SERVICE_DIS, pf->state);
+ /* initialize DDP driven features */
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_init(pf);
- /* tell the firmware we are up */
- err = ice_send_version(pf);
- if (err) {
- dev_err(dev, "probe failed sending driver version %s. error: %d\n",
- UTS_RELEASE, err);
- goto err_send_version_unroll;
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_init(pf);
+
+ /* Note: Flow director init failure is non-fatal to load */
+ if (ice_init_fdir(pf))
+ dev_err(dev, "could not initialize flow director\n");
+
+ /* Note: DCB init failure is non-fatal to load */
+ if (ice_init_pf_dcb(pf, false)) {
+ clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ } else {
+ ice_cfg_lldp_mib_change(&pf->hw, true);
}
- /* since everything is good, start the service timer */
- mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+ if (ice_init_lag(pf))
+ dev_warn(dev, "Failed to init link aggregation support\n");
+}
+
+static void ice_deinit_features(struct ice_pf *pf)
+{
+ ice_deinit_lag(pf);
+ if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
+ ice_cfg_lldp_mib_change(&pf->hw, false);
+ ice_deinit_fdir(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_exit(pf);
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_release(pf);
+}
+
+static void ice_init_wakeup(struct ice_pf *pf)
+{
+ /* Save wakeup reason register for later use */
+ pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
+
+ /* check for a power management event */
+ ice_print_wake_reason(pf);
+
+ /* clear wake status, all bits */
+ wr32(&pf->hw, PFPM_WUS, U32_MAX);
+
+ /* Disable WoL at init, wait for user to enable */
+ device_set_wakeup_enable(ice_pf_to_dev(pf), false);
+}
+
+static int ice_init_link(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
err = ice_init_link_events(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_link_events failed: %d\n", err);
- goto err_send_version_unroll;
+ return err;
}
/* not a fatal error if this fails */
@@ -4675,97 +4908,350 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
}
- ice_verify_cacheline_size(pf);
+ return err;
+}
- /* Save wakeup reason register for later use */
- pf->wakeup_reason = rd32(hw, PFPM_WUS);
+static int ice_init_pf_sw(struct ice_pf *pf)
+{
+ bool dvm = ice_is_dvm_ena(&pf->hw);
+ struct ice_vsi *vsi;
+ int err;
- /* check for a power management event */
- ice_print_wake_reason(pf);
+ /* create switch struct for the switch element created by FW on boot */
+ pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
+ if (!pf->first_sw)
+ return -ENOMEM;
- /* clear wake status, all bits */
- wr32(hw, PFPM_WUS, U32_MAX);
+ if (pf->hw.evb_veb)
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
+ else
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
- /* Disable WoL at init, wait for user to enable */
- device_set_wakeup_enable(dev, false);
+ pf->first_sw->pf = pf;
- if (ice_is_safe_mode(pf)) {
- ice_set_safe_mode_vlan_cfg(pf);
- goto probe_done;
+ /* record the sw_id available for later use */
+ pf->first_sw->sw_id = pf->hw.port_info->sw_id;
+
+ err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
+ if (err)
+ goto err_aq_set_port_params;
+
+ vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
+ if (!vsi) {
+ err = -ENOMEM;
+ goto err_pf_vsi_setup;
}
- /* initialize DDP driven features */
- if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_init(pf);
+ return 0;
- /* Note: Flow director init failure is non-fatal to load */
- if (ice_init_fdir(pf))
- dev_err(dev, "could not initialize flow director\n");
+err_pf_vsi_setup:
+err_aq_set_port_params:
+ kfree(pf->first_sw);
+ return err;
+}
- /* Note: DCB init failure is non-fatal to load */
- if (ice_init_pf_dcb(pf, false)) {
- clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
- clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
- } else {
- ice_cfg_lldp_mib_change(&pf->hw, true);
+static void ice_deinit_pf_sw(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+ if (!vsi)
+ return;
+
+ ice_vsi_release(vsi);
+ kfree(pf->first_sw);
+}
+
+static int ice_alloc_vsis(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
+ if (!pf->num_alloc_vsi)
+ return -EIO;
+
+ if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
+ dev_warn(dev,
+ "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
+ pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
+ pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
}
- if (ice_init_lag(pf))
- dev_warn(dev, "Failed to init link aggregation support\n");
+ pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
+ GFP_KERNEL);
+ if (!pf->vsi)
+ return -ENOMEM;
+
+ pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
+ sizeof(*pf->vsi_stats), GFP_KERNEL);
+ if (!pf->vsi_stats) {
+ devm_kfree(dev, pf->vsi);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ice_dealloc_vsis(struct ice_pf *pf)
+{
+ devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
+ pf->vsi_stats = NULL;
+
+ pf->num_alloc_vsi = 0;
+ devm_kfree(ice_pf_to_dev(pf), pf->vsi);
+ pf->vsi = NULL;
+}
- /* print PCI link speed and width */
- pcie_print_link_status(pf->pdev);
+static int ice_init_devlink(struct ice_pf *pf)
+{
+ int err;
+
+ err = ice_devlink_register_params(pf);
+ if (err)
+ return err;
+
+ ice_devlink_init_regions(pf);
+ ice_devlink_register(pf);
+
+ return 0;
+}
+
+static void ice_deinit_devlink(struct ice_pf *pf)
+{
+ ice_devlink_unregister(pf);
+ ice_devlink_destroy_regions(pf);
+ ice_devlink_unregister_params(pf);
+}
-probe_done:
- err = ice_register_netdev(pf);
+static int ice_init(struct ice_pf *pf)
+{
+ int err;
+
+ err = ice_init_dev(pf);
if (err)
- goto err_netdev_reg;
+ return err;
+
+ err = ice_alloc_vsis(pf);
+ if (err)
+ goto err_alloc_vsis;
+
+ err = ice_init_pf_sw(pf);
+ if (err)
+ goto err_init_pf_sw;
+
+ ice_init_wakeup(pf);
+
+ err = ice_init_link(pf);
+ if (err)
+ goto err_init_link;
+
+ err = ice_send_version(pf);
+ if (err)
+ goto err_init_link;
+
+ ice_verify_cacheline_size(pf);
+
+ if (ice_is_safe_mode(pf))
+ ice_set_safe_mode_vlan_cfg(pf);
+ else
+ /* print PCI link speed and width */
+ pcie_print_link_status(pf->pdev);
/* ready to go, so clear down state bit */
clear_bit(ICE_DOWN, pf->state);
- if (ice_is_aux_ena(pf)) {
- pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
- if (pf->aux_idx < 0) {
- dev_err(dev, "Failed to allocate device ID for AUX driver\n");
- err = -ENOMEM;
- goto err_netdev_reg;
- }
+ clear_bit(ICE_SERVICE_DIS, pf->state);
- err = ice_init_rdma(pf);
- if (err) {
- dev_err(dev, "Failed to initialize RDMA: %d\n", err);
- err = -EIO;
- goto err_init_aux_unroll;
- }
- } else {
- dev_warn(dev, "RDMA is not supported on this device\n");
- }
+ /* since everything is good, start the service timer */
+ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
- ice_devlink_register(pf);
return 0;
-err_init_aux_unroll:
- pf->adev = NULL;
- ida_free(&ice_aux_ida, pf->aux_idx);
-err_netdev_reg:
-err_send_version_unroll:
- ice_vsi_release_all(pf);
-err_alloc_sw_unroll:
+err_init_link:
+ ice_deinit_pf_sw(pf);
+err_init_pf_sw:
+ ice_dealloc_vsis(pf);
+err_alloc_vsis:
+ ice_deinit_dev(pf);
+ return err;
+}
+
+static void ice_deinit(struct ice_pf *pf)
+{
set_bit(ICE_SERVICE_DIS, pf->state);
set_bit(ICE_DOWN, pf->state);
- devm_kfree(dev, pf->first_sw);
-err_msix_misc_unroll:
- ice_free_irq_msix_misc(pf);
-err_init_interrupt_unroll:
- ice_clear_interrupt_scheme(pf);
-err_init_vsi_unroll:
- devm_kfree(dev, pf->vsi);
-err_init_pf_unroll:
- ice_deinit_pf(pf);
- ice_devlink_destroy_regions(pf);
- ice_deinit_hw(hw);
-err_exit_unroll:
- pci_disable_pcie_error_reporting(pdev);
+
+ ice_deinit_pf_sw(pf);
+ ice_dealloc_vsis(pf);
+ ice_deinit_dev(pf);
+}
+
+/**
+ * ice_load - load PF by initializing HW and starting VSI
+ * @pf: pointer to the pf instance
+ */
+int ice_load(struct ice_pf *pf)
+{
+ struct ice_vsi_cfg_params params = {};
+ struct ice_vsi *vsi;
+ int err;
+
+ err = ice_reset(&pf->hw, ICE_RESET_PFR);
+ if (err)
+ return err;
+
+ err = ice_init_dev(pf);
+ if (err)
+ return err;
+
+ vsi = ice_get_main_vsi(pf);
+
+ params = ice_vsi_to_params(vsi);
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ err = ice_vsi_cfg(vsi, &params);
+ if (err)
+ goto err_vsi_cfg;
+
+ err = ice_start_eth(ice_get_main_vsi(pf));
+ if (err)
+ goto err_start_eth;
+
+ err = ice_init_rdma(pf);
+ if (err)
+ goto err_init_rdma;
+
+ ice_init_features(pf);
+ ice_service_task_restart(pf);
+
+ clear_bit(ICE_DOWN, pf->state);
+
+ return 0;
+
+err_init_rdma:
+ ice_vsi_close(ice_get_main_vsi(pf));
+err_start_eth:
+ ice_vsi_decfg(ice_get_main_vsi(pf));
+err_vsi_cfg:
+ ice_deinit_dev(pf);
+ return err;
+}
+
+/**
+ * ice_unload - unload PF by stopping VSI and deinitializing HW
+ * @pf: pointer to the pf instance
+ */
+void ice_unload(struct ice_pf *pf)
+{
+ ice_deinit_features(pf);
+ ice_deinit_rdma(pf);
+ ice_stop_eth(ice_get_main_vsi(pf));
+ ice_vsi_decfg(ice_get_main_vsi(pf));
+ ice_deinit_dev(pf);
+}
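/*
 * Illustrative sketch, not part of this patch: ice_load()/ice_unload() are
 * assumed here to back devlink reload, so a PF can be torn down to its
 * post-probe state and reinitialized without unbinding the PCI driver.
 * The callback names and wiring below are examples only.
 */
static int example_reload_down(struct devlink *devlink, bool netns_change,
			       enum devlink_reload_action action,
			       enum devlink_reload_limit limit,
			       struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	ice_unload(pf);
	return 0;
}

static int example_reload_up(struct devlink *devlink,
			     enum devlink_reload_action action,
			     enum devlink_reload_limit limit,
			     u32 *actions_performed,
			     struct netlink_ext_ack *extack)
{
	struct ice_pf *pf = devlink_priv(devlink);

	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
	return ice_load(pf);
}

static const struct devlink_ops example_devlink_ops = {
	.reload_actions	= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
	.reload_down	= example_reload_down,
	.reload_up	= example_reload_up,
};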
+
+/**
+ * ice_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ice_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int
+ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+
+ if (pdev->is_virtfn) {
+ dev_err(dev, "can't probe a virtual function\n");
+ return -EINVAL;
+ }
+
+ /* this driver uses devres, see
+ * Documentation/driver-api/driver-model/devres.rst
+ */
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
+ if (err) {
+ dev_err(dev, "BAR0 I/O map error %d\n", err);
+ return err;
+ }
+
+ pf = ice_allocate_pf(dev);
+ if (!pf)
+ return -ENOMEM;
+
+ /* initialize Auxiliary index to invalid value */
+ pf->aux_idx = -1;
+
+ /* set up for high or low DMA */
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(dev, "DMA configuration failed: 0x%x\n", err);
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ pf->pdev = pdev;
+ pci_set_drvdata(pdev, pf);
+ set_bit(ICE_DOWN, pf->state);
+ /* Disable service task until DOWN bit is cleared */
+ set_bit(ICE_SERVICE_DIS, pf->state);
+
+ hw = &pf->hw;
+ hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
+ pci_save_state(pdev);
+
+ hw->back = pf;
+ hw->port_info = NULL;
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+ hw->bus.device = PCI_SLOT(pdev->devfn);
+ hw->bus.func = PCI_FUNC(pdev->devfn);
+ ice_set_ctrlq_len(hw);
+
+ pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
+
+#ifndef CONFIG_DYNAMIC_DEBUG
+ if (debug < -1)
+ hw->debug_mask = debug;
+#endif
+
+ err = ice_init(pf);
+ if (err)
+ goto err_init;
+
+ err = ice_init_eth(pf);
+ if (err)
+ goto err_init_eth;
+
+ err = ice_init_rdma(pf);
+ if (err)
+ goto err_init_rdma;
+
+ err = ice_init_devlink(pf);
+ if (err)
+ goto err_init_devlink;
+
+ ice_init_features(pf);
+
+ return 0;
+
+err_init_devlink:
+ ice_deinit_rdma(pf);
+err_init_rdma:
+ ice_deinit_eth(pf);
+err_init_eth:
+ ice_deinit(pf);
+err_init:
pci_disable_device(pdev);
return err;
}
@@ -4803,9 +5289,9 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
u8 mac_addr[ETH_ALEN];
struct ice_vsi *vsi;
+ int status;
u8 flags;
if (!pf->wol_ena)
@@ -4827,9 +5313,8 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf)
status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
if (status)
- dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+ dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
}
/**
@@ -4841,46 +5326,33 @@ static void ice_remove(struct pci_dev *pdev)
struct ice_pf *pf = pci_get_drvdata(pdev);
int i;
- ice_devlink_unregister(pf);
for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
if (!ice_is_reset_in_progress(pf->state))
break;
msleep(100);
}
- ice_tc_indir_block_remove(pf);
-
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
}
ice_service_task_stop(pf);
-
ice_aq_cancel_waiting_tasks(pf);
- ice_unplug_aux_dev(pf);
- if (pf->aux_idx >= 0)
- ida_free(&ice_aux_ida, pf->aux_idx);
set_bit(ICE_DOWN, pf->state);
- mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
- ice_deinit_lag(pf);
- if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_release(pf);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
- ice_setup_mc_magic_wake(pf);
+ ice_deinit_features(pf);
+ ice_deinit_devlink(pf);
+ ice_deinit_rdma(pf);
+ ice_deinit_eth(pf);
+ ice_deinit(pf);
+
ice_vsi_release_all(pf);
+
+ ice_setup_mc_magic_wake(pf);
ice_set_wake(pf);
- ice_free_irq_msix_misc(pf);
- ice_for_each_vsi(pf, i) {
- if (!pf->vsi[i])
- continue;
- ice_vsi_free_q_vectors(pf->vsi[i]);
- }
- ice_deinit_pf(pf);
- ice_devlink_destroy_regions(pf);
- ice_deinit_hw(&pf->hw);
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
@@ -4888,8 +5360,6 @@ static void ice_remove(struct pci_dev *pdev)
*/
ice_reset(&pf->hw, ICE_RESET_PFR);
pci_wait_for_pending_transaction(pdev);
- ice_clear_interrupt_scheme(pf);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
@@ -5052,7 +5522,6 @@ static int __maybe_unused ice_suspend(struct device *dev)
continue;
ice_vsi_free_q_vectors(pf->vsi[v]);
}
- ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
ice_clear_interrupt_scheme(pf);
pci_save_state(pdev);
@@ -5182,12 +5651,6 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_DISCONNECT;
}
- err = pci_aer_clear_nonfatal_status(pdev);
- if (err)
- dev_dbg(&pdev->dev, "pci_aer_clear_nonfatal_status() failed, error %d\n",
- err);
- /* non-fatal, continue */
-
return result;
}
@@ -5282,6 +5745,7 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
/* required last entry */
{ 0, }
};
@@ -5323,7 +5787,7 @@ static int __init ice_module_init(void)
pr_info("%s\n", ice_driver_string);
pr_info("%s\n", ice_copyright);
- ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
+ ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
@@ -5367,11 +5831,10 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct sockaddr *addr = pi;
- enum ice_status status;
u8 old_mac[ETH_ALEN];
u8 flags = 0;
- int err = 0;
u8 *mac;
+ int err;
mac = (u8 *)addr->sa_data;
@@ -5403,24 +5866,27 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
netif_addr_unlock_bh(netdev);
/* Clean up old MAC filter. Not an error if old filter doesn't exist */
- status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
- if (status && status != ICE_ERR_DOES_NOT_EXIST) {
+ err = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
+ if (err && err != -ENOENT) {
err = -EADDRNOTAVAIL;
goto err_update_filters;
}
/* Add filter for new MAC. If filter exists, return success */
- status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
- if (status == ICE_ERR_ALREADY_EXISTS)
+ err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
+ if (err == -EEXIST) {
/* Although this MAC filter is already present in hardware it's
* possible in some cases (e.g. bonding) that dev_addr was
* modified outside of the driver and needs to be restored back
* to this value.
*/
netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
- else if (status)
+
+ return 0;
+ } else if (err) {
/* error if the new filter addition failed */
err = -EADDRNOTAVAIL;
+ }
err_update_filters:
if (err) {
@@ -5437,10 +5903,10 @@ err_update_filters:
/* write new MAC address to the firmware */
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
- status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
- if (status) {
- netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %s\n",
- mac, ice_stat_str(status));
+ err = ice_aq_manage_mac_write(hw, mac, flags, NULL);
+ if (err) {
+ netdev_err(netdev, "can't set MAC %pM. write to firmware failed error %d\n",
+ mac, err);
}
return 0;
}
@@ -5482,8 +5948,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- enum ice_status status;
u16 q_handle;
+ int status;
u8 tc;
/* Validate maxrate requested is within permitted range */
@@ -5503,13 +5969,11 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
else
status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
q_handle, ICE_MAX_BW, maxrate * 1000);
- if (status) {
- netdev_err(netdev, "Unable to set Tx max rate, error %s\n",
- ice_stat_str(status));
- return -EIO;
- }
+ if (status)
+ netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
+ status);
- return 0;
+ return status;
}
/**
@@ -5559,11 +6023,12 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
* @dev: the net device pointer
* @addr: the MAC address entry being added
* @vid: VLAN ID
+ * @extack: netlink extended ack
*/
static int
ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr,
- __always_unused u16 vid)
+ __always_unused u16 vid, struct netlink_ext_ack *extack)
{
int err;
@@ -5582,6 +6047,251 @@ ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
return err;
}
+#define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_RX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
+#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_STAG_RX)
+
+#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_STAG_FILTER)
+
+/**
+ * ice_fix_features - fix the netdev features flags based on device limitations
+ * @netdev: ptr to the netdev that flags are being fixed on
+ * @features: features that need to be checked and possibly fixed
+ *
+ * Make sure any fixups are made to features in this callback. This keeps the
+ * driver from having to check for unsupported configurations throughout the
+ * driver, because that is the responsibility of this callback.
+ *
+ * Single VLAN Mode (SVM) Supported Features:
+ * NETIF_F_HW_VLAN_CTAG_FILTER
+ * NETIF_F_HW_VLAN_CTAG_RX
+ * NETIF_F_HW_VLAN_CTAG_TX
+ *
+ * Double VLAN Mode (DVM) Supported Features:
+ * NETIF_F_HW_VLAN_CTAG_FILTER
+ * NETIF_F_HW_VLAN_CTAG_RX
+ * NETIF_F_HW_VLAN_CTAG_TX
+ *
+ * NETIF_F_HW_VLAN_STAG_FILTER
+ * NETIF_F_HW_VLAN_STAG_RX
+ * NETIF_F_HW_VLAN_STAG_TX
+ *
+ * Features that need fixing:
+ * Cannot simultaneously enable CTAG and STAG stripping and/or insertion.
+ * These are mutually exclusive as the VSI context cannot support multiple
+ * VLAN ethertypes simultaneously for stripping and/or insertion. If this
+ * is not done, then default to clearing the requested STAG offload
+ * settings.
+ *
+ * All supported filtering has to be enabled or disabled together. For
+ * example, in DVM, CTAG and STAG filtering have to be enabled and disabled
+ * together. If this is not done, then default to VLAN filtering disabled.
+ * These are mutually exclusive as there is currently no way to
+ * enable/disable VLAN filtering based on VLAN ethertype when using VLAN
+ * prune rules.
+ */
+static netdev_features_t
+ice_fix_features(struct net_device *netdev, netdev_features_t features)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ netdev_features_t req_vlan_fltr, cur_vlan_fltr;
+ bool cur_ctag, cur_stag, req_ctag, req_stag;
+
+ cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES;
+ cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
+ cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
+
+ req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES;
+ req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER;
+ req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER;
+
+ if (req_vlan_fltr != cur_vlan_fltr) {
+ if (ice_is_dvm_ena(&np->vsi->back->hw)) {
+ if (req_ctag && req_stag) {
+ features |= NETIF_VLAN_FILTERING_FEATURES;
+ } else if (!req_ctag && !req_stag) {
+ features &= ~NETIF_VLAN_FILTERING_FEATURES;
+ } else if ((!cur_ctag && req_ctag && !cur_stag) ||
+ (!cur_stag && req_stag && !cur_ctag)) {
+ features |= NETIF_VLAN_FILTERING_FEATURES;
+ netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n");
+ } else if ((cur_ctag && !req_ctag && cur_stag) ||
+ (cur_stag && !req_stag && cur_ctag)) {
+ features &= ~NETIF_VLAN_FILTERING_FEATURES;
+ netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n");
+ }
+ } else {
+ if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER)
+ netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n");
+
+ if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER)
+ features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+ }
+
+ if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
+ (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))) {
+ netdev_warn(netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
+ features &= ~(NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_TX);
+ }
+
+ if (!(netdev->features & NETIF_F_RXFCS) &&
+ (features & NETIF_F_RXFCS) &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES) &&
+ !ice_vsi_has_non_zero_vlans(np->vsi)) {
+ netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
+ features &= ~NETIF_VLAN_STRIPPING_FEATURES;
+ }
+
+ return features;
+}
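/*
 * Illustrative sketch, not part of this patch: how a fix_features/set_features
 * pair is consumed by the core. When userspace toggles offloads (for example
 * via ethtool -K), __netdev_update_features() first lets the driver sanitize
 * the requested bits and only then asks it to apply them, so ice_set_features()
 * never sees a CTAG/STAG combination rejected above. The struct below is only
 * an assumed example of the wiring; the real hookup lives in ice_netdev_ops.
 */
static const struct net_device_ops example_netdev_ops = {
	.ndo_fix_features = ice_fix_features,	/* sanitize requested feature bits */
	.ndo_set_features = ice_set_features,	/* apply the already-fixed bits */
};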
+
+/**
+ * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
+ * @vsi: PF's VSI
+ * @features: features used to determine VLAN offload settings
+ *
+ * First, determine the vlan_ethertype based on the VLAN offload bits in
+ * features. Then determine if stripping and insertion should be enabled or
+ * disabled. Finally enable or disable VLAN stripping and insertion.
+ */
+static int
+ice_set_vlan_offload_features(struct ice_vsi *vsi, netdev_features_t features)
+{
+ bool enable_stripping = true, enable_insertion = true;
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int strip_err = 0, insert_err = 0;
+ u16 vlan_ethertype = 0;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
+ vlan_ethertype = ETH_P_8021AD;
+ else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
+ vlan_ethertype = ETH_P_8021Q;
+
+ if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
+ enable_stripping = false;
+ if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
+ enable_insertion = false;
+
+ if (enable_stripping)
+ strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype);
+ else
+ strip_err = vlan_ops->dis_stripping(vsi);
+
+ if (enable_insertion)
+ insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype);
+ else
+ insert_err = vlan_ops->dis_insertion(vsi);
+
+ if (strip_err || insert_err)
+ return -EIO;
+
+ return 0;
+}
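/*
 * Worked example of the mapping implemented above (illustrative comment only):
 *
 *   requested offload bits            vlan_ethertype   stripping  insertion
 *   CTAG_RX | CTAG_TX                 0x8100 (8021Q)   on         on
 *   STAG_RX | STAG_TX                 0x88A8 (8021AD)  on         on
 *   CTAG_TX only                      0x8100 (8021Q)   off        on
 *   none                              0                off        off
 *
 * ice_fix_features() guarantees CTAG and STAG stripping/insertion are never
 * requested together, so checking the STAG bits first is safe.
 */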
+
+/**
+ * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
+ * @vsi: PF's VSI
+ * @features: features used to determine VLAN filtering settings
+ *
+ * Enable or disable Rx VLAN filtering based on the VLAN filtering bits in the
+ * features.
+ */
+static int
+ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ int err = 0;
+
+ /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
+ * if either bit is set
+ */
+ if (features &
+ (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
+ err = vlan_ops->ena_rx_filtering(vsi);
+ else
+ err = vlan_ops->dis_rx_filtering(vsi);
+
+ return err;
+}
+
+/**
+ * ice_set_vlan_features - set VLAN settings based on suggested feature set
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ *
+ * Only update VLAN settings if the requested_vlan_features are different from
+ * the current_vlan_features.
+ */
+static int
+ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
+{
+ netdev_features_t current_vlan_features, requested_vlan_features;
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ int err;
+
+ current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
+ requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
+ if (current_vlan_features ^ requested_vlan_features) {
+ if ((features & NETIF_F_RXFCS) &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES)) {
+ dev_err(ice_pf_to_dev(vsi->back),
+ "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
+ return -EIO;
+ }
+
+ err = ice_set_vlan_offload_features(vsi, features);
+ if (err)
+ return err;
+ }
+
+ current_vlan_features = netdev->features &
+ NETIF_VLAN_FILTERING_FEATURES;
+ requested_vlan_features = features & NETIF_VLAN_FILTERING_FEATURES;
+ if (current_vlan_features ^ requested_vlan_features) {
+ err = ice_set_vlan_filtering_features(vsi, features);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_set_loopback - turn on/off loopback mode on underlying PF
+ * @vsi: ptr to VSI
+ * @ena: flag to indicate the on/off setting
+ */
+static int ice_set_loopback(struct ice_vsi *vsi, bool ena)
+{
+ bool if_running = netif_running(vsi->netdev);
+ int ret;
+
+ if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
+ ret = ice_down(vsi);
+ if (ret) {
+ netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n");
+ return ret;
+ }
+ }
+ ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL);
+ if (ret)
+ netdev_err(vsi->netdev, "Failed to toggle loopback state\n");
+ if (if_running)
+ ret = ice_up(vsi);
+
+ return ret;
+}
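/*
 * Illustrative usage, not part of this patch: with NETIF_F_LOOPBACK exposed,
 * MAC loopback can be toggled from userspace roughly as
 *
 *	ethtool -K <ifname> loopback on
 *
 * (feature string name assumed); the request reaches this helper via
 * ice_set_features() and the NETIF_F_LOOPBACK branch added below.
 */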
+
/**
* ice_set_features - set the netdev feature flags
* @netdev: ptr to the netdev being adjusted
@@ -5590,61 +6300,58 @@ ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
static int
ice_set_features(struct net_device *netdev, netdev_features_t features)
{
+ netdev_features_t changed = netdev->features ^ features;
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int ret = 0;
/* Don't set any netdev advanced features with device in Safe Mode */
- if (ice_is_safe_mode(vsi->back)) {
- dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
+ if (ice_is_safe_mode(pf)) {
+ dev_err(ice_pf_to_dev(pf),
+ "Device is in Safe Mode - not enabling advanced netdev features\n");
return ret;
}
/* Do not change setting during reset */
if (ice_is_reset_in_progress(pf->state)) {
- dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
+ dev_err(ice_pf_to_dev(pf),
+ "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
return -EBUSY;
}
/* Multiple features can be changed in one call so keep features in
* separate if/else statements to guarantee each feature is checked
*/
- if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
- ice_vsi_manage_rss_lut(vsi, true);
- else if (!(features & NETIF_F_RXHASH) &&
- netdev->features & NETIF_F_RXHASH)
- ice_vsi_manage_rss_lut(vsi, false);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
- ret = ice_vsi_manage_vlan_stripping(vsi, true);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
- ret = ice_vsi_manage_vlan_stripping(vsi, false);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
- ret = ice_vsi_manage_vlan_insertion(vsi);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
- ret = ice_vsi_manage_vlan_insertion(vsi);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- ret = ice_cfg_vlan_pruning(vsi, true);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- ret = ice_cfg_vlan_pruning(vsi, false);
-
- if ((features & NETIF_F_NTUPLE) &&
- !(netdev->features & NETIF_F_NTUPLE)) {
- ice_vsi_manage_fdir(vsi, true);
- ice_init_arfs(vsi);
- } else if (!(features & NETIF_F_NTUPLE) &&
- (netdev->features & NETIF_F_NTUPLE)) {
- ice_vsi_manage_fdir(vsi, false);
- ice_clear_arfs(vsi);
+ if (changed & NETIF_F_RXHASH)
+ ice_vsi_manage_rss_lut(vsi, !!(features & NETIF_F_RXHASH));
+
+ ret = ice_set_vlan_features(netdev, features);
+ if (ret)
+ return ret;
+
+ /* Turn on reception of the FCS (aka CRC). After this flag is set, the
+ * packet data will include the appended 4-byte CRC.
+ */
+ if (changed & NETIF_F_RXFCS) {
+ if ((features & NETIF_F_RXFCS) &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES)) {
+ dev_err(ice_pf_to_dev(vsi->back),
+ "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
+ return -EIO;
+ }
+
+ ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
+ ret = ice_down_up(vsi);
+ if (ret)
+ return ret;
+ }
+
+ if (changed & NETIF_F_NTUPLE) {
+ bool ena = !!(features & NETIF_F_NTUPLE);
+
+ ice_vsi_manage_fdir(vsi, ena);
+ ena ? ice_init_arfs(vsi) : ice_clear_arfs(vsi);
}
/* don't turn off hw_tc_offload when ADQ is already enabled */
@@ -5653,46 +6360,52 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
return -EACCES;
}
- if ((features & NETIF_F_HW_TC) &&
- !(netdev->features & NETIF_F_HW_TC))
- set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
- else
- clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ if (changed & NETIF_F_HW_TC) {
+ bool ena = !!(features & NETIF_F_HW_TC);
+
+ ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) :
+ clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ }
+
+ if (changed & NETIF_F_LOOPBACK)
+ ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
return ret;
}
/**
- * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
+ * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
* @vsi: VSI to setup VLAN properties for
*/
static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
{
- int ret = 0;
+ int err;
- if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
- ret = ice_vsi_manage_vlan_stripping(vsi, true);
- if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
- ret = ice_vsi_manage_vlan_insertion(vsi);
+ err = ice_set_vlan_offload_features(vsi, vsi->netdev->features);
+ if (err)
+ return err;
- return ret;
+ err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features);
+ if (err)
+ return err;
+
+ return ice_vsi_add_vlan_zero(vsi);
}
/**
- * ice_vsi_cfg - Setup the VSI
+ * ice_vsi_cfg_lan - Setup the VSI LAN-related config
* @vsi: the VSI being configured
*
* Return 0 on success and negative value on error
*/
-int ice_vsi_cfg(struct ice_vsi *vsi)
+int ice_vsi_cfg_lan(struct ice_vsi *vsi)
{
int err;
- if (vsi->netdev) {
+ if (vsi->netdev && vsi->type == ICE_VSI_PF) {
ice_set_rx_mode(vsi->netdev);
err = ice_vsi_vlan_setup(vsi);
-
if (err)
return err;
}
@@ -5875,16 +6588,20 @@ static int ice_up_complete(struct ice_vsi *vsi)
if (vsi->port_info &&
(vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
- vsi->netdev) {
+ vsi->netdev && vsi->type == ICE_VSI_PF) {
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
+ ice_ptp_link_change(pf, pf->hw.pf_id, true);
}
- /* clear this now, and the first stats read will be used as baseline */
- vsi->stat_offsets_loaded = false;
+ /* Perform an initial read of the statistics registers now to
+ * set the baseline so counters are ready when interface is up
+ */
+ ice_update_eth_stats(vsi);
- ice_service_task_schedule(pf);
+ if (vsi->type == ICE_VSI_PF)
+ ice_service_task_schedule(pf);
return 0;
}
@@ -5897,7 +6614,7 @@ int ice_up(struct ice_vsi *vsi)
{
int err;
- err = ice_vsi_cfg(vsi);
+ err = ice_vsi_cfg_lan(vsi);
if (!err)
err = ice_up_complete(vsi);
@@ -5914,17 +6631,17 @@ int ice_up(struct ice_vsi *vsi)
* This function fetches stats from the ring considering the atomic operations
* that needs to be performed to read u64 values in 32 bit machine.
*/
-static void
-ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
- u64 *pkts, u64 *bytes)
+void
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
+ struct ice_q_stats stats, u64 *pkts, u64 *bytes)
{
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(syncp);
+ start = u64_stats_fetch_begin(syncp);
*pkts = stats.pkts;
*bytes = stats.bytes;
- } while (u64_stats_fetch_retry_irq(syncp, start));
+ } while (u64_stats_fetch_retry(syncp, start));
}
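/*
 * Illustrative sketch, not part of this patch: the writer side that pairs
 * with the u64_stats_fetch_begin()/u64_stats_fetch_retry() loop above. The
 * helper name is assumed; the point is that ring counters are bumped inside
 * an update section so 32-bit readers never observe a torn 64-bit value.
 */
static void example_bump_ring_stats(struct ice_ring_stats *rs,
				    unsigned int pkts, unsigned int bytes)
{
	u64_stats_update_begin(&rs->syncp);
	rs->stats.pkts += pkts;
	rs->stats.bytes += bytes;
	u64_stats_update_end(&rs->syncp);
}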
/**
@@ -5946,13 +6663,16 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
u64 pkts = 0, bytes = 0;
ring = READ_ONCE(rings[i]);
- if (ring)
- ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+ if (!ring || !ring->ring_stats)
+ continue;
+ ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp,
+ ring->ring_stats->stats, &pkts,
+ &bytes);
vsi_stats->tx_packets += pkts;
vsi_stats->tx_bytes += bytes;
- vsi->tx_restart += ring->tx_stats.restart_q;
- vsi->tx_busy += ring->tx_stats.tx_busy;
- vsi->tx_linearize += ring->tx_stats.tx_linearize;
+ vsi->tx_restart += ring->ring_stats->tx_stats.restart_q;
+ vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy;
+ vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize;
}
}
@@ -5962,6 +6682,7 @@ ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi,
*/
static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
+ struct rtnl_link_stats64 *net_stats, *stats_prev;
struct rtnl_link_stats64 *vsi_stats;
u64 pkts, bytes;
int i;
@@ -5986,12 +6707,16 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
+ struct ice_ring_stats *ring_stats;
- ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
+ ring_stats = ring->ring_stats;
+ ice_fetch_u64_stats_per_ring(&ring_stats->syncp,
+ ring_stats->stats, &pkts,
+ &bytes);
vsi_stats->rx_packets += pkts;
vsi_stats->rx_bytes += bytes;
- vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
- vsi->rx_page_failed += ring->rx_stats.alloc_page_failed;
+ vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed;
+ vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed;
}
/* update XDP Tx rings counters */
@@ -6001,10 +6726,28 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
rcu_read_unlock();
- vsi->net_stats.tx_packets = vsi_stats->tx_packets;
- vsi->net_stats.tx_bytes = vsi_stats->tx_bytes;
- vsi->net_stats.rx_packets = vsi_stats->rx_packets;
- vsi->net_stats.rx_bytes = vsi_stats->rx_bytes;
+ net_stats = &vsi->net_stats;
+ stats_prev = &vsi->net_stats_prev;
+
+ /* clear prev counters after reset */
+ if (vsi_stats->tx_packets < stats_prev->tx_packets ||
+ vsi_stats->rx_packets < stats_prev->rx_packets) {
+ stats_prev->tx_packets = 0;
+ stats_prev->tx_bytes = 0;
+ stats_prev->rx_packets = 0;
+ stats_prev->rx_bytes = 0;
+ }
+
+ /* update netdev counters */
+ net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
+ net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
+ net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
+ net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
+
+ stats_prev->tx_packets = vsi_stats->tx_packets;
+ stats_prev->tx_bytes = vsi_stats->tx_bytes;
+ stats_prev->rx_packets = vsi_stats->rx_packets;
+ stats_prev->rx_bytes = vsi_stats->rx_bytes;
kfree(vsi_stats);
}
@@ -6066,6 +6809,9 @@ void ice_update_pf_stats(struct ice_pf *pf)
prev_ps = &pf->stats_prev;
cur_ps = &pf->stats;
+ if (ice_is_reset_in_progress(pf->state))
+ pf->stat_prev_loaded = false;
+
ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
&prev_ps->eth.rx_bytes,
&cur_ps->eth.rx_bytes);
@@ -6269,15 +7015,18 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
/**
* ice_down - Shutdown the connection
* @vsi: The VSI being stopped
+ *
+ * Caller of this function is expected to set the vsi->state ICE_VSI_DOWN bit
*/
int ice_down(struct ice_vsi *vsi)
{
- int i, tx_err, rx_err, link_err = 0;
+ int i, tx_err, rx_err, vlan_err = 0;
+
+ WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
- /* Caller of this function is expected to set the
- * vsi->state ICE_DOWN bit
- */
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ vlan_err = ice_vsi_del_vlan_zero(vsi);
+ ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
} else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
@@ -6304,20 +7053,13 @@ int ice_down(struct ice_vsi *vsi)
ice_napi_disable_all(vsi);
- if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
- link_err = ice_force_phys_link_state(vsi, false);
- if (link_err)
- netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
- vsi->vsi_num, link_err);
- }
-
ice_for_each_txq(vsi, i)
ice_clean_tx_ring(vsi->tx_rings[i]);
ice_for_each_rxq(vsi, i)
ice_clean_rx_ring(vsi->rx_rings[i]);
- if (tx_err || rx_err || link_err) {
+ if (tx_err || rx_err || vlan_err) {
netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
return -EIO;
@@ -6327,6 +7069,31 @@ int ice_down(struct ice_vsi *vsi)
}
/**
+ * ice_down_up - shutdown the VSI connection and bring it up
+ * @vsi: the VSI to be reconnected
+ */
+int ice_down_up(struct ice_vsi *vsi)
+{
+ int ret;
+
+ /* if DOWN already set, nothing to do */
+ if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
+ return 0;
+
+ ret = ice_down(vsi);
+ if (ret)
+ return ret;
+
+ ret = ice_up(vsi);
+ if (ret) {
+ netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
* ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
* @vsi: VSI having resources allocated
*
@@ -6415,7 +7182,7 @@ int ice_vsi_open_ctrl(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- err = ice_vsi_cfg(vsi);
+ err = ice_vsi_cfg_lan(vsi);
if (err)
goto err_setup_rx;
@@ -6469,7 +7236,7 @@ int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- err = ice_vsi_cfg(vsi);
+ err = ice_vsi_cfg_lan(vsi);
if (err)
goto err_setup_rx;
@@ -6479,6 +7246,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+
if (vsi->type == ICE_VSI_PF) {
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
@@ -6543,7 +7312,6 @@ static void ice_vsi_release_all(struct ice_pf *pf)
static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
struct device *dev = ice_pf_to_dev(pf);
- enum ice_status status;
int i, err;
ice_for_each_vsi(pf, i) {
@@ -6553,7 +7321,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
continue;
/* rebuild the VSI */
- err = ice_vsi_rebuild(vsi, true);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
if (err) {
dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
err, vsi->idx, ice_vsi_type_str(type));
@@ -6561,12 +7329,11 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
}
/* replay filters for the VSI */
- status = ice_replay_vsi(&pf->hw, vsi->idx);
- if (status) {
- dev_err(dev, "replay VSI failed, status %s, VSI index %d, type %s\n",
- ice_stat_str(status), vsi->idx,
- ice_vsi_type_str(type));
- return -EIO;
+ err = ice_replay_vsi(&pf->hw, vsi->idx);
+ if (err) {
+ dev_err(dev, "replay VSI failed, error %d, VSI index %d, type %s\n",
+ err, vsi->idx, ice_vsi_type_str(type));
+ return err;
}
/* Re-map HW VSI number, using VSI handle that has been
@@ -6629,7 +7396,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- enum ice_status ret;
+ bool dvm;
int err;
if (test_bit(ICE_DOWN, pf->state))
@@ -6637,10 +7404,20 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
- ret = ice_init_all_ctrlq(hw);
- if (ret) {
- dev_err(dev, "control queues init failed %s\n",
- ice_stat_str(ret));
+#define ICE_EMP_RESET_SLEEP_MS 5000
+ if (reset_type == ICE_RESET_EMPR) {
+ /* If an EMP reset has occurred, any previously pending flash
+ * update will have completed. We no longer know whether or
+ * not the NVM update EMP reset is restricted.
+ */
+ pf->fw_emp_reset_disabled = false;
+
+ msleep(ICE_EMP_RESET_SLEEP_MS);
+ }
+
+ err = ice_init_all_ctrlq(hw);
+ if (err) {
+ dev_err(dev, "control queues init failed %d\n", err);
goto err_init_ctrlq;
}
@@ -6654,39 +7431,38 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_load_pkg(NULL, pf);
}
- ret = ice_clear_pf_cfg(hw);
- if (ret) {
- dev_err(dev, "clear PF configuration failed %s\n",
- ice_stat_str(ret));
+ err = ice_clear_pf_cfg(hw);
+ if (err) {
+ dev_err(dev, "clear PF configuration failed %d\n", err);
goto err_init_ctrlq;
}
- if (pf->first_sw->dflt_vsi_ena)
- dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
- /* clear the default VSI configuration if it exists */
- pf->first_sw->dflt_vsi = NULL;
- pf->first_sw->dflt_vsi_ena = false;
-
ice_clear_pxe_mode(hw);
- ret = ice_init_nvm(hw);
- if (ret) {
- dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret));
+ err = ice_init_nvm(hw);
+ if (err) {
+ dev_err(dev, "ice_init_nvm failed %d\n", err);
goto err_init_ctrlq;
}
- ret = ice_get_caps(hw);
- if (ret) {
- dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
+ err = ice_get_caps(hw);
+ if (err) {
+ dev_err(dev, "ice_get_caps failed %d\n", err);
goto err_init_ctrlq;
}
- ret = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
- if (ret) {
- dev_err(dev, "set_mac_cfg failed %s\n", ice_stat_str(ret));
+ err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
+ if (err) {
+ dev_err(dev, "set_mac_cfg failed %d\n", err);
goto err_init_ctrlq;
}
+ dvm = ice_is_dvm_ena(hw);
+
+ err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
+ if (err)
+ goto err_init_ctrlq;
+
err = ice_sched_init_port(hw->port_info);
if (err)
goto err_sched_init_port;
@@ -6721,7 +7497,10 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
* fail.
*/
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_init(pf);
+ ice_ptp_reset(pf);
+
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_init(pf);
/* rebuild PF VSI */
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
@@ -6730,6 +7509,10 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
+ /* configure PTP timestamping after VSI rebuild */
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_cfg_timestamp(pf, false);
+
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
if (err) {
dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
@@ -6766,10 +7549,10 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_update_pf_netdev_link(pf);
/* tell the firmware we are up */
- ret = ice_send_version(pf);
- if (ret) {
- dev_err(dev, "Rebuild failed due to error sending driver version: %s\n",
- ice_stat_str(ret));
+ err = ice_send_version(pf);
+ if (err) {
+ dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
+ err);
goto err_vsi_rebuild;
}
@@ -6794,18 +7577,6 @@ clear_recovery:
}
/**
- * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
- * @vsi: Pointer to VSI structure
- */
-static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
-{
- if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
- return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
- else
- return ICE_RXBUF_3072;
-}
-
-/**
* ice_change_mtu - NDO callback to change the MTU
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -6817,7 +7588,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- struct iidc_event *event;
+ struct bpf_prog *prog;
u8 count = 0;
int err = 0;
@@ -6826,7 +7597,8 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
- if (ice_is_xdp_ena_vsi(vsi)) {
+ prog = vsi->xdp_prog;
+ if (prog && !prog->aux->xdp_has_frags) {
int frame_size = ice_max_xdp_frame_size(vsi);
if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
@@ -6834,6 +7606,12 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
frame_size - ICE_ETH_PKT_HDR_PAD);
return -EINVAL;
}
+ } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
+ if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
+ netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
+ ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
+ return -EINVAL;
+ }
}
/* if a reset is in progress, wait for some time for it to complete */
@@ -6852,14 +7630,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- return -ENOMEM;
-
- set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
- ice_send_event_to_aux(pf, event);
- clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
-
netdev->mtu = (unsigned int)new_mtu;
/* if VSI is up, bring it down and then back up */
@@ -6867,21 +7637,18 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
err = ice_down(vsi);
if (err) {
netdev_err(netdev, "change MTU if_down err %d\n", err);
- goto event_after;
+ return err;
}
err = ice_up(vsi);
if (err) {
netdev_err(netdev, "change MTU if_up err %d\n", err);
- goto event_after;
+ return err;
}
}
netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
-event_after:
- set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
- ice_send_event_to_aux(pf, event);
- kfree(event);
+ set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
return err;
}
@@ -6950,78 +7717,6 @@ const char *ice_aq_str(enum ice_aq_err aq_err)
}
/**
- * ice_stat_str - convert status err code to a string
- * @stat_err: the status error code to convert
- */
-const char *ice_stat_str(enum ice_status stat_err)
-{
- switch (stat_err) {
- case ICE_SUCCESS:
- return "OK";
- case ICE_ERR_PARAM:
- return "ICE_ERR_PARAM";
- case ICE_ERR_NOT_IMPL:
- return "ICE_ERR_NOT_IMPL";
- case ICE_ERR_NOT_READY:
- return "ICE_ERR_NOT_READY";
- case ICE_ERR_NOT_SUPPORTED:
- return "ICE_ERR_NOT_SUPPORTED";
- case ICE_ERR_BAD_PTR:
- return "ICE_ERR_BAD_PTR";
- case ICE_ERR_INVAL_SIZE:
- return "ICE_ERR_INVAL_SIZE";
- case ICE_ERR_DEVICE_NOT_SUPPORTED:
- return "ICE_ERR_DEVICE_NOT_SUPPORTED";
- case ICE_ERR_RESET_FAILED:
- return "ICE_ERR_RESET_FAILED";
- case ICE_ERR_FW_API_VER:
- return "ICE_ERR_FW_API_VER";
- case ICE_ERR_NO_MEMORY:
- return "ICE_ERR_NO_MEMORY";
- case ICE_ERR_CFG:
- return "ICE_ERR_CFG";
- case ICE_ERR_OUT_OF_RANGE:
- return "ICE_ERR_OUT_OF_RANGE";
- case ICE_ERR_ALREADY_EXISTS:
- return "ICE_ERR_ALREADY_EXISTS";
- case ICE_ERR_NVM:
- return "ICE_ERR_NVM";
- case ICE_ERR_NVM_CHECKSUM:
- return "ICE_ERR_NVM_CHECKSUM";
- case ICE_ERR_BUF_TOO_SHORT:
- return "ICE_ERR_BUF_TOO_SHORT";
- case ICE_ERR_NVM_BLANK_MODE:
- return "ICE_ERR_NVM_BLANK_MODE";
- case ICE_ERR_IN_USE:
- return "ICE_ERR_IN_USE";
- case ICE_ERR_MAX_LIMIT:
- return "ICE_ERR_MAX_LIMIT";
- case ICE_ERR_RESET_ONGOING:
- return "ICE_ERR_RESET_ONGOING";
- case ICE_ERR_HW_TABLE:
- return "ICE_ERR_HW_TABLE";
- case ICE_ERR_DOES_NOT_EXIST:
- return "ICE_ERR_DOES_NOT_EXIST";
- case ICE_ERR_FW_DDP_MISMATCH:
- return "ICE_ERR_FW_DDP_MISMATCH";
- case ICE_ERR_AQ_ERROR:
- return "ICE_ERR_AQ_ERROR";
- case ICE_ERR_AQ_TIMEOUT:
- return "ICE_ERR_AQ_TIMEOUT";
- case ICE_ERR_AQ_FULL:
- return "ICE_ERR_AQ_FULL";
- case ICE_ERR_AQ_NO_WORK:
- return "ICE_ERR_AQ_NO_WORK";
- case ICE_ERR_AQ_EMPTY:
- return "ICE_ERR_AQ_EMPTY";
- case ICE_ERR_AQ_FW_CRITICAL:
- return "ICE_ERR_AQ_FW_CRITICAL";
- }
-
- return "ICE_ERR_UNKNOWN";
-}
-
-/**
* ice_set_rss_lut - Set RSS LUT
* @vsi: Pointer to VSI structure
* @lut: Lookup table
@@ -7033,7 +7728,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
{
struct ice_aq_get_set_rss_lut_params params = {};
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!lut)
return -EINVAL;
@@ -7044,14 +7739,11 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
params.lut = lut;
status = ice_aq_set_rss_lut(hw, &params);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7064,20 +7756,17 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
{
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!seed)
return -EINVAL;
status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7092,7 +7781,7 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
{
struct ice_aq_get_set_rss_lut_params params = {};
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!lut)
return -EINVAL;
@@ -7103,14 +7792,11 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
params.lut = lut;
status = ice_aq_get_rss_lut(hw, &params);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7123,20 +7809,17 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
{
struct ice_hw *hw = &vsi->back->hw;
- enum ice_status status;
+ int status;
if (!seed)
return -EINVAL;
status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- return -EIO;
- }
+ if (status)
+ dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
- return 0;
+ return status;
}
/**
@@ -7177,8 +7860,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
struct ice_aqc_vsi_props *vsi_props;
struct ice_hw *hw = &vsi->back->hw;
struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
+ int ret;
vsi_props = &vsi->info;
@@ -7196,12 +7878,10 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %s aq_err %s\n",
- bmode, ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
+ bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
goto out;
}
/* Update sw flags for book keeping */
@@ -7233,7 +7913,6 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
struct ice_pf *pf = np->vsi->back;
struct nlattr *attr, *br_spec;
struct ice_hw *hw = &pf->hw;
- enum ice_status status;
struct ice_sw *pf_sw;
int rem, v, err = 0;
@@ -7267,14 +7946,14 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
/* Update the unicast switch filter rules for the corresponding
* switch of the netdev
*/
- status = ice_update_sw_rule_bridge_mode(hw);
- if (status) {
- netdev_err(dev, "switch rule update failed, mode = %d err %s aq_err %s\n",
- mode, ice_stat_str(status),
+ err = ice_update_sw_rule_bridge_mode(hw);
+ if (err) {
+ netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
+ mode, err,
ice_aq_str(hw->adminq.sq_last_status));
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
- return -EIO;
+ return err;
}
pf_sw->bridge_mode = mode;
@@ -7552,6 +8231,67 @@ ice_validate_mqprio_qopt(struct ice_vsi *vsi,
}
/**
+ * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
+ * @pf: ptr to PF device
+ * @vsi: ptr to VSI
+ */
+static int ice_add_vsi_to_fdir(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ bool added = false;
+ struct ice_hw *hw;
+ int flow;
+
+ if (!(vsi->num_gfltr || vsi->num_bfltr))
+ return -EINVAL;
+
+ hw = &pf->hw;
+ for (flow = 0; flow < ICE_FLTR_PTYPE_MAX; flow++) {
+ struct ice_fd_hw_prof *prof;
+ int tun, status;
+ u64 entry_h;
+
+ if (!(hw->fdir_prof && hw->fdir_prof[flow] &&
+ hw->fdir_prof[flow]->cnt))
+ continue;
+
+ for (tun = 0; tun < ICE_FD_HW_SEG_MAX; tun++) {
+ enum ice_flow_priority prio;
+ u64 prof_id;
+
+ /* add this VSI to FDir profile for this flow */
+ prio = ICE_FLOW_PRIO_NORMAL;
+ prof = hw->fdir_prof[flow];
+ prof_id = flow + tun * ICE_FLTR_PTYPE_MAX;
+ status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id,
+ prof->vsi_h[0], vsi->idx,
+ prio, prof->fdir_seg[tun],
+ &entry_h);
+ if (status) {
+ dev_err(dev, "channel VSI idx %d, not able to add to group %d\n",
+ vsi->idx, flow);
+ continue;
+ }
+
+ prof->entry_h[prof->cnt][tun] = entry_h;
+ }
+
+ /* store VSI for filter replay and delete */
+ prof->vsi_h[prof->cnt] = vsi->idx;
+ prof->cnt++;
+
+ added = true;
+ dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx,
+ flow);
+ }
+
+ if (!added)
+ dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx);
+
+ return 0;
+}
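The loop above registers the channel VSI in one flow director profile per (flow type, tunnel segment) pair. A minimal sketch of that profile-ID layout, using the same constant as the code (the helper name is illustrative, not part of the driver):

/* One block of ICE_FLTR_PTYPE_MAX profile IDs per tunnel segment. */
static u64 fdir_prof_id_sketch(int flow, int tun)
{
	return (u64)flow + (u64)tun * ICE_FLTR_PTYPE_MAX;
}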
+
+/**
* ice_add_channel - add a channel by adding VSI
* @pf: ptr to PF device
* @sw_id: underlying HW switching element ID
@@ -7575,6 +8315,8 @@ static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
return -EINVAL;
}
+ ice_add_vsi_to_fdir(pf, vsi);
+
ch->sw_id = sw_id;
ch->vsi_num = vsi->vsi_num;
ch->info.mapping_flags = vsi->info.mapping_flags;
@@ -7832,7 +8574,7 @@ static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
rule.rid = fltr->rid;
rule.rule_id = fltr->rule_id;
- rule.vsi_handle = fltr->dest_id;
+ rule.vsi_handle = fltr->dest_vsi_handle;
status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
if (status) {
if (status == -ENOENT)
@@ -7875,6 +8617,15 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
if (rem_fltr)
ice_rem_all_chnl_fltrs(pf);
+ /* remove ntuple filters since queue configuration is being changed */
+ if (vsi->netdev->features & NETIF_F_NTUPLE) {
+ struct ice_hw *hw = &pf->hw;
+
+ mutex_lock(&hw->fdir_fltr_lock);
+ ice_fdir_del_all_fltrs(vsi);
+ mutex_unlock(&hw->fdir_fltr_lock);
+ }
+
/* perform cleanup for channels if they exist */
list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
struct ice_vsi *ch_vsi;
@@ -7905,15 +8656,15 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
}
}
+ /* Release FD resources for the channel VSI */
+ ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx);
+
/* clear the VSI from scheduler tree */
ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
- /* Delete VSI from FW */
+ /* Delete VSI from FW, PF and HW VSI arrays */
ice_vsi_delete(ch->ch_vsi);
- /* Delete VSI from PF and HW VSI arrays */
- ice_vsi_clear(ch->ch_vsi);
-
/* free the channel */
kfree(ch);
}
@@ -7972,7 +8723,7 @@ static int ice_rebuild_channels(struct ice_pf *pf)
type = vsi->type;
/* rebuild ADQ VSI */
- err = ice_vsi_rebuild(vsi, true);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
if (err) {
dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
ice_vsi_type_str(type), vsi->idx, err);
@@ -8132,6 +8883,12 @@ static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
switch (mode) {
case TC_MQPRIO_MODE_CHANNEL:
+ if (pf->hw.port_info->is_custom_tx_enabled) {
+ dev_err(dev, "Custom Tx scheduler feature enabled, can't configure ADQ\n");
+ return -EBUSY;
+ }
+ ice_tear_down_devlink_rate_tree(pf);
+
ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
if (ret) {
netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
@@ -8198,14 +8955,14 @@ config_tcf:
cur_rxq = vsi->num_rxq;
/* proceed with rebuild main VSI using correct number of queues */
- ret = ice_vsi_rebuild(vsi, false);
+ ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
if (ret) {
/* fallback to current number of queues */
dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
vsi->req_txq = cur_txq;
vsi->req_rxq = cur_rxq;
clear_bit(ICE_RESET_FAILED, pf->state);
- if (ice_vsi_rebuild(vsi, false)) {
+ if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
dev_err(dev, "Rebuild of main VSI failed again\n");
return ret;
}
@@ -8449,7 +9206,6 @@ int ice_open_internal(struct net_device *netdev)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_port_info *pi;
- enum ice_status status;
int err;
if (test_bit(ICE_NEEDS_RESTART, pf->state)) {
@@ -8460,11 +9216,10 @@ int ice_open_internal(struct net_device *netdev)
netif_carrier_off(netdev);
pi = vsi->port_info;
- status = ice_update_link_info(pi);
- if (status) {
- netdev_err(netdev, "Failed to get link info, error %s\n",
- ice_stat_str(status));
- return -EIO;
+ err = ice_update_link_info(pi);
+ if (err) {
+ netdev_err(netdev, "Failed to get link info, error %d\n", err);
+ return err;
}
ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
@@ -8524,6 +9279,16 @@ int ice_stop(struct net_device *netdev)
return -EBUSY;
}
+ if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
+ int link_err = ice_force_phys_link_state(vsi, false);
+
+ if (link_err) {
+ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
+ vsi->vsi_num, link_err);
+ return -EIO;
+ }
+ }
+
ice_vsi_close(vsi);
return 0;
@@ -8540,6 +9305,7 @@ ice_features_check(struct sk_buff *skb,
struct net_device __always_unused *netdev,
netdev_features_t features)
{
+ bool gso = skb_is_gso(skb);
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
@@ -8552,24 +9318,32 @@ ice_features_check(struct sk_buff *skb,
/* We cannot support GSO if the MSS is going to be less than
* 64 bytes. If it is then we need to drop support for GSO.
*/
- if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+ if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
features &= ~NETIF_F_GSO_MASK;
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
goto out_rm_features;
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
if (skb->encapsulation) {
- len = skb_inner_network_header(skb) - skb_transport_header(skb);
- if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
- goto out_rm_features;
+ /* this must work for VXLAN frames AND IPIP/SIT frames, and in
+ * the case of IPIP frames, the transport header pointer is
+ * after the inner header! So check to make sure that this
+ * is a GRE or UDP_TUNNEL frame before doing that math.
+ */
+ if (gso && (skb_shinfo(skb)->gso_type &
+ (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
+ len = skb_inner_network_header(skb) -
+ skb_transport_header(skb);
+ if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
+ goto out_rm_features;
+ }
- len = skb_inner_transport_header(skb) -
- skb_inner_network_header(skb);
+ len = skb_inner_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
}
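The checks above require each header length to fit the Tx descriptor limits and to be even; an odd or oversized length falls through to out_rm_features and the offloads are stripped for that skb. A tiny sketch of the rule with example values (illustrative helper only, not driver code):

static bool hdr_len_ok_sketch(unsigned int len, unsigned int max)
{
	/* e.g. a 14-byte MAC header or a 20-byte IPv4 header passes,
	 * while a 21-byte (odd) header fails the "len & 0x1" test
	 */
	return len <= max && !(len & 0x1);
}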
@@ -8597,6 +9371,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_start_xmit = ice_start_xmit,
.ndo_select_queue = ice_select_queue,
.ndo_features_check = ice_features_check,
+ .ndo_fix_features = ice_fix_features,
.ndo_set_rx_mode = ice_set_rx_mode,
.ndo_set_mac_address = ice_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index fee37a5844cf..f6f52a248066 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
+#include <linux/vmalloc.h>
+
#include "ice_common.h"
/**
@@ -16,7 +18,7 @@
*
* Read the NVM using the admin queue commands (0x0701)
*/
-static enum ice_status
+static int
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, bool read_shadow_ram,
struct ice_sq_cd *cd)
@@ -27,7 +29,7 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
cmd = &desc.params.nvm;
if (offset > ICE_AQC_NVM_MAX_OFFSET)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_read);
@@ -60,21 +62,21 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
* Returns a status code on failure. Note that the data pointer may be
* partially updated if some reads succeed before a failure.
*/
-enum ice_status
+int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram)
{
- enum ice_status status;
u32 inlen = *length;
u32 bytes_read = 0;
bool last_cmd;
+ int status;
*length = 0;
/* Verify the length of the read if this is for the Shadow RAM */
if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) {
ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n");
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
do {
@@ -119,7 +121,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
*
* Update the NVM using the admin queue commands (0x0703)
*/
-enum ice_status
+int
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd)
@@ -131,7 +133,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write);
@@ -158,8 +160,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
*
* Erase the NVM sector using the admin queue commands (0x0702)
*/
-enum ice_status
-ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
+int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd;
@@ -184,12 +185,11 @@ ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
*
* Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
*/
-static enum ice_status
-ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
+static int ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
u32 bytes = sizeof(u16);
- enum ice_status status;
__le16 data_local;
+ int status;
/* Note that ice_read_flat_nvm takes into account the 4Kb AdminQ and
* Shadow RAM sector restrictions necessary when reading from the NVM.
@@ -210,8 +210,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
*
* This function will request NVM ownership.
*/
-enum ice_status
-ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
+int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
if (hw->flash.blank_nvm_mode)
return 0;
@@ -318,18 +317,18 @@ static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select ban
* hw->flash.banks data being setup by ice_determine_active_flash_banks()
* during initialization.
*/
-static enum ice_status
+static int
ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
u32 offset, u8 *data, u32 length)
{
- enum ice_status status;
+ int status;
u32 start;
start = ice_get_flash_bank_offset(hw, bank, module);
if (!start) {
ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n",
module);
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
status = ice_acquire_nvm(hw, ICE_RES_READ);
@@ -353,11 +352,11 @@ ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
* Read the specified word from the active NVM module. This includes the CSS
* header at the start of the NVM module.
*/
-static enum ice_status
+static int
ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
__le16 data_local;
+ int status;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16),
(__force u8 *)&data_local, sizeof(u16));
@@ -377,7 +376,7 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
* Read the specified word from the copy of the Shadow RAM found in the
* specified NVM module.
*/
-static enum ice_status
+static int
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
@@ -392,11 +391,11 @@ ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u
*
* Read a word from the specified netlist bank.
*/
-static enum ice_status
+static int
ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
__le16 data_local;
+ int status;
status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16),
(__force u8 *)&data_local, sizeof(u16));
@@ -414,9 +413,9 @@ ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset
*
* Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
*/
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (!status) {
@@ -438,13 +437,13 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
* Area (PFA) and returns the TLV pointer and length. The caller can
* use these to read the variable length TLV value.
*/
-enum ice_status
+int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
- enum ice_status status;
u16 pfa_len, pfa_ptr;
u16 next_tlv;
+ int status;
status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
if (status) {
@@ -482,7 +481,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
*module_tlv_len = tlv_len;
return 0;
}
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
}
/* Check next TLV, i.e. current TLV pointer + length + 2 words
* (for current TLV's type and length)
@@ -490,7 +489,7 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
next_tlv = next_tlv + tlv_len + 2;
}
/* Module does not exist */
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
}
/**
@@ -501,12 +500,11 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
*
* Reads the part number string from the NVM.
*/
-enum ice_status
-ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
+int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
{
u16 pba_tlv, pba_tlv_len;
- enum ice_status status;
u16 pba_word, pba_size;
+ int status;
u16 i;
status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
@@ -525,7 +523,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
if (pba_tlv_len < pba_size) {
ice_debug(hw, ICE_DBG_INIT, "Invalid PBA Block TLV size.\n");
- return ICE_ERR_INVAL_SIZE;
+ return -EINVAL;
}
/* Subtract one to get PBA word count (PBA Size word is included in
@@ -534,7 +532,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
pba_size--;
if (pba_num_size < (((u32)pba_size * 2) + 1)) {
ice_debug(hw, ICE_DBG_INIT, "Buffer too small for PBA data.\n");
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
for (i = 0; i < pba_size; i++) {
@@ -561,11 +559,11 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
* Read the NVM EETRACK ID and map version of the main NVM image bank, filling
* in the NVM info structure.
*/
-static enum ice_status
+static int
ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm)
{
u16 eetrack_lo, eetrack_hi, ver;
- enum ice_status status;
+ int status;
status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver);
if (status) {
@@ -601,7 +599,7 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv
* inactive NVM bank. Used to access version data for a pending update that
* has not yet been activated.
*/
-enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
+int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
{
return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm);
}
@@ -615,49 +613,73 @@ enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info
* Searches through the Option ROM flash contents to locate the CIVD data for
* the image.
*/
-static enum ice_status
+static int
ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_orom_civd_info *civd)
{
- struct ice_orom_civd_info tmp;
- enum ice_status status;
+ u8 *orom_data;
+ int status;
u32 offset;
/* The CIVD section is located in the Option ROM aligned to 512 bytes.
* The first 4 bytes must contain the ASCII characters "$CIV".
* A simple modulo 256 sum of all of the bytes of the structure must
* equal 0.
+ *
+ * The exact location is unknown and varies between images but is
+ * usually somewhere in the middle of the bank. We need to scan the
+ * Option ROM bank to locate it.
+ *
+ * It's significantly faster to read the entire Option ROM up front
+	 * using the maximum page size than to read each possible location
+ * with a separate firmware command.
*/
+ orom_data = vzalloc(hw->flash.banks.orom_size);
+ if (!orom_data)
+ return -ENOMEM;
+
+ status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0,
+ orom_data, hw->flash.banks.orom_size);
+ if (status) {
+ vfree(orom_data);
+ ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
+ return status;
+ }
+
+ /* Scan the memory buffer to locate the CIVD data section */
for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) {
+ struct ice_orom_civd_info *tmp;
u8 sum = 0, i;
- status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR,
- offset, (u8 *)&tmp, sizeof(tmp));
- if (status) {
- ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM CIVD data\n");
- return status;
- }
+ tmp = (struct ice_orom_civd_info *)&orom_data[offset];
/* Skip forward until we find a matching signature */
- if (memcmp("$CIV", tmp.signature, sizeof(tmp.signature)) != 0)
+ if (memcmp("$CIV", tmp->signature, sizeof(tmp->signature)) != 0)
continue;
+ ice_debug(hw, ICE_DBG_NVM, "Found CIVD section at offset %u\n",
+ offset);
+
/* Verify that the simple checksum is zero */
- for (i = 0; i < sizeof(tmp); i++)
- /* cppcheck-suppress objectIndex */
- sum += ((u8 *)&tmp)[i];
+ for (i = 0; i < sizeof(*tmp); i++)
+ sum += ((u8 *)tmp)[i];
if (sum) {
ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
sum);
- return ICE_ERR_NVM;
+ goto err_invalid_checksum;
}
- *civd = tmp;
+ *civd = *tmp;
+ vfree(orom_data);
return 0;
}
- return ICE_ERR_NVM;
+ ice_debug(hw, ICE_DBG_NVM, "Unable to locate CIVD data within the Option ROM\n");
+
+err_invalid_checksum:
+ vfree(orom_data);
+ return -EIO;
}
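For reference, the validity test applied to each 512-byte-aligned candidate in the scan above can be summarized as a small standalone helper. This is a sketch assuming the struct ice_orom_civd_info layout from ice_nvm.h, not the driver's own code:

static bool civd_section_valid_sketch(const u8 *buf)
{
	const struct ice_orom_civd_info *civd = (const void *)buf;
	u8 sum = 0;
	size_t i;

	/* the section must begin with the ASCII signature "$CIV" */
	if (memcmp("$CIV", civd->signature, sizeof(civd->signature)))
		return false;

	/* and the byte-wise sum of the structure must be zero modulo 256 */
	for (i = 0; i < sizeof(*civd); i++)
		sum += buf[i];

	return sum == 0;
}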
/**
@@ -669,12 +691,12 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
* Read Option ROM version and security revision from the Option ROM flash
* section.
*/
-static enum ice_status
+static int
ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom)
{
struct ice_orom_civd_info civd;
- enum ice_status status;
u32 combo_ver;
+ int status;
status = ice_get_orom_civd_data(hw, bank, &civd);
if (status) {
@@ -700,7 +722,7 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o
* section of flash. Used to access version data for a pending update that has
* not yet been activated.
*/
-enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
+int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
{
return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom);
}
@@ -715,13 +737,13 @@ enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_inf
* Topology section to find the Netlist ID block and extract the relevant
* information into the netlist version structure.
*/
-static enum ice_status
+static int
ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_netlist_info *netlist)
{
u16 module_id, length, node_count, i;
- enum ice_status status;
u16 *id_blk;
+ int status;
status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id);
if (status)
@@ -730,7 +752,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) {
ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n",
ICE_NETLIST_LINK_TOPO_MOD_ID, module_id);
- return ICE_ERR_NVM;
+ return -EIO;
}
status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length);
@@ -741,7 +763,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
if (length < ICE_NETLIST_ID_BLK_SIZE) {
ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n",
ICE_NETLIST_ID_BLK_SIZE, length);
- return ICE_ERR_NVM;
+ return -EIO;
}
status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count);
@@ -751,7 +773,7 @@ ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
id_blk = kcalloc(ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
if (!id_blk)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Read out the entire Netlist ID Block at once. */
status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR,
@@ -791,7 +813,7 @@ exit_error:
* extract version data of a pending flash update in order to display the
* version data.
*/
-enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
+int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
{
return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist);
}
@@ -804,10 +826,10 @@ enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netli
* the actual size is smaller. Use bisection to determine the accessible size
* of flash memory.
*/
-static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
+static int ice_discover_flash_size(struct ice_hw *hw)
{
u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
@@ -819,7 +841,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
u8 data;
status = ice_read_flat_nvm(hw, offset, &len, &data, false);
- if (status == ICE_ERR_AQ_ERROR &&
+ if (status == -EIO &&
hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset);
@@ -859,10 +881,9 @@ err_read_flat_nvm:
* sector size by using the highest bit. The reported pointer value will be in
* bytes, intended for flat NVM reads.
*/
-static enum ice_status
-ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
+static int ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
{
- enum ice_status status;
+ int status;
u16 value;
status = ice_read_sr_word(hw, offset, &value);
@@ -891,10 +912,9 @@ ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
* Each area size word is specified in 4KB sector units. This function reports
* the size in bytes, intended for flat NVM reads.
*/
-static enum ice_status
-ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
+static int ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
{
- enum ice_status status;
+ int status;
u16 value;
status = ice_read_sr_word(hw, offset, &value);
@@ -917,12 +937,11 @@ ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
* structure for later use in order to calculate the correct offset to read
* from the active module.
*/
-static enum ice_status
-ice_determine_active_flash_banks(struct ice_hw *hw)
+static int ice_determine_active_flash_banks(struct ice_hw *hw)
{
struct ice_bank_info *banks = &hw->flash.banks;
- enum ice_status status;
u16 ctrl_word;
+ int status;
status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word);
if (status) {
@@ -933,7 +952,7 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
/* Check that the control word indicates validity */
if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) {
ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n");
- return ICE_ERR_CFG;
+ return -EIO;
}
if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK))
@@ -997,12 +1016,12 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
* This function reads and populates NVM settings such as Shadow RAM size,
* max_timeout, and blank_nvm_mode
*/
-enum ice_status ice_init_nvm(struct ice_hw *hw)
+int ice_init_nvm(struct ice_hw *hw)
{
struct ice_flash_info *flash = &hw->flash;
- enum ice_status status;
u32 fla, gens_stat;
u8 sr_size;
+ int status;
/* The SR size is stored regardless of the NVM programming mode
* as the blank mode may be used in the factory line.
@@ -1021,7 +1040,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
/* Blank programming mode */
flash->blank_nvm_mode = true;
ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
- return ICE_ERR_NVM_BLANK_MODE;
+ return -EIO;
}
status = ice_discover_flash_size(hw);
@@ -1060,11 +1079,11 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
*
* Verify NVM PFA checksum validity (0x0706)
*/
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
+int ice_nvm_validate_checksum(struct ice_hw *hw)
{
struct ice_aqc_nvm_checksum *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
@@ -1080,7 +1099,7 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
if (!status)
if (le16_to_cpu(cmd->checksum) != ICE_AQC_NVM_CHECKSUM_CORRECT)
- status = ICE_ERR_NVM_CHECKSUM;
+ status = -EIO;
return status;
}
@@ -1088,22 +1107,40 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
/**
* ice_nvm_write_activate
* @hw: pointer to the HW struct
- * @cmd_flags: NVM activate admin command bits (banks to be validated)
+ * @cmd_flags: flags for write activate command
+ * @response_flags: response indicators from firmware
*
* Update the control word with the required banks' validity bits
* and dumps the Shadow RAM to flash (0x0707)
+ *
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16-bit cmd_flags value is split across two separate 1-byte
+ * flag values in the descriptor.
+ *
+ * On successful return of the firmware command, the response_flags variable
+ * is updated with the flags reported by firmware indicating certain status,
+ * such as whether EMP reset is enabled.
*/
-enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
+int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
struct ice_aqc_nvm *cmd;
struct ice_aq_desc desc;
+ int err;
cmd = &desc.params.nvm;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
- cmd->cmd_flags = cmd_flags;
+ cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
+ cmd->offset_high = (u8)((cmd_flags >> 8) & 0xFF);
- return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!err && response_flags)
+ *response_flags = cmd->cmd_flags;
+
+ return err;
}
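A hedged usage sketch for the new signature: the low byte of cmd_flags lands in cmd->cmd_flags, the high byte in cmd->offset_high, and on success the low byte of the response is handed back through response_flags. The flag bit below is a placeholder for illustration, not a real definition from ice_adminq_cmd.h:

u8 response_flags = 0;
u16 cmd_flags = BIT(9);	/* hypothetical bit in the high byte */
int err;

err = ice_nvm_write_activate(hw, cmd_flags, &response_flags);
if (!err)
	dev_dbg(ice_hw_to_dev(hw), "activate response flags: 0x%02x\n",
		response_flags);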
/**
@@ -1113,7 +1150,7 @@ enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags)
* Update empr (0x0709). This command allows SW to
* request an EMPR to activate new FW.
*/
-enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
+int ice_aq_nvm_update_empr(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -1136,7 +1173,7 @@ enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw)
* as part of the NVM update as the first cmd in the flow.
*/
-enum ice_status
+int
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
u16 length, struct ice_sq_cd *cd)
{
@@ -1144,7 +1181,7 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
struct ice_aq_desc desc;
if (length != 0 && !data)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.pkg_data;
@@ -1173,17 +1210,17 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
* the TransferFlag is set to End or StartAndEnd.
*/
-enum ice_status
+int
ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
u8 transfer_flag, u8 *comp_response,
u8 *comp_response_code, struct ice_sq_cd *cd)
{
struct ice_aqc_nvm_pass_comp_tbl *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!data || !comp_response || !comp_response_code)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.pass_comp_tbl;
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index c6f05f43d593..774c2317967d 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -12,38 +12,34 @@ struct ice_orom_civd_info {
__le16 combo_name[32]; /* Unicode string representing the Combo Image version */
} __packed;
-enum ice_status
-ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
+int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
-enum ice_status
+int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
-enum ice_status
+int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type);
-enum ice_status
-ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom);
-enum ice_status
-ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm);
-enum ice_status
+int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom);
+int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm);
+int
ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist);
-enum ice_status
-ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
-enum ice_status ice_init_nvm(struct ice_hw *hw);
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
-enum ice_status
+int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
+int ice_init_nvm(struct ice_hw *hw);
+int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
+int
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
-enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags);
-enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw);
-enum ice_status
+int ice_nvm_validate_checksum(struct ice_hw *hw);
+int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags);
+int ice_aq_nvm_update_empr(struct ice_hw *hw);
+int
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
u16 length, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
u8 transfer_flag, u8 *comp_response,
u8 *comp_response_code, struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h
index f57c414bc0a9..82bc54fec7f3 100644
--- a/drivers/net/ethernet/intel/ice/ice_osdep.h
+++ b/drivers/net/ethernet/intel/ice/ice_osdep.h
@@ -5,10 +5,18 @@
#define _ICE_OSDEP_H_
#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/pci_ids.h>
#ifndef CONFIG_64BIT
#include <linux/io-64-nonatomic-lo-hi.h>
#endif
+#include <net/udp_tunnel.h>
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
#define rd32(a, reg) readl((a)->hw_addr + (reg))
@@ -24,8 +32,8 @@ struct ice_dma_mem {
size_t size;
};
-#define ice_hw_to_dev(ptr) \
- (&(container_of((ptr), struct ice_pf, hw))->pdev->dev)
+struct ice_hw;
+struct device *ice_hw_to_dev(struct ice_hw *hw);
#ifdef CONFIG_DYNAMIC_DEBUG
#define ice_debug(hw, type, fmt, args...) \
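The macro above becomes an out-of-line function so this header no longer needs the full struct ice_pf definition. Its body is not part of this excerpt; a sketch of what the definition presumably looks like, mirroring the removed macro and assuming struct ice_pf still embeds the ice_hw and holds the pdev pointer:

struct device *ice_hw_to_dev(struct ice_hw *hw)
{
	struct ice_pf *pf = container_of(hw, struct ice_pf, hw);

	return &pf->pdev->dev;
}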
diff --git a/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c
new file mode 100644
index 000000000000..976a03d3bdd5
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_vsi_vlan_ops.h"
+#include "ice_vsi_vlan_lib.h"
+#include "ice_vlan_mode.h"
+#include "ice.h"
+#include "ice_pf_vsi_vlan_ops.h"
+
+void ice_pf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+
+ if (ice_is_dvm_ena(&vsi->back->hw)) {
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+ vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ } else {
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ }
+}
+
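Once the table above is populated, callers dispatch VLAN operations through the function pointers rather than calling the library helpers directly; which table applies depends on whether double VLAN mode (DVM) is enabled. A hypothetical dispatch helper, for illustration only (not part of this patch):

static struct ice_vsi_vlan_ops *vsi_vlan_ops_sketch(struct ice_vsi *vsi)
{
	/* DVM uses the outer (S-tag) ops table, single VLAN mode the inner */
	if (ice_is_dvm_ena(&vsi->back->hw))
		return &vsi->outer_vlan_ops;

	return &vsi->inner_vlan_ops;
}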
diff --git a/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h
new file mode 100644
index 000000000000..6741ec8c5f6b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_pf_vsi_vlan_ops.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_PF_VSI_VLAN_OPS_H_
+#define _ICE_PF_VSI_VLAN_OPS_H_
+
+#include "ice_vsi_vlan_ops.h"
+
+struct ice_vsi;
+
+void ice_pf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+
+#endif /* _ICE_PF_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index dc1b0e9e6df5..02a4e1cf624e 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -29,6 +29,7 @@ enum ice_protocol_type {
ICE_MAC_OFOS = 0,
ICE_MAC_IL,
ICE_ETYPE_OL,
+ ICE_ETYPE_IL,
ICE_VLAN_OFOS,
ICE_IPV4_OFOS,
ICE_IPV4_IL,
@@ -40,6 +41,12 @@ enum ice_protocol_type {
ICE_VXLAN,
ICE_GENEVE,
ICE_NVGRE,
+ ICE_GTP,
+ ICE_GTP_NO_PAY,
+ ICE_PPPOE,
+ ICE_L2TPV3,
+ ICE_VLAN_EX,
+ ICE_VLAN_IN,
ICE_VXLAN_GPE,
ICE_SCTP_IL,
ICE_PROTOCOL_LAST
@@ -47,9 +54,12 @@ enum ice_protocol_type {
enum ice_sw_tunnel_type {
ICE_NON_TUN = 0,
+ ICE_SW_TUN_AND_NON_TUN,
ICE_SW_TUN_VXLAN,
ICE_SW_TUN_GENEVE,
ICE_SW_TUN_NVGRE,
+ ICE_SW_TUN_GTPU,
+ ICE_SW_TUN_GTPC,
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
};
@@ -91,6 +101,7 @@ enum ice_prot_id {
#define ICE_MAC_OFOS_HW 1
#define ICE_MAC_IL_HW 4
#define ICE_ETYPE_OL_HW 9
+#define ICE_ETYPE_IL_HW 10
#define ICE_VLAN_OF_HW 16
#define ICE_VLAN_OL_HW 17
#define ICE_IPV4_OFOS_HW 32
@@ -100,15 +111,22 @@ enum ice_prot_id {
#define ICE_TCP_IL_HW 49
#define ICE_UDP_ILOS_HW 53
#define ICE_GRE_OF_HW 64
+#define ICE_PPPOE_HW 103
+#define ICE_L2TPV3_HW 104
#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
-#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
+#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel and VLAN type */
#define ICE_MDID_SIZE 2
+
#define ICE_TUN_FLAG_MDID 21
#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
#define ICE_TUN_FLAG_MASK 0xFF
+#define ICE_VLAN_FLAG_MDID 20
+#define ICE_VLAN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_VLAN_FLAG_MDID)
+#define ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK 0xD000
+
#define ICE_TUN_FLAG_FV_IND 2
/* Mapping of software defined protocol ID to hardware defined protocol ID */
@@ -179,6 +197,33 @@ struct ice_udp_tnl_hdr {
__be32 vni; /* only use lower 24-bits */
};
+struct ice_udp_gtp_hdr {
+ u8 flags;
+ u8 msg_type;
+ __be16 rsrvd_len;
+ __be32 teid;
+ __be16 rsrvd_seq_nbr;
+ u8 rsrvd_n_pdu_nbr;
+ u8 rsrvd_next_ext;
+ u8 rsvrd_ext_len;
+ u8 pdu_type;
+ u8 qfi;
+ u8 rsvrd;
+};
+
+struct ice_pppoe_hdr {
+ u8 rsrvd_ver_type;
+ u8 rsrvd_code;
+ __be16 session_id;
+ __be16 length;
+ __be16 ppp_prot_id; /* control and data only */
+};
+
+struct ice_l2tpv3_sess_hdr {
+ __be32 session_id;
+ __be64 cookie;
+};
+
struct ice_nvgre_hdr {
__be16 flags;
__be16 protocol;
@@ -195,6 +240,9 @@ union ice_prot_hdr {
struct ice_sctp_hdr sctp_hdr;
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre_hdr nvgre_hdr;
+ struct ice_udp_gtp_hdr gtp_hdr;
+ struct ice_pppoe_hdr pppoe_hdr;
+ struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr;
};
/* This is mapping table entry that maps every word within a given protocol
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 442b031b0edc..ac6f06f9a2ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -3,9 +3,12 @@
#include "ice.h"
#include "ice_lib.h"
+#include "ice_trace.h"
#define E810_OUT_PROP_DELAY_NS 1
+#define UNKNOWN_INCVAL_E822 0x100000000ULL
+
static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
/* name idx func chan */
{ "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
@@ -281,6 +284,8 @@ static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
else
val &= ~PFINT_OICR_TSYN_TX_M;
wr32(&pf->hw, PFINT_OICR_ENA, val);
+
+ pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
}
/**
@@ -303,6 +308,9 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
continue;
vsi->rx_rings[i]->ptp_rx = on;
}
+
+ pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
+ HWTSTAMP_FILTER_NONE;
}
/**
@@ -313,18 +321,10 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
* This function will configure timestamping during PTP initialization
* and deinitialization
*/
-static void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
+void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
{
ice_set_tx_tstamp(pf, ena);
ice_set_rx_tstamp(pf, ena);
-
- if (ena) {
- pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
- pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
- } else {
- pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
- pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
- }
}
/**
@@ -491,46 +491,6 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
}
/**
- * ice_ptp_update_cached_phctime - Update the cached PHC time values
- * @pf: Board specific private structure
- *
- * This function updates the system time values which are cached in the PF
- * structure and the Rx rings.
- *
- * This function must be called periodically to ensure that the cached value
- * is never more than 2 seconds old. It must also be called whenever the PHC
- * time has been changed.
- */
-static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
-{
- u64 systime;
- int i;
-
- /* Read the current PHC time */
- systime = ice_ptp_read_src_clk_reg(pf, NULL);
-
- /* Update the cached PHC time stored in the PF structure */
- WRITE_ONCE(pf->ptp.cached_phc_time, systime);
-
- ice_for_each_vsi(pf, i) {
- struct ice_vsi *vsi = pf->vsi[i];
- int j;
-
- if (!vsi)
- continue;
-
- if (vsi->type != ICE_VSI_PF)
- continue;
-
- ice_for_each_rxq(vsi, j) {
- if (!vsi->rx_rings[j])
- continue;
- WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
- }
- }
-}
-
-/**
* ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
* @cached_phc_time: recently cached copy of PHC time
* @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
@@ -626,12 +586,501 @@ static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
const u64 mask = GENMASK_ULL(31, 0);
+ unsigned long discard_time;
+
+ /* Discard the hardware timestamp if the cached PHC time is too old */
+ discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
+ if (time_is_before_jiffies(discard_time)) {
+ pf->ptp.tx_hwtstamp_discarded++;
+ return 0;
+ }
return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
(in_tstamp >> 8) & mask);
}
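The discard check above protects the 32-bit extension helper, ice_ptp_extend_32b_ts(), which is not shown in this hunk. A minimal sketch of the kind of math involved, splicing the captured low 32 bits onto the cached PHC time and treating large deltas as wrap-around (the name and threshold here are assumptions, not the driver's code):

static u64 extend_32b_ts_sketch(u64 cached_phc_time, u32 in_tstamp)
{
	u32 phc_lo = (u32)cached_phc_time;
	u32 delta = in_tstamp - phc_lo;

	/* a very large delta means the capture happened before the cached
	 * time, so walk backwards instead of forwards
	 */
	if (delta > (U32_MAX / 2))
		return cached_phc_time - (u32)(phc_lo - in_tstamp);

	return cached_phc_time + delta;
}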
/**
+ * ice_ptp_is_tx_tracker_up - Check if Tx tracker is ready for new timestamps
+ * @tx: the PTP Tx timestamp tracker to check
+ *
+ * Check that a given PTP Tx timestamp tracker is up, i.e. that it is ready
+ * to accept new timestamp requests.
+ *
+ * Assumes the tx->lock spinlock is already held.
+ */
+static bool
+ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
+{
+ lockdep_assert_held(&tx->lock);
+
+ return tx->init && !tx->calibrating;
+}
+
+/**
+ * ice_ptp_tx_tstamp - Process Tx timestamps for a port
+ * @tx: the PTP Tx timestamp tracker
+ *
+ * Process timestamps captured by the PHY associated with this port. To do
+ * this, loop over each index with a waiting skb.
+ *
+ * If a given index has a valid timestamp, perform the following steps:
+ *
+ * 1) check that the timestamp request is not stale
+ * 2) check that a timestamp is ready and available in the PHY memory bank
+ * 3) read and copy the timestamp out of the PHY register
+ * 4) unlock the index by clearing the associated in_use bit
+ * 5) check if the timestamp is stale, and discard if so
+ * 6) extend the 40 bit timestamp value to get a 64 bit timestamp value
+ * 7) send this 64 bit timestamp to the stack
+ *
+ * Returns true if all timestamps were handled, and false if any slots remain
+ * without a timestamp.
+ *
+ * After looping, if we still have waiting SKBs, return false. This may cause
+ * us to poll again even when not strictly necessary. We do this because
+ * it's possible a new timestamp was requested around the same time as the
+ * interrupt. In some cases hardware might not interrupt us again when the
+ * timestamp is captured.
+ *
+ * Note that we do not hold the tracking lock while reading the Tx timestamp.
+ * This is because reading the timestamp requires taking a mutex that might
+ * sleep.
+ *
+ * The only place where we set in_use is when a new timestamp is initiated
+ * with a slot index. This is only called in the hard xmit routine where an
+ * SKB has a request flag set. The only places where we clear this bit is this
+ * function, or during teardown when the Tx timestamp tracker is being
+ * removed. A timestamp index will never be re-used until the in_use bit for
+ * that index is cleared.
+ *
+ * If a Tx thread starts a new timestamp, we might not begin processing it
+ * right away but we will notice it at the end when we re-queue the task.
+ *
+ * If a Tx thread starts a new timestamp just after this function exits, the
+ * interrupt for that timestamp should re-trigger this function once
+ * a timestamp is ready.
+ *
+ * In cases where the PTP hardware clock was directly adjusted, some
+ * timestamps may not be able to safely use the timestamp extension math. In
+ * this case, software will set the stale bit for any outstanding Tx
+ * timestamps when the clock is adjusted. Then this function will discard
+ * those captured timestamps instead of sending them to the stack.
+ *
+ * If a Tx packet has been waiting for more than 2 seconds, it is not possible
+ * to correctly extend the timestamp using the cached PHC time. It is
+ * extremely unlikely that a packet will ever take this long to timestamp. If
+ * we detect a Tx timestamp request that has waited for this long we assume
+ * the packet will never be sent by hardware and discard it without reading
+ * the timestamp register.
+ */
+static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
+{
+ struct ice_ptp_port *ptp_port;
+ bool more_timestamps;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ u64 tstamp_ready;
+ bool link_up;
+ int err;
+ u8 idx;
+
+ if (!tx->init)
+ return true;
+
+ ptp_port = container_of(tx, struct ice_ptp_port, tx);
+ pf = ptp_port_to_pf(ptp_port);
+ hw = &pf->hw;
+
+ /* Read the Tx ready status first */
+ err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
+ if (err)
+ return false;
+
+ /* Drop packets if the link went down */
+ link_up = ptp_port->link_up;
+
+ for_each_set_bit(idx, tx->in_use, tx->len) {
+ struct skb_shared_hwtstamps shhwtstamps = {};
+ u8 phy_idx = idx + tx->offset;
+ u64 raw_tstamp = 0, tstamp;
+ bool drop_ts = !link_up;
+ struct sk_buff *skb;
+
+ /* Drop packets which have waited for more than 2 seconds */
+ if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
+ drop_ts = true;
+
+ /* Count the number of Tx timestamps that timed out */
+ pf->ptp.tx_hwtstamp_timeouts++;
+ }
+
+		/* Only read a timestamp from the PHY if it's marked as ready
+ * by the tstamp_ready register. This avoids unnecessary
+ * reading of timestamps which are not yet valid. This is
+ * important as we must read all timestamps which are valid
+ * and only timestamps which are valid during each interrupt.
+ * If we do not, the hardware logic for generating a new
+ * interrupt can get stuck on some devices.
+ */
+ if (!(tstamp_ready & BIT_ULL(phy_idx))) {
+ if (drop_ts)
+ goto skip_ts_read;
+
+ continue;
+ }
+
+ ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+
+ err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
+ if (err && !drop_ts)
+ continue;
+
+ ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
+
+ /* For PHYs which don't implement a proper timestamp ready
+ * bitmap, verify that the timestamp value is different
+ * from the last cached timestamp. If it is not, skip this for
+ * now assuming it hasn't yet been captured by hardware.
+ */
+ if (!drop_ts && tx->verify_cached &&
+ raw_tstamp == tx->tstamps[idx].cached_tstamp)
+ continue;
+
+ /* Discard any timestamp value without the valid bit set */
+ if (!(raw_tstamp & ICE_PTP_TS_VALID))
+ drop_ts = true;
+
+skip_ts_read:
+ spin_lock(&tx->lock);
+ if (tx->verify_cached && raw_tstamp)
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
+ clear_bit(idx, tx->in_use);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ if (test_and_clear_bit(idx, tx->stale))
+ drop_ts = true;
+ spin_unlock(&tx->lock);
+
+ /* It is unlikely but possible that the SKB will have been
+ * flushed at this point due to link change or teardown.
+ */
+ if (!skb)
+ continue;
+
+ if (drop_ts) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Extend the timestamp using cached PHC time */
+ tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
+ if (tstamp) {
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ ice_trace(tx_tstamp_complete, skb, idx);
+ }
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Check if we still have work to do. If so, re-queue this task to
+ * poll for remaining timestamps.
+ */
+ spin_lock(&tx->lock);
+ more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
+ spin_unlock(&tx->lock);
+
+ return !more_timestamps;
+}
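The comments above refer to the producer side that sets in_use from the hard xmit routine; that helper lives elsewhere in the driver and is not part of this hunk. A hedged sketch of what it does with the tracker fields introduced here (name and return convention are illustrative):

static int request_ts_slot_sketch(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
	unsigned int idx;

	spin_lock(&tx->lock);
	if (!ice_ptp_is_tx_tracker_up(tx)) {
		spin_unlock(&tx->lock);
		return -1;
	}

	/* claim a free slot and stash the skb for the interrupt path above */
	idx = find_first_zero_bit(tx->in_use, tx->len);
	if (idx < tx->len) {
		set_bit(idx, tx->in_use);
		clear_bit(idx, tx->stale);
		tx->tstamps[idx].start = jiffies;
		tx->tstamps[idx].skb = skb_get(skb);
	}
	spin_unlock(&tx->lock);

	return idx < tx->len ? (int)(idx + tx->offset) : -1;
}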
+
+/**
+ * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
+ * @tx: Tx tracking structure to initialize
+ *
+ * Assumes that the length has already been initialized. Do not call directly,
+ * use one of the ice_ptp_init_tx_* helpers instead.
+ */
+static int
+ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
+{
+ unsigned long *in_use, *stale;
+ struct ice_tx_tstamp *tstamps;
+
+ tstamps = kcalloc(tx->len, sizeof(*tstamps), GFP_KERNEL);
+ in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
+ stale = bitmap_zalloc(tx->len, GFP_KERNEL);
+
+ if (!tstamps || !in_use || !stale) {
+ kfree(tstamps);
+ bitmap_free(in_use);
+ bitmap_free(stale);
+
+ return -ENOMEM;
+ }
+
+ tx->tstamps = tstamps;
+ tx->in_use = in_use;
+ tx->stale = stale;
+ tx->init = 1;
+
+ spin_lock_init(&tx->lock);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
+ * @pf: Board private structure
+ * @tx: the tracker to flush
+ *
+ * Called during teardown when a Tx tracker is being removed.
+ */
+static void
+ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ struct ice_hw *hw = &pf->hw;
+ u64 tstamp_ready;
+ int err;
+ u8 idx;
+
+ err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
+ if (err) {
+ dev_dbg(ice_pf_to_dev(pf), "Failed to get the Tx tstamp ready bitmap for block %u, err %d\n",
+ tx->block, err);
+
+ /* If we fail to read the Tx timestamp ready bitmap just
+ * skip clearing the PHY timestamps.
+ */
+ tstamp_ready = 0;
+ }
+
+ for_each_set_bit(idx, tx->in_use, tx->len) {
+ u8 phy_idx = idx + tx->offset;
+ struct sk_buff *skb;
+
+ /* In case this timestamp is ready, we need to clear it. */
+ if (!hw->reset_ongoing && (tstamp_ready & BIT_ULL(phy_idx)))
+ ice_clear_phy_tstamp(hw, tx->block, phy_idx);
+
+ spin_lock(&tx->lock);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ clear_bit(idx, tx->in_use);
+ clear_bit(idx, tx->stale);
+ spin_unlock(&tx->lock);
+
+ /* Count the number of Tx timestamps flushed */
+ pf->ptp.tx_hwtstamp_flushed++;
+
+ /* Free the SKB after we've cleared the bit */
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/**
+ * ice_ptp_mark_tx_tracker_stale - Mark unfinished timestamps as stale
+ * @tx: the tracker to mark
+ *
+ * Mark currently outstanding Tx timestamps as stale. This prevents sending
+ * their timestamp value to the stack. This is required to prevent extending
+ * the 40bit hardware timestamp incorrectly.
+ *
+ * This should be called when the PTP clock is modified such as after a set
+ * time request.
+ */
+static void
+ice_ptp_mark_tx_tracker_stale(struct ice_ptp_tx *tx)
+{
+ spin_lock(&tx->lock);
+ bitmap_or(tx->stale, tx->stale, tx->in_use, tx->len);
+ spin_unlock(&tx->lock);
+}
+
+/**
+ * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
+ * @pf: Board private structure
+ * @tx: Tx tracking structure to release
+ *
+ * Free memory associated with the Tx timestamp tracker.
+ */
+static void
+ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ spin_lock(&tx->lock);
+ tx->init = 0;
+ spin_unlock(&tx->lock);
+
+ /* wait for potentially outstanding interrupt to complete */
+ synchronize_irq(pf->msix_entries[pf->oicr_idx].vector);
+
+ ice_ptp_flush_tx_tracker(pf, tx);
+
+ kfree(tx->tstamps);
+ tx->tstamps = NULL;
+
+ bitmap_free(tx->in_use);
+ tx->in_use = NULL;
+
+ bitmap_free(tx->stale);
+ tx->stale = NULL;
+
+ tx->len = 0;
+}
+
+/**
+ * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
+ *
+ * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
+ * the timestamp block is shared for all ports in the same quad. To avoid
+ * ports using the same timestamp index, logically break the block of
+ * registers into chunks based on the port number.
+ */
+static int
+ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+{
+ tx->block = port / ICE_PORTS_PER_QUAD;
+ tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E822;
+ tx->len = INDEX_PER_PORT_E822;
+ tx->verify_cached = 0;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
+ * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ *
+ * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
+ * port has its own block of timestamps, independent of the other ports.
+ */
+static int
+ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ tx->block = pf->hw.port_info->lport;
+ tx->offset = 0;
+ tx->len = INDEX_PER_PORT_E810;
+ /* The E810 PHY does not provide a timestamp ready bitmap. Instead,
+	 * verify new timestamps against a cached copy of the last read
+ * timestamp.
+ */
+ tx->verify_cached = 1;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
+ * ice_ptp_update_cached_phctime - Update the cached PHC time values
+ * @pf: Board specific private structure
+ *
+ * This function updates the system time values which are cached in the PF
+ * structure and the Rx rings.
+ *
+ * This function must be called periodically to ensure that the cached value
+ * is never more than 2 seconds old.
+ *
+ * Note that the cached copy in the PF PTP structure is always updated, even
+ * if we can't update the copy in the Rx rings.
+ *
+ * Return:
+ * * 0 - OK, successfully updated
+ * * -EAGAIN - PF was busy, need to reschedule the update
+ */
+static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ unsigned long update_before;
+ u64 systime;
+ int i;
+
+ update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
+ if (pf->ptp.cached_phc_time &&
+ time_is_before_jiffies(update_before)) {
+ unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
+
+ dev_warn(dev, "%u msecs passed between update to cached PHC time\n",
+ jiffies_to_msecs(time_taken));
+ pf->ptp.late_cached_phc_updates++;
+ }
+
+ /* Read the current PHC time */
+ systime = ice_ptp_read_src_clk_reg(pf, NULL);
+
+ /* Update the cached PHC time stored in the PF structure */
+ WRITE_ONCE(pf->ptp.cached_phc_time, systime);
+ WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
+
+ if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
+ return -EAGAIN;
+
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vsi = pf->vsi[i];
+ int j;
+
+ if (!vsi)
+ continue;
+
+ if (vsi->type != ICE_VSI_PF)
+ continue;
+
+ ice_for_each_rxq(vsi, j) {
+ if (!vsi->rx_rings[j])
+ continue;
+ WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
+ }
+ }
+ clear_bit(ICE_CFG_BUSY, pf->state);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
+ * @pf: Board specific private structure
+ *
+ * This function must be called when the cached PHC time is no longer valid,
+ * such as after a time adjustment. It marks any currently outstanding Tx
+ * timestamps as stale and updates the cached PHC time for both the PF and Rx
+ * rings.
+ *
+ * If updating the PHC time cannot be done immediately, a warning message is
+ * logged and the work item is scheduled immediately to minimize the window
+ * with a wrong cached timestamp.
+ */
+static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ /* Update the cached PHC time immediately if possible, otherwise
+ * schedule the work item to execute soon.
+ */
+ err = ice_ptp_update_cached_phctime(pf);
+ if (err) {
+ /* If another thread is updating the Rx rings, we won't
+ * properly reset them here. This could lead to reporting of
+ * invalid timestamps, but there isn't much we can do.
+ */
+ dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
+ __func__);
+
+ /* Queue the work item to update the Rx rings when possible */
+ kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
+ msecs_to_jiffies(10));
+ }
+
+ /* Mark any outstanding timestamps as stale, since they might have
+ * been captured in hardware before the time update. This could lead
+ * to us extending them with the wrong cached value resulting in
+ * incorrect timestamp values.
+ */
+ ice_ptp_mark_tx_tracker_stale(&pf->ptp.port.tx);
+}
+
+/**
* ice_ptp_read_time - Read the time from the device
* @pf: Board private structure
* @ts: timespec structure to hold the current time value
@@ -682,45 +1131,322 @@ static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
}
/**
- * ice_ptp_adjfine - Adjust clock increment rate
- * @info: the driver's PTP info structure
- * @scaled_ppm: Parts per million with 16-bit fractional field
+ * ice_base_incval - Get base timer increment value
+ * @pf: Board private structure
*
- * Adjust the frequency of the clock by the indicated scaled ppm from the
- * base frequency.
+ * Look up the base timer increment value for this device. The base increment
+ * value is used to define the nominal clock tick rate. This increment value
+ * is programmed during device initialization. It is also used as the basis
+ * for calculating adjustments using scaled_ppm.
*/
-static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+static u64 ice_base_incval(struct ice_pf *pf)
{
- struct ice_pf *pf = ptp_info_to_pf(info);
- u64 freq, divisor = 1000000ULL;
struct ice_hw *hw = &pf->hw;
- s64 incval, diff;
- int neg_adj = 0;
+ u64 incval;
+
+ if (ice_is_e810(hw))
+ incval = ICE_PTP_NOMINAL_INCVAL_E810;
+ else if (ice_e822_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
+ incval = ice_e822_nominal_incval(ice_e822_time_ref(hw));
+ else
+ incval = UNKNOWN_INCVAL_E822;
+
+ dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
+ incval);
+
+ return incval;
+}
+
+/**
+ * ice_ptp_check_tx_fifo - Check whether Tx FIFO is in an OK state
+ * @port: PTP port for which Tx FIFO is checked
+ */
+static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
+{
+ int quad = port->port_num / ICE_PORTS_PER_QUAD;
+ int offs = port->port_num % ICE_PORTS_PER_QUAD;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ u32 val, phy_sts;
int err;
- incval = ICE_PTP_NOMINAL_INCVAL_E810;
+ pf = ptp_port_to_pf(port);
+ hw = &pf->hw;
+
+ if (port->tx_fifo_busy_cnt == FIFO_OK)
+ return 0;
- if (scaled_ppm < 0) {
- neg_adj = 1;
- scaled_ppm = -scaled_ppm;
+ /* need to read FIFO state */
+ if (offs == 0 || offs == 1)
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO01_STATUS,
+ &val);
+ else
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_FIFO23_STATUS,
+ &val);
+
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to check port %d Tx FIFO, err %d\n",
+ port->port_num, err);
+ return err;
}
- while ((u64)scaled_ppm > div64_u64(U64_MAX, incval)) {
- /* handle overflow by scaling down the scaled_ppm and
- * the divisor, losing some precision
- */
- scaled_ppm >>= 2;
- divisor >>= 2;
+ if (offs & 0x1)
+ phy_sts = (val & Q_REG_FIFO13_M) >> Q_REG_FIFO13_S;
+ else
+ phy_sts = (val & Q_REG_FIFO02_M) >> Q_REG_FIFO02_S;
+
+ if (phy_sts & FIFO_EMPTY) {
+ port->tx_fifo_busy_cnt = FIFO_OK;
+ return 0;
}
- freq = (incval * (u64)scaled_ppm) >> 16;
- diff = div_u64(freq, divisor);
+ port->tx_fifo_busy_cnt++;
- if (neg_adj)
- incval -= diff;
- else
- incval += diff;
+ dev_dbg(ice_pf_to_dev(pf), "Try %d, port %d FIFO not empty\n",
+ port->tx_fifo_busy_cnt, port->port_num);
+
+ if (port->tx_fifo_busy_cnt == ICE_PTP_FIFO_NUM_CHECKS) {
+ dev_dbg(ice_pf_to_dev(pf),
+ "Port %d Tx FIFO still not empty; resetting quad %d\n",
+ port->port_num, quad);
+ ice_ptp_reset_ts_memory_quad_e822(hw, quad);
+ port->tx_fifo_busy_cnt = FIFO_OK;
+ return 0;
+ }
+
+ return -EAGAIN;
+}
+
+/**
+ * ice_ptp_wait_for_offsets - Check for valid Tx and Rx offsets
+ * @work: Pointer to the kthread_work structure for this task
+ *
+ * Check whether hardware has completed measuring the Tx and Rx offset values
+ * used to configure and enable vernier timestamp calibration.
+ *
+ * Once the offset in either direction is measured, configure the associated
+ * registers with the calibrated offset values and enable timestamping. The Tx
+ * and Rx directions are configured independently as soon as their associated
+ * offsets are known.
+ *
+ * This function reschedules itself until both Tx and Rx calibration have
+ * completed.
+ */
+static void ice_ptp_wait_for_offsets(struct kthread_work *work)
+{
+ struct ice_ptp_port *port;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int tx_err;
+ int rx_err;
+
+ port = container_of(work, struct ice_ptp_port, ov_work.work);
+ pf = ptp_port_to_pf(port);
+ hw = &pf->hw;
+
+ if (ice_is_reset_in_progress(pf->state)) {
+ /* wait for device driver to complete reset */
+ kthread_queue_delayed_work(pf->ptp.kworker,
+ &port->ov_work,
+ msecs_to_jiffies(100));
+ return;
+ }
+
+ tx_err = ice_ptp_check_tx_fifo(port);
+ if (!tx_err)
+ tx_err = ice_phy_cfg_tx_offset_e822(hw, port->port_num);
+ rx_err = ice_phy_cfg_rx_offset_e822(hw, port->port_num);
+ if (tx_err || rx_err) {
+ /* Tx and/or Rx offset not yet configured, try again later */
+ kthread_queue_delayed_work(pf->ptp.kworker,
+ &port->ov_work,
+ msecs_to_jiffies(100));
+ return;
+ }
+}
+
+/**
+ * ice_ptp_port_phy_stop - Stop timestamping for a PHY port
+ * @ptp_port: PTP port to stop
+ */
+static int
+ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
+{
+ struct ice_pf *pf = ptp_port_to_pf(ptp_port);
+ u8 port = ptp_port->port_num;
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (ice_is_e810(hw))
+ return 0;
+
+ mutex_lock(&ptp_port->ps_lock);
+
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+
+ err = ice_stop_phy_timer_e822(hw, port, true);
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
+ port, err);
+
+ mutex_unlock(&ptp_port->ps_lock);
+
+ return err;
+}
+
+/**
+ * ice_ptp_port_phy_restart - (Re)start and calibrate PHY timestamping
+ * @ptp_port: PTP port for which the PHY start is set
+ *
+ * Start the PHY timestamping block, and initiate Vernier timestamping
+ * calibration. If timestamping cannot be calibrated (such as if link is down)
+ * then disable the timestamping block instead.
+ */
+static int
+ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
+{
+ struct ice_pf *pf = ptp_port_to_pf(ptp_port);
+ u8 port = ptp_port->port_num;
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (ice_is_e810(hw))
+ return 0;
+
+ if (!ptp_port->link_up)
+ return ice_ptp_port_phy_stop(ptp_port);
+
+ mutex_lock(&ptp_port->ps_lock);
+
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+
+ /* temporarily disable Tx timestamps while calibrating PHY offset */
+ spin_lock(&ptp_port->tx.lock);
+ ptp_port->tx.calibrating = true;
+ spin_unlock(&ptp_port->tx.lock);
+ ptp_port->tx_fifo_busy_cnt = 0;
+
+ /* Start the PHY timer in Vernier mode */
+ err = ice_start_phy_timer_e822(hw, port);
+ if (err)
+ goto out_unlock;
+
+ /* Enable Tx timestamps right away */
+ spin_lock(&ptp_port->tx.lock);
+ ptp_port->tx.calibrating = false;
+ spin_unlock(&ptp_port->tx.lock);
+
+ kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);
+
+out_unlock:
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
+ port, err);
+
+ mutex_unlock(&ptp_port->ps_lock);
+
+ return err;
+}
+
+/**
+ * ice_ptp_link_change - Reconfigure PTP after link status change
+ * @pf: Board private structure
+ * @port: Port for which the PHY start is set
+ * @linkup: Link is up or down
+ */
+void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+{
+ struct ice_ptp_port *ptp_port;
+
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return;
+
+ if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS))
+ return;
+
+ ptp_port = &pf->ptp.port;
+ if (WARN_ON_ONCE(ptp_port->port_num != port))
+ return;
+
+ /* Update cached link status for this port immediately */
+ ptp_port->link_up = linkup;
+
+ /* E810 devices do not need to reconfigure the PHY */
+ if (ice_is_e810(&pf->hw))
+ return;
+ ice_ptp_port_phy_restart(ptp_port);
+}
+
+/**
+ * ice_ptp_tx_ena_intr - Enable or disable the Tx timestamp interrupt
+ * @pf: PF private structure
+ * @ena: bool value to enable or disable interrupt
+ * @threshold: Minimum number of packets at which the interrupt is triggered
+ *
+ * Utility function to enable or disable the Tx timestamp interrupt and to
+ * program its packet threshold
+ */
+static int ice_ptp_tx_ena_intr(struct ice_pf *pf, bool ena, u32 threshold)
+{
+ struct ice_hw *hw = &pf->hw;
+ int err = 0;
+ int quad;
+ u32 val;
+
+ ice_ptp_reset_ts_memory(hw);
+
+ for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ &val);
+ if (err)
+ break;
+
+ if (ena) {
+ val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
+ val |= ((threshold << Q_REG_TX_MEM_GBL_CFG_INTR_THR_S) &
+ Q_REG_TX_MEM_GBL_CFG_INTR_THR_M);
+ } else {
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ }
+
+ err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG,
+ val);
+ if (err)
+ break;
+ }
+
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n",
+ err);
+ return err;
+}
+
+/**
+ * ice_ptp_reset_phy_timestamping - Reset PHY timestamping block
+ * @pf: Board private structure
+ */
+static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
+{
+ ice_ptp_port_phy_restart(&pf->ptp.port);
+}
+
+/**
+ * ice_ptp_adjfine - Adjust clock increment rate
+ * @info: the driver's PTP info structure
+ * @scaled_ppm: Parts per million with 16-bit fractional field
+ *
+ * Adjust the frequency of the clock by the indicated scaled ppm from the
+ * base frequency.
+ */
+static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_hw *hw = &pf->hw;
+ u64 incval;
+ int err;
+
+ incval = adjust_by_scaled_ppm(ice_base_incval(pf), scaled_ppm);
err = ice_ptp_write_incval_locked(hw, incval);
if (err) {
dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
@@ -905,7 +1631,10 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
NSEC_PER_SEC) * NSEC_PER_SEC + phase;
- start_time -= E810_OUT_PROP_DELAY_NS;
+ if (ice_is_e810(hw))
+ start_time -= E810_OUT_PROP_DELAY_NS;
+ else
+ start_time -= ice_e822_pps_delay(ice_e822_time_ref(hw));
/* 2. Write TARGET time */
wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time));
@@ -1045,6 +1774,38 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
}
/**
+ * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC
+ * @info: the driver's PTP info structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ */
+static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq, int on)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_perout_channel clk_cfg = {0};
+ int err;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PPS:
+ clk_cfg.gpio_pin = PPS_PIN_INDEX;
+ clk_cfg.period = NSEC_PER_SEC;
+ clk_cfg.ena = !!on;
+
+ err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
+ break;
+ case PTP_CLK_REQ_EXTTS:
+ err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index,
+ TIME_SYNC_PIN_INDEX, rq->extts.flags);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
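For orientation (not part of the patch), the PTP_CLK_REQ_EXTTS case above is typically reached from user space via the PTP_EXTTS_REQUEST ioctl on the clock's character device; a hedged sketch:

/* Illustrative user-space sketch: request external timestamps on channel
 * 'index' of /dev/ptpN. Error handling is minimal on purpose.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

static int enable_extts(const char *dev, unsigned int index)
{
	struct ptp_extts_request req;
	int fd = open(dev, O_RDWR);
	int err;

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.index = index;
	req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;

	err = ioctl(fd, PTP_EXTTS_REQUEST, &req);
	close(fd);
	return err;
}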
+
+/**
* ice_ptp_gettimex64 - Get the time of the clock
* @info: the driver's PTP info structure
* @ts: timespec64 structure to hold the current time value
@@ -1088,6 +1849,12 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
struct ice_hw *hw = &pf->hw;
int err;
+ /* For Vernier mode, we need to recalibrate after new settime
+ * Start with disabling timestamp block
+ */
+ if (pf->ptp.port.link_up)
+ ice_ptp_port_phy_stop(&pf->ptp.port);
+
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
goto exit;
@@ -1100,10 +1867,14 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_unlock(hw);
if (!err)
- ice_ptp_update_cached_phctime(pf);
+ ice_ptp_reset_cached_phctime(pf);
/* Reenable periodic outputs */
ice_ptp_enable_all_clkout(pf);
+
+ /* Recalibrate and re-enable timestamp block */
+ if (pf->ptp.port.link_up)
+ ice_ptp_port_phy_restart(&pf->ptp.port);
exit:
if (err) {
dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
@@ -1121,9 +1892,12 @@ exit:
static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
{
struct timespec64 now, then;
+ int ret;
then = ns_to_timespec64(delta);
- ice_ptp_gettimex64(info, &now, NULL);
+ ret = ice_ptp_gettimex64(info, &now, NULL);
+ if (ret)
+ return ret;
now = timespec64_add(now, then);
return ice_ptp_settime64(info, (const struct timespec64 *)&now);
@@ -1172,12 +1946,107 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return err;
}
- ice_ptp_update_cached_phctime(pf);
+ ice_ptp_reset_cached_phctime(pf);
+
+ return 0;
+}
+
+#ifdef CONFIG_ICE_HWTS
+/**
+ * ice_ptp_get_syncdevicetime - Get the cross time stamp info
+ * @device: Current device time
+ * @system: System counter value read synchronously with device time
+ * @ctx: Context provided by timekeeping code
+ *
+ * Read device and system (ART) clock simultaneously and return the corrected
+ * clock values in ns.
+ */
+static int
+ice_ptp_get_syncdevicetime(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ struct ice_pf *pf = (struct ice_pf *)ctx;
+ struct ice_hw *hw = &pf->hw;
+ u32 hh_lock, hh_art_ctl;
+ int i;
+
+ /* Get the HW lock */
+ hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+ if (hh_lock & PFHH_SEM_BUSY_M) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
+ return -EFAULT;
+ }
+
+ /* Start the ART and device clock sync sequence */
+ hh_art_ctl = rd32(hw, GLHH_ART_CTL);
+ hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
+ wr32(hw, GLHH_ART_CTL, hh_art_ctl);
+
+#define MAX_HH_LOCK_TRIES 100
+
+ for (i = 0; i < MAX_HH_LOCK_TRIES; i++) {
+ /* Wait for sync to complete */
+ hh_art_ctl = rd32(hw, GLHH_ART_CTL);
+ if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
+ udelay(1);
+ continue;
+ } else {
+ u32 hh_ts_lo, hh_ts_hi, tmr_idx;
+ u64 hh_ts;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ /* Read ART time */
+ hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
+ hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
+ hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
+ *system = convert_art_ns_to_tsc(hh_ts);
+ /* Read Device source clock time */
+ hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
+ hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
+ hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
+ *device = ns_to_ktime(hh_ts);
+ break;
+ }
+ }
+ /* Release HW lock */
+ hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+ hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
+ wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
+
+ if (i == MAX_HH_LOCK_TRIES)
+ return -ETIMEDOUT;
return 0;
}
/**
+ * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp
+ * @info: the driver's PTP info structure
+ * @cts: The memory to fill the cross timestamp info
+ *
+ * Capture a cross timestamp between the ART and the device PTP hardware
+ * clock. Fill the cross timestamp information and report it back to the
+ * caller.
+ *
+ * This is only valid for E822 devices which have support for generating the
+ * cross timestamp via PCIe PTM.
+ *
+ * In order to correctly correlate the ART timestamp back to the TSC time, the
+ * CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
+ */
+static int
+ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info,
+ struct system_device_crosststamp *cts)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+
+ return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
+ pf, NULL, cts);
+}
+#endif /* CONFIG_ICE_HWTS */
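The getcrosststamp callback registered later in this patch (see ice_ptp_set_funcs_e822) is what services the PTP_SYS_OFFSET_PRECISE ioctl. A hedged user-space sketch of a consumer, for illustration only:

/* Illustrative user-space sketch: read a PTM-based cross timestamp from
 * /dev/ptpN and print the device/realtime pair.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

static int read_crosststamp(const char *dev)
{
	struct ptp_sys_offset_precise xts;
	int fd = open(dev, O_RDWR);
	int err;

	if (fd < 0)
		return -1;

	memset(&xts, 0, sizeof(xts));
	err = ioctl(fd, PTP_SYS_OFFSET_PRECISE, &xts);
	if (!err)
		printf("device %lld.%09u realtime %lld.%09u\n",
		       (long long)xts.device.sec, xts.device.nsec,
		       (long long)xts.sys_realtime.sec, xts.sys_realtime.nsec);
	close(fd);
	return err;
}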
+
+/**
* ice_ptp_get_ts_config - ioctl interface to read the timestamping config
* @pf: Board private structure
* @ifr: ioctl data
@@ -1205,10 +2074,6 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
static int
ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
- /* Reserved for future extensions. */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
ice_set_tx_tstamp(pf, false);
@@ -1238,7 +2103,6 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
- config->rx_filter = HWTSTAMP_FILTER_ALL;
ice_set_rx_tstamp(pf, true);
break;
default:
@@ -1270,8 +2134,8 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
if (err)
return err;
- /* Save these settings for future reference */
- pf->ptp.tstamp_config = config;
+ /* Return the actual configuration set */
+ config = pf->ptp.tstamp_config;
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
@@ -1290,26 +2154,31 @@ void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
+ struct skb_shared_hwtstamps *hwtstamps;
+ u64 ts_ns, cached_time;
u32 ts_high;
- u64 ts_ns;
- /* Populate timesync data into skb */
- if (rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID) {
- struct skb_shared_hwtstamps *hwtstamps;
+ if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
+ return;
- /* Use ice_ptp_extend_32b_ts directly, using the ring-specific
- * cached PHC value, rather than accessing the PF. This also
- * allows us to simply pass the upper 32bits of nanoseconds
- * directly. Calling ice_ptp_extend_40b_ts is unnecessary as
- * it would just discard these bits itself.
- */
- ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
- ts_ns = ice_ptp_extend_32b_ts(rx_ring->cached_phctime, ts_high);
+ cached_time = READ_ONCE(rx_ring->cached_phctime);
- hwtstamps = skb_hwtstamps(skb);
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
- }
+ /* Do not report a timestamp if we don't have a cached PHC time */
+ if (!cached_time)
+ return;
+
+ /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
+ * PHC value, rather than accessing the PF. This also allows us to
+ * simply pass the upper 32bits of nanoseconds directly. Calling
+ * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
+ * bits itself.
+ */
+ ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
+ ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
+
+ hwtstamps = skb_hwtstamps(skb);
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
}
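The 32-bit extension described in the comment above is not shown in this hunk; as a hedged sketch of the general idea (an assumption about the approach, not the driver's exact ice_ptp_extend_32b_ts() body):

/* Sketch only: extend a 32-bit nanosecond timestamp using the cached PHC
 * time, assuming the capture happened within roughly 2 seconds of the
 * cache update so the upper 32 bits can be inferred from the cache.
 */
static u64 example_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
{
	u32 phc_low = lower_32_bits(cached_phc_time);
	u32 delta = in_tstamp - phc_low;

	/* A huge forward delta really means the timestamp was captured just
	 * before the cache was taken; step backward instead of forward.
	 */
	if (delta > (U32_MAX / 2))
		return cached_phc_time - (phc_low - in_tstamp);

	return cached_phc_time + delta;
}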
/**
@@ -1365,41 +2234,59 @@ ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
}
/**
- * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
+ * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
* @pf: pointer to the PF instance
* @info: PTP clock capabilities
*/
static void
-ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
- /* Check if SMA controller is in the netlist */
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
- !ice_is_pca9575_present(&pf->hw))
- ice_clear_feature_support(pf, ICE_F_SMA_CTRL);
+ info->n_per_out = N_PER_OUT_E810;
- if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
- info->n_per_out = N_PER_OUT_E810T_NO_SMA;
- return;
- }
+ if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
+ info->n_ext_ts = N_EXT_TS_E810;
- info->n_per_out = N_PER_OUT_E810T;
- info->n_ext_ts = N_EXT_TS_E810;
- info->n_pins = NUM_PTP_PINS_E810T;
- info->verify = ice_verify_pin_e810t;
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
+ info->n_ext_ts = N_EXT_TS_E810;
+ info->n_pins = NUM_PTP_PINS_E810T;
+ info->verify = ice_verify_pin_e810t;
- /* Complete setup of the SMA pins */
- ice_ptp_setup_sma_pins_e810t(pf, info);
+ /* Complete setup of the SMA pins */
+ ice_ptp_setup_sma_pins_e810t(pf, info);
+ }
}
/**
- * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
+ * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs
+ * @pf: pointer to the PF instance
* @info: PTP clock capabilities
*/
-static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
+static void
+ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
{
- info->n_per_out = N_PER_OUT_E810;
- info->n_ext_ts = N_EXT_TS_E810;
+ info->pps = 1;
+ info->n_per_out = 0;
+ info->n_ext_ts = 1;
+}
+
+/**
+ * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support
+ * @pf: Board private structure
+ * @info: PTP info to fill
+ *
+ * Assign functions to the PTP capabilities structure for E822 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E822
+ * devices.
+ */
+static void
+ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+#ifdef CONFIG_ICE_HWTS
+ if (boot_cpu_has(X86_FEATURE_ART) &&
+ boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
+ info->getcrosststamp = ice_ptp_getcrosststamp_e822;
+#endif /* CONFIG_ICE_HWTS */
}
/**
@@ -1416,11 +2303,24 @@ static void
ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
info->enable = ice_ptp_gpio_enable_e810;
+ ice_ptp_setup_pins_e810(pf, info);
+}
- if (ice_is_e810t(&pf->hw))
- ice_ptp_setup_pins_e810t(pf, info);
- else
- ice_ptp_setup_pins_e810(info);
+/**
+ * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support
+ * @pf: Board private structure
+ * @info: PTP info to fill
+ *
+ * Assign functions to the PTP capabilities structure for E823 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E823
+ * devices.
+ */
+static void
+ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ info->enable = ice_ptp_gpio_enable_e823;
+ ice_ptp_setup_pins_e823(pf, info);
}
/**
@@ -1435,13 +2335,18 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
dev_driver_string(dev), dev_name(dev));
info->owner = THIS_MODULE;
- info->max_adj = 999999999;
+ info->max_adj = 100000000;
info->adjtime = ice_ptp_adjtime;
info->adjfine = ice_ptp_adjfine;
info->gettimex64 = ice_ptp_gettimex64;
info->settime64 = ice_ptp_settime64;
- ice_ptp_set_funcs_e810(pf, info);
+ if (ice_is_e810(&pf->hw))
+ ice_ptp_set_funcs_e810(pf, info);
+ else if (ice_is_e823(&pf->hw))
+ ice_ptp_set_funcs_e823(pf, info);
+ else
+ ice_ptp_set_funcs_e822(pf, info);
}
/**
@@ -1479,106 +2384,6 @@ static long ice_ptp_create_clock(struct ice_pf *pf)
}
/**
- * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port
- * @work: pointer to the kthread_work struct
- *
- * Process timestamps captured by the PHY associated with this port. To do
- * this, loop over each index with a waiting skb.
- *
- * If a given index has a valid timestamp, perform the following steps:
- *
- * 1) copy the timestamp out of the PHY register
- * 4) clear the timestamp valid bit in the PHY register
- * 5) unlock the index by clearing the associated in_use bit.
- * 2) extend the 40b timestamp value to get a 64bit timestamp
- * 3) send that timestamp to the stack
- *
- * After looping, if we still have waiting SKBs, then re-queue the work. This
- * may cause us effectively poll even when not strictly necessary. We do this
- * because it's possible a new timestamp was requested around the same time as
- * the interrupt. In some cases hardware might not interrupt us again when the
- * timestamp is captured.
- *
- * Note that we only take the tracking lock when clearing the bit and when
- * checking if we need to re-queue this task. The only place where bits can be
- * set is the hard xmit routine where an SKB has a request flag set. The only
- * places where we clear bits are this work function, or the periodic cleanup
- * thread. If the cleanup thread clears a bit we're processing we catch it
- * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
- * starts a new timestamp, we might not begin processing it right away but we
- * will notice it at the end when we re-queue the work item. If a Tx thread
- * starts a new timestamp just after this function exits without re-queuing,
- * the interrupt when the timestamp finishes should trigger. Avoiding holding
- * the lock for the entire function is important in order to ensure that Tx
- * threads do not get blocked while waiting for the lock.
- */
-static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
-{
- struct ice_ptp_port *ptp_port;
- struct ice_ptp_tx *tx;
- struct ice_pf *pf;
- struct ice_hw *hw;
- u8 idx;
-
- tx = container_of(work, struct ice_ptp_tx, work);
- if (!tx->init)
- return;
-
- ptp_port = container_of(tx, struct ice_ptp_port, tx);
- pf = ptp_port_to_pf(ptp_port);
- hw = &pf->hw;
-
- for_each_set_bit(idx, tx->in_use, tx->len) {
- struct skb_shared_hwtstamps shhwtstamps = {};
- u8 phy_idx = idx + tx->quad_offset;
- u64 raw_tstamp, tstamp;
- struct sk_buff *skb;
- int err;
-
- err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
- &raw_tstamp);
- if (err)
- continue;
-
- /* Check if the timestamp is invalid or stale */
- if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
- raw_tstamp == tx->tstamps[idx].cached_tstamp)
- continue;
-
- /* The timestamp is valid, so we'll go ahead and clear this
- * index and then send the timestamp up to the stack.
- */
- spin_lock(&tx->lock);
- tx->tstamps[idx].cached_tstamp = raw_tstamp;
- clear_bit(idx, tx->in_use);
- skb = tx->tstamps[idx].skb;
- tx->tstamps[idx].skb = NULL;
- spin_unlock(&tx->lock);
-
- /* it's (unlikely but) possible we raced with the cleanup
- * thread for discarding old timestamp requests.
- */
- if (!skb)
- continue;
-
- /* Extend the timestamp using cached PHC time */
- tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
- shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
-
- skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
- }
-
- /* Check if we still have work to do. If so, re-queue this task to
- * poll for remaining timestamps.
- */
- spin_lock(&tx->lock);
- if (!bitmap_empty(tx->in_use, tx->len))
- kthread_queue_work(pf->ptp.kworker, &tx->work);
- spin_unlock(&tx->lock);
-}
-
-/**
* ice_ptp_request_ts - Request an available Tx timestamp index
* @tx: the PTP Tx timestamp tracker to request from
* @skb: the SKB to associate with this timestamp request
@@ -1587,11 +2392,14 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
{
u8 idx;
- /* Check if this tracker is initialized */
- if (!tx->init)
+ spin_lock(&tx->lock);
+
+ /* Check that this tracker is accepting new timestamp requests */
+ if (!ice_ptp_is_tx_tracker_up(tx)) {
+ spin_unlock(&tx->lock);
return -1;
+ }
- spin_lock(&tx->lock);
/* Find and set the first available index */
idx = find_first_zero_bit(tx->in_use, tx->len);
if (idx < tx->len) {
@@ -1600,9 +2408,11 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
* requests.
*/
set_bit(idx, tx->in_use);
+ clear_bit(idx, tx->stale);
tx->tstamps[idx].start = jiffies;
tx->tstamps[idx].skb = skb_get(skb);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ ice_trace(tx_tstamp_request, skb, idx);
}
spin_unlock(&tx->lock);
@@ -1613,171 +2423,158 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
if (idx >= tx->len)
return -1;
else
- return idx + tx->quad_offset;
+ return idx + tx->offset;
}
/**
- * ice_ptp_process_ts - Spawn kthread work to handle timestamps
+ * ice_ptp_process_ts - Process the PTP Tx timestamps
* @pf: Board private structure
*
- * Queue work required to process the PTP Tx timestamps outside of interrupt
- * context.
+ * Returns true if timestamps are processed.
*/
-void ice_ptp_process_ts(struct ice_pf *pf)
+bool ice_ptp_process_ts(struct ice_pf *pf)
{
- if (pf->ptp.port.tx.init)
- kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work);
+ return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
}
-/**
- * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
- * @tx: Tx tracking structure to initialize
- *
- * Assumes that the length has already been initialized. Do not call directly,
- * use the ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
- */
-static int
-ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
+static void ice_ptp_periodic_work(struct kthread_work *work)
{
- tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
- if (!tx->tstamps)
- return -ENOMEM;
-
- tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
- if (!tx->in_use) {
- kfree(tx->tstamps);
- tx->tstamps = NULL;
- return -ENOMEM;
- }
+ struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
+ struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+ int err;
- spin_lock_init(&tx->lock);
- kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return;
- tx->init = 1;
+ err = ice_ptp_update_cached_phctime(pf);
- return 0;
+ /* Run twice a second, or retry sooner if the PHC update failed */
+ kthread_queue_delayed_work(ptp->kworker, &ptp->work,
+ msecs_to_jiffies(err ? 10 : 500));
}
/**
- * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
+ * ice_ptp_reset - Initialize PTP hardware clock support after reset
* @pf: Board private structure
- * @tx: the tracker to flush
*/
-static void
-ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+void ice_ptp_reset(struct ice_pf *pf)
{
- u8 idx;
+ struct ice_ptp *ptp = &pf->ptp;
+ struct ice_hw *hw = &pf->hw;
+ struct timespec64 ts;
+ int err, itr = 1;
+ u64 time_diff;
- for (idx = 0; idx < tx->len; idx++) {
- u8 phy_idx = idx + tx->quad_offset;
+ if (test_bit(ICE_PFR_REQ, pf->state))
+ goto pfr;
- spin_lock(&tx->lock);
- if (tx->tstamps[idx].skb) {
- dev_kfree_skb_any(tx->tstamps[idx].skb);
- tx->tstamps[idx].skb = NULL;
- }
- clear_bit(idx, tx->in_use);
- spin_unlock(&tx->lock);
+ if (!hw->func_caps.ts_func_info.src_tmr_owned)
+ goto reset_ts;
+
+ err = ice_ptp_init_phc(hw);
+ if (err)
+ goto err;
- /* Clear any potential residual timestamp in the PHY block */
- if (!pf->hw.reset_ongoing)
- ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
+ /* Acquire the global hardware lock */
+ if (!ice_ptp_lock(hw)) {
+ err = -EBUSY;
+ goto err;
}
-}
-/**
- * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
- * @pf: Board private structure
- * @tx: Tx tracking structure to release
- *
- * Free memory associated with the Tx timestamp tracker.
- */
-static void
-ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
-{
- tx->init = 0;
+ /* Write the increment time value to PHY and LAN */
+ err = ice_ptp_write_incval(hw, ice_base_incval(pf));
+ if (err) {
+ ice_ptp_unlock(hw);
+ goto err;
+ }
- kthread_cancel_work_sync(&tx->work);
+ /* Write the initial Time value to PHY and LAN using the cached PHC
+ * time before the reset and time difference between stopping and
+ * starting the clock.
+ */
+ if (ptp->cached_phc_time) {
+ time_diff = ktime_get_real_ns() - ptp->reset_time;
+ ts = ns_to_timespec64(ptp->cached_phc_time + time_diff);
+ } else {
+ ts = ktime_to_timespec64(ktime_get_real());
+ }
+ err = ice_ptp_write_init(pf, &ts);
+ if (err) {
+ ice_ptp_unlock(hw);
+ goto err;
+ }
- ice_ptp_flush_tx_tracker(pf, tx);
+ /* Release the global hardware lock */
+ ice_ptp_unlock(hw);
- kfree(tx->tstamps);
- tx->tstamps = NULL;
+ if (!ice_is_e810(hw)) {
+ /* Enable quad interrupts */
+ err = ice_ptp_tx_ena_intr(pf, true, itr);
+ if (err)
+ goto err;
+ }
- kfree(tx->in_use);
- tx->in_use = NULL;
+reset_ts:
+ /* Restart the PHY timestamping block */
+ ice_ptp_reset_phy_timestamping(pf);
- tx->len = 0;
-}
+pfr:
+ /* Init Tx structures */
+ if (ice_is_e810(&pf->hw)) {
+ err = ice_ptp_init_tx_e810(pf, &ptp->port.tx);
+ } else {
+ kthread_init_delayed_work(&ptp->port.ov_work,
+ ice_ptp_wait_for_offsets);
+ err = ice_ptp_init_tx_e822(pf, &ptp->port.tx,
+ ptp->port.port_num);
+ }
+ if (err)
+ goto err;
-/**
- * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
- * @pf: Board private structure
- * @tx: the Tx tracking structure to initialize
- *
- * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
- * port has its own block of timestamps, independent of the other ports.
- */
-static int
-ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
-{
- tx->quad = pf->hw.port_info->lport;
- tx->quad_offset = 0;
- tx->len = INDEX_PER_QUAD;
+ set_bit(ICE_FLAG_PTP, pf->flags);
- return ice_ptp_alloc_tx_tracker(tx);
+ /* Start periodic work going */
+ kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
+
+ dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
+ return;
+
+err:
+ dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
/**
- * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
- * @tx: PTP Tx tracker to clean up
- *
- * Loop through the Tx timestamp requests and see if any of them have been
- * waiting for a long time. Discard any SKBs that have been waiting for more
- * than 2 seconds. This is long enough to be reasonably sure that the
- * timestamp will never be captured. This might happen if the packet gets
- * discarded before it reaches the PHY timestamping block.
+ * ice_ptp_prepare_for_reset - Prepare PTP for reset
+ * @pf: Board private structure
*/
-static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
+void ice_ptp_prepare_for_reset(struct ice_pf *pf)
{
- u8 idx;
-
- if (!tx->init)
- return;
+ struct ice_ptp *ptp = &pf->ptp;
+ u8 src_tmr;
- for_each_set_bit(idx, tx->in_use, tx->len) {
- struct sk_buff *skb;
+ clear_bit(ICE_FLAG_PTP, pf->flags);
- /* Check if this SKB has been waiting for too long */
- if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
- continue;
+ /* Disable timestamping for both Tx and Rx */
+ ice_ptp_cfg_timestamp(pf, false);
- spin_lock(&tx->lock);
- skb = tx->tstamps[idx].skb;
- tx->tstamps[idx].skb = NULL;
- clear_bit(idx, tx->in_use);
- spin_unlock(&tx->lock);
+ kthread_cancel_delayed_work_sync(&ptp->work);
+ kthread_cancel_work_sync(&ptp->extts_work);
- /* Free the SKB after we've cleared the bit */
- dev_kfree_skb_any(skb);
- }
-}
+ if (test_bit(ICE_PFR_REQ, pf->state))
+ return;
-static void ice_ptp_periodic_work(struct kthread_work *work)
-{
- struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
- struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+ ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
- if (!test_bit(ICE_FLAG_PTP, pf->flags))
- return;
+ /* Disable periodic outputs */
+ ice_ptp_disable_all_clkout(pf);
- ice_ptp_update_cached_phctime(pf);
+ src_tmr = ice_get_ptp_src_clock_index(&pf->hw);
- ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);
+ /* Disable source clock */
+ wr32(&pf->hw, GLTSYN_ENA(src_tmr), (u32)~GLTSYN_ENA_TSYN_ENA_M);
- /* Run twice a second */
- kthread_queue_delayed_work(ptp->kworker, &ptp->work,
- msecs_to_jiffies(500));
+ /* Acquire PHC and system timer to restore after reset */
+ ptp->reset_time = ktime_get_real_ns();
}
/**
@@ -1790,27 +2587,16 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
*/
static int ice_ptp_init_owner(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
struct timespec64 ts;
- u8 src_idx;
- int err;
-
- wr32(hw, GLTSYN_SYNC_DLAY, 0);
-
- /* Clear some HW residue and enable source clock */
- src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ int err, itr = 1;
- /* Enable source clocks */
- wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
-
- /* Enable PHY time sync */
- err = ice_ptp_init_phy_e810(hw);
- if (err)
- goto err_exit;
-
- /* Clear event status indications for auxiliary pins */
- (void)rd32(hw, GLTSYN_STAT(src_idx));
+ err = ice_ptp_init_phc(hw);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to initialize PHC, err %d\n",
+ err);
+ return err;
+ }
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
@@ -1819,7 +2605,7 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
}
/* Write the increment time value to PHY and LAN */
- err = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+ err = ice_ptp_write_incval(hw, ice_base_incval(pf));
if (err) {
ice_ptp_unlock(hw);
goto err_exit;
@@ -1836,6 +2622,13 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Release the global hardware lock */
ice_ptp_unlock(hw);
+ if (!ice_is_e810(hw)) {
+ /* Enable quad interrupts */
+ err = ice_ptp_tx_ena_intr(pf, true, itr);
+ if (err)
+ goto err_exit;
+ }
+
/* Ensure we have a clock device */
err = ice_ptp_create_clock(pf);
if (err)
@@ -1849,72 +2642,106 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
err_clk:
pf->ptp.clock = NULL;
err_exit:
- dev_err(dev, "PTP failed to register clock, err %d\n", err);
-
return err;
}
/**
- * ice_ptp_init - Initialize the PTP support after device probe or reset
+ * ice_ptp_init_work - Initialize PTP work threads
+ * @pf: Board private structure
+ * @ptp: PF PTP structure
+ */
+static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
+{
+ struct kthread_worker *kworker;
+
+ /* Initialize work functions */
+ kthread_init_delayed_work(&ptp->work, ice_ptp_periodic_work);
+ kthread_init_work(&ptp->extts_work, ice_ptp_extts_work);
+
+ /* Allocate a kworker for handling work required for the ports
+ * connected to the PTP hardware clock.
+ */
+ kworker = kthread_create_worker(0, "ice-ptp-%s",
+ dev_name(ice_pf_to_dev(pf)));
+ if (IS_ERR(kworker))
+ return PTR_ERR(kworker);
+
+ ptp->kworker = kworker;
+
+ /* Start periodic work going */
+ kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_init_port - Initialize PTP port structure
+ * @pf: Board private structure
+ * @ptp_port: PTP port structure
+ */
+static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
+{
+ mutex_init(&ptp_port->ps_lock);
+
+ if (ice_is_e810(&pf->hw))
+ return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
+
+ kthread_init_delayed_work(&ptp_port->ov_work,
+ ice_ptp_wait_for_offsets);
+ return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num);
+}
+
+/**
+ * ice_ptp_init - Initialize PTP hardware clock support
* @pf: Board private structure
*
- * This function sets device up for PTP support. The first time it is run, it
- * will create a clock device. It does not create a clock device if one
- * already exists. It also reconfigures the device after a reset.
+ * Set up the device for interacting with the PTP hardware clock for all
+ * functions, both the function that owns the clock hardware, and the
+ * functions connected to the clock hardware.
+ *
+ * The clock owner will allocate and register a ptp_clock with the
+ * PTP_1588_CLOCK infrastructure. All functions allocate a kthread and work
+ * items used for asynchronous work such as Tx timestamps and periodic work.
*/
void ice_ptp_init(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
- struct kthread_worker *kworker;
+ struct ice_ptp *ptp = &pf->ptp;
struct ice_hw *hw = &pf->hw;
int err;
- /* PTP is currently only supported on E810 devices */
- if (!ice_is_e810(hw))
- return;
-
- /* Check if this PF owns the source timer */
+ /* If this function owns the clock hardware, it must allocate and
+ * configure the PTP clock device to represent it.
+ */
if (hw->func_caps.ts_func_info.src_tmr_owned) {
err = ice_ptp_init_owner(pf);
if (err)
- return;
+ goto err;
}
- /* Disable timestamping for both Tx and Rx */
- ice_ptp_cfg_timestamp(pf, false);
-
- /* Initialize the PTP port Tx timestamp tracker */
- ice_ptp_init_tx_e810(pf, &pf->ptp.port.tx);
-
- /* Initialize work functions */
- kthread_init_delayed_work(&pf->ptp.work, ice_ptp_periodic_work);
- kthread_init_work(&pf->ptp.extts_work, ice_ptp_extts_work);
+ ptp->port.port_num = hw->pf_id;
+ err = ice_ptp_init_port(pf, &ptp->port);
+ if (err)
+ goto err;
- /* Allocate a kworker for handling work required for the ports
- * connected to the PTP hardware clock.
- */
- kworker = kthread_create_worker(0, "ice-ptp-%s", dev_name(dev));
- if (IS_ERR(kworker)) {
- err = PTR_ERR(kworker);
- goto err_kworker;
- }
- pf->ptp.kworker = kworker;
+ /* Start the PHY timestamping block */
+ ice_ptp_reset_phy_timestamping(pf);
set_bit(ICE_FLAG_PTP, pf->flags);
+ err = ice_ptp_init_work(pf, ptp);
+ if (err)
+ goto err;
- /* Start periodic work going */
- kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
-
- dev_info(dev, "PTP init successful\n");
+ dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
return;
-err_kworker:
+err:
/* If we registered a PTP clock, release it */
if (pf->ptp.clock) {
- ptp_clock_unregister(pf->ptp.clock);
+ ptp_clock_unregister(ptp->clock);
pf->ptp.clock = NULL;
}
- dev_err(dev, "PTP failed %d\n", err);
+ clear_bit(ICE_FLAG_PTP, pf->flags);
+ dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
/**
@@ -1938,6 +2765,8 @@ void ice_ptp_release(struct ice_pf *pf)
kthread_cancel_delayed_work_sync(&pf->ptp.work);
+ ice_ptp_port_phy_stop(&pf->ptp.port);
+ mutex_destroy(&pf->ptp.port.ps_lock);
if (pf->ptp.kworker) {
kthread_destroy_worker(pf->ptp.kworker);
pf->ptp.kworker = NULL;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 53c15fc9d996..9cda2f43e0e5 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -49,6 +49,37 @@ struct ice_perout_channel {
* To allow multiple ports to access the shared register block independently,
* the blocks are split up so that indexes are assigned to each port based on
* hardware logical port number.
+ *
+ * The timestamp blocks are handled differently for E810- and E822-based
+ * devices. In E810 devices, each port has its own block of timestamps, while in
+ * E822 there is a need to logically break the block of registers into smaller
+ * chunks based on the port number to avoid collisions.
+ *
+ * Example for port 5 in E810:
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * |register|register|register|register|register|register|register|register|
+ * | block | block | block | block | block | block | block | block |
+ * | for | for | for | for | for | for | for | for |
+ * | port 0 | port 1 | port 2 | port 3 | port 4 | port 5 | port 6 | port 7 |
+ * +--------+--------+--------+--------+--------+--------+--------+--------+
+ * ^^
+ * ||
+ * |--- quad offset is always 0
+ * ---- quad number
+ *
+ * Example for port 5 in E822:
+ * +-----------------------------+-----------------------------+
+ * | register block for quad 0 | register block for quad 1 |
+ * |+------+------+------+------+|+------+------+------+------+|
+ * ||port 0|port 1|port 2|port 3|||port 0|port 1|port 2|port 3||
+ * |+------+------+------+------+|+------+------+------+------+|
+ * +-----------------------------+-------^---------------------+
+ * ^ |
+ * | --- quad offset*
+ * ---- quad number
+ *
+ * * PHY port 5 is port 1 in quad 1
+ *
*/
/**
@@ -62,9 +93,14 @@ struct ice_perout_channel {
* we discard old requests that were not fulfilled within a 2 second time
* window.
* Timestamp values in the PHY are read only and do not get cleared except at
- * hardware reset or when a new timestamp value is captured. The cached_tstamp
- * field is used to detect the case where a new timestamp has not yet been
- * captured, ensuring that we avoid sending stale timestamp data to the stack.
+ * hardware reset or when a new timestamp value is captured.
+ *
+ * Some PHY types do not provide a "ready" bitmap indicating which timestamp
+ * indexes are valid. In these cases, we use a cached_tstamp to keep track of
+ * the last timestamp we read for a given index. If the current timestamp
+ * value is the same as the cached value, we assume a new timestamp hasn't
+ * been captured. This avoids reporting stale timestamps to the stack. This is
+ * only done if the verify_cached flag is set in the ice_ptp_tx structure.
*/
struct ice_tx_tstamp {
struct sk_buff *skb;
@@ -74,42 +110,57 @@ struct ice_tx_tstamp {
/**
* struct ice_ptp_tx - Tracking structure for all Tx timestamp requests on a port
- * @work: work function to handle processing of Tx timestamps
- * @lock: lock to prevent concurrent write to in_use bitmap
+ * @lock: lock to prevent concurrent access to fields of this struct
* @tstamps: array of len to store outstanding requests
* @in_use: bitmap of len to indicate which slots are in use
- * @quad: which quad the timestamps are captured in
- * @quad_offset: offset into timestamp block of the quad to get the real index
+ * @stale: bitmap of len to indicate slots which have stale timestamps
+ * @block: which memory block (quad or port) the timestamps are captured in
+ * @offset: offset into timestamp block to get the real index
* @len: length of the tstamps and in_use fields.
* @init: if true, the tracker is initialized;
+ * @calibrating: if true, the PHY is calibrating the Tx offset. During this
+ * window, timestamps are temporarily disabled.
+ * @verify_cached: if true, verify new timestamp differs from last read value
*/
struct ice_ptp_tx {
- struct kthread_work work;
spinlock_t lock; /* lock protecting in_use bitmap */
struct ice_tx_tstamp *tstamps;
unsigned long *in_use;
- u8 quad;
- u8 quad_offset;
+ unsigned long *stale;
+ u8 block;
+ u8 offset;
u8 len;
- u8 init;
+ u8 init : 1;
+ u8 calibrating : 1;
+ u8 verify_cached : 1;
};
/* Quad and port information for initializing timestamp blocks */
#define INDEX_PER_QUAD 64
-#define INDEX_PER_PORT (INDEX_PER_QUAD / ICE_PORTS_PER_QUAD)
+#define INDEX_PER_PORT_E822 16
+#define INDEX_PER_PORT_E810 64
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
*
- * This structure contains PTP data related to the external ports. Currently
- * it is used for tracking the Tx timestamps of a port. In the future this
- * structure will also hold information for the E822 port initialization
- * logic.
+ * This structure contains data indicating whether a single external port is
+ * ready for PTP functionality. It is used to track the port initialization
+ * and determine when the port's PHY offset is valid.
*
* @tx: Tx timestamp tracking for this port
+ * @ov_work: delayed work task for tracking when PHY offset is valid
+ * @ps_lock: mutex used to protect the overall PTP PHY start procedure
+ * @link_up: indicates whether the link is up
+ * @tx_fifo_busy_cnt: number of times the Tx FIFO was busy
+ * @port_num: the port number this structure represents
*/
struct ice_ptp_port {
struct ice_ptp_tx tx;
+ struct kthread_delayed_work ov_work;
+ struct mutex ps_lock; /* protects overall PTP PHY start procedure */
+ bool link_up;
+ u8 tx_fifo_busy_cnt;
+ u8 port_num;
};
#define GLTSYN_TGT_H_IDX_MAX 4
@@ -120,6 +171,7 @@ struct ice_ptp_port {
* @work: delayed work function for periodic tasks
 * @extts_work: work function for handling external timestamp events
* @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @cached_phc_jiffies: jiffies when cached_phc_time was last updated
* @ext_ts_chan: the external timestamp channel in use
* @ext_ts_irq: the external timestamp IRQ in use
* @kworker: kwork thread for handling periodic work
@@ -127,12 +179,20 @@ struct ice_ptp_port {
* @info: structure defining PTP hardware capabilities
* @clock: pointer to registered PTP clock device
* @tstamp_config: hardware timestamping configuration
+ * @reset_time: kernel time after clock stop on reset
+ * @tx_hwtstamp_skipped: number of Tx time stamp requests skipped
+ * @tx_hwtstamp_timeouts: number of Tx skbs discarded with no time stamp
+ * @tx_hwtstamp_flushed: number of Tx skbs flushed due to interface closed
+ * @tx_hwtstamp_discarded: number of Tx skbs discarded due to cached PHC time
+ * being too old to correctly extend timestamp
+ * @late_cached_phc_updates: number of times cached PHC update is late
*/
struct ice_ptp {
struct ice_ptp_port port;
struct kthread_delayed_work work;
struct kthread_work extts_work;
u64 cached_phc_time;
+ unsigned long cached_phc_jiffies;
u8 ext_ts_chan;
u8 ext_ts_irq;
struct kthread_worker *kworker;
@@ -140,6 +200,12 @@ struct ice_ptp {
struct ptp_clock_info info;
struct ptp_clock *clock;
struct hwtstamp_config tstamp_config;
+ u64 reset_time;
+ u32 tx_hwtstamp_skipped;
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_flushed;
+ u32 tx_hwtstamp_discarded;
+ u32 late_cached_phc_updates;
};
#define __ptp_port_to_ptp(p) \
@@ -152,9 +218,15 @@ struct ice_ptp {
#define ptp_info_to_pf(i) \
container_of(__ptp_info_to_ptp((i)), struct ice_pf, ptp)
+#define PFTSYN_SEM_BYTES 4
#define PTP_SHARED_CLK_IDX_VALID BIT(31)
+#define TS_CMD_MASK 0xF
+#define SYNC_EXEC_CMD 0x3
#define ICE_PTP_TS_VALID BIT(0)
+#define FIFO_EMPTY BIT(2)
+#define FIFO_OK 0xFF
+#define ICE_PTP_FIFO_NUM_CHECKS 5
/* Per-channel register definitions */
#define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8))
#define GLTSYN_AUX_IN(_chan, _idx) (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8))
@@ -173,23 +245,28 @@ struct ice_ptp {
#define N_EXT_TS_E810 3
#define N_PER_OUT_E810 4
#define N_PER_OUT_E810T 3
-#define N_PER_OUT_E810T_NO_SMA 2
-#define N_EXT_TS_E810_NO_SMA 2
+#define N_PER_OUT_NO_SMA_E810T 2
+#define N_EXT_TS_NO_SMA_E810T 2
+#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
struct ice_pf;
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
+void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
int ice_get_ptp_clock_index(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
-void ice_ptp_process_ts(struct ice_pf *pf);
+bool ice_ptp_process_ts(struct ice_pf *pf);
void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
+void ice_ptp_reset(struct ice_pf *pf);
+void ice_ptp_prepare_for_reset(struct ice_pf *pf);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
+void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
{
@@ -201,6 +278,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
return -EOPNOTSUPP;
}
+static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { }
static inline int ice_get_ptp_clock_index(struct ice_pf *pf)
{
return -1;
@@ -212,11 +290,19 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
return -1;
}
-static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
+static inline bool ice_ptp_process_ts(struct ice_pf *pf)
+{
+ return true;
+}
static inline void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
+static inline void ice_ptp_reset(struct ice_pf *pf) { }
+static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf) { }
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
+static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+{
+}
#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
#endif /* _ICE_PTP_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
new file mode 100644
index 000000000000..4109aa3b2fcd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -0,0 +1,374 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_PTP_CONSTS_H_
+#define _ICE_PTP_CONSTS_H_
+
+/* Constant definitions related to the hardware clock used for PTP 1588
+ * features and functionality.
+ */
+/* Constants defined for the PTP 1588 clock hardware. */
+
+/* struct ice_time_ref_info_e822
+ *
+ * E822 hardware can use different sources as the reference for the PTP
+ * hardware clock. Each clock has different characteristics such as a slightly
+ * different frequency, etc.
+ *
+ * This lookup table defines several constants that depend on the current time
+ * reference. See the struct ice_time_ref_info_e822 for information about the
+ * meaning of each constant.
+ */
+const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ] = {
+ /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+ {
+ /* pll_freq */
+ 823437500, /* 823.4375 MHz PLL */
+ /* nominal_incval */
+ 0x136e44fabULL,
+ /* pps_delay */
+ 11,
+ },
+
+ /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ {
+ /* pll_freq */
+ 783360000, /* 783.36 MHz */
+ /* nominal_incval */
+ 0x146cc2177ULL,
+ /* pps_delay */
+ 12,
+ },
+
+ /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ {
+ /* pll_freq */
+ 796875000, /* 796.875 MHz */
+ /* nominal_incval */
+ 0x141414141ULL,
+ /* pps_delay */
+ 12,
+ },
+
+ /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ {
+ /* pll_freq */
+ 816000000, /* 816 MHz */
+ /* nominal_incval */
+ 0x139b9b9baULL,
+ /* pps_delay */
+ 12,
+ },
+
+ /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ {
+ /* pll_freq */
+ 830078125, /* 830.078125 MHz */
+ /* nominal_incval */
+ 0x134679aceULL,
+ /* pps_delay */
+ 11,
+ },
+
+ /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ {
+ /* pll_freq */
+ 783360000, /* 783.36 MHz */
+ /* nominal_incval */
+ 0x146cc2177ULL,
+ /* pps_delay */
+ 12,
+ },
+};
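As a sanity check on the nominal_incval column above, it can be derived from the PLL frequency under the assumption that the increment is nanoseconds per tick in Q32.32 fixed point; the helper below is illustrative only and not part of the driver:

/* Illustrative only: incval = 2^32 * 10^9 / pll_freq, i.e. nanoseconds per
 * PLL tick with 32 fractional bits. For 823437500 Hz this yields roughly
 * 0x136e44fab, matching the 25 MHz TimeRef entry above.
 */
static u64 example_nominal_incval(u64 pll_freq_hz)
{
	return div64_u64(1000000000ULL << 32, pll_freq_hz);
}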
+
+const struct ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
+ /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+ {
+ /* refclk_pre_div */
+ 1,
+ /* feedback_div */
+ 197,
+ /* frac_n_div */
+ 2621440,
+ /* post_pll_div */
+ 6,
+ },
+
+ /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 223,
+ /* frac_n_div */
+ 524288,
+ /* post_pll_div */
+ 7,
+ },
+
+ /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 223,
+ /* frac_n_div */
+ 524288,
+ /* post_pll_div */
+ 7,
+ },
+
+ /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 159,
+ /* frac_n_div */
+ 1572864,
+ /* post_pll_div */
+ 6,
+ },
+
+ /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ {
+ /* refclk_pre_div */
+ 5,
+ /* feedback_div */
+ 159,
+ /* frac_n_div */
+ 1572864,
+ /* post_pll_div */
+ 6,
+ },
+
+ /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ {
+ /* refclk_pre_div */
+ 10,
+ /* feedback_div */
+ 223,
+ /* frac_n_div */
+ 524288,
+ /* post_pll_div */
+ 7,
+ },
+};
+
+/* struct ice_vernier_info_e822
+ *
+ * E822 hardware calibrates the delay of the timestamp indication from the
+ * actual packet transmission or reception during the initialization of the
+ * PHY. To do this, the hardware mechanism uses some conversions between the
+ * various clocks within the PHY block. This table defines constants used to
+ * calculate the correct conversion ratios in the PHY registers.
+ *
+ * Many of the values relate to the PAR/PCS clock conversion registers. For
+ * these registers, a value of 0 means that the associated register is not
+ * used by this link speed, and that the register should be cleared by writing
+ * 0. Other values specify the clock frequency in Hz.
+ */
+const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
+ /* ICE_PTP_LNK_SPD_1G */
+ {
+ /* tx_par_clk */
+ 31250000, /* 31.25 MHz */
+ /* rx_par_clk */
+ 31250000, /* 31.25 MHz */
+ /* tx_pcs_clk */
+ 125000000, /* 125 MHz */
+ /* rx_pcs_clk */
+ 125000000, /* 125 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 0, /* unused */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* tx_fixed_delay */
+ 25140,
+ /* pmd_adj_divisor */
+ 10000000,
+ /* rx_fixed_delay */
+ 17372,
+ },
+ /* ICE_PTP_LNK_SPD_10G */
+ {
+ /* tx_par_clk */
+ 257812500, /* 257.8125 MHz */
+ /* rx_par_clk */
+ 257812500, /* 257.8125 MHz */
+ /* tx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* rx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 0, /* unused */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* tx_fixed_delay */
+ 6938,
+ /* pmd_adj_divisor */
+ 82500000,
+ /* rx_fixed_delay */
+ 6212,
+ },
+ /* ICE_PTP_LNK_SPD_25G */
+ {
+ /* tx_par_clk */
+ 644531250, /* 644.53125 MHZ */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* rx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 0, /* unused */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* tx_fixed_delay */
+ 2778,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 2491,
+ },
+ /* ICE_PTP_LNK_SPD_25G_RS */
+ {
+ /* tx_par_clk */
+ 0, /* unused */
+ /* rx_par_clk */
+ 0, /* unused */
+ /* tx_pcs_clk */
+ 0, /* unused */
+ /* rx_pcs_clk */
+ 0, /* unused */
+ /* tx_desk_rsgb_par */
+ 161132812, /* 161.1328125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_par */
+ 161132812, /* 161.1328125 MHz Reed Solomon gearbox */
+ /* tx_desk_rsgb_pcs */
+ 97656250, /* 97.65625 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_pcs */
+ 97656250, /* 97.65625 MHz Reed Solomon gearbox */
+ /* tx_fixed_delay */
+ 3928,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 29535,
+ },
+ /* ICE_PTP_LNK_SPD_40G */
+ {
+ /* tx_par_clk */
+ 257812500,
+ /* rx_par_clk */
+ 257812500,
+ /* tx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* rx_pcs_clk */
+ 156250000, /* 156.25 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 156250000, /* 156.25 MHz deskew clock */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 156250000, /* 156.25 MHz deskew clock */
+ /* tx_fixed_delay */
+ 5666,
+ /* pmd_adj_divisor */
+ 82500000,
+ /* rx_fixed_delay */
+ 4244,
+ },
+ /* ICE_PTP_LNK_SPD_50G */
+ {
+ /* tx_par_clk */
+ 644531250, /* 644.53125 MHZ */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHZ */
+ /* tx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* rx_pcs_clk */
+ 390625000, /* 390.625 MHz */
+ /* tx_desk_rsgb_par */
+ 0, /* unused */
+ /* rx_desk_rsgb_par */
+ 195312500, /* 195.3125 MHz deskew clock */
+ /* tx_desk_rsgb_pcs */
+ 0, /* unused */
+ /* rx_desk_rsgb_pcs */
+ 195312500, /* 195.3125 MHz deskew clock */
+ /* tx_fixed_delay */
+ 2778,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 2868,
+ },
+ /* ICE_PTP_LNK_SPD_50G_RS */
+ {
+ /* tx_par_clk */
+ 0, /* unused */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_pcs_clk */
+ 0, /* unused */
+ /* rx_pcs_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_desk_rsgb_par */
+ 322265625, /* 322.265625 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_par */
+ 322265625, /* 322.265625 MHz Reed Solomon gearbox */
+ /* tx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* tx_fixed_delay */
+ 2095,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 14524,
+ },
+ /* ICE_PTP_LNK_SPD_100G_RS */
+ {
+ /* tx_par_clk */
+ 0, /* unused */
+ /* rx_par_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_pcs_clk */
+ 0, /* unused */
+ /* rx_pcs_clk */
+ 644531250, /* 644.53125 MHz */
+ /* tx_desk_rsgb_par */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_par */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* tx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* rx_desk_rsgb_pcs */
+ 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ /* tx_fixed_delay */
+ 1620,
+ /* pmd_adj_divisor */
+ 206250000,
+ /* rx_fixed_delay */
+ 7775,
+ },
+};
+
+#endif /* _ICE_PTP_CONSTS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 29f947c0cd2e..a38614d21ea8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */
+#include <linux/delay.h>
#include "ice_common.h"
#include "ice_ptp_hw.h"
+#include "ice_ptp_consts.h"
+#include "ice_cgu_regs.h"
/* Low level functions for interacting with and managing the device clock used
* for the Precision Time Protocol.
@@ -29,6 +32,15 @@
*
* For E810 devices, the increment frequency is 812.5 MHz
*
+ * For E822 devices the clock can be derived from different sources, and the
+ * increment has an effective frequency of one of the following:
+ * - 823.4375 MHz
+ * - 783.36 MHz
+ * - 796.875 MHz
+ * - 816 MHz
+ * - 830.078125 MHz
+ * - 783.36 MHz
+ *
* The hardware captures timestamps in the PHY for incoming packets, and for
* outgoing packets on request. To support this, the PHY maintains a timer
* that matches the lower 64 bits of the global source timer.
@@ -37,6 +49,24 @@
* shadow registers are used to prepare the desired initial values. A special
* sync command is issued to trigger copying from the shadow registers into
* the appropriate source and PHY registers simultaneously.
+ *
+ * The driver supports devices which have different PHYs with subtly different
+ * mechanisms to program and control the timers. We divide the devices into
+ * families named after the first major device, E810 and similar devices, and
+ * E822 and similar devices.
+ *
+ * - E822 based devices have additional support for fine grained Vernier
+ * calibration which requires significant setup
+ * - The layout of timestamp data in the PHY register blocks is different
+ * - The way timer synchronization commands are issued is different.
+ *
+ * To support this, very low level functions have an e810 or e822 suffix
+ * indicating what type of device they work on. Higher level abstractions for
+ * tasks that can be done on both devices do not have the suffix and will
+ * correctly look up the appropriate low level function when running.
+ *
+ * Functions which only make sense on a single device family may not have
+ * a suitable generic implementation.
*/
/**
@@ -51,6 +81,2380 @@ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
return hw->func_caps.ts_func_info.tmr_index_assoc;
}
+/**
+ * ice_ptp_read_src_incval - Read source timer increment value
+ * @hw: pointer to HW struct
+ *
+ * Read the increment value of the source timer and return it.
+ */
+static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
+{
+ u32 lo, hi;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+
+ return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo;
+}
+
+/**
+ * ice_ptp_src_cmd - Prepare source timer for a timer command
+ * @hw: pointer to HW structure
+ * @cmd: Timer command
+ *
+ * Prepare the source timer for an upcoming timer sync command.
+ */
+static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+ cmd_val = tmr_idx << SEL_CPK_SRC;
+
+ switch (cmd) {
+ case INIT_TIME:
+ cmd_val |= GLTSYN_CMD_INIT_TIME;
+ break;
+ case INIT_INCVAL:
+ cmd_val |= GLTSYN_CMD_INIT_INCVAL;
+ break;
+ case ADJ_TIME:
+ cmd_val |= GLTSYN_CMD_ADJ_TIME;
+ break;
+ case ADJ_TIME_AT_TIME:
+ cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
+ break;
+ case READ_TIME:
+ cmd_val |= GLTSYN_CMD_READ_TIME;
+ break;
+ }
+
+ wr32(hw, GLTSYN_CMD, cmd_val);
+}
+
+/**
+ * ice_ptp_exec_tmr_cmd - Execute all prepared timer commands
+ * @hw: pointer to HW struct
+ *
+ * Write the SYNC_EXEC_CMD bit to the GLTSYN_CMD_SYNC register, and flush the
+ * write immediately. This triggers the hardware to begin executing all of the
+ * source and PHY timer commands synchronously.
+ */
+static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
+{
+ wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
+ ice_flush(hw);
+}
+
+/* E822 family functions
+ *
+ * The following functions operate on the E822 family of devices.
+ */
+
+/**
+ * ice_fill_phy_msg_e822 - Fill message data for a PHY register access
+ * @msg: the PHY message buffer to fill in
+ * @port: the port to access
+ * @offset: the register offset
+ */
+static void
+ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
+{
+ int phy_port, phy, quadtype;
+
+ phy_port = port % ICE_PORTS_PER_PHY;
+ phy = port / ICE_PORTS_PER_PHY;
+ quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE;
+
+ if (quadtype == 0) {
+ msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
+ msg->msg_addr_high = P_Q0_H(P_0_BASE + offset, phy_port);
+ } else {
+ msg->msg_addr_low = P_Q1_L(P_4_BASE + offset, phy_port);
+ msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
+ }
+
+ if (phy == 0)
+ msg->dest_dev = rmn_0;
+ else if (phy == 1)
+ msg->dest_dev = rmn_1;
+ else
+ msg->dest_dev = rmn_2;
+}
+
+/**
+ * ice_is_64b_phy_reg_e822 - Check if this is a 64bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 64bit register
+ *
+ * Checks if the provided low address is one of the known 64bit PHY values
+ * represented as two 32bit registers. If it is, return the appropriate high
+ * register offset to use.
+ */
+static bool ice_is_64b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case P_REG_PAR_PCS_TX_OFFSET_L:
+ *high_addr = P_REG_PAR_PCS_TX_OFFSET_U;
+ return true;
+ case P_REG_PAR_PCS_RX_OFFSET_L:
+ *high_addr = P_REG_PAR_PCS_RX_OFFSET_U;
+ return true;
+ case P_REG_PAR_TX_TIME_L:
+ *high_addr = P_REG_PAR_TX_TIME_U;
+ return true;
+ case P_REG_PAR_RX_TIME_L:
+ *high_addr = P_REG_PAR_RX_TIME_U;
+ return true;
+ case P_REG_TOTAL_TX_OFFSET_L:
+ *high_addr = P_REG_TOTAL_TX_OFFSET_U;
+ return true;
+ case P_REG_TOTAL_RX_OFFSET_L:
+ *high_addr = P_REG_TOTAL_RX_OFFSET_U;
+ return true;
+ case P_REG_UIX66_10G_40G_L:
+ *high_addr = P_REG_UIX66_10G_40G_U;
+ return true;
+ case P_REG_UIX66_25G_100G_L:
+ *high_addr = P_REG_UIX66_25G_100G_U;
+ return true;
+ case P_REG_TX_CAPTURE_L:
+ *high_addr = P_REG_TX_CAPTURE_U;
+ return true;
+ case P_REG_RX_CAPTURE_L:
+ *high_addr = P_REG_RX_CAPTURE_U;
+ return true;
+ case P_REG_TX_TIMER_INC_PRE_L:
+ *high_addr = P_REG_TX_TIMER_INC_PRE_U;
+ return true;
+ case P_REG_RX_TIMER_INC_PRE_L:
+ *high_addr = P_REG_RX_TIMER_INC_PRE_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_is_40b_phy_reg_e822 - Check if this is a 40bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 40bit value
+ *
+ * Checks if the provided low address is one of the known 40bit PHY values
+ * split into two registers with the lower 8 bits in the low register and the
+ * upper 32 bits in the high register. If it is, return the appropriate high
+ * register offset to use.
+ */
+static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case P_REG_TIMETUS_L:
+ *high_addr = P_REG_TIMETUS_U;
+ return true;
+ case P_REG_PAR_RX_TUS_L:
+ *high_addr = P_REG_PAR_RX_TUS_U;
+ return true;
+ case P_REG_PAR_TX_TUS_L:
+ *high_addr = P_REG_PAR_TX_TUS_U;
+ return true;
+ case P_REG_PCS_RX_TUS_L:
+ *high_addr = P_REG_PCS_RX_TUS_U;
+ return true;
+ case P_REG_PCS_TX_TUS_L:
+ *high_addr = P_REG_PCS_TX_TUS_U;
+ return true;
+ case P_REG_DESK_PAR_RX_TUS_L:
+ *high_addr = P_REG_DESK_PAR_RX_TUS_U;
+ return true;
+ case P_REG_DESK_PAR_TX_TUS_L:
+ *high_addr = P_REG_DESK_PAR_TX_TUS_U;
+ return true;
+ case P_REG_DESK_PCS_RX_TUS_L:
+ *high_addr = P_REG_DESK_PCS_RX_TUS_U;
+ return true;
+ case P_REG_DESK_PCS_TX_TUS_L:
+ *high_addr = P_REG_DESK_PCS_TX_TUS_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_read_phy_reg_e822 - Read a PHY register
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @offset: PHY register offset to read
+ * @val: on return, the contents read from the PHY
+ *
+ * Read a PHY register for the given port over the device sideband queue.
+ */
+int
+ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ ice_fill_phy_msg_e822(&msg, port, offset);
+ msg.opcode = ice_sbq_msg_rd;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ *val = msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_read_64b_phy_reg_e822 - Read a 64bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 64bit value from the PHY registers
+ *
+ * Reads the two registers associated with a 64bit value and returns it in the
+ * val pointer. The offset always specifies the lower register offset to use.
+ * The high offset is looked up. This function only operates on registers
+ * known to be two parts of a 64bit value.
+ */
+static int
+ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
+{
+ u32 low, high;
+ u16 high_addr;
+ int err;
+
+ /* Only operate on registers known to be split into two 32bit
+ * registers.
+ */
+ if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
+ low_addr);
+ return -EINVAL;
+ }
+
+ err = ice_read_phy_reg_e822(hw, port, low_addr, &low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register 0x%08x, err %d\n",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_read_phy_reg_e822(hw, port, high_addr, &high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register 0x%08x, err %d\n",
+ high_addr, err);
+ return err;
+ }
+
+ *val = (u64)high << 32 | low;
+
+ return 0;
+}
+
+/**
+ * ice_write_phy_reg_e822 - Write a PHY register
+ * @hw: pointer to the HW struct
+ * @port: PHY port to write to
+ * @offset: PHY register offset to write
+ * @val: The value to write to the register
+ *
+ * Write a PHY register for the given port over the device sideband queue.
+ */
+int
+ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ ice_fill_phy_msg_e822(&msg, port, offset);
+ msg.opcode = ice_sbq_msg_wr;
+ msg.data = val;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_write_40b_phy_reg_e822 - Write a 40b value to the PHY
+ * @hw: pointer to the HW struct
+ * @port: port to write to
+ * @low_addr: offset of the low register
+ * @val: 40b value to write
+ *
+ * Write the provided 40b value to the two associated registers by splitting
+ * it up into two chunks, the lower 8 bits and the upper 32 bits.
+ */
+static int
+ice_write_40b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+{
+ u32 low, high;
+ u16 high_addr;
+ int err;
+
+ /* Only operate on registers known to be split into a lower 8 bit
+ * register and an upper 32 bit register.
+ */
+ if (!ice_is_40b_phy_reg_e822(low_addr, &high_addr)) {
+ ice_debug(hw, ICE_DBG_PTP, "Invalid 40b register addr 0x%08x\n",
+ low_addr);
+ return -EINVAL;
+ }
+
+ low = (u32)(val & P_REG_40B_LOW_M);
+ high = (u32)(val >> P_REG_40B_HIGH_S);
+
+ err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_write_64b_phy_reg_e822 - Write a 64bit value to PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: the contents of the 64bit value to write to PHY
+ *
+ * Write the 64bit value to the two associated 32bit PHY registers. The offset
+ * is always specified as the lower register, and the high address is looked
+ * up. This function only operates on registers known to be two parts of
+ * a 64bit value.
+ */
+static int
+ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
+{
+ u32 low, high;
+ u16 high_addr;
+ int err;
+
+ /* Only operate on registers known to be split into two 32bit
+ * registers.
+ */
+ if (!ice_is_64b_phy_reg_e822(low_addr, &high_addr)) {
+ ice_debug(hw, ICE_DBG_PTP, "Invalid 64b register addr 0x%08x\n",
+ low_addr);
+ return -EINVAL;
+ }
+
+ low = lower_32_bits(val);
+ high = upper_32_bits(val);
+
+ err = ice_write_phy_reg_e822(hw, port, low_addr, low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_phy_reg_e822(hw, port, high_addr, high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_fill_quad_msg_e822 - Fill message data for quad register access
+ * @msg: the PHY message buffer to fill in
+ * @quad: the quad to access
+ * @offset: the register offset
+ *
+ * Fill a message buffer for accessing a register in a quad shared between
+ * multiple PHYs.
+ */
+static void
+ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
+{
+ u32 addr;
+
+ msg->dest_dev = rmn_0;
+
+ if ((quad % ICE_NUM_QUAD_TYPE) == 0)
+ addr = Q_0_BASE + offset;
+ else
+ addr = Q_1_BASE + offset;
+
+ msg->msg_addr_low = lower_16_bits(addr);
+ msg->msg_addr_high = upper_16_bits(addr);
+}
+
+/**
+ * ice_read_quad_reg_e822 - Read a PHY quad register
+ * @hw: pointer to the HW struct
+ * @quad: quad to read from
+ * @offset: quad register offset to read
+ * @val: on return, the contents read from the quad
+ *
+ * Read a quad register over the device sideband queue. Quad registers are
+ * shared between multiple PHYs.
+ */
+int
+ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ if (quad >= ICE_MAX_QUAD)
+ return -EINVAL;
+
+ ice_fill_quad_msg_e822(&msg, quad, offset);
+ msg.opcode = ice_sbq_msg_rd;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ *val = msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_write_quad_reg_e822 - Write a PHY quad register
+ * @hw: pointer to the HW struct
+ * @quad: quad to write to
+ * @offset: quad register offset to write
+ * @val: The value to write to the register
+ *
+ * Write a quad register over the device sideband queue. Quad registers are
+ * shared between multiple PHYs.
+ */
+int
+ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int err;
+
+ if (quad >= ICE_MAX_QUAD)
+ return -EINVAL;
+
+ ice_fill_quad_msg_e822(&msg, quad, offset);
+ msg.opcode = ice_sbq_msg_wr;
+ msg.data = val;
+
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_read_phy_tstamp_e822 - Read a PHY timestamp out of the quad block
+ * @hw: pointer to the HW struct
+ * @quad: the quad to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the two associated registers in the
+ * quad memory block that is shared between the internal PHYs of the E822
+ * family of devices.
+ */
+static int
+ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
+{
+ u16 lo_addr, hi_addr;
+ u32 lo, hi;
+ int err;
+
+ lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
+ hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
+
+ err = ice_read_quad_reg_e822(hw, quad, lo_addr, &lo);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ err = ice_read_quad_reg_e822(hw, quad, hi_addr, &hi);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ /* For E822 based internal PHYs, the timestamp is reported with the
+ * lower 8 bits in the low register, and the upper 32 bits in the high
+ * register.
+ */
+ *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
+
+ return 0;
+}
+
+/**
+ * ice_clear_phy_tstamp_e822 - Clear a timestamp from the quad block
+ * @hw: pointer to the HW struct
+ * @quad: the quad to read from
+ * @idx: the timestamp index to reset
+ *
+ * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
+ * shared between the internal PHYs on the E822 devices.
+ */
+static int
+ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
+{
+ u16 lo_addr, hi_addr;
+ int err;
+
+ lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
+ hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
+
+ err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_reset_ts_memory_quad_e822 - Clear all timestamps from the quad block
+ * @hw: pointer to the HW struct
+ * @quad: the quad to read from
+ *
+ * Clear all timestamps from the PHY quad block that is shared between the
+ * internal PHYs on the E822 devices.
+ */
+void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad)
+{
+ ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, Q_REG_TS_CTRL_M);
+ ice_write_quad_reg_e822(hw, quad, Q_REG_TS_CTRL, ~(u32)Q_REG_TS_CTRL_M);
+}
+
+/**
+ * ice_ptp_reset_ts_memory_e822 - Clear all timestamps from all quad blocks
+ * @hw: pointer to the HW struct
+ */
+static void ice_ptp_reset_ts_memory_e822(struct ice_hw *hw)
+{
+ unsigned int quad;
+
+ for (quad = 0; quad < ICE_MAX_QUAD; quad++)
+ ice_ptp_reset_ts_memory_quad_e822(hw, quad);
+}
+
+/**
+ * ice_read_cgu_reg_e822 - Read a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to read
+ * @val: storage for register value read
+ *
+ * Read the contents of a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ */
+static int
+ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
+{
+ struct ice_sbq_msg_input cgu_msg;
+ int err;
+
+ cgu_msg.opcode = ice_sbq_msg_rd;
+ cgu_msg.dest_dev = cgu;
+ cgu_msg.msg_addr_low = addr;
+ cgu_msg.msg_addr_high = 0x0;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ *val = cgu_msg.data;
+
+ return err;
+}
+
+/**
+ * ice_write_cgu_reg_e822 - Write a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to write
+ * @val: value to write into the register
+ *
+ * Write the specified value to a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ */
+static int
+ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
+{
+ struct ice_sbq_msg_input cgu_msg;
+ int err;
+
+ cgu_msg.opcode = ice_sbq_msg_wr;
+ cgu_msg.dest_dev = cgu;
+ cgu_msg.msg_addr_low = addr;
+ cgu_msg.msg_addr_high = 0x0;
+ cgu_msg.data = val;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ return err;
+}
+
+/**
+ * ice_clk_freq_str - Convert time_ref_freq to string
+ * @clk_freq: Clock frequency
+ *
+ * Convert the specified TIME_REF clock frequency to a string.
+ */
+static const char *ice_clk_freq_str(u8 clk_freq)
+{
+ switch ((enum ice_time_ref_freq)clk_freq) {
+ case ICE_TIME_REF_FREQ_25_000:
+ return "25 MHz";
+ case ICE_TIME_REF_FREQ_122_880:
+ return "122.88 MHz";
+ case ICE_TIME_REF_FREQ_125_000:
+ return "125 MHz";
+ case ICE_TIME_REF_FREQ_153_600:
+ return "153.6 MHz";
+ case ICE_TIME_REF_FREQ_156_250:
+ return "156.25 MHz";
+ case ICE_TIME_REF_FREQ_245_760:
+ return "245.76 MHz";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_clk_src_str - Convert time_ref_src to string
+ * @clk_src: Clock source
+ *
+ * Convert the specified clock source to its string name.
+ */
+static const char *ice_clk_src_str(u8 clk_src)
+{
+ switch ((enum ice_clk_src)clk_src) {
+ case ICE_CLK_SRC_TCX0:
+ return "TCX0";
+ case ICE_CLK_SRC_TIME_REF:
+ return "TIME_REF";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_cfg_cgu_pll_e822 - Configure the Clock Generation Unit
+ * @hw: pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCX0)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ */
+static int
+ice_cfg_cgu_pll_e822(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ union tspll_ro_bwm_lf bwm_lf;
+ union nac_cgu_dword19 dw19;
+ union nac_cgu_dword22 dw22;
+ union nac_cgu_dword24 dw24;
+ union nac_cgu_dword9 dw9;
+ int err;
+
+ if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return -EINVAL;
+ }
+
+ if (clk_src == ICE_CLK_SRC_TCX0 &&
+ clk_freq != ICE_TIME_REF_FREQ_25_000) {
+ dev_warn(ice_hw_to_dev(hw),
+ "TCX0 only supports 25 MHz frequency\n");
+ return -EINVAL;
+ }
+
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD9, &dw9.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.field.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.field.time_ref_sel),
+ ice_clk_freq_str(dw9.field.time_ref_freq_sel),
+ bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (dw24.field.ts_pll_enable) {
+ dw24.field.ts_pll_enable = 0;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ dw9.field.time_ref_freq_sel = clk_freq;
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD9, dw9.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL feedback divisor */
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD19, &dw19.val);
+ if (err)
+ return err;
+
+ dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
+ dw19.field.tspll_ndivratio = 1;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD19, dw19.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL post divisor */
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD22, &dw22.val);
+ if (err)
+ return err;
+
+ dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
+ dw22.field.time1588clk_sel_div2 = 0;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD22, dw22.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL pre divisor and clock source */
+ err = ice_read_cgu_reg_e822(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
+ dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
+ dw24.field.time_ref_sel = clk_src;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Finally, enable the PLL */
+ dw24.field.ts_pll_enable = 1;
+
+ err = ice_write_cgu_reg_e822(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Wait to verify if the PLL locks */
+ usleep_range(1000, 5000);
+
+ err = ice_read_cgu_reg_e822(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ if (!bwm_lf.field.plllock_true_lock_cri) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.field.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.field.time_ref_sel),
+ ice_clk_freq_str(dw9.field.time_ref_freq_sel),
+ bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ return 0;
+}
+
+/**
+ * ice_init_cgu_e822 - Initialize CGU with settings from firmware
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the Clock Generation Unit of the E822 device.
+ */
+static int ice_init_cgu_e822(struct ice_hw *hw)
+{
+ struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
+ union tspll_cntr_bist_settings cntr_bist;
+ int err;
+
+ err = ice_read_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ &cntr_bist.val);
+ if (err)
+ return err;
+
+ /* Disable sticky lock detection so lock err reported is accurate */
+ cntr_bist.field.i_plllock_sel_0 = 0;
+ cntr_bist.field.i_plllock_sel_1 = 0;
+
+ err = ice_write_cgu_reg_e822(hw, TSPLL_CNTR_BIST_SETTINGS,
+ cntr_bist.val);
+ if (err)
+ return err;
+
+ /* Configure the CGU PLL using the parameters from the function
+ * capabilities.
+ */
+ err = ice_cfg_cgu_pll_e822(hw, ts_info->time_ref,
+ (enum ice_clk_src)ts_info->clk_src);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_set_vernier_wl - Set the window length for vernier calibration
+ * @hw: pointer to the HW struct
+ *
+ * Set the window length used for the vernier port calibration process.
+ */
+static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
+{
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ int err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_WL,
+ PTP_VERNIER_WL);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to set vernier window length for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_init_phc_e822 - Perform E822 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform PHC initialization steps specific to E822 devices.
+ */
+static int ice_ptp_init_phc_e822(struct ice_hw *hw)
+{
+ int err;
+ u32 regval;
+
+ /* Enable reading switch and PHY registers over the sideband queue */
+#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
+#define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
+ regval = rd32(hw, PF_SB_REM_DEV_CTL);
+ regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
+ PF_SB_REM_DEV_CTL_PHY0);
+ wr32(hw, PF_SB_REM_DEV_CTL, regval);
+
+ /* Initialize the Clock Generation Unit */
+ err = ice_init_cgu_e822(hw);
+ if (err)
+ return err;
+
+ /* Set window length for all the ports */
+ return ice_ptp_set_vernier_wl(hw);
+}
+
+/**
+ * ice_ptp_prep_phy_time_e822 - Prepare PHY port with initial time
+ * @hw: pointer to the HW struct
+ * @time: Time to initialize the PHY port clocks to
+ *
+ * Program the PHY port registers with a new initial time value. The port
+ * clock will be initialized once the driver issues an INIT_TIME sync
+ * command. The time value is the upper 32 bits of the PHY timer, usually in
+ * units of nominal nanoseconds.
+ */
+static int
+ice_ptp_prep_phy_time_e822(struct ice_hw *hw, u32 time)
+{
+ u64 phy_time;
+ u8 port;
+ int err;
+
+ /* The time represents the upper 32 bits of the PHY timer, so we need
+ * to shift to account for this when programming.
+ */
+ phy_time = (u64)time << 32;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ /* Tx case */
+ err = ice_write_64b_phy_reg_e822(hw, port,
+ P_REG_TX_TIMER_INC_PRE_L,
+ phy_time);
+ if (err)
+ goto exit_err;
+
+ /* Rx case */
+ err = ice_write_64b_phy_reg_e822(hw, port,
+ P_REG_RX_TIMER_INC_PRE_L,
+ phy_time);
+ if (err)
+ goto exit_err;
+ }
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
+ port, err);
+
+ return err;
+}
+
+/**
+ * ice_ptp_prep_port_adj_e822 - Prepare a single port for time adjust
+ * @hw: pointer to HW struct
+ * @port: Port number to be programmed
+ * @time: time in cycles to adjust the port Tx and Rx clocks
+ *
+ * Program the port for an atomic adjustment by writing the Tx and Rx timer
+ * registers. The atomic adjustment won't be completed until the driver issues
+ * an ADJ_TIME command.
+ *
+ * Note that time is not in units of nanoseconds. It is in clock time
+ * including the lower sub-nanosecond portion of the port timer.
+ *
+ * Negative adjustments are supported using 2s complement arithmetic.
+ */
+int
+ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
+{
+ u32 l_time, u_time;
+ int err;
+
+ l_time = lower_32_bits(time);
+ u_time = upper_32_bits(time);
+
+ /* Tx case */
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ /* Rx case */
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
+ port, err);
+ return err;
+}
+
+/**
+ * ice_ptp_prep_phy_adj_e822 - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment in nanoseconds
+ *
+ * Prepare the PHY ports for an atomic time adjustment by programming the PHY
+ * Tx and Rx port registers. The actual adjustment is completed by issuing an
+ * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
+ */
+static int
+ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
+{
+ s64 cycles;
+ u8 port;
+
+ /* The port clock supports adjustment of the sub-nanosecond portion of
+ * the clock. We shift the provided adjustment in nanoseconds to
+ * calculate the appropriate adjustment to program into the PHY ports.
+ */
+ if (adj > 0)
+ cycles = (s64)adj << 32;
+ else
+ cycles = -(((s64)-adj) << 32);
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ int err;
+
+ err = ice_ptp_prep_port_adj_e822(hw, port, cycles);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_prep_phy_incval_e822 - Prepare PHY ports for time adjustment
+ * @hw: pointer to HW struct
+ * @incval: new increment value to prepare
+ *
+ * Prepare each of the PHY ports for a new increment value by programming the
+ * port's TIMETUS registers. The new increment value will be updated after
+ * issuing an INIT_INCVAL command.
+ */
+static int
+ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
+{
+ int err;
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L,
+ incval);
+ if (err)
+ goto exit_err;
+ }
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
+ port, err);
+
+ return err;
+}
+
+/**
+ * ice_ptp_read_port_capture - Read a port's local time capture
+ * @hw: pointer to HW struct
+ * @port: Port number to read
+ * @tx_ts: on return, the Tx port time capture
+ * @rx_ts: on return, the Rx port time capture
+ *
+ * Read the port's Tx and Rx local time capture values.
+ *
+ * Note this has no equivalent for the E810 devices.
+ */
+static int
+ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
+{
+ int err;
+
+ /* Tx case */
+ err = ice_read_64b_phy_reg_e822(hw, port, P_REG_TX_CAPTURE_L, tx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "tx_init = 0x%016llx\n",
+ (unsigned long long)*tx_ts);
+
+ /* Rx case */
+ err = ice_read_64b_phy_reg_e822(hw, port, P_REG_RX_CAPTURE_L, rx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "rx_init = 0x%016llx\n",
+ (unsigned long long)*rx_ts);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @port: Port to which cmd has to be sent
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the requested port for an upcoming timer sync command.
+ *
+ * Note there is no equivalent of this operation on E810, as that device
+ * always handles all external PHYs internally.
+ */
+static int
+ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val, val;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+ cmd_val = tmr_idx << SEL_PHY_SRC;
+ switch (cmd) {
+ case INIT_TIME:
+ cmd_val |= PHY_CMD_INIT_TIME;
+ break;
+ case INIT_INCVAL:
+ cmd_val |= PHY_CMD_INIT_INCVAL;
+ break;
+ case ADJ_TIME:
+ cmd_val |= PHY_CMD_ADJ_TIME;
+ break;
+ case READ_TIME:
+ cmd_val |= PHY_CMD_READ_TIME;
+ break;
+ case ADJ_TIME_AT_TIME:
+ cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
+ break;
+ }
+
+ /* Tx case */
+ /* Read, modify, write */
+ err = ice_read_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Modify necessary bits only and perform write */
+ val &= ~TS_CMD_MASK;
+ val |= cmd_val;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Rx case */
+ /* Read, modify, write */
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Modify necessary bits only and perform write */
+ val &= ~TS_CMD_MASK;
+ val |= cmd_val;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
+ * @hw: pointer to the HW struct
+ * @cmd: timer command to prepare
+ *
+ * Prepare all ports connected to this device for an upcoming timer sync
+ * command.
+ */
+static int
+ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ int err;
+
+ err = ice_ptp_one_port_cmd(hw, port, cmd);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* E822 Vernier calibration functions
+ *
+ * The following functions are used as part of the vernier calibration of
+ * a port. This calibration increases the precision of the timestamps on the
+ * port.
+ */
+
+/**
+ * ice_phy_get_speed_and_fec_e822 - Get link speed and FEC based on serdes mode
+ * @hw: pointer to HW struct
+ * @port: the port to read from
+ * @link_out: if non-NULL, holds link speed on success
+ * @fec_out: if non-NULL, holds FEC algorithm on success
+ *
+ * Read the serdes data for the PHY port and extract the link speed and FEC
+ * algorithm.
+ */
+static int
+ice_phy_get_speed_and_fec_e822(struct ice_hw *hw, u8 port,
+ enum ice_ptp_link_spd *link_out,
+ enum ice_ptp_fec_mode *fec_out)
+{
+ enum ice_ptp_link_spd link;
+ enum ice_ptp_fec_mode fec;
+ u32 serdes;
+ int err;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_LINK_SPEED, &serdes);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read serdes info\n");
+ return err;
+ }
+
+ /* Determine the FEC algorithm */
+ fec = (enum ice_ptp_fec_mode)P_REG_LINK_SPEED_FEC_MODE(serdes);
+
+ serdes &= P_REG_LINK_SPEED_SERDES_M;
+
+ /* Determine the link speed */
+ if (fec == ICE_PTP_FEC_MODE_RS_FEC) {
+ switch (serdes) {
+ case ICE_PTP_SERDES_25G:
+ link = ICE_PTP_LNK_SPD_25G_RS;
+ break;
+ case ICE_PTP_SERDES_50G:
+ link = ICE_PTP_LNK_SPD_50G_RS;
+ break;
+ case ICE_PTP_SERDES_100G:
+ link = ICE_PTP_LNK_SPD_100G_RS;
+ break;
+ default:
+ return -EIO;
+ }
+ } else {
+ switch (serdes) {
+ case ICE_PTP_SERDES_1G:
+ link = ICE_PTP_LNK_SPD_1G;
+ break;
+ case ICE_PTP_SERDES_10G:
+ link = ICE_PTP_LNK_SPD_10G;
+ break;
+ case ICE_PTP_SERDES_25G:
+ link = ICE_PTP_LNK_SPD_25G;
+ break;
+ case ICE_PTP_SERDES_40G:
+ link = ICE_PTP_LNK_SPD_40G;
+ break;
+ case ICE_PTP_SERDES_50G:
+ link = ICE_PTP_LNK_SPD_50G;
+ break;
+ default:
+ return -EIO;
+ }
+ }
+
+ if (link_out)
+ *link_out = link;
+ if (fec_out)
+ *fec_out = fec;
+
+ return 0;
+}
+
+/**
+ * ice_phy_cfg_lane_e822 - Configure PHY quad for single/multi-lane timestamp
+ * @hw: pointer to HW struct
+ * @port: the port to configure the quad for
+ */
+static void ice_phy_cfg_lane_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ int err;
+ u32 val;
+ u8 quad;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, NULL);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to get PHY link speed, err %d\n",
+ err);
+ return;
+ }
+
+ quad = port / ICE_PORTS_PER_QUAD;
+
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEM_GBL_CFG, err %d\n",
+ err);
+ return;
+ }
+
+ if (link_spd >= ICE_PTP_LNK_SPD_40G)
+ val &= ~Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
+ else
+ val |= Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M;
+
+ err = ice_write_quad_reg_e822(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_MEM_GBL_CFG, err %d\n",
+ err);
+ return;
+ }
+}
+
+/**
+ * ice_phy_cfg_uix_e822 - Configure Serdes UI to TU conversion for E822
+ * @hw: pointer to the HW structure
+ * @port: the port to configure
+ *
+ * Program the conversion ratio of Serdes clock "unit intervals" (UIs) to PHC
+ * hardware clock time units (TUs). That is, determine the number of TUs per
+ * serdes unit interval, and program the UIX registers with this conversion.
+ *
+ * This conversion is used as part of the calibration process when determining
+ * the additional error of a timestamp vs the real time of transmission or
+ * receipt of the packet.
+ *
+ * Hardware uses the number of TUs per 66 UIs, written to the UIX registers
+ * for the two main serdes clock rates, 10G/40G and 25G/100G serdes clocks.
+ *
+ * To calculate the conversion ratio, we use the following facts:
+ *
+ * a) the clock frequency in Hz (cycles per second)
+ * b) the number of TUs per cycle (the increment value of the clock)
+ * c) 1 second per 1 billion nanoseconds
+ * d) the duration of 66 UIs in nanoseconds
+ *
+ * Given these facts, we can use the following table to work out what ratios
+ * to multiply in order to get the number of TUs per 66 UIs:
+ *
+ * cycles | 1 second     | incval (TUs) | nanoseconds
+ * -------+--------------+--------------+-------------
+ * second | 1 billion ns | cycle        | 66 UIs
+ *
+ * To perform the multiplication using integers without too much loss of
+ * precision, we can use the following equation:
+ *
+ * (freq * incval * 6600 LINE_UI) / (100 * 1 billion)
+ *
+ * We scale up to using 6600 UI instead of 66 in order to avoid fractional
+ * nanosecond UIs (66 UI at 10G/40G is 6.4 ns)
+ *
+ * The increment value has a maximum expected range of about 34 bits, while
+ * the frequency value is about 29 bits. Multiplying these values shouldn't
+ * overflow the 64 bits. However, we must then further multiply them again by
+ * the Serdes unit interval duration. To avoid overflow here, we split the
+ * overall divide by 1e11 into a divide by 256 (shift down by 8 bits) and
+ * a divide by 390,625,000. This does lose some precision, but avoids
+ * miscalculation due to arithmetic overflow.
+ */
+static int ice_phy_cfg_uix_e822(struct ice_hw *hw, u8 port)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, uix;
+ int err;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second divided by 256 */
+ tu_per_sec = (cur_freq * clk_incval) >> 8;
+
+#define LINE_UI_10G_40G 640 /* 6600 UIs is 640 nanoseconds at 10Gb/40Gb */
+#define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
+
+ /* Program the 10Gb/40Gb conversion ratio */
+ uix = div_u64(tu_per_sec * LINE_UI_10G_40G, 390625000);
+
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
+ uix);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_10G_40G, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Program the 25Gb/100Gb conversion ratio */
+ uix = div_u64(tu_per_sec * LINE_UI_25G_100G, 390625000);
+
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
+ uix);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write UIX66_25G_100G, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_phy_cfg_parpcs_e822 - Configure TUs per PAR/PCS clock cycle
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ *
+ * Configure the number of TUs for the PAR and PCS clocks used as part of the
+ * timestamp calibration process. This depends on the link speed, as the PHY
+ * uses different markers depending on the speed.
+ *
+ * 1Gb/10Gb/25Gb:
+ * - Tx/Rx PAR/PCS markers
+ *
+ * 25Gb RS:
+ * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
+ *
+ * 40Gb/50Gb:
+ * - Tx/Rx PAR/PCS markers
+ * - Rx Deskew PAR/PCS markers
+ *
+ * 50G RS and 100GB RS:
+ * - Tx/Rx Reed Solomon gearbox PAR/PCS markers
+ * - Rx Deskew PAR/PCS markers
+ * - Tx PAR/PCS markers
+ *
+ * To calculate the conversion, we use the PHC clock frequency (cycles per
+ * second), the increment value (TUs per cycle), and the related PHY clock
+ * frequency to calculate the TUs per unit of the PHY link clock. The
+ * following table shows how the units convert:
+ *
+ * cycles | TUs   | second
+ * -------+-------+--------
+ * second | cycle | cycles
+ *
+ * For each conversion register, look up the appropriate frequency from the
+ * e822 PAR/PCS table and calculate the TUs per unit of that clock. Program
+ * this to the appropriate register, preparing hardware to perform timestamp
+ * calibration to calculate the total Tx or Rx offset to adjust the timestamp
+ * in order to calibrate for the internal PHY delays.
+ *
+ * Note that the increment value ranges up to ~34 bits, and the clock
+ * frequency is ~29 bits, so multiplying them together should fit within the
+ * 64 bit arithmetic.
+ */
+static int ice_phy_cfg_parpcs_e822(struct ice_hw *hw, u8 port)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, phy_tus;
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ int err;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per cycle of the PHC clock */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* For each PHY conversion register, look up the appropriate link
+ * speed frequency and determine the TUs per that clock's cycle time.
+ * Split this into a high and low value and then program the
+ * appropriate register. If that link speed does not use the
+ * associated register, write zeros to clear it instead.
+ */
+
+ /* P_REG_PAR_TX_TUS */
+ if (e822_vernier[link_spd].tx_par_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_par_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_PAR_RX_TUS */
+ if (e822_vernier[link_spd].rx_par_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_par_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PAR_RX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_PCS_TX_TUS */
+ if (e822_vernier[link_spd].tx_pcs_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_pcs_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_PCS_RX_TUS */
+ if (e822_vernier[link_spd].rx_pcs_clk)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_pcs_clk);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_PCS_RX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PAR_TX_TUS */
+ if (e822_vernier[link_spd].tx_desk_rsgb_par)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_desk_rsgb_par);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PAR_RX_TUS */
+ if (e822_vernier[link_spd].rx_desk_rsgb_par)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_desk_rsgb_par);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PAR_RX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PCS_TX_TUS */
+ if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].tx_desk_rsgb_pcs);
+ else
+ phy_tus = 0;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_TX_TUS_L,
+ phy_tus);
+ if (err)
+ return err;
+
+ /* P_REG_DESK_PCS_RX_TUS */
+ if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
+ phy_tus = div_u64(tu_per_sec,
+ e822_vernier[link_spd].rx_desk_rsgb_pcs);
+ else
+ phy_tus = 0;
+
+ return ice_write_40b_phy_reg_e822(hw, port, P_REG_DESK_PCS_RX_TUS_L,
+ phy_tus);
+}
+
+/**
+ * ice_calc_fixed_tx_offset_e822 - Calculate the fixed Tx offset for a port
+ * @hw: pointer to the HW struct
+ * @link_spd: the Link speed to calculate for
+ *
+ * Calculate the fixed offset due to known static latency data.
+ */
+static u64
+ice_calc_fixed_tx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* Calculate number of TUs to add for the fixed Tx latency. Since the
+ * latency measurement is in 1/100th of a nanosecond, we need to
+ * multiply by tu_per_sec and then divide by 1e11. This calculation
+ * overflows 64 bit integer arithmetic, so break it up into two
+ * divisions by 1e4 first then by 1e7.
+ */
+ fixed_offset = div_u64(tu_per_sec, 10000);
+ fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
+ fixed_offset = div_u64(fixed_offset, 10000000);
+
+ return fixed_offset;
+}
+
+/**
+ * ice_phy_cfg_tx_offset_e822 - Configure total Tx timestamp offset
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Program the P_REG_TOTAL_TX_OFFSET register with the total number of TUs to
+ * adjust Tx timestamps by. This is calculated by combining some known static
+ * latency along with the Vernier offset computations done by hardware.
+ *
+ * This function will not return successfully until the Tx offset calculations
+ * have been completed, which requires waiting until at least one packet has
+ * been transmitted by the device. It is safe to call this function
+ * periodically until calibration succeeds, as it will only program the offset
+ * once.
+ *
+ * To avoid overflow, when calculating the offset based on the known static
+ * latency values, we use measurements in 1/100th of a nanosecond, and divide
+ * the TUs per second up front. This avoids overflow while allowing
+ * calculation of the adjustment using integer arithmetic.
+ *
+ * Returns zero on success, -EBUSY if the hardware vernier offset
+ * calibration has not completed, or another error code on failure.
+ */
+int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ u64 total_offset, val;
+ int err;
+ u32 reg;
+
+ /* Nothing to do if we've already programmed the offset */
+ err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OR, &reg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OR for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ if (reg)
+ return 0;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &reg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ if (!(reg & P_REG_TX_OV_STATUS_OV_M))
+ return -EBUSY;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ total_offset = ice_calc_fixed_tx_offset_e822(hw, link_spd);
+
+ /* Read the first Vernier offset from the PHY register and add it to
+ * the total offset.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_1G ||
+ link_spd == ICE_PTP_LNK_SPD_10G ||
+ link_spd == ICE_PTP_LNK_SPD_25G ||
+ link_spd == ICE_PTP_LNK_SPD_25G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_40G ||
+ link_spd == ICE_PTP_LNK_SPD_50G) {
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_PCS_TX_OFFSET_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+ }
+
+ /* For Tx, we only need to use the second Vernier offset for
+ * multi-lane link speeds with RS-FEC. The lanes will always be
+ * aligned.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_50G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_100G_RS) {
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_TX_TIME_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+ }
+
+ /* Now that the total offset has been calculated, program it to the
+ * PHY and indicate that the Tx offset is ready. After this,
+ * timestamps will be enabled.
+ */
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_TX_OFFSET_L,
+ total_offset);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 1);
+ if (err)
+ return err;
+
+ dev_info(ice_hw_to_dev(hw), "Port=%d Tx vernier offset calibration complete\n",
+ port);
+
+ return 0;
+}
+
+/**
+ * ice_phy_calc_pmd_adj_e822 - Calculate PMD adjustment for Rx
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to adjust for
+ * @link_spd: the current link speed of the PHY
+ * @fec_mode: the current FEC mode of the PHY
+ * @pmd_adj: on return, the amount to adjust the Rx total offset by
+ *
+ * Calculates the adjustment to Rx timestamps due to PMD alignment in the PHY.
+ * This varies by link speed and FEC mode. The value calculated accounts for
+ * various delays caused when receiving a packet.
+ */
+static int
+ice_phy_calc_pmd_adj_e822(struct ice_hw *hw, u8 port,
+ enum ice_ptp_link_spd link_spd,
+ enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
+ u8 pmd_align;
+ u32 val;
+ int err;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PMD alignment, err %d\n",
+ err);
+ return err;
+ }
+
+ pmd_align = (u8)val;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* The PMD alignment adjustment measurement depends on the link speed,
+ * and whether FEC is enabled. For each link speed, the alignment
+ * adjustment is calculated by dividing a value by the length of
+ * a Time Unit in nanoseconds.
+ *
+ * 1G: align == 4 ? 10 * 0.8 : ((align + 6) % 10) * 0.8
+ * 10G: align == 65 ? 0 : (align * 0.1 * 32/33)
+ * 10G w/FEC: align * 0.1 * 32/33
+ * 25G: align == 65 ? 0 : (align * 0.4 * 32/33)
+ * 25G w/FEC: align * 0.4 * 32/33
+ * 40G: align == 65 ? 0 : (align * 0.1 * 32/33)
+ * 40G w/FEC: align * 0.1 * 32/33
+ * 50G: align == 65 ? 0 : (align * 0.4 * 32/33)
+ * 50G w/FEC: align * 0.8 * 32/33
+ *
+ * For RS-FEC, if align is < 17 then we must also add 1.6 * 32/33.
+ *
+ * To allow for calculating this value using integer arithmetic, we
+ * instead start with the number of TUs per second, (inverse of the
+ * length of a Time Unit in nanoseconds), multiply by a value based
+ * on the PMD alignment register, and then divide by the right value
+ * calculated based on the table above. To avoid integer overflow this
+ * division is broken up into a step of dividing by 125 first.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_1G) {
+ if (pmd_align == 4)
+ mult = 10;
+ else
+ mult = (pmd_align + 6) % 10;
+ } else if (link_spd == ICE_PTP_LNK_SPD_10G ||
+ link_spd == ICE_PTP_LNK_SPD_25G ||
+ link_spd == ICE_PTP_LNK_SPD_40G ||
+ link_spd == ICE_PTP_LNK_SPD_50G) {
+ /* If Clause 74 FEC, always calculate PMD adjust */
+ if (pmd_align != 65 || fec_mode == ICE_PTP_FEC_MODE_CLAUSE74)
+ mult = pmd_align;
+ else
+ mult = 0;
+ } else if (link_spd == ICE_PTP_LNK_SPD_25G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_50G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_100G_RS) {
+ if (pmd_align < 17)
+ mult = pmd_align + 40;
+ else
+ mult = pmd_align;
+ } else {
+ ice_debug(hw, ICE_DBG_PTP, "Unknown link speed %d, skipping PMD adjustment\n",
+ link_spd);
+ mult = 0;
+ }
+
+ /* In some cases, there's no need to adjust for the PMD alignment */
+ if (!mult) {
+ *pmd_adj = 0;
+ return 0;
+ }
+
+ /* Calculate the adjustment by multiplying TUs per second by the
+ * appropriate multiplier and divisor. To avoid overflow, we first
+ * divide by 125, and then handle remaining divisor based on the link
+ * speed pmd_adj_divisor value.
+ */
+ adj = div_u64(tu_per_sec, 125);
+ adj *= mult;
+ adj = div_u64(adj, e822_vernier[link_spd].pmd_adj_divisor);
+
+ /* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
+ * cycle count is necessary.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_25G_RS) {
+ u64 cycle_adj;
+ u8 rx_cycle;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_40_TO_160_CNT,
+ &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read 25G-RS Rx cycle count, err %d\n",
+ err);
+ return err;
+ }
+
+ rx_cycle = val & P_REG_RX_40_TO_160_CNT_RXCYC_M;
+ if (rx_cycle) {
+ mult = (4 - rx_cycle) * 40;
+
+ cycle_adj = div_u64(tu_per_sec, 125);
+ cycle_adj *= mult;
+ cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
+
+ adj += cycle_adj;
+ }
+ } else if (link_spd == ICE_PTP_LNK_SPD_50G_RS) {
+ u64 cycle_adj;
+ u8 rx_cycle;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_80_TO_160_CNT,
+ &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read 50G-RS Rx cycle count, err %d\n",
+ err);
+ return err;
+ }
+
+ rx_cycle = val & P_REG_RX_80_TO_160_CNT_RXCYC_M;
+ if (rx_cycle) {
+ mult = rx_cycle * 40;
+
+ cycle_adj = div_u64(tu_per_sec, 125);
+ cycle_adj *= mult;
+ cycle_adj = div_u64(cycle_adj, e822_vernier[link_spd].pmd_adj_divisor);
+
+ adj += cycle_adj;
+ }
+ }
+
+ /* Return the calculated adjustment */
+ *pmd_adj = adj;
+
+ return 0;
+}
+
+/**
+ * ice_calc_fixed_rx_offset_e822 - Calculate the fixed Rx offset for a port
+ * @hw: pointer to HW struct
+ * @link_spd: The Link speed to calculate for
+ *
+ * Determine the fixed Rx latency for a given link speed.
+ */
+static u64
+ice_calc_fixed_rx_offset_e822(struct ice_hw *hw, enum ice_ptp_link_spd link_spd)
+{
+ u64 cur_freq, clk_incval, tu_per_sec, fixed_offset;
+
+ cur_freq = ice_e822_pll_freq(ice_e822_time_ref(hw));
+ clk_incval = ice_ptp_read_src_incval(hw);
+
+ /* Calculate TUs per second */
+ tu_per_sec = cur_freq * clk_incval;
+
+ /* Calculate number of TUs to add for the fixed Rx latency. Since the
+ * latency measurement is in 1/100th of a nanosecond, we need to
+ * multiply by tu_per_sec and then divide by 1e11. This calculation
+ * overflows 64 bit integer arithmetic, so break it up into two
+ * divisions by 1e4 first then by 1e7.
+ */
+ fixed_offset = div_u64(tu_per_sec, 10000);
+ fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
+ fixed_offset = div_u64(fixed_offset, 10000000);
+
+ return fixed_offset;
+}
+
+/**
+ * ice_phy_cfg_rx_offset_e822 - Configure total Rx timestamp offset
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * Program the P_REG_TOTAL_RX_OFFSET register with the number of Time Units to
+ * adjust Rx timestamps by. This combines the Vernier offset measurements
+ * taken in hardware with the known fixed delay for the link speed, as well
+ * as an adjustment for multi-lane alignment delay.
+ *
+ * This function will not return successfully until the Rx offset calculations
+ * have been completed, which requires waiting until at least one packet has
+ * been received by the device. It is safe to call this function periodically
+ * until calibration succeeds, as it will only program the offset once.
+ *
+ * This function must be called only after the offset registers are valid,
+ * i.e. after the Vernier calibration wait has passed, to ensure that the PHY
+ * has measured the offset.
+ *
+ * To avoid overflow, when calculating the offset based on the known static
+ * latency values, we use measurements in 1/100th of a nanosecond, and divide
+ * the TUs per second up front. This avoids overflow while allowing
+ * calculation of the adjustment using integer arithmetic.
+ *
+ * Returns zero on success, -EBUSY if the hardware vernier offset
+ * calibration has not completed, or another error code on failure.
+ */
+int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
+{
+ enum ice_ptp_link_spd link_spd;
+ enum ice_ptp_fec_mode fec_mode;
+ u64 total_offset, pmd, val;
+ int err;
+ u32 reg;
+
+ /* Nothing to do if we've already programmed the offset */
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OR, &reg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OR for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ if (reg)
+ return 0;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &reg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ if (!(reg & P_REG_RX_OV_STATUS_OV_M))
+ return -EBUSY;
+
+ err = ice_phy_get_speed_and_fec_e822(hw, port, &link_spd, &fec_mode);
+ if (err)
+ return err;
+
+ total_offset = ice_calc_fixed_rx_offset_e822(hw, link_spd);
+
+ /* Read the first Vernier offset from the PHY register and add it to
+ * the total offset.
+ */
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_PCS_RX_OFFSET_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+
+ /* For Rx, all multi-lane link speeds include a second Vernier
+ * calibration, because the lanes might not be aligned.
+ */
+ if (link_spd == ICE_PTP_LNK_SPD_40G ||
+ link_spd == ICE_PTP_LNK_SPD_50G ||
+ link_spd == ICE_PTP_LNK_SPD_50G_RS ||
+ link_spd == ICE_PTP_LNK_SPD_100G_RS) {
+ err = ice_read_64b_phy_reg_e822(hw, port,
+ P_REG_PAR_RX_TIME_L,
+ &val);
+ if (err)
+ return err;
+
+ total_offset += val;
+ }
+
+ /* In addition, Rx must account for the PMD alignment */
+ err = ice_phy_calc_pmd_adj_e822(hw, port, link_spd, fec_mode, &pmd);
+ if (err)
+ return err;
+
+ /* For RS-FEC, this adjustment adds delay, but for other modes, it
+ * subtracts delay.
+ */
+ if (fec_mode == ICE_PTP_FEC_MODE_RS_FEC)
+ total_offset += pmd;
+ else
+ total_offset -= pmd;
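+
+ /* total_offset is now the fixed Rx delay plus the first Vernier offset
+ * (plus the second Vernier measurement on multi-lane speeds), adjusted
+ * by the PMD alignment value.
+ */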
+
+ /* Now that the total offset has been calculated, program it to the
+ * PHY and indicate that the Rx offset is ready. After this,
+ * timestamps will be enabled.
+ */
+ err = ice_write_64b_phy_reg_e822(hw, port, P_REG_TOTAL_RX_OFFSET_L,
+ total_offset);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 1);
+ if (err)
+ return err;
+
+ dev_info(ice_hw_to_dev(hw), "Port=%d Rx vernier offset calibration complete\n",
+ port);
+
+ return 0;
+}
+
+/**
+ * ice_read_phy_and_phc_time_e822 - Simultaneously capture PHC and PHY time
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @phy_time: on return, the 64bit PHY timer value
+ * @phc_time: on return, the lower 64 bits of the PHC time
+ *
+ * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC
+ * timer values.
+ */
+static int
+ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
+ u64 *phc_time)
+{
+ u64 tx_time, rx_time;
+ u32 zo, lo;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ /* Prepare the PHC timer for a READ_TIME capture command */
+ ice_ptp_src_cmd(hw, READ_TIME);
+
+ /* Prepare the PHY timer for a READ_TIME capture command */
+ err = ice_ptp_one_port_cmd(hw, port, READ_TIME);
+ if (err)
+ return err;
+
+ /* Issue the sync to start the READ_TIME capture */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Read the captured PHC time from the shadow time registers */
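+ /* GLTSYN_SHTIME_0 holds the lower 32 bits of the captured time and
+ * GLTSYN_SHTIME_L the upper 32 bits.
+ */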
+ zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ *phc_time = (u64)lo << 32 | zo;
+
+ /* Read the captured PHY time from the PHY shadow registers */
+ err = ice_ptp_read_port_capture(hw, port, &tx_time, &rx_time);
+ if (err)
+ return err;
+
+ /* If the PHY Tx and Rx timers don't match, log a warning message.
+ * Note that this should not happen in normal circumstances since the
+ * driver always programs them together.
+ */
+ if (tx_time != rx_time)
+ dev_warn(ice_hw_to_dev(hw),
+ "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
+ port, (unsigned long long)tx_time,
+ (unsigned long long)rx_time);
+
+ *phy_time = tx_time;
+
+ return 0;
+}
+
+/**
+ * ice_sync_phy_timer_e822 - Synchronize the PHY timer with PHC timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to synchronize
+ *
+ * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
+ * This is done by issuing a READ_TIME command which triggers a simultaneous
+ * read of the PHY timer and PHC timer. Then we use the difference to
+ * calculate an appropriate 2s complement addition to add to the PHY timer in
+ * order to ensure it reads the same value as the primary PHC timer.
+ */
+static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
+{
+ u64 phc_time, phy_time, difference;
+ int err;
+
+ if (!ice_ptp_lock(hw)) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ /* Calculate the amount required to add to the port time in order for
+ * it to match the PHC time.
+ *
+ * Note that the port adjustment is done using 2s complement
+ * arithmetic. This is convenient since it means that we can simply
+ * calculate the difference between the PHC time and the port time,
+ * and it will be interpreted correctly.
+ */
+ difference = phc_time - phy_time;
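+
+ /* For example, if the port timer is ahead of the PHC, the unsigned
+ * subtraction above wraps around, and the same bit pattern interpreted
+ * as an s64 is the required negative adjustment.
+ */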
+
+ err = ice_ptp_prep_port_adj_e822(hw, port, (s64)difference);
+ if (err)
+ goto err_unlock;
+
+ err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME);
+ if (err)
+ goto err_unlock;
+
+ /* Issue the sync to activate the time adjustment */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Re-capture the timer values to flush the command registers and
+ * verify that the time was properly adjusted.
+ */
+ err = ice_read_phy_and_phc_time_e822(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ dev_info(ice_hw_to_dev(hw),
+ "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
+ port, (unsigned long long)phy_time,
+ (unsigned long long)phc_time);
+
+ ice_ptp_unlock(hw);
+
+ return 0;
+
+err_unlock:
+ ice_ptp_unlock(hw);
+ return err;
+}
+
+/**
+ * ice_stop_phy_timer_e822 - Stop the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to stop
+ * @soft_reset: if true, hold the SOFT_RESET bit of P_REG_PS
+ *
+ * Stop the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ */
+int
+ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset)
+{
+ int err;
+ u32 val;
+
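+ /* Clear the Tx and Rx offset ready bits; they are set again once the
+ * Vernier offset calibration completes after the port is restarted.
+ */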
+ err = ice_write_phy_reg_e822(hw, port, P_REG_TX_OR, 0);
+ if (err)
+ return err;
+
+ err = ice_write_phy_reg_e822(hw, port, P_REG_RX_OR, 0);
+ if (err)
+ return err;
+
+ err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ if (err)
+ return err;
+
+ val &= ~P_REG_PS_START_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val &= ~P_REG_PS_ENA_CLK_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ if (soft_reset) {
+ val |= P_REG_PS_SFT_RESET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
+
+ return 0;
+}
+
+/**
+ * ice_start_phy_timer_e822 - Start the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to start
+ *
+ * Start the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ *
+ * Hardware will take Vernier measurements on Tx or Rx of packets.
+ */
+int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
+{
+ u32 lo, hi, val;
+ u64 incval;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ err = ice_stop_phy_timer_e822(hw, port, false);
+ if (err)
+ return err;
+
+ ice_phy_cfg_lane_e822(hw, port);
+
+ err = ice_phy_cfg_uix_e822(hw, port);
+ if (err)
+ return err;
+
+ err = ice_phy_cfg_parpcs_e822(hw, port);
+ if (err)
+ return err;
+
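+ /* Copy the source timer increment value into the port timer so the
+ * PHY port ticks at the same rate as the main timer.
+ */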
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ incval = (u64)hi << 32 | lo;
+
+ err = ice_write_40b_phy_reg_e822(hw, port, P_REG_TIMETUS_L, incval);
+ if (err)
+ return err;
+
+ err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
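+ /* Bring the port out of reset and start it: assert SFT_RESET, set
+ * START, release SFT_RESET, reprogram the increment value, then enable
+ * the clock and load the offset.
+ */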
+ err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+ if (err)
+ return err;
+
+ val |= P_REG_PS_SFT_RESET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val |= P_REG_PS_START_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val &= ~P_REG_PS_SFT_RESET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
+ val |= P_REG_PS_ENA_CLK_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ val |= P_REG_PS_LOAD_OFFSET_M;
+ err = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
+ err = ice_sync_phy_timer_e822(hw, port);
+ if (err)
+ return err;
+
+ ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
+
+ return 0;
+}
+
+/**
+ * ice_get_phy_tx_tstamp_ready_e822 - Read Tx memory status register
+ * @hw: pointer to the HW struct
+ * @quad: the timestamp quad to read from
+ * @tstamp_ready: contents of the Tx memory status register
+ *
+ * Read the Q_REG_TX_MEMORY_STATUS register indicating which timestamps in
+ * the PHY are ready. A set bit means the corresponding timestamp is valid and
+ * ready to be captured from the PHY timestamp block.
+ */
+static int
+ice_get_phy_tx_tstamp_ready_e822(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
+{
+ u32 hi, lo;
+ int err;
+
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_U, &hi);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_U for quad %u, err %d\n",
+ quad, err);
+ return err;
+ }
+
+ err = ice_read_quad_reg_e822(hw, quad, Q_REG_TX_MEMORY_STATUS_L, &lo);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS_L for quad %u, err %d\n",
+ quad, err);
+ return err;
+ }
+
+ *tstamp_ready = (u64)hi << 32 | (u64)lo;
+
+ return 0;
+}
+
/* E810 functions
*
* The following functions operate on the E810 series devices which use
@@ -68,18 +2472,18 @@ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input msg = {0};
- int status;
+ int err;
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_rd;
msg.dest_dev = rmn_0;
- status = ice_sbq_rw_reg(hw, &msg);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n",
- status);
- return status;
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
}
*val = msg.data;
@@ -98,7 +2502,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input msg = {0};
- int status;
+ int err;
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
@@ -106,13 +2510,96 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
msg.dest_dev = rmn_0;
msg.data = val;
- status = ice_sbq_rw_reg(hw, &msg);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n",
- status);
- return status;
+ err = ice_sbq_rw_reg(hw, &msg);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_read_phy_tstamp_ll_e810 - Read PHY timestamp registers through the FW
+ * @hw: pointer to the HW struct
+ * @idx: the timestamp index to read
+ * @hi: 8 bit timestamp high value
+ * @lo: 32 bit timestamp low value
+ *
+ * Read an 8 bit timestamp high value and a 32 bit timestamp low value out of the
+ * timestamp block of the external PHY on the E810 device using the low latency
+ * timestamp read.
+ */
+static int
+ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
+{
+ u32 val;
+ u8 i;
+
+ /* Write TS index to read to the PF register so the FW can read it */
+ val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
+ wr32(hw, PF_SB_ATQBAL, val);
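+
+ /* PF_SB_ATQBAL layout for this exchange, per the TS_LL_READ_*
+ * definitions: bits 29:24 carry the timestamp index, bit 31 remains set
+ * while the request is pending, and firmware returns the upper 8
+ * timestamp bits in bits 23:16 once the pending bit clears.
+ */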
+
+ /* Read the register repeatedly until the FW provides us the TS */
+ for (i = TS_LL_READ_RETRIES; i > 0; i--) {
+ val = rd32(hw, PF_SB_ATQBAL);
+
+ /* When the bit is cleared, the TS is ready in the register */
+ if (!(FIELD_GET(TS_LL_READ_TS, val))) {
+ /* High 8 bit value of the TS is on the bits 16:23 */
+ *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
+
+ /* Read the low 32 bit value and set the TS valid bit */
+ *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
+ return 0;
+ }
+
+ udelay(10);
+ }
+
+ /* FW failed to provide the TS in time */
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
+ return -EINVAL;
+}
+
+/**
+ * ice_read_phy_tstamp_sbq_e810 - Read PHY timestamp registers through the sbq
+ * @hw: pointer to the HW struct
+ * @lport: the lport to read from
+ * @idx: the timestamp index to read
+ * @hi: 8 bit timestamp high value
+ * @lo: 32 bit timestamp low value
+ *
+ * Read an 8 bit timestamp high value and a 32 bit timestamp low value out of the
+ * timestamp block of the external PHY on the E810 device using sideband queue.
+ */
+static int
+ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
+ u32 *lo)
+{
+ u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
+ u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
+ u32 lo_val, hi_val;
+ int err;
+
+ err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
+ err);
+ return err;
}
+ *lo = lo_val;
+ *hi = (u8)hi_val;
+
return 0;
}
@@ -129,25 +2616,17 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
static int
ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
{
- u32 lo_addr, hi_addr, lo, hi;
- int status;
-
- lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
- hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
+ u32 lo = 0;
+ u8 hi = 0;
+ int err;
- status = ice_read_phy_reg_e810(hw, lo_addr, &lo);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n",
- status);
- return status;
- }
+ if (hw->dev_caps.ts_dev_info.ts_ll_read)
+ err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
+ else
+ err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
- status = ice_read_phy_reg_e810(hw, hi_addr, &hi);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n",
- status);
- return status;
- }
+ if (err)
+ return err;
/* For E810 devices, the timestamp is reported with the lower 32 bits
* in the low register, and the upper 8 bits in the high register.
@@ -169,23 +2648,23 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
{
u32 lo_addr, hi_addr;
- int status;
+ int err;
lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
- status = ice_write_phy_reg_e810(hw, lo_addr, 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, lo_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, hi_addr, 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, hi_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -200,17 +2679,32 @@ static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
*/
int ice_ptp_init_phy_e810(struct ice_hw *hw)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
- GLTSYN_ENA_TSYN_ENA_M);
- if (status)
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
+ GLTSYN_ENA_TSYN_ENA_M);
+ if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
- status);
+ err);
- return status;
+ return err;
+}
+
+/**
+ * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform E810-specific PTP hardware clock initialization steps.
+ */
+static int ice_ptp_init_phc_e810(struct ice_hw *hw)
+{
+ /* Ensure synchronization delay is zero */
+ wr32(hw, GLTSYN_SYNC_DLAY, 0);
+
+ /* Initialize the PHY */
+ return ice_ptp_init_phy_e810(hw);
}
/**
@@ -227,22 +2721,22 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw)
*/
static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -263,26 +2757,26 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
*/
static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Adjustments are represented as signed 2's complement values in
* nanoseconds. Sub-nanosecond adjustment is not supported.
*/
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -300,25 +2794,25 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
{
u32 high, low;
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
low = lower_32_bits(incval);
high = upper_32_bits(incval);
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, err %d\n",
+ err);
+ return err;
}
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, status %d\n",
- status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, err %d\n",
+ err);
+ return err;
}
return 0;
@@ -335,7 +2829,7 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
u32 cmd_val, val;
- int status;
+ int err;
switch (cmd) {
case INIT_TIME:
@@ -356,20 +2850,20 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
}
/* Read, modify, write */
- status = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, status %d\n", status);
- return status;
+ err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
+ return err;
}
/* Modify necessary bits only and perform write */
val &= ~TS_CMD_MASK_E810;
val |= cmd_val;
- status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, status %d\n", status);
- return status;
+ err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
+ return err;
}
return 0;
@@ -377,12 +2871,9 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
/* Device agnostic functions
*
- * The following functions implement useful behavior to hide the differences
- * between E810 and other devices. They call the device-specific
- * implementations where necessary.
- *
- * Currently, the driver only supports E810, but future work will enable
- * support for E822-based devices.
+ * The following functions implement shared behavior common to both E822 and
+ * E810 devices, possibly calling a device specific implementation where
+ * necessary.
*/
/**
@@ -405,16 +2896,18 @@ bool ice_ptp_lock(struct ice_hw *hw)
u32 hw_lock;
int i;
-#define MAX_TRIES 5
+#define MAX_TRIES 15
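+ /* With 5-6 ms sleeps per retry this bounds the total wait to roughly
+ * 75-90 ms before giving up on the semaphore.
+ */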
for (i = 0; i < MAX_TRIES; i++) {
hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
- if (!hw_lock)
- break;
+ if (hw_lock) {
+ /* Somebody is holding the lock */
+ usleep_range(5000, 6000);
+ continue;
+ }
- /* Somebody is holding the lock */
- usleep_range(10000, 20000);
+ break;
}
return !hw_lock;
@@ -433,42 +2926,6 @@ void ice_ptp_unlock(struct ice_hw *hw)
}
/**
- * ice_ptp_src_cmd - Prepare source timer for a timer command
- * @hw: pointer to HW structure
- * @cmd: Timer command
- *
- * Prepare the source timer for an upcoming timer sync command.
- */
-static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
-{
- u32 cmd_val;
- u8 tmr_idx;
-
- tmr_idx = ice_get_ptp_src_clock_index(hw);
- cmd_val = tmr_idx << SEL_CPK_SRC;
-
- switch (cmd) {
- case INIT_TIME:
- cmd_val |= GLTSYN_CMD_INIT_TIME;
- break;
- case INIT_INCVAL:
- cmd_val |= GLTSYN_CMD_INIT_INCVAL;
- break;
- case ADJ_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_TIME;
- break;
- case ADJ_TIME_AT_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
- break;
- case READ_TIME:
- cmd_val |= GLTSYN_CMD_READ_TIME;
- break;
- }
-
- wr32(hw, GLTSYN_CMD, cmd_val);
-}
-
-/**
* ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
* @hw: pointer to HW struct
* @cmd: the command to issue
@@ -480,23 +2937,26 @@ static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
- int status;
+ int err;
/* First, prepare the source timer */
ice_ptp_src_cmd(hw, cmd);
/* Next, prepare the ports */
- status = ice_ptp_port_cmd_e810(hw, cmd);
- if (status) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, status %d\n",
- cmd, status);
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_port_cmd_e810(hw, cmd);
+ else
+ err = ice_ptp_port_cmd_e822(hw, cmd);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
+ cmd, err);
+ return err;
}
- /* Write the sync command register to drive both source and PHY timer commands
- * synchronously
+ /* Write the sync command register to drive both source and PHY timer
+ * commands synchronously
*/
- wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
+ ice_ptp_exec_tmr_cmd(hw);
return 0;
}
@@ -516,8 +2976,8 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
int ice_ptp_init_time(struct ice_hw *hw, u64 time)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
@@ -528,9 +2988,12 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- status = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
- if (status)
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
+ else
+ err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
+ if (err)
+ return err;
return ice_ptp_tmr_cmd(hw, INIT_TIME);
}
@@ -551,8 +3014,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
*/
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
@@ -560,9 +3023,12 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- status = ice_ptp_prep_phy_incval_e810(hw, incval);
- if (status)
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_prep_phy_incval_e810(hw, incval);
+ else
+ err = ice_ptp_prep_phy_incval_e822(hw, incval);
+ if (err)
+ return err;
return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
}
@@ -576,16 +3042,16 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
*/
int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
{
- int status;
+ int err;
if (!ice_ptp_lock(hw))
return -EBUSY;
- status = ice_ptp_write_incval(hw, incval);
+ err = ice_ptp_write_incval(hw, incval);
ice_ptp_unlock(hw);
- return status;
+ return err;
}
/**
@@ -603,8 +3069,8 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
*/
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
{
- int status;
u8 tmr_idx;
+ int err;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
@@ -616,9 +3082,12 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- status = ice_ptp_prep_phy_adj_e810(hw, adj);
- if (status)
- return status;
+ if (ice_is_e810(hw))
+ err = ice_ptp_prep_phy_adj_e810(hw, adj);
+ else
+ err = ice_ptp_prep_phy_adj_e822(hw, adj);
+ if (err)
+ return err;
return ice_ptp_tmr_cmd(hw, ADJ_TIME);
}
@@ -630,11 +3099,16 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
* @idx: the timestamp index to read
* @tstamp: on return, the 40bit timestamp value
*
- * Read a 40bit timestamp value out of the timestamp block.
+ * Read a 40bit timestamp value out of the timestamp block. For E822 devices,
+ * the block is the quad to read from. For E810 devices, the block is the
+ * logical port to read from.
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
+ if (ice_is_e810(hw))
+ return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
+ else
+ return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
}
/**
@@ -643,11 +3117,32 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
* @block: the block to read from
* @idx: the timestamp index to reset
*
- * Clear a timestamp, resetting its valid bit, from the timestamp block.
+ * Clear a timestamp, resetting its valid bit, from the timestamp block. For
+ * E822 devices, the block is the quad to clear from. For E810 devices, the
+ * block is the logical port to clear from.
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- return ice_clear_phy_tstamp_e810(hw, block, idx);
+ if (ice_is_e810(hw))
+ return ice_clear_phy_tstamp_e810(hw, block, idx);
+ else
+ return ice_clear_phy_tstamp_e822(hw, block, idx);
+}
+
+/**
+ * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @tstamp_ready: contents of the Tx memory status register
+ *
+ * E810 devices do not use a Tx memory status register. Instead, simply
+ * report that all timestamps are currently ready.
+ */
+static int
+ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
+{
+ *tstamp_ready = 0xFFFFFFFFFFFFFFFF;
+ return 0;
}
/* E810T SMA functions
@@ -783,6 +3278,37 @@ int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
}
/**
+ * ice_read_pca9575_reg_e810t - Read a register from the GPIO controller
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Read the register from the GPIO controller
+ */
+int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ __le16 addr;
+ u16 handle;
+ int err;
+
+ memset(&link_topo, 0, sizeof(link_topo));
+
+ err = ice_get_pca9575_handle(hw, &handle);
+ if (err)
+ return err;
+
+ link_topo.handle = cpu_to_le16(handle);
+ link_topo.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
+
+ addr = cpu_to_le16((u16)offset);
+
+ return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+}
+
+/**
* ice_is_pca9575_present
* @hw: pointer to the hw struct
*
@@ -800,3 +3326,58 @@ bool ice_is_pca9575_present(struct ice_hw *hw)
return !status && handle;
}
+
+/**
+ * ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks
+ * @hw: pointer to the HW struct
+ */
+void ice_ptp_reset_ts_memory(struct ice_hw *hw)
+{
+ if (ice_is_e810(hw))
+ return;
+
+ ice_ptp_reset_ts_memory_e822(hw);
+}
+
+/**
+ * ice_ptp_init_phc - Initialize PTP hardware clock
+ * @hw: pointer to the HW struct
+ *
+ * Perform the steps required to initialize the PTP hardware clock.
+ */
+int ice_ptp_init_phc(struct ice_hw *hw)
+{
+ u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ /* Enable source clocks */
+ wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
+
+ /* Clear event err indications for auxiliary pins */
+ (void)rd32(hw, GLTSYN_STAT(src_idx));
+
+ if (ice_is_e810(hw))
+ return ice_ptp_init_phc_e810(hw);
+ else
+ return ice_ptp_init_phc_e822(hw);
+}
+
+/**
+ * ice_get_phy_tx_tstamp_ready - Read PHY Tx memory status indication
+ * @hw: pointer to the HW struct
+ * @block: the timestamp block to check
+ * @tstamp_ready: storage for the PHY Tx memory status information
+ *
+ * Check the PHY for Tx timestamp memory status. This reports a 64 bit value
+ * which indicates which timestamps in the block may be captured. A set bit
+ * means the timestamp can be read. An unset bit means the timestamp is not
+ * ready and software should avoid reading the register.
+ */
+int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
+{
+ if (ice_is_e810(hw))
+ return ice_get_phy_tx_tstamp_ready_e810(hw, block,
+ tstamp_ready);
+ else
+ return ice_get_phy_tx_tstamp_ready_e822(hw, block,
+ tstamp_ready);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index b2984b5c22c1..3b68cb91bd81 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -12,6 +12,112 @@ enum ice_ptp_tmr_cmd {
READ_TIME
};
+enum ice_ptp_serdes {
+ ICE_PTP_SERDES_1G,
+ ICE_PTP_SERDES_10G,
+ ICE_PTP_SERDES_25G,
+ ICE_PTP_SERDES_40G,
+ ICE_PTP_SERDES_50G,
+ ICE_PTP_SERDES_100G
+};
+
+enum ice_ptp_link_spd {
+ ICE_PTP_LNK_SPD_1G,
+ ICE_PTP_LNK_SPD_10G,
+ ICE_PTP_LNK_SPD_25G,
+ ICE_PTP_LNK_SPD_25G_RS,
+ ICE_PTP_LNK_SPD_40G,
+ ICE_PTP_LNK_SPD_50G,
+ ICE_PTP_LNK_SPD_50G_RS,
+ ICE_PTP_LNK_SPD_100G_RS,
+ NUM_ICE_PTP_LNK_SPD /* Must be last */
+};
+
+enum ice_ptp_fec_mode {
+ ICE_PTP_FEC_MODE_NONE,
+ ICE_PTP_FEC_MODE_CLAUSE74,
+ ICE_PTP_FEC_MODE_RS_FEC
+};
+
+/**
+ * struct ice_time_ref_info_e822
+ * @pll_freq: Frequency of PLL that drives timer ticks in Hz
+ * @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
+ * @pps_delay: propagation delay of the PPS output signal
+ *
+ * Characteristic information for the various TIME_REF sources possible in the
+ * E822 devices
+ */
+struct ice_time_ref_info_e822 {
+ u64 pll_freq;
+ u64 nominal_incval;
+ u8 pps_delay;
+};
+
+/**
+ * struct ice_vernier_info_e822
+ * @tx_par_clk: Frequency used to calculate P_REG_PAR_TX_TUS
+ * @rx_par_clk: Frequency used to calculate P_REG_PAR_RX_TUS
+ * @tx_pcs_clk: Frequency used to calculate P_REG_PCS_TX_TUS
+ * @rx_pcs_clk: Frequency used to calculate P_REG_PCS_RX_TUS
+ * @tx_desk_rsgb_par: Frequency used to calculate P_REG_DESK_PAR_TX_TUS
+ * @rx_desk_rsgb_par: Frequency used to calculate P_REG_DESK_PAR_RX_TUS
+ * @tx_desk_rsgb_pcs: Frequency used to calculate P_REG_DESK_PCS_TX_TUS
+ * @rx_desk_rsgb_pcs: Frequency used to calculate P_REG_DESK_PCS_RX_TUS
+ * @tx_fixed_delay: Fixed Tx latency measured in 1/100th nanoseconds
+ * @pmd_adj_divisor: Divisor used to calculate PMD alignment adjustment
+ * @rx_fixed_delay: Fixed Rx latency measured in 1/100th nanoseconds
+ *
+ * Table of constants used as part of the Vernier calibration of the Tx
+ * and Rx timestamps. This includes frequency values used to compute TUs per
+ * PAR/PCS clock cycle, and static delay values measured during hardware
+ * design.
+ *
+ * Note that some values are not used for all link speeds, and the
+ * P_REG_DESK_PAR* registers may represent different clock markers at
+ * different link speeds, either the deskew marker for multi-lane link speeds
+ * or the Reed Solomon gearbox marker for RS-FEC.
+ */
+struct ice_vernier_info_e822 {
+ u32 tx_par_clk;
+ u32 rx_par_clk;
+ u32 tx_pcs_clk;
+ u32 rx_pcs_clk;
+ u32 tx_desk_rsgb_par;
+ u32 rx_desk_rsgb_par;
+ u32 tx_desk_rsgb_pcs;
+ u32 rx_desk_rsgb_pcs;
+ u32 tx_fixed_delay;
+ u32 pmd_adj_divisor;
+ u32 rx_fixed_delay;
+};
+
+/**
+ * struct ice_cgu_pll_params_e822
+ * @refclk_pre_div: Reference clock pre-divisor
+ * @feedback_div: Feedback divisor
+ * @frac_n_div: Fractional divisor
+ * @post_pll_div: Post PLL divisor
+ *
+ * Clock Generation Unit parameters used to program the PLL based on the
+ * selected TIME_REF frequency.
+ */
+struct ice_cgu_pll_params_e822 {
+ u32 refclk_pre_div;
+ u32 feedback_div;
+ u32 frac_n_div;
+ u32 post_pll_div;
+};
+
+extern const struct
+ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
+
+/* Table of constants related to possible TIME_REF sources */
+extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ];
+
+/* Table of constants for Vernier calibration on E822 */
+extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD];
+
/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
* the E810 devices. Based off of a PLL with an 812.5 MHz frequency.
*/
@@ -27,28 +133,261 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj);
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp);
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
+void ice_ptp_reset_ts_memory(struct ice_hw *hw);
+int ice_ptp_init_phc(struct ice_hw *hw);
+int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready);
+
+/* E822 family functions */
+int ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val);
+int ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val);
+int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
+int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
+int ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time);
+void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad);
+
+/**
+ * ice_e822_time_ref - Get the current TIME_REF from capabilities
+ * @hw: pointer to the HW structure
+ *
+ * Returns the current TIME_REF from the capabilities structure.
+ */
+static inline enum ice_time_ref_freq ice_e822_time_ref(struct ice_hw *hw)
+{
+ return hw->func_caps.ts_func_info.time_ref;
+}
+
+/**
+ * ice_set_e822_time_ref - Set new TIME_REF
+ * @hw: pointer to the HW structure
+ * @time_ref: new TIME_REF to set
+ *
+ * Update the TIME_REF in the capabilities structure in response to some
+ * change, such as an update to the CGU registers.
+ */
+static inline void
+ice_set_e822_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
+{
+ hw->func_caps.ts_func_info.time_ref = time_ref;
+}
+
+static inline u64 ice_e822_pll_freq(enum ice_time_ref_freq time_ref)
+{
+ return e822_time_ref[time_ref].pll_freq;
+}
+
+static inline u64 ice_e822_nominal_incval(enum ice_time_ref_freq time_ref)
+{
+ return e822_time_ref[time_ref].nominal_incval;
+}
+
+static inline u64 ice_e822_pps_delay(enum ice_time_ref_freq time_ref)
+{
+ return e822_time_ref[time_ref].pps_delay;
+}
+
+/* E822 Vernier calibration functions */
+int ice_stop_phy_timer_e822(struct ice_hw *hw, u8 port, bool soft_reset);
+int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_tx_offset_e822(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port);
/* E810 family functions */
int ice_ptp_init_phy_e810(struct ice_hw *hw);
int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
+int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_is_pca9575_present(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
+#define ICE_PTP_CLOCK_INDEX_0 0x00
+#define ICE_PTP_CLOCK_INDEX_1 0x01
+
/* PHY timer commands */
#define SEL_CPK_SRC 8
+#define SEL_PHY_SRC 3
/* Time Sync command Definitions */
#define GLTSYN_CMD_INIT_TIME BIT(0)
#define GLTSYN_CMD_INIT_INCVAL BIT(1)
+#define GLTSYN_CMD_INIT_TIME_INCVAL (BIT(0) | BIT(1))
#define GLTSYN_CMD_ADJ_TIME BIT(2)
#define GLTSYN_CMD_ADJ_INIT_TIME (BIT(2) | BIT(3))
#define GLTSYN_CMD_READ_TIME BIT(7)
+/* PHY port Time Sync command definitions */
+#define PHY_CMD_INIT_TIME BIT(0)
+#define PHY_CMD_INIT_INCVAL BIT(1)
+#define PHY_CMD_ADJ_TIME (BIT(0) | BIT(1))
+#define PHY_CMD_ADJ_TIME_AT_TIME (BIT(0) | BIT(2))
+#define PHY_CMD_READ_TIME (BIT(0) | BIT(1) | BIT(2))
+
#define TS_CMD_MASK_E810 0xFF
+#define TS_CMD_MASK 0xF
#define SYNC_EXEC_CMD 0x3
+/* Macros to derive port low and high addresses on both quads */
+#define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF)
+#define P_Q0_H(a, p) ((((a) + (0x2000 * (p)))) >> 16)
+#define P_Q1_L(a, p) ((((a) - (0x2000 * ((p) - ICE_PORTS_PER_QUAD)))) & 0xFFFF)
+#define P_Q1_H(a, p) ((((a) - (0x2000 * ((p) - ICE_PORTS_PER_QUAD)))) >> 16)
+
+/* PHY QUAD register base addresses */
+#define Q_0_BASE 0x94000
+#define Q_1_BASE 0x114000
+
+/* Timestamp memory reset registers */
+#define Q_REG_TS_CTRL 0x618
+#define Q_REG_TS_CTRL_S 0
+#define Q_REG_TS_CTRL_M BIT(0)
+
+/* Timestamp availability status registers */
+#define Q_REG_TX_MEMORY_STATUS_L 0xCF0
+#define Q_REG_TX_MEMORY_STATUS_U 0xCF4
+
+/* Tx FIFO status registers */
+#define Q_REG_FIFO23_STATUS 0xCF8
+#define Q_REG_FIFO01_STATUS 0xCFC
+#define Q_REG_FIFO02_S 0
+#define Q_REG_FIFO02_M ICE_M(0x3FF, 0)
+#define Q_REG_FIFO13_S 10
+#define Q_REG_FIFO13_M ICE_M(0x3FF, 10)
+
+/* Interrupt control Config registers */
+#define Q_REG_TX_MEM_GBL_CFG 0xC08
+#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_S 0
+#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M BIT(0)
+#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_S 1
+#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_M ICE_M(0xFF, 1)
+#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_S 9
+#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_M ICE_M(0x3F, 9)
+#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_S 15
+#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M BIT(15)
+
+/* Tx Timestamp data registers */
+#define Q_REG_TX_MEMORY_BANK_START 0xA00
+
+/* PHY port register base addresses */
+#define P_0_BASE 0x80000
+#define P_4_BASE 0x106000
+
+/* Timestamp init registers */
+#define P_REG_RX_TIMER_INC_PRE_L 0x46C
+#define P_REG_RX_TIMER_INC_PRE_U 0x470
+#define P_REG_TX_TIMER_INC_PRE_L 0x44C
+#define P_REG_TX_TIMER_INC_PRE_U 0x450
+
+/* Timestamp match and adjust target registers */
+#define P_REG_RX_TIMER_CNT_ADJ_L 0x474
+#define P_REG_RX_TIMER_CNT_ADJ_U 0x478
+#define P_REG_TX_TIMER_CNT_ADJ_L 0x454
+#define P_REG_TX_TIMER_CNT_ADJ_U 0x458
+
+/* Timestamp capture registers */
+#define P_REG_RX_CAPTURE_L 0x4D8
+#define P_REG_RX_CAPTURE_U 0x4DC
+#define P_REG_TX_CAPTURE_L 0x4B4
+#define P_REG_TX_CAPTURE_U 0x4B8
+
+/* Timestamp PHY incval registers */
+#define P_REG_TIMETUS_L 0x410
+#define P_REG_TIMETUS_U 0x414
+
+#define P_REG_40B_LOW_M 0xFF
+#define P_REG_40B_HIGH_S 8
+
+/* PHY window length registers */
+#define P_REG_WL 0x40C
+
+#define PTP_VERNIER_WL 0x111ed
+
+/* PHY start registers */
+#define P_REG_PS 0x408
+#define P_REG_PS_START_S 0
+#define P_REG_PS_START_M BIT(0)
+#define P_REG_PS_BYPASS_MODE_S 1
+#define P_REG_PS_BYPASS_MODE_M BIT(1)
+#define P_REG_PS_ENA_CLK_S 2
+#define P_REG_PS_ENA_CLK_M BIT(2)
+#define P_REG_PS_LOAD_OFFSET_S 3
+#define P_REG_PS_LOAD_OFFSET_M BIT(3)
+#define P_REG_PS_SFT_RESET_S 11
+#define P_REG_PS_SFT_RESET_M BIT(11)
+
+/* PHY offset valid registers */
+#define P_REG_TX_OV_STATUS 0x4D4
+#define P_REG_TX_OV_STATUS_OV_S 0
+#define P_REG_TX_OV_STATUS_OV_M BIT(0)
+#define P_REG_RX_OV_STATUS 0x4F8
+#define P_REG_RX_OV_STATUS_OV_S 0
+#define P_REG_RX_OV_STATUS_OV_M BIT(0)
+
+/* PHY offset ready registers */
+#define P_REG_TX_OR 0x45C
+#define P_REG_RX_OR 0x47C
+
+/* PHY total offset registers */
+#define P_REG_TOTAL_RX_OFFSET_L 0x460
+#define P_REG_TOTAL_RX_OFFSET_U 0x464
+#define P_REG_TOTAL_TX_OFFSET_L 0x440
+#define P_REG_TOTAL_TX_OFFSET_U 0x444
+
+/* Timestamp PAR/PCS registers */
+#define P_REG_UIX66_10G_40G_L 0x480
+#define P_REG_UIX66_10G_40G_U 0x484
+#define P_REG_UIX66_25G_100G_L 0x488
+#define P_REG_UIX66_25G_100G_U 0x48C
+#define P_REG_DESK_PAR_RX_TUS_L 0x490
+#define P_REG_DESK_PAR_RX_TUS_U 0x494
+#define P_REG_DESK_PAR_TX_TUS_L 0x498
+#define P_REG_DESK_PAR_TX_TUS_U 0x49C
+#define P_REG_DESK_PCS_RX_TUS_L 0x4A0
+#define P_REG_DESK_PCS_RX_TUS_U 0x4A4
+#define P_REG_DESK_PCS_TX_TUS_L 0x4A8
+#define P_REG_DESK_PCS_TX_TUS_U 0x4AC
+#define P_REG_PAR_RX_TUS_L 0x420
+#define P_REG_PAR_RX_TUS_U 0x424
+#define P_REG_PAR_TX_TUS_L 0x428
+#define P_REG_PAR_TX_TUS_U 0x42C
+#define P_REG_PCS_RX_TUS_L 0x430
+#define P_REG_PCS_RX_TUS_U 0x434
+#define P_REG_PCS_TX_TUS_L 0x438
+#define P_REG_PCS_TX_TUS_U 0x43C
+#define P_REG_PAR_RX_TIME_L 0x4F0
+#define P_REG_PAR_RX_TIME_U 0x4F4
+#define P_REG_PAR_TX_TIME_L 0x4CC
+#define P_REG_PAR_TX_TIME_U 0x4D0
+#define P_REG_PAR_PCS_RX_OFFSET_L 0x4E8
+#define P_REG_PAR_PCS_RX_OFFSET_U 0x4EC
+#define P_REG_PAR_PCS_TX_OFFSET_L 0x4C4
+#define P_REG_PAR_PCS_TX_OFFSET_U 0x4C8
+#define P_REG_LINK_SPEED 0x4FC
+#define P_REG_LINK_SPEED_SERDES_S 0
+#define P_REG_LINK_SPEED_SERDES_M ICE_M(0x7, 0)
+#define P_REG_LINK_SPEED_FEC_MODE_S 3
+#define P_REG_LINK_SPEED_FEC_MODE_M ICE_M(0x3, 3)
+#define P_REG_LINK_SPEED_FEC_MODE(reg) \
+ (((reg) & P_REG_LINK_SPEED_FEC_MODE_M) >> \
+ P_REG_LINK_SPEED_FEC_MODE_S)
+
+/* PHY timestamp related registers */
+#define P_REG_PMD_ALIGNMENT 0x0FC
+#define P_REG_RX_80_TO_160_CNT 0x6FC
+#define P_REG_RX_80_TO_160_CNT_RXCYC_S 0
+#define P_REG_RX_80_TO_160_CNT_RXCYC_M BIT(0)
+#define P_REG_RX_40_TO_160_CNT 0x8FC
+#define P_REG_RX_40_TO_160_CNT_RXCYC_S 0
+#define P_REG_RX_40_TO_160_CNT_RXCYC_M ICE_M(0x3, 0)
+
+/* Rx FIFO status registers */
+#define P_REG_RX_OV_FS 0x4F8
+#define P_REG_RX_OV_FS_FIFO_STATUS_S 2
+#define P_REG_RX_OV_FS_FIFO_STATUS_M ICE_M(0x3FF, 2)
+
+/* Timestamp command registers */
+#define P_REG_TX_TMR_CMD 0x448
+#define P_REG_RX_TMR_CMD 0x468
+
/* E810 timesync enable register */
#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
@@ -67,10 +406,28 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define INCVAL_HIGH_M 0xFF
/* Timestamp block macros */
+#define TS_VALID BIT(0)
#define TS_LOW_M 0xFFFFFFFF
+#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
+#define TS_PHY_LOW_M 0xFF
+#define TS_PHY_HIGH_M 0xFFFFFFFF
+#define TS_PHY_HIGH_S 8
+
#define BYTES_PER_IDX_ADDR_L_U 8
+#define BYTES_PER_IDX_ADDR_L 4
+
+/* Tx timestamp low latency read definitions */
+#define TS_LL_READ_RETRIES 200
+#define TS_LL_READ_TS_HIGH GENMASK(23, 16)
+#define TS_LL_READ_TS_IDX GENMASK(29, 24)
+#define TS_LL_READ_TS BIT(31)
+
+/* Internal PHY timestamp address */
+#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
+#define TS_H(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U + \
+ BYTES_PER_IDX_ADDR_L))
/* External PHY timestamp address */
#define TS_EXT(a, port, idx) ((a) + (0x1000 * (port)) + \
@@ -98,4 +455,10 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define ICE_SMA_MAX_BIT_E810T 7
#define ICE_PCA9575_P1_OFFSET 8
+/* E810T PCA9575 IO controller registers */
+#define ICE_PCA9575_P0_IN 0x0
+
+/* E810T PCA9575 IO controller pin control */
+#define ICE_E810T_P0_GNSS_PRSNT_N BIT(4)
+
#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index af8e6ef5f571..fd1f8b0ad0ab 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -4,8 +4,9 @@
#include "ice.h"
#include "ice_eswitch.h"
#include "ice_devlink.h"
-#include "ice_virtchnl_pf.h"
+#include "ice_sriov.h"
#include "ice_tc_lib.h"
+#include "ice_dcb_lib.h"
/**
* ice_repr_get_sw_port_id - get port ID associated with representor
@@ -134,12 +135,59 @@ static int ice_repr_stop(struct net_device *netdev)
return 0;
}
-static struct devlink_port *
-ice_repr_get_devlink_port(struct net_device *netdev)
+/**
+ * ice_repr_sp_stats64 - get slow path stats for port representor
+ * @dev: network interface device structure
+ * @stats: netlink stats structure
+ *
+ * RX/TX stats are swapped here to be consistent with VF stats. In the slow
+ * path, the port representor receives data when the corresponding VF is
+ * sending it (and vice versa), so TX and RX bytes/packets are effectively
+ * swapped on the port representor.
+ */
+static int
+ice_repr_sp_stats64(const struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
- struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ struct ice_netdev_priv *np = netdev_priv(dev);
+ int vf_id = np->repr->vf->vf_id;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+ u64 pkts, bytes;
+
+ tx_ring = np->vsi->tx_rings[vf_id];
+ ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp,
+ tx_ring->ring_stats->stats,
+ &pkts, &bytes);
+ stats->rx_packets = pkts;
+ stats->rx_bytes = bytes;
+
+ rx_ring = np->vsi->rx_rings[vf_id];
+ ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp,
+ rx_ring->ring_stats->stats,
+ &pkts, &bytes);
+ stats->tx_packets = pkts;
+ stats->tx_bytes = bytes;
+ stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed +
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed;
- return &repr->vf->devlink_port;
+ return 0;
+}
+
+static bool
+ice_repr_ndo_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+ return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
+}
+
+static int
+ice_repr_ndo_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
+{
+ if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
+ return ice_repr_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);
+
+ return -EINVAL;
}
static int
@@ -197,8 +245,9 @@ static const struct net_device_ops ice_repr_netdev_ops = {
.ndo_open = ice_repr_open,
.ndo_stop = ice_repr_stop,
.ndo_start_xmit = ice_eswitch_port_start_xmit,
- .ndo_get_devlink_port = ice_repr_get_devlink_port,
.ndo_setup_tc = ice_repr_setup_tc,
+ .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats,
+ .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats,
};
/**
@@ -238,19 +287,32 @@ static int ice_repr_add(struct ice_vf *vf)
struct ice_q_vector *q_vector;
struct ice_netdev_priv *np;
struct ice_repr *repr;
+ struct ice_vsi *vsi;
int err;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
repr = kzalloc(sizeof(*repr), GFP_KERNEL);
if (!repr)
return -ENOMEM;
+#ifdef CONFIG_ICE_SWITCHDEV
+ repr->mac_rule = kzalloc(sizeof(*repr->mac_rule), GFP_KERNEL);
+ if (!repr->mac_rule) {
+ err = -ENOMEM;
+ goto err_alloc_rule;
+ }
+#endif
+
repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
if (!repr->netdev) {
err = -ENOMEM;
goto err_alloc;
}
- repr->src_vsi = ice_get_vf_vsi(vf);
+ repr->src_vsi = vsi;
repr->vf = vf;
vf->repr = repr;
np = netdev_priv(repr->netdev);
@@ -270,11 +332,13 @@ static int ice_repr_add(struct ice_vf *vf)
repr->netdev->min_mtu = ETH_MIN_MTU;
repr->netdev->max_mtu = ICE_MAX_MTU;
+ SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf));
+ SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port);
err = ice_repr_reg_netdev(repr->netdev);
if (err)
goto err_netdev;
- devlink_port_type_eth_set(&vf->devlink_port, repr->netdev);
+ ice_virtchnl_set_repr_ops(vf);
return 0;
@@ -287,6 +351,11 @@ err_alloc_q_vector:
free_netdev(repr->netdev);
repr->netdev = NULL;
err_alloc:
+#ifdef CONFIG_ICE_SWITCHDEV
+ kfree(repr->mac_rule);
+ repr->mac_rule = NULL;
+err_alloc_rule:
+#endif
kfree(repr);
vf->repr = NULL;
return err;
@@ -298,14 +367,47 @@ err_alloc:
*/
static void ice_repr_rem(struct ice_vf *vf)
{
- ice_devlink_destroy_vf_port(vf);
+ if (!vf->repr)
+ return;
+
kfree(vf->repr->q_vector);
vf->repr->q_vector = NULL;
unregister_netdev(vf->repr->netdev);
+ ice_devlink_destroy_vf_port(vf);
free_netdev(vf->repr->netdev);
vf->repr->netdev = NULL;
+#ifdef CONFIG_ICE_SWITCHDEV
+ kfree(vf->repr->mac_rule);
+ vf->repr->mac_rule = NULL;
+#endif
kfree(vf->repr);
vf->repr = NULL;
+
+ ice_virtchnl_set_dflt_ops(vf);
+}
+
+/**
+ * ice_repr_rem_from_all_vfs - remove port representor for all VFs
+ * @pf: pointer to PF structure
+ */
+void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
+{
+ struct devlink *devlink;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ lockdep_assert_held(&pf->vfs.table_lock);
+
+ ice_for_each_vf(pf, bkt, vf)
+ ice_repr_rem(vf);
+
+ /* since all port representors are destroyed, there is
+ * no point in keeping the nodes
+ */
+ devlink = priv_to_devlink(pf);
+ devl_lock(devlink);
+ devl_rate_nodes_destroy(devlink);
+ devl_unlock(devlink);
}
/**
@@ -314,49 +416,35 @@ static void ice_repr_rem(struct ice_vf *vf)
*/
int ice_repr_add_for_all_vfs(struct ice_pf *pf)
{
+ struct devlink *devlink;
+ struct ice_vf *vf;
+ unsigned int bkt;
int err;
- int i;
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
+ lockdep_assert_held(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
err = ice_repr_add(vf);
if (err)
goto err;
-
- ice_vc_change_ops_to_repr(&vf->vc_ops);
}
+ /* only export if ADQ and DCB disabled */
+ if (ice_is_adq_active(pf) || ice_is_dcb_active(pf))
+ return 0;
+
+ devlink = priv_to_devlink(pf);
+ ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf));
+
return 0;
err:
- for (i = i - 1; i >= 0; i--) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_repr_rem(vf);
- ice_vc_set_dflt_vf_ops(&vf->vc_ops);
- }
+ ice_repr_rem_from_all_vfs(pf);
return err;
}
/**
- * ice_repr_rem_from_all_vfs - remove port representor for all VFs
- * @pf: pointer to PF structure
- */
-void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
-{
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_repr_rem(vf);
- ice_vc_set_dflt_vf_ops(&vf->vc_ops);
- }
-}
-
-/**
* ice_repr_start_tx_queues - start Tx queues of port representor
* @repr: pointer to repr structure
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index 806de22933c6..378a45bfa256 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -5,7 +5,6 @@
#define _ICE_REPR_H_
#include <net/dst_metadata.h>
-#include "ice.h"
struct ice_repr {
struct ice_vsi *src_vsi;
@@ -13,6 +12,11 @@ struct ice_repr {
struct ice_q_vector *q_vector;
struct net_device *netdev;
struct metadata_dst *dst;
+#ifdef CONFIG_ICE_SWITCHDEV
+ /* info about slow path MAC rule */
+ struct ice_rule_query_data *mac_rule;
+ u8 rule_added;
+#endif
};
int ice_repr_add_for_all_vfs(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index ce3c7bded4cb..b7682de0ae05 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
+#include <net/devlink.h>
#include "ice_sched.h"
/**
@@ -11,7 +12,7 @@
* This function inserts the root node of the scheduling tree topology
* to the SW DB.
*/
-static enum ice_status
+static int
ice_sched_add_root_node(struct ice_port_info *pi,
struct ice_aqc_txsched_elem_data *info)
{
@@ -19,20 +20,20 @@ ice_sched_add_root_node(struct ice_port_info *pi,
struct ice_hw *hw;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
if (!root)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* coverity[suspicious_sizeof] */
root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
sizeof(*root), GFP_KERNEL);
if (!root->children) {
devm_kfree(ice_hw_to_dev(hw), root);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
memcpy(&root->info, info, sizeof(*info));
@@ -96,14 +97,14 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
*
* This function sends a scheduling elements cmd (cmd_opc)
*/
-static enum ice_status
+static int
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
u16 elems_req, void *buf, u16 buf_size,
u16 *elems_resp, struct ice_sq_cd *cd)
{
struct ice_aqc_sched_elem_cmd *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.sched_elem_cmd;
ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
@@ -127,7 +128,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
*
* Query scheduling elements (0x0404)
*/
-enum ice_status
+int
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
@@ -142,21 +143,23 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
* @pi: port information structure
* @layer: Scheduler layer of the node
* @info: Scheduler element information from firmware
+ * @prealloc_node: preallocated ice_sched_node struct for SW DB
*
* This function inserts a scheduler node to the SW DB.
*/
-enum ice_status
+int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
- struct ice_aqc_txsched_elem_data *info)
+ struct ice_aqc_txsched_elem_data *info,
+ struct ice_sched_node *prealloc_node)
{
struct ice_aqc_txsched_elem_data elem;
struct ice_sched_node *parent;
struct ice_sched_node *node;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
@@ -166,7 +169,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
if (!parent) {
ice_debug(hw, ICE_DBG_SCHED, "Parent Node not found for parent_teid=0x%x\n",
le32_to_cpu(info->parent_teid));
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* query the current node information from FW before adding it
@@ -176,9 +179,12 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
if (status)
return status;
- node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
+ if (prealloc_node)
+ node = prealloc_node;
+ else
+ node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
if (!node)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
if (hw->max_children[layer]) {
/* coverity[suspicious_sizeof] */
node->children = devm_kcalloc(ice_hw_to_dev(hw),
@@ -186,7 +192,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
sizeof(*node), GFP_KERNEL);
if (!node->children) {
devm_kfree(ice_hw_to_dev(hw), node);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
}
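ice_sched_add_node() now accepts an optional caller-preallocated node and only allocates when none is supplied. A hedged sketch of that pattern (hypothetical names, not the driver's exact code):

#include <stdlib.h>

struct example_node { int id; };

/* Prefer the node the caller already allocated; otherwise allocate here.
 * The caller passes NULL when it wants this function to own the allocation.
 */
static struct example_node *example_get_node(struct example_node *prealloc)
{
	if (prealloc)
		return prealloc;
	return calloc(1, sizeof(struct example_node));
}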
@@ -209,7 +215,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
*
* Delete scheduling elements (0x040F)
*/
-static enum ice_status
+static int
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_delete_elem *buf, u16 buf_size,
u16 *grps_del, struct ice_sq_cd *cd)
@@ -228,19 +234,19 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* This function removes nodes from HW
*/
-static enum ice_status
+static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
u16 num_nodes, u32 *node_teids)
{
struct ice_aqc_delete_elem *buf;
u16 i, num_groups_removed = 0;
- enum ice_status status;
u16 buf_size;
+ int status;
buf_size = struct_size(buf, teid, num_nodes);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->hdr.parent_teid = parent->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(num_nodes);
@@ -355,6 +361,9 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
/* leaf nodes have no children */
if (node->children)
devm_kfree(ice_hw_to_dev(hw), node->children);
+
+ kfree(node->name);
+ xa_erase(&pi->sched_node_ids, node->id);
devm_kfree(ice_hw_to_dev(hw), node);
}
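Node teardown now also releases the per-node name buffer and the ID slot in the port's xarray, keeping ice_free_sched_node() symmetric with the setup added to ice_sched_add_elems() below. A kernel-context sketch of that release order (types and names are hypothetical):

#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_node {
	u32 id;
	char *name;
};

/* Release per-node resources in reverse order of setup: name string,
 * xarray ID slot, then the node itself (the driver frees the node with
 * devm_kfree(); plain kfree() is used here only to keep the sketch small).
 */
static void example_free_node(struct xarray *ids, struct example_node *node)
{
	kfree(node->name);
	xa_erase(ids, node->id);
	kfree(node);
}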
@@ -369,14 +378,14 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
*
* Get default scheduler topology (0x0400)
*/
-static enum ice_status
+static int
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
struct ice_aqc_get_topo_elem *buf, u16 buf_size,
u8 *num_branches, struct ice_sq_cd *cd)
{
struct ice_aqc_get_topo *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.get_topo;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
@@ -399,7 +408,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
*
* Add scheduling elements (0x0401)
*/
-static enum ice_status
+static int
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_add_elem *buf, u16 buf_size,
u16 *grps_added, struct ice_sq_cd *cd)
@@ -420,7 +429,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* Configure scheduling elements (0x0403)
*/
-static enum ice_status
+static int
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_cfgd, struct ice_sq_cd *cd)
@@ -441,7 +450,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* Move scheduling elements (0x0408)
*/
-static enum ice_status
+static int
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd)
@@ -462,7 +471,7 @@ ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* Suspend scheduling elements (0x0409)
*/
-static enum ice_status
+static int
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
@@ -482,7 +491,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
*
* Resume scheduling elements (0x040A)
*/
-static enum ice_status
+static int
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
@@ -500,7 +509,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
*
* Query scheduler resource allocation (0x0412)
*/
-static enum ice_status
+static int
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
struct ice_aqc_query_txsched_res_resp *buf,
struct ice_sq_cd *cd)
@@ -520,18 +529,18 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
*
* This function suspends or resumes HW nodes
*/
-static enum ice_status
+static int
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
bool suspend)
{
u16 i, buf_size, num_elem_ret = 0;
- enum ice_status status;
__le32 *buf;
+ int status;
buf_size = sizeof(*buf) * num_nodes;
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < num_nodes; i++)
buf[i] = cpu_to_le32(node_teids[i]);
@@ -558,7 +567,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
* @tc: TC number
* @new_numqs: number of queues
*/
-static enum ice_status
+static int
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
@@ -566,7 +575,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* allocate LAN queue contexts */
if (!vsi_ctx->lan_q_ctx[tc]) {
vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
@@ -574,7 +583,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
sizeof(*q_ctx),
GFP_KERNEL);
if (!vsi_ctx->lan_q_ctx[tc])
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
return 0;
}
@@ -585,7 +594,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
prev_num * sizeof(*q_ctx));
devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
@@ -602,7 +611,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
* @tc: TC number
* @new_numqs: number of queues
*/
-static enum ice_status
+static int
ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
@@ -610,7 +619,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* allocate RDMA queue contexts */
if (!vsi_ctx->rdma_q_ctx[tc]) {
vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
@@ -618,7 +627,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
sizeof(*q_ctx),
GFP_KERNEL);
if (!vsi_ctx->rdma_q_ctx[tc])
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
return 0;
}
@@ -629,7 +638,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
prev_num * sizeof(*q_ctx));
devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);
@@ -651,14 +660,14 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
*
* RL profile function to add, query, or remove profile(s)
*/
-static enum ice_status
+static int
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
struct ice_aqc_rl_profile *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.rl_profile;
@@ -682,7 +691,7 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
*
* Add RL profile (0x0410)
*/
-static enum ice_status
+static int
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
u16 *num_profiles_added, struct ice_sq_cd *cd)
@@ -702,7 +711,7 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
*
* Remove RL profile (0x0415)
*/
-static enum ice_status
+static int
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
u16 *num_profiles_removed, struct ice_sq_cd *cd)
@@ -721,24 +730,24 @@ ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
* its associated parameters from HW DB, and locally. The caller needs to
* hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_del_rl_profile(struct ice_hw *hw,
struct ice_aqc_rl_profile_info *rl_info)
{
struct ice_aqc_rl_profile_elem *buf;
u16 num_profiles_removed;
- enum ice_status status;
u16 num_profiles = 1;
+ int status;
if (rl_info->prof_id_ref != 0)
- return ICE_ERR_IN_USE;
+ return -EBUSY;
/* Safe to remove profile ID */
buf = &rl_info->profile;
status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
&num_profiles_removed, NULL);
if (status || num_profiles_removed != num_profiles)
- return ICE_ERR_CFG;
+ return -EIO;
/* Delete stale entry now */
list_del(&rl_info->list_entry);
@@ -763,7 +772,7 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
&pi->rl_prof_list[ln], list_entry) {
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
rl_prof_elem->prof_id_ref = 0;
status = ice_sched_del_rl_profile(hw, rl_prof_elem);
@@ -872,26 +881,28 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
* @num_nodes: number of nodes
* @num_nodes_added: pointer to num nodes added
* @first_node_teid: if new nodes are added then return the TEID of first node
+ * @prealloc_nodes: preallocated array of nodes for the SW DB
*
* This function adds nodes to HW as well as to the SW DB for a given layer
*/
-static enum ice_status
+int
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer, u16 num_nodes,
- u16 *num_nodes_added, u32 *first_node_teid)
+ u16 *num_nodes_added, u32 *first_node_teid,
+ struct ice_sched_node **prealloc_nodes)
{
struct ice_sched_node *prev, *new_node;
struct ice_aqc_add_elem *buf;
u16 i, num_groups_added = 0;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
size_t buf_size;
+ int status = 0;
u32 teid;
buf_size = struct_size(buf, generic, num_nodes);
buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->hdr.parent_teid = parent->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(num_nodes);
@@ -918,13 +929,17 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
- return ICE_ERR_CFG;
+ return -EIO;
}
*num_nodes_added = num_nodes;
/* add nodes to the SW DB */
for (i = 0; i < num_nodes; i++) {
- status = ice_sched_add_node(pi, layer, &buf->generic[i]);
+ if (prealloc_nodes)
+ status = ice_sched_add_node(pi, layer, &buf->generic[i], prealloc_nodes[i]);
+ else
+ status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL);
+
if (status) {
ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
status);
@@ -940,6 +955,22 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
new_node->sibling = NULL;
new_node->tc_num = tc_node->tc_num;
+ new_node->tx_weight = ICE_SCHED_DFLT_BW_WT;
+ new_node->tx_share = ICE_SCHED_DFLT_BW;
+ new_node->tx_max = ICE_SCHED_DFLT_BW;
+ new_node->name = kzalloc(SCHED_NODE_NAME_MAX_LEN, GFP_KERNEL);
+ if (!new_node->name)
+ return -ENOMEM;
+
+ status = xa_alloc(&pi->sched_node_ids, &new_node->id, NULL, XA_LIMIT(0, UINT_MAX),
+ GFP_KERNEL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SCHED, "xa_alloc failed for sched node status =%d\n",
+ status);
+ break;
+ }
+
+ snprintf(new_node->name, SCHED_NODE_NAME_MAX_LEN, "node_%u", new_node->id);
/* add it to previous node sibling pointer */
/* Note: siblings are not linked across branches */
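Every node added to the SW DB now receives a unique ID from the port's sched_node_ids xarray and a name derived from that ID ("node_<id>"), which devlink-rate code can later expose. A kernel-context sketch of the allocation pattern (hypothetical names; the xarray is assumed to have been initialized with XA_FLAGS_ALLOC):

#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <linux/kernel.h>

#define EXAMPLE_NAME_LEN	32

struct example_node {
	u32 id;
	char *name;
};

/* Reserve a free index in the xarray, then derive the node name from it. */
static int example_name_node(struct xarray *ids, struct example_node *node)
{
	int err;

	node->name = kzalloc(EXAMPLE_NAME_LEN, GFP_KERNEL);
	if (!node->name)
		return -ENOMEM;

	err = xa_alloc(ids, &node->id, NULL, XA_LIMIT(0, UINT_MAX), GFP_KERNEL);
	if (err) {
		kfree(node->name);
		node->name = NULL;
		return err;
	}

	snprintf(node->name, EXAMPLE_NAME_LEN, "node_%u", node->id);
	return 0;
}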
@@ -974,7 +1005,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
*
* Add nodes into specific HW layer.
*/
-static enum ice_status
+static int
ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
@@ -989,7 +1020,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
return 0;
if (!parent || layer < pi->hw->sw_entry_point_layer)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* max children per node per layer */
max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
@@ -998,12 +1029,12 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
if ((parent->num_children + num_nodes) > max_child_nodes) {
/* Fail if the parent is a TC node */
if (parent == tc_node)
- return ICE_ERR_CFG;
- return ICE_ERR_MAX_LIMIT;
+ return -EIO;
+ return -ENOSPC;
}
return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
- num_nodes_added, first_node_teid);
+ num_nodes_added, first_node_teid, NULL);
}
/**
@@ -1018,7 +1049,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
*
* This function adds nodes to a given layer.
*/
-static enum ice_status
+static int
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
@@ -1027,12 +1058,11 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
{
u32 *first_teid_ptr = first_node_teid;
u16 new_num_nodes = num_nodes;
- enum ice_status status = 0;
+ int status = 0;
*num_nodes_added = 0;
while (*num_nodes_added < num_nodes) {
u16 max_child_nodes, num_added = 0;
- /* cppcheck-suppress unusedVariable */
u32 temp;
status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
@@ -1045,14 +1075,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
if (*num_nodes_added > num_nodes) {
ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
*num_nodes_added);
- status = ICE_ERR_CFG;
+ status = -EIO;
break;
}
/* break if all the nodes are added successfully */
if (!status && (*num_nodes_added == num_nodes))
break;
/* break if the error is not max limit */
- if (status && status != ICE_ERR_MAX_LIMIT)
+ if (status && status != -ENOSPC)
break;
/* Exceeded the max children */
max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
@@ -1152,7 +1182,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
}
if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = le32_to_cpu(node->info.node_teid);
- enum ice_status status;
+ int status;
/* remove the default leaf node */
status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
@@ -1198,23 +1228,23 @@ static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
* resources, default topology created by firmware and storing the information
* in SW DB.
*/
-enum ice_status ice_sched_init_port(struct ice_port_info *pi)
+int ice_sched_init_port(struct ice_port_info *pi)
{
struct ice_aqc_get_topo_elem *buf;
- enum ice_status status;
struct ice_hw *hw;
u8 num_branches;
u16 num_elems;
+ int status;
u8 i, j;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
/* Query the Default Topology from FW */
- buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Query default scheduling tree topology */
status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
@@ -1226,7 +1256,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
num_branches);
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto err_init_port;
}
@@ -1237,7 +1267,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
num_elems);
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto err_init_port;
}
@@ -1268,7 +1298,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
ICE_AQC_ELEM_TYPE_ENTRY_POINT)
hw->sw_entry_point_layer = j;
- status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
+ status = ice_sched_add_node(pi, j, &buf[i].generic[j], NULL);
if (status)
goto err_init_port;
}
@@ -1290,7 +1320,7 @@ err_init_port:
pi->root = NULL;
}
- devm_kfree(ice_hw_to_dev(hw), buf);
+ kfree(buf);
return status;
}
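The topology scratch buffer in ice_sched_init_port() moves from devm_kzalloc() to plain kzalloc()/kfree(): it only lives for the duration of this function, so there is no reason to tie it to the device's lifetime. A minimal sketch of the short-lived-buffer pattern (kernel context assumed, names illustrative):

#include <linux/slab.h>

/* Scratch buffer that is allocated, used and freed inside one function;
 * devm_* allocations are better reserved for objects that must live for
 * as long as the device itself.
 */
static int example_query(size_t len)
{
	void *buf;
	int err = 0;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* ... fill and consume buf here ... */

	kfree(buf);
	return err;
}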
@@ -1300,11 +1330,11 @@ err_init_port:
*
* query FW for allocated scheduler resources and store in HW struct
*/
-enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
+int ice_sched_query_res_alloc(struct ice_hw *hw)
{
struct ice_aqc_query_txsched_res_resp *buf;
- enum ice_status status = 0;
__le16 max_sibl;
+ int status = 0;
u16 i;
if (hw->layer_info)
@@ -1312,7 +1342,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
if (status)
@@ -1341,7 +1371,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
sizeof(*hw->layer_info)),
GFP_KERNEL);
if (!hw->layer_info) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto sched_query_out;
}
@@ -1614,14 +1644,13 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
* This function adds the VSI child nodes to the tree. It gets called for
* LAN and RDMA separately.
*/
-static enum ice_status
+static int
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes,
u8 owner)
{
struct ice_sched_node *parent, *node;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
u32 first_node_teid;
u16 num_added = 0;
u8 i, qgl, vsil;
@@ -1630,15 +1659,17 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
vsil = ice_sched_get_vsi_layer(hw);
parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
+ int status;
+
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
num_nodes[i],
&first_node_teid,
&num_added);
if (status || num_nodes[i] != num_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -1717,27 +1748,28 @@ ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
* This function adds the VSI supported nodes into Tx tree including the
* VSI, its parent and intermediate nodes in below layers
*/
-static enum ice_status
+static int
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
struct ice_sched_node *parent = tc_node;
- enum ice_status status;
u32 first_node_teid;
u16 num_added = 0;
u8 i, vsil;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
+ int status;
+
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
i, num_nodes[i],
&first_node_teid,
&num_added);
if (status || num_nodes[i] != num_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -1749,7 +1781,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
parent = parent->children[0];
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
if (i == vsil)
parent->vsi_handle = vsi_handle;
@@ -1766,7 +1798,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
*
* This function adds a new VSI into scheduler tree
*/
-static enum ice_status
+static int
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
@@ -1774,7 +1806,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* calculate number of supported nodes needed for this VSI */
ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);
@@ -1794,7 +1826,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
*
* This function updates the VSI child nodes based on the number of queues
*/
-static enum ice_status
+static int
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, u16 new_numqs, u8 owner)
{
@@ -1802,21 +1834,21 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *vsi_node;
struct ice_sched_node *tc_node;
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u16 prev_numqs;
+ int status = 0;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
- return ICE_ERR_CFG;
+ return -EIO;
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (owner == ICE_SCHED_NODE_OWNER_LAN)
prev_numqs = vsi_ctx->sched.max_lanq[tc];
@@ -1869,22 +1901,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
* enabled and VSI is in suspended state then resume the VSI back. If TC is
* disabled then suspend the VSI if it is not already.
*/
-enum ice_status
+int
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable)
{
struct ice_sched_node *vsi_node, *tc_node;
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ int status = 0;
ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
/* suspend the VSI if TC is not enabled */
@@ -1908,7 +1940,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
- return ICE_ERR_CFG;
+ return -EIO;
vsi_ctx->sched.vsi_node[tc] = vsi_node;
vsi_node->in_use = true;
@@ -1993,11 +2025,11 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
* This function removes the VSI and its LAN or RDMA children nodes from the
* scheduler tree.
*/
-static enum ice_status
+static int
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_vsi_ctx *vsi_ctx;
+ int status = -EINVAL;
u8 i;
ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
@@ -2022,7 +2054,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (ice_sched_is_leaf_node_present(vsi_node)) {
ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
- status = ICE_ERR_IN_USE;
+ status = -EBUSY;
goto exit_sched_rm_vsi_cfg;
}
while (j < vsi_node->num_children) {
@@ -2065,7 +2097,7 @@ exit_sched_rm_vsi_cfg:
* This function clears the VSI and its LAN children nodes from scheduler tree
* for all TCs.
*/
-enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
+int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}
@@ -2078,7 +2110,7 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
* This function clears the VSI and its RDMA children nodes from scheduler tree
* for all TCs.
*/
-enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
+int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
}
@@ -2154,7 +2186,7 @@ ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
* This function removes the child from the old parent and adds it to a new
* parent
*/
-static void
+void
ice_sched_update_parent(struct ice_sched_node *new_parent,
struct ice_sched_node *node)
{
@@ -2188,36 +2220,36 @@ ice_sched_update_parent(struct ice_sched_node *new_parent,
*
* This function moves the child nodes to a given parent.
*/
-static enum ice_status
+int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
struct ice_aqc_move_elem *buf;
struct ice_sched_node *node;
- enum ice_status status = 0;
u16 i, grps_movd = 0;
struct ice_hw *hw;
+ int status = 0;
u16 buf_len;
hw = pi->hw;
if (!parent || !num_items)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Does parent have enough space */
if (parent->num_children + num_items >
hw->max_children[parent->tx_sched_layer])
- return ICE_ERR_AQ_FULL;
+ return -ENOSPC;
buf_len = struct_size(buf, teid, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < num_items; i++) {
node = ice_sched_find_node_by_teid(pi->root, list[i]);
if (!node) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto move_err_exit;
}
@@ -2228,7 +2260,7 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
&grps_movd, NULL);
if (status && grps_movd != 1) {
- status = ICE_ERR_CFG;
+ status = -EIO;
goto move_err_exit;
}
@@ -2251,28 +2283,28 @@ move_err_exit:
* This function moves a VSI to an aggregator node or its subtree.
* Intermediate nodes may be created if required.
*/
-static enum ice_status
+static int
ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
u8 tc)
{
struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
u32 first_node_teid, vsi_teid;
- enum ice_status status;
u16 num_nodes_added;
u8 aggl, vsil, i;
+ int status;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* Is this VSI already part of given aggregator? */
if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
@@ -2302,7 +2334,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
&first_node_teid,
&num_nodes_added);
if (status || num_nodes[i] != num_nodes_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -2314,7 +2346,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
parent = parent->children[0];
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
}
move_nodes:
@@ -2333,14 +2365,14 @@ move_nodes:
* aggregator VSI info based on passed in boolean parameter rm_vsi_info. The
* caller holds the scheduler lock.
*/
-static enum ice_status
+static int
ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
struct ice_sched_agg_info *agg_info, u8 tc,
bool rm_vsi_info)
{
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_agg_vsi_info *tmp;
- enum ice_status status = 0;
+ int status = 0;
list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
list_entry) {
@@ -2397,7 +2429,7 @@ ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
* This function removes the aggregator node and intermediate nodes if any
* from the given TC
*/
-static enum ice_status
+static int
ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
struct ice_sched_node *tc_node, *agg_node;
@@ -2405,15 +2437,15 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
if (!agg_node)
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
/* Can't remove the aggregator node if it has children */
if (ice_sched_is_agg_inuse(pi, agg_node))
- return ICE_ERR_IN_USE;
+ return -EBUSY;
/* need to remove the whole subtree if aggregator node is the
* only child.
@@ -2422,7 +2454,7 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
struct ice_sched_node *parent = agg_node->parent;
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
if (parent->num_children > 1)
break;
@@ -2445,11 +2477,11 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
* the aggregator configuration completely for requested TC. The caller needs
* to hold the scheduler lock.
*/
-static enum ice_status
+static int
ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
u8 tc, bool rm_vsi_info)
{
- enum ice_status status = 0;
+ int status = 0;
/* If nothing to remove - return success */
if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
@@ -2478,7 +2510,7 @@ exit_rm_agg_cfg_tc:
* Save aggregator TC bitmap. This function needs to be called with scheduler
* lock held.
*/
-static enum ice_status
+static int
ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
unsigned long *tc_bitmap)
{
@@ -2486,7 +2518,7 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
agg_info = ice_get_agg_info(pi->hw, agg_id);
if (!agg_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap,
ICE_MAX_TRAFFIC_CLASS);
return 0;
@@ -2501,20 +2533,20 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
* This function creates an aggregator node and intermediate nodes if required
* for the given TC
*/
-static enum ice_status
+static int
ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
struct ice_sched_node *parent, *agg_node, *tc_node;
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u32 first_node_teid;
u16 num_nodes_added;
+ int status = 0;
u8 i, aggl;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
- return ICE_ERR_CFG;
+ return -EIO;
agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
/* Does Agg node already exist ? */
@@ -2549,14 +2581,14 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
parent = tc_node;
for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
if (!parent)
- return ICE_ERR_CFG;
+ return -EIO;
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
num_nodes[i],
&first_node_teid,
&num_nodes_added);
if (status || num_nodes[i] != num_nodes_added)
- return ICE_ERR_CFG;
+ return -EIO;
/* The newly added node can be a new parent for the next
* layer nodes
@@ -2591,13 +2623,13 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
* resources and remove aggregator ID.
* This function needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
enum ice_agg_type agg_type, unsigned long *tc_bitmap)
{
struct ice_sched_agg_info *agg_info;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u8 tc;
agg_info = ice_get_agg_info(hw, agg_id);
@@ -2606,7 +2638,7 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info),
GFP_KERNEL);
if (!agg_info)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
agg_info->agg_id = agg_id;
agg_info->agg_type = agg_type;
@@ -2653,19 +2685,17 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
*
* This function configures aggregator node(s).
*/
-enum ice_status
+int
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
u8 tc_bitmap)
{
unsigned long bitmap = tc_bitmap;
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
- status = ice_sched_cfg_agg(pi, agg_id, agg_type,
- (unsigned long *)&bitmap);
+ status = ice_sched_cfg_agg(pi, agg_id, agg_type, &bitmap);
if (!status)
- status = ice_save_agg_tc_bitmap(pi, agg_id,
- (unsigned long *)&bitmap);
+ status = ice_save_agg_tc_bitmap(pi, agg_id, &bitmap);
mutex_unlock(&pi->sched_lock);
return status;
}
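With bitmap already declared as unsigned long, the (unsigned long *)&bitmap casts in ice_cfg_agg() were redundant and are dropped. A small kernel-context sketch of why a single unsigned long works as a bitmap (names illustrative):

#include <linux/bitops.h>
#include <linux/types.h>

/* A single unsigned long can back a small bitmap, so its address can be
 * passed to the bitmap/bitops helpers directly, without a cast.
 */
static bool example_tc_enabled(u8 tc_bitmap, u8 tc)
{
	unsigned long bitmap = tc_bitmap;

	return test_bit(tc, &bitmap);
}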
@@ -2724,7 +2754,7 @@ ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
* Save VSI to aggregator TC bitmap. This function needs to be called with the scheduler
* lock held.
*/
-static enum ice_status
+static int
ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
unsigned long *tc_bitmap)
{
@@ -2733,11 +2763,11 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
agg_info = ice_get_agg_info(pi->hw, agg_id);
if (!agg_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* check if entry already exists */
agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
if (!agg_vsi_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
ICE_MAX_TRAFFIC_CLASS);
return 0;
@@ -2754,21 +2784,21 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
* already associated to the aggregator node then no operation is performed on
* the tree. This function needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
u16 vsi_handle, unsigned long *tc_bitmap)
{
- struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
+ struct ice_sched_agg_vsi_info *agg_vsi_info, *iter, *old_agg_vsi_info = NULL;
struct ice_sched_agg_info *agg_info, *old_agg_info;
- enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u8 tc;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
agg_info = ice_get_agg_info(hw, agg_id);
if (!agg_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* If the VSI is already part of another aggregator then update
* its VSI info list
*/
@@ -2776,11 +2806,13 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
if (old_agg_info && old_agg_info != agg_info) {
struct ice_sched_agg_vsi_info *vtmp;
- list_for_each_entry_safe(old_agg_vsi_info, vtmp,
+ list_for_each_entry_safe(iter, vtmp,
&old_agg_info->agg_vsi_list,
list_entry)
- if (old_agg_vsi_info->vsi_handle == vsi_handle)
+ if (iter->vsi_handle == vsi_handle) {
+ old_agg_vsi_info = iter;
break;
+ }
}
/* check if entry already exist */
@@ -2790,7 +2822,7 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*agg_vsi_info), GFP_KERNEL);
if (!agg_vsi_info)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* add VSI ID into the aggregator list */
agg_vsi_info->vsi_handle = vsi_handle;
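The rework earlier in this hunk also stops reusing the list_for_each_entry_safe() cursor after the loop: the match is copied into old_agg_vsi_info, so when nothing matches the pointer stays NULL instead of aliasing the list head. A kernel-context sketch of that lookup pattern (hypothetical types; the plain iterator is used since the sketch does not delete entries):

#include <linux/list.h>
#include <linux/types.h>

struct example_entry {
	struct list_head list;
	u16 handle;
};

/* Return the matching entry or NULL; never use the loop cursor after the
 * loop, because on a miss it points at the head, not a valid entry.
 */
static struct example_entry *example_find(struct list_head *head, u16 handle)
{
	struct example_entry *iter, *found = NULL;

	list_for_each_entry(iter, head, list) {
		if (iter->handle == handle) {
			found = iter;
			break;
		}
	}

	return found;
}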
@@ -2851,14 +2883,14 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
* returns success or error on config sched element failure. The caller
* needs to hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_aqc_txsched_elem_data *info)
{
struct ice_aqc_txsched_elem_data buf;
- enum ice_status status;
u16 elem_cfgd = 0;
u16 num_elems = 1;
+ int status;
buf = *info;
/* Parent TEID is reserved field in this aq call */
@@ -2874,7 +2906,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
&elem_cfgd, NULL);
if (status || elem_cfgd != num_elems) {
ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
- return ICE_ERR_CFG;
+ return -EIO;
}
/* Config success case */
@@ -2893,7 +2925,7 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
*
* This function configures node element's BW allocation.
*/
-static enum ice_status
+static int
ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
enum ice_rl_type rl_type, u16 bw_alloc)
{
@@ -2909,7 +2941,7 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
} else {
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* Configure element */
@@ -2925,12 +2957,12 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
*
* Move or associate VSI to a new or default aggregator node.
*/
-enum ice_status
+int
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
u8 tc_bitmap)
{
unsigned long bitmap = tc_bitmap;
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
@@ -3098,12 +3130,12 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
*
* This function converts the BW to profile structure format.
*/
-static enum ice_status
+static int
ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
struct ice_aqc_rl_profile_elem *profile)
{
- enum ice_status status = ICE_ERR_PARAM;
s64 bytes_per_sec, ts_rate, mv_tmp;
+ int status = -EINVAL;
bool found = false;
s32 encode = 0;
s64 mv = 0;
@@ -3150,7 +3182,7 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
profile->rl_encode = cpu_to_le16(encode);
status = 0;
} else {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
}
return status;
@@ -3176,9 +3208,9 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
struct ice_aqc_rl_profile_info *rl_prof_elem;
u16 profiles_added = 0, num_profiles = 1;
struct ice_aqc_rl_profile_elem *buf;
- enum ice_status status;
struct ice_hw *hw;
u8 profile_type;
+ int status;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
return NULL;
@@ -3249,7 +3281,7 @@ exit_add_rl_prof:
*
* This function configures node element's BW limit.
*/
-static enum ice_status
+static int
ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
enum ice_rl_type rl_type, u16 rl_prof_id)
{
@@ -3268,7 +3300,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
* hence only one of them may be set for any given element
*/
if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
- return ICE_ERR_CFG;
+ return -EIO;
data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
break;
@@ -3291,7 +3323,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
(le16_to_cpu(data->eir_bw.bw_profile_idx) !=
ICE_SCHED_DFLT_RL_PROF_ID))
- return ICE_ERR_CFG;
+ return -EIO;
/* EIR BW is set to default, disable it */
data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
/* Okay to enable shared BW now */
@@ -3300,7 +3332,7 @@ ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
break;
default:
/* Unknown rate limit type */
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* Configure element */
@@ -3420,15 +3452,15 @@ ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
* 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
* scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
u16 profile_id)
{
struct ice_aqc_rl_profile_info *rl_prof_elem;
- enum ice_status status = 0;
+ int status = 0;
if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Check the existing list for RL profile */
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
@@ -3441,11 +3473,11 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
/* Remove old profile ID from database */
status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
- if (status && status != ICE_ERR_IN_USE)
+ if (status && status != -EBUSY)
ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
break;
}
- if (status == ICE_ERR_IN_USE)
+ if (status == -EBUSY)
status = 0;
return status;
}
@@ -3461,16 +3493,16 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
* type CIR, EIR, or SRL to default. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
struct ice_sched_node *node,
enum ice_rl_type rl_type, u8 layer_num)
{
- enum ice_status status;
struct ice_hw *hw;
u8 profile_type;
u16 rl_prof_id;
u16 old_id;
+ int status;
hw = pi->hw;
switch (rl_type) {
@@ -3488,7 +3520,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* Save existing RL prof ID for later clean up */
old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
@@ -3518,7 +3550,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
* them may be set for any given element. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
struct ice_sched_node *node,
u8 layer_num, enum ice_rl_type rl_type, u32 bw)
@@ -3562,14 +3594,14 @@ ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
* node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
* ID from local database. The caller needs to hold scheduler lock.
*/
-static enum ice_status
+int
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
struct ice_aqc_rl_profile_info *rl_prof_info;
- enum ice_status status = ICE_ERR_PARAM;
struct ice_hw *hw = pi->hw;
u16 old_id, rl_prof_id;
+ int status = -EINVAL;
rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
if (!rl_prof_info)
@@ -3599,6 +3631,57 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
}
/**
+ * ice_sched_set_node_priority - set node's priority
+ * @pi: port information structure
+ * @node: tree node
+ * @priority: number 0-7 representing priority among siblings
+ *
+ * This function sets the priority of a node among its siblings.
+ */
+int
+ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
+ u16 priority)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+
+ buf = node->info;
+ data = &buf.data;
+
+ data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
+ data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_PRIO_M, priority);
+
+ return ice_sched_update_elem(pi->hw, node, &buf);
+}
+
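ice_sched_set_node_priority() packs the 0-7 priority into the element's generic field with FIELD_PREP() against ICE_AQC_ELEM_GENERIC_PRIO_M, then pushes the updated element to firmware via ice_sched_update_elem(). A standalone sketch of the FIELD_PREP() idiom (the mask below is illustrative, not the hardware definition):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_PRIO_M	GENMASK(3, 1)	/* illustrative mask, bits 3:1 */

/* FIELD_PREP() shifts the value into place under the mask, so the caller
 * never hard-codes the shift amount; clear the old field before OR-ing.
 */
static u8 example_pack_prio(u8 generic, u8 prio)
{
	generic &= ~EXAMPLE_PRIO_M;
	generic |= FIELD_PREP(EXAMPLE_PRIO_M, prio);
	return generic;
}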
+/**
+ * ice_sched_set_node_weight - set node's weight
+ * @pi: port information structure
+ * @node: tree node
+ * @weight: number 1-200 representing weight for WFQ
+ *
+ * This function sets the weight of the node for the WFQ algorithm.
+ */
+int
+ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight)
+{
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
+
+ buf = node->info;
+ data = &buf.data;
+
+ data->valid_sections = ICE_AQC_ELEM_VALID_CIR | ICE_AQC_ELEM_VALID_EIR |
+ ICE_AQC_ELEM_VALID_GENERIC;
+ data->cir_bw.bw_alloc = cpu_to_le16(weight);
+ data->eir_bw.bw_alloc = cpu_to_le16(weight);
+
+ data->generic |= FIELD_PREP(ICE_AQC_ELEM_GENERIC_SP_M, 0x0);
+
+ return ice_sched_update_elem(pi->hw, node, &buf);
+}
+
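ice_sched_set_node_weight() writes the same 1-200 weight into both the CIR and EIR bandwidth-allocation fields and writes 0 into the strict-priority generic field, so the node competes with its siblings by weighted fair queuing. A toy, self-contained illustration of what relative WFQ weights mean (not driver code):

#include <stdio.h>

/* Each sibling's long-term share of the parent's bandwidth is its weight
 * divided by the sum of all sibling weights (valid weights are 1-200).
 */
int main(void)
{
	const unsigned int weight[] = { 1, 50, 200 };
	unsigned int sum = 0, i;

	for (i = 0; i < 3; i++)
		sum += weight[i];

	for (i = 0; i < 3; i++)
		printf("node %u: %.1f%% of parent bandwidth\n",
		       i, 100.0 * weight[i] / sum);
	return 0;
}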
+/**
* ice_sched_set_node_bw_lmt - set node's BW limit
* @pi: port information structure
* @node: tree node
@@ -3608,31 +3691,31 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
* It updates node's BW limit parameters like BW RL profile ID of type CIR,
* EIR, or SRL. The caller needs to hold scheduler lock.
*/
-static enum ice_status
+int
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw)
{
struct ice_sched_node *cfg_node = node;
- enum ice_status status;
+ int status;
struct ice_hw *hw;
u8 layer_num;
if (!pi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw = pi->hw;
/* Remove unused RL profile IDs from HW and SW DB */
ice_sched_rm_unused_rl_prof(pi);
layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
node->tx_sched_layer);
if (layer_num >= hw->num_tx_sched_layers)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (rl_type == ICE_SHARED_BW) {
/* SRL node may be different */
cfg_node = ice_sched_get_srl_node(node, layer_num);
if (!cfg_node)
- return ICE_ERR_CFG;
+ return -EIO;
}
/* EIR BW and Shared BW profiles are mutually exclusive and
* hence only one of them may be set for any given element
@@ -3657,7 +3740,7 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
* type CIR, EIR, or SRL to default. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
struct ice_sched_node *node,
enum ice_rl_type rl_type)
@@ -3675,7 +3758,7 @@ ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
* behalf of the requested node (first argument). This function needs to be
* called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
{
/* SRL profiles are not available on all layers. Check if the
@@ -3690,7 +3773,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
(node->parent && node->parent->num_children == 1)))
return 0;
- return ICE_ERR_CFG;
+ return -EIO;
}
/**
@@ -3701,7 +3784,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
*
* Save BW information of queue type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
{
switch (rl_type) {
@@ -3715,7 +3798,7 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
return 0;
}
@@ -3731,16 +3814,16 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
*
* This function sets BW limit of queue scheduling node.
*/
-static enum ice_status
+static int
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_node *node;
struct ice_q_ctx *q_ctx;
+ int status = -EINVAL;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
mutex_lock(&pi->sched_lock);
q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
if (!q_ctx)
@@ -3762,7 +3845,7 @@ ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
node->tx_sched_layer);
if (sel_layer >= pi->hw->num_tx_sched_layers) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto exit_q_bw_lmt;
}
status = ice_sched_validate_srl_node(node, sel_layer);
@@ -3794,7 +3877,7 @@ exit_q_bw_lmt:
*
* This function configures BW limit of queue scheduling node.
*/
-enum ice_status
+int
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
@@ -3812,7 +3895,7 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
*
* This function configures BW default limit of queue scheduling node.
*/
-enum ice_status
+int
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type)
{
@@ -3880,13 +3963,13 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
* This function sets BW limit of VSI or Aggregator scheduling node
* based on TC information from passed in argument BW.
*/
-static enum ice_status
+int
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_node *node;
+ int status = -EINVAL;
if (!pi)
return status;
@@ -3921,7 +4004,7 @@ exit_set_node_bw_lmt_per_tc:
* This function configures BW limit of VSI scheduling node based on TC
* information.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
@@ -3948,7 +4031,7 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* This function configures default BW limit of VSI scheduling node based on TC
* information.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type)
{
@@ -3976,13 +4059,13 @@ ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* burst size value is used for future rate limit calls. It doesn't change the
* existing or previously created RL profiles.
*/
-enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
+int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
{
u16 burst_size_to_prog;
if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
bytes > ICE_MAX_BURST_SIZE_ALLOWED)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (ice_round_to_num(bytes, 64) <=
ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
/* 64 byte granularity case */
@@ -4017,13 +4100,13 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
* This function configures node element's priority value. It
* needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
u8 priority)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
- enum ice_status status;
+ int status;
buf = node->info;
data = &buf.data;
@@ -4044,12 +4127,12 @@ ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
* This function restores node's BW from bw_t_info. The caller needs
* to hold the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_bw_type_info *bw_t_info)
{
struct ice_port_info *pi = hw->port_info;
- enum ice_status status = ICE_ERR_PARAM;
+ int status = -EINVAL;
u16 bw_alloc;
if (!node)
@@ -4137,7 +4220,7 @@ void ice_sched_replay_agg(struct ice_hw *hw)
if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap,
ICE_MAX_TRAFFIC_CLASS)) {
DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
- enum ice_status status;
+ int status;
bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
ice_sched_get_ena_tc_bitmap(pi,
@@ -4191,18 +4274,17 @@ void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
* their node bandwidth information. This function needs to be called with
* scheduler lock held.
*/
-static enum ice_status
-ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+static int ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_port_info *pi = hw->port_info;
struct ice_sched_agg_info *agg_info;
- enum ice_status status;
+ int status;
bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
if (!agg_info)
return 0; /* Not present in list - default Agg case */
@@ -4233,10 +4315,10 @@ ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
* This function replays association of VSI to aggregator type nodes, and
* node bandwidth information.
*/
-enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_port_info *pi = hw->port_info;
- enum ice_status status;
+ int status;
mutex_lock(&pi->sched_lock);
status = ice_sched_replay_vsi_agg(hw, vsi_handle);
@@ -4252,14 +4334,13 @@ enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
* This function replays queue type node bandwidth. This function needs to be
* called with scheduler lock held.
*/
-enum ice_status
-ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
+int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
{
struct ice_sched_node *q_node;
/* Following also checks the presence of node in tree */
q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
if (!q_node)
- return ICE_ERR_PARAM;
+ return -EINVAL;
return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 6bddcbecaf5e..9c100747445a 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -6,6 +6,8 @@
#include "ice_common.h"
+#define SCHED_NODE_NAME_MAX_LEN 32
+
#define ICE_QGRP_LAYER_OFFSET 2
#define ICE_VSI_LAYER_OFFSET 4
#define ICE_AGG_LAYER_OFFSET 6
@@ -65,12 +67,35 @@ struct ice_sched_agg_info {
};
/* FW AQ command calls */
-enum ice_status
+int
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
-enum ice_status ice_sched_init_port(struct ice_port_info *pi);
-enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+
+int
+ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u32 bw);
+
+int
+ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
+ enum ice_rl_type rl_type, u32 bw, u8 layer_num);
+
+int
+ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
+ struct ice_sched_node *parent, u8 layer, u16 num_nodes,
+ u16 *num_nodes_added, u32 *first_node_teid,
+ struct ice_sched_node **prealloc_nodes);
+
+int
+ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
+ u16 num_items, u32 *list);
+
+int ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
+ u16 priority);
+int ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight);
+
+int ice_sched_init_port(struct ice_port_info *pi);
+int ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_get_psm_clk_freq(struct ice_hw *hw);
void ice_sched_clear_port(struct ice_port_info *pi);
@@ -79,43 +104,50 @@ void ice_sched_clear_agg(struct ice_hw *hw);
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
-enum ice_status
+int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
- struct ice_aqc_txsched_elem_data *info);
+ struct ice_aqc_txsched_elem_data *info,
+ struct ice_sched_node *prealloc_node);
+void
+ice_sched_update_parent(struct ice_sched_node *new_parent,
+ struct ice_sched_node *node);
void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node);
struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner);
-enum ice_status
+int
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
-enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
-enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
+int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
/* Tx scheduler rate limiter functions */
-enum ice_status
+int
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id,
enum ice_agg_type agg_type, u8 tc_bitmap);
-enum ice_status
+int
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
u8 tc_bitmap);
-enum ice_status
+int
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type);
-enum ice_status
+int
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type);
-enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
+int
+ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
+ enum ice_agg_type agg_type, u8 tc,
+ enum ice_rl_type rl_type, u32 bw);
+int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
-enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
-ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
+int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
+int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index aa11d07793d4..f1dca59bd844 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1,532 +1,1779 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
-#include "ice_common.h"
-#include "ice_sriov.h"
+#include "ice.h"
+#include "ice_vf_lib_private.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice_dcb_lib.h"
+#include "ice_flow.h"
+#include "ice_eswitch.h"
+#include "ice_virtchnl_allowlist.h"
+#include "ice_flex_pipe.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_vlan.h"
/**
- * ice_aq_send_msg_to_vf
- * @hw: pointer to the hardware structure
- * @vfid: VF ID to send msg
- * @v_opcode: opcodes for VF-PF communication
- * @v_retval: return error code
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- * @cd: pointer to command details
+ * ice_free_vf_entries - Free all VF entries from the hash table
+ * @pf: pointer to the PF structure
*
- * Send message to VF driver (0x0802) using mailbox
- * queue and asynchronously sending message via
- * ice_sq_send_cmd() function
+ * Iterate over the VF hash table, removing and releasing all VF entries.
+ * Called during VF teardown or as cleanup during failed VF initialization.
*/
-enum ice_status
-ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
- u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+static void ice_free_vf_entries(struct ice_pf *pf)
{
- struct ice_aqc_pf_vf_msg *cmd;
- struct ice_aq_desc desc;
+ struct ice_vfs *vfs = &pf->vfs;
+ struct hlist_node *tmp;
+ struct ice_vf *vf;
+ unsigned int bkt;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+ /* Remove all VFs from the hash table and release their main
+ * reference. Once all references to the VF are dropped, ice_put_vf()
+ * will call ice_release_vf which will remove the VF memory.
+ */
+ lockdep_assert_held(&vfs->table_lock);
- cmd = &desc.params.virt;
- cmd->id = cpu_to_le32(vfid);
+ hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
+ hash_del_rcu(&vf->entry);
+ ice_put_vf(vf);
+ }
+}
- desc.cookie_high = cpu_to_le32(v_opcode);
- desc.cookie_low = cpu_to_le32(v_retval);
+/**
+ * ice_free_vf_res - Free a VF's resources
+ * @vf: pointer to the VF info
+ */
+static void ice_free_vf_res(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ int i, last_vector_idx;
+
+ /* First, disable VF's configuration API to prevent OS from
+ * accessing the VF's VSI after it's freed or invalidated.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ ice_vf_fdir_exit(vf);
+ /* free VF control VSI */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_vsi_release(vf);
- if (msglen)
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ /* free VSI and disconnect it from the parent uplink */
+ if (vf->lan_vsi_idx != ICE_NO_VSI) {
+ ice_vf_vsi_release(vf);
+ vf->num_mac = 0;
+ }
- return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+ last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;
+
+ /* clear VF MDD event information */
+ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+ memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+
+ /* Disable interrupts so that VF starts in a known state */
+ for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
+ wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
+ ice_flush(&pf->hw);
+ }
+ /* reset some of the state variables keeping track of the resources */
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}
/**
- * ice_conv_link_speed_to_virtchnl
- * @adv_link_support: determines the format of the returned link speed
- * @link_speed: variable containing the link_speed to be converted
+ * ice_dis_vf_mappings
+ * @vf: pointer to the VF structure
+ */
+static void ice_dis_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int first, last, v;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ vsi = ice_get_vf_vsi(vf);
+ if (WARN_ON(!vsi))
+ return;
+
+ dev = ice_pf_to_dev(pf);
+ wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
+ wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
+
+ first = vf->first_vector_idx;
+ last = first + pf->vfs.num_msix_per - 1;
+ for (v = first; v <= last; v++) {
+ u32 reg;
+
+ reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
+ GLINT_VECT2FUNC_IS_PF_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
+
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
+ else
+ dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
+}
+
+/**
+ * ice_sriov_free_msix_res - Reset/free any used MSIX resources
+ * @pf: pointer to the PF structure
*
- * Convert link speed supported by HW to link speed supported by virtchnl.
- * If adv_link_support is true, then return link speed in Mbps. Else return
- * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
- * needs to cast back to an enum virtchnl_link_speed in the case where
- * adv_link_support is false, but when adv_link_support is true the caller can
- * expect the speed in Mbps.
+ * Since no MSIX entries are taken from the pf->irq_tracker, just clear
+ * the pf->sriov_base_vector.
+ *
+ * Returns 0 on success, and -EINVAL on error.
*/
-u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
- u32 speed;
+ struct ice_res_tracker *res;
- if (adv_link_support)
- switch (link_speed) {
- case ICE_AQ_LINK_SPEED_10MB:
- speed = ICE_LINK_SPEED_10MBPS;
- break;
- case ICE_AQ_LINK_SPEED_100MB:
- speed = ICE_LINK_SPEED_100MBPS;
- break;
- case ICE_AQ_LINK_SPEED_1000MB:
- speed = ICE_LINK_SPEED_1000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_2500MB:
- speed = ICE_LINK_SPEED_2500MBPS;
- break;
- case ICE_AQ_LINK_SPEED_5GB:
- speed = ICE_LINK_SPEED_5000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- speed = ICE_LINK_SPEED_10000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_20GB:
- speed = ICE_LINK_SPEED_20000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- speed = ICE_LINK_SPEED_25000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- speed = ICE_LINK_SPEED_40000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_50GB:
- speed = ICE_LINK_SPEED_50000MBPS;
- break;
- case ICE_AQ_LINK_SPEED_100GB:
- speed = ICE_LINK_SPEED_100000MBPS;
- break;
- default:
- speed = ICE_LINK_SPEED_UNKNOWN;
- break;
- }
+ if (!pf)
+ return -EINVAL;
+
+ res = pf->irq_tracker;
+ if (!res)
+ return -EINVAL;
+
+ /* give back irq_tracker resources used */
+ WARN_ON(pf->sriov_base_vector < res->num_entries);
+
+ pf->sriov_base_vector = 0;
+
+ return 0;
+}
+
+/**
+ * ice_free_vfs - Free all VFs
+ * @pf: pointer to the PF structure
+ */
+void ice_free_vfs(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vfs *vfs = &pf->vfs;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ if (!ice_has_vfs(pf))
+ return;
+
+ while (test_and_set_bit(ICE_VF_DIS, pf->state))
+ usleep_range(1000, 2000);
+
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
else
- /* Virtchnl speeds are not defined for every speed supported in
- * the hardware. To maintain compatibility with older AVF
- * drivers, while reporting the speed the new speed values are
- * resolved to the closest known virtchnl speeds
- */
- switch (link_speed) {
- case ICE_AQ_LINK_SPEED_10MB:
- case ICE_AQ_LINK_SPEED_100MB:
- speed = (u32)VIRTCHNL_LINK_SPEED_100MB;
- break;
- case ICE_AQ_LINK_SPEED_1000MB:
- case ICE_AQ_LINK_SPEED_2500MB:
- case ICE_AQ_LINK_SPEED_5GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_1GB;
- break;
- case ICE_AQ_LINK_SPEED_10GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_10GB;
- break;
- case ICE_AQ_LINK_SPEED_20GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_20GB;
- break;
- case ICE_AQ_LINK_SPEED_25GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_25GB;
- break;
- case ICE_AQ_LINK_SPEED_40GB:
- case ICE_AQ_LINK_SPEED_50GB:
- case ICE_AQ_LINK_SPEED_100GB:
- speed = (u32)VIRTCHNL_LINK_SPEED_40GB;
- break;
- default:
- speed = (u32)VIRTCHNL_LINK_SPEED_UNKNOWN;
- break;
+ dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
+
+ mutex_lock(&vfs->table_lock);
+
+ ice_eswitch_release(pf);
+
+ ice_for_each_vf(pf, bkt, vf) {
+ mutex_lock(&vf->cfg_lock);
+
+ ice_dis_vf_qs(vf);
+
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ /* disable VF qp mappings and set VF disable state */
+ ice_dis_vf_mappings(vf);
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ ice_free_vf_res(vf);
}
- return speed;
+ if (!pci_vfs_assigned(pf->pdev)) {
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ }
+
+ /* clear malicious info since the VF is getting released */
+ list_del(&vf->mbx_info.list_entry);
+
+ mutex_unlock(&vf->cfg_lock);
+ }
+
+ if (ice_sriov_free_msix_res(pf))
+ dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
+
+ vfs->num_qps_per = 0;
+ ice_free_vf_entries(pf);
+
+ mutex_unlock(&vfs->table_lock);
+
+ clear_bit(ICE_VF_DIS, pf->state);
+ clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}
-/* The mailbox overflow detection algorithm helps to check if there
- * is a possibility of a malicious VF transmitting too many MBX messages to the
- * PF.
- * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
- * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
- * The struct ice_mbx_snapshot helps to track and traverse a static window of
- * messages within the mailbox queue while looking for a malicious VF.
+/**
+ * ice_vf_vsi_setup - Set up a VF VSI
+ * @vf: VF to setup VSI for
*
- * 2. When the caller starts processing its mailbox queue in response to an
- * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
- * the algorithm can be run for the first time for that interrupt. This can be
- * done via ice_mbx_reset_snapshot().
+ * Returns pointer to the successfully allocated VSI struct on success,
+ * otherwise returns NULL on failure.
+ */
+static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
+{
+ struct ice_vsi_cfg_params params = {};
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ params.type = ICE_VSI_VF;
+ params.pi = ice_vf_get_port_info(vf);
+ params.vf = vf;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ vsi = ice_vsi_setup(pf, &params);
+
+ if (!vsi) {
+ dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
+ ice_vf_invalidate_vsi(vf);
+ return NULL;
+ }
+
+ vf->lan_vsi_idx = vsi->idx;
+ vf->lan_vsi_num = vsi->vsi_num;
+
+ return vsi;
+}
+
+/**
+ * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF that the first MSIX vector index is being calculated for
*
- * 3. For every message read by the caller from the MBX Queue, the caller must
- * call the detection algorithm's entry function ice_mbx_vf_state_handler().
- * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
- * filled as it is required to be passed to the algorithm.
+ * This returns the first MSIX vector index in PF space that is used by this VF.
+ * This index is used when accessing PF relative registers such as
+ * GLINT_VECT2FUNC and GLINT_DYN_CTL.
+ * This will always be the OICR index in the AVF driver so any functionality
+ * using vf->first_vector_idx for queue configuration will have to increment by
+ * 1 to avoid meddling with the OICR index.
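+ *
+ * For illustration only (hypothetical values): with sriov_base_vector = 96 and
+ * vfs.num_msix_per = 17, VF 2 would get first_vector_idx = 96 + 2 * 17 = 130.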
+ */
+static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
+{
+ return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
+}
+
+/**
+ * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
+ * @vf: VF to enable MSIX mappings for
*
- * 4. Every time a message is read from the MBX queue, a VFId is received which
- * is passed to the state handler. The boolean output is_malvf of the state
- * handler ice_mbx_vf_state_handler() serves as an indicator to the caller
- * whether this VF is malicious or not.
+ * Some of the registers need to be indexed/configured using hardware global
+ * device values and other registers need 0-based values, which represent PF
+ * based values.
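+ *
+ * For illustration only (hypothetical values): with msix_vector_first_id = 1
+ * and a PF-based first vector index of 130, the device-based first MSI-X
+ * index written to VPINT_ALLOC would be 131.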
+ */
+static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
+{
+ int device_based_first_msix, device_based_last_msix;
+ int pf_based_first_msix, pf_based_last_msix, v;
+ struct ice_pf *pf = vf->pf;
+ int device_based_vf_id;
+ struct ice_hw *hw;
+ u32 reg;
+
+ hw = &pf->hw;
+ pf_based_first_msix = vf->first_vector_idx;
+ pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;
+
+ device_based_first_msix = pf_based_first_msix +
+ pf->hw.func_caps.common_cap.msix_vector_first_id;
+ device_based_last_msix =
+ (device_based_first_msix + pf->vfs.num_msix_per) - 1;
+ device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
+ VPINT_ALLOC_FIRST_M) |
+ ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
+ VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
+ wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
+
+ reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
+ & VPINT_ALLOC_PCI_FIRST_M) |
+ ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
+ VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
+ wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
+
+ /* map the interrupts to its functions */
+ for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
+ reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
+ GLINT_VECT2FUNC_VF_NUM_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
+ GLINT_VECT2FUNC_PF_NUM_M));
+ wr32(hw, GLINT_VECT2FUNC(v), reg);
+ }
+
+ /* Map mailbox interrupt to VF MSI-X vector 0 */
+ wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
+ * @vf: VF to enable the mappings for
+ * @max_txq: max Tx queues allowed on the VF's VSI
+ * @max_rxq: max Rx queues allowed on the VF's VSI
+ */
+static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ u32 reg;
+
+ if (WARN_ON(!vsi))
+ return;
+
+ /* set regardless of mapping mode */
+ wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
+
+ /* VF Tx queues allocation */
+ if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ /* set the VF PF Tx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
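+		 * (e.g. a hypothetical VF VSI with 4 Tx queues would have
+		 * VFNUMQ written as 3)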
+ */
+ reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
+ VPLAN_TX_QBASE_VFFIRSTQ_M) |
+ (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
+ VPLAN_TX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
+ }
+
+ /* set regardless of mapping mode */
+ wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
+
+ /* VF Rx queues allocation */
+ if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
+ /* set the VF PF Rx queue range
+ * VFNUMQ value should be set to (number of queues - 1). A value
+ * of 0 means 1 queue and a value of 255 means 256 queues
+ */
+ reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
+ VPLAN_RX_QBASE_VFFIRSTQ_M) |
+ (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
+ VPLAN_RX_QBASE_VFNUMQ_M));
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
+ } else {
+ dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
+ }
+}
+
+/**
+ * ice_ena_vf_mappings - enable VF MSIX and queue mapping
+ * @vf: pointer to the VF structure
+ */
+static void ice_ena_vf_mappings(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ ice_ena_vf_msix_mappings(vf);
+ ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
+}
+
+/**
+ * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
+ * @vf: VF to calculate the register index for
+ * @q_vector: a q_vector associated to the VF
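+ *
+ * For illustration only (hypothetical values): with sriov_base_vector = 96,
+ * num_msix_per = 17, VF 1 and q_vector->v_idx = 0, the register index would
+ * be 96 + 17 * 1 + 0 + 1 = 114 (the extra 1 accounts for the OICR vector).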
+ */
+int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
+{
+ struct ice_pf *pf;
+
+ if (!vf || !q_vector)
+ return -EINVAL;
+
+ pf = vf->pf;
+
+ /* always add one to account for the OICR being the first MSIX */
+ return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
+ q_vector->v_idx + 1;
+}
+
+/**
+ * ice_get_max_valid_res_idx - Get the max valid resource index
+ * @res: pointer to the resource to find the max valid index for
*
- * 5. When a VF is identified to be malicious, the caller can send a message
- * to the system administrator. The caller can invoke ice_mbx_report_malvf()
- * to help determine if a malicious VF is to be reported or not. This function
- * requires the caller to maintain a global bitmap to track all malicious VFs
- * and pass that to ice_mbx_report_malvf() along with the VFID which was identified
- * to be malicious by ice_mbx_vf_state_handler().
+ * Start from the end of the ice_res_tracker and return right when we find the
+ * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
+ * valid for SR-IOV because it is the only consumer that manipulates the
+ * res->end and this is always called when res->end is set to res->num_entries.
+ */
+static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
+{
+ int i;
+
+ if (!res)
+ return -EINVAL;
+
+ for (i = res->num_entries - 1; i >= 0; i--)
+ if (res->list[i] & ICE_RES_VALID_BIT)
+ return i;
+
+ return 0;
+}
+
+/**
+ * ice_sriov_set_msix_res - Set any used MSIX resources
+ * @pf: pointer to PF structure
+ * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
*
- * 6. The global bitmap maintained by PF can be cleared completely if PF is in
- * reset or the bit corresponding to a VF can be cleared if that VF is in reset.
- * When a VF is shut down and brought back up, we assume that the new VF
- * brought up is not malicious and hence report it if found malicious.
+ * This function allows SR-IOV resources to be taken from the end of the PF's
+ * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
+ * just set the pf->sriov_base_vector and return success.
*
- * 7. The function ice_mbx_reset_snapshot() is called to reset the information
- * in ice_mbx_snapshot for every new mailbox interrupt handled.
+ * If there are not enough resources available, return an error. This should
+ * always be caught by ice_set_per_vf_res().
*
- * 8. The memory allocated for variables in ice_mbx_snapshot is de-allocated
- * when driver is unloaded.
+ * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
+ * in the PF's space available for SR-IOV.
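+ *
+ * For illustration only (hypothetical values): with 512 total MSI-X vectors,
+ * 100 entries already used by the irq_tracker and 136 vectors needed for the
+ * VFs, sriov_base_vector would be 512 - 136 = 376, which is above 100, so the
+ * request succeeds.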
*/
-#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
-/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that
- * the max messages check must be ignored in the algorithm
- */
-#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
+static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
+{
+ u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
+ int vectors_used = pf->irq_tracker->num_entries;
+ int sriov_base_vector;
+
+ sriov_base_vector = total_vectors - num_msix_needed;
+
+ /* make sure we only grab irq_tracker entries from the list end and
+ * that we have enough available MSIX vectors
+ */
+ if (sriov_base_vector < vectors_used)
+ return -EINVAL;
+
+ pf->sriov_base_vector = sriov_base_vector;
+
+ return 0;
+}
/**
- * ice_mbx_traverse - Pass through mailbox snapshot
- * @hw: pointer to the HW struct
- * @new_state: new algorithm state
+ * ice_set_per_vf_res - check if vectors and queues are available
+ * @pf: pointer to the PF structure
+ * @num_vfs: the number of SR-IOV VFs being configured
+ *
+ * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
+ * get more vectors and can enable more queues per VF. Note that this does not
+ * grab any vectors from the SW pool already allocated. Also note, that all
+ * vector counts include one for each VF's miscellaneous interrupt vector
+ * (i.e. OICR).
*
- * Traversing the mailbox static snapshot without checking
- * for malicious VFs.
+ * Minimum VFs - 2 vectors, 1 queue pair
+ * Small VFs - 5 vectors, 4 queue pairs
+ * Medium VFs - 17 vectors, 16 queue pairs
+ *
+ * Second, determine number of queue pairs per VF by starting with a pre-defined
+ * maximum each VF supports. If this is not possible, then we adjust based on
+ * queue pairs available on the device.
+ *
+ * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
+ * by each VF during VF initialization and reset.
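+ *
+ * For illustration only (hypothetical numbers): with 40 MSI-X vectors left for
+ * SR-IOV and 8 VFs requested, 5 vectors are available per VF, so the small
+ * tier (5 vectors, 4 queue pairs) would be selected.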
*/
-static void
-ice_mbx_traverse(struct ice_hw *hw,
- enum ice_mbx_snapshot_state *new_state)
+static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
- struct ice_mbx_snap_buffer_data *snap_buf;
- u32 num_iterations;
+ int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
+ u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
+ int msix_avail_per_vf, msix_avail_for_sriov;
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
- snap_buf = &hw->mbx_snapshot.mbx_buf;
+ lockdep_assert_held(&pf->vfs.table_lock);
- /* As mailbox buffer is circular, applying a mask
- * on the incremented iteration count.
- */
- num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);
-
- /* Checking either of the below conditions to exit snapshot traversal:
- * Condition-1: If the number of iterations in the mailbox is equal to
- * the mailbox head which would indicate that we have reached the end
- * of the static snapshot.
- * Condition-2: If the maximum messages serviced in the mailbox for a
- * given interrupt is the highest possible value then there is no need
- * to check if the number of messages processed is equal to it. If not
- * check if the number of messages processed is greater than or equal
- * to the maximum number of mailbox entries serviced in current work item.
- */
- if (num_iterations == snap_buf->head ||
- (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
- ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
- *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+ if (!num_vfs)
+ return -EINVAL;
+
+ if (max_valid_res_idx < 0)
+ return -ENOSPC;
+
+ /* determine MSI-X resources per VF */
+ msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
+ pf->irq_tracker->num_entries;
+ msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
+ if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
+ } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
+ } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
+ num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
+ } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
+ num_msix_per_vf = ICE_MIN_INTR_PER_VF;
+ } else {
+ dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
+ msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
+ num_vfs);
+ return -ENOSPC;
+ }
+
+ num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
+ ICE_MAX_RSS_QS_PER_VF);
+ avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
+ if (!avail_qs)
+ num_txq = 0;
+ else if (num_txq > avail_qs)
+ num_txq = rounddown_pow_of_two(avail_qs);
+
+ num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
+ ICE_MAX_RSS_QS_PER_VF);
+ avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
+ if (!avail_qs)
+ num_rxq = 0;
+ else if (num_rxq > avail_qs)
+ num_rxq = rounddown_pow_of_two(avail_qs);
+
+ if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
+ dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
+ ICE_MIN_QS_PER_VF, num_vfs);
+ return -ENOSPC;
+ }
+
+ err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
+ if (err) {
+ dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
+ num_vfs, err);
+ return err;
+ }
+
+ /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
+ pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
+ pf->vfs.num_msix_per = num_msix_per_vf;
+ dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
+ num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);
+
+ return 0;
}
/**
- * ice_mbx_detect_malvf - Detect malicious VF in snapshot
- * @hw: pointer to the HW struct
- * @vf_id: relative virtual function ID
- * @new_state: new algorithm state
- * @is_malvf: boolean output to indicate if VF is malicious
+ * ice_init_vf_vsi_res - initialize/setup VF VSI resources
+ * @vf: VF to initialize/setup the VSI for
*
- * This function tracks the number of asynchronous messages
- * sent per VF and marks the VF as malicious if it exceeds
- * the permissible number of messages to send.
+ * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the
+ * VF VSI's broadcast filter. It is only used during initial VF creation.
*/
-static enum ice_status
-ice_mbx_detect_malvf(struct ice_hw *hw, u16 vf_id,
- enum ice_mbx_snapshot_state *new_state,
- bool *is_malvf)
+static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int err;
+
+ vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
- if (vf_id >= snap->mbx_vf.vfcntr_len)
- return ICE_ERR_OUT_OF_RANGE;
+ vsi = ice_vf_vsi_setup(vf);
+ if (!vsi)
+ return -ENOMEM;
- /* increment the message count in the VF array */
- snap->mbx_vf.vf_cntr[vf_id]++;
+ err = ice_vf_init_host_cfg(vf, vsi);
+ if (err)
+ goto release_vsi;
+
+ return 0;
+
+release_vsi:
+ ice_vf_vsi_release(vf);
+ return err;
+}
+
+/**
+ * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
+ * @pf: PF the VFs are associated with
+ */
+static int ice_start_vfs(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ unsigned int bkt, it_cnt;
+ struct ice_vf *vf;
+ int retval;
- if (snap->mbx_vf.vf_cntr[vf_id] >= ICE_ASYNC_VF_MSG_THRESHOLD)
- *is_malvf = true;
+ lockdep_assert_held(&pf->vfs.table_lock);
- /* continue to iterate through the mailbox snapshot */
- ice_mbx_traverse(hw, new_state);
+ it_cnt = 0;
+ ice_for_each_vf(pf, bkt, vf) {
+ vf->vf_ops->clear_reset_trigger(vf);
+ retval = ice_init_vf_vsi_res(vf);
+ if (retval) {
+ dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
+ vf->vf_id, retval);
+ goto teardown;
+ }
+
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ ice_ena_vf_mappings(vf);
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+ it_cnt++;
+ }
+
+ ice_flush(hw);
return 0;
+
+teardown:
+ ice_for_each_vf(pf, bkt, vf) {
+ if (it_cnt == 0)
+ break;
+
+ ice_dis_vf_mappings(vf);
+ ice_vf_vsi_release(vf);
+ it_cnt--;
+ }
+
+ return retval;
}
/**
- * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
- * @snap: pointer to mailbox snapshot structure in the ice_hw struct
+ * ice_sriov_free_vf - Free VF memory after all references are dropped
+ * @vf: pointer to VF to free
*
- * Reset the mailbox snapshot structure and clear VF counter array.
+ * Called by ice_put_vf through ice_release_vf once the last reference to a VF
+ * structure has been dropped.
*/
-static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
+static void ice_sriov_free_vf(struct ice_vf *vf)
{
- u32 vfcntr_len;
+ mutex_destroy(&vf->cfg_lock);
- if (!snap || !snap->mbx_vf.vf_cntr)
- return;
+ kfree_rcu(vf, rcu);
+}
+
+/**
+ * ice_sriov_clear_reset_state - clears VF Reset status register
+ * @vf: the vf to configure
+ */
+static void ice_sriov_clear_reset_state(struct ice_vf *vf)
+{
+ struct ice_hw *hw = &vf->pf->hw;
- /* Clear VF counters. */
- vfcntr_len = snap->mbx_vf.vfcntr_len;
- if (vfcntr_len)
- memset(snap->mbx_vf.vf_cntr, 0,
- (vfcntr_len * sizeof(*snap->mbx_vf.vf_cntr)));
-
- /* Reset mailbox snapshot for a new capture. */
- memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
- snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
-}
-
-/**
- * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
- * @hw: pointer to the HW struct
- * @mbx_data: pointer to structure containing mailbox data
- * @vf_id: relative virtual function (VF) ID
- * @is_malvf: boolean output to indicate if VF is malicious
- *
- * The function serves as an entry point for the malicious VF
- * detection algorithm by handling the different states and state
- * transitions of the algorithm:
- * New snapshot: This state is entered when creating a new static
- * snapshot. The data from any previous mailbox snapshot is
- * cleared and a new capture of the mailbox head and tail is
- * logged. This will be the new static snapshot to detect
- * asynchronous messages sent by VFs. On capturing the snapshot
- * and depending on whether the number of pending messages in that
- * snapshot exceed the watermark value, the state machine enters
- * traverse or detect states.
- * Traverse: If pending message count is below watermark then iterate
- * through the snapshot without any action on VF.
- * Detect: If pending message count exceeds watermark traverse
- * the static snapshot and look for a malicious VF.
- */
-enum ice_status
-ice_mbx_vf_state_handler(struct ice_hw *hw,
- struct ice_mbx_data *mbx_data, u16 vf_id,
- bool *is_malvf)
-{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
- struct ice_mbx_snap_buffer_data *snap_buf;
- struct ice_ctl_q_info *cq = &hw->mailboxq;
- enum ice_mbx_snapshot_state new_state;
- enum ice_status status = 0;
-
- if (!is_malvf || !mbx_data)
- return ICE_ERR_BAD_PTR;
-
- /* When entering the mailbox state machine assume that the VF
- * is not malicious until detected.
+ /* Clear the reset status register so that VF immediately sees that
+ * the device is resetting, even if hardware hasn't yet gotten around
+ * to clearing VFGEN_RSTAT for us.
*/
- *is_malvf = false;
-
- /* Checking if max messages allowed to be processed while servicing current
- * interrupt is not less than the defined AVF message threshold.
- */
- if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
- return ICE_ERR_INVAL_SIZE;
-
- /* The watermark value should not be lesser than the threshold limit
- * set for the number of asynchronous messages a VF can send to mailbox
- * nor should it be greater than the maximum number of messages in the
- * mailbox serviced in current interrupt.
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS);
+}
+
+/**
+ * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
+ * @vf: the vf to configure
+ */
+static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
+ wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
+}
+
+/**
+ * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF
+ * @vf: pointer to VF structure
+ * @is_vflr: true if reset occurred due to VFLR
+ *
+ * Trigger and clean up after a VF reset for an SR-IOV VF.
+ */
+static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
+{
+ struct ice_pf *pf = vf->pf;
+ u32 reg, reg_idx, bit_idx;
+ unsigned int vf_abs_id, i;
+ struct device *dev;
+ struct ice_hw *hw;
+
+ dev = ice_pf_to_dev(pf);
+ hw = &pf->hw;
+ vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
+
+ /* In the case of a VFLR, HW has already reset the VF and we just need
+ * to clean up. Otherwise we must first trigger the reset using the
+ * VFRTRIG register.
*/
- if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
- mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
- return ICE_ERR_PARAM;
+ if (!is_vflr) {
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg |= VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+ }
- new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
- snap_buf = &snap->mbx_buf;
+ /* clear the VFLR bit in GLGEN_VFLRSTAT */
+ reg_idx = (vf_abs_id) / 32;
+ bit_idx = (vf_abs_id) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ ice_flush(hw);
- switch (snap_buf->state) {
- case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
- /* Clear any previously held data in mailbox snapshot structure. */
- ice_mbx_reset_snapshot(snap);
+ wr32(hw, PF_PCI_CIAA,
+ VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
+ for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
+ reg = rd32(hw, PF_PCI_CIAD);
+ /* no transactions pending so stop polling */
+ if ((reg & VF_TRANS_PENDING_M) == 0)
+ break;
- /* Collect the pending ARQ count, number of messages processed and
- * the maximum number of messages allowed to be processed from the
- * Mailbox for current interrupt.
- */
- snap_buf->num_pending_arq = mbx_data->num_pending_arq;
- snap_buf->num_msg_proc = mbx_data->num_msg_proc;
- snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;
+ dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
+ udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
+ }
+}
- /* Capture a new static snapshot of the mailbox by logging the
- * head and tail of snapshot and set num_iterations to the tail
- * value to mark the start of the iteration through the snapshot.
- */
- snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
- mbx_data->num_pending_arq);
- snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
- snap_buf->num_iterations = snap_buf->tail;
-
- /* Pending ARQ messages returned by ice_clean_rq_elem
- * is the difference between the head and tail of the
- * mailbox queue. Comparing this value against the watermark
- * helps to check if we potentially have malicious VFs.
+/**
+ * ice_sriov_poll_reset_status - poll SRIOV VF reset status
+ * @vf: pointer to VF structure
+ *
+ * Returns true when reset is successful, else returns false
+ */
+static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ unsigned int i;
+ u32 reg;
+
+ for (i = 0; i < 10; i++) {
+ /* VF reset requires driver to first reset the VF and then
+ * poll the status register to make sure that the reset
+ * completed successfully.
*/
- if (snap_buf->num_pending_arq >=
- mbx_data->async_watermark_val) {
- new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
- status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
- } else {
- new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
- ice_mbx_traverse(hw, &new_state);
+ reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
+ if (reg & VPGEN_VFRSTAT_VFRD_M)
+ return true;
+
+ /* only sleep if the reset is not done */
+ usleep_range(10, 20);
+ }
+ return false;
+}
+
+/**
+ * ice_sriov_clear_reset_trigger - enable VF to access hardware
+ * @vf: VF to enable hardware access for
+ */
+static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+ u32 reg;
+
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
+ reg &= ~VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
+ ice_flush(hw);
+}
+
+/**
+ * ice_sriov_create_vsi - Create a new VSI for a VF
+ * @vf: VF to create the VSI for
+ *
+ * This is called by ice_vf_recreate_vsi to create the new VSI after the old
+ * VSI has been released.
+ */
+static int ice_sriov_create_vsi(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi;
+
+ vsi = ice_vf_vsi_setup(vf);
+ if (!vsi)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
+ * @vf: VF to perform tasks on
+ */
+static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
+{
+ ice_ena_vf_mappings(vf);
+ wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
+}
+
+static const struct ice_vf_ops ice_sriov_vf_ops = {
+ .reset_type = ICE_VF_RESET,
+ .free = ice_sriov_free_vf,
+ .clear_reset_state = ice_sriov_clear_reset_state,
+ .clear_mbx_register = ice_sriov_clear_mbx_register,
+ .trigger_reset_register = ice_sriov_trigger_reset_register,
+ .poll_reset_status = ice_sriov_poll_reset_status,
+ .clear_reset_trigger = ice_sriov_clear_reset_trigger,
+ .irq_close = NULL,
+ .create_vsi = ice_sriov_create_vsi,
+ .post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
+};
+
+/**
+ * ice_create_vf_entries - Allocate and insert VF entries
+ * @pf: pointer to the PF structure
+ * @num_vfs: the number of VFs to allocate
+ *
+ * Allocate new VF entries and insert them into the hash table. Set some
+ * basic default fields for initializing the new VFs.
+ *
+ * After this function exits, the hash table will have num_vfs entries
+ * inserted.
+ *
+ * Returns 0 on success or an integer error code on failure.
+ */
+static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
+{
+ struct ice_vfs *vfs = &pf->vfs;
+ struct ice_vf *vf;
+ u16 vf_id;
+ int err;
+
+ lockdep_assert_held(&vfs->table_lock);
+
+ for (vf_id = 0; vf_id < num_vfs; vf_id++) {
+ vf = kzalloc(sizeof(*vf), GFP_KERNEL);
+ if (!vf) {
+ err = -ENOMEM;
+ goto err_free_entries;
}
- break;
+ kref_init(&vf->refcnt);
- case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
- new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
- ice_mbx_traverse(hw, &new_state);
- break;
+ vf->pf = pf;
+ vf->vf_id = vf_id;
- case ICE_MAL_VF_DETECT_STATE_DETECT:
- new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
- status = ice_mbx_detect_malvf(hw, vf_id, &new_state, is_malvf);
- break;
+ /* set sriov vf ops for VFs created during SRIOV flow */
+ vf->vf_ops = &ice_sriov_vf_ops;
- default:
- new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
- status = ICE_ERR_CFG;
+ ice_initialize_vf_entry(vf);
+
+ vf->vf_sw_id = pf->first_sw;
+
+ hash_add_rcu(vfs->table, &vf->entry, vf_id);
}
- snap_buf->state = new_state;
+ return 0;
- return status;
+err_free_entries:
+ ice_free_vf_entries(pf);
+ return err;
}
/**
- * ice_mbx_report_malvf - Track and note malicious VF
- * @hw: pointer to the HW struct
- * @all_malvfs: all malicious VFs tracked by PF
- * @bitmap_len: length of bitmap in bits
- * @vf_id: relative virtual function ID of the malicious VF
- * @report_malvf: boolean to indicate if malicious VF must be reported
- *
- * This function will update a bitmap that keeps track of the malicious
- * VFs attached to the PF. A malicious VF must be reported only once if
- * discovered between VF resets or loading so the function checks
- * the input vf_id against the bitmap to verify if the VF has been
- * detected in any previous mailbox iterations.
+ * ice_ena_vfs - enable VFs so they are ready to be used
+ * @pf: pointer to the PF structure
+ * @num_vfs: number of VFs to enable
*/
-enum ice_status
-ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id, bool *report_malvf)
+static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
- if (!all_malvfs || !report_malvf)
- return ICE_ERR_PARAM;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int ret;
+
+ /* Disable global interrupt 0 so we don't try to handle the VFLR. */
+ wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
+ ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
+ set_bit(ICE_OICR_INTR_DIS, pf->state);
+ ice_flush(hw);
- *report_malvf = false;
+ ret = pci_enable_sriov(pf->pdev, num_vfs);
+ if (ret)
+ goto err_unroll_intr;
- if (bitmap_len < hw->mbx_snapshot.mbx_vf.vfcntr_len)
- return ICE_ERR_INVAL_SIZE;
+ mutex_lock(&pf->vfs.table_lock);
- if (vf_id >= bitmap_len)
- return ICE_ERR_OUT_OF_RANGE;
+ ret = ice_set_per_vf_res(pf, num_vfs);
+ if (ret) {
+ dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer number of VFs\n",
+ num_vfs, ret);
+ goto err_unroll_sriov;
+ }
+
+ ret = ice_create_vf_entries(pf, num_vfs);
+ if (ret) {
+ dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
+ num_vfs);
+ goto err_unroll_sriov;
+ }
+
+ ret = ice_start_vfs(pf);
+ if (ret) {
+ dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
+ ret = -EAGAIN;
+ goto err_unroll_vf_entries;
+ }
+
+ clear_bit(ICE_VF_DIS, pf->state);
+
+ ret = ice_eswitch_configure(pf);
+ if (ret) {
+ dev_err(dev, "Failed to configure eswitch, err %d\n", ret);
+ goto err_unroll_sriov;
+ }
+
+ /* rearm global interrupts */
+ if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
+ ice_irq_dynamic_ena(hw, NULL, NULL);
- /* If the vf_id is found in the bitmap set bit and boolean to true */
- if (!test_and_set_bit(vf_id, all_malvfs))
- *report_malvf = true;
+ mutex_unlock(&pf->vfs.table_lock);
return 0;
+
+err_unroll_vf_entries:
+ ice_free_vf_entries(pf);
+err_unroll_sriov:
+ mutex_unlock(&pf->vfs.table_lock);
+ pci_disable_sriov(pf->pdev);
+err_unroll_intr:
+ /* rearm interrupts here */
+ ice_irq_dynamic_ena(hw, NULL, NULL);
+ clear_bit(ICE_OICR_INTR_DIS, pf->state);
+ return ret;
}
/**
- * ice_mbx_clear_malvf - Clear VF bitmap and counter for VF ID
- * @snap: pointer to the mailbox snapshot structure
- * @all_malvfs: all malicious VFs tracked by PF
- * @bitmap_len: length of bitmap in bits
- * @vf_id: relative virtual function ID of the malicious VF
+ * ice_pci_sriov_ena - Enable or change number of VFs
+ * @pf: pointer to the PF structure
+ * @num_vfs: number of VFs to allocate
*
- * In case of a VF reset, this function can be called to clear
- * the bit corresponding to the VF ID in the bitmap tracking all
- * malicious VFs attached to the PF. The function also clears the
- * VF counter array at the index of the VF ID. This is to ensure
- * that the new VF loaded is not considered malicious before going
- * through the overflow detection algorithm.
+ * Returns 0 on success and negative on failure
*/
-enum ice_status
-ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id)
+static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
- if (!snap || !all_malvfs)
- return ICE_ERR_PARAM;
+ int pre_existing_vfs = pci_num_vf(pf->pdev);
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
- if (bitmap_len < snap->mbx_vf.vfcntr_len)
- return ICE_ERR_INVAL_SIZE;
+ if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+ ice_free_vfs(pf);
+ else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
+ return 0;
- /* Ensure VF ID value is not larger than bitmap or VF counter length */
- if (vf_id >= bitmap_len || vf_id >= snap->mbx_vf.vfcntr_len)
- return ICE_ERR_OUT_OF_RANGE;
+ if (num_vfs > pf->vfs.num_supported) {
+ dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
+ num_vfs, pf->vfs.num_supported);
+ return -EOPNOTSUPP;
+ }
- /* Clear VF ID bit in the bitmap tracking malicious VFs attached to PF */
- clear_bit(vf_id, all_malvfs);
+ dev_info(dev, "Enabling %d VFs\n", num_vfs);
+ err = ice_ena_vfs(pf, num_vfs);
+ if (err) {
+ dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
+ return err;
+ }
- /* Clear the VF counter in the mailbox snapshot structure for that VF ID.
- * This is to ensure that if a VF is unloaded and a new one brought back
- * up with the same VF ID for a snapshot currently in traversal or detect
- * state the counter for that VF ID does not increment on top of existing
- * values in the mailbox overflow detection algorithm.
- */
- snap->mbx_vf.vf_cntr[vf_id] = 0;
+ set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
+ return 0;
+}
+
+/**
+ * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
+ * @pf: PF to enable SR-IOV on
+ */
+static int ice_check_sriov_allowed(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
+ dev_err(dev, "This device is not capable of SR-IOV\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ice_is_safe_mode(pf)) {
+ dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ice_pf_state_is_nominal(pf)) {
+ dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
+ return -EBUSY;
+ }
return 0;
}
/**
- * ice_mbx_init_snapshot - Initialize mailbox snapshot structure
- * @hw: pointer to the hardware structure
- * @vf_count: number of VFs allocated on a PF
+ * ice_sriov_configure - Enable or change number of VFs via sysfs
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate or 0 to free VFs
+ *
+ * This function is called when the user updates the number of VFs in sysfs. On
+ * success return whatever num_vfs was set to by the caller. Return negative on
+ * failure.
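+ *
+ * For example, writing 4 to the device's sriov_numvfs sysfs attribute
+ * (e.g. /sys/class/net/<iface>/device/sriov_numvfs) requests four VFs, and
+ * writing 0 frees them.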
+ */
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ err = ice_check_sriov_allowed(pf);
+ if (err)
+ return err;
+
+ if (!num_vfs) {
+ if (!pci_vfs_assigned(pdev)) {
+ ice_free_vfs(pf);
+ if (pf->lag)
+ ice_enable_lag(pf->lag);
+ return 0;
+ }
+
+ dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
+ return -EBUSY;
+ }
+
+ err = ice_pci_sriov_ena(pf, num_vfs);
+ if (err)
+ return err;
+
+ if (pf->lag)
+ ice_disable_lag(pf->lag);
+ return num_vfs;
+}
+
+/**
+ * ice_process_vflr_event - Free VF resources via IRQ calls
+ * @pf: pointer to the PF structure
*
- * Clear the mailbox snapshot structure and allocate memory
- * for the VF counter array based on the number of VFs allocated
- * on that PF.
+ * Called from the VFLR IRQ handler to free up VF resources and state
+ * variables.
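+ *
+ * For illustration only (hypothetical values): a VF with vf_base_id = 32 and
+ * vf_id = 5 has absolute ID 37, so its VFLR bit is GLGEN_VFLRSTAT index 1,
+ * bit 5.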
+ */
+void ice_process_vflr_event(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
+ u32 reg;
+
+ if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
+ !ice_has_vfs(pf))
+ return;
+
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ u32 reg_idx, bit_idx;
+
+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+ /* read GLGEN_VFLRSTAT register to find out the flr VFs */
+ reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
+ if (reg & BIT(bit_idx))
+ /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
+ ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
+ }
+ mutex_unlock(&pf->vfs.table_lock);
+}
+
+/**
+ * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
+ * @pf: PF used to index all VFs
+ * @pfq: queue index relative to the PF's function space
*
- * Assumption: This function will assume ice_get_caps() has already been
- * called to ensure that the vf_count can be compared against the number
- * of VFs supported as defined in the functional capabilities of the device.
+ * If no VF is found who owns the pfq then return NULL, otherwise return a
+ * pointer to the VF who owns the pfq
+ *
+ * If this function returns non-NULL, it acquires a reference count of the VF
+ * structure. The caller is responsible for calling ice_put_vf() to drop this
+ * reference.
*/
-enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count)
+static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ struct ice_vf *vf;
+ unsigned int bkt;
- /* Ensure that the number of VFs allocated is non-zero and
- * is not greater than the number of supported VFs defined in
- * the functional capabilities of the PF.
- */
- if (!vf_count || vf_count > hw->func_caps.num_allocd_vfs)
- return ICE_ERR_INVAL_SIZE;
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ struct ice_vsi *vsi;
+ u16 rxq_idx;
- snap->mbx_vf.vf_cntr = devm_kcalloc(ice_hw_to_dev(hw), vf_count,
- sizeof(*snap->mbx_vf.vf_cntr),
- GFP_KERNEL);
- if (!snap->mbx_vf.vf_cntr)
- return ICE_ERR_NO_MEMORY;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ continue;
- /* Setting the VF counter length to the number of allocated
- * VFs for given PF's functional capabilities.
- */
- snap->mbx_vf.vfcntr_len = vf_count;
+ ice_for_each_rxq(vsi, rxq_idx)
+ if (vsi->rxq_map[rxq_idx] == pfq) {
+ struct ice_vf *found;
+
+ if (kref_get_unless_zero(&vf->refcnt))
+ found = vf;
+ else
+ found = NULL;
+ rcu_read_unlock();
+ return found;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+
+/**
+ * ice_globalq_to_pfq - convert from global queue index to PF space queue index
+ * @pf: PF used for conversion
+ * @globalq: global queue index used to convert to PF space queue index
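+ *
+ * For illustration only (hypothetical values): with rxq_first_id = 64, global
+ * Rx queue 70 converts to PF-space queue 6.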
+ */
+static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
+{
+ return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
+}
+
+/**
+ * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
+ * @pf: PF that the LAN overflow event happened on
+ * @event: structure holding the event information for the LAN overflow event
+ *
+ * Determine if the LAN overflow event was caused by a VF queue. If it was not
+ * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
+ * reset on the offending VF.
+ */
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ u32 gldcb_rtctq, queue;
+ struct ice_vf *vf;
+
+ gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
+ dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
- /* Clear mbx_buf in the mailbox snaphot structure and setting the
- * mailbox snapshot state to a new capture.
+ /* event returns device global Rx queue number */
+ queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
+ GLDCB_RTCTQ_RXQNUM_S;
+
+ vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
+ if (!vf)
+ return;
+
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
+ ice_put_vf(vf);
+}
+
+/**
+ * ice_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ena: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ */
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct ice_vsi *vf_vsi;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ netdev_err(netdev, "VSI %d for VF %d is null\n",
+ vf->lan_vsi_idx, vf->vf_id);
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ if (vf_vsi->type != ICE_VSI_VF) {
+		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
+ vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
+ ret = -ENODEV;
+ goto out_put_vf;
+ }
+
+ if (ena == vf->spoofchk) {
+ dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
+ ret = 0;
+ goto out_put_vf;
+ }
+
+ ret = ice_vsi_apply_spoofchk(vf_vsi, ena);
+ if (ret)
+		dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d, error %d\n",
+ ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
+ else
+ vf->spoofchk = ena;
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_get_vf_cfg
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @ivi: VF configuration structure
+ *
+ * return VF configuration
+ */
+int
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ ivi->vf = vf_id;
+ ether_addr_copy(ivi->mac, vf->hw_lan_addr);
+
+ /* VF configuration for VLAN and applicable QoS */
+ ivi->vlan = ice_vf_get_port_vlan_id(vf);
+ ivi->qos = ice_vf_get_port_vlan_prio(vf);
+ if (ice_vf_is_port_vlan_ena(vf))
+ ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));
+
+ ivi->trusted = vf->trusted;
+ ivi->spoofchk = vf->spoofchk;
+ if (!vf->link_forced)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ ivi->max_tx_rate = vf->max_tx_rate;
+ ivi->min_tx_rate = vf->min_tx_rate;
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_set_vf_mac
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: MAC address
+ *
+ * program VF MAC address
+ */
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
+
+ if (is_multicast_ether_addr(mac)) {
+ netdev_err(netdev, "%pM not a valid unicast address\n", mac);
+ return -EINVAL;
+ }
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ /* nothing left to do, unicast MAC already set */
+ if (ether_addr_equal(vf->dev_lan_addr, mac) &&
+ ether_addr_equal(vf->hw_lan_addr, mac)) {
+ ret = 0;
+ goto out_put_vf;
+ }
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ mutex_lock(&vf->cfg_lock);
+
+ /* VF is notified of its new MAC via the PF's response to the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
*/
- memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
- snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+ ether_addr_copy(vf->dev_lan_addr, mac);
+ ether_addr_copy(vf->hw_lan_addr, mac);
+ if (is_zero_ether_addr(mac)) {
+ /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
+ vf->pf_set_mac = false;
+ netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
+ vf->vf_id);
+ } else {
+ /* PF will add MAC rule for the VF */
+ vf->pf_set_mac = true;
+ netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
+ mac, vf_id);
+ }
- return 0;
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ mutex_unlock(&vf->cfg_lock);
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_set_vf_trust
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @trusted: Boolean value to enable/disable trusted VF
+ *
+ * Enable or disable a given VF as trusted
+ */
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
+		ret = -EOPNOTSUPP;
+		goto out_put_vf;
+ }
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ /* Check if already trusted */
+ if (trusted == vf->trusted) {
+ ret = 0;
+ goto out_put_vf;
+ }
+
+ mutex_lock(&vf->cfg_lock);
+
+ vf->trusted = trusted;
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
+ vf_id, trusted ? "" : "un");
+
+ mutex_unlock(&vf->cfg_lock);
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_set_vf_link_state
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @link_state: required link state
+ *
+ * Set VF's link state, irrespective of physical link state status
+ */
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vf *vf;
+ int ret;
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ switch (link_state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ vf->link_forced = false;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ vf->link_forced = true;
+ vf->link_up = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ vf->link_forced = true;
+ vf->link_up = false;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ ice_vc_notify_vf_link_state(vf);
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
+
+/**
+ * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
+ * @pf: PF associated with VFs
+ */
+static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+ int rate = 0;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf)
+ rate += vf->min_tx_rate;
+ rcu_read_unlock();
+
+ return rate;
+}
+
+/**
+ * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
+ * @vf: VF trying to configure min_tx_rate
+ * @min_tx_rate: min Tx rate in Mbps
+ *
+ * Check if the min_tx_rate being passed in will cause oversubscription of total
+ * min_tx_rate based on the current link speed and all other VFs' configured
+ * min_tx_rate
+ *
+ * Return true if the passed min_tx_rate would cause oversubscription, else
+ * return false
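+ *
+ * For illustration only (hypothetical values): on a 10000 Mbps link where the
+ * other VFs already have 9000 Mbps of min_tx_rate configured, requesting a
+ * 2000 Mbps minimum for this VF would oversubscribe the link by 1000 Mbps and
+ * be rejected.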
+ */
+static bool
+ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ int all_vfs_min_tx_rate;
+ int link_speed_mbps;
+
+ if (WARN_ON(!vsi))
+ return false;
+
+ link_speed_mbps = ice_get_link_speed_mbps(vsi);
+ all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
+
+ /* this VF's previous rate is being overwritten */
+ all_vfs_min_tx_rate -= vf->min_tx_rate;
+
+ if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
+ dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
+ min_tx_rate, vf->vf_id,
+ all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
+ link_speed_mbps);
+ return true;
+ }
+
+ return false;
+}
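
As a concrete example of the check above: on a 25000 Mbps link with other VFs
already reserving 20000 Mbps of min Tx rate, a new request for 8000 Mbps would
exceed the link by 3000 Mbps and be rejected. The same arithmetic as a
self-contained snippet:

static bool example_min_rate_oversubscribed(int link_speed_mbps,
					    int other_vfs_min_rate_mbps,
					    int requested_min_rate_mbps)
{
	return other_vfs_min_rate_mbps + requested_min_rate_mbps >
	       link_speed_mbps;
}

/* example_min_rate_oversubscribed(25000, 20000, 8000) -> true */
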
+
+/**
+ * ice_set_vf_bw - set min/max VF bandwidth
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @min_tx_rate: Minimum Tx rate in Mbps
+ * @max_tx_rate: Maximum Tx rate in Mbps
+ */
+int
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ if (min_tx_rate && ice_is_dcb_active(pf)) {
+ dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
+ ret = -EOPNOTSUPP;
+ goto out_put_vf;
+ }
+
+ if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
+ ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
+ vf->vf_id);
+ goto out_put_vf;
+ }
+
+ vf->min_tx_rate = min_tx_rate;
+ }
+
+ if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
+ ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
+ vf->vf_id);
+ goto out_put_vf;
+ }
+
+ vf->max_tx_rate = max_tx_rate;
+ }
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
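
Note the unit conversion in the two calls above: the rtnetlink callback
receives min_tx_rate/max_tx_rate in Mbps, while ice_set_min_bw_limit() and
ice_set_max_bw_limit() are handed the value in Kbps, hence the multiplication
by 1000. A trivial helper expressing that conversion (illustrative only):

static inline u64 example_mbps_to_kbps(int rate_mbps)
{
	/* widen before multiplying so large rates cannot overflow */
	return (u64)rate_mbps * 1000;
}
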
+
+/**
+ * ice_get_vf_stats - populate some stats for the VF
+ * @netdev: the netdev of the PF
+ * @vf_id: the host OS identifier (0-255)
+ * @vf_stats: pointer to the OS memory to be initialized
+ */
+int ice_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_eth_stats *stats;
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ int ret;
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ ret = -EINVAL;
+ goto out_put_vf;
+ }
+
+ ice_update_eth_stats(vsi);
+ stats = &vsi->eth_stats;
+
+ memset(vf_stats, 0, sizeof(*vf_stats));
+
+ vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
+ stats->rx_multicast;
+ vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
+ stats->tx_multicast;
+ vf_stats->rx_bytes = stats->rx_bytes;
+ vf_stats->tx_bytes = stats->tx_bytes;
+ vf_stats->broadcast = stats->rx_broadcast;
+ vf_stats->multicast = stats->rx_multicast;
+ vf_stats->rx_dropped = stats->rx_discards;
+ vf_stats->tx_dropped = stats->tx_discards;
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
+}
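
All of these handlers are reached through the PF netdev's net_device_ops,
which the rtnetlink IFLA_VF_* requests dispatch to. The ops table below is
only a sketch of how the hooks might be wired up; the callback names come
from this patch, the table itself is illustrative:

static const struct net_device_ops example_pf_netdev_ops = {
	/* ... regular netdev callbacks ... */
	.ndo_set_vf_mac		= ice_set_vf_mac,
	.ndo_set_vf_vlan	= ice_set_vf_port_vlan,
	.ndo_set_vf_rate	= ice_set_vf_bw,
	.ndo_set_vf_trust	= ice_set_vf_trust,
	.ndo_set_vf_link_state	= ice_set_vf_link_state,
	.ndo_set_vf_spoofchk	= ice_set_vf_spoofchk,
	.ndo_get_vf_config	= ice_get_vf_cfg,
	.ndo_get_vf_stats	= ice_get_vf_stats,
};
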
+
+/**
+ * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
+ * @hw: hardware structure used to check the VLAN mode
+ * @vlan_proto: VLAN TPID being checked
+ *
+ * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
+ * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
+ * Mode (SVM), then only ETH_P_8021Q is supported.
+ */
+static bool
+ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
+{
+ bool is_supported = false;
+
+ switch (vlan_proto) {
+ case ETH_P_8021Q:
+ is_supported = true;
+ break;
+ case ETH_P_8021AD:
+ if (ice_is_dvm_ena(hw))
+ is_supported = true;
+ break;
+ }
+
+ return is_supported;
+}
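
In practice this means a port VLAN request with TPID ETH_P_8021AD (0x88a8) is
only honored when the device runs in double VLAN mode; in single VLAN mode the
caller below bails out with -EPROTONOSUPPORT before touching the VF. A compact
restatement of the rule as a sketch:

static bool example_port_vlan_tpid_ok(bool dvm_enabled, u16 tpid)
{
	if (tpid == ETH_P_8021Q)
		return true;		/* always allowed */
	if (tpid == ETH_P_8021AD)
		return dvm_enabled;	/* 802.1ad only in DVM */
	return false;
}
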
+
+/**
+ * ice_set_vf_port_vlan - configure a port VLAN for a VF
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @vlan_id: VLAN ID being set
+ * @qos: priority setting
+ * @vlan_proto: VLAN protocol
+ *
+ * Program the VF port VLAN ID and/or QoS
+ */
+int
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ u16 local_vlan_proto = ntohs(vlan_proto);
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+
+ if (vlan_id >= VLAN_N_VID || qos > 7) {
+ dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
+ vf_id, vlan_id, qos);
+ return -EINVAL;
+ }
+
+ if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
+ dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
+ local_vlan_proto);
+ return -EPROTONOSUPPORT;
+ }
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf)
+ return -EINVAL;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ goto out_put_vf;
+
+ if (ice_vf_get_port_vlan_prio(vf) == qos &&
+ ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
+ ice_vf_get_port_vlan_id(vf) == vlan_id) {
+ /* duplicate request, so just return success */
+ dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
+ vlan_id, qos, local_vlan_proto);
+ ret = 0;
+ goto out_put_vf;
+ }
+
+ mutex_lock(&vf->cfg_lock);
+
+ vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
+ if (ice_vf_is_port_vlan_ena(vf))
+ dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
+ vlan_id, qos, local_vlan_proto, vf_id);
+ else
+ dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
+
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ mutex_unlock(&vf->cfg_lock);
+
+out_put_vf:
+ ice_put_vf(vf);
+ return ret;
}
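
The vlan_id/qos bounds checked at the top of ice_set_vf_port_vlan() mirror the
802.1Q tag layout: a 12-bit VID (0..4095, VLAN_N_VID is 4096) and a 3-bit
priority. A small sketch of packing those fields into a 16-bit TCI, using the
VLAN_PRIO_SHIFT/VLAN_VID_MASK constants from <linux/if_vlan.h>, purely to
illustrate the field widths:

static inline u16 example_build_vlan_tci(u16 vid, u8 qos)
{
	/* 3-bit PCP in bits 15:13, 12-bit VID in bits 11:0 */
	return (u16)(qos << VLAN_PRIO_SHIFT) | (vid & VLAN_VID_MASK);
}
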
/**
- * ice_mbx_deinit_snapshot - Free mailbox snapshot structure
- * @hw: pointer to the hardware structure
+ * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
+ * @vf: pointer to the VF structure
+ */
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+
+ dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+ vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
+ vf->dev_lan_addr,
+ test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+ ? "on" : "off");
+}
+
+/**
+ * ice_print_vfs_mdd_events - print VFs malicious driver detect event
+ * @pf: pointer to the PF structure
*
- * Clear the mailbox snapshot structure and free the VF counter array.
+ * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
*/
-void ice_mbx_deinit_snapshot(struct ice_hw *hw)
+void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
- struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ /* check that there are pending MDD events to print */
+ if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
+ return;
+
+ /* VF MDD event logs are rate limited to one second intervals */
+ if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
+ return;
- /* Free VF counter array and reset VF counter length */
- devm_kfree(ice_hw_to_dev(hw), snap->mbx_vf.vf_cntr);
- snap->mbx_vf.vfcntr_len = 0;
+ pf->vfs.last_printed_mdd_jiffies = jiffies;
- /* Clear mbx_buf in the mailbox snaphot structure */
- memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ /* only print Rx MDD event message if there are new events */
+ if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
+ vf->mdd_rx_events.last_printed =
+ vf->mdd_rx_events.count;
+ ice_print_vf_rx_mdd_event(vf);
+ }
+
+ /* only print Tx MDD event message if there are new events */
+ if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
+ vf->mdd_tx_events.last_printed =
+ vf->mdd_tx_events.count;
+
+ dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
+ vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
+ vf->dev_lan_addr);
+ }
+ }
+ mutex_unlock(&pf->vfs.table_lock);
+}
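
The one-second throttle above is a standard jiffies pattern; isolated, it
looks roughly like this (sketch):

static bool example_print_allowed(unsigned long *last_print_jiffies)
{
	/* skip if less than one second has passed since the last print */
	if (time_is_after_jiffies(*last_print_jiffies + HZ))
		return false;

	*last_print_jiffies = jiffies;
	return true;
}
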
+
+/**
+ * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
+ * @pdev: pointer to a pci_dev structure
+ *
+ * Called when recovering from a PF FLR to restore interrupt capability to
+ * the VFs.
+ */
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
+{
+ u16 vf_id;
+ int pos;
+
+ if (!pci_num_vf(pdev))
+ return;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ struct pci_dev *vfdev;
+
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
+ &vf_id);
+ vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn && vfdev->physfn == pdev)
+ pci_restore_msi_state(vfdev);
+ vfdev = pci_get_device(pdev->vendor, vf_id,
+ vfdev);
+ }
+ }
}
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 161dc55d9e9c..346cb2666f3a 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -3,50 +3,144 @@
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
+#include "ice_virtchnl_fdir.h"
+#include "ice_vf_lib.h"
+#include "ice_virtchnl.h"
-#include "ice_type.h"
-#include "ice_controlq.h"
+/* Static VF transaction/status register def */
+#define VF_DEVICE_STATUS 0xAA
+#define VF_TRANS_PENDING_M 0x20
-/* Defining the mailbox message threshold as 63 asynchronous
- * pending messages. Normal VF functionality does not require
- * sending more than 63 asynchronous pending message.
- */
-#define ICE_ASYNC_VF_MSG_THRESHOLD 63
+/* wait defines for polling PF_PCI_CIAD register status */
+#define ICE_PCI_CIAD_WAIT_COUNT 100
+#define ICE_PCI_CIAD_WAIT_DELAY_US 1
+
+/* VF resource constraints */
+#define ICE_MIN_QS_PER_VF 1
+#define ICE_NONQ_VECS_VF 1
+#define ICE_NUM_VF_MSIX_MED 17
+#define ICE_NUM_VF_MSIX_SMALL 5
+#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3
+#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
+#define ICE_MAX_VF_RESET_TRIES 40
+#define ICE_MAX_VF_RESET_SLEEP_MS 20
#ifdef CONFIG_PCI_IOV
-enum ice_status
-ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
- u8 *msg, u16 msglen, struct ice_sq_cd *cd);
-
-u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
-enum ice_status
-ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
- u16 vf_id, bool *is_mal_vf);
-enum ice_status
-ice_mbx_clear_malvf(struct ice_mbx_snapshot *snap, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id);
-enum ice_status ice_mbx_init_snapshot(struct ice_hw *hw, u16 vf_count);
-void ice_mbx_deinit_snapshot(struct ice_hw *hw);
-enum ice_status
-ice_mbx_report_malvf(struct ice_hw *hw, unsigned long *all_malvfs,
- u16 bitmap_len, u16 vf_id, bool *report_malvf);
+void ice_process_vflr_event(struct ice_pf *pf);
+int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
+int
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
+
+void ice_free_vfs(struct ice_pf *pf);
+void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
+
+int
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto);
+
+int
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
+
+int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
+
+int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
+
+int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
+
+int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
+
+int
+ice_get_vf_stats(struct net_device *netdev, int vf_id,
+ struct ifla_vf_stats *vf_stats);
+void
+ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
+void ice_print_vfs_mdd_events(struct ice_pf *pf);
+void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
#else /* CONFIG_PCI_IOV */
-static inline enum ice_status
-ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
- u16 __always_unused vfid, u32 __always_unused v_opcode,
- u32 __always_unused v_retval, u8 __always_unused *msg,
- u16 __always_unused msglen,
- struct ice_sq_cd __always_unused *cd)
+static inline void ice_process_vflr_event(struct ice_pf *pf) { }
+static inline void ice_free_vfs(struct ice_pf *pf) { }
+static inline
+void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
+static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
+static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
+static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
+
+static inline int
+ice_sriov_configure(struct pci_dev __always_unused *pdev,
+ int __always_unused num_vfs)
{
- return 0;
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_mac(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u8 __always_unused *mac)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_get_vf_cfg(struct net_device __always_unused *netdev,
+ int __always_unused vf_id,
+ struct ifla_vf_info __always_unused *ivi)
+{
+ return -EOPNOTSUPP;
}
-static inline u32
-ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
- u16 __always_unused link_speed)
+static inline int
+ice_set_vf_trust(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused trusted)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, u16 __always_unused vid,
+ u8 __always_unused qos, __be16 __always_unused v_proto)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, bool __always_unused ena)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_link_state(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused link_state)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_set_vf_bw(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused min_tx_rate,
+ int __always_unused max_tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
+ struct ice_q_vector __always_unused *q_vector)
{
return 0;
}
+static inline int
+ice_get_vf_stats(struct net_device __always_unused *netdev,
+ int __always_unused vf_id,
+ struct ifla_vf_stats __always_unused *vf_stats)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
deleted file mode 100644
index dbf66057371d..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
-
-#ifndef _ICE_STATUS_H_
-#define _ICE_STATUS_H_
-
-/* Error Codes */
-enum ice_status {
- ICE_SUCCESS = 0,
-
- /* Generic codes : Range -1..-49 */
- ICE_ERR_PARAM = -1,
- ICE_ERR_NOT_IMPL = -2,
- ICE_ERR_NOT_READY = -3,
- ICE_ERR_NOT_SUPPORTED = -4,
- ICE_ERR_BAD_PTR = -5,
- ICE_ERR_INVAL_SIZE = -6,
- ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
- ICE_ERR_RESET_FAILED = -9,
- ICE_ERR_FW_API_VER = -10,
- ICE_ERR_NO_MEMORY = -11,
- ICE_ERR_CFG = -12,
- ICE_ERR_OUT_OF_RANGE = -13,
- ICE_ERR_ALREADY_EXISTS = -14,
- ICE_ERR_DOES_NOT_EXIST = -15,
- ICE_ERR_IN_USE = -16,
- ICE_ERR_MAX_LIMIT = -17,
- ICE_ERR_RESET_ONGOING = -18,
- ICE_ERR_HW_TABLE = -19,
- ICE_ERR_FW_DDP_MISMATCH = -20,
-
- ICE_ERR_NVM = -50,
- ICE_ERR_NVM_CHECKSUM = -51,
- ICE_ERR_BUF_TOO_SHORT = -52,
- ICE_ERR_NVM_BLANK_MODE = -53,
- ICE_ERR_AQ_ERROR = -100,
- ICE_ERR_AQ_TIMEOUT = -101,
- ICE_ERR_AQ_FULL = -102,
- ICE_ERR_AQ_NO_WORK = -103,
- ICE_ERR_AQ_EMPTY = -104,
- ICE_ERR_AQ_FW_CRITICAL = -105,
-};
-
-#endif /* _ICE_STATUS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 183d93033890..46b36851af46 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -30,23 +30,80 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
+enum {
+ ICE_PKT_OUTER_IPV6 = BIT(0),
+ ICE_PKT_TUN_GTPC = BIT(1),
+ ICE_PKT_TUN_GTPU = BIT(2),
+ ICE_PKT_TUN_NVGRE = BIT(3),
+ ICE_PKT_TUN_UDP = BIT(4),
+ ICE_PKT_INNER_IPV6 = BIT(5),
+ ICE_PKT_INNER_TCP = BIT(6),
+ ICE_PKT_INNER_UDP = BIT(7),
+ ICE_PKT_GTP_NOPAY = BIT(8),
+ ICE_PKT_KMALLOC = BIT(9),
+ ICE_PKT_PPPOE = BIT(10),
+ ICE_PKT_L2TPV3 = BIT(11),
+};
+
struct ice_dummy_pkt_offsets {
enum ice_protocol_type type;
u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
};
-static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
+struct ice_dummy_pkt_profile {
+ const struct ice_dummy_pkt_offsets *offsets;
+ const u8 *pkt;
+ u32 match;
+ u16 pkt_len;
+ u16 offsets_len;
+};
+
+#define ICE_DECLARE_PKT_OFFSETS(type) \
+ static const struct ice_dummy_pkt_offsets \
+ ice_dummy_##type##_packet_offsets[]
+
+#define ICE_DECLARE_PKT_TEMPLATE(type) \
+ static const u8 ice_dummy_##type##_packet[]
+
+#define ICE_PKT_PROFILE(type, m) { \
+ .match = (m), \
+ .pkt = ice_dummy_##type##_packet, \
+ .pkt_len = sizeof(ice_dummy_##type##_packet), \
+ .offsets = ice_dummy_##type##_packet_offsets, \
+ .offsets_len = sizeof(ice_dummy_##type##_packet_offsets), \
+}
+
+ICE_DECLARE_PKT_OFFSETS(vlan) = {
+ { ICE_VLAN_OFOS, 12 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(vlan) = {
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+};
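
The two declarations above are purely syntactic sugar; after preprocessing,
the ICE_DECLARE_PKT_* macros defined earlier expand them to roughly:

static const struct ice_dummy_pkt_offsets ice_dummy_vlan_packet_offsets[] = {
	{ ICE_VLAN_OFOS, 12 },
};

static const u8 ice_dummy_vlan_packet[] = {
	0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
};
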
+
+ICE_DECLARE_PKT_OFFSETS(qinq) = {
+ { ICE_VLAN_EX, 12 },
+ { ICE_VLAN_IN, 16 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(qinq) = {
+ 0x91, 0x00, 0x00, 0x00, /* ICE_VLAN_EX 12 */
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_IN 16 */
+};
+
+ICE_DECLARE_PKT_OFFSETS(gre_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_NVGRE, 34 },
{ ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
{ ICE_IPV4_IL, 56 },
{ ICE_TCP_IL, 76 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -65,7 +122,8 @@ static const u8 dummy_gre_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 54 */
0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
0x00, 0x00, 0x00, 0x00,
@@ -80,18 +138,19 @@ static const u8 dummy_gre_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(gre_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
{ ICE_NVGRE, 34 },
{ ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
{ ICE_IPV4_IL, 56 },
{ ICE_UDP_ILOS, 76 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_gre_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -110,7 +169,8 @@ static const u8 dummy_gre_udp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 54 */
0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
0x00, 0x00, 0x00, 0x00,
@@ -122,7 +182,7 @@ static const u8 dummy_gre_udp_packet[] = {
0x00, 0x08, 0x00, 0x00,
};
-static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -131,12 +191,13 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
{ ICE_GENEVE, 42 },
{ ICE_VXLAN_GPE, 42 },
{ ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
{ ICE_IPV4_IL, 64 },
{ ICE_TCP_IL, 84 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -158,7 +219,8 @@ static const u8 dummy_udp_tun_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 62 */
0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
0x00, 0x01, 0x00, 0x00,
@@ -173,7 +235,7 @@ static const u8 dummy_udp_tun_tcp_packet[] = {
0x00, 0x00, 0x00, 0x00
};
-static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(udp_tun_udp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -182,12 +244,13 @@ static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
{ ICE_GENEVE, 42 },
{ ICE_VXLAN_GPE, 42 },
{ ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
{ ICE_IPV4_IL, 64 },
{ ICE_UDP_ILOS, 84 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_udp_tun_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -209,7 +272,8 @@ static const u8 dummy_udp_tun_udp_packet[] = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x08, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_IL 62 */
0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
0x00, 0x01, 0x00, 0x00,
@@ -221,69 +285,251 @@ static const u8 dummy_udp_tun_udp_packet[] = {
0x00, 0x08, 0x00, 0x00,
};
-/* offset info for MAC + IPv4 + UDP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(gre_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
- { ICE_UDP_ILOS, 34 },
+ { ICE_NVGRE, 34 },
+ { ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
+ { ICE_IPV6_IL, 56 },
+ { ICE_TCP_IL, 96 },
{ ICE_PROTOCOL_LAST, 0 },
};
-/* Dummy packet for MAC + IPv4 + UDP */
-static const u8 dummy_udp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x08, 0x00, /* ICE_ETYPE_OL 12 */
- 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
+ 0x45, 0x00, 0x00, 0x66, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x2F, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
+ 0x00, 0x08, 0x06, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 96 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x02, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+ICE_DECLARE_PKT_OFFSETS(gre_ipv6_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_NVGRE, 34 },
+ { ICE_MAC_IL, 42 },
+ { ICE_ETYPE_IL, 54 },
+ { ICE_IPV6_IL, 56 },
+ { ICE_UDP_ILOS, 96 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(gre_ipv6_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x2F, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 54 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 56 */
+ 0x00, 0x08, 0x11, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 96 */
+ 0x00, 0x08, 0x00, 0x00,
+};
+
+ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_tcp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_VXLAN, 42 },
+ { ICE_GENEVE, 42 },
+ { ICE_VXLAN_GPE, 42 },
+ { ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
+ { ICE_IPV6_IL, 64 },
+ { ICE_TCP_IL, 104 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x6e, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x40, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
+ 0x00, 0x5a, 0x00, 0x00,
+
+ 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
+ 0x00, 0x08, 0x06, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 104 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x02, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+ICE_DECLARE_PKT_OFFSETS(udp_tun_ipv6_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_VXLAN, 42 },
+ { ICE_GENEVE, 42 },
+ { ICE_VXLAN_GPE, 42 },
+ { ICE_MAC_IL, 50 },
+ { ICE_ETYPE_IL, 62 },
+ { ICE_IPV6_IL, 64 },
+ { ICE_UDP_ILOS, 104 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(udp_tun_ipv6_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x62, /* ICE_IPV4_OFOS 14 */
0x00, 0x01, 0x00, 0x00,
0x00, 0x11, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
- 0x00, 0x08, 0x00, 0x00,
+ 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
+ 0x00, 0x4e, 0x00, 0x00,
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+ 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_IL 62 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 64 */
+ 0x00, 0x08, 0x11, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 104 */
+ 0x00, 0x08, 0x00, 0x00,
};
-/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
+/* offset info for MAC + IPv4 + UDP dummy packet */
+ICE_DECLARE_PKT_OFFSETS(udp) = {
{ ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV4_OFOS, 18 },
- { ICE_UDP_ILOS, 38 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_ILOS, 34 },
{ ICE_PROTOCOL_LAST, 0 },
};
-/* C-tag (801.1Q), IPv4:UDP dummy packet */
-static const u8 dummy_vlan_udp_packet[] = {
+/* Dummy packet for MAC + IPv4 + UDP */
+ICE_DECLARE_PKT_TEMPLATE(udp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
-
- 0x08, 0x00, /* ICE_ETYPE_OL 16 */
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
- 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
0x00, 0x01, 0x00, 0x00,
0x00, 0x11, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
0x00, 0x08, 0x00, 0x00,
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
/* offset info for MAC + IPv4 + TCP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV4_OFOS, 14 },
@@ -292,7 +538,7 @@ static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
};
/* Dummy packet for MAC + IPv4 + TCP */
-static const u8 dummy_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(tcp) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -314,50 +560,52 @@ static const u8 dummy_tcp_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
-static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(tcp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV4_OFOS, 18 },
- { ICE_TCP_IL, 38 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_TCP_IL, 54 },
{ ICE_PROTOCOL_LAST, 0 },
};
-/* C-tag (801.1Q), IPv4:TCP dummy packet */
-static const u8 dummy_vlan_tcp_packet[] = {
+ICE_DECLARE_PKT_TEMPLATE(tcp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
-
- 0x08, 0x00, /* ICE_ETYPE_OL 16 */
+ 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
- 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
- 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x06, 0x00, 0x00,
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
+ 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x50, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
+/* IPv6 + UDP */
+ICE_DECLARE_PKT_OFFSETS(udp_ipv6) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
{ ICE_IPV6_OFOS, 14 },
- { ICE_TCP_IL, 54 },
+ { ICE_UDP_ILOS, 54 },
{ ICE_PROTOCOL_LAST, 0 },
};
-static const u8 dummy_tcp_ipv6_packet[] = {
+/* IPv6 + UDP dummy packet */
+ICE_DECLARE_PKT_TEMPLATE(udp_ipv6) = {
0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -365,7 +613,7 @@ static const u8 dummy_tcp_ipv6_packet[] = {
0x86, 0xDD, /* ICE_ETYPE_OL 12 */
0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
- 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -375,7 +623,55 @@ static const u8 dummy_tcp_ipv6_packet[] = {
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
+ 0x00, 0x10, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_tcp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV4_IL, 62 },
+ { ICE_TCP_IL, 82 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x58, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x44, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x28, /* IP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* TCP 82 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x50, 0x00, 0x00, 0x00,
@@ -384,29 +680,86 @@ static const u8 dummy_tcp_ipv6_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* C-tag (802.1Q): IPv6 + TCP */
-static const struct ice_dummy_pkt_offsets
-dummy_vlan_tcp_ipv6_packet_offsets[] = {
+/* Outer IPv4 + Outer UDP + GTP + Inner IPv4 + Inner UDP */
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4_udp) = {
{ ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV6_OFOS, 18 },
- { ICE_TCP_IL, 58 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV4_IL, 62 },
+ { ICE_UDP_ILOS, 82 },
{ ICE_PROTOCOL_LAST, 0 },
};
-/* C-tag (802.1Q), IPv6 + TCP dummy packet */
-static const u8 dummy_vlan_tcp_ipv6_packet[] = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x4c, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x38, 0x00, 0x00,
- 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
+ 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
- 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x1c, /* IP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* UDP 82 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* Outer IPv6 + Outer UDP + GTP + Inner IPv4 + Inner TCP */
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_tcp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV6_IL, 62 },
+ { ICE_TCP_IL, 102 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x6c, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x58, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
+ 0x00, 0x14, 0x06, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -416,7 +769,7 @@ static const u8 dummy_vlan_tcp_ipv6_packet[] = {
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
+ 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x50, 0x00, 0x00, 0x00,
@@ -425,66 +778,252 @@ static const u8 dummy_vlan_tcp_ipv6_packet[] = {
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* IPv6 + UDP */
-static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv6_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP, 42 },
+ { ICE_IPV6_IL, 62 },
+ { ICE_UDP_ILOS, 102 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv6_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x60, /* IP 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 34 */
+ 0x00, 0x4c, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 62 */
+ 0x00, 0x08, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
- { ICE_ETYPE_OL, 12 },
{ ICE_IPV6_OFOS, 14 },
- { ICE_UDP_ILOS, 54 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV4_IL, 82 },
+ { ICE_TCP_IL, 102 },
{ ICE_PROTOCOL_LAST, 0 },
};
-/* IPv6 + UDP dummy packet */
-static const u8 dummy_udp_ipv6_packet[] = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
- 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x44, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 40 */
- 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x44, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x34, /* ICE_GTP Header 62 */
0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x28, /* IP 82 */
0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* TCP 102 */
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
- 0x00, 0x10, 0x00, 0x00,
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
- 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv4_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV4_IL, 82 },
+ { ICE_UDP_ILOS, 102 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv4_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x38, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x38, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x28, /* ICE_GTP Header 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x1c, /* IP 82 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* UDP 102 */
+ 0x00, 0x08, 0x00, 0x00,
+
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-/* C-tag (802.1Q): IPv6 + UDP */
-static const struct ice_dummy_pkt_offsets
-dummy_vlan_udp_ipv6_packet_offsets[] = {
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_tcp) = {
{ ICE_MAC_OFOS, 0 },
- { ICE_VLAN_OFOS, 12 },
- { ICE_ETYPE_OL, 16 },
- { ICE_IPV6_OFOS, 18 },
- { ICE_UDP_ILOS, 58 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV6_IL, 82 },
+ { ICE_TCP_IL, 122 },
{ ICE_PROTOCOL_LAST, 0 },
};
-/* C-tag (802.1Q), IPv6 + UDP dummy packet */
-static const u8 dummy_vlan_udp_ipv6_packet[] = {
- 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x58, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x58, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x48, /* ICE_GTP Header 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
+ 0x00, 0x14, 0x06, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* TCP 122 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtpu_ipv6_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP, 62 },
+ { ICE_IPV6_IL, 82 },
+ { ICE_UDP_ILOS, 122 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtpu_ipv6_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* Ethernet 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 14 */
+ 0x00, 0x4c, 0x11, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x08, 0x68, /* UDP 54 */
+ 0x00, 0x4c, 0x00, 0x00,
- 0x81, 0x00, 0x00, 0x00,/* ICE_VLAN_OFOS 12 */
+ 0x34, 0xff, 0x00, 0x3c, /* ICE_GTP Header 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
- 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
+ 0x02, 0x00, 0x00, 0x00, /* GTP_PDUSession_ExtensionHeader 74 */
+ 0x00, 0x00, 0x00, 0x00,
- 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
- 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
+ 0x60, 0x00, 0x00, 0x00, /* IPv6 82 */
+ 0x00, 0x08, 0x11, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
@@ -494,24 +1033,349 @@ static const u8 dummy_vlan_udp_ipv6_packet[] = {
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
+ 0x00, 0x00, 0x00, 0x00, /* UDP 122 */
0x00, 0x08, 0x00, 0x00,
0x00, 0x00, /* 2 bytes for 4 byte alignment */
};
-#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
- (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
- (DUMMY_ETH_HDR_LEN * \
- sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
-#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
- (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
-#define ICE_SW_RULE_LG_ACT_SIZE(n) \
- (offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
- ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
-#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
- (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
- ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
+ICE_DECLARE_PKT_OFFSETS(ipv4_gtpu_ipv4) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_GTP_NO_PAY, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_gtpu_ipv4) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x44, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x34, 0xff, 0x00, 0x28, /* ICE_GTP 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x85,
+
+ 0x02, 0x00, 0x00, 0x00, /* PDU Session extension header */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 62 */
+ 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00,
+};
+
+ICE_DECLARE_PKT_OFFSETS(ipv6_gtp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_OF, 54 },
+ { ICE_GTP_NO_PAY, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x86, 0xdd,
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x6c, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x68, 0x08, 0x68, /* ICE_UDP_OF 54 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x30, 0x00, 0x00, 0x28, /* ICE_GTP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00,
+};
+
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV4_OFOS, 22 },
+ { ICE_TCP_IL, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x16,
+
+ 0x00, 0x21, /* PPP Link Layer 20 */
+
+ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 22 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV4_OFOS, 22 },
+ { ICE_UDP_ILOS, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv4_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x16,
+
+ 0x00, 0x21, /* PPP Link Layer 20 */
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 22 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 42 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_tcp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV6_OFOS, 22 },
+ { ICE_TCP_IL, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_tcp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x2a,
+
+ 0x00, 0x57, /* PPP Link Layer 20 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
+ 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pppoe_ipv6_udp) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_PPPOE, 14 },
+ { ICE_IPV6_OFOS, 22 },
+ { ICE_UDP_ILOS, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x88, 0x64, /* ICE_ETYPE_OL 12 */
+
+ 0x11, 0x00, 0x00, 0x00, /* ICE_PPPOE 14 */
+ 0x00, 0x2a,
+
+ 0x00, 0x57, /* PPP Link Layer 20 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 22 */
+ 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 62 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(ipv4_l2tpv3) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_L2TPV3, 34 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_l2tpv3) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
+ 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x73, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(ipv6_l2tpv3) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_L2TPV3, 54 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv6_l2tpv3) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
+ 0x00, 0x0c, 0x73, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
+static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
+ ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_GTP_NOPAY),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv6_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPU | ICE_PKT_GTP_NOPAY),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv6_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv6_tcp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4_udp, ICE_PKT_TUN_GTPU |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
+ ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
+ ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
+ ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(pppoe_ipv4_udp, ICE_PKT_PPPOE | ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(pppoe_ipv4_tcp, ICE_PKT_PPPOE),
+ ICE_PKT_PROFILE(gre_ipv6_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(gre_tcp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(gre_ipv6_udp, ICE_PKT_TUN_NVGRE | ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(gre_udp, ICE_PKT_TUN_NVGRE),
+ ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
+ ICE_PKT_INNER_IPV6 |
+ ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(ipv6_l2tpv3, ICE_PKT_L2TPV3 | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(ipv4_l2tpv3, ICE_PKT_L2TPV3),
+ ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
+ ICE_PKT_INNER_IPV6),
+ ICE_PKT_PROFILE(udp_tun_udp, ICE_PKT_TUN_UDP),
+ ICE_PKT_PROFILE(udp_ipv6, ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(udp, ICE_PKT_INNER_UDP),
+ ICE_PKT_PROFILE(tcp_ipv6, ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(tcp, 0),
+};
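
The profile table is ordered from most to least specific, with the all-zero
"tcp" entry acting as the catch-all default. A selection loop consistent with
that ordering might look like the sketch below (not necessarily the exact
in-tree lookup helper):

static const struct ice_dummy_pkt_profile *
example_find_dummy_packet(u32 match)
{
	const struct ice_dummy_pkt_profile *p = ice_dummy_pkt_profiles;

	/* stop at the first profile whose required bits are all present in
	 * "match"; the terminating profile with match == 0 always matches
	 */
	while (p->match && (match & p->match) != p->match)
		p++;

	return p;
}
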
+
+#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
+#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s) \
+ ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
+#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s) \
+ ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
+#define ICE_SW_RULE_LG_ACT_SIZE(s, n) struct_size((s), act, (n))
+#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
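
These helpers now lean on struct_size() to size the flexible-array rule
structures. A hedged example of how a caller might use one of them, assuming
struct ice_sw_rule_lkup_rx_tx is the rule type carrying the hdr_data flexible
array that the macro references:

static struct ice_sw_rule_lkup_rx_tx *example_alloc_eth_rule(void)
{
	struct ice_sw_rule_lkup_rx_tx *rule;

	/* fixed part of the rule plus room for a dummy Ethernet header */
	rule = kzalloc(ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rule), GFP_KERNEL);

	return rule;
}
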
/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
@@ -528,7 +1392,7 @@ static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
* Allocate memory for the entire recipe table and initialize the structures/
* entries corresponding to basic recipes.
*/
-enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
+int ice_init_def_sw_recp(struct ice_hw *hw)
{
struct ice_sw_recipe *recps;
u8 i;
@@ -536,7 +1400,7 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES,
sizeof(*recps), GFP_KERNEL);
if (!recps)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
recps[i].root_rid = i;
@@ -576,14 +1440,14 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
* in response buffer. The caller of this function to use *num_elems while
* parsing the response buffer.
*/
-static enum ice_status
+static int
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
u16 buf_size, u16 *req_desc, u16 *num_elems,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sw_cfg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
cmd = &desc.params.get_sw_conf;
@@ -606,14 +1470,14 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
*
* Add a VSI context to the hardware (0x0210)
*/
-static enum ice_status
+static int
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *res;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
res = &desc.params.add_update_free_vsi_res;
@@ -650,14 +1514,14 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Free VSI context info from hardware (0x0213)
*/
-static enum ice_status
+static int
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
resp = &desc.params.add_update_free_vsi_res;
@@ -685,14 +1549,14 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Update VSI context in the hardware (0x0211)
*/
-static enum ice_status
+static int
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
resp = &desc.params.add_update_free_vsi_res;
@@ -832,15 +1696,15 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw)
* If this function gets called after reset for existing VSIs then update
* with the new HW VSI number in the corresponding VSI handle list entry.
*/
-enum ice_status
+int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_vsi_ctx *tmp_vsi_ctx;
- enum ice_status status;
+ int status;
if (vsi_handle >= ICE_MAX_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
status = ice_aq_add_vsi(hw, vsi_ctx, cd);
if (status)
return status;
@@ -851,7 +1715,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
sizeof(*tmp_vsi_ctx), GFP_KERNEL);
if (!tmp_vsi_ctx) {
ice_aq_free_vsi(hw, vsi_ctx, false, cd);
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
}
*tmp_vsi_ctx = *vsi_ctx;
ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
@@ -873,14 +1737,14 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
*
* Free VSI context info from hardware as well as from VSI handle list
*/
-enum ice_status
+int
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
if (!status)
@@ -897,12 +1761,12 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
*
* Update VSI context in the hardware
*/
-enum ice_status
+int
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
return ice_aq_update_vsi(hw, vsi_ctx, cd);
}
@@ -916,18 +1780,36 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
{
- struct ice_vsi_ctx *ctx;
+ struct ice_vsi_ctx *ctx, *cached_ctx;
+ int status;
+
+ cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!cached_ctx)
+ return -ENOENT;
- ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
- return -EIO;
+ return -ENOMEM;
+
+ ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
+ ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
+ ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;
+
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
if (enable)
ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
else
ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
- return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL));
+ status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
+ if (!status) {
+ cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
+ cached_ctx->info.valid_sections |= ctx->info.valid_sections;
+ }
+
+ kfree(ctx);
+ return status;
}
/**
@@ -939,20 +1821,20 @@ ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
*
* allocates or free a VSI list resource
*/
-static enum ice_status
+static int
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_sw_lkup_type lkup_type,
enum ice_adminq_opc opc)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
struct ice_aqc_res_elem *vsi_ele;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(sw_buf, elem, 1);
sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
if (!sw_buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
sw_buf->num_elems = cpu_to_le16(1);
if (lkup_type == ICE_SW_LKUP_MAC ||
@@ -960,13 +1842,14 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_ETHERTYPE ||
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
- lkup_type == ICE_SW_LKUP_PROMISC_VLAN) {
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_DFLT) {
sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
sw_buf->res_type =
cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
} else {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto ice_aq_alloc_free_vsi_list_exit;
}
@@ -998,17 +1881,17 @@ ice_aq_alloc_free_vsi_list_exit:
*
* Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rule commands to firmware
*/
-enum ice_status
+int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (opc != ice_aqc_opc_add_sw_rules &&
opc != ice_aqc_opc_update_sw_rules &&
opc != ice_aqc_opc_remove_sw_rules)
- return ICE_ERR_PARAM;
+ return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
@@ -1018,7 +1901,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
if (opc != ice_aqc_opc_add_sw_rules &&
hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
return status;
}
@@ -1032,7 +1915,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
*
* Add(0x0290)
*/
-static enum ice_status
+static int
ice_aq_add_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 num_recipes, struct ice_sq_cd *cd)
@@ -1069,18 +1952,18 @@ ice_aq_add_recipe(struct ice_hw *hw,
* The caller must supply enough space in s_recipe_list to hold all possible
* recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
*/
-static enum ice_status
+static int
ice_aq_get_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
{
struct ice_aqc_add_get_recipe *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
u16 buf_size;
+ int status;
if (*num_recipes != ICE_MAX_NUM_RECIPES)
- return ICE_ERR_PARAM;
+ return -EINVAL;
cmd = &desc.params.add_get_recipe;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
@@ -1097,6 +1980,64 @@ ice_aq_get_recipe(struct ice_hw *hw,
}
/**
+ * ice_update_recipe_lkup_idx - update a default recipe based on the lkup_idx
+ * @hw: pointer to the HW struct
+ * @params: parameters used to update the default recipe
+ *
+ * This function only supports updating default recipes and it only supports
+ * updating a single recipe based on the lkup_idx at a time.
+ *
+ * This is done as a read-modify-write operation. First, get the current recipe
+ * contents based on the recipe's ID. Then modify the field vector index and
+ * mask if it's valid at the lkup_idx. Finally, use the add recipe AQ to update
+ * the pre-existing recipe with the modifications.
+ */
+int
+ice_update_recipe_lkup_idx(struct ice_hw *hw,
+ struct ice_update_recipe_lkup_idx_params *params)
+{
+ struct ice_aqc_recipe_data_elem *rcp_list;
+ u16 num_recps = ICE_MAX_NUM_RECIPES;
+ int status;
+
+ rcp_list = kcalloc(num_recps, sizeof(*rcp_list), GFP_KERNEL);
+ if (!rcp_list)
+ return -ENOMEM;
+
+ /* read current recipe list from firmware */
+ rcp_list->recipe_indx = params->rid;
+ status = ice_aq_get_recipe(hw, rcp_list, &num_recps, params->rid, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW, "Failed to get recipe %d, status %d\n",
+ params->rid, status);
+ goto error_out;
+ }
+
+ /* only modify existing recipe's lkup_idx and mask if valid, while
+	 * leaving all other fields the same, then update the recipe in firmware
+ */
+ rcp_list->content.lkup_indx[params->lkup_idx] = params->fv_idx;
+ if (params->mask_valid)
+ rcp_list->content.mask[params->lkup_idx] =
+ cpu_to_le16(params->mask);
+
+ if (params->ignore_valid)
+ rcp_list->content.lkup_indx[params->lkup_idx] |=
+ ICE_AQ_RECIPE_LKUP_IGNORE;
+
+ status = ice_aq_add_recipe(hw, &rcp_list[0], 1, NULL);
+ if (status)
+ ice_debug(hw, ICE_DBG_SW, "Failed to update recipe %d lkup_idx %d fv_idx %d mask %d mask_valid %s, status %d\n",
+ params->rid, params->lkup_idx, params->fv_idx,
+ params->mask, params->mask_valid ? "true" : "false",
+ status);
+
+error_out:
+ kfree(rcp_list);
+ return status;
+}
+
+/**
* ice_aq_map_recipe_to_profile - Map recipe to packet profile
* @hw: pointer to the HW struct
* @profile_id: package profile ID to associate the recipe with
@@ -1104,7 +2045,7 @@ ice_aq_get_recipe(struct ice_hw *hw,
* @cd: pointer to command details structure or NULL
* Recipe to profile association (0x0291)
*/
-static enum ice_status
+static int
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd)
{
@@ -1130,13 +2071,13 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* @cd: pointer to command details structure or NULL
* Associate profile ID with given recipe (0x0293)
*/
-static enum ice_status
+static int
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.recipe_to_profile;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
@@ -1154,16 +2095,16 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* @hw: pointer to the hardware structure
* @rid: recipe ID returned as response to AQ call
*/
-static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
+static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = struct_size(sw_buf, elem, 1);
sw_buf = kzalloc(buf_len, GFP_KERNEL);
if (!sw_buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
sw_buf->num_elems = cpu_to_le16(1);
sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
@@ -1230,7 +2171,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
* bookkeeping so that we have a current list of all the recipes that are
* programmed in the firmware.
*/
-static enum ice_status
+static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
bool *refresh_required)
{
@@ -1238,16 +2179,16 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
struct ice_aqc_recipe_data_elem *tmp;
u16 num_recps = ICE_MAX_NUM_RECIPES;
struct ice_prot_lkup_ext *lkup_exts;
- enum ice_status status;
u8 fv_word_idx = 0;
u16 sub_recps;
+ int status;
bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
/* we need a buffer big enough to accommodate all the recipes */
tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
tmp[0].recipe_indx = rid;
status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
@@ -1284,7 +2225,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
GFP_KERNEL);
if (!rg_entry) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll;
}
@@ -1364,7 +2305,7 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
GFP_KERNEL);
if (!recps[rid].root_buf) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll;
}
@@ -1395,8 +2336,6 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
pi->sw_id = swid;
pi->pf_vf_num = pf_vf_num;
pi->is_vf = is_vf;
- pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
- pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
break;
default:
ice_debug(pi->hw, ICE_DBG_SW, "incorrect VSI/port type received\n");
@@ -1407,19 +2346,17 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
+int ice_get_initial_sw_cfg(struct ice_hw *hw)
{
struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
- enum ice_status status;
u16 req_desc = 0;
u16 num_elems;
+ int status;
u16 i;
- rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
- GFP_KERNEL);
-
+ rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);
if (!rbuf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Multiple calls to ice_aq_get_sw_cfg may be required
* to get all the switch configuration information. The need
@@ -1465,7 +2402,7 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
}
} while (req_desc && !status);
- devm_kfree(ice_hw_to_dev(hw), rbuf);
+ kfree(rbuf);
return status;
}
@@ -1536,9 +2473,11 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
*/
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
- struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
+ struct ice_sw_rule_lkup_rx_tx *s_rule,
+ enum ice_adminq_opc opc)
{
u16 vlan_id = ICE_MAX_VLAN_ID + 1;
+ u16 vlan_tpid = ETH_P_8021Q;
void *daddr = NULL;
u16 eth_hdr_sz;
u8 *eth_hdr;
@@ -1547,15 +2486,14 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
u8 q_rgn;
if (opc == ice_aqc_opc_remove_sw_rules) {
- s_rule->pdata.lkup_tx_rx.act = 0;
- s_rule->pdata.lkup_tx_rx.index =
- cpu_to_le16(f_info->fltr_rule_id);
- s_rule->pdata.lkup_tx_rx.hdr_len = 0;
+ s_rule->act = 0;
+ s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
+ s_rule->hdr_len = 0;
return;
}
eth_hdr_sz = sizeof(dummy_eth_header);
- eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
+ eth_hdr = s_rule->hdr_data;
/* initialize the ether header with a dummy header */
memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz);
@@ -1611,6 +2549,8 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
case ICE_SW_LKUP_VLAN:
vlan_id = f_info->l_data.vlan.vlan_id;
+ if (f_info->l_data.vlan.tpid_valid)
+ vlan_tpid = f_info->l_data.vlan.tpid;
if (f_info->fltr_act == ICE_FWD_TO_VSI ||
f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
act |= ICE_SINGLE_ACT_PRUNE;
@@ -1638,14 +2578,14 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
break;
}
- s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
+ s_rule->hdr.type = (f_info->flag & ICE_FLTR_RX) ?
cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX) :
cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
/* Recipe set depending on lookup type */
- s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(f_info->lkup_type);
- s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(f_info->src);
- s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+ s_rule->recipe_id = cpu_to_le16(f_info->lkup_type);
+ s_rule->src = cpu_to_le16(f_info->src);
+ s_rule->act = cpu_to_le32(act);
if (daddr)
ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr);
@@ -1653,11 +2593,13 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
if (!(vlan_id > ICE_MAX_VLAN_ID)) {
off = (__force __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
*off = cpu_to_be16(vlan_id);
+ off = (__force __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
+ *off = cpu_to_be16(vlan_tpid);
}
/* Create the switch rule with the final dummy Ethernet header */
if (opc != ice_aqc_opc_update_sw_rules)
- s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz);
+ s_rule->hdr_len = cpu_to_le16(eth_hdr_sz);
}
/**
@@ -1670,43 +2612,44 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
* Create a large action to hold software marker and update the switch rule
* entry pointed by m_ent with newly created large action
*/
-static enum ice_status
+static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
u16 sw_marker, u16 l_id)
{
- struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
+ struct ice_sw_rule_lkup_rx_tx *rx_tx;
+ struct ice_sw_rule_lg_act *lg_act;
/* For software marker we need 3 large actions
* 1. FWD action: FWD TO VSI or VSI LIST
* 2. GENERIC VALUE action to hold the profile ID
* 3. GENERIC VALUE action to hold the software marker ID
*/
const u16 num_lg_acts = 3;
- enum ice_status status;
u16 lg_act_size;
u16 rules_size;
+ int status;
u32 act;
u16 id;
if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* Create two back-to-back switch rules and submit them to the HW using
* one memory buffer:
* 1. Large Action
* 2. Look up Tx Rx
*/
- lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
- rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
+ lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(lg_act, num_lg_acts);
+ rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(rx_tx);
lg_act = devm_kzalloc(ice_hw_to_dev(hw), rules_size, GFP_KERNEL);
if (!lg_act)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
- rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
+ rx_tx = (typeof(rx_tx))((u8 *)lg_act + lg_act_size);
/* Fill in the first switch rule i.e. large action */
- lg_act->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
- lg_act->pdata.lg_act.index = cpu_to_le16(l_id);
- lg_act->pdata.lg_act.size = cpu_to_le16(num_lg_acts);
+ lg_act->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LG_ACT);
+ lg_act->index = cpu_to_le16(l_id);
+ lg_act->size = cpu_to_le16(num_lg_acts);
/* First action VSI forwarding or VSI list forwarding depending on how
* many VSIs
@@ -1718,13 +2661,13 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
if (m_ent->vsi_count > 1)
act |= ICE_LG_ACT_VSI_LIST;
- lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);
+ lg_act->act[0] = cpu_to_le32(act);
/* Second action descriptor type */
act = ICE_LG_ACT_GENERIC;
act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
- lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
+ lg_act->act[1] = cpu_to_le32(act);
act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
@@ -1734,24 +2677,22 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
ICE_LG_ACT_GENERIC_VALUE_M;
- lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
+ lg_act->act[2] = cpu_to_le32(act);
/* call the fill switch rule to fill the lookup Tx Rx structure */
ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
ice_aqc_opc_update_sw_rules);
/* Update the action to point to the large action ID */
- rx_tx->pdata.lkup_tx_rx.act =
- cpu_to_le32(ICE_SINGLE_ACT_PTR |
- ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
- ICE_SINGLE_ACT_PTR_VAL_M));
+ rx_tx->act = cpu_to_le32(ICE_SINGLE_ACT_PTR |
+ ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
+ ICE_SINGLE_ACT_PTR_VAL_M));
/* Use the filter rule ID of the previously created rule with single
* act. Once the update happens, hardware will treat this as large
* action
*/
- rx_tx->pdata.lkup_tx_rx.index =
- cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
+ rx_tx->index = cpu_to_le16(m_ent->fltr_info.fltr_rule_id);
status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
ice_aqc_opc_update_sw_rules, NULL);
@@ -1808,51 +2749,52 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* Call AQ command to add a new switch rule or update existing switch rule
* using the given VSI list ID
*/
-static enum ice_status
+static int
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
enum ice_sw_lkup_type lkup_type)
{
- struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
+ struct ice_sw_rule_vsi_list *s_rule;
u16 s_rule_size;
u16 rule_type;
+ int status;
int i;
if (!num_vsi)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (lkup_type == ICE_SW_LKUP_MAC ||
lkup_type == ICE_SW_LKUP_MAC_VLAN ||
lkup_type == ICE_SW_LKUP_ETHERTYPE ||
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
- lkup_type == ICE_SW_LKUP_PROMISC_VLAN)
+ lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ lkup_type == ICE_SW_LKUP_DFLT)
rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
ICE_AQC_SW_RULES_T_VSI_LIST_SET;
else if (lkup_type == ICE_SW_LKUP_VLAN)
rule_type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
else
- return ICE_ERR_PARAM;
+ return -EINVAL;
- s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
+ s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
for (i = 0; i < num_vsi; i++) {
if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
- status = ICE_ERR_PARAM;
+ status = -EINVAL;
goto exit;
}
/* AQ call requires hw_vsi_id(s) */
- s_rule->pdata.vsi_list.vsi[i] =
+ s_rule->vsi[i] =
cpu_to_le16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
}
- s_rule->type = cpu_to_le16(rule_type);
- s_rule->pdata.vsi_list.number_vsi = cpu_to_le16(num_vsi);
- s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
+ s_rule->hdr.type = cpu_to_le16(rule_type);
+ s_rule->number_vsi = cpu_to_le16(num_vsi);
+ s_rule->index = cpu_to_le16(vsi_list_id);
status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
@@ -1869,11 +2811,11 @@ exit:
* @vsi_list_id: stores the ID of the VSI list to be created
* @lkup_type: switch rule filter's lookup type
*/
-static enum ice_status
+static int
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
- enum ice_status status;
+ int status;
status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
ice_aqc_opc_alloc_res);
@@ -1895,24 +2837,25 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* to the corresponding filter management list to track this switch rule
* and VSI mapping
*/
-static enum ice_status
+static int
ice_create_pkt_fwd_rule(struct ice_hw *hw,
struct ice_fltr_list_entry *f_entry)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
enum ice_sw_lkup_type l_type;
struct ice_sw_recipe *recp;
- enum ice_status status;
+ int status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
+ GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
fm_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*fm_entry),
GFP_KERNEL);
if (!fm_entry) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto ice_create_pkt_fwd_rule_exit;
}
@@ -1927,17 +2870,16 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw,
ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
ice_aqc_opc_add_sw_rules);
- status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
+ status = ice_aq_sw_rules(hw, s_rule,
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
ice_aqc_opc_add_sw_rules, NULL);
if (status) {
devm_kfree(ice_hw_to_dev(hw), fm_entry);
goto ice_create_pkt_fwd_rule_exit;
}
- f_entry->fltr_info.fltr_rule_id =
- le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
- fm_entry->fltr_info.fltr_rule_id =
- le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+ f_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
+ fm_entry->fltr_info.fltr_rule_id = le16_to_cpu(s_rule->index);
/* The bookkeeping entries will get removed when the base driver
* calls the remove filter AQ command
@@ -1959,23 +2901,25 @@ ice_create_pkt_fwd_rule_exit:
* Call AQ command to update a previously created switch rule with a
* VSI list ID
*/
-static enum ice_status
+static int
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
- struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
+ int status;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, GFP_KERNEL);
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule),
+ GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
- s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id);
+ s_rule->index = cpu_to_le16(f_info->fltr_rule_id);
/* Update switch rule with new rule set to forward VSI list */
- status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
+ status = ice_aq_sw_rules(hw, s_rule,
+ ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), 1,
ice_aqc_opc_update_sw_rules, NULL);
devm_kfree(ice_hw_to_dev(hw), s_rule);
@@ -1988,13 +2932,13 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
*
* Updates unicast switch filter rules based on VEB/VEPA mode
*/
-enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
+int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *fm_entry;
- enum ice_status status = 0;
struct list_head *rule_head;
struct mutex *rule_lock; /* Lock to protect filter rule list */
+ int status = 0;
rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
@@ -2044,24 +2988,24 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
* Add the new VSI to the previously created VSI list set
* using the update switch rule command
*/
-static enum ice_status
+static int
ice_add_update_vsi_list(struct ice_hw *hw,
struct ice_fltr_mgmt_list_entry *m_entry,
struct ice_fltr_info *cur_fltr,
struct ice_fltr_info *new_fltr)
{
- enum ice_status status = 0;
u16 vsi_list_id = 0;
+ int status = 0;
if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
(cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
/* Only one entry existed in the mapping and it was not already
@@ -2073,7 +3017,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
- return ICE_ERR_ALREADY_EXISTS;
+ return -EEXIST;
vsi_handle_arr[0] = cur_fltr->vsi_handle;
vsi_handle_arr[1] = new_fltr->vsi_handle;
@@ -2101,7 +3045,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
vsi_list_id);
if (!m_entry->vsi_list_info)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* If this entry was large action then the large action needs
* to be updated to point to FWD to VSI list
@@ -2116,7 +3060,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
enum ice_adminq_opc opcode;
if (!m_entry->vsi_list_info)
- return ICE_ERR_CFG;
+ return -EIO;
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
@@ -2209,7 +3153,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
*
* Adds or updates the rule lists for a given recipe
*/
-static enum ice_status
+static int
ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
struct ice_fltr_list_entry *f_entry)
{
@@ -2217,10 +3161,10 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
struct ice_fltr_info *new_fltr, *cur_fltr;
struct ice_fltr_mgmt_list_entry *m_entry;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
f_entry->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@@ -2255,21 +3199,21 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
* The VSI list should be emptied before this function is called to remove the
* VSI list.
*/
-static enum ice_status
+static int
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
enum ice_sw_lkup_type lkup_type)
{
- struct ice_aqc_sw_rules_elem *s_rule;
- enum ice_status status;
+ struct ice_sw_rule_vsi_list *s_rule;
u16 s_rule_size;
+ int status;
- s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
+ s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, 0);
s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
- s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id);
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
+ s_rule->index = cpu_to_le16(vsi_list_id);
/* Free the vsi_list resource that we allocated. It is assumed that the
* list is empty at this point.
@@ -2288,21 +3232,21 @@ ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
* @fm_list: filter management entry for which the VSI list management needs to
* be done
*/
-static enum ice_status
+static int
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_mgmt_list_entry *fm_list)
{
enum ice_sw_lkup_type lkup_type;
- enum ice_status status = 0;
u16 vsi_list_id;
+ int status = 0;
if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
fm_list->vsi_count == 0)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* A rule with the VSI being removed does not exist */
if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
lkup_type = fm_list->fltr_info.lkup_type;
vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
@@ -2324,7 +3268,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
ICE_MAX_VSI);
if (!ice_is_vsi_valid(hw, rem_vsi_handle))
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* Make sure VSI list is empty before removing it below */
status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
@@ -2375,19 +3319,19 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
* @recp_id: recipe ID for which the rule needs to be removed
* @f_entry: rule entry containing filter information
*/
-static enum ice_status
+static int
ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
struct ice_fltr_list_entry *f_entry)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_fltr_mgmt_list_entry *list_elem;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
bool remove_rule = false;
u16 vsi_handle;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
f_entry->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@@ -2395,14 +3339,14 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
mutex_lock(rule_lock);
list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
if (!list_elem) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto exit;
}
if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
remove_rule = true;
} else if (!list_elem->vsi_list_info) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto exit;
} else if (list_elem->vsi_list_info->ref_cnt > 1) {
/* a ref_cnt > 1 indicates that the vsi_list is being
@@ -2429,13 +3373,13 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
if (remove_rule) {
/* Remove the lookup rule */
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
s_rule = devm_kzalloc(ice_hw_to_dev(hw),
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE,
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
GFP_KERNEL);
if (!s_rule) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto exit;
}
@@ -2443,8 +3387,8 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
ice_aqc_opc_remove_sw_rules);
status = ice_aq_sw_rules(hw, s_rule,
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
- ice_aqc_opc_remove_sw_rules, NULL);
+ ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule),
+ 1, ice_aqc_opc_remove_sw_rules, NULL);
/* Remove the bookkeeping entry from the list */
devm_kfree(ice_hw_to_dev(hw), s_rule);
@@ -2583,31 +3527,15 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
* ice_add_mac - Add a MAC address based filter rule
* @hw: pointer to the hardware structure
* @m_list: list of MAC addresses and forwarding information
- *
- * IMPORTANT: When the ucast_shared flag is set to false and m_list has
- * multiple unicast addresses, the function assumes that all the
- * addresses are unique in a given add_mac call. It doesn't
- * check for duplicates in this case, removing duplicates from a given
- * list should be taken care of in the caller of this function.
*/
-enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
+int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
- struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
- struct list_head *rule_head;
- u16 total_elem_left, s_rule_size;
- struct ice_switch_info *sw;
- struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
- u16 num_unicast = 0;
- u8 elem_sent;
+ int status = 0;
if (!m_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
- s_rule = NULL;
- sw = hw->switch_info;
- rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry(m_list_itr, m_list, list_entry) {
u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
u16 vsi_handle;
@@ -2616,119 +3544,23 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
m_list_itr->fltr_info.flag = ICE_FLTR_TX;
vsi_handle = m_list_itr->fltr_info.vsi_handle;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* update the src in case it is VSI num */
if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
m_list_itr->fltr_info.src = hw_vsi_id;
if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
is_zero_ether_addr(add))
- return ICE_ERR_PARAM;
- if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
- /* Don't overwrite the unicast address */
- mutex_lock(rule_lock);
- if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
- &m_list_itr->fltr_info)) {
- mutex_unlock(rule_lock);
- return ICE_ERR_ALREADY_EXISTS;
- }
- mutex_unlock(rule_lock);
- num_unicast++;
- } else if (is_multicast_ether_addr(add) ||
- (is_unicast_ether_addr(add) && hw->ucast_shared)) {
- m_list_itr->status =
- ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
- m_list_itr);
- if (m_list_itr->status)
- return m_list_itr->status;
- }
- }
+ return -EINVAL;
- mutex_lock(rule_lock);
- /* Exit if no suitable entries were found for adding bulk switch rule */
- if (!num_unicast) {
- status = 0;
- goto ice_add_mac_exit;
- }
-
- rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
-
- /* Allocate switch rule buffer for the bulk update for unicast */
- s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
- s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
- GFP_KERNEL);
- if (!s_rule) {
- status = ICE_ERR_NO_MEMORY;
- goto ice_add_mac_exit;
- }
-
- r_iter = s_rule;
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
-
- if (is_unicast_ether_addr(mac_addr)) {
- ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
- ice_aqc_opc_add_sw_rules);
- r_iter = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)r_iter + s_rule_size);
- }
- }
-
- /* Call AQ bulk switch rule update for all unicast addresses */
- r_iter = s_rule;
- /* Call AQ switch rule in AQ_MAX chunk */
- for (total_elem_left = num_unicast; total_elem_left > 0;
- total_elem_left -= elem_sent) {
- struct ice_aqc_sw_rules_elem *entry = r_iter;
-
- elem_sent = min_t(u8, total_elem_left,
- (ICE_AQ_MAX_BUF_LEN / s_rule_size));
- status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
- elem_sent, ice_aqc_opc_add_sw_rules,
- NULL);
- if (status)
- goto ice_add_mac_exit;
- r_iter = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)r_iter + (elem_sent * s_rule_size));
+ m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
+ m_list_itr);
+ if (m_list_itr->status)
+ return m_list_itr->status;
}
- /* Fill up rule ID based on the value returned from FW */
- r_iter = s_rule;
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
- struct ice_fltr_mgmt_list_entry *fm_entry;
-
- if (is_unicast_ether_addr(mac_addr)) {
- f_info->fltr_rule_id =
- le16_to_cpu(r_iter->pdata.lkup_tx_rx.index);
- f_info->fltr_act = ICE_FWD_TO_VSI;
- /* Create an entry to track this MAC address */
- fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*fm_entry), GFP_KERNEL);
- if (!fm_entry) {
- status = ICE_ERR_NO_MEMORY;
- goto ice_add_mac_exit;
- }
- fm_entry->fltr_info = *f_info;
- fm_entry->vsi_count = 1;
- /* The book keeping entries will get removed when
- * base driver calls remove filter AQ command
- */
-
- list_add(&fm_entry->list_entry, rule_head);
- r_iter = (struct ice_aqc_sw_rules_elem *)
- ((u8 *)r_iter + s_rule_size);
- }
- }
-
-ice_add_mac_exit:
- mutex_unlock(rule_lock);
- if (s_rule)
- devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
}
@@ -2737,7 +3569,7 @@ ice_add_mac_exit:
* @hw: pointer to the hardware structure
* @f_entry: filter entry containing one VLAN information
*/
-static enum ice_status
+static int
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
struct ice_switch_info *sw = hw->switch_info;
@@ -2746,10 +3578,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
enum ice_sw_lkup_type lkup_type;
u16 vsi_list_id = 0, vsi_handle;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
f_entry->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
@@ -2757,10 +3589,10 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
/* VLAN ID should only be 12 bits */
if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (new_fltr->src_id != ICE_SRC_ID_VSI)
- return ICE_ERR_PARAM;
+ return -EINVAL;
new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
lkup_type = new_fltr->lkup_type;
@@ -2799,7 +3631,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
new_fltr);
if (!v_list_itr) {
- status = ICE_ERR_DOES_NOT_EXIST;
+ status = -ENOENT;
goto exit;
}
/* reuse VSI list for new rule and increment ref_cnt */
@@ -2835,7 +3667,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
if (v_list_itr->vsi_count > 1 &&
v_list_itr->vsi_list_info->ref_cnt > 1) {
ice_debug(hw, ICE_DBG_SW, "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
- status = ICE_ERR_CFG;
+ status = -EIO;
goto exit;
}
@@ -2845,7 +3677,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
/* A rule already exists with the new VSI being added */
if (cur_handle == vsi_handle) {
- status = ICE_ERR_ALREADY_EXISTS;
+ status = -EEXIST;
goto exit;
}
@@ -2890,16 +3722,16 @@ exit:
* @hw: pointer to the hardware structure
* @v_list: list of VLAN entries and forwarding information
*/
-enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
+int ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr;
if (!v_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry(v_list_itr, v_list, list_entry) {
if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
- return ICE_ERR_PARAM;
+ return -EINVAL;
v_list_itr->fltr_info.flag = ICE_FLTR_TX;
v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
if (v_list_itr->status)
@@ -2917,13 +3749,12 @@ enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
* the filter list with the necessary fields (including flags to
* indicate Tx or Rx rules).
*/
-enum ice_status
-ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
struct ice_fltr_list_entry *em_list_itr;
if (!em_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry(em_list_itr, em_list, list_entry) {
enum ice_sw_lkup_type l_type =
@@ -2931,7 +3762,7 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
l_type != ICE_SW_LKUP_ETHERTYPE)
- return ICE_ERR_PARAM;
+ return -EINVAL;
em_list_itr->status = ice_add_rule_internal(hw, l_type,
em_list_itr);
@@ -2946,13 +3777,12 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
* @hw: pointer to the hardware structure
* @em_list: list of ethertype or ethertype MAC entries
*/
-enum ice_status
-ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
{
struct ice_fltr_list_entry *em_list_itr, *tmp;
if (!em_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
enum ice_sw_lkup_type l_type =
@@ -2960,7 +3790,7 @@ ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
l_type != ICE_SW_LKUP_ETHERTYPE)
- return ICE_ERR_PARAM;
+ return -EINVAL;
em_list_itr->status = ice_remove_rule_internal(hw, l_type,
em_list_itr);
@@ -3012,7 +3842,7 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
/**
* ice_cfg_dflt_vsi - change state of VSI to set/clear default
- * @hw: pointer to the hardware structure
+ * @pi: pointer to the port_info structure
* @vsi_handle: VSI handle to set as default
* @set: true to add the above mentioned switch rule, false to remove it
* @direction: ICE_FLTR_RX or ICE_FLTR_TX
@@ -3020,26 +3850,20 @@ ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
* add filter rule to set/unset given VSI as default VSI for the switch
* (represented by swid)
*/
-enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
+int
+ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
+ u8 direction)
{
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_fltr_list_entry f_list_entry;
struct ice_fltr_info f_info;
- enum ice_adminq_opc opcode;
- enum ice_status status;
- u16 s_rule_size;
+ struct ice_hw *hw = pi->hw;
u16 hw_vsi_id;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
- hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ return -EINVAL;
- s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
- ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
-
- s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
memset(&f_info, 0, sizeof(f_info));
@@ -3047,86 +3871,80 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
f_info.flag = direction;
f_info.fltr_act = ICE_FWD_TO_VSI;
f_info.fwd_id.hw_vsi_id = hw_vsi_id;
+ f_info.vsi_handle = vsi_handle;
if (f_info.flag & ICE_FLTR_RX) {
f_info.src = hw->port_info->lport;
f_info.src_id = ICE_SRC_ID_LPORT;
- if (!set)
- f_info.fltr_rule_id =
- hw->port_info->dflt_rx_vsi_rule_id;
} else if (f_info.flag & ICE_FLTR_TX) {
f_info.src_id = ICE_SRC_ID_VSI;
f_info.src = hw_vsi_id;
- if (!set)
- f_info.fltr_rule_id =
- hw->port_info->dflt_tx_vsi_rule_id;
}
+ f_list_entry.fltr_info = f_info;
if (set)
- opcode = ice_aqc_opc_add_sw_rules;
+ status = ice_add_rule_internal(hw, ICE_SW_LKUP_DFLT,
+ &f_list_entry);
else
- opcode = ice_aqc_opc_remove_sw_rules;
-
- ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
-
- status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
- if (status || !(f_info.flag & ICE_FLTR_TX_RX))
- goto out;
- if (set) {
- u16 index = le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
-
- if (f_info.flag & ICE_FLTR_TX) {
- hw->port_info->dflt_tx_vsi_num = hw_vsi_id;
- hw->port_info->dflt_tx_vsi_rule_id = index;
- } else if (f_info.flag & ICE_FLTR_RX) {
- hw->port_info->dflt_rx_vsi_num = hw_vsi_id;
- hw->port_info->dflt_rx_vsi_rule_id = index;
- }
- } else {
- if (f_info.flag & ICE_FLTR_TX) {
- hw->port_info->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
- hw->port_info->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
- } else if (f_info.flag & ICE_FLTR_RX) {
- hw->port_info->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
- hw->port_info->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
- }
- }
+ status = ice_remove_rule_internal(hw, ICE_SW_LKUP_DFLT,
+ &f_list_entry);
-out:
- devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
}
/**
- * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
- * @hw: pointer to the hardware structure
- * @recp_id: lookup type for which the specified rule needs to be searched
- * @f_info: rule information
- *
- * Helper function to search for a unicast rule entry - this is to be used
- * to remove unicast MAC filter that is not shared with other VSIs on the
- * PF switch.
+ * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
+ * @fm_entry: filter entry to inspect
+ * @vsi_handle: VSI handle to compare with filter info
+ */
+static bool
+ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
+{
+ return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
+ fm_entry->fltr_info.vsi_handle == vsi_handle) ||
+ (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+ fm_entry->vsi_list_info &&
+ (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
+}
+
+/**
+ * ice_check_if_dflt_vsi - check if VSI is default VSI
+ * @pi: pointer to the port_info structure
+ * @vsi_handle: vsi handle to check for in filter list
+ * @rule_exists: indicates if there are any VSIs in the rule list
*
- * Returns pointer to entry storing the rule if found
+ * checks if the VSI is in a default VSI list, and also indicates
+ * if the default VSI list is empty
*/
-static struct ice_fltr_mgmt_list_entry *
-ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
- struct ice_fltr_info *f_info)
+bool
+ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
+ bool *rule_exists)
{
- struct ice_switch_info *sw = hw->switch_info;
- struct ice_fltr_mgmt_list_entry *list_itr;
- struct list_head *list_head;
+ struct ice_fltr_mgmt_list_entry *fm_entry;
+ struct ice_sw_recipe *recp_list;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ bool ret = false;
- list_head = &sw->recp_list[recp_id].filt_rules;
- list_for_each_entry(list_itr, list_head, list_entry) {
- if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
- sizeof(f_info->l_data)) &&
- f_info->fwd_id.hw_vsi_id ==
- list_itr->fltr_info.fwd_id.hw_vsi_id &&
- f_info->flag == list_itr->fltr_info.flag)
- return list_itr;
+ recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
+ rule_lock = &recp_list->filt_rule_lock;
+ rule_head = &recp_list->filt_rules;
+
+ mutex_lock(rule_lock);
+
+ if (rule_exists && !list_empty(rule_head))
+ *rule_exists = true;
+
+ list_for_each_entry(fm_entry, rule_head, list_entry) {
+ if (ice_vsi_uses_fltr(fm_entry, vsi_handle)) {
+ ret = true;
+ break;
+ }
}
- return NULL;
+
+ mutex_unlock(rule_lock);
+
+ return ret;
}
/**
@@ -3137,47 +3955,32 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
* This function removes either a MAC filter rule or a specific VSI from a
* VSI list for a multicast MAC address.
*
- * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
- * ice_add_mac. Caller should be aware that this call will only work if all
- * the entries passed into m_list were added previously. It will not attempt to
- * do a partial remove of entries that were found.
+ * Returns -ENOENT if a given entry was not added by ice_add_mac. Caller should
+ * be aware that this call will only work if all the entries passed into m_list
+ * were added previously. It will not attempt to do a partial remove of entries
+ * that were found.
*/
-enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
+int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_fltr_list_entry *list_itr, *tmp;
- struct mutex *rule_lock; /* Lock to protect filter rule list */
if (!m_list)
- return ICE_ERR_PARAM;
+ return -EINVAL;
- rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
- u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
u16 vsi_handle;
if (l_type != ICE_SW_LKUP_MAC)
- return ICE_ERR_PARAM;
+ return -EINVAL;
vsi_handle = list_itr->fltr_info.vsi_handle;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_itr->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
- if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
- /* Don't remove the unicast address that belongs to
- * another VSI on the switch, since it is not being
- * shared...
- */
- mutex_lock(rule_lock);
- if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
- &list_itr->fltr_info)) {
- mutex_unlock(rule_lock);
- return ICE_ERR_DOES_NOT_EXIST;
- }
- mutex_unlock(rule_lock);
- }
+
list_itr->status = ice_remove_rule_internal(hw,
ICE_SW_LKUP_MAC,
list_itr);
@@ -3192,19 +3995,18 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
* @hw: pointer to the hardware structure
* @v_list: list of VLAN entries and forwarding information
*/
-enum ice_status
-ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
+int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr, *tmp;
if (!v_list || !hw)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
if (l_type != ICE_SW_LKUP_VLAN)
- return ICE_ERR_PARAM;
+ return -EINVAL;
v_list_itr->status = ice_remove_rule_internal(hw,
ICE_SW_LKUP_VLAN,
v_list_itr);
@@ -3215,21 +4017,6 @@ ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list)
}
/**
- * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
- * @fm_entry: filter entry to inspect
- * @vsi_handle: VSI handle to compare with filter info
- */
-static bool
-ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
-{
- return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
- fm_entry->fltr_info.vsi_handle == vsi_handle) ||
- (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
- fm_entry->vsi_list_info &&
- (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
-}
-
-/**
* ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to remove filters from
@@ -3242,7 +4029,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
* fltr_info.fwd_id fields. These are set such that later logic can
* extract which VSI to remove the fltr from, and pass on that information.
*/
-static enum ice_status
+static int
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct list_head *vsi_list_head,
struct ice_fltr_info *fi)
@@ -3254,7 +4041,7 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
*/
tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL);
if (!tmp)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
tmp->fltr_info = *fi;
@@ -3285,17 +4072,17 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
* Note that this means all entries in vsi_list_head must be explicitly
* deallocated by the caller when done with list.
*/
-static enum ice_status
+static int
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct list_head *lkup_list_head,
struct list_head *vsi_list_head)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
- enum ice_status status = 0;
+ int status = 0;
/* check to make sure VSI ID is valid and within boundary */
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
@@ -3349,9 +4136,8 @@ static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
* @recp_id: recipe ID for which the rule needs to be removed
* @v_list: list of promisc entries
*/
-static enum ice_status
-ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
- struct list_head *v_list)
+static int
+ice_remove_promisc(struct ice_hw *hw, u8 recp_id, struct list_head *v_list)
{
struct ice_fltr_list_entry *v_list_itr, *tmp;
@@ -3371,7 +4157,7 @@ ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
* @promisc_mask: mask of promiscuous config bits to clear
* @vid: VLAN ID to clear VLAN promiscuous
*/
-enum ice_status
+int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid)
{
@@ -3381,11 +4167,11 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
struct ice_fltr_mgmt_list_entry *itr;
struct list_head *rule_head;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = 0;
+ int status = 0;
u8 recipe_id;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
@@ -3444,20 +4230,20 @@ free_fltr_list:
* @promisc_mask: mask of promiscuous config bits
* @vid: VLAN ID to set VLAN promiscuous
*/
-enum ice_status
+int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
{
enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
struct ice_fltr_list_entry f_list_entry;
struct ice_fltr_info new_fltr;
- enum ice_status status = 0;
bool is_tx_fltr;
+ int status = 0;
u16 hw_vsi_id;
int pkt_type;
u8 recipe_id;
if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ return -EINVAL;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
memset(&new_fltr, 0, sizeof(new_fltr));
@@ -3558,7 +4344,7 @@ set_promisc_exit:
*
* Configure VSI with all associated VLANs to given promiscuous mode(s)
*/
-enum ice_status
+int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc)
{
@@ -3567,8 +4353,8 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
struct list_head vsi_list_head;
struct list_head *vlan_head;
struct mutex *vlan_lock; /* Lock to protect filter rule list */
- enum ice_status status;
u16 vlan_id;
+ int status;
INIT_LIST_HEAD(&vsi_list_head);
vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
@@ -3581,6 +4367,13 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
goto free_fltr_list;
list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
+ /* Avoid enabling or disabling VLAN zero twice when in double
+ * VLAN mode
+ */
+ if (ice_is_dvm_ena(hw) &&
+ list_itr->fltr_info.l_data.vlan.tpid == 0)
+ continue;
+
vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
if (rm_vlan_promisc)
status = ice_clear_vsi_promisc(hw, vsi_handle,
@@ -3588,7 +4381,7 @@ ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
else
status = ice_set_vsi_promisc(hw, vsi_handle,
promisc_mask, vlan_id);
- if (status)
+ if (status && status != -EEXIST)
break;
}
@@ -3616,7 +4409,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
struct list_head *rule_head;
struct ice_fltr_list_entry *tmp;
struct mutex *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status;
+ int status;
INIT_LIST_HEAD(&remove_list_head);
rule_lock = &sw->recp_list[lkup].filt_rule_lock;
@@ -3681,19 +4474,19 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
* @num_items: number of entries requested for FD resource type
* @counter_id: counter index returned by AQ call
*/
-enum ice_status
+int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
/* Allocate resource */
buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
@@ -3719,19 +4512,19 @@ exit:
* @num_items: number of entries to be freed for FD resource type
* @counter_id: counter ID resource which needs to be freed
*/
-enum ice_status
+int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
/* Free resource */
buf_len = struct_size(buf, elem, 1);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
@@ -3760,6 +4553,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
{ ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
{ ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
{ ICE_ETYPE_OL, { 0 } },
+ { ICE_ETYPE_IL, { 0 } },
{ ICE_VLAN_OFOS, { 2, 0 } },
{ ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
{ ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
@@ -3772,13 +4566,20 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
{ ICE_UDP_ILOS, { 0, 2 } },
{ ICE_VXLAN, { 8, 10, 12, 14 } },
{ ICE_GENEVE, { 8, 10, 12, 14 } },
- { ICE_NVGRE, { 0, 2, 4, 6 } },
+ { ICE_NVGRE, { 0, 2, 4, 6 } },
+ { ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
+ { ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
+ { ICE_PPPOE, { 0, 2, 4, 6 } },
+ { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
+ { ICE_VLAN_EX, { 2, 0 } },
+ { ICE_VLAN_IN, { 2, 0 } },
};
static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
{ ICE_MAC_IL, ICE_MAC_IL_HW },
{ ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
+ { ICE_ETYPE_IL, ICE_ETYPE_IL_HW },
{ ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
{ ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
{ ICE_IPV4_IL, ICE_IPV4_IL_HW },
@@ -3789,7 +4590,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
{ ICE_VXLAN, ICE_UDP_OF_HW },
{ ICE_GENEVE, ICE_UDP_OF_HW },
- { ICE_NVGRE, ICE_GRE_OF_HW },
+ { ICE_NVGRE, ICE_GRE_OF_HW },
+ { ICE_GTP, ICE_UDP_OF_HW },
+ { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
+ { ICE_PPPOE, ICE_PPPOE_HW },
+ { ICE_L2TPV3, ICE_L2TPV3_HW },
+ { ICE_VLAN_EX, ICE_VLAN_OF_HW },
+ { ICE_VLAN_IN, ICE_VLAN_OL_HW },
};
/**
@@ -3873,6 +4680,23 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
}
/**
+ * ice_change_proto_id_to_dvm - change proto id in prot_id_tbl
+ *
+ * The protocol ID for the outer VLAN differs between DVM and SVM, so when
+ * DVM is supported the prot_id_tbl entry for the outer VLAN has to be
+ * updated to the value used in DVM.
+ */
+void ice_change_proto_id_to_dvm(void)
+{
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
+ if (ice_prot_id_tbl[i].type == ICE_VLAN_OFOS &&
+ ice_prot_id_tbl[i].protocol_id != ICE_VLAN_OF_HW)
+ ice_prot_id_tbl[i].protocol_id = ICE_VLAN_OF_HW;
+}
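The call site is not part of this hunk; a hedged sketch of how it would typically be wired up during VLAN-mode initialization (hypothetical placement, both helpers are part of this series):

	/* Hypothetical init-path placement, not shown in this diff */
	if (ice_is_dvm_ena(hw))
		ice_change_proto_id_to_dvm();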
+
+/**
* ice_prot_type_to_id - get protocol ID from protocol type
* @type: protocol type
* @id: pointer to variable that will receive the ID
@@ -3941,7 +4765,7 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
* and start grouping them in 4-word groups. Each group makes up one
* recipe.
*/
-static enum ice_status
+static int
ice_create_first_fit_recp_def(struct ice_hw *hw,
struct ice_prot_lkup_ext *lkup_exts,
struct list_head *rg_list,
@@ -3965,7 +4789,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw,
sizeof(*entry),
GFP_KERNEL);
if (!entry)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
list_add(&entry->l_entry, rg_list);
grp = &entry->r_group;
(*recp_cnt)++;
@@ -3991,7 +4815,7 @@ ice_create_first_fit_recp_def(struct ice_hw *hw,
* Helper function to fill in the field vector indices for protocol-offset
* pairs. These indexes are then ultimately programmed into a recipe.
*/
-static enum ice_status
+static int
ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
struct list_head *rg_list)
{
@@ -4033,7 +4857,7 @@ ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
* invalid pair
*/
if (!found)
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
}
@@ -4075,12 +4899,10 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
u16 bit;
- bitmap_zero(possible_idx, ICE_MAX_FV_WORDS);
bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
- bitmap_zero(free_idx, ICE_MAX_FV_WORDS);
- bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
+ bitmap_fill(possible_idx, ICE_MAX_FV_WORDS);
/* For each profile we are going to associate the recipe with, add the
* recipes that are associated with that profile. This will give us
@@ -4115,7 +4937,7 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
* @rm: recipe management list entry
* @profiles: bitmap of profiles that will be associated.
*/
-static enum ice_status
+static int
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
unsigned long *profiles)
{
@@ -4123,11 +4945,11 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
struct ice_aqc_recipe_data_elem *tmp;
struct ice_aqc_recipe_data_elem *buf;
struct ice_recp_grp_entry *entry;
- enum ice_status status;
u16 free_res_idx;
u16 recipe_count;
u8 chain_idx;
u8 recps = 0;
+ int status;
/* When more than one recipe are required, another recipe is needed to
* chain them together. Matching a tunnel metadata ID takes up one of
@@ -4143,22 +4965,22 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
if (rm->n_grp_count > 1) {
if (rm->n_grp_count > free_res_idx)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
rm->n_grp_count++;
}
if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
GFP_KERNEL);
if (!buf) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_mem;
}
@@ -4218,7 +5040,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
*/
if (chain_idx >= ICE_MAX_FV_WORDS) {
ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
- status = ICE_ERR_MAX_LIMIT;
+ status = -ENOSPC;
goto err_unroll;
}
@@ -4249,7 +5071,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
sizeof(buf[0].recipe_bitmap));
} else {
- status = ICE_ERR_BAD_PTR;
+ status = -EINVAL;
goto err_unroll;
}
/* Applicable only for ROOT_RECIPE, set the fwd_priority for
@@ -4285,7 +5107,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
sizeof(*last_chain_entry),
GFP_KERNEL);
if (!last_chain_entry) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_unroll;
}
last_chain_entry->rid = rid;
@@ -4320,7 +5142,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
sizeof(buf[recps].recipe_bitmap));
} else {
- status = ICE_ERR_BAD_PTR;
+ status = -EINVAL;
goto err_unroll;
}
buf[recps].content.act_ctrl_fwd_priority = rm->priority;
@@ -4354,7 +5176,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
}
if (!idx_found) {
- status = ICE_ERR_OUT_OF_RANGE;
+ status = -EIO;
goto err_unroll;
}
@@ -4407,12 +5229,12 @@ err_mem:
* @rm: recipe management list entry
* @lkup_exts: lookup elements
*/
-static enum ice_status
+static int
ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
struct ice_prot_lkup_ext *lkup_exts)
{
- enum ice_status status;
u8 recp_count = 0;
+ int status;
rm->n_grp_count = 0;
@@ -4434,41 +5256,6 @@ ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
}
/**
- * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
- * @hw: pointer to hardware structure
- * @lkups: lookup elements or match criteria for the advanced recipe, one
- * structure per protocol header
- * @lkups_cnt: number of protocols
- * @bm: bitmap of field vectors to consider
- * @fv_list: pointer to a list that holds the returned field vectors
- */
-static enum ice_status
-ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
- unsigned long *bm, struct list_head *fv_list)
-{
- enum ice_status status;
- u8 *prot_ids;
- u16 i;
-
- prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
- if (!prot_ids)
- return ICE_ERR_NO_MEMORY;
-
- for (i = 0; i < lkups_cnt; i++)
- if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
- status = ICE_ERR_CFG;
- goto free_mem;
- }
-
- /* Find field vectors that include all specified protocol types */
- status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
-
-free_mem:
- kfree(prot_ids);
- return status;
-}
-
-/**
* ice_tun_type_match_word - determine if tun type needs a match mask
* @tun_type: tunnel type
* @mask: mask to be used for the tunnel
@@ -4479,6 +5266,8 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
case ICE_SW_TUN_GENEVE:
case ICE_SW_TUN_VXLAN:
case ICE_SW_TUN_NVGRE:
+ case ICE_SW_TUN_GTPU:
+ case ICE_SW_TUN_GTPC:
*mask = ICE_TUN_FLAG_MASK;
return true;
@@ -4492,10 +5281,11 @@ static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
* ice_add_special_words - Add words that are not protocols, such as metadata
* @rinfo: other information regarding the rule e.g. priority and action info
* @lkup_exts: lookup word structure
+ * @dvm_ena: is double VLAN mode enabled
*/
-static enum ice_status
+static int
ice_add_special_words(struct ice_adv_rule_info *rinfo,
- struct ice_prot_lkup_ext *lkup_exts)
+ struct ice_prot_lkup_ext *lkup_exts, bool dvm_ena)
{
u16 mask;
@@ -4510,7 +5300,20 @@ ice_add_special_words(struct ice_adv_rule_info *rinfo,
lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
lkup_exts->field_mask[word] = mask;
} else {
- return ICE_ERR_MAX_LIMIT;
+ return -ENOSPC;
+ }
+ }
+
+ if (rinfo->vlan_type != 0 && dvm_ena) {
+ if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
+ u8 word = lkup_exts->n_val_words++;
+
+ lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
+ lkup_exts->fv_words[word].off = ICE_VLAN_FLAG_MDID_OFF;
+ lkup_exts->field_mask[word] =
+ ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK;
+ } else {
+ return -ENOSPC;
}
}
@@ -4544,6 +5347,13 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
case ICE_SW_TUN_NVGRE:
prof_type = ICE_PROF_TUN_GRE;
break;
+ case ICE_SW_TUN_GTPU:
+ prof_type = ICE_PROF_TUN_GTPU;
+ break;
+ case ICE_SW_TUN_GTPC:
+ prof_type = ICE_PROF_TUN_GTPC;
+ break;
+ case ICE_SW_TUN_AND_NON_TUN:
default:
prof_type = ICE_PROF_ALL;
break;
@@ -4561,7 +5371,7 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
* @rinfo: other information regarding the rule e.g. priority and action info
* @rid: return the recipe ID of the recipe created
*/
-static enum ice_status
+static int
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
{
@@ -4572,16 +5382,16 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
struct ice_sw_fv_list_entry *fvit;
struct ice_recp_grp_entry *r_tmp;
struct ice_sw_fv_list_entry *tmp;
- enum ice_status status = 0;
struct ice_sw_recipe *rm;
+ int status = 0;
u8 i;
if (!lkups_cnt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
if (!lkup_exts)
- return ICE_ERR_NO_MEMORY;
+ return -ENOMEM;
/* Determine the number of words to be matched and if it exceeds a
* recipe's restrictions
@@ -4590,20 +5400,20 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 count;
if (lkups[i].type >= ICE_PROTOCOL_LAST) {
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_free_lkup_exts;
}
count = ice_fill_valid_words(&lkups[i], lkup_exts);
if (!count) {
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_free_lkup_exts;
}
}
rm = kzalloc(sizeof(*rm), GFP_KERNEL);
if (!rm) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_free_lkup_exts;
}
@@ -4615,20 +5425,20 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* Get bitmap of field vectors (profiles) that are compatible with the
* rule request; only these will be searched in the subsequent call to
- * ice_get_fv.
+ * ice_get_sw_fv_list.
*/
ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
- status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
+ status = ice_get_sw_fv_list(hw, lkup_exts, fv_bitmap, &rm->fv_list);
if (status)
goto err_unroll;
/* Create any special protocol/offset pairs, such as looking at tunnel
* bits by extracting metadata
*/
- status = ice_add_special_words(rinfo, lkup_exts);
+ status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
if (status)
- goto err_free_lkup_exts;
+ goto err_unroll;
/* Group match words into recipes using preferred recipe grouping
* criteria.
@@ -4727,115 +5537,158 @@ err_free_lkup_exts:
}
/**
+ * ice_dummy_packet_add_vlan - insert VLAN header(s) into dummy pkt
+ *
+ * @dummy_pkt: dummy packet profile pattern to which VLAN tag(s) will be added
+ * @num_vlan: number of VLAN tags
+ */
+static struct ice_dummy_pkt_profile *
+ice_dummy_packet_add_vlan(const struct ice_dummy_pkt_profile *dummy_pkt,
+ u32 num_vlan)
+{
+ struct ice_dummy_pkt_profile *profile;
+ struct ice_dummy_pkt_offsets *offsets;
+ u32 buf_len, off, etype_off, i;
+ u8 *pkt;
+
+ if (num_vlan < 1 || num_vlan > 2)
+ return ERR_PTR(-EINVAL);
+
+ off = num_vlan * VLAN_HLEN;
+
+ buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet_offsets)) +
+ dummy_pkt->offsets_len;
+ offsets = kzalloc(buf_len, GFP_KERNEL);
+ if (!offsets)
+ return ERR_PTR(-ENOMEM);
+
+ offsets[0] = dummy_pkt->offsets[0];
+ if (num_vlan == 2) {
+ offsets[1] = ice_dummy_qinq_packet_offsets[0];
+ offsets[2] = ice_dummy_qinq_packet_offsets[1];
+ } else if (num_vlan == 1) {
+ offsets[1] = ice_dummy_vlan_packet_offsets[0];
+ }
+
+ for (i = 1; dummy_pkt->offsets[i].type != ICE_PROTOCOL_LAST; i++) {
+ offsets[i + num_vlan].type = dummy_pkt->offsets[i].type;
+ offsets[i + num_vlan].offset =
+ dummy_pkt->offsets[i].offset + off;
+ }
+ offsets[i + num_vlan] = dummy_pkt->offsets[i];
+
+ etype_off = dummy_pkt->offsets[1].offset;
+
+ buf_len = array_size(num_vlan, sizeof(ice_dummy_vlan_packet)) +
+ dummy_pkt->pkt_len;
+ pkt = kzalloc(buf_len, GFP_KERNEL);
+ if (!pkt) {
+ kfree(offsets);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memcpy(pkt, dummy_pkt->pkt, etype_off);
+ memcpy(pkt + etype_off,
+ num_vlan == 2 ? ice_dummy_qinq_packet : ice_dummy_vlan_packet,
+ off);
+ memcpy(pkt + etype_off + off, dummy_pkt->pkt + etype_off,
+ dummy_pkt->pkt_len - etype_off);
+
+ profile = kzalloc(sizeof(*profile), GFP_KERNEL);
+ if (!profile) {
+ kfree(offsets);
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ profile->offsets = offsets;
+ profile->pkt = pkt;
+ profile->pkt_len = buf_len;
+ profile->match |= ICE_PKT_KMALLOC;
+
+ return profile;
+}
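For intuition, a hedged sketch of what the helper builds for num_vlan == 1 (the byte contents of ice_dummy_vlan_packet itself are not shown in this hunk):

	/*
	 * pkt:      [ bytes 0 .. etype_off-1 of the base template (MAC hdr) ]
	 *           [ 4 bytes taken from ice_dummy_vlan_packet              ]
	 *           [ base template from etype_off onward (ethertype, ...)  ]
	 *
	 * offsets:  entry 0 (MAC) is copied as-is, one VLAN entry is spliced
	 *           in at index 1, and every later protocol offset is
	 *           shifted by VLAN_HLEN.
	 */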
+
+/**
* ice_find_dummy_packet - find dummy packet
*
* @lkups: lookup elements or match criteria for the advanced recipe, one
* structure per protocol header
* @lkups_cnt: number of protocols
* @tun_type: tunnel type
- * @pkt: dummy packet to fill according to filter match criteria
- * @pkt_len: packet length of dummy packet
- * @offsets: pointer to receive the pointer to the offsets for the packet
+ *
+ * Returns the &ice_dummy_pkt_profile corresponding to these lookup params.
*/
-static void
+static const struct ice_dummy_pkt_profile *
ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
- enum ice_sw_tunnel_type tun_type,
- const u8 **pkt, u16 *pkt_len,
- const struct ice_dummy_pkt_offsets **offsets)
+ enum ice_sw_tunnel_type tun_type)
{
- bool tcp = false, udp = false, ipv6 = false, vlan = false;
+ const struct ice_dummy_pkt_profile *ret = ice_dummy_pkt_profiles;
+ u32 match = 0, vlan_count = 0;
u16 i;
+ switch (tun_type) {
+ case ICE_SW_TUN_GTPC:
+ match |= ICE_PKT_TUN_GTPC;
+ break;
+ case ICE_SW_TUN_GTPU:
+ match |= ICE_PKT_TUN_GTPU;
+ break;
+ case ICE_SW_TUN_NVGRE:
+ match |= ICE_PKT_TUN_NVGRE;
+ break;
+ case ICE_SW_TUN_GENEVE:
+ case ICE_SW_TUN_VXLAN:
+ match |= ICE_PKT_TUN_UDP;
+ break;
+ default:
+ break;
+ }
+
for (i = 0; i < lkups_cnt; i++) {
if (lkups[i].type == ICE_UDP_ILOS)
- udp = true;
+ match |= ICE_PKT_INNER_UDP;
else if (lkups[i].type == ICE_TCP_IL)
- tcp = true;
+ match |= ICE_PKT_INNER_TCP;
else if (lkups[i].type == ICE_IPV6_OFOS)
- ipv6 = true;
- else if (lkups[i].type == ICE_VLAN_OFOS)
- vlan = true;
+ match |= ICE_PKT_OUTER_IPV6;
+ else if (lkups[i].type == ICE_VLAN_OFOS ||
+ lkups[i].type == ICE_VLAN_EX)
+ vlan_count++;
+ else if (lkups[i].type == ICE_VLAN_IN)
+ vlan_count++;
else if (lkups[i].type == ICE_ETYPE_OL &&
lkups[i].h_u.ethertype.ethtype_id ==
cpu_to_be16(ICE_IPV6_ETHER_ID) &&
lkups[i].m_u.ethertype.ethtype_id ==
- cpu_to_be16(0xFFFF))
- ipv6 = true;
- }
-
- if (tun_type == ICE_SW_TUN_NVGRE) {
- if (tcp) {
- *pkt = dummy_gre_tcp_packet;
- *pkt_len = sizeof(dummy_gre_tcp_packet);
- *offsets = dummy_gre_tcp_packet_offsets;
- return;
- }
-
- *pkt = dummy_gre_udp_packet;
- *pkt_len = sizeof(dummy_gre_udp_packet);
- *offsets = dummy_gre_udp_packet_offsets;
- return;
+ cpu_to_be16(0xFFFF))
+ match |= ICE_PKT_OUTER_IPV6;
+ else if (lkups[i].type == ICE_ETYPE_IL &&
+ lkups[i].h_u.ethertype.ethtype_id ==
+ cpu_to_be16(ICE_IPV6_ETHER_ID) &&
+ lkups[i].m_u.ethertype.ethtype_id ==
+ cpu_to_be16(0xFFFF))
+ match |= ICE_PKT_INNER_IPV6;
+ else if (lkups[i].type == ICE_IPV6_IL)
+ match |= ICE_PKT_INNER_IPV6;
+ else if (lkups[i].type == ICE_GTP_NO_PAY)
+ match |= ICE_PKT_GTP_NOPAY;
+ else if (lkups[i].type == ICE_PPPOE) {
+ match |= ICE_PKT_PPPOE;
+ if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
+ htons(PPP_IPV6))
+ match |= ICE_PKT_OUTER_IPV6;
+ } else if (lkups[i].type == ICE_L2TPV3)
+ match |= ICE_PKT_L2TPV3;
}
- if (tun_type == ICE_SW_TUN_VXLAN ||
- tun_type == ICE_SW_TUN_GENEVE) {
- if (tcp) {
- *pkt = dummy_udp_tun_tcp_packet;
- *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
- *offsets = dummy_udp_tun_tcp_packet_offsets;
- return;
- }
+ while (ret->match && (match & ret->match) != ret->match)
+ ret++;
- *pkt = dummy_udp_tun_udp_packet;
- *pkt_len = sizeof(dummy_udp_tun_udp_packet);
- *offsets = dummy_udp_tun_udp_packet_offsets;
- return;
- }
+ if (vlan_count != 0)
+ ret = ice_dummy_packet_add_vlan(ret, vlan_count);
- if (udp && !ipv6) {
- if (vlan) {
- *pkt = dummy_vlan_udp_packet;
- *pkt_len = sizeof(dummy_vlan_udp_packet);
- *offsets = dummy_vlan_udp_packet_offsets;
- return;
- }
- *pkt = dummy_udp_packet;
- *pkt_len = sizeof(dummy_udp_packet);
- *offsets = dummy_udp_packet_offsets;
- return;
- } else if (udp && ipv6) {
- if (vlan) {
- *pkt = dummy_vlan_udp_ipv6_packet;
- *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
- *offsets = dummy_vlan_udp_ipv6_packet_offsets;
- return;
- }
- *pkt = dummy_udp_ipv6_packet;
- *pkt_len = sizeof(dummy_udp_ipv6_packet);
- *offsets = dummy_udp_ipv6_packet_offsets;
- return;
- } else if ((tcp && ipv6) || ipv6) {
- if (vlan) {
- *pkt = dummy_vlan_tcp_ipv6_packet;
- *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
- *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
- return;
- }
- *pkt = dummy_tcp_ipv6_packet;
- *pkt_len = sizeof(dummy_tcp_ipv6_packet);
- *offsets = dummy_tcp_ipv6_packet_offsets;
- return;
- }
-
- if (vlan) {
- *pkt = dummy_vlan_tcp_packet;
- *pkt_len = sizeof(dummy_vlan_tcp_packet);
- *offsets = dummy_vlan_tcp_packet_offsets;
- } else {
- *pkt = dummy_tcp_packet;
- *pkt_len = sizeof(dummy_tcp_packet);
- *offsets = dummy_tcp_packet_offsets;
- }
+ return ret;
}
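A hedged worked example of the flag-driven lookup (the exact ordering of ice_dummy_pkt_profiles is assumed; the table is assumed to end with a catch-all entry whose match is 0):

	/* Example: an inner-TCP over outer-IPv6 rule */
	u32 match = ICE_PKT_INNER_TCP | ICE_PKT_OUTER_IPV6;
	const struct ice_dummy_pkt_profile *p = ice_dummy_pkt_profiles;

	/* stop at the first profile whose required bits are a subset of match */
	while (p->match && (match & p->match) != p->match)
		p++;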
/**
@@ -4845,15 +5698,12 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* structure per protocol header
* @lkups_cnt: number of protocols
* @s_rule: stores rule information from the match criteria
- * @dummy_pkt: dummy packet to fill according to filter match criteria
- * @pkt_len: packet length of dummy packet
- * @offsets: offset info for the dummy packet
+ * @profile: dummy packet profile (the template, its size and header offsets)
*/
-static enum ice_status
+static int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
- struct ice_aqc_sw_rules_elem *s_rule,
- const u8 *dummy_pkt, u16 pkt_len,
- const struct ice_dummy_pkt_offsets *offsets)
+ struct ice_sw_rule_lkup_rx_tx *s_rule,
+ const struct ice_dummy_pkt_profile *profile)
{
u8 *pkt;
u16 i;
@@ -4861,11 +5711,12 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
/* Start with a packet with a pre-defined/dummy content. Then, fill
* in the header values to be looked up or matched.
*/
- pkt = s_rule->pdata.lkup_tx_rx.hdr;
+ pkt = s_rule->hdr_data;
- memcpy(pkt, dummy_pkt, pkt_len);
+ memcpy(pkt, profile->pkt, profile->pkt_len);
for (i = 0; i < lkups_cnt; i++) {
+ const struct ice_dummy_pkt_offsets *offsets = profile->offsets;
enum ice_protocol_type type;
u16 offset = 0, len = 0, j;
bool found = false;
@@ -4883,7 +5734,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
}
/* this should never happen in a correct calling sequence */
if (!found)
- return ICE_ERR_PARAM;
+ return -EINVAL;
switch (lkups[i].type) {
case ICE_MAC_OFOS:
@@ -4891,9 +5742,12 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
len = sizeof(struct ice_ether_hdr);
break;
case ICE_ETYPE_OL:
+ case ICE_ETYPE_IL:
len = sizeof(struct ice_ethtype_hdr);
break;
case ICE_VLAN_OFOS:
+ case ICE_VLAN_EX:
+ case ICE_VLAN_IN:
len = sizeof(struct ice_vlan_hdr);
break;
case ICE_IPV4_OFOS:
@@ -4919,13 +5773,23 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_GENEVE:
len = sizeof(struct ice_udp_tnl_hdr);
break;
+ case ICE_GTP_NO_PAY:
+ case ICE_GTP:
+ len = sizeof(struct ice_udp_gtp_hdr);
+ break;
+ case ICE_PPPOE:
+ len = sizeof(struct ice_pppoe_hdr);
+ break;
+ case ICE_L2TPV3:
+ len = sizeof(struct ice_l2tpv3_sess_hdr);
+ break;
default:
- return ICE_ERR_PARAM;
+ return -EINVAL;
}
/* the length should be a word multiple */
if (len % ICE_BYTES_PER_WORD)
- return ICE_ERR_CFG;
+ return -EIO;
/* We have the offset to the header start, the length, the
* caller's header values and mask. Use this information to
@@ -4934,16 +5798,18 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* indicated by the mask to make sure we don't improperly write
* over any significant packet data.
*/
- for (j = 0; j < len / sizeof(u16); j++)
- if (((u16 *)&lkups[i].m_u)[j])
- ((u16 *)(pkt + offset))[j] =
- (((u16 *)(pkt + offset))[j] &
- ~((u16 *)&lkups[i].m_u)[j]) |
- (((u16 *)&lkups[i].h_u)[j] &
- ((u16 *)&lkups[i].m_u)[j]);
+ for (j = 0; j < len / sizeof(u16); j++) {
+ u16 *ptr = (u16 *)(pkt + offset);
+ u16 mask = lkups[i].m_raw[j];
+
+ if (!mask)
+ continue;
+
+ ptr[j] = (ptr[j] & ~mask) | (lkups[i].h_raw[j] & mask);
+ }
}
- s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);
+ s_rule->hdr_len = cpu_to_le16(profile->pkt_len);
return 0;
}
@@ -4955,7 +5821,7 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
* @pkt: dummy packet to fill in
* @offsets: offset info for the dummy packet
*/
-static enum ice_status
+static int
ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
{
@@ -4964,11 +5830,11 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
switch (tun_type) {
case ICE_SW_TUN_VXLAN:
if (!ice_get_open_tunnel_port(hw, &open_port, TNL_VXLAN))
- return ICE_ERR_CFG;
+ return -EIO;
break;
case ICE_SW_TUN_GENEVE:
if (!ice_get_open_tunnel_port(hw, &open_port, TNL_GENEVE))
- return ICE_ERR_CFG;
+ return -EIO;
break;
default:
/* Nothing needs to be done for this tunnel type */
@@ -4989,7 +5855,37 @@ ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
}
}
- return ICE_ERR_CFG;
+ return -EIO;
+}
+
+/**
+ * ice_fill_adv_packet_vlan - fill dummy packet with VLAN tag type
+ * @vlan_type: VLAN tag type
+ * @pkt: dummy packet to fill in
+ * @offsets: offset info for the dummy packet
+ */
+static int
+ice_fill_adv_packet_vlan(u16 vlan_type, u8 *pkt,
+ const struct ice_dummy_pkt_offsets *offsets)
+{
+ u16 i;
+
+ /* Find VLAN header and insert VLAN TPID */
+ for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
+ if (offsets[i].type == ICE_VLAN_OFOS ||
+ offsets[i].type == ICE_VLAN_EX) {
+ struct ice_vlan_hdr *hdr;
+ u16 offset;
+
+ offset = offsets[i].offset;
+ hdr = (struct ice_vlan_hdr *)&pkt[offset];
+ hdr->type = cpu_to_be16(vlan_type);
+
+ return 0;
+ }
+ }
+
+ return -EIO;
}
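A hedged usage example for an 802.1ad (QinQ) outer tag, reusing the variable names of the caller later in this diff (ETH_P_8021AD is the standard 0x88A8 TPID):

	rinfo->vlan_type = ETH_P_8021AD;
	status = ice_fill_adv_packet_vlan(rinfo->vlan_type, s_rule->hdr_data,
					  profile->offsets);
	/* only the first ICE_VLAN_OFOS/ICE_VLAN_EX header gets this TPID */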
/**
@@ -5027,6 +5923,7 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
rinfo->tun_type == list_itr->rule_info.tun_type &&
+ rinfo->vlan_type == list_itr->rule_info.vlan_type &&
lkups_matched)
return list_itr;
}
@@ -5054,25 +5951,25 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
* Add the new VSI to the previously created VSI list set
* using the update switch rule command
*/
-static enum ice_status
+static int
ice_adv_add_update_vsi_list(struct ice_hw *hw,
struct ice_adv_fltr_mgmt_list_entry *m_entry,
struct ice_adv_rule_info *cur_fltr,
struct ice_adv_rule_info *new_fltr)
{
- enum ice_status status;
u16 vsi_list_id = 0;
+ int status;
if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
(cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
- return ICE_ERR_NOT_IMPL;
+ return -EOPNOTSUPP;
if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
/* Only one entry existed in the mapping and it was not already
@@ -5085,7 +5982,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
new_fltr->sw_act.fwd_id.hw_vsi_id)
- return ICE_ERR_ALREADY_EXISTS;
+ return -EEXIST;
vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
@@ -5118,7 +6015,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle = new_fltr->sw_act.vsi_handle;
if (!m_entry->vsi_list_info)
- return ICE_ERR_CFG;
+ return -EIO;
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
@@ -5160,21 +6057,20 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
* rinfo describes other information related to this rule such as forwarding
* IDs, priority of this rule, etc.
*/
-enum ice_status
+int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
struct ice_rule_query_data *added_entry)
{
struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
- u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
- const struct ice_dummy_pkt_offsets *pkt_offsets;
- struct ice_aqc_sw_rules_elem *s_rule = NULL;
+ struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
+ const struct ice_dummy_pkt_profile *profile;
+ u16 rid = 0, i, rule_buf_sz, vsi_handle;
struct list_head *rule_head;
struct ice_switch_info *sw;
- enum ice_status status;
- const u8 *pkt = NULL;
u16 word_cnt;
u32 act = 0;
+ int status;
u8 q_rgn;
/* Initialize profile to result index bitmap */
@@ -5184,39 +6080,42 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
if (!lkups_cnt)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* get # of words we need to match */
word_cnt = 0;
for (i = 0; i < lkups_cnt; i++) {
- u16 j, *ptr;
+ u16 j;
- ptr = (u16 *)&lkups[i].m_u;
- for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
- if (ptr[j] != 0)
+ for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
+ if (lkups[i].m_raw[j])
word_cnt++;
}
- if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
- return ICE_ERR_PARAM;
+ if (!word_cnt)
+ return -EINVAL;
- /* make sure that we can locate a dummy packet */
- ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
- &pkt_offsets);
- if (!pkt) {
- status = ICE_ERR_PARAM;
- goto err_ice_add_adv_rule;
- }
+ if (word_cnt > ICE_MAX_CHAIN_WORDS)
+ return -ENOSPC;
+
+ /* locate a dummy packet */
+ profile = ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type);
+ if (IS_ERR(profile))
+ return PTR_ERR(profile);
if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
- rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
- return ICE_ERR_CFG;
+ rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
+ status = -EIO;
+ goto free_pkt_profile;
+ }
vsi_handle = rinfo->sw_act.vsi_handle;
- if (!ice_is_vsi_valid(hw, vsi_handle))
- return ICE_ERR_PARAM;
+ if (!ice_is_vsi_valid(hw, vsi_handle)) {
+ status = -EINVAL;
+ goto free_pkt_profile;
+ }
if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
rinfo->sw_act.fwd_id.hw_vsi_id =
@@ -5226,7 +6125,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
if (status)
- return status;
+ goto free_pkt_profile;
m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
if (m_entry) {
/* we have to add VSI to VSI_LIST and increment vsi_count.
@@ -5245,12 +6144,14 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
}
- return status;
+ goto free_pkt_profile;
}
- rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
+ rule_buf_sz = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, profile->pkt_len);
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
- if (!s_rule)
- return ICE_ERR_NO_MEMORY;
+ if (!s_rule) {
+ status = -ENOMEM;
+ goto free_pkt_profile;
+ }
if (!rinfo->flags_info.act_valid) {
act |= ICE_SINGLE_ACT_LAN_ENABLE;
act |= ICE_SINGLE_ACT_LB_ENABLE;
@@ -5284,7 +6185,7 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
ICE_SINGLE_ACT_VALID_BIT;
break;
default:
- status = ICE_ERR_CFG;
+ status = -EIO;
goto err_ice_add_adv_rule;
}
@@ -5296,26 +6197,33 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
* by caller)
*/
if (rinfo->rx) {
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
- s_rule->pdata.lkup_tx_rx.src =
- cpu_to_le16(hw->port_info->lport);
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->src = cpu_to_le16(hw->port_info->lport);
} else {
- s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
- s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+ s_rule->src = cpu_to_le16(rinfo->sw_act.src);
}
- s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
- s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+ s_rule->recipe_id = cpu_to_le16(rid);
+ s_rule->act = cpu_to_le32(act);
- status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
- pkt_len, pkt_offsets);
+ status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, profile);
if (status)
goto err_ice_add_adv_rule;
- if (rinfo->tun_type != ICE_NON_TUN) {
+ if (rinfo->tun_type != ICE_NON_TUN &&
+ rinfo->tun_type != ICE_SW_TUN_AND_NON_TUN) {
status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
- s_rule->pdata.lkup_tx_rx.hdr,
- pkt_offsets);
+ s_rule->hdr_data,
+ profile->offsets);
+ if (status)
+ goto err_ice_add_adv_rule;
+ }
+
+ if (rinfo->vlan_type != 0 && ice_is_dvm_ena(hw)) {
+ status = ice_fill_adv_packet_vlan(rinfo->vlan_type,
+ s_rule->hdr_data,
+ profile->offsets);
if (status)
goto err_ice_add_adv_rule;
}
@@ -5329,21 +6237,20 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
sizeof(struct ice_adv_fltr_mgmt_list_entry),
GFP_KERNEL);
if (!adv_fltr) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_adv_rule;
}
adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
lkups_cnt * sizeof(*lkups), GFP_KERNEL);
if (!adv_fltr->lkups) {
- status = ICE_ERR_NO_MEMORY;
+ status = -ENOMEM;
goto err_ice_add_adv_rule;
}
adv_fltr->lkups_cnt = lkups_cnt;
adv_fltr->rule_info = *rinfo;
- adv_fltr->rule_info.fltr_rule_id =
- le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+ adv_fltr->rule_info.fltr_rule_id = le16_to_cpu(s_rule->index);
sw = hw->switch_info;
sw->recp_list[rid].adv_rule = true;
rule_head = &sw->recp_list[rid].filt_rules;
@@ -5366,6 +6273,13 @@ err_ice_add_adv_rule:
kfree(s_rule);
+free_pkt_profile:
+ if (profile->match & ICE_PKT_KMALLOC) {
+ kfree(profile->offsets);
+ kfree(profile->pkt);
+ kfree(profile);
+ }
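A note on the ownership convention the cleanup above relies on:

	/* Only profiles built by ice_dummy_packet_add_vlan() carry
	 * ICE_PKT_KMALLOC and own their pkt/offsets buffers; entries taken
	 * straight from the static profile table must not be freed.
	 */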
+
return status;
}
@@ -5379,12 +6293,12 @@ err_ice_add_adv_rule:
* Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
* It is required to pass valid VSI handle.
*/
-static enum ice_status
+static int
ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
struct list_head *list_head)
{
struct ice_fltr_mgmt_list_entry *itr;
- enum ice_status status = 0;
+ int status = 0;
u16 hw_vsi_id;
if (list_empty(list_head))
@@ -5433,22 +6347,22 @@ end:
* @fm_list: filter management entry for which the VSI list management needs to
* be done
*/
-static enum ice_status
+static int
ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_adv_fltr_mgmt_list_entry *fm_list)
{
struct ice_vsi_list_map_info *vsi_list_info;
enum ice_sw_lkup_type lkup_type;
- enum ice_status status;
u16 vsi_list_id;
+ int status;
if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
fm_list->vsi_count == 0)
- return ICE_ERR_PARAM;
+ return -EINVAL;
/* A rule with the VSI being removed does not exist */
if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
lkup_type = ICE_SW_LKUP_LAST;
vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
@@ -5468,7 +6382,7 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
ICE_MAX_VSI);
if (!ice_is_vsi_valid(hw, rem_vsi_handle))
- return ICE_ERR_OUT_OF_RANGE;
+ return -EIO;
/* Make sure VSI list is empty before removing it below */
status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
@@ -5532,40 +6446,40 @@ ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
* header. rinfo describes other information related to this rule such as
* forwarding IDs, priority of this rule, etc.
*/
-static enum ice_status
+static int
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
struct ice_adv_fltr_mgmt_list_entry *list_elem;
struct ice_prot_lkup_ext lkup_exts;
- enum ice_status status = 0;
bool remove_rule = false;
struct mutex *rule_lock; /* Lock to protect filter rule list */
u16 i, rid, vsi_handle;
+ int status = 0;
memset(&lkup_exts, 0, sizeof(lkup_exts));
for (i = 0; i < lkups_cnt; i++) {
u16 count;
if (lkups[i].type >= ICE_PROTOCOL_LAST)
- return ICE_ERR_CFG;
+ return -EIO;
count = ice_fill_valid_words(&lkups[i], &lkup_exts);
if (!count)
- return ICE_ERR_CFG;
+ return -EIO;
}
/* Create any special protocol/offset pairs, such as looking at tunnel
* bits by extracting metadata
*/
- status = ice_add_special_words(rinfo, &lkup_exts);
+ status = ice_add_special_words(rinfo, &lkup_exts, ice_is_dvm_ena(hw));
if (status)
return status;
rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
/* If we did not find a recipe that matches the existing criteria */
if (rid == ICE_MAX_NUM_RECIPES)
- return ICE_ERR_PARAM;
+ return -EINVAL;
rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
@@ -5591,21 +6505,20 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
mutex_unlock(rule_lock);
if (remove_rule) {
- struct ice_aqc_sw_rules_elem *s_rule;
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
u16 rule_buf_sz;
- rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+ rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s_rule);
s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
if (!s_rule)
- return ICE_ERR_NO_MEMORY;
- s_rule->pdata.lkup_tx_rx.act = 0;
- s_rule->pdata.lkup_tx_rx.index =
- cpu_to_le16(list_elem->rule_info.fltr_rule_id);
- s_rule->pdata.lkup_tx_rx.hdr_len = 0;
+ return -ENOMEM;
+ s_rule->act = 0;
+ s_rule->index = cpu_to_le16(list_elem->rule_info.fltr_rule_id);
+ s_rule->hdr_len = 0;
status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
rule_buf_sz, 1,
ice_aqc_opc_remove_sw_rules, NULL);
- if (!status || status == ICE_ERR_DOES_NOT_EXIST) {
+ if (!status || status == -ENOENT) {
struct ice_switch_info *sw = hw->switch_info;
mutex_lock(rule_lock);
@@ -5630,7 +6543,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
* the remove_entry parameter. This function will remove rule for a given
* vsi_handle with a given rule_id which is passed as parameter in remove_entry
*/
-enum ice_status
+int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
struct ice_rule_query_data *remove_entry)
{
@@ -5641,7 +6554,7 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
sw = hw->switch_info;
if (!sw->recp_list[remove_entry->rid].recp_created)
- return ICE_ERR_PARAM;
+ return -EINVAL;
list_head = &sw->recp_list[remove_entry->rid].filt_rules;
list_for_each_entry(list_itr, list_head, list_entry) {
if (list_itr->rule_info.fltr_rule_id ==
@@ -5653,7 +6566,92 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
}
}
/* either list is empty or unable to find rule */
- return ICE_ERR_DOES_NOT_EXIST;
+ return -ENOENT;
+}
+
+/**
+ * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
+ * given VSI handle
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
+ *
+ * This function is used to remove all the rules for a given VSI and as soon
+ * as removing a rule fails, it will return immediately with the error code,
+ * else it will return success.
+ */
+int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
+ struct ice_vsi_list_map_info *map_info;
+ struct ice_adv_rule_info rinfo;
+ struct list_head *list_head;
+ struct ice_switch_info *sw;
+ int status;
+ u8 rid;
+
+ sw = hw->switch_info;
+ for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
+ if (!sw->recp_list[rid].recp_created)
+ continue;
+ if (!sw->recp_list[rid].adv_rule)
+ continue;
+
+ list_head = &sw->recp_list[rid].filt_rules;
+ list_for_each_entry_safe(list_itr, tmp_entry, list_head,
+ list_entry) {
+ rinfo = list_itr->rule_info;
+
+ if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
+ map_info = list_itr->vsi_list_info;
+ if (!map_info)
+ continue;
+
+ if (!test_bit(vsi_handle, map_info->vsi_map))
+ continue;
+ } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
+ continue;
+ }
+
+ rinfo.sw_act.vsi_handle = vsi_handle;
+ status = ice_rem_adv_rule(hw, list_itr->lkups,
+ list_itr->lkups_cnt, &rinfo);
+ if (status)
+ return status;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: driver VSI handle
+ * @list_head: list for which filters need to be replayed
+ *
+ * Replay the advanced rule for the given VSI.
+ */
+static int
+ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
+ struct list_head *list_head)
+{
+ struct ice_rule_query_data added_entry = { 0 };
+ struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
+ int status = 0;
+
+ if (list_empty(list_head))
+ return status;
+ list_for_each_entry(adv_fltr, list_head, list_entry) {
+ struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
+ u16 lk_cnt = adv_fltr->lkups_cnt;
+
+ if (vsi_handle != rinfo->sw_act.vsi_handle)
+ continue;
+ status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
+ &added_entry);
+ if (status)
+ break;
+ }
+ return status;
}
/**
@@ -5663,17 +6661,20 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
*
* Replays filters for requested VSI via vsi_handle.
*/
-enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_switch_info *sw = hw->switch_info;
- enum ice_status status = 0;
+ int status;
u8 i;
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
struct list_head *head;
head = &sw->recp_list[i].filt_replay_rules;
- status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
+ if (!sw->recp_list[i].adv_rule)
+ status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
+ else
+ status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
if (status)
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index d8a38906f16f..68d8e8a6a189 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -14,8 +14,14 @@
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
-#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
- (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
+/* Switch Profile IDs for Profile related switch rules */
+#define ICE_PROFID_IPV4_GTPC_TEID 41
+#define ICE_PROFID_IPV4_GTPC_NO_TEID 42
+#define ICE_PROFID_IPV4_GTPU_TEID 43
+#define ICE_PROFID_IPV6_GTPC_TEID 44
+#define ICE_PROFID_IPV6_GTPC_NO_TEID 45
+#define ICE_PROFID_IPV6_GTPU_TEID 46
+#define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
@@ -33,15 +39,6 @@ struct ice_vsi_ctx {
struct ice_q_ctx *rdma_q_ctx[ICE_MAX_TRAFFIC_CLASS];
};
-enum ice_sw_fwd_act_type {
- ICE_FWD_TO_VSI = 0,
- ICE_FWD_TO_VSI_LIST, /* Do not use this when adding filter */
- ICE_FWD_TO_Q,
- ICE_FWD_TO_QGRP,
- ICE_DROP_PACKET,
- ICE_INVAL_ACT
-};
-
/* Switch recipe ID enum values are specific to hardware */
enum ice_sw_lkup_type {
ICE_SW_LKUP_ETHERTYPE = 0,
@@ -86,6 +83,8 @@ struct ice_fltr_info {
} mac_vlan;
struct {
u16 vlan_id;
+ u16 tpid;
+ u8 tpid_valid;
} vlan;
/* Set lkup_type as ICE_SW_LKUP_ETHERTYPE
* if just using ethertype as filter. Set lkup_type as
@@ -125,10 +124,27 @@ struct ice_fltr_info {
u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
};
+struct ice_update_recipe_lkup_idx_params {
+ u16 rid;
+ u16 fv_idx;
+ bool ignore_valid;
+ u16 mask;
+ bool mask_valid;
+ u8 lkup_idx;
+};
+
struct ice_adv_lkup_elem {
enum ice_protocol_type type;
- union ice_prot_hdr h_u; /* Header values */
- union ice_prot_hdr m_u; /* Mask of header values to match */
+ union {
+ union ice_prot_hdr h_u; /* Header values */
+ /* Used to iterate over the headers */
+ u16 h_raw[sizeof(union ice_prot_hdr) / sizeof(u16)];
+ };
+ union {
+ union ice_prot_hdr m_u; /* Mask of header values to match */
+ /* Used to iterate over header mask */
+ u16 m_raw[sizeof(union ice_prot_hdr) / sizeof(u16)];
+ };
};
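The anonymous unions let callers iterate the header and mask as plain arrays of u16 words instead of open-coded casts; the word-count loop in ice_add_adv_rule (in the ice_switch.c hunk above) becomes:

	for (j = 0; j < ARRAY_SIZE(lkups->m_raw); j++)
		if (lkups[i].m_raw[j])
			word_cnt++;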
struct ice_sw_act_ctrl {
@@ -176,6 +192,7 @@ struct ice_adv_rule_info {
u32 priority;
u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
u16 fltr_rule_id;
+ u16 vlan_type;
struct ice_adv_rule_flags_info flags_info;
};
@@ -256,7 +273,7 @@ struct ice_vsi_list_map_info {
struct ice_fltr_list_entry {
struct list_head list_entry;
- enum ice_status status;
+ int status;
struct ice_fltr_info fltr_info;
};
@@ -302,75 +319,79 @@ enum ice_promisc_flags {
};
/* VSI related commands */
-enum ice_status
+int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
void ice_clear_all_vsi_ctx(struct ice_hw *hw);
/* Switch config */
-enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
+int ice_get_initial_sw_cfg(struct ice_hw *hw);
-enum ice_status
+int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id);
-enum ice_status
+int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id);
/* Switch/bridge related commands */
-enum ice_status
+int
ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
struct ice_rule_query_data *added_entry);
-enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
-enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
-enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
-enum ice_status
-ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
-enum ice_status
-ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
-int
-ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
+int ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
+int ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
+int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
+int ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
+int ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle);
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle);
+int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
+int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
+int ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
-ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
-enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
/* Promisc/defport setup for VSIs */
-enum ice_status
-ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
-enum ice_status
+int
+ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
+ u8 direction);
+bool
+ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
+ bool *rule_exists);
+
+int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
-enum ice_status
+int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
u16 vid);
-enum ice_status
+int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc);
-enum ice_status
-ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
+int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
+int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
struct ice_rule_query_data *remove_entry);
-enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
+int ice_init_def_sw_recp(struct ice_hw *hw);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
+int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+int
+ice_update_recipe_lkup_idx(struct ice_hw *hw,
+ struct ice_update_recipe_lkup_idx_params *params);
+void ice_change_proto_id_to_dvm(void);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 25cca5c4ae57..d1a31f236d26 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -24,18 +24,26 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
+ lkups_cnt++;
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
+ lkups_cnt++;
+
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
lkups_cnt++;
+ if (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
+ ICE_TC_FLWR_FIELD_ENC_IP_TTL))
+ lkups_cnt++;
+
if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
lkups_cnt++;
- /* currently inner etype filter isn't supported */
- if ((flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) &&
- fltr->tunnel_type == TNL_LAST)
+ if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
lkups_cnt++;
/* are MAC fields specified? */
@@ -43,7 +51,16 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
lkups_cnt++;
/* is VLAN specified? */
- if (flags & ICE_TC_FLWR_FIELD_VLAN)
+ if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO))
+ lkups_cnt++;
+
+ /* is CVLAN specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO))
+ lkups_cnt++;
+
+ /* are PPPoE options specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
+ ICE_TC_FLWR_FIELD_PPP_PROTO))
lkups_cnt++;
/* are IPv[4|6] fields specified? */
@@ -51,6 +68,13 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
lkups_cnt++;
+ if (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))
+ lkups_cnt++;
+
+ /* are L2TPv3 options specified? */
+ if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID)
+ lkups_cnt++;
+
/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
ICE_TC_FLWR_FIELD_SRC_L4_PORT))
@@ -64,6 +88,11 @@ static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
}
+static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
+{
+ return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
+}
+
static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
{
return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
@@ -96,6 +125,11 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type)
return ICE_GENEVE;
case TNL_GRETAP:
return ICE_NVGRE;
+ case TNL_GTPU:
+ /* NO_PAY profiles will not work with GTP-U */
+ return ICE_GTP;
+ case TNL_GTPC:
+ return ICE_GTP_NO_PAY;
default:
return 0;
}
@@ -111,11 +145,27 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
return ICE_SW_TUN_GENEVE;
case TNL_GRETAP:
return ICE_SW_TUN_NVGRE;
+ case TNL_GTPU:
+ return ICE_SW_TUN_GTPU;
+ case TNL_GTPC:
+ return ICE_SW_TUN_GTPC;
default:
return ICE_NON_TUN;
}
}
+static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
+{
+ switch (vlan_tpid) {
+ case ETH_P_8021Q:
+ case ETH_P_8021AD:
+ case ETH_P_QINQ1:
+ return vlan_tpid;
+ default:
+ return 0;
+ }
+}
+
static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
struct ice_adv_lkup_elem *list)
@@ -137,7 +187,15 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
break;
case TNL_GRETAP:
list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
- memcpy(&list[i].m_u.nvgre_hdr.tni_flow, "\xff\xff\xff\xff", 4);
+ memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
+ "\xff\xff\xff\xff", 4);
+ i++;
+ break;
+ case TNL_GTPC:
+ case TNL_GTPU:
+ list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
+ memcpy(&list[i].m_u.gtp_hdr.teid,
+ "\xff\xff\xff\xff", 4);
i++;
break;
default:
@@ -145,6 +203,33 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
}
}
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
+ list[i].type = ice_proto_type_from_mac(false);
+ ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
+ hdr->l2_key.dst_mac);
+ ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
+ hdr->l2_mask.dst_mac);
+ i++;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
+ (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
+ list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
+
+ if (fltr->gtp_pdu_info_masks.pdu_type) {
+ list[i].h_u.gtp_hdr.pdu_type =
+ fltr->gtp_pdu_info_keys.pdu_type << 4;
+ memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
+ }
+
+ if (fltr->gtp_pdu_info_masks.qfi) {
+ list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
+ memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
+ }
+
+ i++;
+ }
+
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
list[i].type = ice_proto_type_from_ipv4(false);
@@ -183,6 +268,50 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
+ if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IP) &&
+ (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
+ ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
+ list[i].type = ice_proto_type_from_ipv4(false);
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
+ list[i].h_u.ipv4_hdr.tos = hdr->l3_key.tos;
+ list[i].m_u.ipv4_hdr.tos = hdr->l3_mask.tos;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
+ list[i].h_u.ipv4_hdr.time_to_live = hdr->l3_key.ttl;
+ list[i].m_u.ipv4_hdr.time_to_live = hdr->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
+ if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IPV6) &&
+ (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
+ ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
+ struct ice_ipv6_hdr *hdr_h, *hdr_m;
+
+ hdr_h = &list[i].h_u.ipv6_hdr;
+ hdr_m = &list[i].m_u.ipv6_hdr;
+ list[i].type = ice_proto_type_from_ipv6(false);
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
+ be32p_replace_bits(&hdr_h->be_ver_tc_flow,
+ hdr->l3_key.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ be32p_replace_bits(&hdr_m->be_ver_tc_flow,
+ hdr->l3_mask.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
+ hdr_h->hop_limit = hdr->l3_key.ttl;
+ hdr_m->hop_limit = hdr->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
hdr->l3_key.ip_proto == IPPROTO_UDP) {
list[i].type = ICE_UDP_OF;
@@ -216,16 +345,21 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
bool inner = false;
+ u16 vlan_tpid = 0;
int i = 0;
+ rule_info->vlan_type = vlan_tpid;
+
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);
headers = &tc_fltr->inner_headers;
inner = true;
- } else if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
- list[i].type = ICE_ETYPE_OL;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
+ list[i].type = ice_proto_type_from_etype(inner);
list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
i++;
@@ -255,10 +389,76 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
}
/* copy VLAN info */
- if (flags & ICE_TC_FLWR_FIELD_VLAN) {
- list[i].type = ICE_VLAN_OFOS;
- list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
- list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+ if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) {
+ vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
+ rule_info->vlan_type =
+ ice_check_supported_vlan_tpid(vlan_tpid);
+
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN)
+ list[i].type = ICE_VLAN_EX;
+ else
+ list[i].type = ICE_VLAN_OFOS;
+
+ if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_VLAN_PRIO) {
+ if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
+ } else {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
+ list[i].h_u.vlan_hdr.vlan = 0;
+ }
+ list[i].h_u.vlan_hdr.vlan |=
+ headers->vlan_hdr.vlan_prio;
+ }
+
+ i++;
+ }
+
+ if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
+ list[i].type = ICE_VLAN_IN;
+
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN_PRIO) {
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
+ } else {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
+ list[i].h_u.vlan_hdr.vlan = 0;
+ }
+ list[i].h_u.vlan_hdr.vlan |=
+ headers->cvlan_hdr.vlan_prio;
+ }
+
+ i++;
+ }
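The masks used in the VLAN and CVLAN blocks above follow the 802.1Q TCI layout (PCP in bits 15:13, DEI in bit 12, VID in bits 11:0); as a quick reference:

	/*
	 * 0x0FFF - match the VLAN ID only
	 * 0xE000 - match the priority (PCP) bits only
	 * 0xEFFF - match VID + PCP, leaving the DEI bit out of the key
	 */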
+
+ if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
+ ICE_TC_FLWR_FIELD_PPP_PROTO)) {
+ struct ice_pppoe_hdr *vals, *masks;
+
+ vals = &list[i].h_u.pppoe_hdr;
+ masks = &list[i].m_u.pppoe_hdr;
+
+ list[i].type = ICE_PPPOE;
+
+ if (flags & ICE_TC_FLWR_FIELD_PPPOE_SESSID) {
+ vals->session_id = headers->pppoe_hdr.session_id;
+ masks->session_id = cpu_to_be16(0xFFFF);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_PPP_PROTO) {
+ vals->ppp_prot_id = headers->pppoe_hdr.ppp_proto;
+ masks->ppp_prot_id = cpu_to_be16(0xFFFF);
+ }
+
i++;
}
@@ -305,6 +505,61 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
i++;
}
+ if (headers->l2_key.n_proto == htons(ETH_P_IP) &&
+ (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
+ list[i].type = ice_proto_type_from_ipv4(inner);
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
+ list[i].h_u.ipv4_hdr.tos = headers->l3_key.tos;
+ list[i].m_u.ipv4_hdr.tos = headers->l3_mask.tos;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
+ list[i].h_u.ipv4_hdr.time_to_live =
+ headers->l3_key.ttl;
+ list[i].m_u.ipv4_hdr.time_to_live =
+ headers->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
+ if (headers->l2_key.n_proto == htons(ETH_P_IPV6) &&
+ (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
+ struct ice_ipv6_hdr *hdr_h, *hdr_m;
+
+ hdr_h = &list[i].h_u.ipv6_hdr;
+ hdr_m = &list[i].m_u.ipv6_hdr;
+ list[i].type = ice_proto_type_from_ipv6(inner);
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
+ be32p_replace_bits(&hdr_h->be_ver_tc_flow,
+ headers->l3_key.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ be32p_replace_bits(&hdr_m->be_ver_tc_flow,
+ headers->l3_mask.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
+ hdr_h->hop_limit = headers->l3_key.ttl;
+ hdr_m->hop_limit = headers->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID) {
+ list[i].type = ICE_L2TPV3;
+
+ list[i].h_u.l2tpv3_sess_hdr.session_id =
+ headers->l2tpv3_hdr.session_id;
+ list[i].m_u.l2tpv3_sess_hdr.session_id =
+ cpu_to_be32(0xFFFFFFFF);
+
+ i++;
+ }
+
/* copy L4 (src, dest) port */
if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
@@ -344,6 +599,12 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
if (netif_is_gretap(tunnel_dev) ||
netif_is_ip6gretap(tunnel_dev))
return TNL_GRETAP;
+
+ /* Assume GTP-U by default in case of GTP netdev.
+ * GTP-C may be selected later, based on enc_dst_port.
+ */
+ if (netif_is_gtp(tunnel_dev))
+ return TNL_GTPU;
return TNL_LAST;
}
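The GTP-C refinement mentioned in the comment above is not part of this hunk; a hedged sketch of the idea, where enc_dst_port is an assumed placeholder for the parsed encapsulation destination port (GTP-C uses UDP port 2123, GTP-U uses 2152):

	/* Hypothetical follow-up done elsewhere in the driver */
	if (enc_dst_port == htons(2123))
		fltr->tunnel_type = TNL_GTPC;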
@@ -398,9 +659,8 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
struct ice_hw *hw = &vsi->back->hw;
struct ice_adv_lkup_elem *list;
u32 flags = fltr->flags;
- enum ice_status status;
int lkups_cnt;
- int ret = 0;
+ int ret;
int i;
if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
@@ -433,30 +693,30 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
* results into order of switch rule evaluation.
*/
rule_info.priority = 7;
+ rule_info.flags_info.act_valid = true;
if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
rule_info.sw_act.flag |= ICE_FLTR_RX;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
+ rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
} else {
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.src = vsi->idx;
rule_info.rx = false;
rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
- rule_info.flags_info.act_valid = true;
}
/* specify the cookie as filter_rule_id */
rule_info.fltr_rule_id = fltr->cookie;
- status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
- if (status == ICE_ERR_ALREADY_EXISTS) {
+ ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
+ if (ret == -EEXIST) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
- } else if (status) {
+ } else if (ret) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
- ret = -EIO;
goto exit;
}
@@ -465,6 +725,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
*/
fltr->rid = rule_added.rid;
fltr->rule_id = rule_added.rule_id;
+ fltr->dest_vsi_handle = rule_added.vsi_handle;
exit:
kfree(list);
@@ -472,6 +733,116 @@ exit:
}
/**
+ * ice_locate_vsi_using_queue - locate VSI using queue (forward to queue action)
+ * @vsi: Pointer to VSI
+ * @tc_fltr: Pointer to tc_flower_filter
+ *
+ * Locate the VSI using the specified queue. When ADQ is not enabled, always
+ * return the input VSI; otherwise locate the corresponding VSI based on the
+ * per-channel offset and qcount.
+ */
+static struct ice_vsi *
+ice_locate_vsi_using_queue(struct ice_vsi *vsi,
+ struct ice_tc_flower_fltr *tc_fltr)
+{
+ int num_tc, tc, queue;
+
+ /* if ADQ is not active, passed VSI is the candidate VSI */
+ if (!ice_is_adq_active(vsi->back))
+ return vsi;
+
+ /* Locate the VSI (it could still be main PF VSI or CHNL_VSI depending
+ * upon queue number)
+ */
+ num_tc = vsi->mqprio_qopt.qopt.num_tc;
+ queue = tc_fltr->action.fwd.q.queue;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ int qcount = vsi->mqprio_qopt.qopt.count[tc];
+ int offset = vsi->mqprio_qopt.qopt.offset[tc];
+
+ if (queue >= offset && queue < offset + qcount) {
+ /* for non-ADQ TCs, passed VSI is the candidate VSI */
+ if (tc < ICE_CHNL_START_TC)
+ return vsi;
+ else
+ return vsi->tc_map_vsi[tc];
+ }
+ }
+ return NULL;
+}
+
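As a worked example of the lookup in ice_locate_vsi_using_queue() above (the mqprio numbers below are hypothetical, not taken from the patch): with qopt.count = {4, 4, 8} and qopt.offset = {0, 4, 8}, queue 10 satisfies 8 <= 10 < 16, so it falls in TC 2; since that TC is at or above ICE_CHNL_START_TC, the filter is steered to the channel (ADQ) VSI for that TC. A standalone sketch of the same walk:

#include <stdio.h>

int main(void)
{
	const int count[]  = { 4, 4, 8 };	/* queues per TC (hypothetical) */
	const int offset[] = { 0, 4, 8 };	/* first queue of each TC */
	const int queue = 10;
	int tc;

	for (tc = 0; tc < 3; tc++)
		if (queue >= offset[tc] && queue < offset[tc] + count[tc])
			break;

	printf("queue %d maps to TC %d\n", queue, tc);	/* queue 10 maps to TC 2 */
	return 0;
}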
+static struct ice_rx_ring *
+ice_locate_rx_ring_using_queue(struct ice_vsi *vsi,
+ struct ice_tc_flower_fltr *tc_fltr)
+{
+ u16 queue = tc_fltr->action.fwd.q.queue;
+
+ return queue < vsi->num_rxq ? vsi->rx_rings[queue] : NULL;
+}
+
+/**
+ * ice_tc_forward_action - Determine destination VSI and queue for the action
+ * @vsi: Pointer to VSI
+ * @tc_fltr: Pointer to TC flower filter structure
+ *
+ * Validates the tc forward action and determines the destination VSI and queue
+ * for the forward action.
+ */
+static struct ice_vsi *
+ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
+{
+ struct ice_rx_ring *ring = NULL;
+ struct ice_vsi *dest_vsi = NULL;
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ u32 tc_class;
+
+ dev = ice_pf_to_dev(pf);
+
+ /* Get the destination VSI and/or destination queue and validate them */
+ switch (tc_fltr->action.fltr_act) {
+ case ICE_FWD_TO_VSI:
+ tc_class = tc_fltr->action.fwd.tc.tc_class;
+ /* Select the destination VSI */
+ if (tc_class < ICE_CHNL_START_TC) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+ "Unable to add filter because of unsupported destination");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ /* Locate ADQ VSI depending on hw_tc number */
+ dest_vsi = vsi->tc_map_vsi[tc_class];
+ break;
+ case ICE_FWD_TO_Q:
+ /* Locate the Rx queue */
+ ring = ice_locate_rx_ring_using_queue(vsi, tc_fltr);
+ if (!ring) {
+ dev_err(dev,
+ "Unable to locate Rx queue for action fwd_to_queue: %u\n",
+ tc_fltr->action.fwd.q.queue);
+ return ERR_PTR(-EINVAL);
+ }
+ /* Determine destination VSI even though the action is
+ * FWD_TO_QUEUE, because QUEUE is associated with VSI
+ */
+ dest_vsi = tc_fltr->dest_vsi;
+ break;
+ default:
+ dev_err(dev,
+ "Unable to add filter because of unsupported action %u (supported actions: fwd to tc, fwd to queue)\n",
+ tc_fltr->action.fltr_act);
+ return ERR_PTR(-EINVAL);
+ }
+ /* Must have valid dest_vsi (it could be main VSI or ADQ VSI) */
+ if (!dest_vsi) {
+ dev_err(dev,
+ "Unable to add filter because specified destination VSI doesn't exist\n");
+ return ERR_PTR(-EINVAL);
+ }
+ return dest_vsi;
+}
+
+/**
* ice_add_tc_flower_adv_fltr - add appropriate filter rules
* @vsi: Pointer to VSI
* @tc_fltr: Pointer to TC flower filter structure
@@ -490,7 +861,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
u32 flags = tc_fltr->flags;
- struct ice_vsi *ch_vsi;
+ struct ice_vsi *dest_vsi;
struct device *dev;
u16 lkups_cnt = 0;
u16 l4_proto = 0;
@@ -512,11 +883,12 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
return -EOPNOTSUPP;
}
- /* get the channel (aka ADQ VSI) */
- if (tc_fltr->dest_vsi)
- ch_vsi = tc_fltr->dest_vsi;
- else
- ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];
+ /* validate forwarding action VSI and queue */
+ if (ice_is_forward_action(tc_fltr->action.fltr_act)) {
+ dest_vsi = ice_tc_forward_action(vsi, tc_fltr);
+ if (IS_ERR(dest_vsi))
+ return PTR_ERR(dest_vsi);
+ }
lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
@@ -530,30 +902,41 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
}
rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
- if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
- if (!ch_vsi) {
- NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
- ret = -EINVAL;
- goto exit;
- }
+ /* specify the cookie as filter_rule_id */
+ rule_info.fltr_rule_id = tc_fltr->cookie;
- rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
- rule_info.sw_act.vsi_handle = ch_vsi->idx;
- rule_info.priority = 7;
+ switch (tc_fltr->action.fltr_act) {
+ case ICE_FWD_TO_VSI:
+ rule_info.sw_act.vsi_handle = dest_vsi->idx;
+ rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
- tc_fltr->action.tc_class,
+ tc_fltr->action.fwd.tc.tc_class,
rule_info.sw_act.vsi_handle, lkups_cnt);
- } else {
- rule_info.sw_act.flag |= ICE_FLTR_TX;
- rule_info.sw_act.src = vsi->idx;
- rule_info.rx = false;
+ break;
+ case ICE_FWD_TO_Q:
+ /* HW queue number in global space */
+ rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue;
+ rule_info.sw_act.vsi_handle = dest_vsi->idx;
+ rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.rx = true;
+ dev_dbg(dev, "add switch rule action to forward to queue:%u (HW queue %u), lkups_cnt:%u\n",
+ tc_fltr->action.fwd.q.queue,
+ tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
+ break;
+ case ICE_DROP_PACKET:
+ rule_info.sw_act.flag |= ICE_FLTR_RX;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.rx = true;
+ rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto exit;
}
- /* specify the cookie as filter_rule_id */
- rule_info.fltr_rule_id = tc_fltr->cookie;
-
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
if (ret == -EEXIST) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack,
@@ -563,7 +946,6 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
} else if (ret) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack,
"Unable to add filter due to error");
- ret = -EIO;
goto exit;
}
@@ -572,19 +954,14 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
*/
tc_fltr->rid = rule_added.rid;
tc_fltr->rule_id = rule_added.rule_id;
- if (tc_fltr->action.tc_class > 0 && ch_vsi) {
- /* For PF ADQ, VSI type is set as ICE_VSI_CHNL, and
- * for PF ADQ filter, it is not yet set in tc_fltr,
- * hence store the dest_vsi ptr in tc_fltr
- */
- if (ch_vsi->type == ICE_VSI_CHNL)
- tc_fltr->dest_vsi = ch_vsi;
+ tc_fltr->dest_vsi_handle = rule_added.vsi_handle;
+ if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI ||
+ tc_fltr->action.fltr_act == ICE_FWD_TO_Q) {
+ tc_fltr->dest_vsi = dest_vsi;
/* keep track of advanced switch filter for
- * destination VSI (channel VSI)
+ * destination VSI
*/
- ch_vsi->num_chnl_fltr++;
- /* in this case, dest_id is VSI handle (sw handle) */
- tc_fltr->dest_id = rule_added.vsi_handle;
+ dest_vsi->num_chnl_fltr++;
/* keeps track of channel filters for PF VSI */
if (vsi->type == ICE_VSI_PF &&
@@ -592,16 +969,57 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
pf->num_dmac_chnl_fltrs++;
}
- dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
- lkups_cnt, flags,
- tc_fltr->action.tc_class, rule_added.rid,
- rule_added.rule_id, rule_added.vsi_handle);
+ switch (tc_fltr->action.fltr_act) {
+ case ICE_FWD_TO_VSI:
+ dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to TC %u, rid %u, rule_id %u, vsi_idx %u\n",
+ lkups_cnt, flags,
+ tc_fltr->action.fwd.tc.tc_class, rule_added.rid,
+ rule_added.rule_id, rule_added.vsi_handle);
+ break;
+ case ICE_FWD_TO_Q:
+ dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to queue: %u (HW queue %u) , rid %u, rule_id %u\n",
+ lkups_cnt, flags, tc_fltr->action.fwd.q.queue,
+ tc_fltr->action.fwd.q.hw_queue, rule_added.rid,
+ rule_added.rule_id);
+ break;
+ case ICE_DROP_PACKET:
+ dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is drop, rid %u, rule_id %u\n",
+ lkups_cnt, flags, rule_added.rid, rule_added.rule_id);
+ break;
+ default:
+ break;
+ }
exit:
kfree(list);
return ret;
}
/**
+ * ice_tc_set_pppoe - Parse PPPoE fields from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: Pointer to outer header fields
+ * @returns PPP protocol used in filter (ppp_ses or ppp_disc)
+ */
+static u16
+ice_tc_set_pppoe(struct flow_match_pppoe *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers)
+{
+ if (match->mask->session_id) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_PPPOE_SESSID;
+ headers->pppoe_hdr.session_id = match->key->session_id;
+ }
+
+ if (match->mask->ppp_proto) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_PPP_PROTO;
+ headers->pppoe_hdr.ppp_proto = match->key->ppp_proto;
+ }
+
+ return be16_to_cpu(match->key->type);
+}
+
+/**
* ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
* @match: Pointer to flow match structure
* @fltr: Pointer to filter structure
@@ -695,6 +1113,40 @@ ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
}
/**
+ * ice_tc_set_tos_ttl - Parse IP ToS/TTL from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ * @is_encap: set true for tunnel
+ */
+static void
+ice_tc_set_tos_ttl(struct flow_match_ip *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers,
+ bool is_encap)
+{
+ if (match->mask->tos) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TOS;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_IP_TOS;
+
+ headers->l3_key.tos = match->key->tos;
+ headers->l3_mask.tos = match->mask->tos;
+ }
+
+ if (match->mask->ttl) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TTL;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_IP_TTL;
+
+ headers->l3_key.ttl = match->key->ttl;
+ headers->l3_mask.ttl = match->mask->ttl;
+ }
+}
+
+/**
* ice_tc_set_port - Parse ports from TC flower filter
* @match: Flow match structure
* @fltr: Pointer to filter structure
@@ -711,7 +1163,7 @@ ice_tc_set_port(struct flow_match_ports match,
fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
else
fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
- fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
+
headers->l4_key.dst_port = match.key->dst;
headers->l4_mask.dst_port = match.mask->dst;
}
@@ -720,7 +1172,7 @@ ice_tc_set_port(struct flow_match_ports match,
fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
else
fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
- fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
+
headers->l4_key.src_port = match.key->src;
headers->l4_mask.src_port = match.mask->src;
}
@@ -745,6 +1197,40 @@ ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
return NULL;
}
+/**
+ * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
+ * @match: Flow match structure
+ * @fltr: Pointer to filter structure
+ *
+ * GTP-C/GTP-U is selected based on destination port number (enc_dst_port).
+ * Before calling this function, fltr->tunnel_type should be set to TNL_GTPU,
+ * therefore making GTP-U the default choice (when destination port number is
+ * not specified).
+ */
+static int
+ice_parse_gtp_type(struct flow_match_ports match,
+ struct ice_tc_flower_fltr *fltr)
+{
+ u16 dst_port;
+
+ if (match.key->dst) {
+ dst_port = be16_to_cpu(match.key->dst);
+
+ switch (dst_port) {
+ case 2152:
+ break;
+ case 2123:
+ fltr->tunnel_type = TNL_GTPC;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int
ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct ice_tc_flower_fltr *fltr)
@@ -789,10 +1275,7 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct flow_match_ip match;
flow_rule_match_enc_ip(rule, &match);
- headers->l3_key.tos = match.key->tos;
- headers->l3_key.ttl = match.key->ttl;
- headers->l3_mask.tos = match.mask->tos;
- headers->l3_mask.ttl = match.mask->ttl;
+ ice_tc_set_tos_ttl(&match, fltr, headers, true);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
@@ -800,8 +1283,28 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct flow_match_ports match;
flow_rule_match_enc_ports(rule, &match);
- if (ice_tc_set_port(match, fltr, headers, true))
- return -EINVAL;
+
+ if (fltr->tunnel_type != TNL_GTPU) {
+ if (ice_tc_set_port(match, fltr, headers, true))
+ return -EINVAL;
+ } else {
+ if (ice_parse_gtp_type(match, fltr))
+ return -EINVAL;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ struct flow_match_enc_opts match;
+
+ flow_rule_match_enc_opts(rule, &match);
+
+ memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
+ sizeof(struct gtp_pdu_session_info));
+
+ memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
+ sizeof(struct gtp_pdu_session_info));
+
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
}
return 0;
@@ -832,6 +1335,7 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_CVLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
@@ -839,8 +1343,12 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT(FLOW_DISSECTOR_KEY_IP) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
- BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_PPPOE) |
+ BIT(FLOW_DISSECTOR_KEY_L2TPV3))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
return -EOPNOTSUPP;
}
@@ -880,7 +1388,9 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
n_proto_key = ntohs(match.key->n_proto);
n_proto_mask = ntohs(match.mask->n_proto);
- if (n_proto_key == ETH_P_ALL || n_proto_key == 0) {
+ if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
+ fltr->tunnel_type == TNL_GTPU ||
+ fltr->tunnel_type == TNL_GTPC) {
n_proto_key = 0;
n_proto_mask = 0;
} else {
@@ -934,16 +1444,71 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
if (match.mask->vlan_id) {
if (match.mask->vlan_id == VLAN_VID_MASK) {
fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
+ headers->vlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id &
+ VLAN_VID_MASK);
} else {
NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
return -EINVAL;
}
}
- headers->vlan_hdr.vlan_id =
- cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
- if (match.mask->vlan_priority)
- headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
+ if (match.mask->vlan_priority) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
+ headers->vlan_hdr.vlan_prio =
+ be16_encode_bits(match.key->vlan_priority,
+ VLAN_PRIO_MASK);
+ }
+
+ if (match.mask->vlan_tpid)
+ headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ struct flow_match_vlan match;
+
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Double VLAN mode is not enabled");
+ return -EINVAL;
+ }
+
+ flow_rule_match_cvlan(rule, &match);
+
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id == VLAN_VID_MASK) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
+ headers->cvlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id &
+ VLAN_VID_MASK);
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Bad CVLAN mask");
+ return -EINVAL;
+ }
+ }
+
+ if (match.mask->vlan_priority) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
+ headers->cvlan_hdr.vlan_prio =
+ be16_encode_bits(match.key->vlan_priority,
+ VLAN_PRIO_MASK);
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) {
+ struct flow_match_pppoe match;
+
+ flow_rule_match_pppoe(rule, &match);
+ n_proto_key = ice_tc_set_pppoe(&match, fltr, headers);
+
+ /* If ethertype equals ETH_P_PPP_SES, n_proto might be
+ * overwritten by encapsulated protocol (ppp_proto field) or set
+ * to 0. To correct this, flow_match_pppoe provides the type
+ * field, which contains the actual ethertype (ETH_P_PPP_SES).
+ */
+ headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
+ headers->l2_mask.n_proto = cpu_to_be16(0xFFFF);
+ fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
@@ -970,6 +1535,22 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return -EINVAL;
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ ice_tc_set_tos_ttl(&match, fltr, headers, false);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_L2TPV3)) {
+ struct flow_match_l2tpv3 match;
+
+ flow_rule_match_l2tpv3(rule, &match);
+
+ fltr->flags |= ICE_TC_FLWR_FIELD_L2TPV3_SESSID;
+ headers->l2tpv3_hdr.session_id = match.key->session_id;
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
@@ -1008,43 +1589,15 @@ ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
}
/**
- * ice_handle_tclass_action - Support directing to a traffic class
+ * ice_prep_adq_filter - Prepare ADQ filter with the required additional headers
* @vsi: Pointer to VSI
- * @cls_flower: Pointer to TC flower offload structure
* @fltr: Pointer to TC flower filter structure
*
- * Support directing traffic to a traffic class
+ * Prepare ADQ filter with the required additional header fields
*/
static int
-ice_handle_tclass_action(struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower,
- struct ice_tc_flower_fltr *fltr)
+ice_prep_adq_filter(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
- int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
- struct ice_vsi *main_vsi;
-
- if (tc < 0) {
- NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
- return -EINVAL;
- }
- if (!tc) {
- NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
- return -EINVAL;
- }
-
- if (!(vsi->all_enatc & BIT(tc))) {
- NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of non-existence destination");
- return -EINVAL;
- }
-
- /* Redirect to a TC class or Queue Group */
- main_vsi = ice_get_main_vsi(vsi->back);
- if (!main_vsi || !main_vsi->netdev) {
- NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unable to add filter because of invalid netdevice");
- return -EINVAL;
- }
-
if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
(fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
ICE_TC_FLWR_FIELD_SRC_MAC))) {
@@ -1056,29 +1609,28 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
/* For ADQ, filter must include dest MAC address, otherwise unwanted
* packets with unrelated MAC address get delivered to ADQ VSIs as long
* as remaining filter criteria is satisfied such as dest IP address
- * and dest/src L4 port. Following code is trying to handle:
- * 1. For non-tunnel, if user specify MAC addresses, use them (means
- * this code won't do anything
+ * and dest/src L4 port. The code below handles the following cases:
+ * 1. For non-tunnel, if the user specifies MAC addresses, use them.
* 2. For non-tunnel, if user didn't specify MAC address, add implicit
* dest MAC to be lower netdev's active unicast MAC address
+ * 3. For tunnel, the TC filter through the flower classifier currently has
+ * no provision for the user to specify the outer DMAC, so the driver
+ * implicitly adds the outer dest MAC as the lower netdev's active
+ * unicast MAC address.
*/
- if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) {
- ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
- main_vsi->netdev->dev_addr);
- eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
+ if (fltr->tunnel_type != TNL_LAST &&
+ !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;
+
+ if (fltr->tunnel_type == TNL_LAST &&
+ !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
- }
- /* validate specified dest MAC address, make sure either it belongs to
- * lower netdev or any of MACVLAN. MACVLANs MAC address are added as
- * unicast MAC filter destined to main VSI.
- */
- if (!ice_mac_fltr_exist(&main_vsi->back->hw,
- fltr->outer_headers.l2_key.dst_mac,
- main_vsi->idx)) {
- NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
- return -EINVAL;
+ if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
+ ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
+ vsi->netdev->dev_addr);
+ eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
}
/* Make sure VLAN is already added to main VSI, before allowing ADQ to
@@ -1087,17 +1639,94 @@ ice_handle_tclass_action(struct ice_vsi *vsi,
if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);
- if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
- main_vsi->idx)) {
+ if (!ice_vlan_fltr_exist(&vsi->back->hw, vlan_id, vsi->idx)) {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
return -EINVAL;
}
}
+ return 0;
+}
+
+/**
+ * ice_handle_tclass_action - Support directing to a traffic class
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to TC flower offload structure
+ * @fltr: Pointer to TC flower filter structure
+ *
+ * Support directing traffic to a traffic class/queue-set
+ */
+static int
+ice_handle_tclass_action(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower,
+ struct ice_tc_flower_fltr *fltr)
+{
+ int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
+
+ /* user specified hw_tc (must be non-zero for ADQ TC), action is forward
+ * to hw_tc (i.e. ADQ channel number)
+ */
+ if (tc < ICE_CHNL_START_TC) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because of unsupported destination");
+ return -EOPNOTSUPP;
+ }
+ if (!(vsi->all_enatc & BIT(tc))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because of non-existence destination");
+ return -EINVAL;
+ }
fltr->action.fltr_act = ICE_FWD_TO_VSI;
- fltr->action.tc_class = tc;
+ fltr->action.fwd.tc.tc_class = tc;
- return 0;
+ return ice_prep_adq_filter(vsi, fltr);
+}
+
+static int
+ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
+ struct flow_action_entry *act)
+{
+ struct ice_vsi *ch_vsi = NULL;
+ u16 queue = act->rx_queue;
+
+ if (queue >= vsi->num_rxq) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because specified queue is invalid");
+ return -EINVAL;
+ }
+ fltr->action.fltr_act = ICE_FWD_TO_Q;
+ fltr->action.fwd.q.queue = queue;
+ /* determine corresponding HW queue */
+ fltr->action.fwd.q.hw_queue = vsi->rxq_map[queue];
+
+ /* If ADQ is configured, and the queue belongs to ADQ VSI, then prepare
+ * ADQ switch filter
+ */
+ ch_vsi = ice_locate_vsi_using_queue(vsi, fltr);
+ if (!ch_vsi)
+ return -EINVAL;
+ fltr->dest_vsi = ch_vsi;
+ if (!ice_is_chnl_fltr(fltr))
+ return 0;
+
+ return ice_prep_adq_filter(vsi, fltr);
+}
+
+static int
+ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
+ struct flow_action_entry *act)
+{
+ switch (act->id) {
+ case FLOW_ACTION_RX_QUEUE_MAPPING:
+ /* forward to queue */
+ return ice_tc_forward_to_queue(vsi, fltr, act);
+ case FLOW_ACTION_DROP:
+ fltr->action.fltr_act = ICE_DROP_PACKET;
+ return 0;
+ default:
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported TC action");
+ return -EOPNOTSUPP;
+ }
}
/**
@@ -1116,7 +1745,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
struct flow_action *flow_action = &rule->action;
struct flow_action_entry *act;
- int i;
+ int i, err;
if (cls_flower->classid)
return ice_handle_tclass_action(vsi, cls_flower, fltr);
@@ -1125,21 +1754,13 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
return -EINVAL;
flow_action_for_each(i, act, flow_action) {
- if (ice_is_eswitch_mode_switchdev(vsi->back)) {
- int err = ice_eswitch_tc_parse_action(fltr, act);
-
- if (err)
- return err;
- continue;
- }
- /* Allow only one rule per filter */
-
- /* Drop action */
- if (act->id == FLOW_ACTION_DROP) {
- NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
- return -EINVAL;
- }
- fltr->action.fltr_act = ICE_FWD_TO_VSI;
+ if (ice_is_eswitch_mode_switchdev(vsi->back))
+ err = ice_eswitch_tc_parse_action(fltr, act);
+ else
+ err = ice_tc_parse_action(vsi, fltr, act);
+ if (err)
+ return err;
+ continue;
}
return 0;
}
@@ -1159,10 +1780,10 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_rem.rid = fltr->rid;
rule_rem.rule_id = fltr->rule_id;
- rule_rem.vsi_handle = fltr->dest_id;
+ rule_rem.vsi_handle = fltr->dest_vsi_handle;
err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
if (err) {
- if (err == ICE_ERR_DOES_NOT_EXIST) {
+ if (err == -ENOENT) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
return -ENOENT;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index 319049477959..8d5e22ac7023 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -22,9 +22,22 @@
#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15)
#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
+#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18)
+#define ICE_TC_FLWR_FIELD_CVLAN BIT(19)
+#define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(20)
+#define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(21)
+#define ICE_TC_FLWR_FIELD_IP_TOS BIT(22)
+#define ICE_TC_FLWR_FIELD_IP_TTL BIT(23)
+#define ICE_TC_FLWR_FIELD_ENC_IP_TOS BIT(24)
+#define ICE_TC_FLWR_FIELD_ENC_IP_TTL BIT(25)
+#define ICE_TC_FLWR_FIELD_L2TPV3_SESSID BIT(26)
+#define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27)
+#define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
+#define ICE_IPV6_HDR_TC_MASK 0xFF00000
+
struct ice_indr_block_priv {
struct net_device *netdev;
struct ice_netdev_priv *np;
@@ -32,13 +45,32 @@ struct ice_indr_block_priv {
};
struct ice_tc_flower_action {
- u32 tc_class;
+ /* forward action specific params */
+ union {
+ struct {
+ u32 tc_class; /* forward to hw_tc */
+ u32 rsvd;
+ } tc;
+ struct {
+ u16 queue; /* forward to queue */
+ /* To add filter in HW, absolute queue number in global
+ * space of queues (between 0...N) is needed
+ */
+ u16 hw_queue;
+ } q;
+ } fwd;
enum ice_sw_fwd_act_type fltr_act;
};
struct ice_tc_vlan_hdr {
__be16 vlan_id; /* Only last 12 bits valid */
- u16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
+ __be16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
+ __be16 vlan_tpid;
+};
+
+struct ice_tc_pppoe_hdr {
+ __be16 session_id;
+ __be16 ppp_proto;
};
struct ice_tc_l2_hdr {
@@ -70,6 +102,10 @@ struct ice_tc_l3_hdr {
u8 ttl;
};
+struct ice_tc_l2tpv3_hdr {
+ __be32 session_id;
+};
+
struct ice_tc_l4_hdr {
__be16 dst_port;
__be16 src_port;
@@ -80,6 +116,9 @@ struct ice_tc_flower_lyr_2_4_hdrs {
struct ice_tc_l2_hdr l2_key;
struct ice_tc_l2_hdr l2_mask;
struct ice_tc_vlan_hdr vlan_hdr;
+ struct ice_tc_vlan_hdr cvlan_hdr;
+ struct ice_tc_pppoe_hdr pppoe_hdr;
+ struct ice_tc_l2tpv3_hdr l2tpv3_hdr;
/* L3 (IPv4[6]) layer fields with their mask */
struct ice_tc_l3_hdr l3_key;
struct ice_tc_l3_hdr l3_mask;
@@ -105,11 +144,11 @@ struct ice_tc_flower_fltr {
*/
u16 rid;
u16 rule_id;
- /* this could be queue/vsi_idx (sw handle)/queue_group, depending upon
- * destination type
+ /* VSI handle of the destination VSI (it could be main PF VSI, CHNL_VSI,
+ * VF VSI)
*/
- u16 dest_id;
- /* if dest_id is vsi_idx, then need to store destination VSI ptr */
+ u16 dest_vsi_handle;
+ /* ptr to destination VSI */
struct ice_vsi *dest_vsi;
/* direction of fltr for eswitch use case */
enum ice_eswitch_fltr_direction direction;
@@ -119,6 +158,8 @@ struct ice_tc_flower_fltr {
struct ice_tc_flower_lyr_2_4_hdrs inner_headers;
struct ice_vsi *src_vsi;
__be32 tenant_id;
+ struct gtp_pdu_session_info gtp_pdu_info_keys;
+ struct gtp_pdu_session_info gtp_pdu_info_masks;
u32 flags;
u8 tunnel_type;
struct ice_tc_flower_action action;
@@ -134,12 +175,23 @@ struct ice_tc_flower_fltr {
* @f: Pointer to tc-flower filter
*
* Criteria to determine of given filter is valid channel filter
- * or not is based on its "destination". If destination is hw_tc (aka tc_class)
- * and it is non-zero, then it is valid channel (aka ADQ) filter
+ * or not is based on its destination.
+ * For forward to VSI action, if destination is valid hw_tc (aka tc_class)
+ * and in supported range of TCs for ADQ, then return true.
+ * For forward to queue, as long as dest_vsi is valid and it is of type
+ * VSI_CHNL (PF ADQ VSI is of type VSI_CHNL), return true.
+ * NOTE: For forward to queue, correct dest_vsi is still set in tc_fltr based
+ * on destination queue specified.
*/
static inline bool ice_is_chnl_fltr(struct ice_tc_flower_fltr *f)
{
- return !!f->action.tc_class;
+ if (f->action.fltr_act == ICE_FWD_TO_VSI)
+ return f->action.fwd.tc.tc_class >= ICE_CHNL_START_TC &&
+ f->action.fwd.tc.tc_class < ICE_CHNL_MAX_TC;
+ else if (f->action.fltr_act == ICE_FWD_TO_Q)
+ return f->dest_vsi && f->dest_vsi->type == ICE_VSI_CHNL;
+
+ return false;
}
/**
@@ -159,4 +211,14 @@ ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
void ice_replay_tc_fltrs(struct ice_pf *pf);
bool ice_is_tunnel_supported(struct net_device *dev);
+static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act)
+{
+ switch (fltr_act) {
+ case ICE_FWD_TO_VSI:
+ case ICE_FWD_TO_Q:
+ return true;
+ default:
+ return false;
+ }
+}
#endif /* _ICE_TC_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index cf685247c07a..ae98d5a8ff60 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -216,6 +216,30 @@ DEFINE_EVENT(ice_xmit_template, name, \
DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring);
DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring_drop);
+DECLARE_EVENT_CLASS(ice_tx_tstamp_template,
+ TP_PROTO(struct sk_buff *skb, int idx),
+
+ TP_ARGS(skb, idx),
+
+ TP_STRUCT__entry(__field(void *, skb)
+ __field(int, idx)),
+
+ TP_fast_assign(__entry->skb = skb;
+ __entry->idx = idx;),
+
+ TP_printk("skb %pK idx %d",
+ __entry->skb, __entry->idx)
+);
+#define DEFINE_TX_TSTAMP_OP_EVENT(name) \
+DEFINE_EVENT(ice_tx_tstamp_template, name, \
+ TP_PROTO(struct sk_buff *skb, int idx), \
+ TP_ARGS(skb, idx))
+
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_request);
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_req);
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_done);
+DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_complete);
+
/* End tracepoints */
#endif /* _ICE_TRACE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index bc3ba19dc88f..4fcf2d07eb85 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -3,10 +3,12 @@
/* The driver transmit and receive code */
-#include <linux/prefetch.h>
#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
+#include <net/mpls.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
@@ -83,7 +85,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
ICE_TX_DESC_CMD_RE;
- tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
+ tx_buf->type = ICE_TX_BUF_DUMMY;
tx_buf->raw_buf = raw_packet;
tx_desc->cmd_type_offset_bsz =
@@ -110,27 +112,29 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
- if (tx_buf->skb) {
- if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
- devm_kfree(ring->dev, tx_buf->raw_buf);
- else if (ice_ring_is_xdp(ring))
- page_frag_free(tx_buf->raw_buf);
- else
- dev_kfree_skb_any(tx_buf->skb);
- if (dma_unmap_len(tx_buf, len))
- dma_unmap_single(ring->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- } else if (dma_unmap_len(tx_buf, len)) {
+ if (dma_unmap_len(tx_buf, len))
dma_unmap_page(ring->dev,
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
+
+ switch (tx_buf->type) {
+ case ICE_TX_BUF_DUMMY:
+ devm_kfree(ring->dev, tx_buf->raw_buf);
+ break;
+ case ICE_TX_BUF_SKB:
+ dev_kfree_skb_any(tx_buf->skb);
+ break;
+ case ICE_TX_BUF_XDP_TX:
+ page_frag_free(tx_buf->raw_buf);
+ break;
+ case ICE_TX_BUF_XDP_XMIT:
+ xdp_return_frame(tx_buf->xdpf);
+ break;
}
tx_buf->next_to_watch = NULL;
- tx_buf->skb = NULL;
+ tx_buf->type = ICE_TX_BUF_EMPTY;
dma_unmap_len_set(tx_buf, len, 0);
/* tx_buf must be completely set up in the transmit path */
}
@@ -219,6 +223,9 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
+ /* get the bql data ready */
+ netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
+
tx_buf = &tx_ring->tx_buf[i];
tx_desc = ICE_TX_DESC(tx_ring, i);
i -= tx_ring->count;
@@ -232,6 +239,9 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
if (!eop_desc)
break;
+ /* follow the guidelines of other drivers */
+ prefetchw(&tx_buf->skb->users);
+
smp_rmb(); /* prevent any other reads prior to eop_desc */
ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
@@ -257,7 +267,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
DMA_TO_DEVICE);
/* clear tx_buf data */
- tx_buf->skb = NULL;
+ tx_buf->type = ICE_TX_BUF_EMPTY;
dma_unmap_len_set(tx_buf, len, 0);
/* unmap remaining buffers */
@@ -303,9 +313,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
tx_ring->next_to_clean = i;
ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
-
- netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
- total_bytes);
+ netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
@@ -314,12 +322,10 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
* sees the new next_to_clean.
*/
smp_mb();
- if (__netif_subqueue_stopped(tx_ring->netdev,
- tx_ring->q_index) &&
+ if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
!test_bit(ICE_VSI_DOWN, vsi->state)) {
- netif_wake_subqueue(tx_ring->netdev,
- tx_ring->q_index);
- ++tx_ring->tx_stats.restart_q;
+ netif_tx_wake_queue(txring_txq(tx_ring));
+ ++tx_ring->ring_stats->tx_stats.restart_q;
}
}
@@ -361,7 +367,7 @@ int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->tx_stats.prev_pkt = -1;
+ tx_ring->ring_stats->tx_stats.prev_pkt = -1;
return 0;
err:
@@ -376,6 +382,7 @@ err:
*/
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
+ struct xdp_buff *xdp = &rx_ring->xdp;
struct device *dev = rx_ring->dev;
u32 size;
u16 i;
@@ -384,16 +391,16 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
if (!rx_ring->rx_buf)
return;
- if (rx_ring->skb) {
- dev_kfree_skb(rx_ring->skb);
- rx_ring->skb = NULL;
- }
-
if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
}
+ if (xdp->data) {
+ xdp_return_buff(xdp);
+ xdp->data = NULL;
+ }
+
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
@@ -419,7 +426,10 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
}
rx_skip_free:
- memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
+ if (rx_ring->xsk_pool)
+ memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
+ else
+ memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
/* Zero out the descriptor ring */
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -428,6 +438,7 @@ rx_skip_free:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
+ rx_ring->first_desc = 0;
rx_ring->next_to_use = 0;
}
@@ -446,8 +457,13 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
rx_ring->xdp_prog = NULL;
- devm_kfree(rx_ring->dev, rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
+ if (rx_ring->xsk_pool) {
+ kfree(rx_ring->xdp_buf);
+ rx_ring->xdp_buf = NULL;
+ } else {
+ kfree(rx_ring->rx_buf);
+ rx_ring->rx_buf = NULL;
+ }
if (rx_ring->desc) {
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
@@ -475,8 +491,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_buf);
rx_ring->rx_buf =
- devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count,
- GFP_KERNEL);
+ kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
if (!rx_ring->rx_buf)
return -ENOMEM;
@@ -493,6 +508,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0;
+ rx_ring->first_desc = 0;
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
@@ -505,13 +521,21 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
return 0;
err:
- devm_kfree(dev, rx_ring->rx_buf);
+ kfree(rx_ring->rx_buf);
rx_ring->rx_buf = NULL;
return -ENOMEM;
}
+/**
+ * ice_rx_frame_truesize
+ * @rx_ring: ptr to Rx ring
+ * @size: packet buffer size
+ *
+ * Calculate the truesize, taking into account the PAGE_SIZE of the
+ * underlying arch.
+ */
static unsigned int
-ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
+ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
{
unsigned int truesize;
@@ -532,44 +556,72 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused s
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
+ * @rx_buf: Rx buffer to store the XDP action
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
-static int
+static void
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
+ struct ice_rx_buf *rx_buf)
{
- int err;
+ unsigned int ret = ICE_XDP_PASS;
u32 act;
+ if (!xdp_prog)
+ goto exit;
+
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
- return ICE_XDP_PASS;
+ break;
case XDP_TX:
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_lock(&xdp_ring->tx_lock);
- err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+ ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_unlock(&xdp_ring->tx_lock);
- if (err == ICE_XDP_CONSUMED)
+ if (ret == ICE_XDP_CONSUMED)
goto out_failure;
- return err;
+ break;
case XDP_REDIRECT:
- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
+ if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
goto out_failure;
- return ICE_XDP_REDIR;
+ ret = ICE_XDP_REDIR;
+ break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- return ICE_XDP_CONSUMED;
+ ret = ICE_XDP_CONSUMED;
}
+exit:
+ rx_buf->act = ret;
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ ice_set_rx_bufs_act(xdp, rx_ring, ret);
+}
+
+/**
+ * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
+ * @xdpf: XDP frame that will be converted to XDP buff
+ * @xdp_ring: XDP ring for transmission
+ */
+static int ice_xmit_xdp_ring(const struct xdp_frame *xdpf,
+ struct ice_tx_ring *xdp_ring)
+{
+ struct xdp_buff xdp;
+
+ xdp.data_hard_start = (void *)xdpf;
+ xdp.data = xdpf->data;
+ xdp.data_end = xdp.data + xdpf->len;
+ xdp.frame_sz = xdpf->frame_sz;
+ xdp.flags = xdpf->flags;
+
+ return __ice_xmit_xdp_ring(&xdp, xdp_ring, true);
}
/**
@@ -592,12 +644,13 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
unsigned int queue_index = smp_processor_id();
struct ice_vsi *vsi = np->vsi;
struct ice_tx_ring *xdp_ring;
+ struct ice_tx_buf *tx_buf;
int nxmit = 0, i;
if (test_bit(ICE_VSI_DOWN, vsi->state))
return -ENETDOWN;
- if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
+ if (!ice_is_xdp_ena_vsi(vsi))
return -ENXIO;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -608,19 +661,24 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdp_ring = vsi->xdp_rings[queue_index];
spin_lock(&xdp_ring->tx_lock);
} else {
+ /* Generally, should not happen */
+ if (unlikely(queue_index >= vsi->num_xdp_txq))
+ return -ENXIO;
xdp_ring = vsi->xdp_rings[queue_index];
}
+ tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
for (i = 0; i < n; i++) {
- struct xdp_frame *xdpf = frames[i];
+ const struct xdp_frame *xdpf = frames[i];
int err;
- err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+ err = ice_xmit_xdp_ring(xdpf, xdp_ring);
if (err != ICE_XDP_TX)
break;
nxmit++;
}
+ tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
if (unlikely(flags & XDP_XMIT_FLUSH))
ice_xdp_ring_update_tail(xdp_ring);
@@ -651,7 +709,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
/* alloc new page for storage */
page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_page_failed++;
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
return false;
}
@@ -664,7 +722,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
__free_pages(page, ice_rx_pg_order(rx_ring));
- rx_ring->rx_stats.alloc_page_failed++;
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
return false;
}
@@ -690,7 +748,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
* buffers. Then bump tail at most one time. Grouping like this lets us avoid
* multiple tail writes per call.
*/
-bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
@@ -767,7 +825,6 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
/**
* ice_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buf: buffer containing the page
- * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
*
* If page is reusable, we have a green light for calling ice_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
@@ -775,7 +832,7 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
* page freed
*/
static bool
-ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
+ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
@@ -786,7 +843,7 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
+ if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
return false;
#else
#define ICE_LAST_OFFSET \
@@ -808,33 +865,44 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
}
/**
- * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
+ * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
* @rx_ring: Rx descriptor ring to transact packets on
+ * @xdp: xdp buff to place the data into
* @rx_buf: buffer containing page to add
- * @skb: sk_buff to place the data into
* @size: packet length from rx_desc
*
- * This function will add the data contained in rx_buf->page to the skb.
- * It will just attach the page as a frag to the skb.
- * The function will then update the page offset.
+ * This function will add the data contained in rx_buf->page to the xdp buf.
+ * It will just attach the page as a frag.
*/
-static void
-ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct sk_buff *skb, unsigned int size)
+static int
+ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ struct ice_rx_buf *rx_buf, const unsigned int size)
{
-#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
-#else
- unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
-#endif
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
if (!size)
- return;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
- rx_buf->page_offset, size, truesize);
+ return 0;
+
+ if (!xdp_buff_has_frags(xdp)) {
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(xdp);
+ }
+
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+ return -ENOMEM;
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
+ rx_buf->page_offset, size);
+ sinfo->xdp_frags_size += size;
- /* page is being used so we must update the page offset */
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ if (page_is_pfmemalloc(rx_buf->page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
+ return 0;
}
/**
@@ -870,19 +938,19 @@ ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
* ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
- * @rx_buf_pgcnt: rx_buf page refcount
+ * @ntc: index of next to clean element
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
- int *rx_buf_pgcnt)
+ const unsigned int ntc)
{
struct ice_rx_buf *rx_buf;
- rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
- *rx_buf_pgcnt =
+ rx_buf = &rx_ring->rx_buf[ntc];
+ rx_buf->pgcnt =
#if (PAGE_SIZE < 8192)
page_count(rx_buf->page);
#else
@@ -906,26 +974,25 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
/**
* ice_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: Rx buffer to pull data from
* @xdp: xdp_buff pointing to the data
*
- * This function builds an skb around an existing Rx buffer, taking care
- * to set up the skb correctly and avoid any memcpy overhead.
+ * This function builds an skb around an existing XDP buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead. Driver has
+ * already combined frags (if any) to skb_shared_info.
*/
static struct sk_buff *
-ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct xdp_buff *xdp)
+ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
u8 metasize = xdp->data - xdp->data_meta;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(xdp->data_end -
- xdp->data_hard_start);
-#endif
+ struct skb_shared_info *sinfo = NULL;
+ unsigned int nr_frags;
struct sk_buff *skb;
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
+
/* Prefetch first cache line of first page. If xdp->data_meta
* is unused, this points exactly as xdp->data, otherwise we
* likely have a consumer accessing first few bytes of meta
@@ -933,7 +1000,7 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
*/
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
if (unlikely(!skb))
return NULL;
@@ -948,8 +1015,11 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
if (metasize)
skb_metadata_set(skb, metasize);
- /* buffer is used by skb, update page_offset */
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ xdp_update_skb_shared_info(skb, nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
@@ -957,7 +1027,6 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
/**
* ice_construct_skb - Allocate skb and populate it
* @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: Rx buffer to pull data from
* @xdp: xdp_buff pointing to the data
*
* This function allocates an skb. It then populates it with the page
@@ -965,22 +1034,30 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
* skb correctly.
*/
static struct sk_buff *
-ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct xdp_buff *xdp)
+ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
unsigned int size = xdp->data_end - xdp->data;
+ struct skb_shared_info *sinfo = NULL;
+ struct ice_rx_buf *rx_buf;
+ unsigned int nr_frags = 0;
unsigned int headlen;
struct sk_buff *skb;
/* prefetch first cache line of first page */
net_prefetch(xdp->data);
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
+
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
+ rx_buf = &rx_ring->rx_buf[rx_ring->first_desc];
skb_record_rx_queue(skb, rx_ring->q_index);
/* Determine available headroom for copy */
headlen = size;
@@ -994,21 +1071,36 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
/* if we exhaust the linear part then add what is left as a frag */
size -= headlen;
if (size) {
-#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size);
-#else
- unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
-#endif
+ /* besides adding a partial frag here, we are going to add
+ * frags from the xdp_buff; make sure there is enough space
+ * for them
+ */
+ if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
skb_add_rx_frag(skb, 0, rx_buf->page,
- rx_buf->page_offset + headlen, size, truesize);
- /* buffer is used by skb, update page_offset */
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ rx_buf->page_offset + headlen, size,
+ xdp->frame_sz);
} else {
- /* buffer is unused, reset bias back to rx_buf; data was copied
- * onto skb's linear part so there's no need for adjusting
- * page offset and we can reuse this buffer as-is
+ /* buffer is unused, change the act that should be taken later
+ * on; data was copied onto skb's linear part so there's no
+ * need for adjusting page offset and we can reuse this buffer
+ * as-is
*/
- rx_buf->pagecnt_bias++;
+ rx_buf->act = ICE_SKB_CONSUMED;
+ }
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ struct skb_shared_info *skinfo = skb_shinfo(skb);
+
+ memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
+ sizeof(skb_frag_t) * nr_frags);
+
+ xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
}
return skb;
@@ -1018,26 +1110,17 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
* ice_put_rx_buf - Clean up used buffer and either recycle or free
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
- * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
*
- * This function will update next_to_clean and then clean up the contents
- * of the rx_buf. It will either recycle the buffer or unmap it and free
- * the associated resources.
+ * This function will clean up the contents of the rx_buf. It will either
+ * recycle the buffer or unmap it and free the associated resources.
*/
static void
-ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- int rx_buf_pgcnt)
+ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- u16 ntc = rx_ring->next_to_clean + 1;
-
- /* fetch, update, and store next to clean */
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
-
if (!rx_buf)
return;
- if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
+ if (ice_can_reuse_rx_page(rx_buf)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
} else {
@@ -1053,27 +1136,6 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
}
/**
- * ice_is_non_eop - process handling of non-EOP buffers
- * @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- *
- * If the buffer is an EOP buffer, this function exits returning false,
- * otherwise return true indicating that this is in fact a non-EOP buffer.
- */
-static bool
-ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
-{
- /* if we are the last buffer then there is nothing else to do */
-#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
- if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
- return false;
-
- rx_ring->rx_stats.non_eop_descs++;
-
- return true;
-}
-
-/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1087,39 +1149,42 @@ ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
*/
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
- u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
unsigned int offset = rx_ring->rx_offset;
+ struct xdp_buff *xdp = &rx_ring->xdp;
struct ice_tx_ring *xdp_ring = NULL;
- unsigned int xdp_res, xdp_xmit = 0;
- struct sk_buff *skb = rx_ring->skb;
struct bpf_prog *xdp_prog = NULL;
- struct xdp_buff xdp;
+ u32 ntc = rx_ring->next_to_clean;
+ u32 cnt = rx_ring->count;
+ u32 cached_ntc = ntc;
+ u32 xdp_xmit = 0;
+ u32 cached_ntu;
bool failure;
+ u32 first;
/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+ xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif
- xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
- if (xdp_prog)
+ if (xdp_prog) {
xdp_ring = rx_ring->xdp_ring;
+ cached_ntu = xdp_ring->next_to_use;
+ }
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
struct ice_rx_buf *rx_buf;
- unsigned char *hard_start;
+ struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
- int rx_buf_pgcnt;
u16 vlan_tag = 0;
u16 rx_ptype;
/* get the Rx desc from Rx ring based on 'next_to_clean' */
- rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ rx_desc = ICE_RX_DESC(rx_ring, ntc);
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
@@ -1127,7 +1192,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
* hardware wrote DD then it will be non-zero
*/
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
- if (!ice_test_staterr(rx_desc, stat_err_bits))
+ if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
break;
/* This memory barrier is needed to keep us from reading
@@ -1141,10 +1206,11 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
struct ice_vsi *ctrl_vsi = rx_ring->vsi;
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
- ctrl_vsi->vf_id != ICE_INVAL_VFID)
+ ctrl_vsi->vf)
ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
- ice_put_rx_buf(rx_ring, NULL, 0);
- cleaned_count++;
+ if (++ntc == cnt)
+ ntc = 0;
+ rx_ring->first_desc = ntc;
continue;
}
@@ -1152,81 +1218,69 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;
/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
+ rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
- if (!size) {
- xdp.data = NULL;
- xdp.data_end = NULL;
- xdp.data_hard_start = NULL;
- xdp.data_meta = NULL;
- goto construct_skb;
- }
+ if (!xdp->data) {
+ void *hard_start;
- hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
- offset;
- xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
+ offset;
+ xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
#if (PAGE_SIZE > 4096)
- /* At larger PAGE_SIZE, frame_sz depend on len size */
- xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
+ /* At larger PAGE_SIZE, frame_sz depends on the frame length */
+ xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif
+ xdp_buff_clear_frags_flag(xdp);
+ } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
+ break;
+ }
+ if (++ntc == cnt)
+ ntc = 0;
- if (!xdp_prog)
- goto construct_skb;
+ /* skip if it is NOP desc */
+ if (ice_is_non_eop(rx_ring, rx_desc))
+ continue;
- xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
- if (!xdp_res)
+ ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf);
+ if (rx_buf->act == ICE_XDP_PASS)
goto construct_skb;
- if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- xdp_xmit |= xdp_res;
- ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
- } else {
- rx_buf->pagecnt_bias++;
- }
- total_rx_bytes += size;
+ total_rx_bytes += xdp_get_buff_len(xdp);
total_rx_pkts++;
- cleaned_count++;
- ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
continue;
construct_skb:
- if (skb) {
- ice_add_rx_frag(rx_ring, rx_buf, skb, size);
- } else if (likely(xdp.data)) {
- if (ice_ring_uses_build_skb(rx_ring))
- skb = ice_build_skb(rx_ring, rx_buf, &xdp);
- else
- skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
- }
+ if (likely(ice_ring_uses_build_skb(rx_ring)))
+ skb = ice_build_skb(rx_ring, xdp);
+ else
+ skb = ice_construct_skb(rx_ring, xdp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
- rx_ring->rx_stats.alloc_buf_failed++;
- if (rx_buf)
- rx_buf->pagecnt_bias++;
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ rx_buf->act = ICE_XDP_CONSUMED;
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ ice_set_rx_bufs_act(xdp, rx_ring,
+ ICE_XDP_CONSUMED);
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
break;
}
-
- ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
- cleaned_count++;
-
- /* skip if it is NOP desc */
- if (ice_is_non_eop(rx_ring, rx_desc))
- continue;
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
- if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
+ if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
+ stat_err_bits))) {
dev_kfree_skb_any(skb);
continue;
}
- stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
- if (ice_test_staterr(rx_desc, stat_err_bits))
- vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
+ vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
/* pad the skb if needed, to make a valid ethernet frame */
- if (eth_skb_pad(skb)) {
- skb = NULL;
+ if (eth_skb_pad(skb))
continue;
- }
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -1240,20 +1294,38 @@ construct_skb:
ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
/* send completed skb up the stack */
ice_receive_skb(rx_ring, skb, vlan_tag);
- skb = NULL;
/* update budget accounting */
total_rx_pkts++;
}
+ first = rx_ring->first_desc;
+ while (cached_ntc != first) {
+ struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
+
+ if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ xdp_xmit |= buf->act;
+ } else if (buf->act & ICE_XDP_CONSUMED) {
+ buf->pagecnt_bias++;
+ } else if (buf->act == ICE_XDP_PASS) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ }
+
+ ice_put_rx_buf(rx_ring, buf);
+ if (++cached_ntc >= cnt)
+ cached_ntc = 0;
+ }
+ rx_ring->next_to_clean = ntc;
/* return up to cleaned_count buffers to hardware */
- failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
+ failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
- if (xdp_prog)
- ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
- rx_ring->skb = skb;
+ if (xdp_xmit)
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu);
- ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
+ if (rx_ring->ring_stats)
+ ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
+ total_rx_bytes);
/* guarantee a trip back through this routine if there was a failure */
return failure ? budget : (int)total_rx_pkts;
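
The rx_buf->act bookkeeping above defers all page-recycling work to one pass after the packet loop: each buffer only records what XDP or the stack decided, and the walk from the cached next_to_clean up to first_desc applies it with ordinary ring wraparound. Below is a minimal userspace sketch of that walk; the ring size, the struct buf fields and the ACT_* names are illustrative stand-ins for struct ice_rx_buf and the ICE_XDP_* actions, not driver definitions.

#include <stdio.h>

#define RING_CNT	8
#define ACT_PASS	0
#define ACT_CONSUMED	1
#define ACT_TX		2

struct buf { int act; int pagecnt_bias; };

/* walk the ring from 'ntc' up to (but not including) 'first', wrapping at
 * RING_CNT, and apply the action each buffer recorded during the Rx loop
 */
static void flush_actions(struct buf *ring, unsigned int ntc, unsigned int first)
{
	while (ntc != first) {
		struct buf *b = &ring[ntc];

		if (b->act == ACT_CONSUMED)
			b->pagecnt_bias++;	/* hand the page back untouched */
		else
			printf("buf %u: advance page offset (act %d)\n", ntc, b->act);

		if (++ntc == RING_CNT)
			ntc = 0;
	}
}

int main(void)
{
	struct buf ring[RING_CNT] = { 0 };

	ring[6].act = ACT_TX;
	ring[7].act = ACT_CONSUMED;
	ring[0].act = ACT_PASS;

	flush_actions(ring, 6, 1);	/* cached ntc was 6, first_desc wrapped to 1 */
	return 0;
}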
@@ -1270,15 +1342,25 @@ static void __ice_update_sample(struct ice_q_vector *q_vector,
struct ice_tx_ring *tx_ring;
ice_for_each_tx_ring(tx_ring, *rc) {
- packets += tx_ring->stats.pkts;
- bytes += tx_ring->stats.bytes;
+ struct ice_ring_stats *ring_stats;
+
+ ring_stats = tx_ring->ring_stats;
+ if (!ring_stats)
+ continue;
+ packets += ring_stats->stats.pkts;
+ bytes += ring_stats->stats.bytes;
}
} else {
struct ice_rx_ring *rx_ring;
ice_for_each_rx_ring(rx_ring, *rc) {
- packets += rx_ring->stats.pkts;
- bytes += rx_ring->stats.bytes;
+ struct ice_ring_stats *ring_stats;
+
+ ring_stats = rx_ring->ring_stats;
+ if (!ring_stats)
+ continue;
+ packets += ring_stats->stats.pkts;
+ bytes += ring_stats->stats.bytes;
}
}
@@ -1445,7 +1527,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
bool wd;
if (tx_ring->xsk_pool)
- wd = ice_clean_tx_irq_zc(tx_ring, budget);
+ wd = ice_xmit_zc(tx_ring);
else if (ice_ring_is_xdp(tx_ring))
wd = true;
else
@@ -1498,7 +1580,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
*/
- if (likely(napi_complete_done(napi, work_done))) {
+ if (napi_complete_done(napi, work_done)) {
ice_net_dim(q_vector);
ice_enable_interrupt(q_vector);
} else {
@@ -1517,7 +1599,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
*/
static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
- netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
+ netif_tx_stop_queue(txring_txq(tx_ring));
/* Memory barrier before checking head and tail */
smp_mb();
@@ -1525,9 +1607,9 @@ static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
if (likely(ICE_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
- /* A reprieve! - use start_subqueue because it doesn't call schedule */
- netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
- ++tx_ring->tx_stats.restart_q;
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_tx_start_queue(txring_txq(tx_ring));
+ ++tx_ring->ring_stats->tx_stats.restart_q;
return 0;
}
@@ -1568,6 +1650,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
struct sk_buff *skb;
skb_frag_t *frag;
dma_addr_t dma;
+ bool kick;
td_tag = off->td_l2tag1;
td_cmd = off->td_cmd;
@@ -1647,11 +1730,9 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
DMA_TO_DEVICE);
tx_buf = &tx_ring->tx_buf[i];
+ tx_buf->type = ICE_TX_BUF_FRAG;
}
- /* record bytecount for BQL */
- netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
-
/* record SW timestamp if HW timestamp is not available */
skb_tx_timestamp(first->skb);
@@ -1680,7 +1761,10 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
+ kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
+ netdev_xmit_more());
+ if (kick)
+ /* notify HW of packet */
writel(i, tx_ring->tail);
return;
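
__netdev_tx_sent_queue() merges the BQL byte accounting with the decision to ring the doorbell, so packets flagged through netdev_xmit_more() can share a single tail write. The sketch below models only that batching decision in userspace; sent_queue() and txq_model are made-up names, and real BQL also forces a kick when the queue is being stopped, which this toy omits.

#include <stdbool.h>
#include <stdio.h>

struct txq_model { unsigned long queued_bytes; };

/* model: account the bytes, and ask for a doorbell only when this is the
 * last packet of the batch (the stack has nothing more queued behind it)
 */
static bool sent_queue(struct txq_model *q, unsigned int bytes, bool xmit_more)
{
	q->queued_bytes += bytes;
	return !xmit_more;
}

int main(void)
{
	struct txq_model q = { 0 };
	unsigned int lens[] = { 1500, 1500, 60 };

	for (int i = 0; i < 3; i++) {
		bool more = (i < 2);	/* stack still has packets queued */

		if (sent_queue(&q, lens[i], more))
			printf("doorbell after packet %d (%lu bytes queued)\n",
			       i, q.queued_bytes);
	}
	return 0;
}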
@@ -1729,18 +1813,26 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol)) {
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+ } else {
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ }
/* compute outer L2 header size */
l2_len = ip.hdr - skb->data;
offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
- protocol = vlan_get_protocol(skb);
-
- if (protocol == htons(ETH_P_IP))
+ /* set the tx_flags to indicate the IP protocol type. this is
+ * required so that checksum header computation below is accurate.
+ */
+ if (ip.v4->version == 4)
first->tx_flags |= ICE_TX_FLAGS_IPV4;
- else if (protocol == htons(ETH_P_IPV6))
+ else if (ip.v6->version == 6)
first->tx_flags |= ICE_TX_FLAGS_IPV6;
if (skb->encapsulation) {
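
With MPLS in the picture the ethertype no longer tells the checksum path whether the inner packet is IPv4 or IPv6, so the flags are now derived from the IP header's version nibble. A tiny standalone model of that test follows; the byte values and the returned flag names are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* first byte of an IPv4/IPv6 header: the high nibble is the version */
static const char *ip_flags_from_header(uint8_t first_byte)
{
	uint8_t version = first_byte >> 4;

	if (version == 4)
		return "FLAGS_IPV4";
	if (version == 6)
		return "FLAGS_IPV6";
	return "unknown";
}

int main(void)
{
	printf("%s\n", ip_flags_from_header(0x45));	/* typical IPv4: ver 4, IHL 5 */
	printf("%s\n", ip_flags_from_header(0x60));	/* IPv6, traffic class 0 */
	return 0;
}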
@@ -1901,12 +1993,16 @@ ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
return;
- /* currently, we always assume 802.1Q for VLAN insertion as VLAN
- * insertion for 802.1AD is not supported
+ /* the VLAN ethertype/tpid is determined by VSI configuration and netdev
+ * feature flags; the driver allows either 802.1Q or 802.1ad VLAN offloads,
+ * but never both at once, so we only care about the VLAN ID here
*/
if (skb_vlan_tag_present(skb)) {
first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
- first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
+ if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
+ first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
+ else
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
}
ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
@@ -1934,6 +2030,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
unsigned char *hdr;
} l4;
u64 cd_mss, cd_tso_len;
+ __be16 protocol;
u32 paylen;
u8 l4_start;
int err;
@@ -1948,9 +2045,13 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
if (err < 0)
return err;
- /* cppcheck-suppress unreadVariable */
- ip.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
+ protocol = vlan_get_protocol(skb);
+
+ if (eth_p_mpls(protocol))
+ ip.hdr = skb_inner_network_header(skb);
+ else
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
/* initialize outer IP header fields */
if (ip.v4->version == 4) {
@@ -1980,8 +2081,6 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
}
/* reset pointers to inner headers */
-
- /* cppcheck-suppress unreadVariable */
ip.hdr = skb_inner_network_header(skb);
l4.hdr = skb_inner_transport_header(skb);
@@ -2217,8 +2316,10 @@ ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
/* Grab an open timestamp slot */
idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
- if (idx < 0)
+ if (idx < 0) {
+ tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++;
return;
+ }
off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
(ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
@@ -2245,12 +2346,15 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ice_trace(xmit_frame_ring, tx_ring, skb);
+ if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto out_drop;
+
count = ice_xmit_desc_count(skb);
if (ice_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
count = ice_txd_use_count(skb->len);
- tx_ring->tx_stats.tx_linearize++;
+ tx_ring->ring_stats->tx_stats.tx_linearize++;
}
/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
@@ -2261,21 +2365,32 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
*/
if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
ICE_DESCS_FOR_CTX_DESC)) {
- tx_ring->tx_stats.tx_busy++;
+ tx_ring->ring_stats->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
}
+ /* prefetch for bql data which is infrequently used */
+ netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));
+
offload.tx_ring = tx_ring;
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buf[tx_ring->next_to_use];
first->skb = skb;
+ first->type = ICE_TX_BUF_SKB;
first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
first->gso_segs = 1;
first->tx_flags = 0;
/* prepare the VLAN tagging flags for Tx */
ice_tx_prepare_vlan_flags(tx_ring, first);
+ if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
+ offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ (ICE_TX_CTX_DESC_IL2TAG2 <<
+ ICE_TXD_CTX_QW1_CMD_S));
+ offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
+ ICE_TX_FLAGS_VLAN_S;
+ }
/* set up TSO offload */
tso = ice_tso(first, &offload);
@@ -2435,11 +2550,11 @@ void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
- if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
+ if (tx_buf->type == ICE_TX_BUF_DUMMY)
devm_kfree(tx_ring->dev, tx_buf->raw_buf);
/* clear next_to_watch to prevent false hangs */
- tx_buf->raw_buf = NULL;
+ tx_buf->type = ICE_TX_BUF_EMPTY;
tx_buf->tx_flags = 0;
tx_buf->next_to_watch = NULL;
dma_unmap_len_set(tx_buf, len, 0);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index c56dd1749903..fff0efe28373 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -9,11 +9,12 @@
#define ICE_DFLT_IRQ_WORK 256
#define ICE_RXBUF_3072 3072
#define ICE_RXBUF_2048 2048
+#define ICE_RXBUF_1664 1664
#define ICE_RXBUF_1536 1536
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
-#define ICE_TX_THRESH 32
+#define ICE_MAX_FRAME_LEGACY_RX 8320
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to
@@ -24,7 +25,6 @@
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
-#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128
/* Attempt to maximize the headroom available for incoming frames. We use a 2K
@@ -112,17 +112,21 @@ static inline int ice_skb_pad(void)
(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
+#define ICE_RX_DESC_UNUSED(R) \
+ ((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->first_desc - (R)->next_to_use - 1)
+
+#define ICE_RING_QUARTER(R) ((R)->count >> 2)
+
#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
-/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be
- * freed instead of returned like skb packets.
- */
-#define ICE_TX_FLAGS_DUMMY_PKT BIT(3)
+/* Free, was ICE_TX_FLAGS_DUMMY_PKT */
#define ICE_TX_FLAGS_TSYN BIT(4)
#define ICE_TX_FLAGS_IPV4 BIT(5)
#define ICE_TX_FLAGS_IPV6 BIT(6)
#define ICE_TX_FLAGS_TUNNEL BIT(7)
+#define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN BIT(8)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
#define ICE_TX_FLAGS_VLAN_PR_S 29
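
ICE_RX_DESC_UNUSED repeats the classic producer/consumer ring identity, but anchored at first_desc so buffers that belong to a frame still being assembled are not treated as free for refill. The arithmetic can be checked in isolation with a throwaway program; struct ring below is a stand-in, not the driver's ring layout.

#include <assert.h>
#include <stdio.h>

struct ring { unsigned int count, first_desc, next_to_use; };

/* unused = slots between the producer (next_to_use) and the consumer
 * (first_desc), minus one so a completely full ring never looks empty
 */
static unsigned int rx_desc_unused(const struct ring *r)
{
	return ((r->first_desc > r->next_to_use ? 0 : r->count) +
		r->first_desc - r->next_to_use - 1);
}

int main(void)
{
	struct ring r = { .count = 512, .first_desc = 10, .next_to_use = 500 };

	printf("unused = %u\n", rx_desc_unused(&r));	/* 21: wrapped case */

	r.first_desc = 300;
	printf("unused = %u\n", rx_desc_unused(&r));	/* 311: no wrap */
	assert(rx_desc_unused(&r) < r.count);
	return 0;
}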
@@ -132,6 +136,8 @@ static inline int ice_skb_pad(void)
#define ICE_XDP_CONSUMED BIT(0)
#define ICE_XDP_TX BIT(1)
#define ICE_XDP_REDIR BIT(2)
+#define ICE_XDP_EXIT BIT(3)
+#define ICE_SKB_CONSUMED ICE_XDP_CONSUMED
#define ICE_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
@@ -140,15 +146,44 @@ static inline int ice_skb_pad(void)
#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
+/**
+ * enum ice_tx_buf_type - type of &ice_tx_buf to act on Tx completion
+ * @ICE_TX_BUF_EMPTY: unused OR XSk frame, no action required
+ * @ICE_TX_BUF_DUMMY: dummy Flow Director packet, unmap and kfree()
+ * @ICE_TX_BUF_FRAG: mapped skb OR &xdp_buff frag, only unmap DMA
+ * @ICE_TX_BUF_SKB: &sk_buff, unmap and consume_skb(), update stats
+ * @ICE_TX_BUF_XDP_TX: &xdp_buff, unmap and page_frag_free(), stats
+ * @ICE_TX_BUF_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame(), stats
+ * @ICE_TX_BUF_XSK_TX: &xdp_buff on XSk queue, xsk_buff_free(), stats
+ */
+enum ice_tx_buf_type {
+ ICE_TX_BUF_EMPTY = 0U,
+ ICE_TX_BUF_DUMMY,
+ ICE_TX_BUF_FRAG,
+ ICE_TX_BUF_SKB,
+ ICE_TX_BUF_XDP_TX,
+ ICE_TX_BUF_XDP_XMIT,
+ ICE_TX_BUF_XSK_TX,
+};
+
struct ice_tx_buf {
- struct ice_tx_desc *next_to_watch;
union {
- struct sk_buff *skb;
- void *raw_buf; /* used for XDP */
+ struct ice_tx_desc *next_to_watch;
+ u32 rs_idx;
+ };
+ union {
+ void *raw_buf; /* used for XDP_TX and FDir rules */
+ struct sk_buff *skb; /* used for .ndo_start_xmit() */
+ struct xdp_frame *xdpf; /* used for .ndo_xdp_xmit() */
+ struct xdp_buff *xdp; /* used for XDP_TX ZC */
};
unsigned int bytecount;
- unsigned short gso_segs;
- u32 tx_flags;
+ union {
+ unsigned int gso_segs;
+ unsigned int nr_frags; /* used for mbuf XDP */
+ };
+ u32 type:16; /* &ice_tx_buf_type */
+ u32 tx_flags:16;
DEFINE_DMA_UNMAP_LEN(len);
DEFINE_DMA_UNMAP_ADDR(dma);
};
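
Tagging each Tx buffer with an ice_tx_buf_type lets completion cleanup dispatch directly on what the slot carried instead of inferring it from flags. The userspace sketch below mirrors only the shape of that dispatch; the BUF_* names and the printf calls stand in for consume_skb(), page_frag_free(), xdp_return_frame() and friends.

#include <stdio.h>

enum buf_type { BUF_EMPTY, BUF_DUMMY, BUF_FRAG, BUF_SKB, BUF_XDP_TX, BUF_XDP_XMIT };

struct tx_buf { enum buf_type type; const char *payload; };

static void clean_tx_buf(struct tx_buf *b)
{
	switch (b->type) {
	case BUF_SKB:
		printf("consume skb %s, update stack stats\n", b->payload);
		break;
	case BUF_XDP_TX:
		printf("page_frag-free %s\n", b->payload);
		break;
	case BUF_XDP_XMIT:
		printf("return xdp_frame %s to its allocator\n", b->payload);
		break;
	case BUF_DUMMY:
		printf("kfree dummy FDIR packet %s\n", b->payload);
		break;
	case BUF_FRAG:
	case BUF_EMPTY:
		break;			/* DMA unmap only, or nothing at all */
	}
	b->type = BUF_EMPTY;		/* mark the slot reusable */
}

int main(void)
{
	struct tx_buf bufs[] = {
		{ BUF_SKB, "pkt0" }, { BUF_FRAG, "pkt0-frag" }, { BUF_XDP_TX, "xdp0" },
	};

	for (unsigned int i = 0; i < 3; i++)
		clean_tx_buf(&bufs[i]);
	return 0;
}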
@@ -168,7 +203,9 @@ struct ice_rx_buf {
dma_addr_t dma;
struct page *page;
unsigned int page_offset;
- u16 pagecnt_bias;
+ unsigned int pgcnt;
+ unsigned int act;
+ unsigned int pagecnt_bias;
};
struct ice_q_stats {
@@ -189,6 +226,16 @@ struct ice_rxq_stats {
u64 alloc_buf_failed;
};
+struct ice_ring_stats {
+ struct rcu_head rcu; /* to avoid race on free */
+ struct ice_q_stats stats;
+ struct u64_stats_sync syncp;
+ union {
+ struct ice_txq_stats tx_stats;
+ struct ice_rxq_stats rx_stats;
+ };
+};
+
enum ice_ring_state_t {
ICE_TX_XPS_INIT_DONE,
ICE_TX_NBITS,
@@ -261,43 +308,44 @@ struct ice_rx_ring {
struct ice_vsi *vsi; /* Backreference to associated VSI */
struct ice_q_vector *q_vector; /* Backreference to associated vector */
u8 __iomem *tail;
+ u16 q_index; /* Queue number of ring */
+
+ u16 count; /* Number of descriptors */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 next_to_alloc;
+ /* CL2 - 2nd cacheline starts here */
union {
struct ice_rx_buf *rx_buf;
struct xdp_buff **xdp_buf;
};
- /* CL2 - 2nd cacheline starts here */
- struct xdp_rxq_info xdp_rxq;
+ struct xdp_buff xdp;
/* CL3 - 3rd cacheline starts here */
- u16 q_index; /* Queue number of ring */
-
- u16 count; /* Number of descriptors */
- u16 reg_idx; /* HW register index of the ring */
+ struct bpf_prog *xdp_prog;
+ u16 rx_offset;
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
- u16 next_to_alloc;
- u16 rx_offset;
- u16 rx_buf_len;
+ u16 first_desc;
/* stats structs */
- struct ice_rxq_stats rx_stats;
- struct ice_q_stats stats;
- struct u64_stats_sync syncp;
+ struct ice_ring_stats *ring_stats;
struct rcu_head rcu; /* to avoid race on free */
- /* CL4 - 3rd cacheline starts here */
+ /* CL4 - 4th cacheline starts here */
struct ice_channel *ch;
- struct bpf_prog *xdp_prog;
struct ice_tx_ring *xdp_ring;
struct xsk_buff_pool *xsk_pool;
- struct sk_buff *skb;
dma_addr_t dma; /* physical address of ring */
-#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
u64 cached_phctime;
+ u16 rx_buf_len;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
+#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
u8 flags;
+ /* CL5 - 5th cacheline starts here */
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
struct ice_tx_ring {
@@ -315,17 +363,13 @@ struct ice_tx_ring {
struct xsk_buff_pool *xsk_pool;
u16 next_to_use;
u16 next_to_clean;
- u16 next_rs;
- u16 next_dd;
u16 q_handle; /* Queue handle per TC */
u16 reg_idx; /* HW register index of the ring */
u16 count; /* Number of descriptors */
u16 q_index; /* Queue number of ring */
+ u16 xdp_tx_active;
/* stats structs */
- struct ice_q_stats stats;
- struct u64_stats_sync syncp;
- struct ice_txq_stats tx_stats;
-
+ struct ice_ring_stats *ring_stats;
/* CL3 - 3rd cacheline starts here */
struct rcu_head rcu; /* to avoid race on free */
DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
@@ -333,7 +377,10 @@ struct ice_tx_ring {
struct ice_ptp_tx *tx_tstamps;
spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
+ /* CL4 - 4th cacheline starts here */
#define ICE_TX_FLAGS_RING_XDP BIT(0)
+#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
+#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_tx;
@@ -380,9 +427,14 @@ struct ice_ring_container {
/* this matches the maximum number of ITR bits, but in usec
* values, so it is shifted left one bit (bit zero is ignored)
*/
- u16 itr_setting:13;
- u16 itr_reserved:2;
- u16 itr_mode:1;
+ union {
+ struct {
+ u16 itr_setting:13;
+ u16 itr_reserved:2;
+ u16 itr_mode:1;
+ };
+ u16 itr_settings;
+ };
enum ice_container_type type;
};
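
The anonymous union over the ITR bitfields exists so that all three fields can be saved and restored as a single 16-bit word. A standalone illustration of the overlay follows; bitfield ordering is implementation-defined in C, so treat this as a model of the idea rather than the exact register layout.

#include <stdint.h>
#include <stdio.h>

struct ring_container_model {
	union {
		struct {
			uint16_t itr_setting:13;
			uint16_t itr_reserved:2;
			uint16_t itr_mode:1;
		};
		uint16_t itr_settings;	/* all three fields viewed as one word */
	};
};

int main(void)
{
	struct ring_container_model rc;

	rc.itr_settings = 0;		/* clear every field through the raw view */
	rc.itr_setting = 50;
	rc.itr_mode = 1;

	uint16_t saved = rc.itr_settings;	/* snapshot everything at once */

	rc.itr_settings = 0;			/* wipe */
	rc.itr_settings = saved;		/* restore */
	printf("setting=%u mode=%u raw=0x%04x\n",
	       rc.itr_setting, rc.itr_mode, rc.itr_settings);
	return 0;
}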
@@ -414,7 +466,7 @@ static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
union ice_32b_rx_flex_desc;
-bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count);
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 1dd7e84f41f8..c8322fb6f2b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */
+#include <linux/filter.h>
+
#include "ice_txrx_lib.h"
#include "ice_eswitch.h"
#include "ice_lib.h"
@@ -187,8 +189,7 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
ice_rx_hash(rx_ring, rx_desc, skb, ptype);
/* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, ice_eswitch_get_target_netdev
- (rx_ring, rx_desc));
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
@@ -208,151 +209,256 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
- if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (vlan_tag & VLAN_VID_MASK))
+ netdev_features_t features = rx_ring->netdev->features;
+ bool non_zero_vlan = !!(vlan_tag & VLAN_VID_MASK);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) && non_zero_vlan)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ else if ((features & NETIF_F_HW_VLAN_STAG_RX) && non_zero_vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);
+
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
/**
+ * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
+ * @dev: device for DMA mapping
+ * @tx_buf: Tx buffer to clean
+ * @bq: XDP bulk flush struct
+ */
+static void
+ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf,
+ struct xdp_frame_bulk *bq)
+{
+ dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+
+ switch (tx_buf->type) {
+ case ICE_TX_BUF_XDP_TX:
+ page_frag_free(tx_buf->raw_buf);
+ break;
+ case ICE_TX_BUF_XDP_XMIT:
+ xdp_return_frame_bulk(tx_buf->xdpf, bq);
+ break;
+ }
+
+ tx_buf->type = ICE_TX_BUF_EMPTY;
+}
+
+/**
* ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring
* @xdp_ring: XDP ring to clean
*/
-static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
+static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
{
- unsigned int total_bytes = 0, total_pkts = 0;
- u16 ntc = xdp_ring->next_to_clean;
- struct ice_tx_desc *next_dd_desc;
- u16 next_dd = xdp_ring->next_dd;
- struct ice_tx_buf *tx_buf;
- int i;
+ int total_bytes = 0, total_pkts = 0;
+ struct device *dev = xdp_ring->dev;
+ u32 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ u32 cnt = xdp_ring->count;
+ struct xdp_frame_bulk bq;
+ u32 frags, xdp_tx = 0;
+ u32 ready_frames = 0;
+ u32 idx;
+ u32 ret;
+
+ idx = xdp_ring->tx_buf[ntc].rs_idx;
+ tx_desc = ICE_TX_DESC(xdp_ring, idx);
+ if (tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
+ if (idx >= ntc)
+ ready_frames = idx - ntc + 1;
+ else
+ ready_frames = idx + cnt - ntc + 1;
+ }
- next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
- if (!(next_dd_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
- return;
+ if (unlikely(!ready_frames))
+ return 0;
+ ret = ready_frames;
+
+ xdp_frame_bulk_init(&bq);
+ rcu_read_lock(); /* xdp_return_frame_bulk() */
- for (i = 0; i < ICE_TX_THRESH; i++) {
- tx_buf = &xdp_ring->tx_buf[ntc];
+ while (ready_frames) {
+ struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
+ struct ice_tx_buf *head = tx_buf;
+ /* bytecount holds size of head + frags */
total_bytes += tx_buf->bytecount;
- /* normally tx_buf->gso_segs was taken but at this point
- * it's always 1 for us
- */
+ frags = tx_buf->nr_frags;
total_pkts++;
-
- page_frag_free(tx_buf->raw_buf);
- dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- tx_buf->raw_buf = NULL;
+ /* count head + frags */
+ ready_frames -= frags + 1;
+ xdp_tx++;
ntc++;
- if (ntc >= xdp_ring->count)
+ if (ntc == cnt)
ntc = 0;
+
+ for (int i = 0; i < frags; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
+
+ ice_clean_xdp_tx_buf(dev, tx_buf, &bq);
+ ntc++;
+ if (ntc == cnt)
+ ntc = 0;
+ }
+
+ ice_clean_xdp_tx_buf(dev, head, &bq);
}
- next_dd_desc->cmd_type_offset_bsz = 0;
- xdp_ring->next_dd = xdp_ring->next_dd + ICE_TX_THRESH;
- if (xdp_ring->next_dd > xdp_ring->count)
- xdp_ring->next_dd = ICE_TX_THRESH - 1;
+ xdp_flush_frame_bulk(&bq);
+ rcu_read_unlock();
+
+ tx_desc->cmd_type_offset_bsz = 0;
xdp_ring->next_to_clean = ntc;
+ xdp_ring->xdp_tx_active -= xdp_tx;
ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
+
+ return ret;
}
/**
- * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
- * @data: packet data pointer
- * @size: packet data size
+ * __ice_xmit_xdp_ring - submit frame to XDP ring for transmission
+ * @xdp: XDP buffer to be placed onto Tx descriptors
* @xdp_ring: XDP ring for transmission
+ * @frame: whether this comes from .ndo_xdp_xmit()
*/
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
+int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
+ bool frame)
{
- u16 i = xdp_ring->next_to_use;
+ struct skb_shared_info *sinfo = NULL;
+ u32 size = xdp->data_end - xdp->data;
+ struct device *dev = xdp_ring->dev;
+ u32 ntu = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_head;
struct ice_tx_buf *tx_buf;
- dma_addr_t dma;
+ u32 cnt = xdp_ring->count;
+ void *data = xdp->data;
+ u32 nr_frags = 0;
+ u32 free_space;
+ u32 frag = 0;
+
+ free_space = ICE_DESC_UNUSED(xdp_ring);
+ if (free_space < ICE_RING_QUARTER(xdp_ring))
+ free_space += ice_clean_xdp_irq(xdp_ring);
+
+ if (unlikely(!free_space))
+ goto busy;
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ if (free_space < nr_frags + 1)
+ goto busy;
+ }
- if (ICE_DESC_UNUSED(xdp_ring) < ICE_TX_THRESH)
- ice_clean_xdp_irq(xdp_ring);
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_head = &xdp_ring->tx_buf[ntu];
+ tx_buf = tx_head;
- if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
- xdp_ring->tx_stats.tx_busy++;
- return ICE_XDP_CONSUMED;
- }
+ for (;;) {
+ dma_addr_t dma;
+
+ dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_unmap;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
- dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(xdp_ring->dev, dma))
- return ICE_XDP_CONSUMED;
-
- tx_buf = &xdp_ring->tx_buf[i];
- tx_buf->bytecount = size;
- tx_buf->gso_segs = 1;
- tx_buf->raw_buf = data;
-
- /* record length, and DMA address */
- dma_unmap_len_set(tx_buf, len, size);
- dma_unmap_addr_set(tx_buf, dma, dma);
-
- tx_desc = ICE_TX_DESC(xdp_ring, i);
- tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
- size, 0);
-
- i++;
- if (i == xdp_ring->count) {
- i = 0;
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs = ICE_TX_THRESH - 1;
+ if (frame) {
+ tx_buf->type = ICE_TX_BUF_FRAG;
+ } else {
+ tx_buf->type = ICE_TX_BUF_XDP_TX;
+ tx_buf->raw_buf = data;
+ }
+
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
+
+ ntu++;
+ if (ntu == cnt)
+ ntu = 0;
+
+ if (frag == nr_frags)
+ break;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_buf = &xdp_ring->tx_buf[ntu];
+
+ data = skb_frag_address(&sinfo->frags[frag]);
+ size = skb_frag_size(&sinfo->frags[frag]);
+ frag++;
}
- xdp_ring->next_to_use = i;
- if (i > xdp_ring->next_rs) {
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs += ICE_TX_THRESH;
+ /* store info about bytecount and frag count in first desc */
+ tx_head->bytecount = xdp_get_buff_len(xdp);
+ tx_head->nr_frags = nr_frags;
+
+ if (frame) {
+ tx_head->type = ICE_TX_BUF_XDP_XMIT;
+ tx_head->xdpf = xdp->data_hard_start;
}
+ /* update last descriptor from a frame with EOP */
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);
+
+ xdp_ring->xdp_tx_active++;
+ xdp_ring->next_to_use = ntu;
+
return ICE_XDP_TX;
-}
-/**
- * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
- * @xdp: XDP buffer
- * @xdp_ring: XDP Tx ring
- *
- * Returns negative on failure, 0 on success.
- */
-int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
-{
- struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+dma_unmap:
+ for (;;) {
+ tx_buf = &xdp_ring->tx_buf[ntu];
+ dma_unmap_page(dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ if (tx_buf == tx_head)
+ break;
+
+ if (!ntu)
+ ntu += cnt;
+ ntu--;
+ }
+ return ICE_XDP_CONSUMED;
- if (unlikely(!xdpf))
- return ICE_XDP_CONSUMED;
+busy:
+ xdp_ring->ring_stats->tx_stats.tx_busy++;
- return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+ return ICE_XDP_CONSUMED;
}
/**
* ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
* @xdp_ring: XDP ring
* @xdp_res: Result of the receive batch
+ * @first_idx: index to write from caller
*
* This function bumps XDP Tx tail and/or flush redirect map, and
* should be called when a batch of packets has been processed in the
* napi loop.
*/
-void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res)
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
+ u32 first_idx)
{
+ struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx];
+
if (xdp_res & ICE_XDP_REDIR)
xdp_do_flush_map();
if (xdp_res & ICE_XDP_TX) {
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_lock(&xdp_ring->tx_lock);
+ /* store index of descriptor with RS bit set in the first
+ * ice_tx_buf of given NAPI batch
+ */
+ tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
ice_xdp_ring_update_tail(xdp_ring);
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_unlock(&xdp_ring->tx_lock);
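
The rewritten XDP Tx cleanup keys off a single RS descriptor per NAPI batch: its index is stored in the batch's first ice_tx_buf, and when that descriptor reports done, the number of completed entries falls out of the index difference, wrap included. The helper below reproduces just that arithmetic with made-up indices so it can be sanity-checked outside the driver.

#include <stdio.h>

/* number of descriptors finished when descriptor 'rs_idx' reports done,
 * counting from next_to_clean 'ntc' in a ring of 'cnt' entries
 */
static unsigned int ready_frames(unsigned int rs_idx, unsigned int ntc, unsigned int cnt)
{
	if (rs_idx >= ntc)
		return rs_idx - ntc + 1;
	return rs_idx + cnt - ntc + 1;		/* RS descriptor wrapped past the end */
}

int main(void)
{
	printf("%u\n", ready_frames(37, 30, 256));	/* 8, no wrap */
	printf("%u\n", ready_frames(3, 250, 256));	/* 10, wrapped */
	return 0;
}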
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 11b6c1601986..115969ecdf7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -6,8 +6,38 @@
#include "ice.h"
/**
+ * ice_set_rx_bufs_act - propagate Rx buffer action to frags
+ * @xdp: XDP buffer representing frame (linear and frags part)
+ * @rx_ring: Rx ring struct
+ * @act: action to store onto Rx buffers related to XDP buffer parts
+ *
+ * Set action that should be taken before putting Rx buffer from first frag
+ * to one before last. Last one is handled by caller of this function as it
+ * is the EOP frag that is currently being processed. This function is
+ * supposed to be called only when XDP buffer contains frags.
+ */
+static inline void
+ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
+ const unsigned int act)
+{
+ const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 first = rx_ring->first_desc;
+ u32 nr_frags = sinfo->nr_frags;
+ u32 cnt = rx_ring->count;
+ struct ice_rx_buf *buf;
+
+ for (int i = 0; i < nr_frags; i++) {
+ buf = &rx_ring->rx_buf[first];
+ buf->act = act;
+
+ if (++first == cnt)
+ first = 0;
+ }
+}
+
+/**
* ice_test_staterr - tests bits in Rx descriptor status and error fields
- * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @status_err_n: Rx descriptor status_error0 or status_error1 bits
* @stat_err_bits: value to mask
*
* This function does some fast chicanery in order to return the
@@ -16,9 +46,31 @@
* at offset zero.
*/
static inline bool
-ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
+ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits)
{
- return !!(rx_desc->wb.status_error0 & cpu_to_le16(stat_err_bits));
+ return !!(status_err_n & cpu_to_le16(stat_err_bits));
+}
+
+/**
+ * ice_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * If the buffer is an EOP buffer, this function exits returning false,
+ * otherwise return true indicating that this is in fact a non-EOP buffer.
+ */
+static inline bool
+ice_is_non_eop(const struct ice_rx_ring *rx_ring,
+ const union ice_32b_rx_flex_desc *rx_desc)
+{
+ /* if we are the last buffer then there is nothing else to do */
+#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
+ if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
+ return false;
+
+ rx_ring->ring_stats->rx_stats.non_eop_descs++;
+
+ return true;
}
static inline __le64
@@ -32,6 +84,30 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
+ * ice_get_vlan_tag_from_rx_desc - get VLAN from Rx flex descriptor
+ * @rx_desc: Rx 32b flex descriptor with RXDID=2
+ *
+ * The OS and current PF implementation only support stripping a single VLAN tag
+ * at a time, so there should only ever be 0 or 1 tags in the l2tag* fields. If
+ * one is found return the tag, else return 0 to mean no VLAN tag was found.
+ */
+static inline u16
+ice_get_vlan_tag_from_rx_desc(union ice_32b_rx_flex_desc *rx_desc)
+{
+ u16 stat_err_bits;
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
+ if (ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
+ return le16_to_cpu(rx_desc->wb.l2tag1);
+
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
+ if (ice_test_staterr(rx_desc->wb.status_error1, stat_err_bits))
+ return le16_to_cpu(rx_desc->wb.l2tag2_2nd);
+
+ return 0;
+}
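
Only one of the two stripped-tag fields is ever valid for a given frame, which is why the helper above tests L2TAG1P before falling back to L2TAG2P. A host-order model of the same selection follows; the bit positions and field names in rx_desc_model are invented for the example and do not match the hardware descriptor layout.

#include <stdint.h>
#include <stdio.h>

#define STAT0_L2TAG1P	(1u << 7)	/* made-up bit positions for the model */
#define STAT1_L2TAG2P	(1u << 3)

struct rx_desc_model {
	uint16_t status0, status1;
	uint16_t l2tag1, l2tag2_2nd;
};

static uint16_t vlan_tag_from_desc(const struct rx_desc_model *d)
{
	if (d->status0 & STAT0_L2TAG1P)
		return d->l2tag1;
	if (d->status1 & STAT1_L2TAG2P)
		return d->l2tag2_2nd;
	return 0;			/* no tag was stripped */
}

int main(void)
{
	struct rx_desc_model d = { .status1 = STAT1_L2TAG2P, .l2tag2_2nd = 100 };

	printf("vlan tag = %u\n", vlan_tag_from_desc(&d));
	return 0;
}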
+
+/**
* ice_xdp_ring_update_tail - Updates the XDP Tx ring tail register
* @xdp_ring: XDP Tx ring
*
@@ -46,9 +122,28 @@ static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}
-void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res);
+/**
+ * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ *
+ * returns the index of the descriptor that the RS bit was set on
+ */
+static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
+{
+ u32 rs_idx = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
+ struct ice_tx_desc *tx_desc;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, rs_idx);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+
+ return rs_idx;
+}
+
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring);
+int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
+ bool frame);
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 9e0c2923c62e..a09556e57803 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -6,15 +6,17 @@
#define ICE_BYTES_PER_WORD 2
#define ICE_BYTES_PER_DWORD 4
+#define ICE_CHNL_MAX_TC 16
-#include "ice_status.h"
#include "ice_hw_autogen.h"
+#include "ice_devids.h"
#include "ice_osdep.h"
#include "ice_controlq.h"
#include "ice_lan_tx_rx.h"
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
#include "ice_sbq_cmd.h"
+#include "ice_vlan_mode.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -54,6 +56,11 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
#define ICE_DBG_AQ_DESC BIT_ULL(25)
#define ICE_DBG_AQ_DESC_BUF BIT_ULL(26)
#define ICE_DBG_AQ_CMD BIT_ULL(27)
+#define ICE_DBG_AQ (ICE_DBG_AQ_MSG | \
+ ICE_DBG_AQ_DESC | \
+ ICE_DBG_AQ_DESC_BUF | \
+ ICE_DBG_AQ_CMD)
+
#define ICE_DBG_USER BIT_ULL(31)
enum ice_aq_res_ids {
@@ -230,8 +237,8 @@ enum ice_fd_hw_seg {
ICE_FD_HW_SEG_MAX,
};
-/* 2 VSI = 1 ICE_VSI_PF + 1 ICE_VSI_CTRL */
-#define ICE_MAX_FDIR_VSI_PER_FILTER 2
+/* 1 ICE_VSI_PF + 1 ICE_VSI_CTRL + ICE_CHNL_MAX_TC */
+#define ICE_MAX_FDIR_VSI_PER_FILTER (2 + ICE_CHNL_MAX_TC)
struct ice_fd_hw_prof {
struct ice_flow_seg_info *fdir_seg[ICE_FD_HW_SEG_MAX];
@@ -279,6 +286,10 @@ struct ice_hw_common_caps {
#define ICE_NVM_PENDING_NETLIST BIT(2)
bool nvm_unified_update;
#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
+ /* PCIe reset avoidance */
+ bool pcie_reset_avoidance;
+ /* Post update reset restriction */
+ bool reset_restrict_support;
};
/* IEEE 1588 TIME_SYNC specific info */
@@ -295,9 +306,30 @@ struct ice_hw_common_caps {
#define ICE_TS_TMR_IDX_ASSOC_S 24
#define ICE_TS_TMR_IDX_ASSOC_M BIT(24)
+/* TIME_REF clock rate specification */
+enum ice_time_ref_freq {
+ ICE_TIME_REF_FREQ_25_000 = 0,
+ ICE_TIME_REF_FREQ_122_880 = 1,
+ ICE_TIME_REF_FREQ_125_000 = 2,
+ ICE_TIME_REF_FREQ_153_600 = 3,
+ ICE_TIME_REF_FREQ_156_250 = 4,
+ ICE_TIME_REF_FREQ_245_760 = 5,
+
+ NUM_ICE_TIME_REF_FREQ
+};
+
+/* Clock source specification */
+enum ice_clk_src {
+ ICE_CLK_SRC_TCX0 = 0, /* Temperature compensated oscillator */
+ ICE_CLK_SRC_TIME_REF = 1, /* Use TIME_REF reference clock */
+
+ NUM_ICE_CLK_SRC
+};
+
struct ice_ts_func_info {
/* Function specific info */
- u32 clk_freq;
+ enum ice_time_ref_freq time_ref;
+ u8 clk_freq;
u8 clk_src;
u8 tmr_index_assoc;
u8 ena;
@@ -315,6 +347,7 @@ struct ice_ts_func_info {
#define ICE_TS_DEV_ENA_M BIT(24)
#define ICE_TS_TMR0_ENA_M BIT(25)
#define ICE_TS_TMR1_ENA_M BIT(26)
+#define ICE_TS_LL_TX_TS_READ_M BIT(28)
struct ice_ts_dev_info {
/* Device specific info */
@@ -327,6 +360,7 @@ struct ice_ts_dev_info {
u8 ena;
u8 tmr0_ena;
u8 tmr1_ena;
+ u8 ts_ll_read;
};
/* Function specific capabilities */
@@ -490,7 +524,14 @@ struct ice_sched_node {
struct ice_sched_node *sibling; /* next sibling in the same layer */
struct ice_sched_node **children;
struct ice_aqc_txsched_elem_data info;
+ char *name;
+ struct devlink_rate *rate_node;
+ u64 tx_max;
+ u64 tx_share;
u32 agg_id; /* aggregator group ID */
+ u32 id;
+ u32 tx_priority;
+ u32 tx_weight;
u16 vsi_handle;
u8 in_use; /* suspended or in use */
u8 tx_sched_layer; /* Logical Layer (1-9) */
@@ -532,6 +573,8 @@ enum ice_rl_type {
#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
+#define ICE_MAX_PORT_PER_PCI_DEV 8
+
/* Data structure for saving BW information */
enum ice_bw_type {
ICE_BW_TYPE_PRIO,
@@ -661,10 +704,6 @@ struct ice_port_info {
#define ICE_SCHED_PORT_STATE_READY 0x1
u8 lport;
#define ICE_LPORT_MASK 0xff
- u16 dflt_tx_vsi_rule_id;
- u16 dflt_tx_vsi_num;
- u16 dflt_rx_vsi_rule_id;
- u16 dflt_rx_vsi_num;
struct ice_fc_info fc;
struct ice_mac_info mac;
struct ice_phy_info phy;
@@ -674,7 +713,9 @@ struct ice_port_info {
/* List contain profile ID(s) and other params per layer */
struct list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct ice_qos_cfg qos_cfg;
+ struct xarray sched_node_ids;
u8 is_vf:1;
+ u8 is_custom_tx_enabled:1;
};
struct ice_switch_info {
@@ -743,14 +784,15 @@ struct ice_mbx_snap_buffer_data {
u16 max_num_msgs_mbx;
};
-/* Structure to track messages sent by VFs on mailbox:
- * 1. vf_cntr: a counter array of VFs to track the number of
- * asynchronous messages sent by each VF
- * 2. vfcntr_len: number of entries in VF counter array
+/* Structure used to track a single VF's messages on the mailbox:
+ * 1. list_entry: linked list entry node
+ * 2. msg_count: the number of asynchronous messages sent by this VF
+ * 3. malicious: whether this VF has been detected as malicious before
*/
-struct ice_mbx_vf_counter {
- u32 *vf_cntr;
- u32 vfcntr_len;
+struct ice_mbx_vf_info {
+ struct list_head list_entry;
+ u32 msg_count;
+ u8 malicious : 1;
};
/* Structure to hold data relevant to the captured static snapshot
@@ -758,7 +800,7 @@ struct ice_mbx_vf_counter {
*/
struct ice_mbx_snapshot {
struct ice_mbx_snap_buffer_data mbx_buf;
- struct ice_mbx_vf_counter mbx_vf;
+ struct list_head mbx_vf;
};
/* Structure to hold data to be used for capturing or updating a
@@ -857,8 +899,6 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
- u8 ucast_shared; /* true if VSIs can share unicast addr */
-
#define ICE_PHY_PER_NAC 1
#define ICE_MAX_QUAD 2
#define ICE_NUM_QUAD_TYPE 2
@@ -873,8 +913,6 @@ struct ice_hw {
u8 active_pkg_name[ICE_PKG_NAME_SIZE];
u8 active_pkg_in_nvm;
- enum ice_aq_err pkg_dwnld_status;
-
/* Driver's package ver - (from the Ice Metadata section) */
struct ice_pkg_ver pkg_ver;
u8 pkg_name[ICE_PKG_NAME_SIZE];
@@ -897,6 +935,9 @@ struct ice_hw {
struct udp_tunnel_nic_shared udp_tunnel_shared;
struct udp_tunnel_nic_info udp_tunnel_nic;
+ /* dvm boost update information */
+ struct ice_dvm_table dvm_upd;
+
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
@@ -919,6 +960,8 @@ struct ice_hw {
struct mutex rss_locks; /* protect RSS configuration */
struct list_head rss_list_head;
struct ice_mbx_snapshot mbx_snapshot;
+ DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX);
+ u8 dvm_ena;
u16 io_expander_handle;
};
@@ -984,6 +1027,15 @@ struct ice_hw_port_stats {
u64 fd_sb_match;
};
+enum ice_sw_fwd_act_type {
+ ICE_FWD_TO_VSI = 0,
+ ICE_FWD_TO_VSI_LIST, /* Do not use this when adding filter */
+ ICE_FWD_TO_Q,
+ ICE_FWD_TO_QGRP,
+ ICE_DROP_PACKET,
+ ICE_INVAL_ACT
+};
+
struct ice_aq_get_set_rss_lut_params {
u16 vsi_handle; /* software VSI handle */
u16 lut_size; /* size of the LUT buffer */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
new file mode 100644
index 000000000000..89fd6982df09
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -0,0 +1,1312 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice_virtchnl_allowlist.h"
+
+/* Public functions which may be accessed by all driver files */
+
+/**
+ * ice_get_vf_by_id - Get pointer to VF by ID
+ * @pf: the PF private structure
+ * @vf_id: the VF ID to locate
+ *
+ * Locate and return a pointer to the VF structure associated with a given ID.
+ * Returns NULL if the ID does not have a valid VF structure associated with
+ * it.
+ *
+ * This function takes a reference to the VF, which must be released by
+ * calling ice_put_vf() once the caller is finished accessing the VF structure
+ * returned.
+ */
+struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
+{
+ struct ice_vf *vf;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(pf->vfs.table, vf, entry, vf_id) {
+ if (vf->vf_id == vf_id) {
+ struct ice_vf *found;
+
+ if (kref_get_unless_zero(&vf->refcnt))
+ found = vf;
+ else
+ found = NULL;
+
+ rcu_read_unlock();
+ return found;
+ }
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
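
The lookup above is the standard RCU-plus-refcount idiom: kref_get_unless_zero() only pins the VF if its reference count has not already dropped to zero, so an entry being torn down concurrently cannot be resurrected. A userspace approximation of that one step using C11 atomics; the kernel's kref adds debugging and release hooks that this sketch leaves out.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* take a reference only if the object is still live (refcount > 0) */
static bool get_unless_zero(atomic_uint *refcnt)
{
	unsigned int old = atomic_load(refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;	/* pinned: count went old -> old + 1 */
	}
	return false;			/* object already on its way to being freed */
}

int main(void)
{
	atomic_uint live = 1, dying = 0;

	printf("live:  %s\n", get_unless_zero(&live)  ? "got ref" : "NULL");
	printf("dying: %s\n", get_unless_zero(&dying) ? "got ref" : "NULL");
	return 0;
}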
+
+/**
+ * ice_release_vf - Release VF associated with a refcount
+ * @ref: the kref decremented to zero
+ *
+ * Callback function for kref_put to release a VF once its reference count has
+ * hit zero.
+ */
+static void ice_release_vf(struct kref *ref)
+{
+ struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
+
+ vf->vf_ops->free(vf);
+}
+
+/**
+ * ice_put_vf - Release a reference to a VF
+ * @vf: the VF structure to decrease reference count on
+ *
+ * Decrease the reference count for a VF, and free the entry if it is no
+ * longer in use.
+ *
+ * This must be called after ice_get_vf_by_id() once the reference to the VF
+ * structure is no longer used. Otherwise, the VF structure will never be
+ * freed.
+ */
+void ice_put_vf(struct ice_vf *vf)
+{
+ kref_put(&vf->refcnt, ice_release_vf);
+}
+
+/**
+ * ice_has_vfs - Return true if the PF has any associated VFs
+ * @pf: the PF private structure
+ *
+ * Return whether or not the PF has any allocated VFs.
+ *
+ * Note that this answer is only valid at the moment the function is called;
+ * it does not guarantee that no VFs will be added immediately afterwards.
+ */
+bool ice_has_vfs(struct ice_pf *pf)
+{
+ /* A simple check that the hash table is not empty does not require
+ * the mutex or rcu_read_lock.
+ */
+ return !hash_empty(pf->vfs.table);
+}
+
+/**
+ * ice_get_num_vfs - Get number of allocated VFs
+ * @pf: the PF private structure
+ *
+ * Return the total number of allocated VFs. NOTE: VF IDs are not guaranteed
+ * to be contiguous. Do not assume that a VF ID is guaranteed to be less than
+ * the output of this function.
+ */
+u16 ice_get_num_vfs(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+ u16 num_vfs = 0;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf)
+ num_vfs++;
+ rcu_read_unlock();
+
+ return num_vfs;
+}
+
+/**
+ * ice_get_vf_vsi - get VF's VSI based on the stored index
+ * @vf: VF used to get VSI
+ */
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+ if (vf->lan_vsi_idx == ICE_NO_VSI)
+ return NULL;
+
+ return vf->pf->vsi[vf->lan_vsi_idx];
+}
+
+/**
+ * ice_is_vf_disabled - check if the VF or its parent PF is disabled
+ * @vf: pointer to the VF info
+ *
+ * If the PF has been disabled, there is no need to reset the VF until the PF
+ * is active again. Similarly, if the VF has been disabled, this means something
+ * else is resetting the VF, so we shouldn't continue.
+ *
+ * Returns true if the caller should consider the VF as disabled whether
+ * because that single VF is explicitly disabled or because the PF is
+ * currently disabled.
+ */
+bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ return (test_bit(ICE_VF_DIS, pf->state) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states));
+}
+
+/**
+ * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
+ * @vf: The VF being resseting
+ *
+ * The max poll time is about ~800ms, which is about the maximum time it takes
+ * for a VF to be reset and/or a VF driver to be removed.
+ */
+static void ice_wait_on_vf_reset(struct ice_vf *vf)
+{
+ int i;
+
+ for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
+ break;
+ msleep(ICE_MAX_VF_RESET_SLEEP_MS);
+ }
+}
+
+/**
+ * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
+ * @vf: VF to check if it's ready to be configured/queried
+ *
+ * The purpose of this function is to make sure the VF is not in reset, not
+ * disabled, and initialized so it can be configured and/or queried by a host
+ * administrator.
+ */
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+ ice_wait_on_vf_reset(vf);
+
+ if (ice_is_vf_disabled(vf))
+ return -EINVAL;
+
+ if (ice_check_vf_init(vf))
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * ice_trigger_vf_reset - Reset a VF on HW
+ * @vf: pointer to the VF structure
+ * @is_vflr: true if VFLR was issued, false if not
+ * @is_pfr: true if the reset was triggered due to a previous PFR
+ *
+ * Trigger hardware to start a reset for a particular VF. Expects the caller
+ * to wait the proper amount of time to allow hardware to reset the VF before
+ * it cleans up and restores VF functionality.
+ */
+static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
+{
+ /* Inform VF that it is no longer active, as a warning */
+ clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+ /* Disable VF's configuration API during reset. The flag is re-enabled
+ * when it's safe again to access VF's VSI.
+ */
+ clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
+
+ /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
+ * needs to clear them in the case of VFR/VFLR. If this is done for
+ * PFR, it can mess up VF resets because the VF driver may already
+ * have started cleanup by the time we get here.
+ */
+ if (!is_pfr)
+ vf->vf_ops->clear_mbx_register(vf);
+
+ vf->vf_ops->trigger_reset_register(vf, is_vflr);
+}
+
+static void ice_vf_clear_counters(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (vsi)
+ vsi->num_vlan = 0;
+
+ vf->num_mac = 0;
+ memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
+ memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
+}
+
+/**
+ * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
+ * @vf: VF to perform pre VSI rebuild tasks
+ *
+ * These tasks are items that don't need to be amortized since they are most
+ * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
+ */
+static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
+{
+ /* Close any IRQ mapping now */
+ if (vf->vf_ops->irq_close)
+ vf->vf_ops->irq_close(vf);
+
+ ice_vf_clear_counters(vf);
+ vf->vf_ops->clear_reset_trigger(vf);
+}
+
+/**
+ * ice_vf_recreate_vsi - Release and re-create the VF's VSI
+ * @vf: VF to recreate the VSI for
+ *
+ * This is only called when a single VF is being reset (i.e. VFR, VFLR, host
+ * VF configuration change, etc)
+ *
+ * It releases and then re-creates a new VSI.
+ */
+static int ice_vf_recreate_vsi(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ int err;
+
+ ice_vf_vsi_release(vf);
+
+ err = vf->vf_ops->create_vsi(vf);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to recreate the VF%u's VSI, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_vsi - rebuild the VF's VSI
+ * @vf: VF to rebuild the VSI for
+ *
+ * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
+ * host, PFR, CORER, etc.).
+ *
+ * It reprograms the VSI configuration back into hardware.
+ */
+static int ice_vf_rebuild_vsi(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_pf *pf = vf->pf;
+
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
+ if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
+ dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
+ vf->vf_id);
+ return -EIO;
+ }
+ /* vsi->idx will remain the same in this case so don't update
+ * vf->lan_vsi_idx
+ */
+ vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
+ vf->lan_vsi_num = vsi->vsi_num;
+
+ return 0;
+}
+
+/**
+ * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
+ * @vf: the VF being reset
+ *
+ * Perform reset tasks which must occur after the VSI has been re-created or
+ * rebuilt during a VF reset.
+ */
+static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
+{
+ ice_vf_rebuild_host_cfg(vf);
+ ice_vf_set_initialized(vf);
+
+ vf->vf_ops->post_vsi_rebuild(vf);
+}
+
+/**
+ * ice_is_any_vf_in_unicast_promisc - check if any VF(s)
+ * are in unicast promiscuous mode
+ * @pf: PF structure for accessing VF(s)
+ *
+ * Return false if no VF(s) are in unicast promiscuous mode,
+ * else return true
+ */
+bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
+{
+ bool is_vf_promisc = false;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ /* found a VF that has promiscuous mode configured */
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
+ is_vf_promisc = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_vf_promisc;
+}
+
+/**
+ * ice_vf_get_promisc_masks - Calculate masks for promiscuous modes
+ * @vf: the VF pointer
+ * @vsi: the VSI to configure
+ * @ucast_m: promiscuous mask to apply to unicast
+ * @mcast_m: promiscuous mask to apply to multicast
+ *
+ * Decide which mask should be used for unicast and multicast filter,
+ * based on presence of VLANs
+ */
+void
+ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
+ u8 *ucast_m, u8 *mcast_m)
+{
+ if (ice_vf_is_port_vlan_ena(vf) ||
+ ice_vsi_has_non_zero_vlans(vsi)) {
+ *mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ *ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ } else {
+ *mcast_m = ICE_MCAST_PROMISC_BITS;
+ *ucast_m = ICE_UCAST_PROMISC_BITS;
+ }
+}
+
+/**
+ * ice_vf_clear_all_promisc_modes - Clear promisc/allmulticast on VF VSI
+ * @vf: the VF pointer
+ * @vsi: the VSI to configure
+ *
+ * Clear all promiscuous/allmulticast filters for a VF
+ */
+static int
+ice_vf_clear_all_promisc_modes(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vf->pf;
+ u8 ucast_m, mcast_m;
+ int ret = 0;
+
+ ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) {
+ if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
+ if (ice_is_dflt_vsi_in_use(vsi->port_info))
+ ret = ice_clear_dflt_vsi(vsi);
+ } else {
+ ret = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
+ }
+
+ if (ret) {
+ dev_err(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode failed\n");
+ } else {
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ dev_info(ice_pf_to_dev(vf->pf), "Disabling promiscuous mode succeeded\n");
+ }
+ }
+
+ if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ ret = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
+ if (ret) {
+ dev_err(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode failed\n");
+ } else {
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ dev_info(ice_pf_to_dev(vf->pf), "Disabling allmulticast mode succeeded\n");
+ }
+ }
+ return ret;
+}
+
+/**
+ * ice_vf_set_vsi_promisc - Enable promiscuous mode for a VF VSI
+ * @vf: the VF to configure
+ * @vsi: the VF's VSI
+ * @promisc_m: the promiscuous mode to enable
+ */
+int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ int status;
+
+ if (ice_vf_is_port_vlan_ena(vf))
+ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ ice_vf_get_port_vlan_id(vf));
+ else if (ice_vsi_has_non_zero_vlans(vsi))
+ status = ice_fltr_set_vlan_vsi_promisc(hw, vsi, promisc_m);
+ else
+ status = ice_fltr_set_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+ if (status && status != -EEXIST) {
+ dev_err(ice_pf_to_dev(vsi->back), "enable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_clear_vsi_promisc - Disable promiscuous mode for a VF VSI
+ * @vf: the VF to configure
+ * @vsi: the VF's VSI
+ * @promisc_m: the promiscuous mode to disable
+ */
+int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ int status;
+
+ if (ice_vf_is_port_vlan_ena(vf))
+ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ ice_vf_get_port_vlan_id(vf));
+ else if (ice_vsi_has_non_zero_vlans(vsi))
+ status = ice_fltr_clear_vlan_vsi_promisc(hw, vsi, promisc_m);
+ else
+ status = ice_fltr_clear_vsi_promisc(hw, vsi->idx, promisc_m, 0);
+
+ if (status && status != -ENOENT) {
+ dev_err(ice_pf_to_dev(vsi->back), "disable Tx/Rx filter promiscuous mode on VF-%u failed, error: %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_reset_all_vfs - reset all allocated VFs in one go
+ * @pf: pointer to the PF structure
+ *
+ * Reset all VFs at once, in response to a PF or other device reset.
+ *
+ * First, tell the hardware to reset each VF, then do all the waiting in one
+ * chunk, and finally finish restoring each VF after the wait. This is useful
+ * during PF routines which need to reset all VFs, as otherwise it must perform
+ * these resets in a serialized fashion.
+ */
+void ice_reset_all_vfs(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ /* If we don't have any VFs, then there is nothing to reset */
+ if (!ice_has_vfs(pf))
+ return;
+
+ mutex_lock(&pf->vfs.table_lock);
+
+ /* clear all malicious info if the VFs are getting reset */
+ ice_for_each_vf(pf, bkt, vf)
+ ice_mbx_clear_malvf(&vf->mbx_info);
+
+ /* If VFs have been disabled, there is no need to reset */
+ if (test_and_set_bit(ICE_VF_DIS, pf->state)) {
+ mutex_unlock(&pf->vfs.table_lock);
+ return;
+ }
+
+ /* Begin reset on all VFs at once */
+ ice_for_each_vf(pf, bkt, vf)
+ ice_trigger_vf_reset(vf, true, true);
+
+ /* HW requires some time to make sure it can flush the FIFO for a VF
+ * when it resets it. Now that we've triggered all of the VFs, iterate
+ * the table again and wait for each VF to complete.
+ */
+ ice_for_each_vf(pf, bkt, vf) {
+ if (!vf->vf_ops->poll_reset_status(vf)) {
+ /* Display a warning if at least one VF didn't manage
+ * to reset in time, but continue on with the
+ * operation.
+ */
+ dev_warn(dev, "VF %u reset check timeout\n", vf->vf_id);
+ break;
+ }
+ }
+
+ /* free VF resources to begin resetting the VSI state */
+ ice_for_each_vf(pf, bkt, vf) {
+ mutex_lock(&vf->cfg_lock);
+
+ vf->driver_caps = 0;
+ ice_vc_set_default_allowlist(vf);
+
+ ice_vf_fdir_exit(vf);
+ ice_vf_fdir_init(vf);
+ /* clean VF control VSI when resetting VFs since it should be
+ * setup only when VF creates its first FDIR rule.
+ */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_invalidate_vsi(vf);
+
+ ice_vf_pre_vsi_rebuild(vf);
+ ice_vf_rebuild_vsi(vf);
+ ice_vf_post_vsi_rebuild(vf);
+
+ mutex_unlock(&vf->cfg_lock);
+ }
+
+ if (ice_is_eswitch_mode_switchdev(pf))
+ if (ice_eswitch_rebuild(pf))
+ dev_warn(dev, "eswitch rebuild failed\n");
+
+ ice_flush(hw);
+ clear_bit(ICE_VF_DIS, pf->state);
+
+ mutex_unlock(&pf->vfs.table_lock);
+}
+
+/**
+ * ice_notify_vf_reset - Notify VF of a reset event
+ * @vf: pointer to the VF structure
+ */
+static void ice_notify_vf_reset(struct ice_vf *vf)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+ struct virtchnl_pf_event pfe;
+
+ /* Bail out if the VF is disabled, or is neither initialized nor active;
+ * otherwise proceed with the notification
+ */
+ if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
+ test_bit(ICE_VF_STATE_DIS, vf->vf_states))
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
+ NULL);
+}
+
+/**
+ * ice_reset_vf - Reset a particular VF
+ * @vf: pointer to the VF structure
+ * @flags: flags controlling behavior of the reset
+ *
+ * Flags:
+ * ICE_VF_RESET_VFLR - Indicates a reset is due to VFLR event
+ * ICE_VF_RESET_NOTIFY - Send VF a notification prior to reset
+ * ICE_VF_RESET_LOCK - Acquire VF cfg_lock before resetting
+ *
+ * Returns 0 if the VF is currently in reset, if resets are disabled, or if
+ * the VF resets successfully. Returns an error code if the VF fails to
+ * rebuild.
+ */
+int ice_reset_vf(struct ice_vf *vf, u32 flags)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int err = 0;
+ bool rsd;
+
+ dev = ice_pf_to_dev(pf);
+
+ if (flags & ICE_VF_RESET_NOTIFY)
+ ice_notify_vf_reset(vf);
+
+ if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
+ dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
+ vf->vf_id);
+ return 0;
+ }
+
+ if (ice_is_vf_disabled(vf)) {
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ dev_dbg(dev, "VF is already removed\n");
+ return -EINVAL;
+ }
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
+
+ if (ice_vsi_is_rx_queue_active(vsi))
+ ice_vsi_stop_all_rx_rings(vsi);
+
+ dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
+ vf->vf_id);
+ return 0;
+ }
+
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_lock(&vf->cfg_lock);
+ else
+ lockdep_assert_held(&vf->cfg_lock);
+
+ /* Set VF disable bit state here, before triggering reset */
+ set_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false);
+
+ vsi = ice_get_vf_vsi(vf);
+ if (WARN_ON(!vsi)) {
+ err = -EIO;
+ goto out_unlock;
+ }
+
+ ice_dis_vf_qs(vf);
+
+ /* Call Disable LAN Tx queue AQ whether or not queues are
+ * enabled. This is needed for successful completion of VFR.
+ */
+ ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
+ NULL, vf->vf_ops->reset_type, vf->vf_id, NULL);
+
+ /* poll VPGEN_VFRSTAT reg to make sure
+ * that reset is complete
+ */
+ rsd = vf->vf_ops->poll_reset_status(vf);
+
+ /* Display a warning if the VF didn't manage to reset in time, but
+ * continue on with the operation.
+ */
+ if (!rsd)
+ dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
+
+ vf->driver_caps = 0;
+ ice_vc_set_default_allowlist(vf);
+
+ /* Disable promiscuous modes in case they were enabled; ignore any
+ * error if the disabling process fails
+ */
+ ice_vf_clear_all_promisc_modes(vf, vsi);
+
+ ice_eswitch_del_vf_mac_rule(vf);
+
+ ice_vf_fdir_exit(vf);
+ ice_vf_fdir_init(vf);
+ /* clean VF control VSI when resetting VF since it should be setup
+ * only when VF creates its first FDIR rule.
+ */
+ if (vf->ctrl_vsi_idx != ICE_NO_VSI)
+ ice_vf_ctrl_vsi_release(vf);
+
+ ice_vf_pre_vsi_rebuild(vf);
+
+ if (ice_vf_recreate_vsi(vf)) {
+ dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
+ vf->vf_id);
+ err = -EFAULT;
+ goto out_unlock;
+ }
+
+ ice_vf_post_vsi_rebuild(vf);
+ vsi = ice_get_vf_vsi(vf);
+ if (WARN_ON(!vsi)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ ice_eswitch_update_repr(vsi);
+ ice_eswitch_replay_vf_mac_rule(vf);
+
+ /* if the VF has been reset allow it to come up again */
+ ice_mbx_clear_malvf(&vf->mbx_info);
+
+out_unlock:
+ if (flags & ICE_VF_RESET_LOCK)
+ mutex_unlock(&vf->cfg_lock);
+
+ return err;
+}
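+
+/* Illustrative caller sketch (a minimal example of typical usage, assuming
+ * the caller does not already hold the VF's cfg_lock). ice_get_vf_by_id()
+ * and ice_put_vf() are the reference-counted lookup helpers declared in
+ * ice_vf_lib.h:
+ *
+ *	struct ice_vf *vf = ice_get_vf_by_id(pf, vf_id);
+ *
+ *	if (vf) {
+ *		ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
+ *		ice_put_vf(vf);
+ *	}
+ *
+ * A VFLR handler would typically pass ICE_VF_RESET_VFLR in the flags instead
+ * of ICE_VF_RESET_NOTIFY.
+ */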
+
+/**
+ * ice_set_vf_state_qs_dis - Set VF queues state to disabled
+ * @vf: pointer to the VF structure
+ */
+static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+ /* Clear Rx/Tx enabled queues flag */
+ bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+}
+
+/**
+ * ice_set_vf_state_dis - Set VF state to disabled
+ * @vf: pointer to the VF structure
+ */
+void ice_set_vf_state_dis(struct ice_vf *vf)
+{
+ ice_set_vf_state_qs_dis(vf);
+ vf->vf_ops->clear_reset_state(vf);
+}
+
+/* Private functions only accessed from other virtualization files */
+
+/**
+ * ice_initialize_vf_entry - Initialize a VF entry
+ * @vf: pointer to the VF structure
+ */
+void ice_initialize_vf_entry(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vfs *vfs;
+
+ vfs = &pf->vfs;
+
+ /* assign default capabilities */
+ vf->spoofchk = true;
+ vf->num_vf_qs = vfs->num_qps_per;
+ ice_vc_set_default_allowlist(vf);
+ ice_virtchnl_set_dflt_ops(vf);
+
+ /* ctrl_vsi_idx will be set to a valid value only when iAVF
+ * creates its first fdir rule.
+ */
+ ice_vf_ctrl_invalidate_vsi(vf);
+ ice_vf_fdir_init(vf);
+
+ /* Initialize mailbox info for this VF */
+ ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info);
+
+ mutex_init(&vf->cfg_lock);
+}
+
+/**
+ * ice_dis_vf_qs - Disable the VF queues
+ * @vf: pointer to the VF structure
+ */
+void ice_dis_vf_qs(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
+ ice_vsi_stop_all_rx_rings(vsi);
+ ice_set_vf_state_qs_dis(vf);
+}
+
+/**
+ * ice_err_to_virt_err - translate errors for VF return code
+ * @err: error return code
+ */
+enum virtchnl_status_code ice_err_to_virt_err(int err)
+{
+ switch (err) {
+ case 0:
+ return VIRTCHNL_STATUS_SUCCESS;
+ case -EINVAL:
+ case -ENODEV:
+ return VIRTCHNL_STATUS_ERR_PARAM;
+ case -ENOMEM:
+ return VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ case -EALREADY:
+ case -EBUSY:
+ case -EIO:
+ case -ENOSPC:
+ return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ default:
+ return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ }
+}
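+
+/* Illustrative usage sketch: a virtchnl message handler would typically
+ * translate its kernel error code with this helper before replying to the
+ * VF over the PF->VF mailbox, e.g.
+ *
+ *	v_ret = ice_err_to_virt_err(err);
+ *
+ * where v_ret is the enum virtchnl_status_code carried in the response.
+ */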
+
+/**
+ * ice_check_vf_init - helper to check if VF init complete
+ * @vf: the pointer to the VF to check
+ */
+int ice_check_vf_init(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
+ dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
+ vf->vf_id);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
+ * ice_vf_get_port_info - Get the VF's port info structure
+ * @vf: VF to get the port info structure for
+ */
+struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
+{
+ return vf->pf->hw.port_info;
+}
+
+/**
+ * ice_cfg_mac_antispoof - Configure MAC antispoof checking behavior
+ * @vsi: the VSI to configure
+ * @enable: whether to enable or disable the spoof checking
+ *
+ * Configure a VSI to enable (or disable) spoof checking behavior.
+ */
+static int ice_cfg_mac_antispoof(struct ice_vsi *vsi, bool enable)
+{
+ struct ice_vsi_ctx *ctx;
+ int err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.sec_flags = vsi->info.sec_flags;
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+
+ if (enable)
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ else
+ ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+
+ err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx MAC anti-spoof %s for VSI %d, error %d\n",
+ enable ? "ON" : "OFF", vsi->vsi_num, err);
+ else
+ vsi->info.sec_flags = ctx->info.sec_flags;
+
+ kfree(ctx);
+
+ return err;
+}
+
+/**
+ * ice_vsi_ena_spoofchk - enable Tx spoof checking for this VSI
+ * @vsi: VSI to enable Tx spoof checking for
+ */
+static int ice_vsi_ena_spoofchk(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int err = 0;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ /* Allow VF with VLAN 0 only to send all tagged traffic */
+ if (vsi->type != ICE_VSI_VF || ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vlan_ops->ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+
+ return ice_cfg_mac_antispoof(vsi, true);
+}
+
+/**
+ * ice_vsi_dis_spoofchk - disable Tx spoof checking for this VSI
+ * @vsi: VSI to disable Tx spoof checking for
+ */
+static int ice_vsi_dis_spoofchk(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int err;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+
+ err = vlan_ops->dis_tx_filtering(vsi);
+ if (err)
+ return err;
+
+ return ice_cfg_mac_antispoof(vsi, false);
+}
+
+/**
+ * ice_vsi_apply_spoofchk - Apply Tx spoof checking setting to a VSI
+ * @vsi: VSI associated to the VF
+ * @enable: whether to enable or disable the spoof checking
+ */
+int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable)
+{
+ int err;
+
+ if (enable)
+ err = ice_vsi_ena_spoofchk(vsi);
+ else
+ err = ice_vsi_dis_spoofchk(vsi);
+
+ return err;
+}
+
+/**
+ * ice_is_vf_trusted - check if the VF is trusted
+ * @vf: pointer to the VF info
+ */
+bool ice_is_vf_trusted(struct ice_vf *vf)
+{
+ return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+}
+
+/**
+ * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
+ * @vf: the VF to check
+ *
+ * Returns true if the VF has no Rx and no Tx queues enabled and returns false
+ * otherwise
+ */
+bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
+{
+ return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
+ !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
+}
+
+/**
+ * ice_is_vf_link_up - check if the VF's link is up
+ * @vf: VF to check if link is up
+ */
+bool ice_is_vf_link_up(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+
+ if (ice_check_vf_init(vf))
+ return false;
+
+ if (ice_vf_has_no_qs_ena(vf))
+ return false;
+ else if (vf->link_forced)
+ return vf->link_up;
+ else
+ return pi->phy.link_info.link_info &
+ ICE_AQ_LINK_UP;
+}
+
+/**
+ * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
+ * @vf: VF to configure trust setting for
+ */
+static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
+{
+ if (vf->trusted)
+ set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+ else
+ clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+}
+
+/**
+ * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
+ * @vf: VF to add MAC filters for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
+ */
+static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ u8 broadcast[ETH_ALEN];
+ int status;
+
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
+ if (ice_is_eswitch_mode_switchdev(vf->pf))
+ return 0;
+
+ eth_broadcast_addr(broadcast);
+ status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ vf->num_mac++;
+
+ if (is_valid_ether_addr(vf->hw_lan_addr)) {
+ status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
+ ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
+ &vf->hw_lan_addr[0], vf->vf_id,
+ status);
+ return status;
+ }
+ vf->num_mac++;
+
+ ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
+ * @vf: VF to add VLAN filters for
+ * @vsi: Pointer to VSI
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds either a VLAN 0 or port VLAN based filter after reset.
+ */
+static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ int err;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
+ if (err) {
+ dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
+ } else {
+ err = ice_vsi_add_vlan_zero(vsi);
+ }
+
+ if (err) {
+ dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
+ ice_vf_is_port_vlan_ena(vf) ?
+ ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
+ return err;
+ }
+
+ err = vlan_ops->ena_rx_filtering(vsi);
+ if (err)
+ dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
+ vf->vf_id, vsi->idx, err);
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
+ * @vf: VF to re-apply the configuration for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * needs to re-apply the host configured Tx rate limiting configuration.
+ */
+static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ int err;
+
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
+ if (vf->min_tx_rate) {
+ err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->min_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ if (vf->max_tx_rate) {
+ err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->max_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
+ * @vsi: Pointer to VSI
+ *
+ * This function moves VSI into corresponding scheduler aggregator node
+ * based on cached value of "aggregator node info" per VSI
+ */
+static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ int status;
+
+ if (!vsi->agg_node)
+ return;
+
+ dev = ice_pf_to_dev(pf);
+ if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
+ dev_dbg(dev,
+ "agg_id %u already has reached max_num_vsis %u\n",
+ vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
+ return;
+ }
+
+ status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
+ vsi->idx, vsi->tc_cfg.ena_tc);
+ if (status)
+ dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
+ vsi->idx, vsi->agg_node->agg_id);
+ else
+ vsi->agg_node->num_vsis++;
+}
+
+/**
+ * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
+ * @vf: VF to rebuild host configuration on
+ */
+void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ ice_vf_set_host_trust_cfg(vf);
+
+ if (ice_vf_rebuild_host_mac_cfg(vf))
+ dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
+ vf->vf_id);
+
+ if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
+ dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
+ vf->vf_id);
+
+ if (ice_vf_rebuild_host_tx_rate_cfg(vf))
+ dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
+ vf->vf_id);
+
+ if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
+ dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
+ vf->vf_id);
+
+ /* rebuild aggregator node config for main VF VSI */
+ ice_vf_rebuild_aggregator_node_cfg(vsi);
+}
+
+/**
+ * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
+ * @vf: VF whose control VSI is being invalidated
+ */
+void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
+{
+ vf->ctrl_vsi_idx = ICE_NO_VSI;
+}
+
+/**
+ * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
+ * @vf: VF whose control VSI is being released
+ */
+void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
+{
+ ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
+ ice_vf_ctrl_invalidate_vsi(vf);
+}
+
+/**
+ * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
+ * @vf: VF to setup control VSI for
+ *
+ * Returns pointer to the successfully allocated VSI struct on success,
+ * otherwise returns NULL on failure.
+ */
+struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
+{
+ struct ice_vsi_cfg_params params = {};
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ params.type = ICE_VSI_CTRL;
+ params.pi = ice_vf_get_port_info(vf);
+ params.vf = vf;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ vsi = ice_vsi_setup(pf, &params);
+ if (!vsi) {
+ dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
+ ice_vf_ctrl_invalidate_vsi(vf);
+ }
+
+ return vsi;
+}
+
+/**
+ * ice_vf_init_host_cfg - Initialize host admin configuration
+ * @vf: VF to initialize
+ * @vsi: the VSI created at initialization
+ *
+ * Initialize the VF host configuration. Called only during VF creation to
+ * set up VLAN 0, add the VF VSI broadcast filter, and set up spoof checking.
+ */
+int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vf->pf;
+ u8 broadcast[ETH_ALEN];
+ struct device *dev;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+
+ err = ice_vsi_add_vlan_zero(vsi);
+ if (err) {
+ dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
+ vf->vf_id);
+ return err;
+ }
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ err = vlan_ops->ena_rx_filtering(vsi);
+ if (err) {
+ dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
+ vf->vf_id);
+ return err;
+ }
+
+ eth_broadcast_addr(broadcast);
+ err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ vf->num_mac = 1;
+
+ err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
+ if (err) {
+ dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
+ vf->vf_id);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
+ * @vf: VF to remove access to VSI for
+ */
+void ice_vf_invalidate_vsi(struct ice_vf *vf)
+{
+ vf->lan_vsi_idx = ICE_NO_VSI;
+ vf->lan_vsi_num = ICE_NO_VSI;
+}
+
+/**
+ * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
+ * @vf: pointer to the VF structure
+ *
+ * Release the VF associated with this VSI and then invalidate the VSI
+ * indexes.
+ */
+void ice_vf_vsi_release(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ ice_vsi_release(vsi);
+ ice_vf_invalidate_vsi(vf);
+}
+
+/**
+ * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
+ * @vf: VF to set in initialized state
+ *
+ * After this function the VF will be ready to receive/handle the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message
+ */
+void ice_vf_set_initialized(struct ice_vf *vf)
+{
+ ice_set_vf_state_qs_dis(vf);
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
new file mode 100644
index 000000000000..e3cda6fb71ab
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_VF_LIB_H_
+#define _ICE_VF_LIB_H_
+
+#include <linux/types.h>
+#include <linux/hashtable.h>
+#include <linux/bitmap.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <net/devlink.h>
+#include <linux/avf/virtchnl.h>
+#include "ice_type.h"
+#include "ice_virtchnl_fdir.h"
+#include "ice_vsi_vlan_ops.h"
+
+#define ICE_MAX_SRIOV_VFS 256
+
+/* VF resource constraints */
+#define ICE_MAX_RSS_QS_PER_VF 16
+
+struct ice_pf;
+struct ice_vf;
+struct ice_virtchnl_ops;
+
+/* VF capabilities */
+enum ice_virtchnl_cap {
+ ICE_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
+};
+
+/* Specific VF states */
+enum ice_vf_states {
+ ICE_VF_STATE_INIT = 0, /* PF is initializing VF */
+ ICE_VF_STATE_ACTIVE, /* VF resources are allocated for use */
+ ICE_VF_STATE_QS_ENA, /* VF queue(s) enabled */
+ ICE_VF_STATE_DIS,
+ ICE_VF_STATE_MC_PROMISC,
+ ICE_VF_STATE_UC_PROMISC,
+ ICE_VF_STATES_NBITS
+};
+
+struct ice_time_mac {
+ unsigned long time_modified;
+ u8 addr[ETH_ALEN];
+};
+
+/* VF MDD events print structure */
+struct ice_mdd_vf_events {
+ u16 count; /* total count of Rx|Tx events */
+ /* count number of the last printed event */
+ u16 last_printed;
+};
+
+/* VF operations */
+struct ice_vf_ops {
+ enum ice_disq_rst_src reset_type;
+ void (*free)(struct ice_vf *vf);
+ void (*clear_reset_state)(struct ice_vf *vf);
+ void (*clear_mbx_register)(struct ice_vf *vf);
+ void (*trigger_reset_register)(struct ice_vf *vf, bool is_vflr);
+ bool (*poll_reset_status)(struct ice_vf *vf);
+ void (*clear_reset_trigger)(struct ice_vf *vf);
+ void (*irq_close)(struct ice_vf *vf);
+ int (*create_vsi)(struct ice_vf *vf);
+ void (*post_vsi_rebuild)(struct ice_vf *vf);
+};
+
+/* Virtchnl/SR-IOV config info */
+struct ice_vfs {
+ DECLARE_HASHTABLE(table, 8); /* table of VF entries */
+ struct mutex table_lock; /* Lock for protecting the hash table */
+ u16 num_supported; /* max supported VFs on this PF */
+ u16 num_qps_per; /* number of queue pairs per VF */
+ u16 num_msix_per; /* number of MSI-X vectors per VF */
+ unsigned long last_printed_mdd_jiffies; /* MDD message rate limit */
+};
+
+/* VF information structure */
+struct ice_vf {
+ struct hlist_node entry;
+ struct rcu_head rcu;
+ struct kref refcnt;
+ struct ice_pf *pf;
+
+ /* Used during virtchnl message handling and NDO ops against the VF
+ * that will trigger a VFR
+ */
+ struct mutex cfg_lock;
+
+ u16 vf_id; /* VF ID in the PF space */
+ u16 lan_vsi_idx; /* index into PF struct */
+ u16 ctrl_vsi_idx;
+ struct ice_vf_fdir fdir;
+ /* first vector index of this VF in the PF space */
+ int first_vector_idx;
+ struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
+ struct virtchnl_version_info vf_ver;
+ u32 driver_caps; /* reported by VF driver */
+ u8 dev_lan_addr[ETH_ALEN];
+ u8 hw_lan_addr[ETH_ALEN];
+ struct ice_time_mac legacy_last_added_umac;
+ DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ struct ice_vlan port_vlan_info; /* Port VLAN ID, QoS, and TPID */
+ struct virtchnl_vlan_caps vlan_v2_caps;
+ struct ice_mbx_vf_info mbx_info;
+ u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
+ u8 trusted:1;
+ u8 spoofchk:1;
+ u8 link_forced:1;
+ u8 link_up:1; /* only valid if VF link is forced */
+ /* VSI indices - actual VSI pointers are maintained in the PF structure
+ * When assigned, these will be non-zero, because VSI 0 is always
+ * the main LAN VSI for the PF.
+ */
+ u16 lan_vsi_num; /* ID as used by firmware */
+ unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
+ unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
+ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
+
+ unsigned long vf_caps; /* VF's adv. capabilities */
+ u8 num_req_qs; /* num of queue pairs requested by VF */
+ u16 num_mac;
+ u16 num_vf_qs; /* num of queue configured per VF */
+ struct ice_mdd_vf_events mdd_rx_events;
+ struct ice_mdd_vf_events mdd_tx_events;
+ DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
+
+ struct ice_repr *repr;
+ const struct ice_virtchnl_ops *virtchnl_ops;
+ const struct ice_vf_ops *vf_ops;
+
+ /* devlink port data */
+ struct devlink_port devlink_port;
+};
+
+/* Flags for controlling behavior of ice_reset_vf */
+enum ice_vf_reset_flags {
+ ICE_VF_RESET_VFLR = BIT(0), /* Indicate a VFLR reset */
+ ICE_VF_RESET_NOTIFY = BIT(1), /* Notify VF prior to reset */
+ ICE_VF_RESET_LOCK = BIT(2), /* Acquire the VF cfg_lock */
+};
+
+static inline u16 ice_vf_get_port_vlan_id(struct ice_vf *vf)
+{
+ return vf->port_vlan_info.vid;
+}
+
+static inline u8 ice_vf_get_port_vlan_prio(struct ice_vf *vf)
+{
+ return vf->port_vlan_info.prio;
+}
+
+static inline bool ice_vf_is_port_vlan_ena(struct ice_vf *vf)
+{
+ return (ice_vf_get_port_vlan_id(vf) || ice_vf_get_port_vlan_prio(vf));
+}
+
+static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
+{
+ return vf->port_vlan_info.tpid;
+}
+
+/* VF Hash Table access functions
+ *
+ * These functions provide abstraction for interacting with the VF hash table.
+ * In general, direct access to the hash table should be avoided outside of
+ * these functions where possible.
+ *
+ * The VF entries in the hash table are protected by reference counting to
+ * track lifetime of accesses from the table. The ice_get_vf_by_id() function
+ * obtains a reference to the VF structure which must be dropped by using
+ * ice_put_vf().
+ */
+
+/**
+ * ice_for_each_vf - Iterate over each VF entry
+ * @pf: pointer to the PF private structure
+ * @bkt: bucket index used for iteration
+ * @vf: pointer to the VF entry currently being processed in the loop
+ *
+ * The bkt variable is an unsigned integer iterator used to traverse the VF
+ * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
+ * Use vf->vf_id to get the id number if needed.
+ *
+ * The caller is expected to be under the table_lock mutex for the entire
+ * loop. Use this iterator if your loop is long or if it might sleep.
+ */
+#define ice_for_each_vf(pf, bkt, vf) \
+ hash_for_each((pf)->vfs.table, (bkt), (vf), entry)
+
+/**
+ * ice_for_each_vf_rcu - Iterate over each VF entry protected by RCU
+ * @pf: pointer to the PF private structure
+ * @bkt: bucket index used for iteration
+ * @vf: pointer to the VF entry currently being processed in the loop
+ *
+ * The bkt variable is an unsigned integer iterator used to traverse the VF
+ * entries. It is *not* guaranteed to be the VF's vf_id. Do not assume it is.
+ * Use vf->vf_id to get the id number if needed.
+ *
+ * The caller is expected to be under rcu_read_lock() for the entire loop.
+ * Only use this iterator if your loop is short and you can guarantee it does
+ * not sleep.
+ */
+#define ice_for_each_vf_rcu(pf, bkt, vf) \
+ hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry)
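+
+/* Illustrative usage sketch for the accessors above; do_something() and
+ * do_something_slow() are placeholders for caller-specific work:
+ *
+ *	struct ice_vf *vf;
+ *	unsigned int bkt;
+ *
+ *	mutex_lock(&pf->vfs.table_lock);
+ *	ice_for_each_vf(pf, bkt, vf)
+ *		do_something_slow(vf);
+ *	mutex_unlock(&pf->vfs.table_lock);
+ *
+ * For a single VF looked up by ID, take and drop a reference instead:
+ *
+ *	vf = ice_get_vf_by_id(pf, vf_id);
+ *	if (!vf)
+ *		return;
+ *	do_something(vf);
+ *	ice_put_vf(vf);
+ */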
+
+#ifdef CONFIG_PCI_IOV
+struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id);
+void ice_put_vf(struct ice_vf *vf);
+bool ice_has_vfs(struct ice_pf *pf);
+u16 ice_get_num_vfs(struct ice_pf *pf);
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
+bool ice_is_vf_disabled(struct ice_vf *vf);
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+void ice_set_vf_state_dis(struct ice_vf *vf);
+bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
+void
+ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
+ u8 *ucast_m, u8 *mcast_m);
+int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
+int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
+int ice_reset_vf(struct ice_vf *vf, u32 flags);
+void ice_reset_all_vfs(struct ice_pf *pf);
+#else /* CONFIG_PCI_IOV */
+static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
+{
+ return NULL;
+}
+
+static inline void ice_put_vf(struct ice_vf *vf)
+{
+}
+
+static inline bool ice_has_vfs(struct ice_pf *pf)
+{
+ return false;
+}
+
+static inline u16 ice_get_num_vfs(struct ice_pf *pf)
+{
+ return 0;
+}
+
+static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+ return NULL;
+}
+
+static inline bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ return true;
+}
+
+static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ice_set_vf_state_dis(struct ice_vf *vf)
+{
+}
+
+static inline bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf)
+{
+ return false;
+}
+
+static inline int
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_reset_vf(struct ice_vf *vf, u32 flags)
+{
+ return 0;
+}
+
+static inline void ice_reset_all_vfs(struct ice_pf *pf)
+{
+}
+#endif /* !CONFIG_PCI_IOV */
+
+#endif /* _ICE_VF_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
new file mode 100644
index 000000000000..6f3293b793b5
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_VF_LIB_PRIVATE_H_
+#define _ICE_VF_LIB_PRIVATE_H_
+
+#include "ice_vf_lib.h"
+
+/* This header file is for exposing functions in ice_vf_lib.c to other files
+ * which are also conditionally compiled depending on CONFIG_PCI_IOV.
+ * Functions which may be used by other files should be exposed as part of
+ * ice_vf_lib.h
+ *
+ * Functions in this file are exposed only when CONFIG_PCI_IOV is enabled, and
+ * thus this header must not be included by .c files which may be compiled
+ * with CONFIG_PCI_IOV disabled.
+ *
+ * To avoid this, only include this header file directly within .c files that
+ * are conditionally enabled in the "ice-$(CONFIG_PCI_IOV)" block.
+ */
+
+#ifndef CONFIG_PCI_IOV
+#warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files"
+#endif
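+
+/* As an illustration (object list abridged), such files are the ones built
+ * from the conditional block in the ice Makefile:
+ *
+ *	ice-$(CONFIG_PCI_IOV) += ice_sriov.o ice_vf_lib.o ice_virtchnl.o
+ */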
+
+void ice_initialize_vf_entry(struct ice_vf *vf);
+void ice_dis_vf_qs(struct ice_vf *vf);
+int ice_check_vf_init(struct ice_vf *vf);
+enum virtchnl_status_code ice_err_to_virt_err(int err);
+struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf);
+int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable);
+bool ice_is_vf_trusted(struct ice_vf *vf);
+bool ice_vf_has_no_qs_ena(struct ice_vf *vf);
+bool ice_is_vf_link_up(struct ice_vf *vf);
+void ice_vf_rebuild_host_cfg(struct ice_vf *vf);
+void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf);
+void ice_vf_ctrl_vsi_release(struct ice_vf *vf);
+struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
+int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi);
+void ice_vf_invalidate_vsi(struct ice_vf *vf);
+void ice_vf_vsi_release(struct ice_vf *vf);
+void ice_vf_set_initialized(struct ice_vf *vf);
+
+#endif /* _ICE_VF_LIB_PRIVATE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
new file mode 100644
index 000000000000..40cb4ba0789c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_vf_mbx.h"
+
+/**
+ * ice_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: VF ID to send the message to
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send a message to the VF driver (0x0802) using the mailbox
+ * queue; the message is sent asynchronously via the
+ * ice_sq_send_cmd() function
+ */
+int
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_pf_vf_msg *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+
+ cmd = &desc.params.virt;
+ cmd->id = cpu_to_le32(vfid);
+
+ desc.cookie_high = cpu_to_le32(v_opcode);
+ desc.cookie_low = cpu_to_le32(v_retval);
+
+ if (msglen)
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
+
+static const u32 ice_legacy_aq_to_vc_speed[] = {
+ VIRTCHNL_LINK_SPEED_100MB, /* BIT(0) */
+ VIRTCHNL_LINK_SPEED_100MB,
+ VIRTCHNL_LINK_SPEED_1GB,
+ VIRTCHNL_LINK_SPEED_1GB,
+ VIRTCHNL_LINK_SPEED_1GB,
+ VIRTCHNL_LINK_SPEED_10GB,
+ VIRTCHNL_LINK_SPEED_20GB,
+ VIRTCHNL_LINK_SPEED_25GB,
+ VIRTCHNL_LINK_SPEED_40GB,
+ VIRTCHNL_LINK_SPEED_40GB,
+ VIRTCHNL_LINK_SPEED_40GB,
+};
+
+/**
+ * ice_conv_link_speed_to_virtchnl
+ * @adv_link_support: determines the format of the returned link speed
+ * @link_speed: variable containing the link_speed to be converted
+ *
+ * Convert link speed supported by HW to link speed supported by virtchnl.
+ * If adv_link_support is true, then return link speed in Mbps. Else return
+ * link speed as a VIRTCHNL_LINK_SPEED_* cast to a u32. Note that the caller
+ * needs to cast back to an enum virtchnl_link_speed in the case where
+ * adv_link_support is false, but when adv_link_support is true the caller can
+ * expect the speed in Mbps.
+ */
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+{
+ /* convert a BIT() value into an array index */
+ u32 index = fls(link_speed) - 1;
+
+ if (adv_link_support)
+ return ice_get_link_speed(index);
+ else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed))
+ /* Virtchnl speeds are not defined for every speed supported in
+ * the hardware. To maintain compatibility with older AVF
+ * drivers, while reporting the speed the new speed values are
+ * resolved to the closest known virtchnl speeds
+ */
+ return ice_legacy_aq_to_vc_speed[index];
+
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
+}
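+
+/* For example, assuming the usual ICE_AQ_LINK_SPEED_* bit layout, a
+ * link_speed of ICE_AQ_LINK_SPEED_25GB resolves to VIRTCHNL_LINK_SPEED_25GB
+ * for a legacy VF (adv_link_support == false), or to the speed in Mbps
+ * (25000) when adv_link_support is true.
+ */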
+
+/* The mailbox overflow detection algorithm helps to check if there
+ * is a possibility of a malicious VF transmitting too many MBX messages to the
+ * PF.
+ * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
+ * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
+ * The struct ice_mbx_snapshot helps to track and traverse a static window of
+ * messages within the mailbox queue while looking for a malicious VF.
+ *
+ * 2. When the caller starts processing its mailbox queue in response to an
+ * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
+ * the algorithm can be run for the first time for that interrupt. This
+ * requires calling ice_mbx_reset_snapshot() as well as calling
+ * ice_mbx_reset_vf_info() for each VF tracking structure.
+ *
+ * 3. For every message read by the caller from the MBX Queue, the caller must
+ * call the detection algorithm's entry function ice_mbx_vf_state_handler().
+ * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
+ * filled as it is required to be passed to the algorithm.
+ *
+ * 4. Every time a message is read from the MBX queue, a tracking structure
+ * for the VF must be passed to the state handler. The boolean output
+ * report_malvf from ice_mbx_vf_state_handler() serves as an indicator to the
+ * caller whether it must report this VF as malicious or not.
+ *
+ * 5. When a VF is identified to be malicious, the caller can send a message
+ * to the system administrator.
+ *
+ * 6. The PF is responsible for maintaining the struct ice_mbx_vf_info
+ * structure for each VF. The PF should clear the VF tracking structure if the
+ * VF is reset. When a VF is shut down and brought back up, we will then
+ * assume that the new VF is not malicious and may report it again if we
+ * detect it again.
+ *
+ * 7. The function ice_mbx_reset_snapshot() is called to reset the information
+ * in ice_mbx_snapshot for every new mailbox interrupt handled.
+ */
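+
+/* Illustrative caller sketch of the sequence above; warn_admin_about() is a
+ * placeholder for however the caller reports a malicious VF. For each
+ * message read from the mailbox queue, after filling a struct ice_mbx_data
+ * (mbx_data below) as described in step 3:
+ *
+ *	bool report_malvf;
+ *
+ *	if (!ice_mbx_vf_state_handler(hw, &mbx_data, &vf->mbx_info,
+ *				      &report_malvf) && report_malvf)
+ *		warn_admin_about(vf);
+ */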
+#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
+/* Use the highest unsigned 16-bit value, 0xFFFF, to indicate that
+ * the max messages check must be ignored in the algorithm
+ */
+#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
+
+/**
+ * ice_mbx_reset_snapshot - Reset mailbox snapshot structure
+ * @snap: pointer to the mailbox snapshot
+ */
+static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
+{
+ struct ice_mbx_vf_info *vf_info;
+
+ /* Clear mbx_buf in the mailbox snapshot structure and set the
+ * mailbox snapshot state to a new capture.
+ */
+ memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf));
+ snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+
+ /* Reset message counts for all VFs to zero */
+ list_for_each_entry(vf_info, &snap->mbx_vf, list_entry)
+ vf_info->msg_count = 0;
+}
+
+/**
+ * ice_mbx_traverse - Pass through mailbox snapshot
+ * @hw: pointer to the HW struct
+ * @new_state: new algorithm state
+ *
+ * Traverse the static mailbox snapshot without checking
+ * for malicious VFs.
+ */
+static void
+ice_mbx_traverse(struct ice_hw *hw,
+ enum ice_mbx_snapshot_state *new_state)
+{
+ struct ice_mbx_snap_buffer_data *snap_buf;
+ u32 num_iterations;
+
+ snap_buf = &hw->mbx_snapshot.mbx_buf;
+
+ /* As the mailbox buffer is circular, apply a mask
+ * to the incremented iteration count.
+ */
+ num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);
+
+ /* Checking either of the below conditions to exit snapshot traversal:
+ * Condition-1: If the number of iterations in the mailbox is equal to
+ * the mailbox head which would indicate that we have reached the end
+ * of the static snapshot.
+ * Condition-2: If the maximum messages serviced in the mailbox for a
+ * given interrupt is the highest possible value then there is no need
+ * to check if the number of messages processed is equal to it. If not
+ * check if the number of messages processed is greater than or equal
+ * to the maximum number of mailbox entries serviced in current work item.
+ */
+ if (num_iterations == snap_buf->head ||
+ (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
+ ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
+ *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+}
+
+/**
+ * ice_mbx_detect_malvf - Detect malicious VF in snapshot
+ * @hw: pointer to the HW struct
+ * @vf_info: mailbox tracking structure for a VF
+ * @new_state: new algorithm state
+ * @is_malvf: boolean output to indicate if VF is malicious
+ *
+ * This function tracks the number of asynchronous messages
+ * sent per VF and marks the VF as malicious if it exceeds
+ * the permissible number of messages to send.
+ */
+static int
+ice_mbx_detect_malvf(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info,
+ enum ice_mbx_snapshot_state *new_state,
+ bool *is_malvf)
+{
+ /* increment the message count for this VF */
+ vf_info->msg_count++;
+
+ if (vf_info->msg_count >= ICE_ASYNC_VF_MSG_THRESHOLD)
+ *is_malvf = true;
+
+ /* continue to iterate through the mailbox snapshot */
+ ice_mbx_traverse(hw, new_state);
+
+ return 0;
+}
+
+/**
+ * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
+ * @hw: pointer to the HW struct
+ * @mbx_data: pointer to structure containing mailbox data
+ * @vf_info: mailbox tracking structure for the VF in question
+ * @report_malvf: boolean output to indicate whether VF should be reported
+ *
+ * The function serves as an entry point for the malicious VF
+ * detection algorithm by handling the different states and state
+ * transitions of the algorithm:
+ * New snapshot: This state is entered when creating a new static
+ * snapshot. The data from any previous mailbox snapshot is
+ * cleared and a new capture of the mailbox head and tail is
+ * logged. This will be the new static snapshot to detect
+ * asynchronous messages sent by VFs. On capturing the snapshot
+ * and depending on whether the number of pending messages in that
+ * snapshot exceed the watermark value, the state machine enters
+ * traverse or detect states.
+ * Traverse: If pending message count is below watermark then iterate
+ * through the snapshot without any action on VF.
+ * Detect: If pending message count exceeds watermark traverse
+ * the static snapshot and look for a malicious VF.
+ */
+int
+ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
+ struct ice_mbx_vf_info *vf_info, bool *report_malvf)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ struct ice_mbx_snap_buffer_data *snap_buf;
+ struct ice_ctl_q_info *cq = &hw->mailboxq;
+ enum ice_mbx_snapshot_state new_state;
+ bool is_malvf = false;
+ int status = 0;
+
+ if (!report_malvf || !mbx_data || !vf_info)
+ return -EINVAL;
+
+ *report_malvf = false;
+
+ /* When entering the mailbox state machine assume that the VF
+ * is not malicious until detected.
+ */
+ /* Check that the maximum number of messages allowed to be processed
+ * while servicing the current interrupt is greater than the defined
+ * AVF message threshold.
+ */
+ if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
+ return -EINVAL;
+
+ /* The watermark value should not be less than the threshold limit
+ * set for the number of asynchronous messages a VF can send to mailbox
+ * nor should it be greater than the maximum number of messages in the
+ * mailbox serviced in current interrupt.
+ */
+ if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
+ mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
+ return -EINVAL;
+
+ new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+ snap_buf = &snap->mbx_buf;
+
+ switch (snap_buf->state) {
+ case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
+ /* Clear any previously held data in mailbox snapshot structure. */
+ ice_mbx_reset_snapshot(snap);
+
+ /* Collect the pending ARQ count, number of messages processed and
+ * the maximum number of messages allowed to be processed from the
+ * Mailbox for current interrupt.
+ */
+ snap_buf->num_pending_arq = mbx_data->num_pending_arq;
+ snap_buf->num_msg_proc = mbx_data->num_msg_proc;
+ snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;
+
+ /* Capture a new static snapshot of the mailbox by logging the
+ * head and tail of snapshot and set num_iterations to the tail
+ * value to mark the start of the iteration through the snapshot.
+ */
+ snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
+ mbx_data->num_pending_arq);
+ snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
+ snap_buf->num_iterations = snap_buf->tail;
+
+ /* Pending ARQ messages returned by ice_clean_rq_elem
+ * is the difference between the head and tail of the
+ * mailbox queue. Comparing this value against the watermark
+ * helps to check if we potentially have malicious VFs.
+ */
+ if (snap_buf->num_pending_arq >=
+ mbx_data->async_watermark_val) {
+ new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+ status = ice_mbx_detect_malvf(hw, vf_info, &new_state, &is_malvf);
+ } else {
+ new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+ ice_mbx_traverse(hw, &new_state);
+ }
+ break;
+
+ case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
+ new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+ ice_mbx_traverse(hw, &new_state);
+ break;
+
+ case ICE_MAL_VF_DETECT_STATE_DETECT:
+ new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+ status = ice_mbx_detect_malvf(hw, vf_info, &new_state, &is_malvf);
+ break;
+
+ default:
+ new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+ status = -EIO;
+ }
+
+ snap_buf->state = new_state;
+
+ /* Only report a VF as malicious the first time we detect it */
+ if (is_malvf && !vf_info->malicious) {
+ vf_info->malicious = 1;
+ *report_malvf = true;
+ }
+
+ return status;
+}
+
+/**
+ * ice_mbx_clear_malvf - Clear VF mailbox info
+ * @vf_info: the mailbox tracking structure for a VF
+ *
+ * In case of a VF reset, this function shall be called to clear the VF's
+ * current mailbox tracking state.
+ */
+void ice_mbx_clear_malvf(struct ice_mbx_vf_info *vf_info)
+{
+ vf_info->malicious = 0;
+ vf_info->msg_count = 0;
+}
+
+/**
+ * ice_mbx_init_vf_info - Initialize a new VF mailbox tracking info
+ * @hw: pointer to the hardware structure
+ * @vf_info: the mailbox tracking info structure for a VF
+ *
+ * Initialize a VF mailbox tracking info structure and insert it into the
+ * snapshot list.
+ *
+ * If you remove the VF, you must also delete the associated VF info structure
+ * from the linked list.
+ */
+void ice_mbx_init_vf_info(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ ice_mbx_clear_malvf(vf_info);
+ list_add(&vf_info->list_entry, &snap->mbx_vf);
+}
+
+/**
+ * ice_mbx_init_snapshot - Initialize mailbox snapshot data
+ * @hw: pointer to the hardware structure
+ *
+ * Clear the mailbox snapshot structure and initialize the VF mailbox list.
+ */
+void ice_mbx_init_snapshot(struct ice_hw *hw)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ INIT_LIST_HEAD(&snap->mbx_vf);
+ ice_mbx_reset_snapshot(snap);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
new file mode 100644
index 000000000000..44bc030d17e0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Intel Corporation. */
+
+#ifndef _ICE_VF_MBX_H_
+#define _ICE_VF_MBX_H_
+
+#include "ice_type.h"
+#include "ice_controlq.h"
+
+/* Define the mailbox message threshold as 63 asynchronous
+ * pending messages. Normal VF functionality does not require
+ * sending more than 63 asynchronous pending messages.
+ */
+#define ICE_ASYNC_VF_MSG_THRESHOLD 63
+
+#ifdef CONFIG_PCI_IOV
+int
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+int
+ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
+ struct ice_mbx_vf_info *vf_info, bool *report_malvf);
+void ice_mbx_clear_malvf(struct ice_mbx_vf_info *vf_info);
+void ice_mbx_init_vf_info(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info);
+void ice_mbx_init_snapshot(struct ice_hw *hw);
+#else /* CONFIG_PCI_IOV */
+static inline int
+ice_aq_send_msg_to_vf(struct ice_hw __always_unused *hw,
+ u16 __always_unused vfid, u32 __always_unused v_opcode,
+ u32 __always_unused v_retval, u8 __always_unused *msg,
+ u16 __always_unused msglen,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+static inline u32
+ice_conv_link_speed_to_virtchnl(bool __always_unused adv_link_support,
+ u16 __always_unused link_speed)
+{
+ return 0;
+}
+
+static inline void ice_mbx_init_snapshot(struct ice_hw *hw)
+{
+}
+
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VF_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
new file mode 100644
index 000000000000..b1ffb81893d4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_vsi_vlan_ops.h"
+#include "ice_vsi_vlan_lib.h"
+#include "ice_vlan_mode.h"
+#include "ice.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_sriov.h"
+
+static int
+noop_vlan_arg(struct ice_vsi __always_unused *vsi,
+ struct ice_vlan __always_unused *vlan)
+{
+ return 0;
+}
+
+static int
+noop_vlan(struct ice_vsi __always_unused *vsi)
+{
+ return 0;
+}
+
+/**
+ * ice_vf_vsi_init_vlan_ops - Initialize default VSI VLAN ops for VF VSI
+ * @vsi: VF's VSI being configured
+ *
+ * If Double VLAN Mode (DVM) is enabled, assume that the VF supports the new
+ * VIRTCHNL_VF_OFFLOAD_VLAN_V2 capability and set up the VLAN ops accordingly.
+ * If SVM is enabled, maintain the same level of VLAN support as before
+ * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
+ */
+void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf = vsi->vf;
+
+ if (WARN_ON(!vf))
+ return;
+
+ if (ice_is_dvm_ena(&pf->hw)) {
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ /* outer VLAN ops regardless of port VLAN config */
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ /* setup outer VLAN ops */
+ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ /* all Rx traffic should be in the domain of the
+ * assigned port VLAN, so prevent disabling Rx VLAN
+ * filtering
+ */
+ vlan_ops->dis_rx_filtering = noop_vlan;
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
+ vlan_ops->add_vlan = noop_vlan_arg;
+ vlan_ops->del_vlan = noop_vlan_arg;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ } else {
+ vlan_ops->dis_rx_filtering =
+ ice_vsi_dis_rx_vlan_filtering;
+
+ if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
+ vlan_ops->ena_rx_filtering = noop_vlan;
+ else
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ }
+ } else {
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ /* inner VLAN ops regardless of port VLAN config */
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+ /* all Rx traffic should be in the domain of the
+ * assigned port VLAN, so prevent disabling Rx VLAN
+ * filtering
+ */
+ vlan_ops->dis_rx_filtering = noop_vlan;
+ } else {
+ vlan_ops->dis_rx_filtering =
+ ice_vsi_dis_rx_vlan_filtering;
+ if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
+ vlan_ops->ena_rx_filtering = noop_vlan;
+ else
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ }
+ }
+}
+
+/**
+ * ice_vf_vsi_cfg_dvm_legacy_vlan_mode - Config VLAN mode for old VFs in DVM
+ * @vsi: VF's VSI being configured
+ *
+ * This should only be called when Double VLAN Mode (DVM) is enabled, there
+ * is not a port VLAN enabled on this VF, and the VF negotiates
+ * VIRTCHNL_VF_OFFLOAD_VLAN.
+ *
+ * This function sets up the VF VSI's inner and outer ice_vsi_vlan_ops and also
+ * initializes software only VLAN mode (i.e. allow all VLANs). It also installs
+ * no-op implementations for any functions that may be called during the
+ * lifetime of the VF so these methods do nothing and succeed.
+ */
+void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_vf *vf = vsi->vf;
+ struct device *dev;
+
+ if (WARN_ON(!vf))
+ return;
+
+ dev = ice_pf_to_dev(vf->pf);
+
+ if (!ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf))
+ return;
+
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ /* Rx VLAN filtering always disabled to allow software offloaded VLANs
+ * for VFs that only support VIRTCHNL_VF_OFFLOAD_VLAN and don't have a
+ * port VLAN configured
+ */
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+ /* Don't fail when attempting to enable Rx VLAN filtering */
+ vlan_ops->ena_rx_filtering = noop_vlan;
+
+ /* Tx VLAN filtering always disabled to allow software offloaded VLANs
+ * for VFs that only support VIRTCHNL_VF_OFFLOAD_VLAN and don't have a
+ * port VLAN configured
+ */
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
+ /* Don't fail when attempting to enable Tx VLAN filtering */
+ vlan_ops->ena_tx_filtering = noop_vlan;
+
+ if (vlan_ops->dis_rx_filtering(vsi))
+ dev_dbg(dev, "Failed to disable Rx VLAN filtering for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+ if (vlan_ops->dis_tx_filtering(vsi))
+ dev_dbg(dev, "Failed to disable Tx VLAN filtering for old VF without VIRTHCNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ /* All outer VLAN offloads must be disabled */
+ vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+ vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+
+ if (vlan_ops->dis_stripping(vsi))
+ dev_dbg(dev, "Failed to disable outer VLAN stripping for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ if (vlan_ops->dis_insertion(vsi))
+ dev_dbg(dev, "Failed to disable outer VLAN insertion for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ /* All inner VLAN offloads must be disabled */
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+
+ if (vlan_ops->dis_stripping(vsi))
+ dev_dbg(dev, "Failed to disable inner VLAN stripping for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+
+ if (vlan_ops->dis_insertion(vsi))
+ dev_dbg(dev, "Failed to disable inner VLAN insertion for old VF without VIRTCHNL_VF_OFFLOAD_VLAN_V2 support\n");
+}
+
+/**
+ * ice_vf_vsi_cfg_svm_legacy_vlan_mode - Config VLAN mode for old VFs in SVM
+ * @vsi: VF's VSI being configured
+ *
+ * This should only be called when Single VLAN Mode (SVM) is enabled, there is
+ * not a port VLAN enabled on this VF, and the VF negotiates
+ * VIRTCHNL_VF_OFFLOAD_VLAN.
+ *
+ * All of the normal SVM VLAN ops are identical for this case. However, Rx
+ * VLAN filtering should be turned off by default in this case.
+ */
+void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi)
+{
+ struct ice_vf *vf = vsi->vf;
+
+ if (WARN_ON(!vf))
+ return;
+
+ if (ice_is_dvm_ena(&vsi->back->hw) || ice_vf_is_port_vlan_ena(vf))
+ return;
+
+ if (vsi->inner_vlan_ops.dis_rx_filtering(vsi))
+ dev_dbg(ice_pf_to_dev(vf->pf), "Failed to disable Rx VLAN filtering for old VF with VIRTCHNL_VF_OFFLOAD_VLAN support\n");
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
new file mode 100644
index 000000000000..875a4e615f39
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VF_VSI_VLAN_OPS_H_
+#define _ICE_VF_VSI_VLAN_OPS_H_
+
+#include "ice_vsi_vlan_ops.h"
+
+struct ice_vsi;
+
+void ice_vf_vsi_cfg_dvm_legacy_vlan_mode(struct ice_vsi *vsi);
+void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi);
+
+#ifdef CONFIG_PCI_IOV
+void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+#else
+static inline void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) { }
+#endif /* CONFIG_PCI_IOV */
+#endif /* _ICE_VF_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
new file mode 100644
index 000000000000..97243c616d5d
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -0,0 +1,4068 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "ice_virtchnl.h"
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice_virtchnl_allowlist.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_vlan.h"
+#include "ice_flex_pipe.h"
+#include "ice_dcb_lib.h"
+
+#define FIELD_SELECTOR(proto_hdr_field) \
+ BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
+
+struct ice_vc_hdr_match_type {
+ u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
+ u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
+};
+
+static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
+ {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
+ {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
+ {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
+ {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
+ {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
+ {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ ICE_FLOW_SEG_HDR_GTPU_DWN},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ ICE_FLOW_SEG_HDR_GTPU_UP},
+ {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
+ {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
+ {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
+ {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
+};
+
+struct ice_vc_hash_field_match_type {
+ u32 vc_hdr; /* virtchnl headers
+ * (VIRTCHNL_PROTO_HDR_XXX)
+ */
+ u32 vc_hash_field; /* virtchnl hash fields selector
+ * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
+ */
+ u64 ice_hash_field; /* ice hash fields
+ * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
+ */
+};
+
+static const struct
+ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ ICE_FLOW_HASH_ETH},
+ {VIRTCHNL_PROTO_HDR_ETH,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
+ {VIRTCHNL_PROTO_HDR_S_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
+ {VIRTCHNL_PROTO_HDR_C_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ ICE_FLOW_HASH_IPV4},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ ICE_FLOW_HASH_IPV6},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ ICE_FLOW_HASH_TCP_PORT},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ ICE_FLOW_HASH_UDP_PORT},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ ICE_FLOW_HASH_SCTP_PORT},
+ {VIRTCHNL_PROTO_HDR_PPPOE,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
+ {VIRTCHNL_PROTO_HDR_L2TPV3,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
+ {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
+ {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
+};
+
+/**
+ * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
+ * @pf: pointer to the PF structure
+ * @v_opcode: operation code
+ * @v_retval: return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ */
+static void
+ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
+{
+ struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
+ /* Not all VFs are enabled so skip the ones that are not */
+ if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ continue;
+
+ /* Ignore return value on purpose - a given VF may fail, but
+ * we need to keep going and send to all of them
+ */
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
+ msglen, NULL);
+ }
+ mutex_unlock(&pf->vfs.table_lock);
+}
+
+/**
+ * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
+ * @vf: pointer to the VF structure
+ * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
+ * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
+ * @link_up: whether or not to set the link up/down
+ */
+static void
+ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
+ int ice_link_speed, bool link_up)
+{
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
+ pfe->event_data.link_event_adv.link_status = link_up;
+ /* Speed in Mbps */
+ pfe->event_data.link_event_adv.link_speed =
+ ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
+ } else {
+ pfe->event_data.link_event.link_status = link_up;
+ /* Legacy method for virtchnl link speeds */
+ pfe->event_data.link_event.link_speed =
+ (enum virtchnl_link_speed)
+ ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
+ }
+}
+
+/**
+ * ice_vc_notify_vf_link_state - Inform a VF of link status
+ * @vf: pointer to the VF structure
+ *
+ * send a link status message to a single VF
+ */
+void ice_vc_notify_vf_link_state(struct ice_vf *vf)
+{
+ struct virtchnl_pf_event pfe = { 0 };
+ struct ice_hw *hw = &vf->pf->hw;
+
+ pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ pfe.severity = PF_EVENT_SEVERITY_INFO;
+
+ if (ice_is_vf_link_up(vf))
+ ice_set_pfe_link(vf, &pfe,
+ hw->port_info->phy.link_info.link_speed, true);
+ else
+ ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
+
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
+ sizeof(pfe), NULL);
+}
+
+/**
+ * ice_vc_notify_link_state - Inform all VFs on a PF of link status
+ * @pf: pointer to the PF structure
+ */
+void ice_vc_notify_link_state(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf)
+ ice_vc_notify_vf_link_state(vf);
+ mutex_unlock(&pf->vfs.table_lock);
+}
+
+/**
+ * ice_vc_notify_reset - Send pending reset message to all VFs
+ * @pf: pointer to the PF structure
+ *
+ * indicate a pending reset to all VFs on a given PF
+ */
+void ice_vc_notify_reset(struct ice_pf *pf)
+{
+ struct virtchnl_pf_event pfe;
+
+ if (!ice_has_vfs(pf))
+ return;
+
+ pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
+ pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
+ ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
+ (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
+}
+
+/**
+ * ice_vc_send_msg_to_vf - Send message to VF
+ * @vf: pointer to the VF info
+ * @v_opcode: virtual channel opcode
+ * @v_retval: virtual channel return value
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * send msg to VF
+ */
+int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
+{
+ struct device *dev;
+ struct ice_pf *pf;
+ int aq_ret;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+
+ aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
+ msg, msglen, NULL);
+ if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
+ dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
+ vf->vf_id, aq_ret,
+ ice_aq_str(pf->hw.mailboxq.sq_last_status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_get_ver_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request the API version used by the PF
+ */
+static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_version_info info = {
+ VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
+ };
+
+ vf->vf_ver = *(struct virtchnl_version_info *)msg;
+ /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+ if (VF_IS_V10(&vf->vf_ver))
+ info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
+ sizeof(struct virtchnl_version_info));
+}
+
+/**
+ * ice_vc_get_max_frame_size - get max frame size allowed for VF
+ * @vf: VF used to determine max frame size
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN, so the PF needs to account for this both in max frame
+ * size checks and in the max frame size it sends to the VF.
+ */
+static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ u16 max_frame_size;
+
+ max_frame_size = pi->phy.link_info.max_frame_size;
+
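+ /* A port VLAN tag consumes VLAN_HLEN bytes of the frame; the VF cannot
+ * see the port VLAN, so subtract it from the size reported to the VF
+ */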
+ if (ice_vf_is_port_vlan_ena(vf))
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
+
+/**
+ * ice_vc_get_vlan_caps
+ * @hw: pointer to the hw
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VSI
+ * @driver_caps: current driver caps
+ *
+ * Return 0 if no VLAN caps are supported, otherwise return the VLAN caps value
+ */
+static u32
+ice_vc_get_vlan_caps(struct ice_hw *hw, struct ice_vf *vf, struct ice_vsi *vsi,
+ u32 driver_caps)
+{
+ if (ice_is_eswitch_mode_switchdev(vf->pf))
+ /* In switchdev setting VLAN from VF isn't supported */
+ return 0;
+
+ if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN_V2) {
+ /* VLAN offloads based on current device configuration */
+ return VIRTCHNL_VF_OFFLOAD_VLAN_V2;
+ } else if (driver_caps & VIRTCHNL_VF_OFFLOAD_VLAN) {
+ /* allow VF to negotiate VIRTCHNL_VF_OFFLOAD_VLAN explicitly for
+ * these two conditions, which amounts to guest VLAN filtering
+ * and offloads being based on the inner VLAN or the
+ * inner/single VLAN respectively and don't allow VF to
+ * negotiate VIRTCHNL_VF_OFFLOAD_VLAN in any other cases
+ */
+ if (ice_is_dvm_ena(hw) && ice_vf_is_port_vlan_ena(vf)) {
+ return VIRTCHNL_VF_OFFLOAD_VLAN;
+ } else if (!ice_is_dvm_ena(hw) &&
+ !ice_vf_is_port_vlan_ena(vf)) {
+ /* configure backward compatible support for VFs that
+ * only support VIRTCHNL_VF_OFFLOAD_VLAN, the PF is
+ * configured in SVM, and no port VLAN is configured
+ */
+ ice_vf_vsi_cfg_svm_legacy_vlan_mode(vsi);
+ return VIRTCHNL_VF_OFFLOAD_VLAN;
+ } else if (ice_is_dvm_ena(hw)) {
+ /* configure software offloaded VLAN support when DVM
+ * is enabled, but no port VLAN is enabled
+ */
+ ice_vf_vsi_cfg_dvm_legacy_vlan_mode(vsi);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_get_vf_res_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to request its resources
+ */
+static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vf_resource *vfres = NULL;
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+ int len = 0;
+ int ret;
+
+ if (ice_check_vf_init(vf)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_vf_resource);
+
+ vfres = kzalloc(len, GFP_KERNEL);
+ if (!vfres) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+ if (VF_IS_V11(&vf->vf_ver))
+ vf->driver_caps = *(u32 *)msg;
+ else
+ vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ VIRTCHNL_VF_OFFLOAD_VLAN;
+
+ vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
+ vf->driver_caps);
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
+ } else {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ else
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ }
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
+
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
+
+ vfres->num_vsis = 1;
+ /* Tx and Rx queue counts are equal for a VF */
+ vfres->num_queue_pairs = vsi->num_txq;
+ vfres->max_vectors = vf->pf->vfs.num_msix_per;
+ vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
+ vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vfres->max_mtu = ice_vc_get_max_frame_size(vf);
+
+ vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+ vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+ vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
+ ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
+ vf->hw_lan_addr);
+
+ /* match guest capabilities */
+ vf->driver_caps = vfres->vf_cap_flags;
+
+ ice_vc_set_caps_allowlist(vf);
+ ice_vc_set_working_allowlist(vf);
+
+ set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
+
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
+ (u8 *)vfres, len);
+
+ kfree(vfres);
+ return ret;
+}
+
+/**
+ * ice_vc_reset_vf_msg
+ * @vf: pointer to the VF info
+ *
+ * called from the VF to reset itself; unlike other virtchnl messages,
+ * the PF driver doesn't send a response back to the VF
+ */
+static void ice_vc_reset_vf_msg(struct ice_vf *vf)
+{
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
+ ice_reset_vf(vf, 0);
+}
+
+/**
+ * ice_vc_isvalid_vsi_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VF relative VSI ID
+ *
+ * check for the valid VSI ID
+ */
+bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+
+ vsi = ice_find_vsi(pf, vsi_id);
+
+ return (vsi && (vsi->vf == vf));
+}
+
+/**
+ * ice_vc_isvalid_q_id
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI ID
+ * @qid: VSI relative queue ID
+ *
+ * check for the valid queue ID
+ */
+static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
+{
+ struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id);
+
+ /* allocated Tx and Rx queues should always be equal for a VF VSI */
+ return (vsi && (qid < vsi->alloc_txq));
+}
+
+/**
+ * ice_vc_isvalid_ring_len
+ * @ring_len: length of ring
+ *
+ * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
+ * or zero
+ */
+static bool ice_vc_isvalid_ring_len(u16 ring_len)
+{
+ return ring_len == 0 ||
+ (ring_len >= ICE_MIN_NUM_DESC &&
+ ring_len <= ICE_MAX_NUM_DESC &&
+ !(ring_len % ICE_REQ_DESC_MULTIPLE));
+}
+
+/**
+ * ice_vc_validate_pattern
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ *
+ * Validate whether the requested pattern is supported.
+ *
+ * Return: true on success, false on error.
+ */
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
+{
+ bool is_ipv4 = false;
+ bool is_ipv6 = false;
+ bool is_udp = false;
+ u16 ptype = -1;
+ int i = 0;
+
+ while (i < proto->count &&
+ proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
+ switch (proto->proto_hdr[i].type) {
+ case VIRTCHNL_PROTO_HDR_ETH:
+ ptype = ICE_PTYPE_MAC_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV4:
+ ptype = ICE_PTYPE_IPV4_PAY;
+ is_ipv4 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV6:
+ ptype = ICE_PTYPE_IPV6_PAY;
+ is_ipv6 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_UDP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_UDP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_UDP_PAY;
+ is_udp = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_TCP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_TCP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_TCP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_SCTP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_SCTP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_SCTP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_GTPU_IP:
+ case VIRTCHNL_PROTO_HDR_GTPU_EH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_GTPU;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_GTPU;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_L2TPV3:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_L2TPV3;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_L2TPV3;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_ESP:
+ if (is_ipv4)
+ ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
+ ICE_MAC_IPV4_ESP;
+ else if (is_ipv6)
+ ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
+ ICE_MAC_IPV6_ESP;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_AH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_AH;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_AH;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_PFCP:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_PFCP_SESSION;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_PFCP_SESSION;
+ goto out;
+ default:
+ break;
+ }
+ i++;
+ }
+
+out:
+ return ice_hw_ptype_ena(&vf->pf->hw, ptype);
+}
+
+/**
+ * ice_vc_parse_rss_cfg - parses hash fields and headers from
+ * a specific virtchnl RSS cfg
+ * @hw: pointer to the hardware
+ * @rss_cfg: pointer to the virtchnl RSS cfg
+ * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
+ * to configure
+ * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
+ *
+ * Return true if all the protocol header and hash fields in the RSS cfg could
+ * be parsed, else return false
+ *
+ * This function parses the virtchnl RSS cfg into the intended hash fields
+ * and protocol headers for the RSS configuration
+ */
+static bool
+ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
+ u32 *addl_hdrs, u64 *hash_flds)
+{
+ const struct ice_vc_hash_field_match_type *hf_list;
+ const struct ice_vc_hdr_match_type *hdr_list;
+ int i, hf_list_len, hdr_list_len;
+
+ hf_list = ice_vc_hash_field_list;
+ hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
+ hdr_list = ice_vc_hdr_list;
+ hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
+
+ for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
+ struct virtchnl_proto_hdr *proto_hdr =
+ &rss_cfg->proto_hdrs.proto_hdr[i];
+ bool hdr_found = false;
+ int j;
+
+ /* Find matched ice headers according to virtchnl headers. */
+ for (j = 0; j < hdr_list_len; j++) {
+ struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
+
+ if (proto_hdr->type == hdr_map.vc_hdr) {
+ *addl_hdrs |= hdr_map.ice_hdr;
+ hdr_found = true;
+ }
+ }
+
+ if (!hdr_found)
+ return false;
+
+ /* Find matched ice hash fields according to
+ * virtchnl hash fields.
+ */
+ for (j = 0; j < hf_list_len; j++) {
+ struct ice_vc_hash_field_match_type hf_map = hf_list[j];
+
+ if (proto_hdr->type == hf_map.vc_hdr &&
+ proto_hdr->field_selector == hf_map.vc_hash_field) {
+ *hash_flds |= hf_map.ice_hash_field;
+ break;
+ }
+ }
+ }
+
+ return true;
+}
+
+/**
+ * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
+ * RSS offloads
+ * @caps: VF driver negotiated capabilities
+ *
+ * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
+ * else return false
+ */
+static bool ice_vf_adv_rss_offload_ena(u32 caps)
+{
+ return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
+}
+
+/**
+ * ice_vc_handle_rss_cfg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the message buffer
+ * @add: add a RSS config if true, otherwise delete a RSS config
+ *
+ * This function adds/deletes a RSS config
+ */
+static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
+{
+ u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
+ struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto error_param;
+ }
+
+ if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
+ rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
+ rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
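+ /* The simple XOR hash algorithm is programmed through the VSI
+ * queueing option context instead of an RSS flow profile
+ */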
+ struct ice_vsi_ctx *ctx;
+ u8 lut_type, hash_type;
+ int status;
+
+ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
+ hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
+ ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto error_param;
+ }
+
+ ctx->info.q_opt_rss = ((lut_type <<
+ ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
+ ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
+ (hash_type &
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+
+ /* Preserve existing queueing option setting */
+ ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
+ ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
+ ctx->info.q_opt_tc = vsi->info.q_opt_tc;
+ ctx->info.q_opt_flags = vsi->info.q_opt_rss;
+
+ ctx->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+
+ status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
+ if (status) {
+ dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
+ status, ice_aq_str(hw->adminq.sq_last_status));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ } else {
+ vsi->info.q_opt_rss = ctx->info.q_opt_rss;
+ }
+
+ kfree(ctx);
+ } else {
+ u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ u64 hash_flds = ICE_HASH_INVALID;
+
+ if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
+ &hash_flds)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add) {
+ if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
+ addl_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
+ vsi->vsi_num, v_ret);
+ }
+ } else {
+ int status;
+
+ status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
+ addl_hdrs);
+ /* We just ignore -ENOENT, because if two configurations
+ * share the same profile, removing one of them actually
+ * removes both, since the profile is deleted.
+ */
+ if (status && status != -ENOENT) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
+ vf->vf_id, status);
+ }
+ }
+ }
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS key
+ */
+static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_set_rss_key(vsi, vrk->key))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS LUT
+ */
+static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_promiscuous_mode_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure VF VSIs promiscuous mode
+ */
+static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ bool rm_promisc, alluni = false, allmulti = false;
+ struct virtchnl_promisc_info *info =
+ (struct virtchnl_promisc_info *)msg;
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int mcast_err = 0, ucast_err = 0;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ u8 mcast_m, ucast_m;
+ struct device *dev;
+ int ret = 0;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ dev = ice_pf_to_dev(pf);
+ if (!ice_is_vf_trusted(vf)) {
+ dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
+ vf->vf_id);
+ /* Leave v_ret alone, lie to the VF on purpose. */
+ goto error_param;
+ }
+
+ if (info->flags & FLAG_VF_UNICAST_PROMISC)
+ alluni = true;
+
+ if (info->flags & FLAG_VF_MULTICAST_PROMISC)
+ allmulti = true;
+
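+ /* VLAN pruning (Rx VLAN filtering) is only re-enabled once the VF has
+ * left both unicast and multicast promiscuous mode
+ */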
+ rm_promisc = !allmulti && !alluni;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ if (rm_promisc)
+ ret = vlan_ops->ena_rx_filtering(vsi);
+ else
+ ret = vlan_ops->dis_rx_filtering(vsi);
+ if (ret) {
+ dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ ice_vf_get_promisc_masks(vf, vsi, &ucast_m, &mcast_m);
+
+ if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
+ if (alluni) {
+ /* in this case we're turning on promiscuous mode */
+ ret = ice_set_dflt_vsi(vsi);
+ } else {
+ /* in this case we're turning off promiscuous mode */
+ if (ice_is_dflt_vsi_in_use(vsi->port_info))
+ ret = ice_clear_dflt_vsi(vsi);
+ }
+
+ /* in this case we're turning on/off only
+ * allmulticast
+ */
+ if (allmulti)
+ mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
+ else
+ mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
+
+ if (ret) {
+ dev_err(dev, "Turning on/off promiscuous mode for VF %d failed, error: %d\n",
+ vf->vf_id, ret);
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ goto error_param;
+ }
+ } else {
+ if (alluni)
+ ucast_err = ice_vf_set_vsi_promisc(vf, vsi, ucast_m);
+ else
+ ucast_err = ice_vf_clear_vsi_promisc(vf, vsi, ucast_m);
+
+ if (allmulti)
+ mcast_err = ice_vf_set_vsi_promisc(vf, vsi, mcast_m);
+ else
+ mcast_err = ice_vf_clear_vsi_promisc(vf, vsi, mcast_m);
+
+ if (ucast_err || mcast_err)
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ }
+
+ if (!mcast_err) {
+ if (allmulti &&
+ !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
+ vf->vf_id);
+ else if (!allmulti &&
+ test_and_clear_bit(ICE_VF_STATE_MC_PROMISC,
+ vf->vf_states))
+ dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
+ vf->vf_id);
+ } else {
+ dev_err(dev, "Error while modifying multicast promiscuous mode for VF %u, error: %d\n",
+ vf->vf_id, mcast_err);
+ }
+
+ if (!ucast_err) {
+ if (alluni &&
+ !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
+ vf->vf_id);
+ else if (!alluni &&
+ test_and_clear_bit(ICE_VF_STATE_UC_PROMISC,
+ vf->vf_states))
+ dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
+ vf->vf_id);
+ } else {
+ dev_err(dev, "Error while modifying unicast promiscuous mode for VF %u, error: %d\n",
+ vf->vf_id, ucast_err);
+ }
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_get_stats_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to get VSI stats
+ */
+static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_eth_stats stats = { 0 };
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ ice_update_eth_stats(vsi);
+
+ stats = vsi->eth_stats;
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
+ (u8 *)&stats, sizeof(stats));
+}
+
+/**
+ * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Return true on successful validation, else false
+ */
+static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
+ vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->txq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_TQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_TQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->rxq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_RQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_RQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vc_ena_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to enable all or specific queue(s)
+ */
+static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Enable only Rx rings, Tx rings were enabled by the FW when the
+ * Tx queue group list was configured and the context bits were
+ * programmed using ice_vsi_cfg_txqs
+ */
+ q_map = vqs->rx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if enabled */
+ if (test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
+ set_bit(vf_q_id, vf->rxq_ena);
+ }
+
+ q_map = vqs->tx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if enabled */
+ if (test_bit(vf_q_id, vf->txq_ena))
+ continue;
+
+ ice_vf_ena_txq_interrupt(vsi, vf_q_id);
+ set_bit(vf_q_id, vf->txq_ena);
+ }
+
+ /* Set flag to indicate that queues are enabled */
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
+ set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vf_vsi_dis_single_txq - disable a single Tx queue
+ * @vf: VF to disable queue for
+ * @vsi: VSI for the VF
+ * @q_id: VF relative (0-based) queue ID
+ *
+ * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
+ * disabled then clear q_id bit in the enabled queues bitmap and return
+ * success. Otherwise return error.
+ */
+static int
+ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
+{
+ struct ice_txq_meta txq_meta = { 0 };
+ struct ice_tx_ring *ring;
+ int err;
+
+ if (!test_bit(q_id, vf->txq_ena))
+ dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+ q_id, vsi->vsi_num);
+
+ ring = vsi->tx_rings[q_id];
+ if (!ring)
+ return -EINVAL;
+
+ ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
+ q_id, vsi->vsi_num);
+ return err;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(q_id, vf->txq_ena);
+
+ return 0;
+}
+
+/**
+ * ice_vc_dis_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to disable all or specific queue(s)
+ */
+static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vqs->tx_queues) {
+ q_map = vqs->tx_queues;
+
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ }
+ }
+
+ q_map = vqs->rx_queues;
+ /* speed up Rx queue disable by batching them if possible */
+ if (q_map &&
+ bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
+ if (ice_vsi_stop_all_rx_rings(vsi)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
+ vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ } else if (q_map) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if not enabled */
+ if (!test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
+ true)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(vf_q_id, vf->rxq_ena);
+ }
+ }
+
+ /* Clear enabled queues flag */
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_cfg_interrupt
+ * @vf: pointer to the VF info
+ * @vsi: the VSI being configured
+ * @vector_id: vector ID
+ * @map: vector map for mapping vectors to queues
+ * @q_vector: structure for interrupt vector
+ *
+ * configure the IRQ to queue map
+ */
+static int
+ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
+ struct virtchnl_vector_map *map,
+ struct ice_q_vector *q_vector)
+{
+ u16 vsi_q_id, vsi_q_id_idx;
+ unsigned long qmap;
+
+ q_vector->num_ring_rx = 0;
+ q_vector->num_ring_tx = 0;
+
+ qmap = map->rxq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
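+ /* the bit position in the VF's queue bitmap is also the VSI-relative
+ * queue ID for an SR-IOV VSI
+ */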
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_rx++;
+ q_vector->rx.itr_idx = map->rxitr_idx;
+ vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
+ q_vector->rx.itr_idx);
+ }
+
+ qmap = map->txq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_tx++;
+ q_vector->tx.itr_idx = map->txitr_idx;
+ vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
+ q_vector->tx.itr_idx);
+ }
+
+ return VIRTCHNL_STATUS_SUCCESS;
+}
+
+/**
+ * ice_vc_cfg_irq_map_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the IRQ to queue map
+ */
+static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u16 num_q_vectors_mapped, vsi_id, vector_id;
+ struct virtchnl_irq_map_info *irqmap_info;
+ struct virtchnl_vector_map *map;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int i;
+
+ irqmap_info = (struct virtchnl_irq_map_info *)msg;
+ num_q_vectors_mapped = irqmap_info->num_vectors;
+
+ /* Check to make sure number of VF vectors mapped is not greater than
+ * number of VF vectors originally allocated, and check that
+ * there is actually at least a single VF queue vector mapped
+ */
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ pf->vfs.num_msix_per < num_q_vectors_mapped ||
+ !num_q_vectors_mapped) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < num_q_vectors_mapped; i++) {
+ struct ice_q_vector *q_vector;
+
+ map = &irqmap_info->vecmap[i];
+
+ vector_id = map->vector_id;
+ vsi_id = map->vsi_id;
+ /* vector_id is always 0-based for each VF, and can never be
+ * larger than or equal to the max allowed interrupts per VF
+ */
+ if (!(vector_id < pf->vfs.num_msix_per) ||
+ !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (!vector_id && (map->rxq_map || map->txq_map))) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* No need to map VF miscellaneous or rogue vector */
+ if (!vector_id)
+ continue;
+
+ /* Subtract the non-queue vector from the vector_id passed by the VF
+ * to get the VSI queue vector array index
+ */
+ q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
+ if (!q_vector) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* look out for an invalid queue index */
+ v_ret = (enum virtchnl_status_code)
+ ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
+ if (v_ret)
+ goto error_param;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the Rx/Tx queues
+ */
+static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vsi_queue_config_info *qci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int i = -1, q_idx;
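+ /* i starts at -1 so the unwind loop in the error path below is a no-op
+ * if a failure happens before any queue pair has been configured
+ */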
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto error_param;
+
+ if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
+ goto error_param;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ goto error_param;
+
+ if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
+ qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ qpi = &qci->qpair[i];
+ if (qpi->txq.vsi_id != qci->vsi_id ||
+ qpi->rxq.vsi_id != qci->vsi_id ||
+ qpi->rxq.queue_id != qpi->txq.queue_id ||
+ qpi->txq.headwb_enabled ||
+ !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
+ !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
+ !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
+ goto error_param;
+ }
+
+ q_idx = qpi->rxq.queue_id;
+
+ /* make sure selected "q_idx" is in valid range of queues
+ * for selected "vsi"
+ */
+ if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
+ goto error_param;
+ }
+
+ /* copy Tx queue info from VF into VSI */
+ if (qpi->txq.ring_len > 0) {
+ vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
+ vsi->tx_rings[i]->count = qpi->txq.ring_len;
+
+ /* Disable any existing queue first */
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
+ goto error_param;
+
+ /* Configure a queue with the requested settings */
+ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
+ vf->vf_id, i);
+ goto error_param;
+ }
+ }
+
+ /* copy Rx queue info from VF into VSI */
+ if (qpi->rxq.ring_len > 0) {
+ u16 max_frame_size = ice_vc_get_max_frame_size(vf);
+ u32 rxdid;
+
+ vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
+ vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+
+ if (qpi->rxq.databuffer_size != 0 &&
+ (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
+ qpi->rxq.databuffer_size < 1024))
+ goto error_param;
+ vsi->rx_buf_len = qpi->rxq.databuffer_size;
+ vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
+ if (qpi->rxq.max_pkt_size > max_frame_size ||
+ qpi->rxq.max_pkt_size < 64)
+ goto error_param;
+
+ vsi->max_frame = qpi->rxq.max_pkt_size;
+ /* add space for the port VLAN since the VF driver is
+ * not expected to account for it in the MTU
+ * calculation
+ */
+ if (ice_vf_is_port_vlan_ena(vf))
+ vsi->max_frame += VLAN_HLEN;
+
+ if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
+ vf->vf_id, i);
+ goto error_param;
+ }
+
+ /* If Rx flex desc is supported, select RXDID for Rx
+ * queues. Otherwise, use legacy 32byte descriptor
+ * format. Legacy 16byte descriptor is not supported.
+ * If this RXDID is selected, return error.
+ */
+ if (vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+ rxdid = qpi->rxq.rxdid;
+ if (!(BIT(rxdid) & pf->supported_rxdids))
+ goto error_param;
+ } else {
+ rxdid = ICE_RXDID_LEGACY_1;
+ }
+
+ ice_write_qrxflxp_cntxt(&vsi->back->hw,
+ vsi->rxq_map[q_idx],
+ rxdid, 0x03, false);
+ }
+ }
+
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+error_param:
+ /* disable whatever we can */
+ for (; i >= 0; i--) {
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
+ vf->vf_id, i);
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
+ vf->vf_id, i);
+ }
+
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
+}
+
+/**
+ * ice_can_vf_change_mac
+ * @vf: pointer to the VF info
+ *
+ * Return true if the VF is allowed to change its MAC filters, false otherwise
+ */
+static bool ice_can_vf_change_mac(struct ice_vf *vf)
+{
+ /* If the VF MAC address has been set administratively (via the
+ * ndo_set_vf_mac command), then deny permission to the VF to
+ * add/delete unicast MAC addresses, unless the VF is trusted
+ */
+ if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
+ * @vc_ether_addr: used to extract the type
+ */
+static u8
+ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
+{
+ return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
+}
+
+/**
+ * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
+ * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
+ */
+static bool
+ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
+{
+ u8 type = ice_vc_ether_addr_type(vc_ether_addr);
+
+ return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
+}
+
+/**
+ * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
+ * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
+ *
+ * This function should only be called when the MAC address in
+ * virtchnl_ether_addr is a valid unicast MAC
+ */
+static bool
+ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
+{
+ u8 type = ice_vc_ether_addr_type(vc_ether_addr);
+
+ return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
+}
+
+/**
+ * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to add
+ */
+static void
+ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
+{
+ u8 *mac_addr = vc_ether_addr->addr;
+
+ if (!is_valid_ether_addr(mac_addr))
+ return;
+
+ /* only allow legacy VF drivers to set the device and hardware MAC if it
+ * is zero and allow new VF drivers to set the hardware MAC if the type
+ * was correctly specified over VIRTCHNL
+ */
+ if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
+ is_zero_ether_addr(vf->hw_lan_addr)) ||
+ ice_is_vc_addr_primary(vc_ether_addr)) {
+ ether_addr_copy(vf->dev_lan_addr, mac_addr);
+ ether_addr_copy(vf->hw_lan_addr, mac_addr);
+ }
+
+ /* hardware and device MACs are already set, but it's possible that the
+ * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
+ * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
+ * away for the legacy VF driver case as it will be updated in the
+ * delete flow for this case
+ */
+ if (ice_is_vc_addr_legacy(vc_ether_addr)) {
+ ether_addr_copy(vf->legacy_last_added_umac.addr,
+ mac_addr);
+ vf->legacy_last_added_umac.time_modified = jiffies;
+ }
+}
+
+/**
+ * ice_vc_add_mac_addr - attempt to add the MAC address passed in
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VF's VSI
+ * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
+ */
+static int
+ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_ether_addr *vc_ether_addr)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ u8 *mac_addr = vc_ether_addr->addr;
+ int ret;
+
+ /* device MAC already added */
+ if (ether_addr_equal(mac_addr, vf->dev_lan_addr))
+ return 0;
+
+ if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
+ dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ return -EPERM;
+ }
+
+ ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
+ if (ret == -EEXIST) {
+ dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
+ vf->vf_id);
+ /* don't return since we might need to update
+ * the primary MAC in ice_vfhw_mac_add() below
+ */
+ } else if (ret) {
+ dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
+ mac_addr, vf->vf_id, ret);
+ return ret;
+ } else {
+ vf->num_mac++;
+ }
+
+ ice_vfhw_mac_add(vf, vc_ether_addr);
+
+ return ret;
+}
+
+/**
+ * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
+ * @last_added_umac: structure used to check expiration
+ */
+static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
+{
+#define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME msecs_to_jiffies(3000)
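+ /* a cached legacy MAC is only honored for a short window after it was
+ * added, since legacy VF drivers don't guarantee add/delete ordering
+ */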
+ return time_is_before_jiffies(last_added_umac->time_modified +
+ ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
+}
+
+/**
+ * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to check
+ *
+ * only update cached hardware MAC for legacy VF drivers on delete
+ * because we cannot guarantee order/type of MAC from the VF driver
+ */
+static void
+ice_update_legacy_cached_mac(struct ice_vf *vf,
+ struct virtchnl_ether_addr *vc_ether_addr)
+{
+ if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
+ ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
+ return;
+
+ ether_addr_copy(vf->dev_lan_addr, vf->legacy_last_added_umac.addr);
+ ether_addr_copy(vf->hw_lan_addr, vf->legacy_last_added_umac.addr);
+}
+
+/**
+ * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
+ */
+static void
+ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
+{
+ u8 *mac_addr = vc_ether_addr->addr;
+
+ if (!is_valid_ether_addr(mac_addr) ||
+ !ether_addr_equal(vf->dev_lan_addr, mac_addr))
+ return;
+
+ /* allow the device MAC to be repopulated in the add flow and don't
+ * clear the hardware MAC (i.e. hw_lan_addr) here as that is meant
+ * to be persistent on VM reboot and across driver unload/load, which
+ * won't work if we clear the hardware MAC here
+ */
+ eth_zero_addr(vf->dev_lan_addr);
+
+ ice_update_legacy_cached_mac(vf, vc_ether_addr);
+}
+
+/**
+ * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
+ * @vf: pointer to the VF info
+ * @vsi: pointer to the VF's VSI
+ * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
+ */
+static int
+ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_ether_addr *vc_ether_addr)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ u8 *mac_addr = vc_ether_addr->addr;
+ int status;
+
+ if (!ice_can_vf_change_mac(vf) &&
+ ether_addr_equal(vf->dev_lan_addr, mac_addr))
+ return 0;
+
+ status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
+ if (status == -ENOENT) {
+ dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
+ vf->vf_id);
+ return -ENOENT;
+ } else if (status) {
+ dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
+ mac_addr, vf->vf_id, status);
+ return -EIO;
+ }
+
+ ice_vfhw_mac_del(vf, vc_ether_addr);
+
+ vf->num_mac--;
+
+ return 0;
+}
+
+/**
+ * ice_vc_handle_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @set: true if MAC filters are being set, false otherwise
+ *
+ * add or remove guest MAC address filters
+ */
+static int
+ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
+{
+ int (*ice_vc_cfg_mac)
+ (struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_ether_addr *virtchnl_ether_addr);
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+ struct ice_pf *pf = vf->pf;
+ enum virtchnl_ops vc_op;
+ struct ice_vsi *vsi;
+ int i;
+
+ if (set) {
+ vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
+ ice_vc_cfg_mac = ice_vc_add_mac_addr;
+ } else {
+ vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
+ ice_vc_cfg_mac = ice_vc_del_mac_addr;
+ }
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ /* If this VF is not privileged, then we can't add more than a
+ * limited number of addresses. Check to make sure that the
+ * additions do not push us over the limit.
+ */
+ if (set && !ice_is_vf_trusted(vf) &&
+ (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
+ dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ for (i = 0; i < al->num_elements; i++) {
+ u8 *mac_addr = al->list[i].addr;
+ int result;
+
+ if (is_broadcast_ether_addr(mac_addr) ||
+ is_zero_ether_addr(mac_addr))
+ continue;
+
+ result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
+ if (result == -EEXIST || result == -ENOENT) {
+ continue;
+ } else if (result) {
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ goto handle_mac_exit;
+ }
+ }
+
+handle_mac_exit:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_add_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * add guest MAC address filter
+ */
+static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_handle_mac_addr_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_del_mac_addr_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove guest MAC address filter
+ */
+static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_handle_mac_addr_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_request_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is successful, PF will reset the VF and
+ * return 0. If unsuccessful, the PF will inform the VF of the number of
+ * available queue pairs in the virtchnl response.
+ */
+static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ u16 req_queues = vfres->num_queue_pairs;
+ struct ice_pf *pf = vf->pf;
+ u16 max_allowed_vf_queues;
+ u16 tx_rx_queue_left;
+ struct device *dev;
+ u16 cur_queues;
+
+ dev = ice_pf_to_dev(pf);
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ cur_queues = vf->num_vf_qs;
+ tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
+ ice_get_avail_rxq_count(pf));
+ max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
+ if (!req_queues) {
+ dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
+ vf->vf_id);
+ } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
+ dev_err(dev, "VF %d tried to request more than %d queues.\n",
+ vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
+ } else if (req_queues > cur_queues &&
+ req_queues - cur_queues > tx_rx_queue_left) {
+ dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
+ vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
+ vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
+ ICE_MAX_RSS_QS_PER_VF);
+ } else {
+ /* request is successful, then reset VF */
+ vf->num_req_qs = req_queues;
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ dev_info(dev, "VF %d granted request of %u queues.\n",
+ vf->vf_id, req_queues);
+ return 0;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
+ v_ret, (u8 *)vfres, sizeof(*vfres));
+}
+
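+/* Worked example of the clamping above, assuming ICE_MAX_RSS_QS_PER_VF is 16:
+ * with cur_queues = 4 and tx_rx_queue_left = 8, a request for 16 queues needs
+ * 12 more than the 8 available, so the VF is not reset and the reply carries
+ *
+ *	vfres->num_queue_pairs = min_t(u16, 4 + 8, 16);	// = 12
+ *
+ * whereas a request for 8 queues fits and triggers ICE_VF_RESET_NOTIFY.
+ */
+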
+/**
+ * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
+ * @caps: VF driver negotiated capabilities
+ *
+ * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
+ */
+static bool ice_vf_vlan_offload_ena(u32 caps)
+{
+ return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
+}
+
+/**
+ * ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
+ * @vf: VF used to determine if VLAN promiscuous config is allowed
+ */
+static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
+{
+ if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
+ test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, vf->pf->flags))
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
+ * @vsi: VF's VSI used to enable VLAN promiscuous mode
+ * @vlan: VLAN used to enable VLAN promiscuous
+ *
+ * This function should only be called if VLAN promiscuous mode is allowed,
+ * which can be determined via ice_is_vlan_promisc_allowed().
+ */
+static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+ int status;
+
+ status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
+ vlan->vid);
+ if (status && status != -EEXIST)
+ return status;
+
+ return 0;
+}
+
+/**
+ * ice_vf_dis_vlan_promisc - Disable Tx/Rx VLAN promiscuous for the VLAN
+ * @vsi: VF's VSI used to disable VLAN promiscuous mode for
+ * @vlan: VLAN used to disable VLAN promiscuous
+ *
+ * This function should only be called if VLAN promiscuous mode is allowed,
+ * which can be determined via ice_is_vlan_promisc_allowed().
+ */
+static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+ int status;
+
+ status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
+ vlan->vid);
+ if (status && status != -ENOENT)
+ return status;
+
+ return 0;
+}
+
+/**
+ * ice_vf_has_max_vlans - check if VF already has the max allowed VLAN filters
+ * @vf: VF to check against
+ * @vsi: VF's VSI
+ *
+ * If the VF is trusted then the VF is allowed to add as many VLANs as it
+ * wants to, so return false.
+ *
+ * When the VF is untrusted compare the number of non-zero VLANs + 1 to the max
+ * allowed VLANs for an untrusted VF. Return the result of this comparison.
+ */
+static bool ice_vf_has_max_vlans(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ if (ice_is_vf_trusted(vf))
+ return false;
+
+#define ICE_VF_ADDED_VLAN_ZERO_FLTRS 1
+ return ((ice_vsi_num_non_zero_vlans(vsi) +
+ ICE_VF_ADDED_VLAN_ZERO_FLTRS) >= ICE_MAX_VLAN_PER_VF);
+}
+
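+/* Worked example, assuming ICE_MAX_VLAN_PER_VF is 8: an untrusted VF with 7
+ * non-zero VLAN filters evaluates 7 + ICE_VF_ADDED_VLAN_ZERO_FLTRS >= 8 as
+ * true, so the next add is rejected; the "+ 1" accounts for the implicit
+ * VLAN 0 filter every VF gets.
+ */
+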
+/**
+ * ice_vc_process_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @add_v: Add VLAN if true, otherwise delete VLAN
+ *
+ * Process virtchnl op to add or remove programmed guest VLAN ID
+ */
+static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_filter_list *vfl =
+ (struct virtchnl_vlan_filter_list *)msg;
+ struct ice_pf *pf = vf->pf;
+ bool vlan_promisc = false;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int status = 0;
+ int i;
+
+ dev = ice_pf_to_dev(pf);
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ if (vfl->vlan_id[i] >= VLAN_N_VID) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "invalid VF VLAN id %d\n",
+ vfl->vlan_id[i]);
+ goto error_param;
+ }
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add_v && ice_vf_has_max_vlans(vf, vsi)) {
+ dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ vf->vf_id);
+ /* There is no need to let VF know about being not trusted,
+ * so we can just return success message here
+ */
+ goto error_param;
+ }
+
+ /* in DVM a VF can add/delete inner VLAN filters when
+ * VIRTCHNL_VF_OFFLOAD_VLAN is negotiated, so only reject in SVM
+ */
+ if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&pf->hw)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* in DVM VLAN promiscuous is based on the outer VLAN, which would be
+ * the port VLAN if VIRTCHNL_VF_OFFLOAD_VLAN was negotiated, so only
+ * allow vlan_promisc = true in SVM and if no port VLAN is configured
+ */
+ vlan_promisc = ice_is_vlan_promisc_allowed(vf) &&
+ !ice_is_dvm_ena(&pf->hw) &&
+ !ice_vf_is_port_vlan_ena(vf);
+
+ if (add_v) {
+ for (i = 0; i < vfl->num_elements; i++) {
+ u16 vid = vfl->vlan_id[i];
+ struct ice_vlan vlan;
+
+ if (ice_vf_has_max_vlans(vf, vsi)) {
+ dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ vf->vf_id);
+ /* There is no need to let VF know about being
+ * not trusted, so we can just return success
+ * message here as well.
+ */
+ goto error_param;
+ }
+
+ /* we add VLAN 0 by default for each VF so we can enable
+ * Tx VLAN anti-spoof without triggering MDD events so
+ * we don't need to add it again here
+ */
+ if (!vid)
+ continue;
+
+ vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
+ status = vsi->inner_vlan_ops.add_vlan(vsi, &vlan);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (!vlan_promisc && vid && !ice_is_dvm_ena(&pf->hw)) {
+ if (vf->spoofchk) {
+ status = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Enable VLAN anti-spoofing on VLAN ID: %d failed error-%d\n",
+ vid, status);
+ goto error_param;
+ }
+ }
+ if (vsi->inner_vlan_ops.ena_rx_filtering(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
+ vid, status);
+ goto error_param;
+ }
+ } else if (vlan_promisc) {
+ status = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
+ vid, status);
+ }
+ }
+ }
+ } else {
+ /* In case of an untrusted VF, the number of VLAN elements passed
+ * to the PF for removal might be greater than the number of VLAN
+ * filters programmed for that VF, so use the actual number of
+ * VLANs added earlier with the add VLAN opcode. This avoids
+ * removing a VLAN that doesn't exist, which would result in
+ * sending an erroneous failure message back to the VF.
+ */
+ int num_vf_vlan;
+
+ num_vf_vlan = vsi->num_vlan;
+ for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
+ u16 vid = vfl->vlan_id[i];
+ struct ice_vlan vlan;
+
+ /* we add VLAN 0 by default for each VF so we can enable
+ * Tx VLAN anti-spoof without triggering MDD events so
+ * we don't want a VIRTCHNL request to remove it
+ */
+ if (!vid)
+ continue;
+
+ vlan = ICE_VLAN(ETH_P_8021Q, vid, 0);
+ status = vsi->inner_vlan_ops.del_vlan(vsi, &vlan);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ vsi->inner_vlan_ops.dis_tx_filtering(vsi);
+ vsi->inner_vlan_ops.dis_rx_filtering(vsi);
+ }
+
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+ }
+ }
+
+error_param:
+ /* send the response to the VF */
+ if (add_v)
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
+ NULL, 0);
+ else
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_add_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Add and program guest VLAN ID
+ */
+static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_process_vlan_msg(vf, msg, true);
+}
+
+/**
+ * ice_vc_remove_vlan_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * remove programmed guest VLAN ID
+ */
+static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
+{
+ return ice_vc_process_vlan_msg(vf, msg, false);
+}
+
+/**
+ * ice_vc_ena_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Enable VLAN header stripping for a given VF
+ */
+static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_stripping
+ * @vf: pointer to the VF info
+ *
+ * Disable VLAN header stripping for a given VF
+ */
+static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vsi->inner_vlan_ops.dis_stripping(vsi))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_get_rss_hena - return the RSS HENA bits allowed by the hardware
+ * @vf: pointer to the VF info
+ */
+static int ice_vc_get_rss_hena(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_rss_hena *vrh = NULL;
+ int len = 0, ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_rss_hena);
+ vrh = kzalloc(len, GFP_KERNEL);
+ if (!vrh) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+
+ vrh->hena = ICE_DEFAULT_RSS_HENA;
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, v_ret,
+ (u8 *)vrh, len);
+ kfree(vrh);
+ return ret;
+}
+
+/**
+ * ice_vc_set_rss_hena - set RSS HENA bits for the VF
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ */
+static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int status;
+
+ dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ dev_err(dev, "RSS not supported by PF\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ /* clear all previously programmed RSS configuration to give VF drivers
+ * the ability to customize the RSS configuration and/or completely
+ * disable RSS
+ */
+ status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
+ if (status && !vrh->hena) {
+ /* only report failure to clear the current RSS configuration if
+ * that was clearly the VF's intention (i.e. vrh->hena = 0)
+ */
+ v_ret = ice_err_to_virt_err(status);
+ goto err;
+ } else if (status) {
+ /* allow the VF to update the RSS configuration even on failure
+ * to clear the current RSS configuration in an attempt to keep
+ * RSS in a working state
+ */
+ dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
+ vf->vf_id);
+ }
+
+ if (vrh->hena) {
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, vrh->hena);
+ v_ret = ice_err_to_virt_err(status);
+ }
+
+ /* send the response to the VF */
+err:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_query_rxdid - query RXDID supported by DDP package
+ * @vf: pointer to VF info
+ *
+ * Called from VF to query a bitmap of supported flexible
+ * descriptor RXDIDs of a DDP package.
+ */
+static int ice_vc_query_rxdid(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_supported_rxdids *rxdid = NULL;
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_pf *pf = vf->pf;
+ int len = 0;
+ int ret, i;
+ u32 regval;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_supported_rxdids);
+ rxdid = kzalloc(len, GFP_KERNEL);
+ if (!rxdid) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+
+ /* Read flexiflag registers to determine whether the
+ * corresponding RXDID is configured and supported or not.
+ * Since the legacy 16-byte descriptor format is not supported,
+ * start from the legacy 32-byte descriptor.
+ */
+ for (i = ICE_RXDID_LEGACY_1; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
+ regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
+ if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
+ & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
+ rxdid->supported_rxdids |= BIT(i);
+ }
+
+ pf->supported_rxdids = rxdid->supported_rxdids;
+
+err:
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+ v_ret, (u8 *)rxdid, len);
+ kfree(rxdid);
+ return ret;
+}
+
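+/* With the bitmap built above, checking whether a given flexible descriptor
+ * ID is usable reduces to a bit test, e.g. (illustrative only):
+ *
+ *	if (pf->supported_rxdids & BIT(rxdid))
+ *		// rxdid is supported by the loaded DDP package
+ */
+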
+/**
+ * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
+ * @vf: VF to enable/disable VLAN stripping for on initialization
+ *
+ * Set the default for VLAN stripping based on whether a port VLAN is configured
+ * and the current VLAN mode of the device.
+ */
+static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (!vsi)
+ return -EINVAL;
+
+ /* don't modify stripping if port VLAN is configured in SVM since the
+ * port VLAN is based on the inner/single VLAN in SVM
+ */
+ if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
+ return 0;
+
+ if (ice_vf_vlan_offload_ena(vf->driver_caps))
+ return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
+ else
+ return vsi->inner_vlan_ops.dis_stripping(vsi);
+}
+
+static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
+{
+ if (vf->trusted)
+ return VLAN_N_VID;
+ else
+ return ICE_MAX_VLAN_PER_VF;
+}
+
+/**
+ * ice_vf_outer_vlan_not_allowed - check if outer VLAN can be used
+ * @vf: VF being checked
+ *
+ * When the device is in double VLAN mode, check whether or not the outer VLAN
+ * is allowed.
+ */
+static bool ice_vf_outer_vlan_not_allowed(struct ice_vf *vf)
+{
+ if (ice_vf_is_port_vlan_ena(vf))
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_vc_set_dvm_caps - set VLAN capabilities when the device is in DVM
+ * @vf: VF that capabilities are being set for
+ * @caps: VLAN capabilities to populate
+ *
+ * Determine VLAN capabilities support based on whether a port VLAN is
+ * configured. If a port VLAN is configured then the VF should use the inner
+ * filtering/offload capabilities since the port VLAN is using the outer VLAN
+ * capabilities.
+ */
+static void
+ice_vc_set_dvm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
+{
+ struct virtchnl_vlan_supported_caps *supported_caps;
+
+ if (ice_vf_outer_vlan_not_allowed(vf)) {
+ /* until support for inner VLAN filtering is added when a port
+ * VLAN is configured, only support software offloaded inner
+ * VLANs when a port VLAN is configured in DVM
+ */
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ caps->offloads.ethertype_match =
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ } else {
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ VIRTCHNL_VLAN_ETHERTYPE_AND;
+ caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ VIRTCHNL_VLAN_ETHERTYPE_XOR |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_ETHERTYPE_88A8 |
+ VIRTCHNL_VLAN_ETHERTYPE_9100 |
+ VIRTCHNL_VLAN_ETHERTYPE_XOR |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+
+ caps->offloads.ethertype_match =
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ }
+
+ caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
+}
+
+/**
+ * ice_vc_set_svm_caps - set VLAN capabilities when the device is in SVM
+ * @vf: VF that capabilities are being set for
+ * @caps: VLAN capabilities to populate
+ *
+ * Determine VLAN capabilities support based on whether a port VLAN is
+ * configured. If a port VLAN is configured then the VF does not have any VLAN
+ * filtering or offload capabilities since the port VLAN is using the inner VLAN
+ * capabilities in single VLAN mode (SVM). Otherwise allow the VF to use inner
+ * VLAN filtering and offload capabilities.
+ */
+static void
+ice_vc_set_svm_caps(struct ice_vf *vf, struct virtchnl_vlan_caps *caps)
+{
+ struct virtchnl_vlan_supported_caps *supported_caps;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_UNSUPPORTED;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_UNSUPPORTED;
+ caps->offloads.ethertype_match = VIRTCHNL_VLAN_UNSUPPORTED;
+ caps->filtering.max_filters = 0;
+ } else {
+ supported_caps = &caps->filtering.filtering_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+ caps->filtering.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+
+ supported_caps = &caps->offloads.stripping_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ supported_caps = &caps->offloads.insertion_support;
+ supported_caps->inner = VIRTCHNL_VLAN_ETHERTYPE_8100 |
+ VIRTCHNL_VLAN_TOGGLE |
+ VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1;
+ supported_caps->outer = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ caps->offloads.ethertype_init = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ caps->offloads.ethertype_match =
+ VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION;
+ caps->filtering.max_filters = ice_vc_get_max_vlan_fltrs(vf);
+ }
+}
+
+/**
+ * ice_vc_get_offload_vlan_v2_caps - determine VF's VLAN capabilities
+ * @vf: VF to determine VLAN capabilities for
+ *
+ * This will only be called if the VF and PF successfully negotiated
+ * VIRTCHNL_VF_OFFLOAD_VLAN_V2.
+ *
+ * Set VLAN capabilities based on the current VLAN mode and whether a port VLAN
+ * is configured or not.
+ */
+static int ice_vc_get_offload_vlan_v2_caps(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_caps *caps = NULL;
+ int err, len = 0;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ caps = kzalloc(sizeof(*caps), GFP_KERNEL);
+ if (!caps) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto out;
+ }
+ len = sizeof(*caps);
+
+ if (ice_is_dvm_ena(&vf->pf->hw))
+ ice_vc_set_dvm_caps(vf, caps);
+ else
+ ice_vc_set_svm_caps(vf, caps);
+
+ /* store negotiated caps to prevent invalid VF messages */
+ memcpy(&vf->vlan_v2_caps, caps, sizeof(*caps));
+
+out:
+ err = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS,
+ v_ret, (u8 *)caps, len);
+ kfree(caps);
+ return err;
+}
+
+/**
+ * ice_vc_validate_vlan_tpid - validate VLAN TPID
+ * @filtering_caps: negotiated/supported VLAN filtering capabilities
+ * @tpid: VLAN TPID used for validation
+ *
+ * Convert the VLAN TPID to a VIRTCHNL_VLAN_ETHERTYPE_* and then compare against
+ * the negotiated/supported filtering caps to see if the VLAN TPID is valid.
+ */
+static bool ice_vc_validate_vlan_tpid(u16 filtering_caps, u16 tpid)
+{
+ enum virtchnl_vlan_support vlan_ethertype = VIRTCHNL_VLAN_UNSUPPORTED;
+
+ switch (tpid) {
+ case ETH_P_8021Q:
+ vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_8100;
+ break;
+ case ETH_P_8021AD:
+ vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_88A8;
+ break;
+ case ETH_P_QINQ1:
+ vlan_ethertype = VIRTCHNL_VLAN_ETHERTYPE_9100;
+ break;
+ }
+
+ if (!(filtering_caps & vlan_ethertype))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_is_valid_vlan - validate the virtchnl_vlan
+ * @vc_vlan: virtchnl_vlan to validate
+ *
+ * If either the VLAN TCI or the VLAN TPID is 0, then this filter is invalid,
+ * so return false. Otherwise return true.
+ */
+static bool ice_vc_is_valid_vlan(struct virtchnl_vlan *vc_vlan)
+{
+ if (!vc_vlan->tci || !vc_vlan->tpid)
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_validate_vlan_filter_list - validate the filter list from the VF
+ * @vfc: negotiated/supported VLAN filtering capabilities
+ * @vfl: VLAN filter list from VF to validate
+ *
+ * Validate all of the filters in the VLAN filter list from the VF. If any of
+ * the checks fail then return false. Otherwise return true.
+ */
+static bool
+ice_vc_validate_vlan_filter_list(struct virtchnl_vlan_filtering_caps *vfc,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ u16 i;
+
+ if (!vfl->num_elements)
+ return false;
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ struct virtchnl_vlan_supported_caps *filtering_support =
+ &vfc->filtering_support;
+ struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
+ struct virtchnl_vlan *outer = &vlan_fltr->outer;
+ struct virtchnl_vlan *inner = &vlan_fltr->inner;
+
+ if ((ice_vc_is_valid_vlan(outer) &&
+ filtering_support->outer == VIRTCHNL_VLAN_UNSUPPORTED) ||
+ (ice_vc_is_valid_vlan(inner) &&
+ filtering_support->inner == VIRTCHNL_VLAN_UNSUPPORTED))
+ return false;
+
+ if ((outer->tci_mask &&
+ !(filtering_support->outer & VIRTCHNL_VLAN_FILTER_MASK)) ||
+ (inner->tci_mask &&
+ !(filtering_support->inner & VIRTCHNL_VLAN_FILTER_MASK)))
+ return false;
+
+ if (((outer->tci & VLAN_PRIO_MASK) &&
+ !(filtering_support->outer & VIRTCHNL_VLAN_PRIO)) ||
+ ((inner->tci & VLAN_PRIO_MASK) &&
+ !(filtering_support->inner & VIRTCHNL_VLAN_PRIO)))
+ return false;
+
+ if ((ice_vc_is_valid_vlan(outer) &&
+ !ice_vc_validate_vlan_tpid(filtering_support->outer,
+ outer->tpid)) ||
+ (ice_vc_is_valid_vlan(inner) &&
+ !ice_vc_validate_vlan_tpid(filtering_support->inner,
+ inner->tpid)))
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_vc_to_vlan - transform from struct virtchnl_vlan to struct ice_vlan
+ * @vc_vlan: struct virtchnl_vlan to transform
+ */
+static struct ice_vlan ice_vc_to_vlan(struct virtchnl_vlan *vc_vlan)
+{
+ struct ice_vlan vlan = { 0 };
+
+ vlan.prio = (vc_vlan->tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ vlan.vid = vc_vlan->tci & VLAN_VID_MASK;
+ vlan.tpid = vc_vlan->tpid;
+
+ return vlan;
+}
+
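+/* Worked example of the TCI decomposition above: with vc_vlan->tci = 0xA064,
+ * prio = (0xA064 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT = 0xA000 >> 13 = 5 and
+ * vid = 0xA064 & VLAN_VID_MASK = 0x064 = 100.
+ */
+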
+/**
+ * ice_vc_vlan_action - action to perform on the virtchnl_vlan
+ * @vsi: VF's VSI used to perform the action
+ * @vlan_action: function to perform the action with (i.e. add/del)
+ * @vlan: VLAN filter to perform the action with
+ */
+static int
+ice_vc_vlan_action(struct ice_vsi *vsi,
+ int (*vlan_action)(struct ice_vsi *, struct ice_vlan *),
+ struct ice_vlan *vlan)
+{
+ int err;
+
+ err = vlan_action(vsi, vlan);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_vc_del_vlans - delete VLAN(s) from the virtchnl filter list
+ * @vf: VF used to delete the VLAN(s)
+ * @vsi: VF's VSI used to delete the VLAN(s)
+ * @vfl: virtchnl filter list used to delete the filters
+ */
+static int
+ice_vc_del_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
+ int err;
+ u16 i;
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
+ struct virtchnl_vlan *vc_vlan;
+
+ vc_vlan = &vlan_fltr->outer;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->outer_vlan_ops.del_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi) && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+ }
+
+ vc_vlan = &vlan_fltr->inner;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->inner_vlan_ops.del_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ /* no support for VLAN promiscuous on inner VLAN unless
+ * we are in Single VLAN Mode (SVM)
+ */
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc)
+ ice_vf_dis_vlan_promisc(vsi, &vlan);
+
+ /* Disable VLAN filtering when only VLAN 0 is left */
+ if (!ice_vsi_has_non_zero_vlans(vsi)) {
+ err = vsi->inner_vlan_ops.dis_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_remove_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_DEL_VLAN_V2
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ */
+static int ice_vc_remove_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vlan_filter_list_v2 *vfl =
+ (struct virtchnl_vlan_filter_list_v2 *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi;
+
+ if (!ice_vc_validate_vlan_filter_list(&vf->vlan_v2_caps.filtering,
+ vfl)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (ice_vc_del_vlans(vf, vsi, vfl))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN_V2, v_ret, NULL,
+ 0);
+}
+
+/**
+ * ice_vc_add_vlans - add VLAN(s) from the virtchnl filter list
+ * @vf: VF used to add the VLAN(s)
+ * @vsi: VF's VSI used to add the VLAN(s)
+ * @vfl: virtchnl filter list used to add the filters
+ */
+static int
+ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ bool vlan_promisc = ice_is_vlan_promisc_allowed(vf);
+ int err;
+ u16 i;
+
+ for (i = 0; i < vfl->num_elements; i++) {
+ struct virtchnl_vlan_filter *vlan_fltr = &vfl->filters[i];
+ struct virtchnl_vlan *vc_vlan;
+
+ vc_vlan = &vlan_fltr->outer;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->outer_vlan_ops.add_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ if (vlan_promisc) {
+ err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (err)
+ return err;
+ }
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid && ice_is_dvm_ena(&vsi->back->hw)) {
+ err = vsi->outer_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+ }
+
+ vc_vlan = &vlan_fltr->inner;
+ if (ice_vc_is_valid_vlan(vc_vlan)) {
+ struct ice_vlan vlan = ice_vc_to_vlan(vc_vlan);
+
+ err = ice_vc_vlan_action(vsi,
+ vsi->inner_vlan_ops.add_vlan,
+ &vlan);
+ if (err)
+ return err;
+
+ /* no support for VLAN promiscuous on inner VLAN unless
+ * we are in Single VLAN Mode (SVM)
+ */
+ if (!ice_is_dvm_ena(&vsi->back->hw)) {
+ if (vlan_promisc) {
+ err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+ if (err)
+ return err;
+ }
+
+ /* Enable VLAN filtering on first non-zero VLAN */
+ if (vf->spoofchk && vlan.vid) {
+ err = vsi->inner_vlan_ops.ena_tx_filtering(vsi);
+ if (err)
+ return err;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_validate_add_vlan_filter_list - validate add filter list from the VF
+ * @vsi: VF VSI used to get number of existing VLAN filters
+ * @vfc: negotiated/supported VLAN filtering capabilities
+ * @vfl: VLAN filter list from VF to validate
+ *
+ * Validate all of the filters in the VLAN filter list from the VF during the
+ * VIRTCHNL_OP_ADD_VLAN_V2 opcode. If any of the checks fail then return false.
+ * Otherwise return true.
+ */
+static bool
+ice_vc_validate_add_vlan_filter_list(struct ice_vsi *vsi,
+ struct virtchnl_vlan_filtering_caps *vfc,
+ struct virtchnl_vlan_filter_list_v2 *vfl)
+{
+ u16 num_requested_filters = ice_vsi_num_non_zero_vlans(vsi) +
+ vfl->num_elements;
+
+ if (num_requested_filters > vfc->max_filters)
+ return false;
+
+ return ice_vc_validate_vlan_filter_list(vfc, vfl);
+}
+
+/**
+ * ice_vc_add_vlan_v2_msg - virtchnl handler for VIRTCHNL_OP_ADD_VLAN_V2
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ */
+static int ice_vc_add_vlan_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_filter_list_v2 *vfl =
+ (struct virtchnl_vlan_filter_list_v2 *)msg;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vfl->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_validate_add_vlan_filter_list(vsi,
+ &vf->vlan_v2_caps.filtering,
+ vfl)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (ice_vc_add_vlans(vf, vsi, vfl))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN_V2, v_ret, NULL,
+ 0);
+}
+
+/**
+ * ice_vc_valid_vlan_setting - validate VLAN setting
+ * @negotiated_settings: negotiated VLAN settings during VF init
+ * @ethertype_setting: ethertype(s) requested for the VLAN setting
+ */
+static bool
+ice_vc_valid_vlan_setting(u32 negotiated_settings, u32 ethertype_setting)
+{
+ if (ethertype_setting && !(negotiated_settings & ethertype_setting))
+ return false;
+
+ /* only allow a single VIRTCHNL_VLAN_ETHERTYPE if
+ * VIRTCHNL_VLAN_ETHERTYPE_AND is not negotiated/supported
+ */
+ if (!(negotiated_settings & VIRTCHNL_VLAN_ETHERTYPE_AND) &&
+ hweight32(ethertype_setting) > 1)
+ return false;
+
+ /* ability to modify the VLAN setting was not negotiated */
+ if (!(negotiated_settings & VIRTCHNL_VLAN_TOGGLE))
+ return false;
+
+ return true;
+}
+
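+/* Example of the single-ethertype rule above: ethertype_setting =
+ * VIRTCHNL_VLAN_ETHERTYPE_8100 | VIRTCHNL_VLAN_ETHERTYPE_88A8 has
+ * hweight32() == 2 and is rejected unless VIRTCHNL_VLAN_ETHERTYPE_AND was
+ * negotiated, while a lone VIRTCHNL_VLAN_ETHERTYPE_8100 passes this check
+ * (provided VIRTCHNL_VLAN_TOGGLE was negotiated).
+ */
+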
+/**
+ * ice_vc_valid_vlan_setting_msg - validate the VLAN setting message
+ * @caps: negotiated VLAN settings during VF init
+ * @msg: message to validate
+ *
+ * Used to validate any VLAN virtchnl message sent as a
+ * virtchnl_vlan_setting structure. Validates the message against the
+ * negotiated/supported caps during VF driver init.
+ */
+static bool
+ice_vc_valid_vlan_setting_msg(struct virtchnl_vlan_supported_caps *caps,
+ struct virtchnl_vlan_setting *msg)
+{
+ if ((!msg->outer_ethertype_setting &&
+ !msg->inner_ethertype_setting) ||
+ (!caps->outer && !caps->inner))
+ return false;
+
+ if (msg->outer_ethertype_setting &&
+ !ice_vc_valid_vlan_setting(caps->outer,
+ msg->outer_ethertype_setting))
+ return false;
+
+ if (msg->inner_ethertype_setting &&
+ !ice_vc_valid_vlan_setting(caps->inner,
+ msg->inner_ethertype_setting))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_vc_get_tpid - transform from VIRTCHNL_VLAN_ETHERTYPE_* to VLAN TPID
+ * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* used to get VLAN TPID
+ * @tpid: VLAN TPID to populate
+ */
+static int ice_vc_get_tpid(u32 ethertype_setting, u16 *tpid)
+{
+ switch (ethertype_setting) {
+ case VIRTCHNL_VLAN_ETHERTYPE_8100:
+ *tpid = ETH_P_8021Q;
+ break;
+ case VIRTCHNL_VLAN_ETHERTYPE_88A8:
+ *tpid = ETH_P_8021AD;
+ break;
+ case VIRTCHNL_VLAN_ETHERTYPE_9100:
+ *tpid = ETH_P_QINQ1;
+ break;
+ default:
+ *tpid = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vc_ena_vlan_offload - enable VLAN offload based on the ethertype_setting
+ * @vsi: VF's VSI used to enable the VLAN offload
+ * @ena_offload: function used to enable the VLAN offload
+ * @ethertype_setting: VIRTCHNL_VLAN_ETHERTYPE_* to enable offloads for
+ */
+static int
+ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
+ int (*ena_offload)(struct ice_vsi *vsi, u16 tpid),
+ u32 ethertype_setting)
+{
+ u16 tpid;
+ int err;
+
+ err = ice_vc_get_tpid(ethertype_setting, &tpid);
+ if (err)
+ return err;
+
+ err = ena_offload(vsi, tpid);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
+#define ICE_L2TSEL_BIT_OFFSET 23
+enum ice_l2tsel {
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
+};
+
+/**
+ * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
+ * @vsi: VSI used to update l2tsel on
+ * @l2tsel: l2tsel setting requested
+ *
+ * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
+ * This will modify which descriptor field the first offloaded VLAN will be
+ * stripped into.
+ */
+static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 l2tsel_bit;
+ int i;
+
+ if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
+ l2tsel_bit = 0;
+ else
+ l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
+
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ u16 pfq = vsi->rxq_map[i];
+ u32 qrx_context_offset;
+ u32 regval;
+
+ qrx_context_offset =
+ QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
+
+ regval = rd32(hw, qrx_context_offset);
+ regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
+ regval |= l2tsel_bit;
+ wr32(hw, qrx_context_offset, regval);
+ }
+}
+
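+/* Concretely, this is a read-modify-write of bit ICE_L2TSEL_BIT_OFFSET (23)
+ * in Rx queue context register index ICE_L2TSEL_QRX_CONTEXT_REG_IDX (3) for
+ * every PF queue backing the VSI: clearing the bit extracts the first
+ * stripped tag into L2TAG2 (2nd), setting it extracts into L2TAG1.
+ */
+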
+/**
+ * ice_vc_ena_vlan_stripping_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2
+ */
+static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *stripping_support;
+ struct virtchnl_vlan_setting *strip_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
+ if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = strip_msg->outer_ethertype_setting;
+ if (ethertype_setting) {
+ if (ice_vc_ena_vlan_offload(vsi,
+ vsi->outer_vlan_ops.ena_stripping,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ } else {
+ enum ice_l2tsel l2tsel =
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND;
+
+ /* PF tells the VF that the outer VLAN tag is always
+ * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
+ * inner is always extracted to
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
+ * support outer stripping so the first tag always ends
+ * up in L2TAG2_2ND and the second/inner tag, if
+ * enabled, is extracted in L2TAG1.
+ */
+ ice_vsi_update_l2tsel(vsi, l2tsel);
+ }
+ }
+
+ ethertype_setting = strip_msg->inner_ethertype_setting;
+ if (ethertype_setting &&
+ ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_stripping,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_stripping_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2
+ */
+static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *stripping_support;
+ struct virtchnl_vlan_setting *strip_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, strip_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ stripping_support = &vf->vlan_v2_caps.offloads.stripping_support;
+ if (!ice_vc_valid_vlan_setting_msg(stripping_support, strip_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = strip_msg->outer_ethertype_setting;
+ if (ethertype_setting) {
+ if (vsi->outer_vlan_ops.dis_stripping(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ } else {
+ enum ice_l2tsel l2tsel =
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1;
+
+ /* PF tells the VF that the outer VLAN tag is always
+ * extracted to VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2 and
+ * inner is always extracted to
+ * VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1. This is needed to
+ * support inner stripping while outer stripping is
+ * disabled so that the first and only tag is extracted
+ * in L2TAG1.
+ */
+ ice_vsi_update_l2tsel(vsi, l2tsel);
+ }
+ }
+
+ ethertype_setting = strip_msg->inner_ethertype_setting;
+ if (ethertype_setting && vsi->inner_vlan_ops.dis_stripping(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_ena_vlan_insertion_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2
+ */
+static int ice_vc_ena_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *insertion_support;
+ struct virtchnl_vlan_setting *insertion_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
+ if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->outer_ethertype_setting;
+ if (ethertype_setting &&
+ ice_vc_ena_vlan_offload(vsi, vsi->outer_vlan_ops.ena_insertion,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->inner_ethertype_setting;
+ if (ethertype_setting &&
+ ice_vc_ena_vlan_offload(vsi, vsi->inner_vlan_ops.ena_insertion,
+ ethertype_setting)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_dis_vlan_insertion_v2_msg
+ * @vf: VF the message was received from
+ * @msg: message received from the VF
+ *
+ * virtchnl handler for VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2
+ */
+static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vlan_supported_caps *insertion_support;
+ struct virtchnl_vlan_setting *insertion_msg =
+ (struct virtchnl_vlan_setting *)msg;
+ u32 ethertype_setting;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, insertion_msg->vport_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ insertion_support = &vf->vlan_v2_caps.offloads.insertion_support;
+ if (!ice_vc_valid_vlan_setting_msg(insertion_support, insertion_msg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->outer_ethertype_setting;
+ if (ethertype_setting && vsi->outer_vlan_ops.dis_insertion(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+ ethertype_setting = insertion_msg->inner_ethertype_setting;
+ if (ethertype_setting && vsi->inner_vlan_ops.dis_insertion(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto out;
+ }
+
+out:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
+ v_ret, NULL, 0);
+}
+
+static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
+ .get_ver_msg = ice_vc_get_ver_msg,
+ .get_vf_res_msg = ice_vc_get_vf_res_msg,
+ .reset_vf = ice_vc_reset_vf_msg,
+ .add_mac_addr_msg = ice_vc_add_mac_addr_msg,
+ .del_mac_addr_msg = ice_vc_del_mac_addr_msg,
+ .cfg_qs_msg = ice_vc_cfg_qs_msg,
+ .ena_qs_msg = ice_vc_ena_qs_msg,
+ .dis_qs_msg = ice_vc_dis_qs_msg,
+ .request_qs_msg = ice_vc_request_qs_msg,
+ .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
+ .config_rss_key = ice_vc_config_rss_key,
+ .config_rss_lut = ice_vc_config_rss_lut,
+ .get_stats_msg = ice_vc_get_stats_msg,
+ .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
+ .add_vlan_msg = ice_vc_add_vlan_msg,
+ .remove_vlan_msg = ice_vc_remove_vlan_msg,
+ .query_rxdid = ice_vc_query_rxdid,
+ .get_rss_hena = ice_vc_get_rss_hena,
+ .set_rss_hena_msg = ice_vc_set_rss_hena,
+ .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
+ .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
+ .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
+ .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
+ .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
+ .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
+ .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
+ .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
+ .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
+ .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
+ .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
+ .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
+};
+
+/**
+ * ice_virtchnl_set_dflt_ops - Switch to default virtchnl ops
+ * @vf: the VF to switch ops
+ */
+void ice_virtchnl_set_dflt_ops(struct ice_vf *vf)
+{
+ vf->virtchnl_ops = &ice_virtchnl_dflt_ops;
+}
+
+/**
+ * ice_vc_repr_add_mac
+ * @vf: pointer to VF
+ * @msg: virtchannel message
+ *
+ * When port representors are created, we do not add a MAC rule
+ * to firmware; instead we store it so that the PF can report the
+ * same MAC as the VF.
+ */
+static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+ struct ice_vsi *vsi;
+ struct ice_pf *pf;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ pf = vf->pf;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ for (i = 0; i < al->num_elements; i++) {
+ u8 *mac_addr = al->list[i].addr;
+ int result;
+
+ if (!is_unicast_ether_addr(mac_addr) ||
+ ether_addr_equal(mac_addr, vf->hw_lan_addr))
+ continue;
+
+ if (vf->pf_set_mac) {
+ dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto handle_mac_exit;
+ }
+
+ result = ice_eswitch_add_vf_mac_rule(pf, vf, mac_addr);
+ if (result) {
+ dev_err(ice_pf_to_dev(pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
+ mac_addr, vf->vf_id, result);
+ goto handle_mac_exit;
+ }
+
+ ice_vfhw_mac_add(vf, &al->list[i]);
+ vf->num_mac++;
+ break;
+ }
+
+handle_mac_exit:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_repr_del_mac - response with success for deleting MAC
+ * @vf: pointer to VF
+ * @msg: virtchannel message
+ *
+ * Respond with success so as not to break the normal VF flow.
+ * For a legacy VF driver, try to update the cached MAC address.
+ */
+static int
+ice_vc_repr_del_mac(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+
+ ice_update_legacy_cached_mac(vf, &al->list[0]);
+
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int
+ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
+{
+ dev_dbg(ice_pf_to_dev(vf->pf),
+ "Can't config promiscuous mode in switchdev mode for VF %d\n",
+ vf->vf_id);
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+ NULL, 0);
+}
+
+static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
+ .get_ver_msg = ice_vc_get_ver_msg,
+ .get_vf_res_msg = ice_vc_get_vf_res_msg,
+ .reset_vf = ice_vc_reset_vf_msg,
+ .add_mac_addr_msg = ice_vc_repr_add_mac,
+ .del_mac_addr_msg = ice_vc_repr_del_mac,
+ .cfg_qs_msg = ice_vc_cfg_qs_msg,
+ .ena_qs_msg = ice_vc_ena_qs_msg,
+ .dis_qs_msg = ice_vc_dis_qs_msg,
+ .request_qs_msg = ice_vc_request_qs_msg,
+ .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
+ .config_rss_key = ice_vc_config_rss_key,
+ .config_rss_lut = ice_vc_config_rss_lut,
+ .get_stats_msg = ice_vc_get_stats_msg,
+ .cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode,
+ .add_vlan_msg = ice_vc_add_vlan_msg,
+ .remove_vlan_msg = ice_vc_remove_vlan_msg,
+ .query_rxdid = ice_vc_query_rxdid,
+ .get_rss_hena = ice_vc_get_rss_hena,
+ .set_rss_hena_msg = ice_vc_set_rss_hena,
+ .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
+ .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
+ .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
+ .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
+ .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
+ .get_offload_vlan_v2_caps = ice_vc_get_offload_vlan_v2_caps,
+ .add_vlan_v2_msg = ice_vc_add_vlan_v2_msg,
+ .remove_vlan_v2_msg = ice_vc_remove_vlan_v2_msg,
+ .ena_vlan_stripping_v2_msg = ice_vc_ena_vlan_stripping_v2_msg,
+ .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
+ .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
+ .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
+};
+
+/**
+ * ice_virtchnl_set_repr_ops - Switch to representor virtchnl ops
+ * @vf: the VF to switch ops
+ */
+void ice_virtchnl_set_repr_ops(struct ice_vf *vf)
+{
+ vf->virtchnl_ops = &ice_virtchnl_repr_ops;
+}
+
+/**
+ * ice_is_malicious_vf - check if this VF might be overflowing the mailbox
+ * @vf: the VF to check
+ * @mbxdata: data about the state of the mailbox
+ *
+ * Detect if a given VF might be malicious and attempting to overflow the PF
+ * mailbox. If so, log a warning message and ignore this event.
+ */
+static bool
+ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata)
+{
+ bool report_malvf = false;
+ struct device *dev;
+ struct ice_pf *pf;
+ int status;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+
+ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
+ return vf->mbx_info.malicious;
+
+ /* check to see if we have a newly malicious VF */
+ status = ice_mbx_vf_state_handler(&pf->hw, mbxdata, &vf->mbx_info,
+ &report_malvf);
+ if (status)
+ dev_warn_ratelimited(dev, "Unable to check status of mailbox overflow for VF %u MAC %pM, status %d\n",
+ vf->vf_id, vf->dev_lan_addr, status);
+
+ if (report_malvf) {
+ struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
+ u8 zero_addr[ETH_ALEN] = {};
+
+ dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
+ vf->dev_lan_addr,
+ pf_vsi ? pf_vsi->netdev->dev_addr : zero_addr);
+ }
+
+ return vf->mbx_info.malicious;
+}
+
+/**
+ * ice_vc_process_vf_msg - Process request from VF
+ * @pf: pointer to the PF structure
+ * @event: pointer to the AQ event
+ * @mbxdata: information used to detect VF attempting mailbox overflow
+ *
+ * Called from the common ASQ/ARQ handler to
+ * process a request from a VF
+ */
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
+ struct ice_mbx_data *mbxdata)
+{
+ u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
+ s16 vf_id = le16_to_cpu(event->desc.retval);
+ const struct ice_virtchnl_ops *ops;
+ u16 msglen = event->msg_len;
+ u8 *msg = event->msg_buf;
+ struct ice_vf *vf = NULL;
+ struct device *dev;
+ int err = 0;
+
+ dev = ice_pf_to_dev(pf);
+
+ vf = ice_get_vf_by_id(pf, vf_id);
+ if (!vf) {
+ dev_err(dev, "Unable to locate VF for message from VF ID %d, opcode %d, len %d\n",
+ vf_id, v_opcode, msglen);
+ return;
+ }
+
+ mutex_lock(&vf->cfg_lock);
+
+ /* Check if the VF is trying to overflow the mailbox */
+ if (ice_is_malicious_vf(vf, mbxdata))
+ goto finish;
+
+ /* Check if VF is disabled. */
+ if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
+ err = -EPERM;
+ goto error_handler;
+ }
+
+ ops = vf->virtchnl_ops;
+
+ /* Perform basic checks on the msg */
+ err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
+ if (err) {
+ if (err == VIRTCHNL_STATUS_ERR_PARAM)
+ err = -EPERM;
+ else
+ err = -EINVAL;
+ }
+
+error_handler:
+ if (err) {
+ ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
+ NULL, 0);
+ dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
+ vf_id, v_opcode, msglen, err);
+ goto finish;
+ }
+
+ if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
+ ice_vc_send_msg_to_vf(vf, v_opcode,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
+ 0);
+ goto finish;
+ }
+
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ err = ops->get_ver_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ err = ops->get_vf_res_msg(vf, msg);
+ if (ice_vf_init_vlan_stripping(vf))
+ dev_dbg(dev, "Failed to initialize VLAN stripping for VF %d\n",
+ vf->vf_id);
+ ice_vc_notify_vf_link_state(vf);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ ops->reset_vf(vf);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ err = ops->add_mac_addr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ err = ops->del_mac_addr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ err = ops->cfg_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ err = ops->ena_qs_msg(vf, msg);
+ ice_vc_notify_vf_link_state(vf);
+ break;
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ err = ops->dis_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_REQUEST_QUEUES:
+ err = ops->request_qs_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ err = ops->cfg_irq_map_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ err = ops->config_rss_key(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ err = ops->config_rss_lut(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ err = ops->get_stats_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ err = ops->cfg_promiscuous_mode_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ err = ops->add_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_VLAN:
+ err = ops->remove_vlan_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
+ err = ops->query_rxdid(vf);
+ break;
+ case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ err = ops->get_rss_hena(vf);
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ err = ops->set_rss_hena_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
+ err = ops->ena_vlan_stripping(vf);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
+ err = ops->dis_vlan_stripping(vf);
+ break;
+ case VIRTCHNL_OP_ADD_FDIR_FILTER:
+ err = ops->add_fdir_fltr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_FDIR_FILTER:
+ err = ops->del_fdir_fltr_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_RSS_CFG:
+ err = ops->handle_rss_cfg_msg(vf, msg, true);
+ break;
+ case VIRTCHNL_OP_DEL_RSS_CFG:
+ err = ops->handle_rss_cfg_msg(vf, msg, false);
+ break;
+ case VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS:
+ err = ops->get_offload_vlan_v2_caps(vf);
+ break;
+ case VIRTCHNL_OP_ADD_VLAN_V2:
+ err = ops->add_vlan_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_VLAN_V2:
+ err = ops->remove_vlan_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2:
+ err = ops->ena_vlan_stripping_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2:
+ err = ops->dis_vlan_stripping_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2:
+ err = ops->ena_vlan_insertion_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2:
+ err = ops->dis_vlan_insertion_v2_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_UNKNOWN:
+ default:
+ dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
+ vf_id);
+ err = ice_vc_send_msg_to_vf(vf, v_opcode,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+ NULL, 0);
+ break;
+ }
+ if (err) {
+ /* The helper function cares less about error return values here,
+ * as it is busy with pending work.
+ */
+ dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
+ vf_id, v_opcode, err);
+ }
+
+finish:
+ mutex_unlock(&vf->cfg_lock);
+ ice_put_vf(vf);
+}
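[Editor's note] A minimal sketch, not part of this patch, condensing the dispatch pipeline of ice_vc_process_vf_msg() above: validate the opcode against what the VF negotiated, then route it through a switch to the handler, replying NOT_SUPPORTED otherwise. Opcodes, the capability bit, and all demo_* names are hypothetical stand-ins.

#include <stdio.h>
#include <stdbool.h>

enum demo_op { DEMO_OP_VERSION = 1, DEMO_OP_ADD_MAC = 10, DEMO_OP_UNKNOWN = 99 };

static bool demo_opcode_allowed(unsigned int negotiated_caps, enum demo_op op)
{
	/* e.g. ADD_MAC is only allowed when capability bit 0 was negotiated */
	return op == DEMO_OP_VERSION ||
	       ((negotiated_caps & 0x1) && op == DEMO_OP_ADD_MAC);
}

static int demo_dispatch(unsigned int caps, enum demo_op op)
{
	if (!demo_opcode_allowed(caps, op)) {
		printf("op %d -> reply NOT_SUPPORTED\n", op);
		return -1;
	}
	switch (op) {
	case DEMO_OP_VERSION:
		printf("op %d -> version handler\n", op);
		return 0;
	case DEMO_OP_ADD_MAC:
		printf("op %d -> add_mac handler\n", op);
		return 0;
	default:
		printf("op %d -> unknown opcode, reply NOT_SUPPORTED\n", op);
		return -1;
	}
}

int main(void)
{
	demo_dispatch(0x1, DEMO_OP_ADD_MAC);	/* allowed */
	demo_dispatch(0x0, DEMO_OP_ADD_MAC);	/* filtered by the allowlist */
	demo_dispatch(0x1, DEMO_OP_UNKNOWN);	/* unsupported opcode */
	return 0;
}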
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
new file mode 100644
index 000000000000..cd747718de73
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_VIRTCHNL_H_
+#define _ICE_VIRTCHNL_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/if_ether.h>
+#include <linux/avf/virtchnl.h>
+#include "ice_vf_lib.h"
+
+/* Restrict the number of MAC addresses and VLANs that a non-trusted VF can program */
+#define ICE_MAX_VLAN_PER_VF 8
+
+/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
+ * broadcast, and 16 for additional unicast/multicast filters
+ */
+#define ICE_MAX_MACADDR_PER_VF 18
+#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+
+struct ice_virtchnl_ops {
+ int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
+ void (*reset_vf)(struct ice_vf *vf);
+ int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*request_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
+ int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
+ int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*query_rxdid)(struct ice_vf *vf);
+ int (*get_rss_hena)(struct ice_vf *vf);
+ int (*set_rss_hena_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_stripping)(struct ice_vf *vf);
+ int (*dis_vlan_stripping)(struct ice_vf *vf);
+ int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
+ int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_offload_vlan_v2_caps)(struct ice_vf *vf);
+ int (*add_vlan_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*remove_vlan_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg);
+};
+
+#ifdef CONFIG_PCI_IOV
+void ice_virtchnl_set_dflt_ops(struct ice_vf *vf);
+void ice_virtchnl_set_repr_ops(struct ice_vf *vf);
+void ice_vc_notify_vf_link_state(struct ice_vf *vf);
+void ice_vc_notify_link_state(struct ice_pf *pf);
+void ice_vc_notify_reset(struct ice_pf *pf);
+int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
+bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
+ struct ice_mbx_data *mbxdata);
+#else /* CONFIG_PCI_IOV */
+static inline void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) { }
+static inline void ice_virtchnl_set_repr_ops(struct ice_vf *vf) { }
+static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
+static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
+static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
+
+static inline int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
+{
+ return false;
+}
+
+static inline void
+ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
+ struct ice_mbx_data *mbxdata)
+{
+}
+#endif /* !CONFIG_PCI_IOV */
+
+#endif /* _ICE_VIRTCHNL_H_ */
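[Editor's note] A minimal sketch, not part of this patch, of the conditional-compilation stub pattern the header above uses: when the config option is off, static inline no-ops (or -EOPNOTSUPP returns) keep call sites compiling without per-caller #ifdefs. DEMO_CONFIG_FEATURE and demo_send_msg() are hypothetical names.

#include <stdio.h>
#include <errno.h>

/* toggle to mimic CONFIG_PCI_IOV being enabled or disabled */
#define DEMO_CONFIG_FEATURE 0

#if DEMO_CONFIG_FEATURE
int demo_send_msg(int vf_id)
{
	printf("sending to VF %d\n", vf_id);
	return 0;
}
#else
static inline int demo_send_msg(int vf_id)
{
	(void)vf_id;
	return -EOPNOTSUPP;	/* same shape as the real stub above */
}
#endif

int main(void)
{
	int err = demo_send_msg(3);	/* call site is identical either way */

	if (err)
		printf("feature compiled out, err=%d\n", err);
	return 0;
}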
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index 9feebe5f556c..7d547fa616fa 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -55,12 +55,26 @@ static const u32 vlan_allowlist_opcodes[] = {
VIRTCHNL_OP_ENABLE_VLAN_STRIPPING, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
};
+/* VIRTCHNL_VF_OFFLOAD_VLAN_V2 */
+static const u32 vlan_v2_allowlist_opcodes[] = {
+ VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS, VIRTCHNL_OP_ADD_VLAN_V2,
+ VIRTCHNL_OP_DEL_VLAN_V2, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
+ VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
+ VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2,
+ VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2,
+};
+
/* VIRTCHNL_VF_OFFLOAD_RSS_PF */
static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,
};
+/* VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC */
+static const u32 rx_flex_desc_allowlist_opcodes[] = {
+ VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+};
+
/* VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF */
static const u32 adv_rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_ADD_RSS_CFG, VIRTCHNL_OP_DEL_RSS_CFG,
@@ -87,8 +101,10 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = {
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_REQ_QUEUES, req_queues_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN, vlan_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_RSS_PF, rss_pf_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, rx_flex_desc_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF, adv_rss_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes),
};
/**
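[Editor's note] A minimal sketch, not part of this patch, mimicking how a table of (capability flag -> opcode list) entries like ALLOW_ITEM() above can be folded into one allowed-opcode set when a VF negotiates its capabilities. The flags, opcode numbers, 64-opcode limit, and demo_* names are hypothetical simplifications.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define DEMO_CAP_VLAN_V2	0x1
#define DEMO_CAP_FDIR		0x2

static const uint32_t demo_vlan_v2_opcodes[] = { 51, 52, 53 };
static const uint32_t demo_fdir_opcodes[] = { 47, 48 };

struct demo_allow_item {
	uint32_t cap_flag;
	const uint32_t *opcodes;
	size_t count;
};

static const struct demo_allow_item demo_allowlist[] = {
	{ DEMO_CAP_VLAN_V2, demo_vlan_v2_opcodes, 3 },
	{ DEMO_CAP_FDIR, demo_fdir_opcodes, 2 },
};

int main(void)
{
	uint32_t negotiated = DEMO_CAP_VLAN_V2;	/* what the VF asked for */
	uint64_t allowed = 0;			/* one bit per opcode < 64 */
	size_t i, j;

	for (i = 0; i < sizeof(demo_allowlist) / sizeof(demo_allowlist[0]); i++) {
		if (!(negotiated & demo_allowlist[i].cap_flag))
			continue;
		for (j = 0; j < demo_allowlist[i].count; j++)
			allowed |= 1ULL << demo_allowlist[i].opcodes[j];
	}

	printf("opcode 52 allowed: %d\n", !!(allowed & (1ULL << 52)));	/* 1 */
	printf("opcode 47 allowed: %d\n", !!(allowed & (1ULL << 47)));	/* 0 */
	return 0;
}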
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index eee180d8c024..daa6a1e894cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -5,6 +5,7 @@
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
+#include "ice_vf_lib_private.h"
#define to_fltr_conf_from_desc(p) \
container_of(p, struct virtchnl_fdir_fltr_conf, input)
@@ -47,197 +48,6 @@ struct virtchnl_fdir_fltr_conf {
u32 flow_id;
};
-static enum virtchnl_proto_hdr_type vc_pattern_ether[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_TCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_SCTP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_TCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_SCTP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_GTPU_IP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_GTPU_IP,
- VIRTCHNL_PROTO_HDR_GTPU_EH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_L2TPV3,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_L2TPV3,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_AH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_AH,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_ESP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV4,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_PFCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = {
- VIRTCHNL_PROTO_HDR_ETH,
- VIRTCHNL_PROTO_HDR_IPV6,
- VIRTCHNL_PROTO_HDR_UDP,
- VIRTCHNL_PROTO_HDR_PFCP,
- VIRTCHNL_PROTO_HDR_NONE,
-};
-
-struct virtchnl_fdir_pattern_match_item {
- enum virtchnl_proto_hdr_type *list;
- u64 input_set;
- u64 *meta;
-};
-
-static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = {
- {vc_pattern_ipv4, 0, NULL},
- {vc_pattern_ipv4_tcp, 0, NULL},
- {vc_pattern_ipv4_udp, 0, NULL},
- {vc_pattern_ipv4_sctp, 0, NULL},
- {vc_pattern_ipv6, 0, NULL},
- {vc_pattern_ipv6_tcp, 0, NULL},
- {vc_pattern_ipv6_udp, 0, NULL},
- {vc_pattern_ipv6_sctp, 0, NULL},
-};
-
-static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = {
- {vc_pattern_ipv4, 0, NULL},
- {vc_pattern_ipv4_tcp, 0, NULL},
- {vc_pattern_ipv4_udp, 0, NULL},
- {vc_pattern_ipv4_sctp, 0, NULL},
- {vc_pattern_ipv6, 0, NULL},
- {vc_pattern_ipv6_tcp, 0, NULL},
- {vc_pattern_ipv6_udp, 0, NULL},
- {vc_pattern_ipv6_sctp, 0, NULL},
- {vc_pattern_ether, 0, NULL},
- {vc_pattern_ipv4_gtpu, 0, NULL},
- {vc_pattern_ipv4_gtpu_eh, 0, NULL},
- {vc_pattern_ipv4_l2tpv3, 0, NULL},
- {vc_pattern_ipv6_l2tpv3, 0, NULL},
- {vc_pattern_ipv4_esp, 0, NULL},
- {vc_pattern_ipv6_esp, 0, NULL},
- {vc_pattern_ipv4_ah, 0, NULL},
- {vc_pattern_ipv6_ah, 0, NULL},
- {vc_pattern_ipv4_nat_t_esp, 0, NULL},
- {vc_pattern_ipv6_nat_t_esp, 0, NULL},
- {vc_pattern_ipv4_pfcp, 0, NULL},
- {vc_pattern_ipv6_pfcp, 0, NULL},
-};
-
struct virtchnl_fdir_inset_map {
enum virtchnl_proto_hdr_field field;
enum ice_flow_field fld;
@@ -303,7 +113,7 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
return -EINVAL;
- if (!pf->vsi[vf->lan_vsi_idx])
+ if (!ice_get_vf_vsi(vf))
return -EINVAL;
return 0;
@@ -684,7 +494,7 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
vf_prof = fdir->fdir_prof[flow];
- vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi) {
dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
return;
@@ -732,6 +542,87 @@ static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
}
/**
+ * ice_vc_fdir_reset_cnt_all - reset all FDIR filter counters for this VF
+ * @fdir: pointer to the VF FDIR structure
+ */
+static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
+{
+ enum ice_fltr_ptype flow;
+
+ for (flow = ICE_FLTR_PTYPE_NONF_NONE;
+ flow < ICE_FLTR_PTYPE_MAX; flow++) {
+ fdir->fdir_fltr_cnt[flow][0] = 0;
+ fdir->fdir_fltr_cnt[flow][1] = 0;
+ }
+}
+
+/**
+ * ice_vc_fdir_has_prof_conflict
+ * @vf: pointer to the VF structure
+ * @conf: FDIR configuration for each filter
+ *
+ * Check if the profile for @conf conflicts with any existing profile
+ *
+ * Return: true if the profiles conflict, false otherwise.
+ */
+static bool
+ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
+ struct virtchnl_fdir_fltr_conf *conf)
+{
+ struct ice_fdir_fltr *desc;
+
+ list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
+ struct virtchnl_fdir_fltr_conf *existing_conf;
+ enum ice_fltr_ptype flow_type_a, flow_type_b;
+ struct ice_fdir_fltr *a, *b;
+
+ existing_conf = to_fltr_conf_from_desc(desc);
+ a = &existing_conf->input;
+ b = &conf->input;
+ flow_type_a = a->flow_type;
+ flow_type_b = b->flow_type;
+
+ /* No need to compare two rules with different tunnel types or
+ * with the same protocol type.
+ */
+ if (existing_conf->ttype != conf->ttype ||
+ flow_type_a == flow_type_b)
+ continue;
+
+ switch (flow_type_a) {
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+ flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
+ flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
+ flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
+ return true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
+
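[Editor's note] A minimal sketch, not part of this patch, restating the conflict rule checked above as a symmetric predicate: within one tunnel type, an IPv4/IPv6 "other" profile conflicts with the UDP/TCP/SCTP profiles of the same IP version. The enum values and demo_* names are hypothetical stand-ins.

#include <stdbool.h>
#include <stdio.h>

enum demo_ptype {
	DEMO_V4_UDP, DEMO_V4_TCP, DEMO_V4_SCTP, DEMO_V4_OTHER,
	DEMO_V6_UDP, DEMO_V6_TCP, DEMO_V6_SCTP, DEMO_V6_OTHER,
};

static bool demo_is_v4_l4(enum demo_ptype t)
{
	return t == DEMO_V4_UDP || t == DEMO_V4_TCP || t == DEMO_V4_SCTP;
}

static bool demo_is_v6_l4(enum demo_ptype t)
{
	return t == DEMO_V6_UDP || t == DEMO_V6_TCP || t == DEMO_V6_SCTP;
}

static bool demo_profiles_conflict(enum demo_ptype a, enum demo_ptype b)
{
	if (a == b)
		return false;	/* the same profile is reuse, not a conflict */
	return (a == DEMO_V4_OTHER && demo_is_v4_l4(b)) ||
	       (b == DEMO_V4_OTHER && demo_is_v4_l4(a)) ||
	       (a == DEMO_V6_OTHER && demo_is_v6_l4(b)) ||
	       (b == DEMO_V6_OTHER && demo_is_v6_l4(a));
}

int main(void)
{
	printf("%d\n", demo_profiles_conflict(DEMO_V4_OTHER, DEMO_V4_TCP)); /* 1 */
	printf("%d\n", demo_profiles_conflict(DEMO_V4_UDP, DEMO_V6_OTHER)); /* 0 */
	return 0;
}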
+/**
* ice_vc_fdir_write_flow_prof
* @vf: pointer to the VF structure
* @flow: filter flow type
@@ -751,7 +642,6 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
struct ice_flow_seg_info *old_seg;
struct ice_flow_prof *prof = NULL;
struct ice_fd_hw_prof *vf_prof;
- enum ice_status status;
struct device *dev;
struct ice_pf *pf;
struct ice_hw *hw;
@@ -763,7 +653,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
pf = vf->pf;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
- vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi)
return -EINVAL;
@@ -794,29 +684,26 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
tun ? ICE_FLTR_PTYPE_MAX : 0);
- status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
- tun + 1, &prof);
- ret = ice_status_to_errno(status);
+ ret = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
+ tun + 1, &prof);
if (ret) {
dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
flow, vf->vf_id);
goto err_exit;
}
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
- vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry1_h);
- ret = ice_status_to_errno(status);
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry1_h);
if (ret) {
dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
flow, vf->vf_id);
goto err_prof;
}
- status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
- ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
- seg, &entry2_h);
- ret = ice_status_to_errno(status);
+ ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
+ ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
+ seg, &entry2_h);
if (ret) {
dev_dbg(dev,
"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
@@ -871,6 +758,13 @@ ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
enum ice_fltr_ptype flow;
int ret;
+ ret = ice_vc_fdir_has_prof_conflict(vf, conf);
+ if (ret) {
+ dev_dbg(dev, "Found flow profile conflict for VF %d\n",
+ vf->vf_id);
+ return ret;
+ }
+
flow = input->flow_type;
ret = ice_vc_fdir_alloc_prof(vf, flow);
if (ret) {
@@ -911,83 +805,6 @@ err_exit:
}
/**
- * ice_vc_fdir_match_pattern
- * @fltr: virtual channel add cmd buffer
- * @type: virtual channel protocol filter header type
- *
- * Matching the header type by comparing fltr and type's value.
- *
- * Return: true on success, and false on error.
- */
-static bool
-ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr,
- enum virtchnl_proto_hdr_type *type)
-{
- struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
- int i = 0;
-
- while ((i < proto->count) &&
- (*type == proto->proto_hdr[i].type) &&
- (*type != VIRTCHNL_PROTO_HDR_NONE)) {
- type++;
- i++;
- }
-
- return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE));
-}
-
-/**
- * ice_vc_fdir_get_pattern - get while list pattern
- * @vf: pointer to the VF info
- * @len: filter list length
- *
- * Return: pointer to allowed filter list
- */
-static const struct virtchnl_fdir_pattern_match_item *
-ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len)
-{
- const struct virtchnl_fdir_pattern_match_item *item;
- struct ice_pf *pf = vf->pf;
- struct ice_hw *hw;
-
- hw = &pf->hw;
- if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
- sizeof(hw->active_pkg_name))) {
- item = vc_fdir_pattern_comms;
- *len = ARRAY_SIZE(vc_fdir_pattern_comms);
- } else {
- item = vc_fdir_pattern_os;
- *len = ARRAY_SIZE(vc_fdir_pattern_os);
- }
-
- return item;
-}
-
-/**
- * ice_vc_fdir_search_pattern
- * @vf: pointer to the VF info
- * @fltr: virtual channel add cmd buffer
- *
- * Search for matched pattern from supported pattern list
- *
- * Return: 0 on success, and other on error.
- */
-static int
-ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr)
-{
- const struct virtchnl_fdir_pattern_match_item *pattern;
- int len, i;
-
- pattern = ice_vc_fdir_get_pattern(vf, &len);
-
- for (i = 0; i < len; i++)
- if (ice_vc_fdir_match_pattern(fltr, pattern[i].list))
- return 0;
-
- return -EINVAL;
-}
-
-/**
* ice_vc_fdir_parse_pattern
* @vf: pointer to the VF info
* @fltr: virtual channel add cmd buffer
@@ -1299,11 +1116,11 @@ static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
struct virtchnl_fdir_fltr_conf *conf)
{
+ struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
int ret;
- ret = ice_vc_fdir_search_pattern(vf, fltr);
- if (ret)
- return ret;
+ if (!ice_vc_validate_pattern(vf, proto))
+ return -EINVAL;
ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
if (ret)
@@ -1467,7 +1284,6 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
struct ice_fdir_fltr *input = &conf->input;
struct ice_vsi *vsi, *ctrl_vsi;
struct ice_fltr_desc desc;
- enum ice_status status;
struct device *dev;
struct ice_pf *pf;
struct ice_hw *hw;
@@ -1477,7 +1293,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
pf = vf->pf;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
return -EINVAL;
@@ -1497,8 +1313,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
return -ENOMEM;
ice_fdir_get_prgm_desc(hw, input, &desc, add);
- status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
- ret = ice_status_to_errno(status);
+ ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
if (ret) {
dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
vf->vf_id, input->flow_type);
@@ -1562,15 +1377,16 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
union ice_32b_rx_flex_desc *rx_desc)
{
struct ice_pf *pf = ctrl_vsi->back;
+ struct ice_vf *vf = ctrl_vsi->vf;
struct ice_vf_fdir_ctx *ctx_done;
struct ice_vf_fdir_ctx *ctx_irq;
struct ice_vf_fdir *fdir;
unsigned long flags;
struct device *dev;
- struct ice_vf *vf;
int ret;
- vf = &pf->vf[ctrl_vsi->vf_id];
+ if (WARN_ON(!vf))
+ return;
fdir = &vf->fdir;
ctx_done = &fdir->ctx_done;
@@ -1616,12 +1432,17 @@ static void ice_vf_fdir_dump_info(struct ice_vf *vf)
pf = vf->pf;
hw = &pf->hw;
dev = ice_pf_to_dev(pf);
- vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_dbg(dev, "VF %d: invalid VSI pointer\n", vf->vf_id);
+ return;
+ }
+
vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
- dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x",
+ dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n",
vf->vf_id,
(fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
(fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
@@ -1845,15 +1666,16 @@ err_exit:
*/
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
- int i;
+ struct ice_vf *vf;
+ unsigned int bkt;
if (!test_and_clear_bit(ICE_FD_VF_FLUSH_CTX, pf->state))
return;
- ice_for_each_vf(pf, i) {
+ mutex_lock(&pf->vfs.table_lock);
+ ice_for_each_vf(pf, bkt, vf) {
struct device *dev = ice_pf_to_dev(pf);
enum virtchnl_fdir_prgm_status status;
- struct ice_vf *vf = &pf->vf[i];
struct ice_vf_fdir_ctx *ctx;
unsigned long flags;
int ret;
@@ -1907,6 +1729,7 @@ err_exit:
ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}
+ mutex_unlock(&pf->vfs.table_lock);
}
/**
@@ -2063,7 +1886,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
- goto err_free_conf;
+ goto err_rem_entry;
}
ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
@@ -2072,15 +1895,16 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
vf->vf_id, ret);
- goto err_rem_entry;
+ goto err_clr_irq;
}
exit:
kfree(stat);
return ret;
-err_rem_entry:
+err_clr_irq:
ice_vc_fdir_clear_irq_ctx(vf);
+err_rem_entry:
ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
devm_kfree(dev, conf);
@@ -2189,6 +2013,7 @@ void ice_vf_fdir_init(struct ice_vf *vf)
spin_lock_init(&fdir->ctx_lock);
fdir->ctx_irq.flags = 0;
fdir->ctx_done.flags = 0;
+ ice_vc_fdir_reset_cnt_all(fdir);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
index f4e629f4c09b..c5bcc8d7481c 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
@@ -6,6 +6,7 @@
struct ice_vf;
struct ice_pf;
+struct ice_vsi;
enum ice_fdir_ctx_stat {
ICE_FDIR_CTX_READY,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
deleted file mode 100644
index 6427e7ec93de..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ /dev/null
@@ -1,5343 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018, Intel Corporation. */
-
-#include "ice.h"
-#include "ice_base.h"
-#include "ice_lib.h"
-#include "ice_fltr.h"
-#include "ice_dcb_lib.h"
-#include "ice_flow.h"
-#include "ice_eswitch.h"
-#include "ice_virtchnl_allowlist.h"
-
-#define FIELD_SELECTOR(proto_hdr_field) \
- BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
-
-struct ice_vc_hdr_match_type {
- u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
- u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
-};
-
-static const struct ice_vc_hdr_match_type ice_vc_hdr_list_os[] = {
- {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
- {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
- {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
- {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
-};
-
-static const struct ice_vc_hdr_match_type ice_vc_hdr_list_comms[] = {
- {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
- {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
- {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
- {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
- {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
- {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
- {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
- {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
- {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
- {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
- {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
- ICE_FLOW_SEG_HDR_GTPU_DWN},
- {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
- ICE_FLOW_SEG_HDR_GTPU_UP},
- {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
- {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
- {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
- {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
-};
-
-struct ice_vc_hash_field_match_type {
- u32 vc_hdr; /* virtchnl headers
- * (VIRTCHNL_PROTO_HDR_XXX)
- */
- u32 vc_hash_field; /* virtchnl hash fields selector
- * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
- */
- u64 ice_hash_field; /* ice hash fields
- * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
- */
-};
-
-static const struct
-ice_vc_hash_field_match_type ice_vc_hash_field_list_os[] = {
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- ICE_FLOW_HASH_IPV4},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- ICE_FLOW_HASH_IPV6},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- ICE_FLOW_HASH_TCP_PORT},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- ICE_FLOW_HASH_UDP_PORT},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- ICE_FLOW_HASH_SCTP_PORT},
-};
-
-static const struct
-ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = {
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
- ICE_FLOW_HASH_ETH},
- {VIRTCHNL_PROTO_HDR_ETH,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
- {VIRTCHNL_PROTO_HDR_S_VLAN,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
- {VIRTCHNL_PROTO_HDR_C_VLAN,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- ICE_FLOW_HASH_IPV4},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- ICE_FLOW_HASH_IPV6},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- ICE_FLOW_HASH_TCP_PORT},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- ICE_FLOW_HASH_UDP_PORT},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- ICE_FLOW_HASH_SCTP_PORT},
- {VIRTCHNL_PROTO_HDR_PPPOE,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
- {VIRTCHNL_PROTO_HDR_GTPU_IP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
- {VIRTCHNL_PROTO_HDR_L2TPV3,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
- {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
- {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
- BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
- {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
-};
-
-/**
- * ice_get_vf_vsi - get VF's VSI based on the stored index
- * @vf: VF used to get VSI
- */
-struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
-{
- return vf->pf->vsi[vf->lan_vsi_idx];
-}
-
-/**
- * ice_validate_vf_id - helper to check if VF ID is valid
- * @pf: pointer to the PF structure
- * @vf_id: the ID of the VF to check
- */
-static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
-{
- /* vf_id range is only valid for 0-255, and should always be unsigned */
- if (vf_id >= pf->num_alloc_vfs) {
- dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
- return -EINVAL;
- }
- return 0;
-}
-
-/**
- * ice_check_vf_init - helper to check if VF init complete
- * @pf: pointer to the PF structure
- * @vf: the pointer to the VF to check
- */
-static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
-{
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
- vf->vf_id);
- return -EBUSY;
- }
- return 0;
-}
-
-/**
- * ice_err_to_virt_err - translate errors for VF return code
- * @ice_err: error return code
- */
-static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
-{
- switch (ice_err) {
- case ICE_SUCCESS:
- return VIRTCHNL_STATUS_SUCCESS;
- case ICE_ERR_BAD_PTR:
- case ICE_ERR_INVAL_SIZE:
- case ICE_ERR_DEVICE_NOT_SUPPORTED:
- case ICE_ERR_PARAM:
- case ICE_ERR_CFG:
- return VIRTCHNL_STATUS_ERR_PARAM;
- case ICE_ERR_NO_MEMORY:
- return VIRTCHNL_STATUS_ERR_NO_MEMORY;
- case ICE_ERR_NOT_READY:
- case ICE_ERR_RESET_FAILED:
- case ICE_ERR_FW_API_VER:
- case ICE_ERR_AQ_ERROR:
- case ICE_ERR_AQ_TIMEOUT:
- case ICE_ERR_AQ_FULL:
- case ICE_ERR_AQ_NO_WORK:
- case ICE_ERR_AQ_EMPTY:
- return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
- default:
- return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- }
-}
-
-/**
- * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
- * @pf: pointer to the PF structure
- * @v_opcode: operation code
- * @v_retval: return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- */
-static void
-ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
- enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
-{
- struct ice_hw *hw = &pf->hw;
- unsigned int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- /* Not all vfs are enabled so skip the ones that are not */
- if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
- continue;
-
- /* Ignore return value on purpose - a given VF may fail, but
- * we need to keep going and send to all of them
- */
- ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
- msglen, NULL);
- }
-}
-
-/**
- * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
- * @vf: pointer to the VF structure
- * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
- * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
- * @link_up: whether or not to set the link up/down
- */
-static void
-ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
- int ice_link_speed, bool link_up)
-{
- if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
- pfe->event_data.link_event_adv.link_status = link_up;
- /* Speed in Mbps */
- pfe->event_data.link_event_adv.link_speed =
- ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
- } else {
- pfe->event_data.link_event.link_status = link_up;
- /* Legacy method for virtchnl link speeds */
- pfe->event_data.link_event.link_speed =
- (enum virtchnl_link_speed)
- ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
- }
-}
-
-/**
- * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
- * @vf: the VF to check
- *
- * Returns true if the VF has no Rx and no Tx queues enabled and returns false
- * otherwise
- */
-static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
-{
- return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
- !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
-}
-
-/**
- * ice_is_vf_link_up - check if the VF's link is up
- * @vf: VF to check if link is up
- */
-static bool ice_is_vf_link_up(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
-
- if (ice_check_vf_init(pf, vf))
- return false;
-
- if (ice_vf_has_no_qs_ena(vf))
- return false;
- else if (vf->link_forced)
- return vf->link_up;
- else
- return pf->hw.port_info->phy.link_info.link_info &
- ICE_AQ_LINK_UP;
-}
-
-/**
- * ice_vc_notify_vf_link_state - Inform a VF of link status
- * @vf: pointer to the VF structure
- *
- * send a link status message to a single VF
- */
-void ice_vc_notify_vf_link_state(struct ice_vf *vf)
-{
- struct virtchnl_pf_event pfe = { 0 };
- struct ice_hw *hw = &vf->pf->hw;
-
- pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
- pfe.severity = PF_EVENT_SEVERITY_INFO;
-
- if (ice_is_vf_link_up(vf))
- ice_set_pfe_link(vf, &pfe,
- hw->port_info->phy.link_info.link_speed, true);
- else
- ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
-
- ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
- VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
- sizeof(pfe), NULL);
-}
-
-/**
- * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
- * @vf: VF to remove access to VSI for
- */
-static void ice_vf_invalidate_vsi(struct ice_vf *vf)
-{
- vf->lan_vsi_idx = ICE_NO_VSI;
- vf->lan_vsi_num = ICE_NO_VSI;
-}
-
-/**
- * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
- * @vf: invalidate this VF's VSI after freeing it
- */
-static void ice_vf_vsi_release(struct ice_vf *vf)
-{
- ice_vsi_release(ice_get_vf_vsi(vf));
- ice_vf_invalidate_vsi(vf);
-}
-
-/**
- * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
- * @vf: VF that control VSI is being invalidated on
- */
-static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
-{
- vf->ctrl_vsi_idx = ICE_NO_VSI;
-}
-
-/**
- * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
- * @vf: VF that control VSI is being released on
- */
-static void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
-{
- ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
- ice_vf_ctrl_invalidate_vsi(vf);
-}
-
-/**
- * ice_free_vf_res - Free a VF's resources
- * @vf: pointer to the VF info
- */
-static void ice_free_vf_res(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- int i, last_vector_idx;
-
- /* First, disable VF's configuration API to prevent OS from
- * accessing the VF's VSI after it's freed or invalidated.
- */
- clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
- ice_vf_fdir_exit(vf);
- /* free VF control VSI */
- if (vf->ctrl_vsi_idx != ICE_NO_VSI)
- ice_vf_ctrl_vsi_release(vf);
-
- /* free VSI and disconnect it from the parent uplink */
- if (vf->lan_vsi_idx != ICE_NO_VSI) {
- ice_vf_vsi_release(vf);
- vf->num_mac = 0;
- }
-
- last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
-
- /* clear VF MDD event information */
- memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
- memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
-
- /* Disable interrupts so that VF starts in a known state */
- for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
- wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
- ice_flush(&pf->hw);
- }
- /* reset some of the state variables keeping track of the resources */
- clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
-}
-
-/**
- * ice_dis_vf_mappings
- * @vf: pointer to the VF structure
- */
-static void ice_dis_vf_mappings(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- int first, last, v;
- struct ice_hw *hw;
-
- hw = &pf->hw;
- vsi = ice_get_vf_vsi(vf);
-
- dev = ice_pf_to_dev(pf);
- wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
- wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
-
- first = vf->first_vector_idx;
- last = first + pf->num_msix_per_vf - 1;
- for (v = first; v <= last; v++) {
- u32 reg;
-
- reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
- GLINT_VECT2FUNC_IS_PF_M) |
- ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
- GLINT_VECT2FUNC_PF_NUM_M));
- wr32(hw, GLINT_VECT2FUNC(v), reg);
- }
-
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
- wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
- else
- dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
-
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
- wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
- else
- dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
-}
-
-/**
- * ice_sriov_free_msix_res - Reset/free any used MSIX resources
- * @pf: pointer to the PF structure
- *
- * Since no MSIX entries are taken from the pf->irq_tracker then just clear
- * the pf->sriov_base_vector.
- *
- * Returns 0 on success, and -EINVAL on error.
- */
-static int ice_sriov_free_msix_res(struct ice_pf *pf)
-{
- struct ice_res_tracker *res;
-
- if (!pf)
- return -EINVAL;
-
- res = pf->irq_tracker;
- if (!res)
- return -EINVAL;
-
- /* give back irq_tracker resources used */
- WARN_ON(pf->sriov_base_vector < res->num_entries);
-
- pf->sriov_base_vector = 0;
-
- return 0;
-}
-
-/**
- * ice_set_vf_state_qs_dis - Set VF queues state to disabled
- * @vf: pointer to the VF structure
- */
-void ice_set_vf_state_qs_dis(struct ice_vf *vf)
-{
- /* Clear Rx/Tx enabled queues flag */
- bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
- bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-}
-
-/**
- * ice_dis_vf_qs - Disable the VF queues
- * @vf: pointer to the VF structure
- */
-static void ice_dis_vf_qs(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
- ice_vsi_stop_all_rx_rings(vsi);
- ice_set_vf_state_qs_dis(vf);
-}
-
-/**
- * ice_free_vfs - Free all VFs
- * @pf: pointer to the PF structure
- */
-void ice_free_vfs(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- unsigned int tmp, i;
-
- set_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
-
- if (!pf->vf)
- return;
-
- ice_eswitch_release(pf);
-
- while (test_and_set_bit(ICE_VF_DIS, pf->state))
- usleep_range(1000, 2000);
-
- /* Disable IOV before freeing resources. This lets any VF drivers
- * running in the host get themselves cleaned up before we yank
- * the carpet out from underneath their feet.
- */
- if (!pci_vfs_assigned(pf->pdev))
- pci_disable_sriov(pf->pdev);
- else
- dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
-
- /* Avoid wait time by stopping all VFs at the same time */
- ice_for_each_vf(pf, i)
- ice_dis_vf_qs(&pf->vf[i]);
-
- tmp = pf->num_alloc_vfs;
- pf->num_qps_per_vf = 0;
- pf->num_alloc_vfs = 0;
- for (i = 0; i < tmp; i++) {
- if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
- /* disable VF qp mappings and set VF disable state */
- ice_dis_vf_mappings(&pf->vf[i]);
- set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
- ice_free_vf_res(&pf->vf[i]);
- }
-
- mutex_destroy(&pf->vf[i].cfg_lock);
- }
-
- if (ice_sriov_free_msix_res(pf))
- dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
- devm_kfree(dev, pf->vf);
- pf->vf = NULL;
-
- /* This check is for when the driver is unloaded while VFs are
- * assigned. Setting the number of VFs to 0 through sysfs is caught
- * before this function ever gets called.
- */
- if (!pci_vfs_assigned(pf->pdev)) {
- unsigned int vf_id;
-
- /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
- * work correctly when SR-IOV gets re-enabled.
- */
- for (vf_id = 0; vf_id < tmp; vf_id++) {
- u32 reg_idx, bit_idx;
-
- reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
- bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
- wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
- }
- }
-
- /* clear malicious info if the VFs are getting released */
- for (i = 0; i < tmp; i++)
- if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs,
- ICE_MAX_VF_COUNT, i))
- dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
- i);
-
- clear_bit(ICE_VF_DIS, pf->state);
- clear_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state);
- clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
-}
-
-/**
- * ice_trigger_vf_reset - Reset a VF on HW
- * @vf: pointer to the VF structure
- * @is_vflr: true if VFLR was issued, false if not
- * @is_pfr: true if the reset was triggered due to a previous PFR
- *
- * Trigger hardware to start a reset for a particular VF. Expects the caller
- * to wait the proper amount of time to allow hardware to reset the VF before
- * it cleans up and restores VF functionality.
- */
-static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
-{
- struct ice_pf *pf = vf->pf;
- u32 reg, reg_idx, bit_idx;
- unsigned int vf_abs_id, i;
- struct device *dev;
- struct ice_hw *hw;
-
- dev = ice_pf_to_dev(pf);
- hw = &pf->hw;
- vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
-
- /* Inform VF that it is no longer active, as a warning */
- clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
-
- /* Disable VF's configuration API during reset. The flag is re-enabled
- * when it's safe again to access VF's VSI.
- */
- clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
-
- /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
- * needs to clear them in the case of VFR/VFLR. If this is done for
- * PFR, it can mess up VF resets because the VF driver may already
- * have started cleanup by the time we get here.
- */
- if (!is_pfr) {
- wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
- wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
- }
-
- /* In the case of a VFLR, the HW has already reset the VF and we
- * just need to clean up, so don't hit the VFRTRIG register.
- */
- if (!is_vflr) {
- /* reset VF using VPGEN_VFRTRIG reg */
- reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
- reg |= VPGEN_VFRTRIG_VFSWR_M;
- wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
- }
- /* clear the VFLR bit in GLGEN_VFLRSTAT */
- reg_idx = (vf_abs_id) / 32;
- bit_idx = (vf_abs_id) % 32;
- wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
- ice_flush(hw);
-
- wr32(hw, PF_PCI_CIAA,
- VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
- for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
- reg = rd32(hw, PF_PCI_CIAD);
- /* no transactions pending so stop polling */
- if ((reg & VF_TRANS_PENDING_M) == 0)
- break;
-
- dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
- udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
- }
-}
-
-/**
- * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
- * @vsi: the VSI to update
- * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
- * @enable: true for enable PVID false for disable
- */
-static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
-{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_aqc_vsi_props *info;
- struct ice_vsi_ctx *ctxt;
- enum ice_status status;
- int ret = 0;
-
- ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
- if (!ctxt)
- return -ENOMEM;
-
- ctxt->info = vsi->info;
- info = &ctxt->info;
- if (enable) {
- info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
- ICE_AQ_VSI_PVLAN_INSERT_PVID |
- ICE_AQ_VSI_VLAN_EMOD_STR;
- info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- } else {
- info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
- ICE_AQ_VSI_VLAN_MODE_ALL;
- info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- }
-
- info->pvid = cpu_to_le16(pvid_info);
- info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
-
- status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
- if (status) {
- dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- ret = -EIO;
- goto out;
- }
-
- vsi->info.vlan_flags = info->vlan_flags;
- vsi->info.sw_flags2 = info->sw_flags2;
- vsi->info.pvid = info->pvid;
-out:
- kfree(ctxt);
- return ret;
-}
-
-/**
- * ice_vf_get_port_info - Get the VF's port info structure
- * @vf: VF used to get the port info structure for
- */
-static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
-{
- return vf->pf->hw.port_info;
-}
-
-/**
- * ice_vf_vsi_setup - Set up a VF VSI
- * @vf: VF to setup VSI for
- *
- * Returns pointer to the successfully allocated VSI struct on success,
- * otherwise returns NULL on failure.
- */
-static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
-{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL);
-
- if (!vsi) {
- dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
- ice_vf_invalidate_vsi(vf);
- return NULL;
- }
-
- vf->lan_vsi_idx = vsi->idx;
- vf->lan_vsi_num = vsi->vsi_num;
-
- return vsi;
-}
-
-/**
- * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
- * @vf: VF to setup control VSI for
- *
- * Returns pointer to the successfully allocated VSI struct on success,
- * otherwise returns NULL on failure.
- */
-struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
-{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id, NULL);
- if (!vsi) {
- dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
- ice_vf_ctrl_invalidate_vsi(vf);
- }
-
- return vsi;
-}
-
-/**
- * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
- * @pf: pointer to PF structure
- * @vf: pointer to VF that the first MSIX vector index is being calculated for
- *
- * This returns the first MSIX vector index in PF space that is used by this VF.
- * This index is used when accessing PF relative registers such as
- * GLINT_VECT2FUNC and GLINT_DYN_CTL.
- * This will always be the OICR index in the AVF driver so any functionality
- * using vf->first_vector_idx for queue configuration will have to increment by
- * 1 to avoid meddling with the OICR index.
- */
-static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
-{
- return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
-}
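-
-/* Worked example of the index arithmetic above (hypothetical values, for
- * illustration only): with pf->sriov_base_vector = 96 and
- * pf->num_msix_per_vf = 17, the VF with vf_id 2 gets
- * first_vector_idx = 96 + 17 * 2 = 130; that vector carries the OICR and
- * queue vectors start one past it (see ice_calc_vf_reg_idx()).
- */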
-
-/**
- * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
- * @vf: VF to re-apply the configuration for
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * needs to re-apply the host-configured Tx rate limiting configuration.
- */
-static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- int err;
-
- if (vf->min_tx_rate) {
- err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
- if (err) {
- dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
- vf->min_tx_rate, vf->vf_id, err);
- return err;
- }
- }
-
- if (vf->max_tx_rate) {
- err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
- if (err) {
- dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
- vf->max_tx_rate, vf->vf_id, err);
- return err;
- }
- }
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
- * @vf: VF to add VLAN filters for
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * always re-adds either a VLAN 0 or port VLAN based filter after reset.
- */
-static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- u16 vlan_id = 0;
- int err;
-
- if (vf->port_vlan_info) {
- err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
- if (err) {
- dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
- vf->vf_id, err);
- return err;
- }
-
- vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
- }
-
- /* vlan_id will either be 0 or the port VLAN number */
- err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
- if (err) {
- dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
- vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
- err);
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
- * @vf: VF to add MAC filters for
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
- */
-static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- enum ice_status status;
- u8 broadcast[ETH_ALEN];
-
- if (ice_is_eswitch_mode_switchdev(vf->pf))
- return 0;
-
- eth_broadcast_addr(broadcast);
- status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
- vf->vf_id, ice_stat_str(status));
- return ice_status_to_errno(status);
- }
-
- vf->num_mac++;
-
- if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
- status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
- ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
- &vf->hw_lan_addr.addr[0], vf->vf_id,
- ice_stat_str(status));
- return ice_status_to_errno(status);
- }
- vf->num_mac++;
-
- ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
- }
-
- return 0;
-}
-
-/**
- * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
- * @vf: VF to configure trust setting for
- */
-static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
-{
- if (vf->trusted)
- set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
- else
- clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-}
-
-/**
- * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
- * @vf: VF to enable MSIX mappings for
- *
- * Some of the registers need to be indexed/configured using hardware global
- * device values, while other registers need 0-based, PF-relative values.
- */
-static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
-{
- int device_based_first_msix, device_based_last_msix;
- int pf_based_first_msix, pf_based_last_msix, v;
- struct ice_pf *pf = vf->pf;
- int device_based_vf_id;
- struct ice_hw *hw;
- u32 reg;
-
- hw = &pf->hw;
- pf_based_first_msix = vf->first_vector_idx;
- pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;
-
- device_based_first_msix = pf_based_first_msix +
- pf->hw.func_caps.common_cap.msix_vector_first_id;
- device_based_last_msix =
- (device_based_first_msix + pf->num_msix_per_vf) - 1;
- device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
-
- reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
- VPINT_ALLOC_FIRST_M) |
- ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
- VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
- wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
-
- reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
- & VPINT_ALLOC_PCI_FIRST_M) |
- ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
- VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
- wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
-
- /* map the interrupts to its functions */
- for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
- reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
- GLINT_VECT2FUNC_VF_NUM_M) |
- ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
- GLINT_VECT2FUNC_PF_NUM_M));
- wr32(hw, GLINT_VECT2FUNC(v), reg);
- }
-
- /* Map mailbox interrupt to VF MSI-X vector 0 */
- wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
-}
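-
-/* Illustrative note: VPINT_ALLOC/VPINT_ALLOC_PCI above are programmed with
- * device-global vector numbers (the PF-relative index plus
- * msix_vector_first_id), while the GLINT_VECT2FUNC loop walks PF-relative
- * indices. Assuming a hypothetical msix_vector_first_id of 0 and
- * first_vector_idx of 130, the two numberings coincide; on functions with a
- * non-zero offset they differ.
- */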
-
-/**
- * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
- * @vf: VF to enable the mappings for
- * @max_txq: max Tx queues allowed on the VF's VSI
- * @max_rxq: max Rx queues allowed on the VF's VSI
- */
-static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- struct ice_hw *hw = &vf->pf->hw;
- u32 reg;
-
- /* set regardless of mapping mode */
- wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
-
- /* VF Tx queues allocation */
- if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
- /* set the VF PF Tx queue range
- * VFNUMQ value should be set to (number of queues - 1). A value
- * of 0 means 1 queue and a value of 255 means 256 queues
- */
- reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
- VPLAN_TX_QBASE_VFFIRSTQ_M) |
- (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
- VPLAN_TX_QBASE_VFNUMQ_M));
- wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
- } else {
- dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
- }
-
- /* set regardless of mapping mode */
- wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
-
- /* VF Rx queues allocation */
- if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
- /* set the VF PF Rx queue range
- * VFNUMQ value should be set to (number of queues - 1). A value
- * of 0 means 1 queue and a value of 255 means 256 queues
- */
- reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
- VPLAN_RX_QBASE_VFFIRSTQ_M) |
- (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
- VPLAN_RX_QBASE_VFNUMQ_M));
- wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
- } else {
- dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
- }
-}
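-
-/* Example (hypothetical queue numbers): a VF VSI with txq_map[0] = 40 and
- * max_txq = 16 programs VPLAN_TX_QBASE with VFFIRSTQ = 40 and VFNUMQ = 15,
- * since the field encodes (number of queues - 1) as noted above.
- */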
-
-/**
- * ice_ena_vf_mappings - enable VF MSIX and queue mapping
- * @vf: pointer to the VF structure
- */
-static void ice_ena_vf_mappings(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- ice_ena_vf_msix_mappings(vf);
- ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
-}
-
-/**
- * ice_determine_res
- * @pf: pointer to the PF structure
- * @avail_res: available resources in the PF structure
- * @max_res: maximum resources that can be given per VF
- * @min_res: minimum resources that can be given per VF
- *
- * Returns a non-zero value if resources (queues/vectors) are available, or
- * returns zero if the PF cannot accommodate all num_alloc_vfs.
- */
-static int
-ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
-{
- bool checked_min_res = false;
- int res;
-
- /* start by checking if PF can assign max number of resources for
- * all num_alloc_vfs.
- * if yes, return number per VF
- * If no, divide by 2 and roundup, check again
- * repeat the loop till we reach a point where even minimum resources
- * are not available, in that case return 0
- */
- res = max_res;
- while ((res >= min_res) && !checked_min_res) {
- int num_all_res;
-
- num_all_res = pf->num_alloc_vfs * res;
- if (num_all_res <= avail_res)
- return res;
-
- if (res == min_res)
- checked_min_res = true;
-
- res = DIV_ROUND_UP(res, 2);
- }
- return 0;
-}
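-
-/* Example of the halving search above (hypothetical numbers): with 8 VFs,
- * avail_res = 40, max_res = 16 and min_res = 1, the loop tries 8 * 16 = 128,
- * then 8 * 8 = 64, then 8 * 4 = 32 <= 40 and returns 4 resources per VF.
- */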
-
-/**
- * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
- * @vf: VF to calculate the register index for
- * @q_vector: a q_vector associated to the VF
- */
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
-{
- struct ice_pf *pf;
-
- if (!vf || !q_vector)
- return -EINVAL;
-
- pf = vf->pf;
-
- /* always add one to account for the OICR being the first MSIX */
- return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
- q_vector->v_idx + 1;
-}
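-
-/* Example (hypothetical values): with sriov_base_vector = 96,
- * num_msix_per_vf = 17 and vf_id = 2, q_vector 0 maps to register index
- * 96 + 17 * 2 + 0 + 1 = 131, i.e. one past the VF's OICR vector.
- */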
-
-/**
- * ice_get_max_valid_res_idx - Get the max valid resource index
- * @res: pointer to the resource to find the max valid index for
- *
- * Start from the end of the ice_res_tracker and return right when we find the
- * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
- * valid for SR-IOV because it is the only consumer that manipulates the
- * res->end and this is always called when res->end is set to res->num_entries.
- */
-static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
-{
- int i;
-
- if (!res)
- return -EINVAL;
-
- for (i = res->num_entries - 1; i >= 0; i--)
- if (res->list[i] & ICE_RES_VALID_BIT)
- return i;
-
- return 0;
-}
-
-/**
- * ice_sriov_set_msix_res - Set any used MSIX resources
- * @pf: pointer to PF structure
- * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
- *
- * This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
- * just set the pf->sriov_base_vector and return success.
- *
- * If there are not enough resources available, return an error. This should
- * always be caught by ice_set_per_vf_res().
- *
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
- * in the PF's space available for SR-IOV.
- */
-static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
-{
- u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors_used = pf->irq_tracker->num_entries;
- int sriov_base_vector;
-
- sriov_base_vector = total_vectors - num_msix_needed;
-
- /* make sure we only grab irq_tracker entries from the list end and
- * that we have enough available MSIX vectors
- */
- if (sriov_base_vector < vectors_used)
- return -EINVAL;
-
- pf->sriov_base_vector = sriov_base_vector;
-
- return 0;
-}
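-
-/* Example (hypothetical numbers): with num_msix_vectors = 1024, 200 entries
- * already used in the irq_tracker and 8 VFs * 17 vectors = 136 needed, the
- * SR-IOV base becomes 1024 - 136 = 888, which is >= 200, so the request
- * succeeds and VF vectors occupy the tail of the PF's MSI-X space.
- */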
-
-/**
- * ice_set_per_vf_res - check if vectors and queues are available
- * @pf: pointer to the PF structure
- *
- * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
- * get more vectors and can enable more queues per VF. Note that this does not
- * grab any vectors from the SW pool already allocated. Also note, that all
- * vector counts include one for each VF's miscellaneous interrupt vector
- * (i.e. OICR).
- *
- * Minimum VFs - 2 vectors, 1 queue pair
- * Small VFs - 5 vectors, 4 queue pairs
- * Medium VFs - 17 vectors, 16 queue pairs
- *
- * Second, determine number of queue pairs per VF by starting with a pre-defined
- * maximum each VF supports. If this is not possible, then we adjust based on
- * queue pairs available on the device.
- *
- * Lastly, set queue and MSI-X VF variables tracked by the PF so they can be used
- * by each VF during VF initialization and reset.
- */
-static int ice_set_per_vf_res(struct ice_pf *pf)
-{
- int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
- int msix_avail_per_vf, msix_avail_for_sriov;
- struct device *dev = ice_pf_to_dev(pf);
- u16 num_msix_per_vf, num_txq, num_rxq;
-
- if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
- return -EINVAL;
-
- /* determine MSI-X resources per VF */
- msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
- pf->irq_tracker->num_entries;
- msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
- if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
- num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
- } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
- num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
- } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
- num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
- } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
- num_msix_per_vf = ICE_MIN_INTR_PER_VF;
- } else {
- dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
- msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
- pf->num_alloc_vfs);
- return -EIO;
- }
-
- /* determine queue resources per VF */
- num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
- min_t(u16,
- num_msix_per_vf - ICE_NONQ_VECS_VF,
- ICE_MAX_RSS_QS_PER_VF),
- ICE_MIN_QS_PER_VF);
-
- num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
- min_t(u16,
- num_msix_per_vf - ICE_NONQ_VECS_VF,
- ICE_MAX_RSS_QS_PER_VF),
- ICE_MIN_QS_PER_VF);
-
- if (!num_txq || !num_rxq) {
- dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
- ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
- return -EIO;
- }
-
- if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
- dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
- pf->num_alloc_vfs);
- return -EINVAL;
- }
-
- /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
- pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
- pf->num_msix_per_vf = num_msix_per_vf;
- dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
- pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
-
- return 0;
-}
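-
-/* Example of the MSI-X tier selection above (hypothetical numbers): with
- * msix_avail_for_sriov = 100 and 8 VFs, msix_avail_per_vf = 12, which is
- * below ICE_NUM_VF_MSIX_MED (17) but at least ICE_NUM_VF_MSIX_SMALL (5), so
- * each VF is given 5 vectors; queue pairs are then capped at
- * 5 - ICE_NONQ_VECS_VF by the ice_determine_res() calls.
- */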
-
-/**
- * ice_clear_vf_reset_trigger - enable VF to access hardware
- * @vf: VF to enable hardware access for
- */
-static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
-{
- struct ice_hw *hw = &vf->pf->hw;
- u32 reg;
-
- reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
- reg &= ~VPGEN_VFRTRIG_VFSWR_M;
- wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
- ice_flush(hw);
-}
-
-/**
- * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
- * @vf: pointer to the VF info
- * @vsi: the VSI being configured
- * @promisc_m: mask of promiscuous config bits
- * @rm_promisc: true to clear the requested promiscuous mode(s), false to set them
- *
- * This function configures VF VSI promiscuous mode, based on the VF requests,
- * for Unicast, Multicast and VLAN
- */
-static enum ice_status
-ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
- bool rm_promisc)
-{
- struct ice_pf *pf = vf->pf;
- enum ice_status status = 0;
- struct ice_hw *hw;
-
- hw = &pf->hw;
- if (vsi->num_vlan) {
- status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
- rm_promisc);
- } else if (vf->port_vlan_info) {
- if (rm_promisc)
- status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_info);
- else
- status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
- vf->port_vlan_info);
- } else {
- if (rm_promisc)
- status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
- else
- status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
- 0);
- }
-
- return status;
-}
-
-static void ice_vf_clear_counters(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- vf->num_mac = 0;
- vsi->num_vlan = 0;
- memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
- memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
-}
-
-/**
- * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
- * @vf: VF to perform pre VSI rebuild tasks
- *
- * These tasks are items that don't need to be amortized since they are most
- * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
- */
-static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
-{
- ice_vf_clear_counters(vf);
- ice_clear_vf_reset_trigger(vf);
-}
-
-/**
- * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
- * @vsi: Pointer to VSI
- *
- * This function moves VSI into corresponding scheduler aggregator node
- * based on cached value of "aggregator node info" per VSI
- */
-static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- enum ice_status status;
- struct device *dev;
-
- if (!vsi->agg_node)
- return;
-
- dev = ice_pf_to_dev(pf);
- if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
- dev_dbg(dev,
- "agg_id %u already has reached max_num_vsis %u\n",
- vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
- return;
- }
-
- status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
- vsi->idx, vsi->tc_cfg.ena_tc);
- if (status)
- dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
- vsi->idx, vsi->agg_node->agg_id);
- else
- vsi->agg_node->num_vsis++;
-}
-
-/**
- * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
- * @vf: VF to rebuild host configuration on
- */
-static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- ice_vf_set_host_trust_cfg(vf);
-
- if (ice_vf_rebuild_host_mac_cfg(vf))
- dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
- vf->vf_id);
-
- if (ice_vf_rebuild_host_vlan_cfg(vf))
- dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
- vf->vf_id);
-
- if (ice_vf_rebuild_host_tx_rate_cfg(vf))
- dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
- vf->vf_id);
-
- /* rebuild aggregator node config for main VF VSI */
- ice_vf_rebuild_aggregator_node_cfg(vsi);
-}
-
-/**
- * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
- * @vf: VF to release and setup the VSI for
- *
- * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
- * configuration change, etc.).
- */
-static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
-{
- ice_vf_vsi_release(vf);
- if (!ice_vf_vsi_setup(vf))
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_vsi - rebuild the VF's VSI
- * @vf: VF to rebuild the VSI for
- *
- * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
- * host, PFR, CORER, etc.).
- */
-static int ice_vf_rebuild_vsi(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- struct ice_pf *pf = vf->pf;
-
- if (ice_vsi_rebuild(vsi, true)) {
- dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
- vf->vf_id);
- return -EIO;
- }
- /* vsi->idx will remain the same in this case so don't update
- * vf->lan_vsi_idx
- */
- vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
- vf->lan_vsi_num = vsi->vsi_num;
-
- return 0;
-}
-
-/**
- * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
- * @vf: VF to set in initialized state
- *
- * After this function the VF will be ready to receive/handle the
- * VIRTCHNL_OP_GET_VF_RESOURCES message
- */
-static void ice_vf_set_initialized(struct ice_vf *vf)
-{
- ice_set_vf_state_qs_dis(vf);
- clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
- set_bit(ICE_VF_STATE_INIT, vf->vf_states);
-}
-
-/**
- * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
- * @vf: VF to perform tasks on
- */
-static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- struct ice_hw *hw;
-
- hw = &pf->hw;
-
- ice_vf_rebuild_host_cfg(vf);
-
- ice_vf_set_initialized(vf);
- ice_ena_vf_mappings(vf);
- wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
-}
-
-/**
- * ice_reset_all_vfs - reset all allocated VFs in one go
- * @pf: pointer to the PF structure
- * @is_vflr: true if VFLR was issued, false if not
- *
- * First, tell the hardware to reset each VF, then do all the waiting in one
- * chunk, and finally finish restoring each VF after the wait. This is useful
- * during PF routines that need to reset all VFs, which would otherwise have to
- * perform these resets in a serialized fashion.
- *
- * Returns true if any VFs were reset, and false otherwise.
- */
-bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- struct ice_vf *vf;
- int v, i;
-
- /* If we don't have any VFs, then there is nothing to reset */
- if (!pf->num_alloc_vfs)
- return false;
-
- /* clear all malicious info if the VFs are getting reset */
- ice_for_each_vf(pf, i)
- if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i))
- dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
-
- /* If VFs have been disabled, there is no need to reset */
- if (test_and_set_bit(ICE_VF_DIS, pf->state))
- return false;
-
- /* Begin reset on all VFs at once */
- ice_for_each_vf(pf, v)
- ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
-
- /* HW requires some time to make sure it can flush the FIFO for a VF
- * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
- * sequence to make sure that it has completed. We'll keep track of
- * the VFs using a simple iterator that increments once that VF has
- * finished resetting.
- */
- for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
- /* Check each VF in sequence */
- while (v < pf->num_alloc_vfs) {
- u32 reg;
-
- vf = &pf->vf[v];
- reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
- if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
- /* only delay if the check failed */
- usleep_range(10, 20);
- break;
- }
-
- /* If the current VF has finished resetting, move on
- * to the next VF in sequence.
- */
- v++;
- }
- }
-
- /* Display a warning if at least one VF didn't manage to reset in
- * time, but continue on with the operation.
- */
- if (v < pf->num_alloc_vfs)
- dev_warn(dev, "VF reset check timeout\n");
-
- /* free VF resources to begin resetting the VSI state */
- ice_for_each_vf(pf, v) {
- vf = &pf->vf[v];
-
- vf->driver_caps = 0;
- ice_vc_set_default_allowlist(vf);
-
- ice_vf_fdir_exit(vf);
- ice_vf_fdir_init(vf);
- /* clean VF control VSI when resetting VFs since it should be
- * setup only when VF creates its first FDIR rule.
- */
- if (vf->ctrl_vsi_idx != ICE_NO_VSI)
- ice_vf_ctrl_invalidate_vsi(vf);
-
- ice_vf_pre_vsi_rebuild(vf);
- ice_vf_rebuild_vsi(vf);
- ice_vf_post_vsi_rebuild(vf);
- }
-
- if (ice_is_eswitch_mode_switchdev(pf))
- if (ice_eswitch_rebuild(pf))
- dev_warn(dev, "eswitch rebuild failed\n");
-
- ice_flush(hw);
- clear_bit(ICE_VF_DIS, pf->state);
-
- return true;
-}
-
-/**
- * ice_is_vf_disabled
- * @vf: pointer to the VF info
- *
- * Returns true if the PF or VF is disabled, false otherwise.
- */
-bool ice_is_vf_disabled(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
-
- /* If the PF has been disabled, there is no need to reset the VF until
- * the PF is active again. Similarly, if the VF has been disabled, this
- * means something else is resetting the VF, so we shouldn't continue.
- * Otherwise, the caller sets the disable VF state bit for the actual
- * reset and continues.
- */
- return (test_bit(ICE_VF_DIS, pf->state) ||
- test_bit(ICE_VF_STATE_DIS, vf->vf_states));
-}
-
-/**
- * ice_reset_vf - Reset a particular VF
- * @vf: pointer to the VF structure
- * @is_vflr: true if VFLR was issued, false if not
- *
- * Returns true if the VF is currently in reset, resets successfully, or resets
- * are disabled and false otherwise.
- */
-bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
-{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- struct ice_hw *hw;
- bool rsd = false;
- u8 promisc_m;
- u32 reg;
- int i;
-
- dev = ice_pf_to_dev(pf);
-
- if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
- dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
- vf->vf_id);
- return true;
- }
-
- if (ice_is_vf_disabled(vf)) {
- dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
- vf->vf_id);
- return true;
- }
-
- /* Set VF disable bit state here, before triggering reset */
- set_bit(ICE_VF_STATE_DIS, vf->vf_states);
- ice_trigger_vf_reset(vf, is_vflr, false);
-
- vsi = ice_get_vf_vsi(vf);
-
- ice_dis_vf_qs(vf);
-
- /* Call Disable LAN Tx queue AQ whether or not queues are
- * enabled. This is needed for successful completion of VFR.
- */
- ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
- NULL, ICE_VF_RESET, vf->vf_id, NULL);
-
- hw = &pf->hw;
- /* poll VPGEN_VFRSTAT reg to make sure
- * that reset is complete
- */
- for (i = 0; i < 10; i++) {
- /* VF reset requires driver to first reset the VF and then
- * poll the status register to make sure that the reset
- * completed successfully.
- */
- reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
- if (reg & VPGEN_VFRSTAT_VFRD_M) {
- rsd = true;
- break;
- }
-
- /* only sleep if the reset is not done */
- usleep_range(10, 20);
- }
-
- vf->driver_caps = 0;
- ice_vc_set_default_allowlist(vf);
-
- /* Display a warning if the VF didn't manage to reset in time, but
- * continue on with the operation.
- */
- if (!rsd)
- dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
-
- /* disable promiscuous modes in case they were enabled
- * ignore any error if disabling process failed
- */
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
- if (vf->port_vlan_info || vsi->num_vlan)
- promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
- else
- promisc_m = ICE_UCAST_PROMISC_BITS;
-
- if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
- dev_err(dev, "disabling promiscuous mode failed\n");
- }
-
- ice_vf_fdir_exit(vf);
- ice_vf_fdir_init(vf);
- /* clean VF control VSI when resetting VF since it should be setup
- * only when VF creates its first FDIR rule.
- */
- if (vf->ctrl_vsi_idx != ICE_NO_VSI)
- ice_vf_ctrl_vsi_release(vf);
-
- ice_vf_pre_vsi_rebuild(vf);
-
- if (ice_vf_rebuild_vsi_with_release(vf)) {
- dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
- return false;
- }
-
- ice_vf_post_vsi_rebuild(vf);
- vsi = ice_get_vf_vsi(vf);
- ice_eswitch_update_repr(vsi);
-
- /* if the VF has been reset allow it to come up again */
- if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
- dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
-
- return true;
-}
-
-/**
- * ice_vc_notify_link_state - Inform all VFs on a PF of link status
- * @pf: pointer to the PF structure
- */
-void ice_vc_notify_link_state(struct ice_pf *pf)
-{
- int i;
-
- ice_for_each_vf(pf, i)
- ice_vc_notify_vf_link_state(&pf->vf[i]);
-}
-
-/**
- * ice_vc_notify_reset - Send pending reset message to all VFs
- * @pf: pointer to the PF structure
- *
- * indicate a pending reset to all VFs on a given PF
- */
-void ice_vc_notify_reset(struct ice_pf *pf)
-{
- struct virtchnl_pf_event pfe;
-
- if (!pf->num_alloc_vfs)
- return;
-
- pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
- pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
- (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
-}
-
-/**
- * ice_vc_notify_vf_reset - Notify VF of a reset event
- * @vf: pointer to the VF structure
- */
-static void ice_vc_notify_vf_reset(struct ice_vf *vf)
-{
- struct virtchnl_pf_event pfe;
- struct ice_pf *pf;
-
- if (!vf)
- return;
-
- pf = vf->pf;
- if (ice_validate_vf_id(pf, vf->vf_id))
- return;
-
- /* Bail out if the VF is in the disabled state or is neither initialized
- * nor active - otherwise proceed with notifications
- */
- if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
- test_bit(ICE_VF_STATE_DIS, vf->vf_states))
- return;
-
- pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
- pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
- VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
- NULL);
-}
-
-/**
- * ice_init_vf_vsi_res - initialize/setup VF VSI resources
- * @vf: VF to initialize/setup the VSI for
- *
- * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up the
- * VF VSI's broadcast filter. It is only used during initial VF creation.
- */
-static int ice_init_vf_vsi_res(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- u8 broadcast[ETH_ALEN];
- enum ice_status status;
- struct ice_vsi *vsi;
- struct device *dev;
- int err;
-
- vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
-
- dev = ice_pf_to_dev(pf);
- vsi = ice_vf_vsi_setup(vf);
- if (!vsi)
- return -ENOMEM;
-
- err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
- if (err) {
- dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
- vf->vf_id);
- goto release_vsi;
- }
-
- eth_broadcast_addr(broadcast);
- status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
- vf->vf_id, ice_stat_str(status));
- err = ice_status_to_errno(status);
- goto release_vsi;
- }
-
- vf->num_mac = 1;
-
- return 0;
-
-release_vsi:
- ice_vf_vsi_release(vf);
- return err;
-}
-
-/**
- * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
- * @pf: PF the VFs are associated with
- */
-static int ice_start_vfs(struct ice_pf *pf)
-{
- struct ice_hw *hw = &pf->hw;
- int retval, i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_clear_vf_reset_trigger(vf);
-
- retval = ice_init_vf_vsi_res(vf);
- if (retval) {
- dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
- vf->vf_id, retval);
- goto teardown;
- }
-
- set_bit(ICE_VF_STATE_INIT, vf->vf_states);
- ice_ena_vf_mappings(vf);
- wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
- }
-
- ice_flush(hw);
- return 0;
-
-teardown:
- for (i = i - 1; i >= 0; i--) {
- struct ice_vf *vf = &pf->vf[i];
-
- ice_dis_vf_mappings(vf);
- ice_vf_vsi_release(vf);
- }
-
- return retval;
-}
-
-/**
- * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
- * @pf: PF holding reference to all VFs for default configuration
- */
-static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
-{
- int i;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- vf->pf = pf;
- vf->vf_id = i;
- vf->vf_sw_id = pf->first_sw;
- /* assign default capabilities */
- set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
- vf->spoofchk = true;
- vf->num_vf_qs = pf->num_qps_per_vf;
- ice_vc_set_default_allowlist(vf);
-
- /* ctrl_vsi_idx will be set to a valid value only when VF
- * creates its first fdir rule.
- */
- ice_vf_ctrl_invalidate_vsi(vf);
- ice_vf_fdir_init(vf);
-
- ice_vc_set_dflt_vf_ops(&vf->vc_ops);
-
- mutex_init(&vf->cfg_lock);
- }
-}
-
-/**
- * ice_alloc_vfs - allocate num_vfs in the PF structure
- * @pf: PF to store the allocated VFs in
- * @num_vfs: number of VFs to allocate
- */
-static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
-{
- struct ice_vf *vfs;
-
- vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
- GFP_KERNEL);
- if (!vfs)
- return -ENOMEM;
-
- pf->vf = vfs;
- pf->num_alloc_vfs = num_vfs;
-
- return 0;
-}
-
-/**
- * ice_ena_vfs - enable VFs so they are ready to be used
- * @pf: pointer to the PF structure
- * @num_vfs: number of VFs to enable
- */
-static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- int ret;
-
- /* Disable global interrupt 0 so we don't try to handle the VFLR. */
- wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
- ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
- set_bit(ICE_OICR_INTR_DIS, pf->state);
- ice_flush(hw);
-
- ret = pci_enable_sriov(pf->pdev, num_vfs);
- if (ret) {
- pf->num_alloc_vfs = 0;
- goto err_unroll_intr;
- }
-
- ret = ice_alloc_vfs(pf, num_vfs);
- if (ret)
- goto err_pci_disable_sriov;
-
- if (ice_set_per_vf_res(pf)) {
- dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
- num_vfs);
- ret = -ENOSPC;
- goto err_unroll_sriov;
- }
-
- ice_set_dflt_settings_vfs(pf);
-
- if (ice_start_vfs(pf)) {
- dev_err(dev, "Failed to start VF(s)\n");
- ret = -EAGAIN;
- goto err_unroll_sriov;
- }
-
- clear_bit(ICE_VF_DIS, pf->state);
-
- ret = ice_eswitch_configure(pf);
- if (ret)
- goto err_unroll_sriov;
-
- /* rearm global interrupts */
- if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
- ice_irq_dynamic_ena(hw, NULL, NULL);
-
- return 0;
-
-err_unroll_sriov:
- devm_kfree(dev, pf->vf);
- pf->vf = NULL;
- pf->num_alloc_vfs = 0;
-err_pci_disable_sriov:
- pci_disable_sriov(pf->pdev);
-err_unroll_intr:
- /* rearm interrupts here */
- ice_irq_dynamic_ena(hw, NULL, NULL);
- clear_bit(ICE_OICR_INTR_DIS, pf->state);
- return ret;
-}
-
-/**
- * ice_pci_sriov_ena - Enable or change number of VFs
- * @pf: pointer to the PF structure
- * @num_vfs: number of VFs to allocate
- *
- * Returns 0 on success and negative on failure
- */
-static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
-{
- int pre_existing_vfs = pci_num_vf(pf->pdev);
- struct device *dev = ice_pf_to_dev(pf);
- int err;
-
- if (pre_existing_vfs && pre_existing_vfs != num_vfs)
- ice_free_vfs(pf);
- else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
- return 0;
-
- if (num_vfs > pf->num_vfs_supported) {
- dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
- num_vfs, pf->num_vfs_supported);
- return -EOPNOTSUPP;
- }
-
- dev_info(dev, "Enabling %d VFs\n", num_vfs);
- err = ice_ena_vfs(pf, num_vfs);
- if (err) {
- dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
- return err;
- }
-
- set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
- return 0;
-}
-
-/**
- * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
- * @pf: PF to enable SR-IOV on
- */
-static int ice_check_sriov_allowed(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
-
- if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
- dev_err(dev, "This device is not capable of SR-IOV\n");
- return -EOPNOTSUPP;
- }
-
- if (ice_is_safe_mode(pf)) {
- dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
- return -EOPNOTSUPP;
- }
-
- if (!ice_pf_state_is_nominal(pf)) {
- dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-/**
- * ice_sriov_configure - Enable or change number of VFs via sysfs
- * @pdev: pointer to a pci_dev structure
- * @num_vfs: number of VFs to allocate or 0 to free VFs
- *
- * This function is called when the user updates the number of VFs in sysfs. On
- * success return whatever num_vfs was set to by the caller. Return negative on
- * failure.
- */
-int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
-{
- struct ice_pf *pf = pci_get_drvdata(pdev);
- struct device *dev = ice_pf_to_dev(pf);
- enum ice_status status;
- int err;
-
- err = ice_check_sriov_allowed(pf);
- if (err)
- return err;
-
- if (!num_vfs) {
- if (!pci_vfs_assigned(pdev)) {
- ice_mbx_deinit_snapshot(&pf->hw);
- ice_free_vfs(pf);
- if (pf->lag)
- ice_enable_lag(pf->lag);
- return 0;
- }
-
- dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
- return -EBUSY;
- }
-
- status = ice_mbx_init_snapshot(&pf->hw, num_vfs);
- if (status)
- return ice_status_to_errno(status);
-
- err = ice_pci_sriov_ena(pf, num_vfs);
- if (err) {
- ice_mbx_deinit_snapshot(&pf->hw);
- return err;
- }
-
- if (pf->lag)
- ice_disable_lag(pf->lag);
- return num_vfs;
-}
-
-/**
- * ice_process_vflr_event - Free VF resources via IRQ calls
- * @pf: pointer to the PF structure
- *
- * called from the VFLR IRQ handler to
- * free up VF resources and state variables
- */
-void ice_process_vflr_event(struct ice_pf *pf)
-{
- struct ice_hw *hw = &pf->hw;
- unsigned int vf_id;
- u32 reg;
-
- if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
- !pf->num_alloc_vfs)
- return;
-
- ice_for_each_vf(pf, vf_id) {
- struct ice_vf *vf = &pf->vf[vf_id];
- u32 reg_idx, bit_idx;
-
- reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
- bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
- /* read GLGEN_VFLRSTAT register to find out the flr VFs */
- reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
- if (reg & BIT(bit_idx))
- /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
- ice_reset_vf(vf, true);
- }
-}
-
-/**
- * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
- * @vf: pointer to the VF info
- */
-static void ice_vc_reset_vf(struct ice_vf *vf)
-{
- ice_vc_notify_vf_reset(vf);
- ice_reset_vf(vf, false);
-}
-
-/**
- * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
- * @pf: PF used to index all VFs
- * @pfq: queue index relative to the PF's function space
- *
- * If no VF is found that owns the pfq then return NULL, otherwise return a
- * pointer to the VF that owns the pfq
- */
-static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
-{
- unsigned int vf_id;
-
- ice_for_each_vf(pf, vf_id) {
- struct ice_vf *vf = &pf->vf[vf_id];
- struct ice_vsi *vsi;
- u16 rxq_idx;
-
- vsi = ice_get_vf_vsi(vf);
-
- ice_for_each_rxq(vsi, rxq_idx)
- if (vsi->rxq_map[rxq_idx] == pfq)
- return vf;
- }
-
- return NULL;
-}
-
-/**
- * ice_globalq_to_pfq - convert from global queue index to PF space queue index
- * @pf: PF used for conversion
- * @globalq: global queue index used to convert to PF space queue index
- */
-static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
-{
- return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
-}
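-
-/* Example (hypothetical values): if the function's Rx queue space starts at
- * rxq_first_id = 128, device-global queue 160 corresponds to PF-relative
- * queue 160 - 128 = 32, which ice_get_vf_from_pfq() then matches against
- * each VF VSI's rxq_map.
- */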
-
-/**
- * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
- * @pf: PF that the LAN overflow event happened on
- * @event: structure holding the event information for the LAN overflow event
- *
- * Determine if the LAN overflow event was caused by a VF queue. If it was not
- * caused by a VF, do nothing. If a VF caused this LAN overflow event, trigger a
- * reset on the offending VF.
- */
-void
-ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
-{
- u32 gldcb_rtctq, queue;
- struct ice_vf *vf;
-
- gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
- dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
-
- /* event returns device global Rx queue number */
- queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
- GLDCB_RTCTQ_RXQNUM_S;
-
- vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
- if (!vf)
- return;
-
- ice_vc_reset_vf(vf);
-}
-
-/**
- * ice_vc_send_msg_to_vf - Send message to VF
- * @vf: pointer to the VF info
- * @v_opcode: virtual channel opcode
- * @v_retval: virtual channel return value
- * @msg: pointer to the msg buffer
- * @msglen: msg length
- *
- * send msg to VF
- */
-int
-ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
- enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
-{
- enum ice_status aq_ret;
- struct device *dev;
- struct ice_pf *pf;
-
- if (!vf)
- return -EINVAL;
-
- pf = vf->pf;
- if (ice_validate_vf_id(pf, vf->vf_id))
- return -EINVAL;
-
- dev = ice_pf_to_dev(pf);
-
- /* single place to detect unsuccessful return values */
- if (v_retval) {
- vf->num_inval_msgs++;
- dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
- v_opcode, v_retval);
- if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
- dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
- vf->vf_id);
- dev_err(dev, "Use PF Control I/F to enable the VF\n");
- set_bit(ICE_VF_STATE_DIS, vf->vf_states);
- return -EIO;
- }
- } else {
- vf->num_valid_msgs++;
- /* reset the invalid counter, if a valid message is received. */
- vf->num_inval_msgs = 0;
- }
-
- aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
- msg, msglen, NULL);
- if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
- dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
- vf->vf_id, ice_stat_str(aq_ret),
- ice_aq_str(pf->hw.mailboxq.sq_last_status));
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * ice_vc_get_ver_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to request the API version used by the PF
- */
-static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_version_info info = {
- VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
- };
-
- vf->vf_ver = *(struct virtchnl_version_info *)msg;
- /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
- if (VF_IS_V10(&vf->vf_ver))
- info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
-
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
- VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
- sizeof(struct virtchnl_version_info));
-}
-
-/**
- * ice_vc_get_max_frame_size - get max frame size allowed for VF
- * @vf: VF used to determine max frame size
- *
- * Max frame size is determined based on the current port's max frame size and
- * whether a port VLAN is configured on this VF. The VF is not aware whether
- * it's in a port VLAN, so the PF needs to account for this both in max frame
- * size checks and when sending the max frame size to the VF.
- */
-static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
-{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
- u16 max_frame_size;
-
- max_frame_size = pi->phy.link_info.max_frame_size;
-
- if (vf->port_vlan_info)
- max_frame_size -= VLAN_HLEN;
-
- return max_frame_size;
-}
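-
-/* Example (hypothetical frame size): if the port reports
- * max_frame_size = 9728 and the VF is in a port VLAN, the VF is told
- * 9728 - VLAN_HLEN = 9724 so that the extra tag the PF inserts still fits
- * within the port's limit.
- */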
-
-/**
- * ice_vc_get_vf_res_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to request its resources
- */
-static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_vf_resource *vfres = NULL;
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- int len = 0;
- int ret;
-
- if (ice_check_vf_init(pf, vf)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- len = sizeof(struct virtchnl_vf_resource);
-
- vfres = kzalloc(len, GFP_KERNEL);
- if (!vfres) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- len = 0;
- goto err;
- }
- if (VF_IS_V11(&vf->vf_ver))
- vf->driver_caps = *(u32 *)msg;
- else
- vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
- VIRTCHNL_VF_OFFLOAD_RSS_REG |
- VIRTCHNL_VF_OFFLOAD_VLAN;
-
- vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (!vsi->info.pvid)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
- } else {
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
- else
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
- }
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
-
- if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
- vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
-
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
-
- vfres->num_vsis = 1;
- /* Tx and Rx queue counts are equal for the VF */
- vfres->num_queue_pairs = vsi->num_txq;
- vfres->max_vectors = pf->num_msix_per_vf;
- vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
- vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
- vfres->max_mtu = ice_vc_get_max_frame_size(vf);
-
- vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
- vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
- vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
- ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
- vf->hw_lan_addr.addr);
-
- /* match guest capabilities */
- vf->driver_caps = vfres->vf_cap_flags;
-
- ice_vc_set_caps_allowlist(vf);
- ice_vc_set_working_allowlist(vf);
-
- set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
-
-err:
- /* send the response back to the VF */
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
- (u8 *)vfres, len);
-
- kfree(vfres);
- return ret;
-}
-
-/**
- * ice_vc_reset_vf_msg
- * @vf: pointer to the VF info
- *
- * called from the VF to reset itself;
- * unlike other virtchnl messages, the PF driver
- * doesn't send a response back to the VF
- */
-static void ice_vc_reset_vf_msg(struct ice_vf *vf)
-{
- if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
- ice_reset_vf(vf, false);
-}
-
-/**
- * ice_find_vsi_from_id
- * @pf: the PF structure to search for the VSI
- * @id: ID of the VSI it is searching for
- *
- * searches for the VSI with the given ID
- */
-static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
-{
- int i;
-
- ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
- return pf->vsi[i];
-
- return NULL;
-}
-
-/**
- * ice_vc_isvalid_vsi_id
- * @vf: pointer to the VF info
- * @vsi_id: VF relative VSI ID
- *
- * check for the valid VSI ID
- */
-bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
-{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_find_vsi_from_id(pf, vsi_id);
-
- return (vsi && (vsi->vf_id == vf->vf_id));
-}
-
-/**
- * ice_vc_isvalid_q_id
- * @vf: pointer to the VF info
- * @vsi_id: VSI ID
- * @qid: VSI relative queue ID
- *
- * check for the valid queue ID
- */
-static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
-{
- struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
- /* allocated Tx and Rx queues should always be equal for a VF VSI */
- return (vsi && (qid < vsi->alloc_txq));
-}
-
-/**
- * ice_vc_isvalid_ring_len
- * @ring_len: length of ring
- *
- * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
- * or zero
- */
-static bool ice_vc_isvalid_ring_len(u16 ring_len)
-{
- return ring_len == 0 ||
- (ring_len >= ICE_MIN_NUM_DESC &&
- ring_len <= ICE_MAX_NUM_DESC &&
- !(ring_len % ICE_REQ_DESC_MULTIPLE));
-}
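-
-/* Example (assuming ICE_MIN_NUM_DESC = 64, ICE_MAX_NUM_DESC = 8160 and
- * ICE_REQ_DESC_MULTIPLE = 32): ring lengths of 0, 64 or 512 are accepted,
- * while 48 (too small) or 100 (not a multiple of 32) are rejected.
- */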
-
-/**
- * ice_vc_parse_rss_cfg - parses hash fields and headers from
- * a specific virtchnl RSS cfg
- * @hw: pointer to the hardware
- * @rss_cfg: pointer to the virtchnl RSS cfg
- * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
- * to configure
- * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
- *
- * Return true if all the protocol header and hash fields in the RSS cfg could
- * be parsed, else return false
- *
- * This function parses the virtchnl RSS cfg into the intended
- * hash fields and protocol headers for RSS configuration
- */
-static bool
-ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
- u32 *addl_hdrs, u64 *hash_flds)
-{
- const struct ice_vc_hash_field_match_type *hf_list;
- const struct ice_vc_hdr_match_type *hdr_list;
- int i, hf_list_len, hdr_list_len;
-
- if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
- sizeof(hw->active_pkg_name))) {
- hf_list = ice_vc_hash_field_list_comms;
- hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_comms);
- hdr_list = ice_vc_hdr_list_comms;
- hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_comms);
- } else {
- hf_list = ice_vc_hash_field_list_os;
- hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_os);
- hdr_list = ice_vc_hdr_list_os;
- hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_os);
- }
-
- for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
- struct virtchnl_proto_hdr *proto_hdr =
- &rss_cfg->proto_hdrs.proto_hdr[i];
- bool hdr_found = false;
- int j;
-
- /* Find matched ice headers according to virtchnl headers. */
- for (j = 0; j < hdr_list_len; j++) {
- struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
-
- if (proto_hdr->type == hdr_map.vc_hdr) {
- *addl_hdrs |= hdr_map.ice_hdr;
- hdr_found = true;
- }
- }
-
- if (!hdr_found)
- return false;
-
- /* Find matched ice hash fields according to
- * virtchnl hash fields.
- */
- for (j = 0; j < hf_list_len; j++) {
- struct ice_vc_hash_field_match_type hf_map = hf_list[j];
-
- if (proto_hdr->type == hf_map.vc_hdr &&
- proto_hdr->field_selector == hf_map.vc_hash_field) {
- *hash_flds |= hf_map.ice_hash_field;
- break;
- }
- }
- }
-
- return true;
-}
-
-/**
- * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
- * RSS offloads
- * @caps: VF driver negotiated capabilities
- *
- * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
- * else return false
- */
-static bool ice_vf_adv_rss_offload_ena(u32 caps)
-{
- return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
-}
-
-/**
- * ice_vc_handle_rss_cfg
- * @vf: pointer to the VF info
- * @msg: pointer to the message buffer
- * @add: add a RSS config if true, otherwise delete a RSS config
- *
- * This function adds/deletes a RSS config
- */
-static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
-{
- u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
- struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_hw *hw = &vf->pf->hw;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- goto error_param;
- }
-
- if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
- rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
- rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
- struct ice_vsi_ctx *ctx;
- enum ice_status status;
- u8 lut_type, hash_type;
-
- lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
- hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
- ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- goto error_param;
- }
-
- ctx->info.q_opt_rss = ((lut_type <<
- ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
- ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
- (hash_type &
- ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
-
- /* Preserve existing queueing option setting */
- ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
- ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
- ctx->info.q_opt_tc = vsi->info.q_opt_tc;
- ctx->info.q_opt_flags = vsi->info.q_opt_rss;
-
- ctx->info.valid_sections =
- cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
-
- status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
- if (status) {
- dev_err(dev, "update VSI for RSS failed, err %s aq_err %s\n",
- ice_stat_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- } else {
- vsi->info.q_opt_rss = ctx->info.q_opt_rss;
- }
-
- kfree(ctx);
- } else {
- u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
- u64 hash_flds = ICE_HASH_INVALID;
-
- if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
- &hash_flds)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (add) {
- if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
- addl_hdrs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
- vsi->vsi_num, v_ret);
- }
- } else {
- enum ice_status status;
-
- status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
- addl_hdrs);
- /* We just ignore ICE_ERR_DOES_NOT_EXIST, because
- * if two configurations share the same profile,
- * removing one of them actually removes both, since
- * the profile is deleted.
- */
- if (status && status != ICE_ERR_DOES_NOT_EXIST) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%s\n",
- vf->vf_id, ice_stat_str(status));
- }
- }
- }
-
-error_param:
- return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_key
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS key
- */
-static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_rss_key *vrk =
- (struct virtchnl_rss_key *)msg;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_set_rss_key(vsi, vrk->key))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_lut
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS LUT
- */
-static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
- * @vf: the VF being reset
- *
- * The max poll time is ~800ms, which is about the maximum time it takes
- * for a VF to be reset and/or a VF driver to be removed.
- */
-static void ice_wait_on_vf_reset(struct ice_vf *vf)
-{
- int i;
-
- for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
- if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
- break;
- msleep(ICE_MAX_VF_RESET_SLEEP_MS);
- }
-}
-
-/**
- * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
- * @vf: VF to check if it's ready to be configured/queried
- *
- * The purpose of this function is to make sure the VF is not in reset, not
- * disabled, and initialized so it can be configured and/or queried by a host
- * administrator.
- */
-int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
-{
- struct ice_pf *pf;
-
- ice_wait_on_vf_reset(vf);
-
- if (ice_is_vf_disabled(vf))
- return -EINVAL;
-
- pf = vf->pf;
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
-
- return 0;
-}
-
-/**
- * ice_set_vf_spoofchk
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @ena: flag to enable or disable feature
- *
- * Enable or disable VF spoof checking
- */
-int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
- struct ice_vsi_ctx *ctx;
- struct ice_vsi *vf_vsi;
- enum ice_status status;
- struct device *dev;
- struct ice_vf *vf;
- int ret;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- vf_vsi = ice_get_vf_vsi(vf);
- if (!vf_vsi) {
- netdev_err(netdev, "VSI %d for VF %d is null\n",
- vf->lan_vsi_idx, vf->vf_id);
- return -EINVAL;
- }
-
- if (vf_vsi->type != ICE_VSI_VF) {
- netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
- vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
- return -ENODEV;
- }
-
- if (ena == vf->spoofchk) {
- dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
- return 0;
- }
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
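- /* only the security section of the VSI context is marked valid, so the
- * update below touches nothing but the anti-spoof/Tx-prune flags
- */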
- ctx->info.sec_flags = vf_vsi->info.sec_flags;
- ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
- if (ena) {
- ctx->info.sec_flags |=
- ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
- } else {
- ctx->info.sec_flags &=
- ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
- (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
- ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
- }
-
- status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
- if (status) {
- dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n",
- ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
- ice_stat_str(status));
- ret = -EIO;
- goto out;
- }
-
- /* only update spoofchk state and VSI context on success */
- vf_vsi->info.sec_flags = ctx->info.sec_flags;
- vf->spoofchk = ena;
-
-out:
- kfree(ctx);
- return ret;
-}
-
-/**
- * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
- * @pf: PF structure for accessing VF(s)
- *
- * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
- * else return true
- */
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
-{
- int vf_idx;
-
- ice_for_each_vf(pf, vf_idx) {
- struct ice_vf *vf = &pf->vf[vf_idx];
-
- /* found a VF that has promiscuous mode configured */
- if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
- return true;
- }
-
- return false;
-}
-
-/**
- * ice_vc_cfg_promiscuous_mode_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure VF VSIs promiscuous mode
- */
-static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- enum ice_status mcast_status = 0, ucast_status = 0;
- bool rm_promisc, alluni = false, allmulti = false;
- struct virtchnl_promisc_info *info =
- (struct virtchnl_promisc_info *)msg;
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- int ret = 0;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
- dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
- vf->vf_id);
- /* Leave v_ret alone, lie to the VF on purpose. */
- goto error_param;
- }
-
- if (info->flags & FLAG_VF_UNICAST_PROMISC)
- alluni = true;
-
- if (info->flags & FLAG_VF_MULTICAST_PROMISC)
- allmulti = true;
-
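- /* promiscuous filters only need to be removed when the VF requests
- * neither unicast nor multicast promiscuous mode
- */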
- rm_promisc = !allmulti && !alluni;
-
- if (vsi->num_vlan || vf->port_vlan_info) {
- if (rm_promisc)
- ret = ice_cfg_vlan_pruning(vsi, true);
- else
- ret = ice_cfg_vlan_pruning(vsi, false);
- if (ret) {
- dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- }
-
- if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
- bool set_dflt_vsi = alluni || allmulti;
-
- if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
- /* only attempt to set the default forwarding VSI if
- * it's not currently set
- */
- ret = ice_set_dflt_vsi(pf->first_sw, vsi);
- else if (!set_dflt_vsi &&
- ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
- /* only attempt to free the default forwarding VSI if we
- * are the owner
- */
- ret = ice_clear_dflt_vsi(pf->first_sw);
-
- if (ret) {
- dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
- set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
- goto error_param;
- }
- } else {
- u8 mcast_m, ucast_m;
-
- if (vf->port_vlan_info || vsi->num_vlan > 1) {
- mcast_m = ICE_MCAST_VLAN_PROMISC_BITS;
- ucast_m = ICE_UCAST_VLAN_PROMISC_BITS;
- } else {
- mcast_m = ICE_MCAST_PROMISC_BITS;
- ucast_m = ICE_UCAST_PROMISC_BITS;
- }
-
- ucast_status = ice_vf_set_vsi_promisc(vf, vsi, ucast_m,
- !alluni);
- if (ucast_status) {
- dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n",
- alluni ? "en" : "dis", vf->vf_id);
- v_ret = ice_err_to_virt_err(ucast_status);
- }
-
- mcast_status = ice_vf_set_vsi_promisc(vf, vsi, mcast_m,
- !allmulti);
- if (mcast_status) {
- dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed\n",
- allmulti ? "en" : "dis", vf->vf_id);
- v_ret = ice_err_to_virt_err(mcast_status);
- }
- }
-
- if (!mcast_status) {
- if (allmulti &&
- !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
- dev_info(dev, "VF %u successfully set multicast promiscuous mode\n",
- vf->vf_id);
- else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
- dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n",
- vf->vf_id);
- }
-
- if (!ucast_status) {
- if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
- dev_info(dev, "VF %u successfully set unicast promiscuous mode\n",
- vf->vf_id);
- else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
- dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n",
- vf->vf_id);
- }
-
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
- v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_get_stats_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to get VSI stats
- */
-static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_eth_stats stats = { 0 };
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- ice_update_eth_stats(vsi);
-
- stats = vsi->eth_stats;
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
- (u8 *)&stats, sizeof(stats));
-}
-
-/**
- * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
- * @vqs: virtchnl_queue_select structure containing bitmaps to validate
- *
- * Return true on successful validation, else false
- */
-static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
-{
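- /* reject an empty queue selection and any bit beyond the per-VF limit */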
- if ((!vqs->rx_queues && !vqs->tx_queues) ||
- vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
- vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
- return false;
-
- return true;
-}
-
-/**
- * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 pfq = vsi->txq_map[q_idx];
- u32 reg;
-
- reg = rd32(hw, QINT_TQCTL(pfq));
-
- /* MSI-X index 0 in the VF's space is always for the OICR, which means
- * this is most likely a poll mode VF driver, so don't enable an
- * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
- */
- if (!(reg & QINT_TQCTL_MSIX_INDX_M))
- return;
-
- wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 pfq = vsi->rxq_map[q_idx];
- u32 reg;
-
- reg = rd32(hw, QINT_RQCTL(pfq));
-
- /* MSI-X index 0 in the VF's space is always for the OICR, which means
- * this is most likely a poll mode VF driver, so don't enable an
- * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
- */
- if (!(reg & QINT_RQCTL_MSIX_INDX_M))
- return;
-
- wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vc_ena_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to enable all or specific queue(s)
- */
-static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_vsi *vsi;
- unsigned long q_map;
- u16 vf_q_id;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_vqs_bitmaps(vqs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Enable only Rx rings, Tx rings were enabled by the FW when the
- * Tx queue group list was configured and the context bits were
- * programmed using ice_vsi_cfg_txqs
- */
- q_map = vqs->rx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if enabled */
- if (test_bit(vf_q_id, vf->rxq_ena))
- continue;
-
- if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
- set_bit(vf_q_id, vf->rxq_ena);
- }
-
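- /* Tx rings were already enabled by firmware when the Tx queue contexts
- * were programmed, so only the interrupt and the enabled-queue bit need
- * to be handled here
- */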
- q_map = vqs->tx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if enabled */
- if (test_bit(vf_q_id, vf->txq_ena))
- continue;
-
- ice_vf_ena_txq_interrupt(vsi, vf_q_id);
- set_bit(vf_q_id, vf->txq_ena);
- }
-
- /* Set flag to indicate that queues are enabled */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS)
- set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_dis_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to disable all or specific
- * queue(s)
- */
-static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_vsi *vsi;
- unsigned long q_map;
- u16 vf_q_id;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_vqs_bitmaps(vqs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vqs->tx_queues) {
- q_map = vqs->tx_queues;
-
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
- struct ice_txq_meta txq_meta = { 0 };
-
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if not enabled */
- if (!test_bit(vf_q_id, vf->txq_ena))
- continue;
-
- ice_fill_txq_meta(vsi, ring, &txq_meta);
-
- if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
- ring, &txq_meta)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Clear enabled queues flag */
- clear_bit(vf_q_id, vf->txq_ena);
- }
- }
-
- q_map = vqs->rx_queues;
- /* speed up Rx queue disable by batching them if possible */
- if (q_map &&
- bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
- if (ice_vsi_stop_all_rx_rings(vsi)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
- vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- } else if (q_map) {
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if not enabled */
- if (!test_bit(vf_q_id, vf->rxq_ena))
- continue;
-
- if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
- true)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Clear enabled queues flag */
- clear_bit(vf_q_id, vf->rxq_ena);
- }
- }
-
- /* Clear enabled queues flag */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
- clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_cfg_interrupt
- * @vf: pointer to the VF info
- * @vsi: the VSI being configured
- * @vector_id: vector ID
- * @map: vector map for mapping vectors to queues
- * @q_vector: structure for interrupt vector
- * configure the IRQ to queue map
- */
-static int
-ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
- struct virtchnl_vector_map *map,
- struct ice_q_vector *q_vector)
-{
- u16 vsi_q_id, vsi_q_id_idx;
- unsigned long qmap;
-
- q_vector->num_ring_rx = 0;
- q_vector->num_ring_tx = 0;
-
- qmap = map->rxq_map;
- for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
- vsi_q_id = vsi_q_id_idx;
-
- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
- return VIRTCHNL_STATUS_ERR_PARAM;
-
- q_vector->num_ring_rx++;
- q_vector->rx.itr_idx = map->rxitr_idx;
- vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
- q_vector->rx.itr_idx);
- }
-
- qmap = map->txq_map;
- for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
- vsi_q_id = vsi_q_id_idx;
-
- if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
- return VIRTCHNL_STATUS_ERR_PARAM;
-
- q_vector->num_ring_tx++;
- q_vector->tx.itr_idx = map->txitr_idx;
- vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
- q_vector->tx.itr_idx);
- }
-
- return VIRTCHNL_STATUS_SUCCESS;
-}
-
-/**
- * ice_vc_cfg_irq_map_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the IRQ to queue map
- */
-static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u16 num_q_vectors_mapped, vsi_id, vector_id;
- struct virtchnl_irq_map_info *irqmap_info;
- struct virtchnl_vector_map *map;
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- int i;
-
- irqmap_info = (struct virtchnl_irq_map_info *)msg;
- num_q_vectors_mapped = irqmap_info->num_vectors;
-
- /* Check to make sure number of VF vectors mapped is not greater than
- * number of VF vectors originally allocated, and check that
- * there is actually at least a single VF queue vector mapped
- */
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- pf->num_msix_per_vf < num_q_vectors_mapped ||
- !num_q_vectors_mapped) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- for (i = 0; i < num_q_vectors_mapped; i++) {
- struct ice_q_vector *q_vector;
-
- map = &irqmap_info->vecmap[i];
-
- vector_id = map->vector_id;
- vsi_id = map->vsi_id;
- /* vector_id is always 0-based for each VF, and can never be
- * larger than or equal to the max allowed interrupts per VF
- */
- if (!(vector_id < pf->num_msix_per_vf) ||
- !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
- (!vector_id && (map->rxq_map || map->txq_map))) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* No need to map VF miscellaneous or rogue vector */
- if (!vector_id)
- continue;
-
- /* Subtract the non-queue vector from the vector_id passed by the VF
- * to get the actual VSI queue vector array index
- */
- q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
- if (!q_vector) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* watch for an invalid queue index */
- v_ret = (enum virtchnl_status_code)
- ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
- if (v_ret)
- goto error_param;
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_cfg_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the Rx/Tx queues
- */
-static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_vsi_queue_config_info *qci =
- (struct virtchnl_vsi_queue_config_info *)msg;
- struct virtchnl_queue_pair_info *qpi;
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- int i, q_idx;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
- qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
- vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- for (i = 0; i < qci->num_queue_pairs; i++) {
- qpi = &qci->qpair[i];
- if (qpi->txq.vsi_id != qci->vsi_id ||
- qpi->rxq.vsi_id != qci->vsi_id ||
- qpi->rxq.queue_id != qpi->txq.queue_id ||
- qpi->txq.headwb_enabled ||
- !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
- !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
- !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- q_idx = qpi->rxq.queue_id;
-
- /* make sure selected "q_idx" is in valid range of queues
- * for selected "vsi"
- */
- if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* copy Tx queue info from VF into VSI */
- if (qpi->txq.ring_len > 0) {
- vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
- vsi->tx_rings[i]->count = qpi->txq.ring_len;
- if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- }
-
- /* copy Rx queue info from VF into VSI */
- if (qpi->rxq.ring_len > 0) {
- u16 max_frame_size = ice_vc_get_max_frame_size(vf);
-
- vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
- vsi->rx_rings[i]->count = qpi->rxq.ring_len;
-
- if (qpi->rxq.databuffer_size != 0 &&
- (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
- qpi->rxq.databuffer_size < 1024)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- vsi->rx_buf_len = qpi->rxq.databuffer_size;
- vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
- if (qpi->rxq.max_pkt_size > max_frame_size ||
- qpi->rxq.max_pkt_size < 64) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi->max_frame = qpi->rxq.max_pkt_size;
- /* add space for the port VLAN since the VF driver is not
- * expected to account for it in the MTU calculation
- */
- if (vf->port_vlan_info)
- vsi->max_frame += VLAN_HLEN;
-
- if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- }
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_is_vf_trusted
- * @vf: pointer to the VF info
- */
-static bool ice_is_vf_trusted(struct ice_vf *vf)
-{
- return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-}
-
-/**
- * ice_can_vf_change_mac
- * @vf: pointer to the VF info
- *
- * Return true if the VF is allowed to change its MAC filters, false otherwise
- */
-static bool ice_can_vf_change_mac(struct ice_vf *vf)
-{
- /* If the VF MAC address has been set administratively (via the
- * ndo_set_vf_mac command), then deny permission to the VF to
- * add/delete unicast MAC addresses, unless the VF is trusted
- */
- if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
- return false;
-
- return true;
-}
-
-/**
- * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
- * @vc_ether_addr: used to extract the type
- */
-static u8
-ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
-{
- return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
-}
-
-/**
- * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
- * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
- */
-static bool
-ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
-{
- u8 type = ice_vc_ether_addr_type(vc_ether_addr);
-
- return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
-}
-
-/**
- * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
- * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
- *
- * This function should only be called when the MAC address in
- * virtchnl_ether_addr is a valid unicast MAC
- */
-static bool
-ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
-{
- u8 type = ice_vc_ether_addr_type(vc_ether_addr);
-
- return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
-}
-
-/**
- * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
- * @vf: VF to update
- * @vc_ether_addr: structure from VIRTCHNL with MAC to add
- */
-static void
-ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
-{
- u8 *mac_addr = vc_ether_addr->addr;
-
- if (!is_valid_ether_addr(mac_addr))
- return;
-
- /* only allow legacy VF drivers to set the device and hardware MAC if it
- * is zero and allow new VF drivers to set the hardware MAC if the type
- * was correctly specified over VIRTCHNL
- */
- if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
- is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
- ice_is_vc_addr_primary(vc_ether_addr)) {
- ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
- ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
- }
-
- /* hardware and device MACs are already set, but it's possible that the
- * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
- * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
- * away for the legacy VF driver case as it will be updated in the
- * delete flow for this case
- */
- if (ice_is_vc_addr_legacy(vc_ether_addr)) {
- ether_addr_copy(vf->legacy_last_added_umac.addr,
- mac_addr);
- vf->legacy_last_added_umac.time_modified = jiffies;
- }
-}
-
-/**
- * ice_vc_add_mac_addr - attempt to add the MAC address passed in
- * @vf: pointer to the VF info
- * @vsi: pointer to the VF's VSI
- * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
- */
-static int
-ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
- struct virtchnl_ether_addr *vc_ether_addr)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- u8 *mac_addr = vc_ether_addr->addr;
- enum ice_status status;
- int ret = 0;
-
- /* device MAC already added */
- if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
- return 0;
-
- if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
- dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
- return -EPERM;
- }
-
- status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
- if (status == ICE_ERR_ALREADY_EXISTS) {
- dev_dbg(dev, "MAC %pM already exists for VF %d\n", mac_addr,
- vf->vf_id);
- /* don't return since we might need to update
- * the primary MAC in ice_vfhw_mac_add() below
- */
- ret = -EEXIST;
- } else if (status) {
- dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %s\n",
- mac_addr, vf->vf_id, ice_stat_str(status));
- return -EIO;
- } else {
- vf->num_mac++;
- }
-
- ice_vfhw_mac_add(vf, vc_ether_addr);
-
- return ret;
-}
-
-/**
- * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
- * @last_added_umac: structure used to check expiration
- */
-static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
-{
-#define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME msecs_to_jiffies(3000)
- return time_is_before_jiffies(last_added_umac->time_modified +
- ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
-}
-
-/**
- * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
- * @vf: VF to update
- * @vc_ether_addr: structure from VIRTCHNL with MAC to check
- *
- * only update cached hardware MAC for legacy VF drivers on delete
- * because we cannot guarantee order/type of MAC from the VF driver
- */
-static void
-ice_update_legacy_cached_mac(struct ice_vf *vf,
- struct virtchnl_ether_addr *vc_ether_addr)
-{
- if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
- ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
- return;
-
- ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
- ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
-}
-
-/**
- * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
- * @vf: VF to update
- * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
- */
-static void
-ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
-{
- u8 *mac_addr = vc_ether_addr->addr;
-
- if (!is_valid_ether_addr(mac_addr) ||
- !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
- return;
-
- /* allow the device MAC to be repopulated in the add flow and don't
- * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
- * to be persistent on VM reboot and across driver unload/load, which
- * won't work if we clear the hardware MAC here
- */
- eth_zero_addr(vf->dev_lan_addr.addr);
-
- ice_update_legacy_cached_mac(vf, vc_ether_addr);
-}
-
-/**
- * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
- * @vf: pointer to the VF info
- * @vsi: pointer to the VF's VSI
- * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
- */
-static int
-ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
- struct virtchnl_ether_addr *vc_ether_addr)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- u8 *mac_addr = vc_ether_addr->addr;
- enum ice_status status;
-
- if (!ice_can_vf_change_mac(vf) &&
- ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
- return 0;
-
- status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
- if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
- vf->vf_id);
- return -ENOENT;
- } else if (status) {
- dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
- mac_addr, vf->vf_id, ice_stat_str(status));
- return -EIO;
- }
-
- ice_vfhw_mac_del(vf, vc_ether_addr);
-
- vf->num_mac--;
-
- return 0;
-}
-
-/**
- * ice_vc_handle_mac_addr_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @set: true if MAC filters are being set, false otherwise
- *
- * add guest MAC address filter
- */
-static int
-ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
-{
- int (*ice_vc_cfg_mac)
- (struct ice_vf *vf, struct ice_vsi *vsi,
- struct virtchnl_ether_addr *virtchnl_ether_addr);
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_ether_addr_list *al =
- (struct virtchnl_ether_addr_list *)msg;
- struct ice_pf *pf = vf->pf;
- enum virtchnl_ops vc_op;
- struct ice_vsi *vsi;
- int i;
-
- if (set) {
- vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
- ice_vc_cfg_mac = ice_vc_add_mac_addr;
- } else {
- vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
- ice_vc_cfg_mac = ice_vc_del_mac_addr;
- }
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
-
- /* If this VF is not privileged, then we can't add more than a
- * limited number of addresses. Check to make sure that the
- * additions do not push us over the limit.
- */
- if (set && !ice_is_vf_trusted(vf) &&
- (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
- dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
-
- for (i = 0; i < al->num_elements; i++) {
- u8 *mac_addr = al->list[i].addr;
- int result;
-
- if (is_broadcast_ether_addr(mac_addr) ||
- is_zero_ether_addr(mac_addr))
- continue;
-
- result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
- if (result == -EEXIST || result == -ENOENT) {
- continue;
- } else if (result) {
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
- goto handle_mac_exit;
- }
- }
-
-handle_mac_exit:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_add_mac_addr_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * add guest MAC address filter
- */
-static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
-{
- return ice_vc_handle_mac_addr_msg(vf, msg, true);
-}
-
-/**
- * ice_vc_del_mac_addr_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * remove guest MAC address filter
- */
-static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
-{
- return ice_vc_handle_mac_addr_msg(vf, msg, false);
-}
-
-/**
- * ice_vc_request_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * VFs get a default number of queues but can use this message to request a
- * different number. If the request is successful, PF will reset the VF and
- * return 0. If unsuccessful, the PF will send a virtchnl message response
- * informing the VF of the number of available queue pairs.
- */
-static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_vf_res_request *vfres =
- (struct virtchnl_vf_res_request *)msg;
- u16 req_queues = vfres->num_queue_pairs;
- struct ice_pf *pf = vf->pf;
- u16 max_allowed_vf_queues;
- u16 tx_rx_queue_left;
- struct device *dev;
- u16 cur_queues;
-
- dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- cur_queues = vf->num_vf_qs;
- tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
- ice_get_avail_rxq_count(pf));
- max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
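- /* a VF may keep its current queues and additionally claim whatever
- * Tx/Rx queues are still unallocated on the PF
- */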
- if (!req_queues) {
- dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
- vf->vf_id);
- } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
- dev_err(dev, "VF %d tried to request more than %d queues.\n",
- vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
- vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
- } else if (req_queues > cur_queues &&
- req_queues - cur_queues > tx_rx_queue_left) {
- dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
- vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
- vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
- ICE_MAX_RSS_QS_PER_VF);
- } else {
- /* request is successful, then reset VF */
- vf->num_req_qs = req_queues;
- ice_vc_reset_vf(vf);
- dev_info(dev, "VF %d granted request of %u queues.\n",
- vf->vf_id, req_queues);
- return 0;
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
- v_ret, (u8 *)vfres, sizeof(*vfres));
-}
-
-/**
- * ice_set_vf_port_vlan
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @vlan_id: VLAN ID being set
- * @qos: priority setting
- * @vlan_proto: VLAN protocol
- *
- * program VF Port VLAN ID and/or QoS
- */
-int
-ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
- __be16 vlan_proto)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct device *dev;
- struct ice_vf *vf;
- u16 vlanprio;
- int ret;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- if (vlan_id >= VLAN_N_VID || qos > 7) {
- dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
- vf_id, vlan_id, qos);
- return -EINVAL;
- }
-
- if (vlan_proto != htons(ETH_P_8021Q)) {
- dev_err(dev, "VF VLAN protocol is not supported\n");
- return -EPROTONOSUPPORT;
- }
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
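- /* pack the 12-bit VLAN ID and the 3-bit QoS priority into a single
- * 802.1Q TCI-style value
- */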
- vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
-
- if (vf->port_vlan_info == vlanprio) {
- /* duplicate request, so just return success */
- dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
- return 0;
- }
-
- mutex_lock(&vf->cfg_lock);
-
- vf->port_vlan_info = vlanprio;
-
- if (vf->port_vlan_info)
- dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
- vlan_id, qos, vf_id);
- else
- dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
-
- ice_vc_reset_vf(vf);
- mutex_unlock(&vf->cfg_lock);
-
- return 0;
-}
-
-/**
- * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
- * @caps: VF driver negotiated capabilities
- *
- * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
- */
-static bool ice_vf_vlan_offload_ena(u32 caps)
-{
- return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
-}
-
-/**
- * ice_vc_process_vlan_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- * @add_v: Add VLAN if true, otherwise delete VLAN
- *
- * Process virtchnl op to add or remove programmed guest VLAN ID
- */
-static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_vlan_filter_list *vfl =
- (struct virtchnl_vlan_filter_list *)msg;
- struct ice_pf *pf = vf->pf;
- bool vlan_promisc = false;
- struct ice_vsi *vsi;
- struct device *dev;
- struct ice_hw *hw;
- int status = 0;
- u8 promisc_m;
- int i;
-
- dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- for (i = 0; i < vfl->num_elements; i++) {
- if (vfl->vlan_id[i] >= VLAN_N_VID) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "invalid VF VLAN id %d\n",
- vfl->vlan_id[i]);
- goto error_param;
- }
- }
-
- hw = &pf->hw;
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (add_v && !ice_is_vf_trusted(vf) &&
- vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
- vf->vf_id);
- /* There is no need to let VF know about being not trusted,
- * so we can just return success message here
- */
- goto error_param;
- }
-
- if (vsi->info.pvid) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
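- /* VLAN promiscuous handling is only needed when the VF is already in
- * promiscuous mode and true promiscuous support is enabled on the PF
- */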
- if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
- test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
- test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
- vlan_promisc = true;
-
- if (add_v) {
- for (i = 0; i < vfl->num_elements; i++) {
- u16 vid = vfl->vlan_id[i];
-
- if (!ice_is_vf_trusted(vf) &&
- vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
- vf->vf_id);
- /* There is no need to let VF know about being
- * not trusted, so we can just return success
- * message here as well.
- */
- goto error_param;
- }
-
- /* we add VLAN 0 by default for each VF so we can enable
- * Tx VLAN anti-spoof without triggering MDD events, so
- * we don't need to add it again here
- */
- if (!vid)
- continue;
-
- status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
- if (status) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Enable VLAN pruning when non-zero VLAN is added */
- if (!vlan_promisc && vid &&
- !ice_vsi_is_vlan_pruning_ena(vsi)) {
- status = ice_cfg_vlan_pruning(vsi, true);
- if (status) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
- vid, status);
- goto error_param;
- }
- } else if (vlan_promisc) {
- /* Enable Ucast/Mcast VLAN promiscuous mode */
- promisc_m = ICE_PROMISC_VLAN_TX |
- ICE_PROMISC_VLAN_RX;
-
- status = ice_set_vsi_promisc(hw, vsi->idx,
- promisc_m, vid);
- if (status) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
- vid, status);
- }
- }
- }
- } else {
- /* In the case of a non-trusted VF, the number of VLAN elements passed
- * to the PF for removal might be greater than the number of VLAN
- * filters programmed for that VF, so use the actual number of VLANs
- * added earlier with the add VLAN opcode. This avoids removing a VLAN
- * that doesn't exist, which would result in sending an erroneous
- * failed message back to the VF
- */
- int num_vf_vlan;
-
- num_vf_vlan = vsi->num_vlan;
- for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
- u16 vid = vfl->vlan_id[i];
-
- /* we add VLAN 0 by default for each VF so we can enable
- * Tx VLAN anti-spoof without triggering MDD events so
- * we don't want a VIRTCHNL request to remove it
- */
- if (!vid)
- continue;
-
- /* Make sure ice_vsi_kill_vlan is successful before
- * updating VLAN information
- */
- status = ice_vsi_kill_vlan(vsi, vid);
- if (status) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Disable VLAN pruning when only VLAN 0 is left */
- if (vsi->num_vlan == 1 &&
- ice_vsi_is_vlan_pruning_ena(vsi))
- ice_cfg_vlan_pruning(vsi, false);
-
- /* Disable Unicast/Multicast VLAN promiscuous mode */
- if (vlan_promisc) {
- promisc_m = ICE_PROMISC_VLAN_TX |
- ICE_PROMISC_VLAN_RX;
-
- ice_clear_vsi_promisc(hw, vsi->idx,
- promisc_m, vid);
- }
- }
- }
-
-error_param:
- /* send the response to the VF */
- if (add_v)
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
- NULL, 0);
- else
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_add_vlan_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Add and program guest VLAN ID
- */
-static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
-{
- return ice_vc_process_vlan_msg(vf, msg, true);
-}
-
-/**
- * ice_vc_remove_vlan_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * remove programmed guest VLAN ID
- */
-static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
-{
- return ice_vc_process_vlan_msg(vf, msg, false);
-}
-
-/**
- * ice_vc_ena_vlan_stripping
- * @vf: pointer to the VF info
- *
- * Enable VLAN header stripping for a given VF
- */
-static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (ice_vsi_manage_vlan_stripping(vsi, true))
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
- v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_dis_vlan_stripping
- * @vf: pointer to the VF info
- *
- * Disable VLAN header stripping for a given VF
- */
-static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_vsi_manage_vlan_stripping(vsi, false))
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
- v_ret, NULL, 0);
-}
-
-/**
- * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
- * @vf: VF to enable/disable VLAN stripping for on initialization
- *
- * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set, enable VLAN stripping; if the
- * flag is cleared, disable stripping. For example, the flag
- * will be cleared when port VLANs are configured by the administrator before
- * passing the VF to the guest or if the AVF driver doesn't support VLAN
- * offloads.
- */
-static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- if (!vsi)
- return -EINVAL;
-
- /* don't modify stripping if port VLAN is configured */
- if (vsi->info.pvid)
- return 0;
-
- if (ice_vf_vlan_offload_ena(vf->driver_caps))
- return ice_vsi_manage_vlan_stripping(vsi, true);
- else
- return ice_vsi_manage_vlan_stripping(vsi, false);
-}
-
-static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = {
- .get_ver_msg = ice_vc_get_ver_msg,
- .get_vf_res_msg = ice_vc_get_vf_res_msg,
- .reset_vf = ice_vc_reset_vf_msg,
- .add_mac_addr_msg = ice_vc_add_mac_addr_msg,
- .del_mac_addr_msg = ice_vc_del_mac_addr_msg,
- .cfg_qs_msg = ice_vc_cfg_qs_msg,
- .ena_qs_msg = ice_vc_ena_qs_msg,
- .dis_qs_msg = ice_vc_dis_qs_msg,
- .request_qs_msg = ice_vc_request_qs_msg,
- .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
- .config_rss_key = ice_vc_config_rss_key,
- .config_rss_lut = ice_vc_config_rss_lut,
- .get_stats_msg = ice_vc_get_stats_msg,
- .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
- .add_vlan_msg = ice_vc_add_vlan_msg,
- .remove_vlan_msg = ice_vc_remove_vlan_msg,
- .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
- .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
- .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
- .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
- .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
-};
-
-void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops)
-{
- *ops = ice_vc_vf_dflt_ops;
-}
-
-/**
- * ice_vc_repr_add_mac
- * @vf: pointer to VF
- * @msg: virtchannel message
- *
- * When port representors are created, we do not add a MAC rule
- * to the firmware; we only cache the address so that the PF can
- * report the same MAC as the VF.
- */
-static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_ether_addr_list *al =
- (struct virtchnl_ether_addr_list *)msg;
- struct ice_vsi *vsi;
- struct ice_pf *pf;
- int i;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
-
- pf = vf->pf;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto handle_mac_exit;
- }
-
- for (i = 0; i < al->num_elements; i++) {
- u8 *mac_addr = al->list[i].addr;
-
- if (!is_unicast_ether_addr(mac_addr) ||
- ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
- continue;
-
- if (vf->pf_set_mac) {
- dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
- v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- goto handle_mac_exit;
- }
-
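- /* only the first valid unicast MAC is cached so the PF can report it
- * as the VF's address; no switch rule is programmed in switchdev mode
- */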
- ice_vfhw_mac_add(vf, &al->list[i]);
- vf->num_mac++;
- break;
- }
-
-handle_mac_exit:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
- v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_repr_del_mac - response with success for deleting MAC
- * @vf: pointer to VF
- * @msg: virtchannel message
- *
- * Respond with success to not break normal VF flow.
- * For a legacy VF driver, try to update the cached MAC address.
- */
-static int
-ice_vc_repr_del_mac(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_ether_addr_list *al =
- (struct virtchnl_ether_addr_list *)msg;
-
- ice_update_legacy_cached_mac(vf, &al->list[0]);
-
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-}
-
-static int ice_vc_repr_add_vlan(struct ice_vf *vf, u8 __always_unused *msg)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't add VLAN in switchdev mode for VF %d\n", vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-}
-
-static int ice_vc_repr_del_vlan(struct ice_vf *vf, u8 __always_unused *msg)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't delete VLAN in switchdev mode for VF %d\n", vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-}
-
-static int ice_vc_repr_ena_vlan_stripping(struct ice_vf *vf)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't enable VLAN stripping in switchdev mode for VF %d\n",
- vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
- NULL, 0);
-}
-
-static int ice_vc_repr_dis_vlan_stripping(struct ice_vf *vf)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't disable VLAN stripping in switchdev mode for VF %d\n",
- vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
- NULL, 0);
-}
-
-static int
-ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
-{
- dev_dbg(ice_pf_to_dev(vf->pf),
- "Can't config promiscuous mode in switchdev mode for VF %d\n",
- vf->vf_id);
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
- NULL, 0);
-}
-
-void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops)
-{
- ops->add_mac_addr_msg = ice_vc_repr_add_mac;
- ops->del_mac_addr_msg = ice_vc_repr_del_mac;
- ops->add_vlan_msg = ice_vc_repr_add_vlan;
- ops->remove_vlan_msg = ice_vc_repr_del_vlan;
- ops->ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping;
- ops->dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping;
- ops->cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode;
-}
-
-/**
- * ice_vc_process_vf_msg - Process request from VF
- * @pf: pointer to the PF structure
- * @event: pointer to the AQ event
- *
- * called from the common asq/arq handler to
- * process request from VF
- */
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
-{
- u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
- s16 vf_id = le16_to_cpu(event->desc.retval);
- u16 msglen = event->msg_len;
- struct ice_vc_vf_ops *ops;
- u8 *msg = event->msg_buf;
- struct ice_vf *vf = NULL;
- struct device *dev;
- int err = 0;
-
- /* if de-init is underway, don't process messages from VF */
- if (test_bit(ICE_VF_DEINIT_IN_PROGRESS, pf->state))
- return;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id)) {
- err = -EINVAL;
- goto error_handler;
- }
-
- vf = &pf->vf[vf_id];
-
- /* Check if VF is disabled. */
- if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
- err = -EPERM;
- goto error_handler;
- }
-
- ops = &vf->vc_ops;
-
- /* Perform basic checks on the msg */
- err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
- if (err) {
- if (err == VIRTCHNL_STATUS_ERR_PARAM)
- err = -EPERM;
- else
- err = -EINVAL;
- }
-
- if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
- ice_vc_send_msg_to_vf(vf, v_opcode,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
- 0);
- return;
- }
-
-error_handler:
- if (err) {
- ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
- NULL, 0);
- dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
- vf_id, v_opcode, msglen, err);
- return;
- }
-
- /* VF is being configured in another context that triggers a VFR, so no
- * need to process this message
- */
- if (!mutex_trylock(&vf->cfg_lock)) {
- dev_info(dev, "VF %u is being configured in another context that will trigger a VFR, so there is no need to handle this message\n",
- vf->vf_id);
- return;
- }
-
- switch (v_opcode) {
- case VIRTCHNL_OP_VERSION:
- err = ops->get_ver_msg(vf, msg);
- break;
- case VIRTCHNL_OP_GET_VF_RESOURCES:
- err = ops->get_vf_res_msg(vf, msg);
- if (ice_vf_init_vlan_stripping(vf))
- dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
- vf->vf_id);
- ice_vc_notify_vf_link_state(vf);
- break;
- case VIRTCHNL_OP_RESET_VF:
- ops->reset_vf(vf);
- break;
- case VIRTCHNL_OP_ADD_ETH_ADDR:
- err = ops->add_mac_addr_msg(vf, msg);
- break;
- case VIRTCHNL_OP_DEL_ETH_ADDR:
- err = ops->del_mac_addr_msg(vf, msg);
- break;
- case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- err = ops->cfg_qs_msg(vf, msg);
- break;
- case VIRTCHNL_OP_ENABLE_QUEUES:
- err = ops->ena_qs_msg(vf, msg);
- ice_vc_notify_vf_link_state(vf);
- break;
- case VIRTCHNL_OP_DISABLE_QUEUES:
- err = ops->dis_qs_msg(vf, msg);
- break;
- case VIRTCHNL_OP_REQUEST_QUEUES:
- err = ops->request_qs_msg(vf, msg);
- break;
- case VIRTCHNL_OP_CONFIG_IRQ_MAP:
- err = ops->cfg_irq_map_msg(vf, msg);
- break;
- case VIRTCHNL_OP_CONFIG_RSS_KEY:
- err = ops->config_rss_key(vf, msg);
- break;
- case VIRTCHNL_OP_CONFIG_RSS_LUT:
- err = ops->config_rss_lut(vf, msg);
- break;
- case VIRTCHNL_OP_GET_STATS:
- err = ops->get_stats_msg(vf, msg);
- break;
- case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- err = ops->cfg_promiscuous_mode_msg(vf, msg);
- break;
- case VIRTCHNL_OP_ADD_VLAN:
- err = ops->add_vlan_msg(vf, msg);
- break;
- case VIRTCHNL_OP_DEL_VLAN:
- err = ops->remove_vlan_msg(vf, msg);
- break;
- case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
- err = ops->ena_vlan_stripping(vf);
- break;
- case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
- err = ops->dis_vlan_stripping(vf);
- break;
- case VIRTCHNL_OP_ADD_FDIR_FILTER:
- err = ops->add_fdir_fltr_msg(vf, msg);
- break;
- case VIRTCHNL_OP_DEL_FDIR_FILTER:
- err = ops->del_fdir_fltr_msg(vf, msg);
- break;
- case VIRTCHNL_OP_ADD_RSS_CFG:
- err = ops->handle_rss_cfg_msg(vf, msg, true);
- break;
- case VIRTCHNL_OP_DEL_RSS_CFG:
- err = ops->handle_rss_cfg_msg(vf, msg, false);
- break;
- case VIRTCHNL_OP_UNKNOWN:
- default:
- dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
- vf_id);
- err = ice_vc_send_msg_to_vf(vf, v_opcode,
- VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
- NULL, 0);
- break;
- }
- if (err) {
- /* Helper function cares less about error return values here
- * as it is busy with pending work.
- */
- dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
- vf_id, v_opcode, err);
- }
-
- mutex_unlock(&vf->cfg_lock);
-}
-
-/**
- * ice_get_vf_cfg
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @ivi: VF configuration structure
- *
- * return VF configuration
- */
-int
-ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
-
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
-
- ivi->vf = vf_id;
- ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
-
- /* VF configuration for VLAN and applicable QoS */
- ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
- ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
-
- ivi->trusted = vf->trusted;
- ivi->spoofchk = vf->spoofchk;
- if (!vf->link_forced)
- ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
- else if (vf->link_up)
- ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
- else
- ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
- ivi->max_tx_rate = vf->max_tx_rate;
- ivi->min_tx_rate = vf->min_tx_rate;
- return 0;
-}
-
-/**
- * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
- * @pf: PF used to reference the switch's rules
- * @umac: unicast MAC to compare against existing switch rules
- *
- * Return true on the first/any match, else return false
- */
-static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
-{
- struct ice_sw_recipe *mac_recipe_list =
- &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
- struct ice_fltr_mgmt_list_entry *list_itr;
- struct list_head *rule_head;
- struct mutex *rule_lock; /* protect MAC filter list access */
-
- rule_head = &mac_recipe_list->filt_rules;
- rule_lock = &mac_recipe_list->filt_rule_lock;
-
- mutex_lock(rule_lock);
- list_for_each_entry(list_itr, rule_head, list_entry) {
- u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
-
- if (ether_addr_equal(existing_mac, umac)) {
- mutex_unlock(rule_lock);
- return true;
- }
- }
-
- mutex_unlock(rule_lock);
-
- return false;
-}
-
-/**
- * ice_set_vf_mac
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @mac: MAC address
- *
- * program VF MAC address
- */
-int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
- int ret;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- if (is_multicast_ether_addr(mac)) {
- netdev_err(netdev, "%pM not a valid unicast address\n", mac);
- return -EINVAL;
- }
-
- vf = &pf->vf[vf_id];
- /* nothing left to do, unicast MAC already set */
- if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
- ether_addr_equal(vf->hw_lan_addr.addr, mac))
- return 0;
-
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- if (ice_unicast_mac_exists(pf, mac)) {
- netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
- mac, vf_id, mac);
- return -EINVAL;
- }
-
- mutex_lock(&vf->cfg_lock);
-
- /* VF is notified of its new MAC via the PF's response to the
- * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
- */
- ether_addr_copy(vf->dev_lan_addr.addr, mac);
- ether_addr_copy(vf->hw_lan_addr.addr, mac);
- if (is_zero_ether_addr(mac)) {
- /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
- vf->pf_set_mac = false;
- netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
- vf->vf_id);
- } else {
- /* PF will add MAC rule for the VF */
- vf->pf_set_mac = true;
- netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
- mac, vf_id);
- }
-
- ice_vc_reset_vf(vf);
- mutex_unlock(&vf->cfg_lock);
- return 0;
-}
-
-/**
- * ice_set_vf_trust
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @trusted: Boolean value to enable/disable trusted VF
- *
- * Enable or disable a given VF as trusted
- */
-int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
- int ret;
-
- if (ice_is_eswitch_mode_switchdev(pf)) {
- dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
- return -EOPNOTSUPP;
- }
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- /* Check if already trusted */
- if (trusted == vf->trusted)
- return 0;
-
- mutex_lock(&vf->cfg_lock);
-
- vf->trusted = trusted;
- ice_vc_reset_vf(vf);
- dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
- vf_id, trusted ? "" : "un");
-
- mutex_unlock(&vf->cfg_lock);
-
- return 0;
-}
-
-/**
- * ice_set_vf_link_state
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @link_state: required link state
- *
- * Set VF's link state, irrespective of physical link state status
- */
-int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vf *vf;
- int ret;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- switch (link_state) {
- case IFLA_VF_LINK_STATE_AUTO:
- vf->link_forced = false;
- break;
- case IFLA_VF_LINK_STATE_ENABLE:
- vf->link_forced = true;
- vf->link_up = true;
- break;
- case IFLA_VF_LINK_STATE_DISABLE:
- vf->link_forced = true;
- vf->link_up = false;
- break;
- default:
- return -EINVAL;
- }
-
- ice_vc_notify_vf_link_state(vf);
-
- return 0;
-}
-
-/**
- * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
- * @pf: PF associated with VFs
- */
-static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
-{
- int rate = 0, i;
-
- ice_for_each_vf(pf, i)
- rate += pf->vf[i].min_tx_rate;
-
- return rate;
-}
-
-/**
- * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
- * @vf: VF trying to configure min_tx_rate
- * @min_tx_rate: min Tx rate in Mbps
- *
- * Check if the min_tx_rate being passed in will cause oversubscription of total
- * min_tx_rate based on the current link speed and all other VFs' configured
- * min_tx_rate
- *
- * Return true if the passed min_tx_rate would cause oversubscription, else
- * return false
- */
-static bool
-ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
-{
- int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
- int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
-
- /* this VF's previous rate is being overwritten */
- all_vfs_min_tx_rate -= vf->min_tx_rate;
-
- if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
- dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
- min_tx_rate, vf->vf_id,
- all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
- link_speed_mbps);
- return true;
- }
-
- return false;
-}
-
-/**
- * ice_set_vf_bw - set min/max VF bandwidth
- * @netdev: network interface device structure
- * @vf_id: VF identifier
- * @min_tx_rate: Minimum Tx rate in Mbps
- * @max_tx_rate: Maximum Tx rate in Mbps
- */
-int
-ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
- int max_tx_rate)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_vsi *vsi;
- struct device *dev;
- struct ice_vf *vf;
- int ret;
-
- dev = ice_pf_to_dev(pf);
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- vsi = ice_get_vf_vsi(vf);
-
- /* when max_tx_rate is zero that means no max Tx rate limiting, so only
- * check if max_tx_rate is non-zero
- */
- if (max_tx_rate && min_tx_rate > max_tx_rate) {
- dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
- min_tx_rate, max_tx_rate);
- return -EINVAL;
- }
-
- if (min_tx_rate && ice_is_dcb_active(pf)) {
- dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
- return -EOPNOTSUPP;
- }
-
- if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate))
- return -EINVAL;
-
- if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
- ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
- if (ret) {
- dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
- vf->vf_id);
- return ret;
- }
-
- vf->min_tx_rate = min_tx_rate;
- }
-
- if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
- ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
- if (ret) {
- dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
- vf->vf_id);
- return ret;
- }
-
- vf->max_tx_rate = max_tx_rate;
- }
-
- return 0;
-}
-
-/**
- * ice_get_vf_stats - populate some stats for the VF
- * @netdev: the netdev of the PF
- * @vf_id: the host OS identifier (0-255)
- * @vf_stats: pointer to the OS memory to be initialized
- */
-int ice_get_vf_stats(struct net_device *netdev, int vf_id,
- struct ifla_vf_stats *vf_stats)
-{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct ice_eth_stats *stats;
- struct ice_vsi *vsi;
- struct ice_vf *vf;
- int ret;
-
- if (ice_validate_vf_id(pf, vf_id))
- return -EINVAL;
-
- vf = &pf->vf[vf_id];
- ret = ice_check_vf_ready_for_cfg(vf);
- if (ret)
- return ret;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
-
- ice_update_eth_stats(vsi);
- stats = &vsi->eth_stats;
-
- memset(vf_stats, 0, sizeof(*vf_stats));
-
- vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
- stats->rx_multicast;
- vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
- stats->tx_multicast;
- vf_stats->rx_bytes = stats->rx_bytes;
- vf_stats->tx_bytes = stats->tx_bytes;
- vf_stats->broadcast = stats->rx_broadcast;
- vf_stats->multicast = stats->rx_multicast;
- vf_stats->rx_dropped = stats->rx_discards;
- vf_stats->tx_dropped = stats->tx_discards;
-
- return 0;
-}
-
-/**
- * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
- * @vf: pointer to the VF structure
- */
-void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
-{
- struct ice_pf *pf = vf->pf;
- struct device *dev;
-
- dev = ice_pf_to_dev(pf);
-
- dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
- vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
- vf->dev_lan_addr.addr,
- test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
- ? "on" : "off");
-}
-
-/**
- * ice_print_vfs_mdd_events - print VFs malicious driver detect event
- * @pf: pointer to the PF structure
- *
- * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
- */
-void ice_print_vfs_mdd_events(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- int i;
-
- /* check that there are pending MDD events to print */
- if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
- return;
-
- /* VF MDD event logs are rate limited to one second intervals */
- if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
- return;
-
- pf->last_printed_mdd_jiffies = jiffies;
-
- ice_for_each_vf(pf, i) {
- struct ice_vf *vf = &pf->vf[i];
-
- /* only print Rx MDD event message if there are new events */
- if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
- vf->mdd_rx_events.last_printed =
- vf->mdd_rx_events.count;
- ice_print_vf_rx_mdd_event(vf);
- }
-
- /* only print Tx MDD event message if there are new events */
- if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
- vf->mdd_tx_events.last_printed =
- vf->mdd_tx_events.count;
-
- dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
- vf->mdd_tx_events.count, hw->pf_id, i,
- vf->dev_lan_addr.addr);
- }
- }
-}
-
-/**
- * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
- * @pdev: pointer to a pci_dev structure
- *
- * Called when recovering from a PF FLR to restore interrupt capability to
- * the VFs.
- */
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
-{
- u16 vf_id;
- int pos;
-
- if (!pci_num_vf(pdev))
- return;
-
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- struct pci_dev *vfdev;
-
- pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
- &vf_id);
- vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
- while (vfdev) {
- if (vfdev->is_virtfn && vfdev->physfn == pdev)
- pci_restore_msi_state(vfdev);
- vfdev = pci_get_device(pdev->vendor, vf_id,
- vfdev);
- }
- }
-}
-
-/**
- * ice_is_malicious_vf - helper function to detect a malicious VF
- * @pf: ptr to struct ice_pf
- * @event: pointer to the AQ event
- * @num_msg_proc: the number of messages processed so far
- * @num_msg_pending: the number of messages pending in the admin queue
- */
-bool
-ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
- u16 num_msg_proc, u16 num_msg_pending)
-{
- s16 vf_id = le16_to_cpu(event->desc.retval);
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_mbx_data mbxdata;
- enum ice_status status;
- bool malvf = false;
- struct ice_vf *vf;
-
- if (ice_validate_vf_id(pf, vf_id))
- return false;
-
- vf = &pf->vf[vf_id];
- /* Check if VF is disabled. */
- if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
- return false;
-
- mbxdata.num_msg_proc = num_msg_proc;
- mbxdata.num_pending_arq = num_msg_pending;
- mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
-#define ICE_MBX_OVERFLOW_WATERMARK 64
- mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
-
- /* check to see if we have a malicious VF */
- status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
- if (status)
- return false;
-
- if (malvf) {
- bool report_vf = false;
-
- /* if the VF is malicious and we haven't let the user
- * know about it, then let them know now
- */
- status = ice_mbx_report_malvf(&pf->hw, pf->malvfs,
- ICE_MAX_VF_COUNT, vf_id,
- &report_vf);
- if (status)
- dev_dbg(dev, "Error reporting malicious VF\n");
-
- if (report_vf) {
- struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
-
- if (pf_vsi)
- dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
- &vf->dev_lan_addr.addr[0],
- pf_vsi->netdev->dev_addr);
- }
-
- return true;
- }
-
- /* if there was an error in detection or the VF is not malicious then
- * return false
- */
- return false;
-}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
deleted file mode 100644
index 7e28ecbbe7af..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ /dev/null
@@ -1,344 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
-
-#ifndef _ICE_VIRTCHNL_PF_H_
-#define _ICE_VIRTCHNL_PF_H_
-#include "ice.h"
-#include "ice_virtchnl_fdir.h"
-
-/* Restrict the number of MAC Addr and VLAN filters that a non-trusted VF can program */
-#define ICE_MAX_VLAN_PER_VF 8
-/* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for
- * broadcast, and 16 for additional unicast/multicast filters
- */
-#define ICE_MAX_MACADDR_PER_VF 18
-
-/* Malicious Driver Detection */
-#define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED 10
-#define ICE_MDD_EVENTS_THRESHOLD 30
-
-/* Static VF transaction/status register def */
-#define VF_DEVICE_STATUS 0xAA
-#define VF_TRANS_PENDING_M 0x20
-
-/* wait defines for polling PF_PCI_CIAD register status */
-#define ICE_PCI_CIAD_WAIT_COUNT 100
-#define ICE_PCI_CIAD_WAIT_DELAY_US 1
-
-/* VF resource constraints */
-#define ICE_MAX_VF_COUNT 256
-#define ICE_MIN_QS_PER_VF 1
-#define ICE_NONQ_VECS_VF 1
-#define ICE_MAX_SCATTER_QS_PER_VF 16
-#define ICE_MAX_RSS_QS_PER_VF 16
-#define ICE_NUM_VF_MSIX_MED 17
-#define ICE_NUM_VF_MSIX_SMALL 5
-#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3
-#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
-#define ICE_MAX_VF_RESET_TRIES 40
-#define ICE_MAX_VF_RESET_SLEEP_MS 20
-
-#define ice_for_each_vf(pf, i) \
- for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++)
-
-/* Specific VF states */
-enum ice_vf_states {
- ICE_VF_STATE_INIT = 0, /* PF is initializing VF */
- ICE_VF_STATE_ACTIVE, /* VF resources are allocated for use */
- ICE_VF_STATE_QS_ENA, /* VF queue(s) enabled */
- ICE_VF_STATE_DIS,
- ICE_VF_STATE_MC_PROMISC,
- ICE_VF_STATE_UC_PROMISC,
- ICE_VF_STATES_NBITS
-};
-
-/* VF capabilities */
-enum ice_virtchnl_cap {
- ICE_VIRTCHNL_VF_CAP_L2 = 0,
- ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
-};
-
-struct ice_time_mac {
- unsigned long time_modified;
- u8 addr[ETH_ALEN];
-};
-
-/* VF MDD events print structure */
-struct ice_mdd_vf_events {
- u16 count; /* total count of Rx|Tx events */
- /* count number of the last printed event */
- u16 last_printed;
-};
-
-struct ice_vf;
-
-struct ice_vc_vf_ops {
- int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
- int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
- void (*reset_vf)(struct ice_vf *vf);
- int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
- int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
- int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*request_qs_msg)(struct ice_vf *vf, u8 *msg);
- int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
- int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
- int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
- int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
- int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
- int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
- int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
- int (*ena_vlan_stripping)(struct ice_vf *vf);
- int (*dis_vlan_stripping)(struct ice_vf *vf);
- int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
- int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
- int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
-};
-
-/* VF information structure */
-struct ice_vf {
- struct ice_pf *pf;
-
- /* Used during virtchnl message handling and NDO ops against the VF
- * that will trigger a VFR
- */
- struct mutex cfg_lock;
-
- u16 vf_id; /* VF ID in the PF space */
- u16 lan_vsi_idx; /* index into PF struct */
- u16 ctrl_vsi_idx;
- struct ice_vf_fdir fdir;
- /* first vector index of this VF in the PF space */
- int first_vector_idx;
- struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
- struct virtchnl_version_info vf_ver;
- u32 driver_caps; /* reported by VF driver */
- struct virtchnl_ether_addr dev_lan_addr;
- struct virtchnl_ether_addr hw_lan_addr;
- struct ice_time_mac legacy_last_added_umac;
- DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
- DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- u16 port_vlan_info; /* Port VLAN ID and QoS */
- u8 pf_set_mac:1; /* VF MAC address set by VMM admin */
- u8 trusted:1;
- u8 spoofchk:1;
- u8 link_forced:1;
- u8 link_up:1; /* only valid if VF link is forced */
- /* VSI indices - actual VSI pointers are maintained in the PF structure
- * When assigned, these will be non-zero, because VSI 0 is always
- * the main LAN VSI for the PF.
- */
- u16 lan_vsi_num; /* ID as used by firmware */
- unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
- unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
- DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
-
- u64 num_inval_msgs; /* number of continuous invalid msgs */
- u64 num_valid_msgs; /* number of valid msgs detected */
- unsigned long vf_caps; /* VF's adv. capabilities */
- u8 num_req_qs; /* num of queue pairs requested by VF */
- u16 num_mac;
- u16 num_vf_qs; /* num of queue configured per VF */
- struct ice_mdd_vf_events mdd_rx_events;
- struct ice_mdd_vf_events mdd_tx_events;
- DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
-
- struct ice_repr *repr;
-
- struct ice_vc_vf_ops vc_ops;
-
- /* devlink port data */
- struct devlink_port devlink_port;
-};
-
-#ifdef CONFIG_PCI_IOV
-struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
-void ice_process_vflr_event(struct ice_pf *pf);
-int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
-int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
-int
-ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
-
-void ice_free_vfs(struct ice_pf *pf);
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
-void ice_vc_notify_link_state(struct ice_pf *pf);
-void ice_vc_notify_reset(struct ice_pf *pf);
-void ice_vc_notify_vf_link_state(struct ice_vf *vf);
-void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops);
-void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops);
-bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
-bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
-bool
-ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
- u16 num_msg_proc, u16 num_msg_pending);
-
-int
-ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
- __be16 vlan_proto);
-
-int
-ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
- int max_tx_rate);
-
-int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
-
-int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
-
-int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
-
-bool ice_is_vf_disabled(struct ice_vf *vf);
-
-int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
-
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
-
-void ice_set_vf_state_qs_dis(struct ice_vf *vf);
-int
-ice_get_vf_stats(struct net_device *netdev, int vf_id,
- struct ifla_vf_stats *vf_stats);
-bool ice_is_any_vf_in_promisc(struct ice_pf *pf);
-void
-ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
-void ice_print_vfs_mdd_events(struct ice_pf *pf);
-void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
-struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
-int
-ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
- enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
-bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
-#else /* CONFIG_PCI_IOV */
-static inline void ice_process_vflr_event(struct ice_pf *pf) { }
-static inline void ice_free_vfs(struct ice_pf *pf) { }
-static inline
-void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
-static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
-static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
-static inline void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) { }
-static inline void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) { }
-static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) { }
-static inline
-void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
-static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
-static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
-
-static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
-{
- return -EOPNOTSUPP;
-}
-
-static inline bool ice_is_vf_disabled(struct ice_vf *vf)
-{
- return true;
-}
-
-static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
-{
- return NULL;
-}
-
-static inline bool
-ice_is_malicious_vf(struct ice_pf __always_unused *pf,
- struct ice_rq_event_info __always_unused *event,
- u16 __always_unused num_msg_proc,
- u16 __always_unused num_msg_pending)
-{
- return false;
-}
-
-static inline bool
-ice_reset_all_vfs(struct ice_pf __always_unused *pf,
- bool __always_unused is_vflr)
-{
- return true;
-}
-
-static inline bool
-ice_reset_vf(struct ice_vf __always_unused *vf, bool __always_unused is_vflr)
-{
- return true;
-}
-
-static inline int
-ice_sriov_configure(struct pci_dev __always_unused *pdev,
- int __always_unused num_vfs)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_mac(struct net_device __always_unused *netdev,
- int __always_unused vf_id, u8 __always_unused *mac)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_get_vf_cfg(struct net_device __always_unused *netdev,
- int __always_unused vf_id,
- struct ifla_vf_info __always_unused *ivi)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_trust(struct net_device __always_unused *netdev,
- int __always_unused vf_id, bool __always_unused trusted)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_port_vlan(struct net_device __always_unused *netdev,
- int __always_unused vf_id, u16 __always_unused vid,
- u8 __always_unused qos, __be16 __always_unused v_proto)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_spoofchk(struct net_device __always_unused *netdev,
- int __always_unused vf_id, bool __always_unused ena)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_link_state(struct net_device __always_unused *netdev,
- int __always_unused vf_id, int __always_unused link_state)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_set_vf_bw(struct net_device __always_unused *netdev,
- int __always_unused vf_id, int __always_unused min_tx_rate,
- int __always_unused max_tx_rate)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
- struct ice_q_vector __always_unused *q_vector)
-{
- return 0;
-}
-
-static inline int
-ice_get_vf_stats(struct net_device __always_unused *netdev,
- int __always_unused vf_id,
- struct ifla_vf_stats __always_unused *vf_stats)
-{
- return -EOPNOTSUPP;
-}
-
-static inline bool ice_is_any_vf_in_promisc(struct ice_pf __always_unused *pf)
-{
- return false;
-}
-#endif /* CONFIG_PCI_IOV */
-#endif /* _ICE_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan.h b/drivers/net/ethernet/intel/ice/ice_vlan.h
new file mode 100644
index 000000000000..bc4550a03173
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vlan.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VLAN_H_
+#define _ICE_VLAN_H_
+
+#include <linux/types.h>
+#include "ice_type.h"
+
+struct ice_vlan {
+ u16 tpid;
+ u16 vid;
+ u8 prio;
+};
+
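+/* Convenience compound-literal initializer for struct ice_vlan. For example,
+ * ICE_VLAN(ETH_P_8021Q, 10, 0) would describe an 802.1Q VLAN with ID 10 and
+ * priority 0.
+ */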
+#define ICE_VLAN(tpid, vid, prio) ((struct ice_vlan){ tpid, vid, prio })
+
+#endif /* _ICE_VLAN_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
new file mode 100644
index 000000000000..bcda2e004807
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_common.h"
+
+/**
+ * ice_pkg_get_supported_vlan_mode - determine if DDP supports Double VLAN mode
+ * @hw: pointer to the HW struct
+ * @dvm: output variable to determine if DDP supports DVM (true) or SVM (false)
+ */
+static int
+ice_pkg_get_supported_vlan_mode(struct ice_hw *hw, bool *dvm)
+{
+ u16 meta_init_size = sizeof(struct ice_meta_init_section);
+ struct ice_meta_init_section *sect;
+ struct ice_buf_build *bld;
+ int status;
+
+ /* if anything fails, we assume there is no DVM support */
+ *dvm = false;
+
+ bld = ice_pkg_buf_alloc_single_section(hw,
+ ICE_SID_RXPARSER_METADATA_INIT,
+ meta_init_size, (void **)&sect);
+ if (!bld)
+ return -ENOMEM;
+
+ /* only need to read a single section */
+ sect->count = cpu_to_le16(1);
+ sect->offset = cpu_to_le16(ICE_META_VLAN_MODE_ENTRY);
+
+ status = ice_aq_upload_section(hw,
+ (struct ice_buf_hdr *)ice_pkg_buf(bld),
+ ICE_PKG_BUF_SIZE, NULL);
+ if (!status) {
+ DECLARE_BITMAP(entry, ICE_META_INIT_BITS);
+ u32 arr[ICE_META_INIT_DW_CNT];
+ u16 i;
+
+ /* convert to host bitmap format */
+ for (i = 0; i < ICE_META_INIT_DW_CNT; i++)
+ arr[i] = le32_to_cpu(sect->entry.bm[i]);
+
+ bitmap_from_arr32(entry, arr, (u16)ICE_META_INIT_BITS);
+
+ /* check if DVM is supported */
+ *dvm = test_bit(ICE_META_VLAN_MODE_BIT, entry);
+ }
+
+ ice_pkg_buf_free(hw, bld);
+
+ return status;
+}
+
+/**
+ * ice_aq_get_vlan_mode - get the VLAN mode of the device
+ * @hw: pointer to the HW structure
+ * @get_params: structure FW fills in based on the current VLAN mode config
+ *
+ * Get VLAN Mode Parameters (0x020D)
+ */
+static int
+ice_aq_get_vlan_mode(struct ice_hw *hw,
+ struct ice_aqc_get_vlan_mode *get_params)
+{
+ struct ice_aq_desc desc;
+
+ if (!get_params)
+ return -EINVAL;
+
+ ice_fill_dflt_direct_cmd_desc(&desc,
+ ice_aqc_opc_get_vlan_mode_parameters);
+
+ return ice_aq_send_cmd(hw, &desc, get_params, sizeof(*get_params),
+ NULL);
+}
+
+/**
+ * ice_aq_is_dvm_ena - query FW to check if double VLAN mode is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Returns true if the hardware/firmware is configured in double VLAN mode,
+ * else returns false, signaling that the hardware/firmware is configured in
+ * single VLAN mode.
+ *
+ * Also, return false if this call fails for any reason (i.e. firmware doesn't
+ * support this AQ call).
+ */
+static bool ice_aq_is_dvm_ena(struct ice_hw *hw)
+{
+ struct ice_aqc_get_vlan_mode get_params = { 0 };
+ int status;
+
+ status = ice_aq_get_vlan_mode(hw, &get_params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_AQ, "Failed to get VLAN mode, status %d\n",
+ status);
+ return false;
+ }
+
+ return (get_params.vlan_mode & ICE_AQ_VLAN_MODE_DVM_ENA);
+}
+
+/**
+ * ice_is_dvm_ena - check if double VLAN mode is enabled
+ * @hw: pointer to the HW structure
+ *
+ * The device is configured in single or double VLAN mode on initialization,
+ * and this cannot be changed dynamically at runtime. Because of this, there
+ * is no need to make an AQ call every time the driver needs to know the VLAN
+ * mode. Instead, use the cached VLAN mode.
+ */
+bool ice_is_dvm_ena(struct ice_hw *hw)
+{
+ return hw->dvm_ena;
+}
+
+/**
+ * ice_cache_vlan_mode - cache VLAN mode after DDP is downloaded
+ * @hw: pointer to the HW structure
+ *
+ * This is only called after downloading the DDP and after the global
+ * configuration lock has been released because all ports on a device need to
+ * cache the VLAN mode.
+ */
+static void ice_cache_vlan_mode(struct ice_hw *hw)
+{
+ hw->dvm_ena = ice_aq_is_dvm_ena(hw);
+}
+
+/**
+ * ice_pkg_supports_dvm - find out if DDP supports DVM
+ * @hw: pointer to the HW structure
+ */
+static bool ice_pkg_supports_dvm(struct ice_hw *hw)
+{
+ bool pkg_supports_dvm;
+ int status;
+
+ status = ice_pkg_get_supported_vlan_mode(hw, &pkg_supports_dvm);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG, "Failed to get supported VLAN mode, status %d\n",
+ status);
+ return false;
+ }
+
+ return pkg_supports_dvm;
+}
+
+/**
+ * ice_fw_supports_dvm - find out if FW supports DVM
+ * @hw: pointer to the HW structure
+ */
+static bool ice_fw_supports_dvm(struct ice_hw *hw)
+{
+ struct ice_aqc_get_vlan_mode get_vlan_mode = { 0 };
+ int status;
+
+ /* If firmware returns success, then it supports DVM, else it only
+ * supports SVM
+ */
+ status = ice_aq_get_vlan_mode(hw, &get_vlan_mode);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to get VLAN mode, status %d\n",
+ status);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_is_dvm_supported - check if Double VLAN Mode is supported
+ * @hw: pointer to the hardware structure
+ *
+ * Returns true if Double VLAN Mode (DVM) is supported and false if only Single
+ * VLAN Mode (SVM) is supported. In order for DVM to be supported the DDP and
+ * firmware must support it, otherwise only SVM is supported. This function
+ * should only be called while the global config lock is held and after the
+ * package has been successfully downloaded.
+ */
+static bool ice_is_dvm_supported(struct ice_hw *hw)
+{
+ if (!ice_pkg_supports_dvm(hw)) {
+ ice_debug(hw, ICE_DBG_PKG, "DDP doesn't support DVM\n");
+ return false;
+ }
+
+ if (!ice_fw_supports_dvm(hw)) {
+ ice_debug(hw, ICE_DBG_PKG, "FW doesn't support DVM\n");
+ return false;
+ }
+
+ return true;
+}
+
+#define ICE_EXTERNAL_VLAN_ID_FV_IDX 11
+#define ICE_SW_LKUP_VLAN_LOC_LKUP_IDX 1
+#define ICE_SW_LKUP_VLAN_PKT_FLAGS_LKUP_IDX 2
+#define ICE_SW_LKUP_PROMISC_VLAN_LOC_LKUP_IDX 2
+#define ICE_PKT_FLAGS_0_TO_15_FV_IDX 1
+static struct ice_update_recipe_lkup_idx_params ice_dvm_dflt_recipes[] = {
+ {
+ /* Update recipe ICE_SW_LKUP_VLAN to filter based on the
+ * outer/single VLAN in DVM
+ */
+ .rid = ICE_SW_LKUP_VLAN,
+ .fv_idx = ICE_EXTERNAL_VLAN_ID_FV_IDX,
+ .ignore_valid = true,
+ .mask = 0,
+ .mask_valid = false, /* use pre-existing mask */
+ .lkup_idx = ICE_SW_LKUP_VLAN_LOC_LKUP_IDX,
+ },
+ {
+ /* Update recipe ICE_SW_LKUP_VLAN to filter based on the VLAN
+ * packet flags to support VLAN filtering on multiple VLAN
+ * ethertypes (i.e. 0x8100 and 0x88a8) in DVM
+ */
+ .rid = ICE_SW_LKUP_VLAN,
+ .fv_idx = ICE_PKT_FLAGS_0_TO_15_FV_IDX,
+ .ignore_valid = false,
+ .mask = ICE_PKT_FLAGS_0_TO_15_VLAN_FLAGS_MASK,
+ .mask_valid = true,
+ .lkup_idx = ICE_SW_LKUP_VLAN_PKT_FLAGS_LKUP_IDX,
+ },
+ {
+ /* Update recipe ICE_SW_LKUP_PROMISC_VLAN to filter based on the
+ * outer/single VLAN in DVM
+ */
+ .rid = ICE_SW_LKUP_PROMISC_VLAN,
+ .fv_idx = ICE_EXTERNAL_VLAN_ID_FV_IDX,
+ .ignore_valid = true,
+ .mask = 0,
+ .mask_valid = false, /* use pre-existing mask */
+ .lkup_idx = ICE_SW_LKUP_PROMISC_VLAN_LOC_LKUP_IDX,
+ },
+};
+
+/**
+ * ice_dvm_update_dflt_recipes - update default switch recipes in DVM
+ * @hw: hardware structure used to update the recipes
+ */
+static int ice_dvm_update_dflt_recipes(struct ice_hw *hw)
+{
+ unsigned long i;
+
+ for (i = 0; i < ARRAY_SIZE(ice_dvm_dflt_recipes); i++) {
+ struct ice_update_recipe_lkup_idx_params *params;
+ int status;
+
+ params = &ice_dvm_dflt_recipes[i];
+
+ status = ice_update_recipe_lkup_idx(hw, params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to update RID %d lkup_idx %d fv_idx %d mask_valid %s mask 0x%04x\n",
+ params->rid, params->lkup_idx, params->fv_idx,
+ params->mask_valid ? "true" : "false",
+ params->mask);
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_aq_set_vlan_mode - set the VLAN mode of the device
+ * @hw: pointer to the HW structure
+ * @set_params: requested VLAN mode configuration
+ *
+ * Set VLAN Mode Parameters (0x020C)
+ */
+static int
+ice_aq_set_vlan_mode(struct ice_hw *hw,
+ struct ice_aqc_set_vlan_mode *set_params)
+{
+ u8 rdma_packet, mng_vlan_prot_id;
+ struct ice_aq_desc desc;
+
+ if (!set_params)
+ return -EINVAL;
+
+ if (set_params->l2tag_prio_tagging > ICE_AQ_VLAN_PRIO_TAG_MAX)
+ return -EINVAL;
+
+ rdma_packet = set_params->rdma_packet;
+ if (rdma_packet != ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING &&
+ rdma_packet != ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING)
+ return -EINVAL;
+
+ mng_vlan_prot_id = set_params->mng_vlan_prot_id;
+ if (mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER &&
+ mng_vlan_prot_id != ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER)
+ return -EINVAL;
+
+ ice_fill_dflt_direct_cmd_desc(&desc,
+ ice_aqc_opc_set_vlan_mode_parameters);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, set_params, sizeof(*set_params),
+ NULL);
+}
+
+/**
+ * ice_set_dvm - sets up software and hardware for double VLAN mode
+ * @hw: pointer to the hardware structure
+ */
+static int ice_set_dvm(struct ice_hw *hw)
+{
+ struct ice_aqc_set_vlan_mode params = { 0 };
+ int status;
+
+ params.l2tag_prio_tagging = ICE_AQ_VLAN_PRIO_TAG_OUTER_CTAG;
+ params.rdma_packet = ICE_AQ_DVM_VLAN_RDMA_PKT_FLAG_SETTING;
+ params.mng_vlan_prot_id = ICE_AQ_VLAN_MNG_PROTOCOL_ID_OUTER;
+
+ status = ice_aq_set_vlan_mode(hw, &params);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set double VLAN mode parameters, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_dvm_update_dflt_recipes(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to update default recipes for double VLAN mode, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_aq_set_port_params(hw->port_info, true, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set port in double VLAN mode, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_set_dvm_boost_entries(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set boost TCAM entries for double VLAN mode, status %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_set_svm - set single VLAN mode
+ * @hw: pointer to the HW structure
+ */
+static int ice_set_svm(struct ice_hw *hw)
+{
+ struct ice_aqc_set_vlan_mode *set_params;
+ int status;
+
+ status = ice_aq_set_port_params(hw->port_info, false, NULL);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set port parameters for single VLAN mode\n");
+ return status;
+ }
+
+ set_params = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*set_params),
+ GFP_KERNEL);
+ if (!set_params)
+ return -ENOMEM;
+
+ /* default configuration for SVM */
+ set_params->l2tag_prio_tagging = ICE_AQ_VLAN_PRIO_TAG_INNER_CTAG;
+ set_params->rdma_packet = ICE_AQ_SVM_VLAN_RDMA_PKT_FLAG_SETTING;
+ set_params->mng_vlan_prot_id = ICE_AQ_VLAN_MNG_PROTOCOL_ID_INNER;
+
+ status = ice_aq_set_vlan_mode(hw, set_params);
+ if (status)
+ ice_debug(hw, ICE_DBG_INIT, "Failed to configure port in single VLAN mode\n");
+
+ devm_kfree(ice_hw_to_dev(hw), set_params);
+ return status;
+}
+
+/**
+ * ice_set_vlan_mode - configure the device's VLAN mode
+ * @hw: pointer to the HW structure
+ *
+ * Configure the device in double VLAN mode when the DDP and firmware support
+ * it; if enabling DVM fails, fall back to single VLAN mode. When DVM is not
+ * supported, leave the default configuration in place.
+ */
+int ice_set_vlan_mode(struct ice_hw *hw)
+{
+ if (!ice_is_dvm_supported(hw))
+ return 0;
+
+ if (!ice_set_dvm(hw))
+ return 0;
+
+ return ice_set_svm(hw);
+}
+
+/**
+ * ice_print_dvm_not_supported - print if DDP and/or FW doesn't support DVM
+ * @hw: pointer to the HW structure
+ *
+ * The purpose of this function is to print that QinQ is not supported due to
+ * an incompatible DDP and/or FW. This gives the user a hint to update one or
+ * both components if they expect QinQ functionality.
+ */
+static void ice_print_dvm_not_supported(struct ice_hw *hw)
+{
+ bool pkg_supports_dvm = ice_pkg_supports_dvm(hw);
+ bool fw_supports_dvm = ice_fw_supports_dvm(hw);
+
+ if (!fw_supports_dvm && !pkg_supports_dvm)
+ dev_info(ice_hw_to_dev(hw), "QinQ functionality cannot be enabled on this device. Update your DDP package and NVM to versions that support QinQ.\n");
+ else if (!pkg_supports_dvm)
+ dev_info(ice_hw_to_dev(hw), "QinQ functionality cannot be enabled on this device. Update your DDP package to a version that supports QinQ.\n");
+ else if (!fw_supports_dvm)
+ dev_info(ice_hw_to_dev(hw), "QinQ functionality cannot be enabled on this device. Update your NVM to a version that supports QinQ.\n");
+}
+
+/**
+ * ice_post_pkg_dwnld_vlan_mode_cfg - configure VLAN mode after DDP download
+ * @hw: pointer to the HW structure
+ *
+ * This function is meant to configure any VLAN mode specific functionality
+ * after the global configuration lock has been released and the DDP has been
+ * downloaded.
+ *
+ * Since only one PF downloads the DDP and configures the VLAN mode there needs
+ * to be a way to configure the other PFs after the DDP has been downloaded and
+ * the global configuration lock has been released. All such code should go in
+ * this function.
+ */
+void ice_post_pkg_dwnld_vlan_mode_cfg(struct ice_hw *hw)
+{
+ ice_cache_vlan_mode(hw);
+
+ if (ice_is_dvm_ena(hw))
+ ice_change_proto_id_to_dvm();
+ else
+ ice_print_dvm_not_supported(hw);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.h b/drivers/net/ethernet/intel/ice/ice_vlan_mode.h
new file mode 100644
index 000000000000..a0fb743d08e2
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VLAN_MODE_H_
+#define _ICE_VLAN_MODE_H_
+
+struct ice_hw;
+
+bool ice_is_dvm_ena(struct ice_hw *hw);
+int ice_set_vlan_mode(struct ice_hw *hw);
+void ice_post_pkg_dwnld_vlan_mode_cfg(struct ice_hw *hw);
+
+#endif /* _ICE_VLAN_MODE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
new file mode 100644
index 000000000000..5b4a0abb4607
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_vsi_vlan_lib.h"
+#include "ice_lib.h"
+#include "ice_fltr.h"
+#include "ice.h"
+
+static void print_invalid_tpid(struct ice_vsi *vsi, u16 tpid)
+{
+ dev_err(ice_pf_to_dev(vsi->back), "%s %d specified invalid VLAN tpid 0x%04x\n",
+ ice_vsi_type_str(vsi->type), vsi->idx, tpid);
+}
+
+/**
+ * validate_vlan - check if the ice_vlan passed in is valid
+ * @vsi: VSI used for printing error message
+ * @vlan: ice_vlan structure to validate
+ *
+ * Return true if the VLAN TPID is one of the supported values, or if both the
+ * TPID and VID are 0. The former allows VLAN filters with the specified TPID
+ * to be added, the latter allows untagged VLAN 0 filters to be added to the
+ * prune list.
+ */
+static bool validate_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ if (vlan->tpid != ETH_P_8021Q && vlan->tpid != ETH_P_8021AD &&
+ vlan->tpid != ETH_P_QINQ1 && (vlan->tpid || vlan->vid)) {
+ print_invalid_tpid(vsi, vlan->tpid);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_vsi_add_vlan - default add VLAN implementation for all VSI types
+ * @vsi: VSI being configured
+ * @vlan: VLAN filter to add
+ */
+int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ int err;
+
+ if (!validate_vlan(vsi, vlan))
+ return -EINVAL;
+
+ err = ice_fltr_add_vlan(vsi, vlan);
+ if (err && err != -EEXIST) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n",
+ vlan->vid, vsi->vsi_num, err);
+ return err;
+ }
+
+ vsi->num_vlan++;
+ return 0;
+}
+
+/**
+ * ice_vsi_del_vlan - default del VLAN implementation for all VSI types
+ * @vsi: VSI being configured
+ * @vlan: VLAN filter to delete
+ */
+int ice_vsi_del_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ int err;
+
+ if (!validate_vlan(vsi, vlan))
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
+
+ err = ice_fltr_remove_vlan(vsi, vlan);
+ if (!err)
+ vsi->num_vlan--;
+ else if (err == -ENOENT || err == -EBUSY)
+ err = 0;
+ else
+ dev_err(dev, "Error removing VLAN %d on VSI %i error: %d\n",
+ vlan->vid, vsi->vsi_num, err);
+
+ return err;
+}
+
+/**
+ * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
+ * @vsi: the VSI being changed
+ */
+static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ /* Here we are configuring the VSI to let the driver add VLAN tags by
+ * setting inner_vlan_flags to ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL. The
+ * actual VLAN tag insertion happens in the Tx hot path, in ice_tx_map.
+ */
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
+
+ /* Preserve existing VLAN strip setting */
+ ctxt->info.inner_vlan_flags |= (vsi->info.inner_vlan_flags &
+ ICE_AQ_VSI_INNER_VLAN_EMODE_M);
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ goto out;
+ }
+
+ vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
+out:
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
+ * @vsi: the VSI being changed
+ * @ena: boolean value indicating if this is an enable or disable request
+ */
+static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ /* do not allow modifying VLAN stripping when a port VLAN is configured
+ * on this VSI
+ */
+ if (vsi->info.port_based_inner_vlan)
+ return 0;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ /* Here we are configuring what the VSI should do with the VLAN tag in
+ * the Rx packet. We can either leave the tag in the packet or put it in
+ * the Rx descriptor.
+ */
+ if (ena)
+ /* Strip VLAN tag from Rx packet and put it in the desc */
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_STR_BOTH;
+ else
+ /* Disable stripping. Leave tag in packet */
+ ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING;
+
+ /* Allow all packets untagged/tagged */
+ ctxt->info.inner_vlan_flags |= ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n",
+ ena, err, ice_aq_str(hw->adminq.sq_last_status));
+ goto out;
+ }
+
+ vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
+out:
+ kfree(ctxt);
+ return err;
+}
+
+int ice_vsi_ena_inner_stripping(struct ice_vsi *vsi, const u16 tpid)
+{
+ if (tpid != ETH_P_8021Q) {
+ print_invalid_tpid(vsi, tpid);
+ return -EINVAL;
+ }
+
+ return ice_vsi_manage_vlan_stripping(vsi, true);
+}
+
+int ice_vsi_dis_inner_stripping(struct ice_vsi *vsi)
+{
+ return ice_vsi_manage_vlan_stripping(vsi, false);
+}
+
+int ice_vsi_ena_inner_insertion(struct ice_vsi *vsi, const u16 tpid)
+{
+ if (tpid != ETH_P_8021Q) {
+ print_invalid_tpid(vsi, tpid);
+ return -EINVAL;
+ }
+
+ return ice_vsi_manage_vlan_insertion(vsi);
+}
+
+int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi)
+{
+ return ice_vsi_manage_vlan_insertion(vsi);
+}
+
+/**
+ * __ice_vsi_set_inner_port_vlan - set port VLAN VSI context settings to enable a port VLAN
+ * @vsi: the VSI to update
+ * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
+ */
+static int __ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, u16 pvid_info)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_aqc_vsi_props *info;
+ struct ice_vsi_ctx *ctxt;
+ int ret;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+ info = &ctxt->info;
+ info->inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED |
+ ICE_AQ_VSI_INNER_VLAN_INSERT_PVID |
+ ICE_AQ_VSI_INNER_VLAN_EMODE_STR;
+ info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ info->port_based_inner_vlan = cpu_to_le16(pvid_info);
+ info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret) {
+ dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
+ goto out;
+ }
+
+ vsi->info.inner_vlan_flags = info->inner_vlan_flags;
+ vsi->info.sw_flags2 = info->sw_flags2;
+ vsi->info.port_based_inner_vlan = info->port_based_inner_vlan;
+out:
+ kfree(ctxt);
+ return ret;
+}
+
+int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u16 port_vlan_info;
+
+ if (vlan->tpid != ETH_P_8021Q)
+ return -EINVAL;
+
+ if (vlan->prio > 7)
+ return -EINVAL;
+
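+ /* pack a TCI-style value: VID in bits 0-11, 802.1p priority in bits 13-15 */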
+ port_vlan_info = vlan->vid | (vlan->prio << VLAN_PRIO_SHIFT);
+
+ return __ice_vsi_set_inner_port_vlan(vsi, port_vlan_info);
+}
+
+/**
+ * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
+ * @vsi: VSI to enable or disable VLAN pruning on
+ * @ena: set to true to enable VLAN pruning and false to disable it
+ *
+ * Returns 0 if the VSI is updated, negative otherwise.
+ */
+static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+{
+ struct ice_vsi_ctx *ctxt;
+ struct ice_pf *pf;
+ int status;
+
+ if (!vsi)
+ return -EINVAL;
+
+ /* Don't enable VLAN pruning if the netdev is currently in promiscuous
+ * mode. VLAN pruning will be enabled when the interface exits
+ * promiscuous mode if any VLAN filters are active.
+ */
+ if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
+ return 0;
+
+ pf = vsi->back;
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+
+ if (ena)
+ ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ else
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
+
+ status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
+ if (status) {
+ netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n",
+ ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
+ ice_aq_str(pf->hw.adminq.sq_last_status));
+ goto err_out;
+ }
+
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+
+ kfree(ctxt);
+ return 0;
+
+err_out:
+ kfree(ctxt);
+ return status;
+}
+
+int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_pruning(vsi, true);
+}
+
+int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_pruning(vsi, false);
+}
+
+static int ice_cfg_vlan_antispoof(struct ice_vsi *vsi, bool enable)
+{
+ struct ice_vsi_ctx *ctx;
+ int err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->info.sec_flags = vsi->info.sec_flags;
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+
+ if (enable)
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+ else
+ ctx->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+
+ err = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to configure Tx VLAN anti-spoof %s for VSI %d, error %d\n",
+ enable ? "ON" : "OFF", vsi->vsi_num, err);
+ else
+ vsi->info.sec_flags = ctx->info.sec_flags;
+
+ kfree(ctx);
+
+ return err;
+}
+
+int ice_vsi_ena_tx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_antispoof(vsi, true);
+}
+
+int ice_vsi_dis_tx_vlan_filtering(struct ice_vsi *vsi)
+{
+ return ice_cfg_vlan_antispoof(vsi, false);
+}
+
+/**
+ * tpid_to_vsi_outer_vlan_type - convert from TPID to VSI context based tag_type
+ * @tpid: tpid used to translate into VSI context based tag_type
+ * @tag_type: output variable to hold the VSI context based tag type
+ */
+static int tpid_to_vsi_outer_vlan_type(u16 tpid, u8 *tag_type)
+{
+ switch (tpid) {
+ case ETH_P_8021Q:
+ *tag_type = ICE_AQ_VSI_OUTER_TAG_VLAN_8100;
+ break;
+ case ETH_P_8021AD:
+ *tag_type = ICE_AQ_VSI_OUTER_TAG_STAG;
+ break;
+ case ETH_P_QINQ1:
+ *tag_type = ICE_AQ_VSI_OUTER_TAG_VLAN_9100;
+ break;
+ default:
+ *tag_type = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vsi_ena_outer_stripping - enable outer VLAN stripping
+ * @vsi: VSI to configure
+ * @tpid: TPID to enable outer VLAN stripping for
+ *
+ * Enable outer VLAN stripping via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Since the VSI context only supports a single TPID for insertion and
+ * stripping, setting the TPID for stripping will affect the TPID for insertion.
+ * Callers need to be aware of this limitation.
+ *
+ * Only modify outer VLAN stripping settings and the VLAN TPID. Outer VLAN
+ * insertion settings are unmodified.
+ *
+ * This enables hardware to strip a VLAN tag with the specified TPID to be
+ * stripped from the packet and placed in the receive descriptor.
+ */
+int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ u8 tag_type;
+ int err;
+
+ /* do not allow modifying VLAN stripping when a port VLAN is configured
+ * on this VSI
+ */
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ if (tpid_to_vsi_outer_vlan_type(tpid, &tag_type))
+ return -EINVAL;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN strip settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~(ICE_AQ_VSI_OUTER_VLAN_EMODE_M | ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+ ctxt->info.outer_vlan_flags |=
+ ((ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW_BOTH <<
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
+ ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M));
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN stripping failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_dis_outer_stripping - disable outer VLAN stripping
+ * @vsi: VSI to configure
+ *
+ * Disable outer VLAN stripping via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Only modify the outer VLAN stripping settings. The VLAN TPID and outer VLAN
+ * insertion settings are unmodified.
+ *
+ * This tells the hardware to not strip any VLAN tagged packets, thus leaving
+ * them in the packet. This enables software offloaded VLAN stripping and
+ * disables hardware offloaded VLAN stripping.
+ */
+int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN strip settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~ICE_AQ_VSI_OUTER_VLAN_EMODE_M;
+ ctxt->info.outer_vlan_flags |= ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN stripping failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_ena_outer_insertion - enable outer VLAN insertion
+ * @vsi: VSI to configure
+ * @tpid: TPID to enable outer VLAN insertion for
+ *
+ * Enable outer VLAN insertion via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Since the VSI context only supports a single TPID for insertion and
+ * stripping, setting the TPID for insertion will affect the TPID for stripping.
+ * Callers need to be aware of this limitation.
+ *
+ * Only modify outer VLAN insertion settings and the VLAN TPID. Outer VLAN
+ * stripping settings are unmodified.
+ *
+ * This allows a VLAN tag with the specified TPID to be inserted in the transmit
+ * descriptor.
+ */
+int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ u8 tag_type;
+ int err;
+
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ if (tpid_to_vsi_outer_vlan_type(tpid, &tag_type))
+ return -EINVAL;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN insertion settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~(ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT |
+ ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M |
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+ ctxt->info.outer_vlan_flags |=
+ ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M) |
+ ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN insertion failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_dis_outer_insertion - disable outer VLAN insertion
+ * @vsi: VSI to configure
+ *
+ * Disable outer VLAN insertion via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * Only modify the outer VLAN insertion settings. The VLAN TPID and outer VLAN
+ * stripping settings are unmodified.
+ *
+ * This tells the hardware to not allow any VLAN tagged packets in the transmit
+ * descriptor. This enables software offloaded VLAN insertion and disables
+ * hardware offloaded VLAN insertion.
+ */
+int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ if (vsi->info.port_based_outer_vlan)
+ return 0;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID);
+ /* clear current outer VLAN insertion settings */
+ ctxt->info.outer_vlan_flags = vsi->info.outer_vlan_flags &
+ ~(ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT |
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M);
+ ctxt->info.outer_vlan_flags |=
+ ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
+ ((ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN insertion failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ else
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * __ice_vsi_set_outer_port_vlan - set the outer port VLAN and related settings
+ * @vsi: VSI to configure
+ * @vlan_info: packed u16 that contains the VLAN prio and ID
+ * @tpid: TPID of the port VLAN
+ *
+ * Set the port VLAN prio, ID, and TPID.
+ *
+ * Enable VLAN pruning so the VSI doesn't receive any traffic that doesn't match
+ * a VLAN prune rule. The caller should take care to add a VLAN prune rule that
+ * matches the port VLAN ID and TPID.
+ *
+ * Tell hardware to strip the outer VLAN tag from received packets and not to
+ * put the tag in the receive descriptor. VSI(s) in port VLANs should not be
+ * aware of the port VLAN ID or TPID they are assigned to.
+ *
+ * Tell hardware to prevent outer VLAN tag insertion on transmit and only allow
+ * untagged outer packets from the transmit descriptor.
+ *
+ * Also, tell the hardware to insert the port VLAN on transmit.
+ */
+static int
+__ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ u8 tag_type;
+ int err;
+
+ if (tpid_to_vsi_outer_vlan_type(tpid, &tag_type))
+ return -EINVAL;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ctxt->info = vsi->info;
+
+ ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+ ctxt->info.port_based_outer_vlan = cpu_to_le16(vlan_info);
+ ctxt->info.outer_vlan_flags =
+ (ICE_AQ_VSI_OUTER_VLAN_EMODE_SHOW <<
+ ICE_AQ_VSI_OUTER_VLAN_EMODE_S) |
+ ((tag_type << ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
+ ICE_AQ_VSI_OUTER_TAG_TYPE_M) |
+ ICE_AQ_VSI_OUTER_VLAN_BLOCK_TX_DESC |
+ (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ACCEPTUNTAGGED <<
+ ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) |
+ ICE_AQ_VSI_OUTER_VLAN_PORT_BASED_INSERT;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for setting outer port based VLAN failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ } else {
+ vsi->info.port_based_outer_vlan = ctxt->info.port_based_outer_vlan;
+ vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+ vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+ }
+
+ kfree(ctxt);
+ return err;
+}
+
+/**
+ * ice_vsi_set_outer_port_vlan - public version of __ice_vsi_set_outer_port_vlan
+ * @vsi: VSI to configure
+ * @vlan: ice_vlan structure used to set the port VLAN
+ *
+ * Set the outer port VLAN via VSI context. This function should only be
+ * used if DVM is supported. Also, this function should never be called directly
+ * as it should be part of ice_vsi_vlan_ops if it's needed.
+ *
+ * This function does not support clearing the port VLAN as there is currently
+ * no use case for this.
+ *
+ * Use the ice_vlan structure passed in to set this VSI in a port VLAN.
+ */
+int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+{
+ u16 port_vlan_info;
+
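+ /* VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT is 7, the highest 3-bit 802.1p priority */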
+ if (vlan->prio > (VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT))
+ return -EINVAL;
+
+ port_vlan_info = vlan->vid | (vlan->prio << VLAN_PRIO_SHIFT);
+
+ return __ice_vsi_set_outer_port_vlan(vsi, port_vlan_info, vlan->tpid);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
new file mode 100644
index 000000000000..f459909490ec
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VSI_VLAN_LIB_H_
+#define _ICE_VSI_VLAN_LIB_H_
+
+#include <linux/types.h>
+#include "ice_vlan.h"
+
+struct ice_vsi;
+
+int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_vsi_del_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+
+int ice_vsi_ena_inner_stripping(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_inner_stripping(struct ice_vsi *vsi);
+int ice_vsi_ena_inner_insertion(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi);
+int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+
+int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi);
+int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi);
+int ice_vsi_ena_tx_vlan_filtering(struct ice_vsi *vsi);
+int ice_vsi_dis_tx_vlan_filtering(struct ice_vsi *vsi);
+
+int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi);
+int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid);
+int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi);
+int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+
+#endif /* _ICE_VSI_VLAN_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
new file mode 100644
index 000000000000..4a6c850d83ac
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice_pf_vsi_vlan_ops.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_lib.h"
+#include "ice.h"
+
+static int
+op_unsupported_vlan_arg(struct ice_vsi * __always_unused vsi,
+ struct ice_vlan * __always_unused vlan)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+op_unsupported_tpid_arg(struct ice_vsi *__always_unused vsi,
+ u16 __always_unused tpid)
+{
+ return -EOPNOTSUPP;
+}
+
+static int op_unsupported(struct ice_vsi *__always_unused vsi)
+{
+ return -EOPNOTSUPP;
+}
+
+/* If any new ops are added to the VSI VLAN ops interface then an unsupported
+ * implementation should be set here.
+ */
+static struct ice_vsi_vlan_ops ops_unsupported = {
+ .add_vlan = op_unsupported_vlan_arg,
+ .del_vlan = op_unsupported_vlan_arg,
+ .ena_stripping = op_unsupported_tpid_arg,
+ .dis_stripping = op_unsupported,
+ .ena_insertion = op_unsupported_tpid_arg,
+ .dis_insertion = op_unsupported,
+ .ena_rx_filtering = op_unsupported,
+ .dis_rx_filtering = op_unsupported,
+ .ena_tx_filtering = op_unsupported,
+ .dis_tx_filtering = op_unsupported,
+ .set_port_vlan = op_unsupported_vlan_arg,
+};
+
+/**
+ * ice_vsi_init_unsupported_vlan_ops - init all VSI VLAN ops to unsupported
+ * @vsi: VSI to initialize VSI VLAN ops to unsupported for
+ *
+ * By default all inner and outer VSI VLAN ops return -EOPNOTSUPP. This was done,
+ * as opposed to leaving the ops NULL, to prevent unexpected crashes. Instead, if
+ * an unsupported VSI VLAN op is called it will just return -EOPNOTSUPP.
+ */
+static void ice_vsi_init_unsupported_vlan_ops(struct ice_vsi *vsi)
+{
+ vsi->outer_vlan_ops = ops_unsupported;
+ vsi->inner_vlan_ops = ops_unsupported;
+}
+
+/**
+ * ice_vsi_init_vlan_ops - initialize type specific VSI VLAN ops
+ * @vsi: VSI to initialize ops for
+ *
+ * If any VSI types are added and/or require different ops than the PF or VF VSI
+ * then they will have to add a case here to handle that. Also, VSI type
+ * specific files should be added in the same manner that was done for PF VSI.
+ */
+void ice_vsi_init_vlan_ops(struct ice_vsi *vsi)
+{
+ /* Initialize all VSI types to have unsupported VSI VLAN ops */
+ ice_vsi_init_unsupported_vlan_ops(vsi);
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ case ICE_VSI_SWITCHDEV_CTRL:
+ ice_pf_vsi_init_vlan_ops(vsi);
+ break;
+ case ICE_VSI_VF:
+ ice_vf_vsi_init_vlan_ops(vsi);
+ break;
+ default:
+ dev_dbg(ice_pf_to_dev(vsi->back), "%s does not support VLAN operations\n",
+ ice_vsi_type_str(vsi->type));
+ break;
+ }
+}
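
If a new VSI type ever needs its own behavior, its init helper would populate the ops it supports with the ice_vsi_vlan_lib functions declared earlier in this series; a hedged sketch with a hypothetical VSI type name:

	/* hypothetical example, not part of the driver: wire up a minimal inner-VLAN ops set */
	static void ice_foo_vsi_init_vlan_ops(struct ice_vsi *vsi)
	{
		struct ice_vsi_vlan_ops *vlan_ops = &vsi->inner_vlan_ops;

		vlan_ops->add_vlan = ice_vsi_add_vlan;
		vlan_ops->del_vlan = ice_vsi_del_vlan;
		vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
	}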
+
+/**
+ * ice_get_compat_vsi_vlan_ops - Get VSI VLAN ops based on VLAN mode
+ * @vsi: VSI used to get the VSI VLAN ops
+ *
+ * This function is meant to be used when the caller doesn't know which VLAN ops
+ * to use (i.e. inner or outer). This allows backward compatibility for VLANs
+ * since most of the outer VSI VLAN functions are not supported when
+ * the device is configured in Single VLAN Mode (SVM).
+ */
+struct ice_vsi_vlan_ops *ice_get_compat_vsi_vlan_ops(struct ice_vsi *vsi)
+{
+ if (ice_is_dvm_ena(&vsi->back->hw))
+ return &vsi->outer_vlan_ops;
+ else
+ return &vsi->inner_vlan_ops;
+}
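
As a usage sketch, callers that should work in both SVM and DVM are expected to fetch the compat ops and dispatch through the table rather than call the lib functions directly; for example:

	/* illustrative only: add a VLAN on whichever ops set (inner or outer) is active */
	static int example_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
	{
		struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

		return vlan_ops->add_vlan(vsi, vlan);
	}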
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
new file mode 100644
index 000000000000..5b47568f6256
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_VSI_VLAN_OPS_H_
+#define _ICE_VSI_VLAN_OPS_H_
+
+#include "ice_type.h"
+#include "ice_vsi_vlan_lib.h"
+
+struct ice_vsi;
+
+struct ice_vsi_vlan_ops {
+ int (*add_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+ int (*del_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+ int (*ena_stripping)(struct ice_vsi *vsi, const u16 tpid);
+ int (*dis_stripping)(struct ice_vsi *vsi);
+ int (*ena_insertion)(struct ice_vsi *vsi, const u16 tpid);
+ int (*dis_insertion)(struct ice_vsi *vsi);
+ int (*ena_rx_filtering)(struct ice_vsi *vsi);
+ int (*dis_rx_filtering)(struct ice_vsi *vsi);
+ int (*ena_tx_filtering)(struct ice_vsi *vsi);
+ int (*dis_tx_filtering)(struct ice_vsi *vsi);
+ int (*set_port_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+};
+
+void ice_vsi_init_vlan_ops(struct ice_vsi *vsi);
+struct ice_vsi_vlan_ops *ice_get_compat_vsi_vlan_ops(struct ice_vsi *vsi);
+
+#endif /* _ICE_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index bb9a80847298..d1e489da7363 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -12,6 +12,11 @@
#include "ice_txrx_lib.h"
#include "ice_lib.h"
+static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
+{
+ return &rx_ring->xdp_buf[idx];
+}
+
/**
* ice_qp_reset_stats - Resets all stats for rings of given index
* @vsi: VSI that contains rings of interest
@@ -19,13 +24,24 @@
*/
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
- memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
- sizeof(vsi->rx_rings[q_idx]->rx_stats));
- memset(&vsi->tx_rings[q_idx]->stats, 0,
- sizeof(vsi->tx_rings[q_idx]->stats));
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf;
+
+ pf = vsi->back;
+ if (!pf->vsi_stats)
+ return;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+ if (!vsi_stat)
+ return;
+
+ memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
+ sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
+ memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
+ sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
if (ice_is_xdp_ena_vsi(vsi))
- memset(&vsi->xdp_rings[q_idx]->stats, 0,
- sizeof(vsi->xdp_rings[q_idx]->stats));
+ memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
+ sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
}
/**
@@ -36,8 +52,10 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
ice_clean_tx_ring(vsi->tx_rings[q_idx]);
- if (ice_is_xdp_ena_vsi(vsi))
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ synchronize_rcu();
ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+ }
ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}
@@ -166,8 +184,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
}
netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
-
ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
if (err)
@@ -182,6 +198,8 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
}
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
@@ -236,7 +254,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+ ice_tx_xsk_pool(vsi, q_idx);
}
err = ice_vsi_cfg_rxq(rx_ring);
@@ -310,6 +328,62 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
}
/**
+ * ice_realloc_rx_xdp_bufs - reallocate for either XSK or normal buffer
+ * @rx_ring: Rx ring
+ * @pool_present: is pool for XSK present
+ *
+ * Try to allocate memory and return -ENOMEM if the allocation fails.
+ * If the allocation was successful, substitute the buffer with the allocated one.
+ * Returns 0 on success, negative on failure.
+ */
+static int
+ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
+{
+ size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
+ sizeof(*rx_ring->rx_buf);
+ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+ if (!sw_ring)
+ return -ENOMEM;
+
+ if (pool_present) {
+ kfree(rx_ring->rx_buf);
+ rx_ring->rx_buf = NULL;
+ rx_ring->xdp_buf = sw_ring;
+ } else {
+ kfree(rx_ring->xdp_buf);
+ rx_ring->xdp_buf = NULL;
+ rx_ring->rx_buf = sw_ring;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate buffers for the rx_rings that might be used by XSK.
+ * XDP requires more memory than rx_buf provides.
+ * Returns 0 on success, negative on failure.
+ */
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
+{
+ struct ice_rx_ring *rx_ring;
+ unsigned long q;
+
+ for_each_set_bit(q, vsi->af_xdp_zc_qps,
+ max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
+ rx_ring = vsi->rx_rings[q];
+ if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
* ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
* @vsi: Current VSI
* @pool: buffer pool to enable/associate to a ring, NULL to disable
@@ -322,14 +396,26 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
bool if_running, pool_present = !!pool;
int ret = 0, pool_failure = 0;
+ if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
+ netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
+ pool_failure = -EINVAL;
+ goto failure;
+ }
+
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
if (if_running) {
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
+
ret = ice_qp_dis(vsi, qid);
if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
goto xsk_pool_if_up;
}
+
+ ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
+ if (ret)
+ goto xsk_pool_if_up;
}
pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
@@ -339,11 +425,12 @@ xsk_pool_if_up:
if (if_running) {
ret = ice_qp_ena(vsi, qid);
if (!ret && pool_present)
- napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
+ napi_schedule(&vsi->rx_rings[qid]->xdp_ring->q_vector->napi);
else if (ret)
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
+failure:
if (pool_failure) {
netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
pool_present ? "en" : "dis", pool_failure);
@@ -354,33 +441,28 @@ xsk_pool_if_up:
}
/**
- * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
- * @rx_ring: Rx ring
+ * ice_fill_rx_descs - pick buffers from XSK buffer pool and use it
+ * @pool: XSK Buffer pool to pull the buffers from
+ * @xdp: SW ring of xdp_buff that will hold the buffers
+ * @rx_desc: Pointer to Rx descriptors that will be filled
* @count: The number of buffers to allocate
*
* This function allocates a number of Rx buffers from the fill ring
* or the internal recycle mechanism and places them on the Rx ring.
*
- * Returns true if all allocations were successful, false if any fail.
+ * Note that ring wrap should be handled by the caller of this function.
+ *
+ * Returns the number of allocated Rx descriptors
*/
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
+ union ice_32b_rx_flex_desc *rx_desc, u16 count)
{
- union ice_32b_rx_flex_desc *rx_desc;
- u16 ntu = rx_ring->next_to_use;
- struct xdp_buff **xdp;
- u32 nb_buffs, i;
dma_addr_t dma;
+ u16 buffs;
+ int i;
- rx_desc = ICE_RX_DESC(rx_ring, ntu);
- xdp = &rx_ring->xdp_buf[ntu];
-
- nb_buffs = min_t(u16, count, rx_ring->count - ntu);
- nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
- if (!nb_buffs)
- return false;
-
- i = nb_buffs;
- while (i--) {
+ buffs = xsk_buff_alloc_batch(pool, xdp, count);
+ for (i = 0; i < buffs; i++) {
dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
@@ -389,18 +471,81 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
xdp++;
}
- ntu += nb_buffs;
- if (ntu == rx_ring->count) {
+ return buffs;
+}
+
+/**
+ * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * Place @count descriptors onto the Rx ring. Handle the ring wrap for the case
+ * where the space from next_to_use up to the end of the ring is less than
+ * @count. Finally do a tail bump.
+ *
+ * Returns true if all allocations were successful, false if any fail.
+ */
+static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+{
+ u32 nb_buffs_extra = 0, nb_buffs = 0;
+ union ice_32b_rx_flex_desc *rx_desc;
+ u16 ntu = rx_ring->next_to_use;
+ u16 total_count = count;
+ struct xdp_buff **xdp;
+
+ rx_desc = ICE_RX_DESC(rx_ring, ntu);
+ xdp = ice_xdp_buf(rx_ring, ntu);
+
+ if (ntu + count >= rx_ring->count) {
+ nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
+ rx_desc,
+ rx_ring->count - ntu);
+ if (nb_buffs_extra != rx_ring->count - ntu) {
+ ntu += nb_buffs_extra;
+ goto exit;
+ }
rx_desc = ICE_RX_DESC(rx_ring, 0);
- xdp = rx_ring->xdp_buf;
+ xdp = ice_xdp_buf(rx_ring, 0);
ntu = 0;
+ count -= nb_buffs_extra;
+ ice_release_rx_desc(rx_ring, 0);
}
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.status_error0 = 0;
- ice_release_rx_desc(rx_ring, ntu);
+ nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);
+
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count)
+ ntu = 0;
- return count == nb_buffs;
+exit:
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
+
+ return total_count == (nb_buffs_extra + nb_buffs);
+}
+
+/**
+ * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
+ * @rx_ring: Rx ring
+ * @count: The number of buffers to allocate
+ *
+ * Wrapper for internal allocation routine; figure out how many tail
+ * bumps should take place based on the given threshold
+ *
+ * Returns true if all calls to internal alloc routine succeeded
+ */
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+{
+ u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
+ u16 leftover, i, tail_bumps;
+
+ tail_bumps = count / rx_thresh;
+ leftover = count - (tail_bumps * rx_thresh);
+
+ for (i = 0; i < tail_bumps; i++)
+ if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
+ return false;
+ return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
}
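
To make the threshold math concrete: on a 1024-descriptor ring the quarter threshold is 256, so a request for 600 buffers becomes two full batches of 256 plus a final call for the 88 leftover descriptors, one tail bump per batch. A small sketch of that arithmetic, assuming those sizes:

	/* worked example of the wrapper's split; values are illustrative */
	u16 rx_thresh = 1024 / 4;			/* ICE_RING_QUARTER() of a 1024 ring */
	u16 count = 600;
	u16 tail_bumps = count / rx_thresh;		/* 2 */
	u16 leftover = count - tail_bumps * rx_thresh;	/* 88 */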
/**
@@ -419,37 +564,145 @@ static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
/**
* ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
* @rx_ring: Rx ring
- * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
+ * @xdp: Pointer to XDP buffer
*
* This function allocates a new skb from a zero-copy Rx buffer.
*
* Returns the skb on success, NULL on failure.
*/
static struct sk_buff *
-ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
+ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
- struct xdp_buff *xdp = *xdp_arr;
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
- unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
struct sk_buff *skb;
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
+ net_prefetch(xdp->data_meta);
+
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), xdp->data, datasize);
- if (metasize)
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
xsk_buff_free(xdp);
- *xdp_arr = NULL;
return skb;
}
/**
+ * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
+ * @xdp_ring: XDP Tx ring
+ */
+static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+{
+ u16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ u16 cnt = xdp_ring->count;
+ struct ice_tx_buf *tx_buf;
+ u16 completed_frames = 0;
+ u16 xsk_frames = 0;
+ u16 last_rs;
+ int i;
+
+ last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
+ tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
+ if (tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
+ if (last_rs >= ntc)
+ completed_frames = last_rs - ntc + 1;
+ else
+ completed_frames = last_rs + cnt - ntc + 1;
+ }
+
+ if (!completed_frames)
+ return;
+
+ if (likely(!xdp_ring->xdp_tx_active)) {
+ xsk_frames = completed_frames;
+ goto skip;
+ }
+
+ ntc = xdp_ring->next_to_clean;
+ for (i = 0; i < completed_frames; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
+
+ if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
+ tx_buf->type = ICE_TX_BUF_EMPTY;
+ xsk_buff_free(tx_buf->xdp);
+ xdp_ring->xdp_tx_active--;
+ } else {
+ xsk_frames++;
+ }
+
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
+skip:
+ tx_desc->cmd_type_offset_bsz = 0;
+ xdp_ring->next_to_clean += completed_frames;
+ if (xdp_ring->next_to_clean >= cnt)
+ xdp_ring->next_to_clean -= cnt;
+ if (xsk_frames)
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+}
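
The completed_frames computation above also covers the wrap-around case: with a 512-entry ring, next_to_clean at 500 and the last RS descriptor at index 10, 10 + 512 - 500 + 1 = 23 frames have completed. A hedged sketch of that math:

	/* worked example only; the values are made up */
	u16 cnt = 512, ntc = 500, last_rs = 10;
	u16 completed_frames = (last_rs >= ntc) ? last_rs - ntc + 1 :
						  last_rs + cnt - ntc + 1;	/* 23 */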
+
+/**
+ * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
+ * @xdp: XDP buffer to xmit
+ * @xdp_ring: XDP ring to produce descriptor onto
+ *
+ * Note that this function works directly on the xdp_buff; there is no need to
+ * convert it to an xdp_frame. The xdp_buff pointer is stored in the ice_tx_buf so
+ * that the cleaning side will be able to xsk_buff_free() it.
+ *
+ * Returns ICE_XDP_TX for a successfully produced descriptor, ICE_XDP_CONSUMED if
+ * there was not enough space on the XDP ring
+ */
+static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
+ struct ice_tx_ring *xdp_ring)
+{
+ u32 size = xdp->data_end - xdp->data;
+ u32 ntu = xdp_ring->next_to_use;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+ dma_addr_t dma;
+
+ if (ICE_DESC_UNUSED(xdp_ring) < ICE_RING_QUARTER(xdp_ring)) {
+ ice_clean_xdp_irq_zc(xdp_ring);
+ if (!ICE_DESC_UNUSED(xdp_ring)) {
+ xdp_ring->ring_stats->tx_stats.tx_busy++;
+ return ICE_XDP_CONSUMED;
+ }
+ }
+
+ dma = xsk_buff_xdp_get_dma(xdp);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
+
+ tx_buf = &xdp_ring->tx_buf[ntu];
+ tx_buf->xdp = xdp;
+ tx_buf->type = ICE_TX_BUF_XSK_TX;
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ 0, size, 0);
+ xdp_ring->xdp_tx_active++;
+
+ if (++ntu == xdp_ring->count)
+ ntu = 0;
+ xdp_ring->next_to_use = ntu;
+
+ return ICE_XDP_TX;
+}
+
+/**
* ice_run_xdp_zc - Executes an XDP program in zero-copy path
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
@@ -469,28 +722,33 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
- goto out_failure;
- return ICE_XDP_REDIR;
+ if (!err)
+ return ICE_XDP_REDIR;
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = ICE_XDP_EXIT;
+ else
+ result = ICE_XDP_CONSUMED;
+ goto out_failure;
}
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
- result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
if (result == ICE_XDP_CONSUMED)
goto out_failure;
break;
+ case XDP_DROP:
+ result = ICE_XDP_CONSUMED;
+ break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
+ result = ICE_XDP_CONSUMED;
out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- fallthrough;
- case XDP_DROP:
- result = ICE_XDP_CONSUMED;
break;
}
@@ -507,11 +765,11 @@ out_failure:
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
bool failure = false;
+ int entries_to_alloc;
/* ZC patch is enabled only when XDP program is set,
* so here it can not be NULL
@@ -522,7 +780,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0;
- struct xdp_buff **xdp;
+ struct xdp_buff *xdp;
struct sk_buff *skb;
u16 stat_err_bits;
u16 vlan_tag = 0;
@@ -531,7 +789,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
- if (!ice_test_staterr(rx_desc, stat_err_bits))
+ if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
break;
/* This memory barrier is needed to keep us from reading
@@ -540,39 +798,50 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
*/
dma_rmb();
- size = le16_to_cpu(rx_desc->wb.pkt_len) &
- ICE_RX_FLX_DESC_PKT_LEN_M;
- if (!size)
+ if (unlikely(rx_ring->next_to_clean == rx_ring->next_to_use))
break;
- xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
- xsk_buff_set_size(*xdp, size);
- xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
+ xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
- xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring);
- if (xdp_res) {
- if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
- xdp_xmit |= xdp_res;
- else
- xsk_buff_free(*xdp);
+ size = le16_to_cpu(rx_desc->wb.pkt_len) &
+ ICE_RX_FLX_DESC_PKT_LEN_M;
+ if (!size) {
+ xdp->data = NULL;
+ xdp->data_end = NULL;
+ xdp->data_hard_start = NULL;
+ xdp->data_meta = NULL;
+ goto construct_skb;
+ }
- *xdp = NULL;
- total_rx_bytes += size;
- total_rx_packets++;
- cleaned_count++;
+ xsk_buff_set_size(xdp, size);
+ xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
- ice_bump_ntc(rx_ring);
- continue;
+ xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
+ if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == ICE_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == ICE_XDP_CONSUMED) {
+ xsk_buff_free(xdp);
+ } else if (xdp_res == ICE_XDP_PASS) {
+ goto construct_skb;
}
+ total_rx_bytes += size;
+ total_rx_packets++;
+
+ ice_bump_ntc(rx_ring);
+ continue;
+
+construct_skb:
/* XDP_PASS path */
skb = ice_construct_skb_zc(rx_ring, xdp);
if (!skb) {
- rx_ring->rx_stats.alloc_buf_failed++;
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
break;
}
- cleaned_count++;
ice_bump_ntc(rx_ring);
if (eth_skb_pad(skb)) {
@@ -583,9 +852,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
total_rx_bytes += skb->len;
total_rx_packets++;
- stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
- if (ice_test_staterr(rx_desc, stat_err_bits))
- vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
+ vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
ICE_RX_FLEX_DESC_PTYPE_M;
@@ -594,10 +861,11 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
ice_receive_skb(rx_ring, skb, vlan_tag);
}
- if (cleaned_count >= ICE_RX_BUF_WRITE)
- failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
+ entries_to_alloc = ICE_DESC_UNUSED(rx_ring);
+ if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
+ failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
- ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
@@ -613,134 +881,117 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
}
/**
- * ice_xmit_zc - Completes AF_XDP entries, and cleans XDP entries
- * @xdp_ring: XDP Tx ring
- * @budget: max number of frames to xmit
- *
- * Returns true if cleanup/transmission is done.
+ * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
+ * @xdp_ring: XDP ring to produce the HW Tx descriptor on
+ * @desc: AF_XDP descriptor to pull the DMA address and length from
+ * @total_bytes: bytes accumulator that will be used for stats update
*/
-static bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, int budget)
+static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
+ unsigned int *total_bytes)
{
- struct ice_tx_desc *tx_desc = NULL;
- bool work_done = true;
- struct xdp_desc desc;
+ struct ice_tx_desc *tx_desc;
dma_addr_t dma;
- while (likely(budget-- > 0)) {
- struct ice_tx_buf *tx_buf;
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
- if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
- xdp_ring->tx_stats.tx_busy++;
- work_done = false;
- break;
- }
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ 0, desc->len, 0);
- tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
+ *total_bytes += desc->len;
+}
- if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
- break;
+/**
+ * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
+ * @total_bytes: bytes accumulator that will be used for stats update
+ */
+static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+ unsigned int *total_bytes)
+{
+ u16 ntu = xdp_ring->next_to_use;
+ struct ice_tx_desc *tx_desc;
+ u32 i;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
- desc.len);
+ loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ dma_addr_t dma;
- tx_buf->bytecount = desc.len;
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz =
- ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ 0, descs[i].len, 0);
- xdp_ring->next_to_use++;
- if (xdp_ring->next_to_use == xdp_ring->count)
- xdp_ring->next_to_use = 0;
+ *total_bytes += descs[i].len;
}
- if (tx_desc) {
- ice_xdp_ring_update_tail(xdp_ring);
- xsk_tx_release(xdp_ring->xsk_pool);
- }
-
- return budget > 0 && work_done;
+ xdp_ring->next_to_use = ntu;
}
/**
- * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
- * @xdp_ring: XDP Tx ring
- * @tx_buf: Tx buffer to clean
+ * ice_fill_tx_hw_ring - produce a given number of Tx descriptors onto the ring
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
+ * @nb_pkts: count of packets to be sent
+ * @total_bytes: bytes accumulator that will be used for stats update
*/
-static void
-ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+ u32 nb_pkts, unsigned int *total_bytes)
{
- xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
- dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
+ u32 batched, leftover, i;
+
+ batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
+ leftover = nb_pkts & (PKTS_PER_BATCH - 1);
+ for (i = 0; i < batched; i += PKTS_PER_BATCH)
+ ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
+ for (; i < batched + leftover; i++)
+ ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}
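
Because PKTS_PER_BATCH is a power of two, the leftover count can be taken with a mask: for 29 packets, batched = ALIGN_DOWN(29, 8) = 24 and leftover = 29 & 7 = 5. A short sketch under those assumptions:

	/* worked example of the batch split; nb_pkts is illustrative */
	u32 nb_pkts = 29;
	u32 batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);	/* 24, sent 8 at a time */
	u32 leftover = nb_pkts & (PKTS_PER_BATCH - 1);		/* 5, sent one at a time */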
/**
- * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
- * @xdp_ring: XDP Tx ring
- * @budget: NAPI budget
+ * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
*
- * Returns true if cleanup/tranmission is done.
+ * Returns true if there is no more work that needs to be done, false otherwise
*/
-bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget)
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
{
- int total_packets = 0, total_bytes = 0;
- s16 ntc = xdp_ring->next_to_clean;
- struct ice_tx_desc *tx_desc;
- struct ice_tx_buf *tx_buf;
- u32 xsk_frames = 0;
- bool xmit_done;
-
- tx_desc = ICE_TX_DESC(xdp_ring, ntc);
- tx_buf = &xdp_ring->tx_buf[ntc];
- ntc -= xdp_ring->count;
+ struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
+ u32 nb_pkts, nb_processed = 0;
+ unsigned int total_bytes = 0;
+ int budget;
- do {
- if (!(tx_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
- break;
+ ice_clean_xdp_irq_zc(xdp_ring);
- total_bytes += tx_buf->bytecount;
- total_packets++;
+ budget = ICE_DESC_UNUSED(xdp_ring);
+ budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
- if (tx_buf->raw_buf) {
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
- tx_buf->raw_buf = NULL;
- } else {
- xsk_frames++;
- }
+ nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
+ if (!nb_pkts)
+ return true;
- tx_desc->cmd_type_offset_bsz = 0;
- tx_buf++;
- tx_desc++;
- ntc++;
-
- if (unlikely(!ntc)) {
- ntc -= xdp_ring->count;
- tx_buf = xdp_ring->tx_buf;
- tx_desc = ICE_TX_DESC(xdp_ring, 0);
- }
-
- prefetch(tx_desc);
-
- } while (likely(--budget));
+ if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
+ nb_processed = xdp_ring->count - xdp_ring->next_to_use;
+ ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
+ xdp_ring->next_to_use = 0;
+ }
- ntc += xdp_ring->count;
- xdp_ring->next_to_clean = ntc;
+ ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
+ &total_bytes);
- if (xsk_frames)
- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+ ice_set_rs_bit(xdp_ring);
+ ice_xdp_ring_update_tail(xdp_ring);
+ ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
- ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
- xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
-
- return budget > 0 && xmit_done;
+ return nb_pkts < budget;
}
/**
@@ -760,19 +1011,19 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
struct ice_vsi *vsi = np->vsi;
struct ice_tx_ring *ring;
- if (test_bit(ICE_DOWN, vsi->state))
+ if (test_bit(ICE_VSI_DOWN, vsi->state))
return -ENETDOWN;
if (!ice_is_xdp_ena_vsi(vsi))
- return -ENXIO;
+ return -EINVAL;
- if (queue_id >= vsi->num_txq)
- return -ENXIO;
+ if (queue_id >= vsi->num_txq || queue_id >= vsi->num_rxq)
+ return -EINVAL;
- if (!vsi->xdp_rings[queue_id]->xsk_pool)
- return -ENXIO;
+ ring = vsi->rx_rings[queue_id]->xdp_ring;
- ring = vsi->xdp_rings[queue_id];
+ if (!ring->xsk_pool)
+ return -EINVAL;
/* The idea here is that if NAPI is running, mark a miss, so
* it will run again. If not, trigger an interrupt and
@@ -811,15 +1062,16 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
*/
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
- u16 i;
-
- for (i = 0; i < rx_ring->count; i++) {
- struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
- if (!xdp)
- continue;
+ while (ntc != ntu) {
+ struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
- *xdp = NULL;
+ xsk_buff_free(xdp);
+ ntc++;
+ if (ntc >= rx_ring->count)
+ ntc = 0;
}
}
@@ -835,12 +1087,12 @@ void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
while (ntc != ntu) {
struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
- if (tx_buf->raw_buf)
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
- else
+ if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
+ tx_buf->type = ICE_TX_BUF_EMPTY;
+ xsk_buff_free(tx_buf->xdp);
+ } else {
xsk_frames++;
-
- tx_buf->raw_buf = NULL;
+ }
ntc++;
if (ntc >= xdp_ring->count)
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 4c7bd8e9dfc4..6fa181f080ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -4,7 +4,16 @@
#ifndef _ICE_XSK_H_
#define _ICE_XSK_H_
#include "ice_txrx.h"
-#include "ice.h"
+
+#define PKTS_PER_BATCH 8
+
+#ifdef __clang__
+#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
+#elif __GNUC__ >= 8
+#define loop_unrolled_for _Pragma("GCC unroll 8") for
+#else
+#define loop_unrolled_for for
+#endif
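
The loop_unrolled_for helper simply prefixes a normal for statement with a compiler-specific unroll pragma and degrades to a plain loop on other compilers. A hedged usage sketch, with process_desc() as a placeholder rather than a driver function:

	/* expands to _Pragma("GCC unroll 8") for (...) on GCC >= 8 */
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++)
		process_desc(&descs[i]);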
struct ice_vsi;
@@ -12,13 +21,19 @@ struct ice_vsi;
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
-bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else
+static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
+{
+ return false;
+}
+
static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
struct xsk_buff_pool __always_unused *pool,
@@ -35,13 +50,6 @@ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
}
static inline bool
-ice_clean_tx_irq_zc(struct ice_tx_ring __always_unused *xdp_ring,
- int __always_unused budget)
-{
- return false;
-}
-
-static inline bool
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
u16 __always_unused count)
{
@@ -62,5 +70,12 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
+
+static inline int
+ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
+ bool __always_unused zc)
+{
+ return 0;
+}
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index cbe92fd23a70..8d6e44ee1895 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2207,7 +2207,7 @@ out:
* igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
* @hw: pointer to the HW structure
*
- * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
* the values found in the EEPROM. This addresses an issue in which these
* bits are not restored from EEPROM after reset.
**/
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index ca5429774994..fa028928482f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1033,9 +1033,6 @@
#define E1000_VFTA_ENTRY_MASK 0x7F
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
-/* DMA Coalescing register fields */
-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */
-
/* Tx Rate-Scheduler Config fields */
#define E1000_RTTBCNRC_RS_ENA 0x80000000
#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 9265901455cd..b9b9d35494d2 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -792,7 +792,6 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
**/
s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
{
- s32 ret_val = 0;
struct e1000_nvm_info *nvm = &hw->nvm;
nvm->ops.acquire = igb_acquire_nvm_i210;
@@ -813,7 +812,7 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
nvm->ops.validate = NULL;
nvm->ops.update = NULL;
}
- return ret_val;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 1277c5c7d099..205d577bdbba 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -854,7 +854,7 @@ s32 igb_force_mac_fc(struct e1000_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 9cb49980ec2d..eb9f6da9208a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -116,7 +116,6 @@
#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */
#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */
#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */
-#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
/* TX Rate Limit Registers */
#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 2d3daf022651..015b78144114 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -664,6 +664,8 @@ struct igb_adapter {
struct igb_mac_addr *mac_table;
struct vf_mac_filter vf_macs;
struct vf_mac_filter *vf_mac_list;
+ /* lock for VF resources */
+ spinlock_t vfs_lock;
};
/* flags controlling PTP/1588 function */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index fb1029352c3e..7d60da1b7bf4 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -850,21 +850,23 @@ static void igb_get_drvinfo(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
/* EEPROM image version # is reported as firmware version # for
* 82575 controllers
*/
- strlcpy(drvinfo->fw_version, adapter->fw_version,
+ strscpy(drvinfo->fw_version, adapter->fw_version,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN;
}
static void igb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -875,7 +877,9 @@ static void igb_get_ringparam(struct net_device *netdev,
}
static int igb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct igb_ring *temp_ring;
@@ -961,10 +965,6 @@ static int igb_set_ringparam(struct net_device *netdev,
memcpy(&temp_ring[i], adapter->rx_ring[i],
sizeof(struct igb_ring));
- /* Clear copied XDP RX-queue info */
- memset(&temp_ring[i].xdp_rxq, 0,
- sizeof(temp_ring[i].xdp_rxq));
-
temp_ring[i].count = new_rx_count;
err = igb_setup_rx_resources(&temp_ring[i]);
if (err) {
@@ -1413,6 +1413,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
*data = 1;
return -1;
}
+ wr32(E1000_IVAR_MISC, E1000_IVAR_VALID << 8);
+ wr32(E1000_EIMS, BIT(0));
} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
shared_int = false;
if (request_irq(irq,
@@ -1798,14 +1800,14 @@ static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
frame_size >>= 1;
- data = kmap(rx_buffer->page);
+ data = kmap_local_page(rx_buffer->page);
if (data[3] != 0xFF ||
data[frame_size + 10] != 0xBE ||
data[frame_size + 12] != 0xAF)
match = false;
- kunmap(rx_buffer->page);
+ kunmap_local(data);
return match;
}
@@ -2311,15 +2313,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
ring = adapter->tx_ring[j];
do {
- start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+ start = u64_stats_fetch_begin(&ring->tx_syncp);
data[i] = ring->tx_stats.packets;
data[i+1] = ring->tx_stats.bytes;
data[i+2] = ring->tx_stats.restart_queue;
- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
do {
- start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
+ start = u64_stats_fetch_begin(&ring->tx_syncp2);
restart2 = ring->tx_stats.restart_queue2;
- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
+ } while (u64_stats_fetch_retry(&ring->tx_syncp2, start));
data[i+2] += restart2;
i += IGB_TX_QUEUE_STATS_LEN;
@@ -2327,13 +2329,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
for (j = 0; j < adapter->num_rx_queues; j++) {
ring = adapter->rx_ring[j];
do {
- start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+ start = u64_stats_fetch_begin(&ring->rx_syncp);
data[i] = ring->rx_stats.packets;
data[i+1] = ring->rx_stats.bytes;
data[i+2] = ring->rx_stats.drops;
data[i+3] = ring->rx_stats.csum_err;
data[i+4] = ring->rx_stats.alloc_failed;
- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
i += IGB_RX_QUEUE_STATS_LEN;
}
spin_unlock(&adapter->stats64_lock);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b597b8bfb910..58872a4c2540 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -28,7 +28,6 @@
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
-#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
@@ -109,6 +108,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
+static void igb_init_queue_configuration(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
int igb_close(struct net_device *);
@@ -175,9 +175,7 @@ static void igb_nfc_filter_restore(struct igb_adapter *adapter);
#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
-static int igb_disable_sriov(struct pci_dev *dev);
-static int igb_pci_disable_sriov(struct pci_dev *dev);
+static int igb_disable_sriov(struct pci_dev *dev, bool reinit);
#endif
static int igb_suspend(struct device *);
@@ -1195,15 +1193,19 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
return -ENOMEM;
ring_count = txr_count + rxr_count;
- size = struct_size(q_vector, ring, ring_count);
+ size = kmalloc_size_roundup(struct_size(q_vector, ring, ring_count));
/* allocate q_vector and rings */
q_vector = adapter->q_vector[v_idx];
if (!q_vector) {
q_vector = kzalloc(size, GFP_KERNEL);
} else if (size > ksize(q_vector)) {
- kfree_rcu(q_vector, rcu);
- q_vector = kzalloc(size, GFP_KERNEL);
+ struct igb_q_vector *new_q_vector;
+
+ new_q_vector = kzalloc(size, GFP_KERNEL);
+ if (new_q_vector)
+ kfree_rcu(q_vector, rcu);
+ q_vector = new_q_vector;
} else {
memset(q_vector, 0, size);
}
@@ -1211,8 +1213,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- igb_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -1945,7 +1946,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
* However, when we do so, no frame from queue 2 and 3 are
* transmitted. It seems the MAX_TPKT_SIZE should not be great
* or _equal_ to the buffer size programmed in TXPBS. For this
- * reason, we set set MAX_ TPKT_SIZE to (4kB - 1) / 64.
+ * reason, we set MAX_ TPKT_SIZE to (4kB - 1) / 64.
*/
val = (4096 - 1) / 64;
wr32(E1000_I210_DTXMXPKTSZ, val);
@@ -2253,6 +2254,30 @@ static void igb_enable_mas(struct igb_adapter *adapter)
}
}
+#ifdef CONFIG_IGB_HWMON
+/**
+ * igb_set_i2c_bb - Init I2C interface
+ * @hw: pointer to hardware structure
+ **/
+static void igb_set_i2c_bb(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+ s32 i2cctl;
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+ wrfl();
+
+ i2cctl = rd32(E1000_I2CPARAMS);
+ i2cctl |= E1000_I2CBB_EN
+ | E1000_I2C_CLK_OE_N
+ | E1000_I2C_DATA_OE_N;
+ wr32(E1000_I2CPARAMS, i2cctl);
+ wrfl();
+}
+#endif
+
void igb_reset(struct igb_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@@ -2397,7 +2422,8 @@ void igb_reset(struct igb_adapter *adapter)
* interface.
*/
if (adapter->ets)
- mac->ops.init_thermal_sensor_thresh(hw);
+ igb_set_i2c_bb(hw);
+ mac->ops.init_thermal_sensor_thresh(hw);
}
}
#endif
@@ -2807,6 +2833,22 @@ static int igb_offload_txtime(struct igb_adapter *adapter,
return 0;
}
+static int igb_tc_query_caps(struct igb_adapter *adapter,
+ struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->broken_mqprio = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static LIST_HEAD(igb_block_cb_list);
static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -2815,6 +2857,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
struct igb_adapter *adapter = netdev_priv(dev);
switch (type) {
+ case TC_QUERY_CAPS:
+ return igb_tc_query_caps(adapter, type_data);
case TC_SETUP_QDISC_CBS:
return igb_offload_cbs(adapter, type_data);
case TC_SETUP_BLOCK:
@@ -2868,8 +2912,14 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
bpf_prog_put(old_prog);
/* bpf is just replaced, RXQ and MTU are already setup */
- if (!need_reset)
+ if (!need_reset) {
return 0;
+ } else {
+ if (prog)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+ }
if (running)
igb_open(dev);
@@ -2927,7 +2977,7 @@ static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
nq = txring_txq(tx_ring);
__netif_tx_lock(nq, cpu);
/* Avoid transmit queue timeout since we share it with the slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
__netif_tx_unlock(nq);
@@ -2961,7 +3011,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
__netif_tx_lock(nq, cpu);
/* Avoid transmit queue timeout since we share it with the slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
@@ -3114,21 +3164,12 @@ static void igb_init_mas(struct igb_adapter *adapter)
**/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
s32 status = 0;
- s32 i2cctl;
/* I2C interface supported on i350 devices */
if (adapter->hw.mac.type != e1000_i350)
return 0;
- i2cctl = rd32(E1000_I2CPARAMS);
- i2cctl |= E1000_I2CBB_EN
- | E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N
- | E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
- wr32(E1000_I2CPARAMS, i2cctl);
- wrfl();
-
/* Initialize the i2c bus which is controlled by the registers.
* This bus will use the i2c_algo_bit structure that implements
* the protocol through toggling of the 4 bits in the register.
@@ -3138,7 +3179,7 @@ static s32 igb_init_i2c(struct igb_adapter *adapter)
adapter->i2c_algo.data = adapter;
adapter->i2c_adap.algo_data = &adapter->i2c_algo;
adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
- strlcpy(adapter->i2c_adap.name, "igb BB",
+ strscpy(adapter->i2c_adap.name, "igb BB",
sizeof(adapter->i2c_adap.name));
status = i2c_bit_add_bus(&adapter->i2c_adap);
return status;
@@ -3164,8 +3205,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
s32 ret_val;
static int global_quad_port_a; /* global quad port a indication */
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
- int err, pci_using_dac;
u8 part_str[E1000_PBANUM_LENGTH];
+ int err;
/* Catch broken hardware that put the wrong VF device ID in
* the PCIe SR-IOV capability.
@@ -3180,25 +3221,17 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, igb_driver_name);
if (err)
goto err_pci_reg;
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
pci_save_state(pdev);
@@ -3306,8 +3339,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->mac.type >= e1000_i350)
netdev->hw_features |= NETIF_F_NTUPLE;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
@@ -3321,6 +3353,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -3525,6 +3558,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->ets = true;
else
adapter->ets = false;
+ /* Only enable I2C bit banging if an external thermal
+ * sensor is supported.
+ */
+ if (adapter->ets)
+ igb_set_i2c_bb(hw);
+ hw->mac.ops.init_thermal_sensor_thresh(hw);
if (igb_sysfs_init(adapter))
dev_err(&pdev->dev,
"failed to allocate sysfs resources\n");
@@ -3624,13 +3663,12 @@ err_sw_init:
kfree(adapter->shadow_vfta);
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
- igb_disable_sriov(pdev);
+ igb_disable_sriov(pdev, false);
#endif
pci_iounmap(pdev, adapter->io_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -3639,11 +3677,43 @@ err_dma:
}
#ifdef CONFIG_PCI_IOV
-static int igb_disable_sriov(struct pci_dev *pdev)
+static int igb_sriov_reinit(struct pci_dev *dev)
+{
+ struct net_device *netdev = pci_get_drvdata(dev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+
+ rtnl_lock();
+
+ if (netif_running(netdev))
+ igb_close(netdev);
+ else
+ igb_reset(adapter);
+
+ igb_clear_interrupt_scheme(adapter);
+
+ igb_init_queue_configuration(adapter);
+
+ if (igb_init_interrupt_scheme(adapter, true)) {
+ rtnl_unlock();
+ dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ return -ENOMEM;
+ }
+
+ if (netif_running(netdev))
+ igb_open(netdev);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int igb_disable_sriov(struct pci_dev *pdev, bool reinit)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
/* reclaim resources allocated to VFs */
if (adapter->vf_data) {
@@ -3656,12 +3726,13 @@ static int igb_disable_sriov(struct pci_dev *pdev)
pci_disable_sriov(pdev);
msleep(500);
}
-
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
kfree(adapter->vf_mac_list);
adapter->vf_mac_list = NULL;
kfree(adapter->vf_data);
adapter->vf_data = NULL;
adapter->vfs_allocated_count = 0;
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
wrfl();
msleep(100);
@@ -3671,10 +3742,10 @@ static int igb_disable_sriov(struct pci_dev *pdev)
adapter->flags |= IGB_FLAG_DMAC;
}
- return 0;
+ return reinit ? igb_sriov_reinit(pdev) : 0;
}
-static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
+static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -3739,12 +3810,6 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
"Unable to allocate memory for VF MAC filter list\n");
}
- /* only call pci_enable_sriov() if no VFs are allocated already */
- if (!old_vfs) {
- err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
- if (err)
- goto err_out;
- }
dev_info(&pdev->dev, "%d VFs allocated\n",
adapter->vfs_allocated_count);
for (i = 0; i < adapter->vfs_allocated_count; i++)
@@ -3752,6 +3817,17 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
/* DMA Coalescing is not supported in IOV mode. */
adapter->flags &= ~IGB_FLAG_DMAC;
+
+ if (reinit) {
+ err = igb_sriov_reinit(pdev);
+ if (err)
+ goto err_out;
+ }
+
+ /* only call pci_enable_sriov() if no VFs are allocated already */
+ if (!old_vfs)
+ err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+
goto out;
err_out:
@@ -3821,7 +3897,7 @@ static void igb_remove(struct pci_dev *pdev)
igb_release_hw_control(adapter);
#ifdef CONFIG_PCI_IOV
- igb_disable_sriov(pdev);
+ igb_disable_sriov(pdev, false);
#endif
unregister_netdev(netdev);
@@ -3837,8 +3913,6 @@ static void igb_remove(struct pci_dev *pdev)
kfree(adapter->shadow_vfta);
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
@@ -3869,7 +3943,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
igb_reset_interrupt_capability(adapter);
pci_sriov_set_totalvfs(pdev, 7);
- igb_enable_sriov(pdev, max_vfs);
+ igb_enable_sriov(pdev, max_vfs, false);
#endif /* CONFIG_PCI_IOV */
}
@@ -3981,6 +4055,9 @@ static int igb_sw_init(struct igb_adapter *adapter)
spin_lock_init(&adapter->nfc_lock);
spin_lock_init(&adapter->stats64_lock);
+
+ /* init spinlock to avoid concurrency of VF resources */
+ spin_lock_init(&adapter->vfs_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
case e1000_82576:
@@ -4352,7 +4429,18 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
struct device *dev = rx_ring->dev;
- int size;
+ int size, res;
+
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+ rx_ring->queue_index, 0);
+ if (res < 0) {
+ dev_err(dev, "Failed to register xdp_rxq index %u\n",
+ rx_ring->queue_index);
+ return res;
+ }
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
@@ -4375,14 +4463,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring->xdp_prog = adapter->xdp_prog;
- /* XDP RX-queue info */
- if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
- rx_ring->queue_index, 0) < 0)
- goto err;
-
return 0;
err:
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
@@ -4819,8 +4903,11 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
while (i != tx_ring->next_to_use) {
union e1000_adv_tx_desc *eop_desc, *tx_desc;
- /* Free all the Tx ring sk_buffs */
- dev_kfree_skb_any(tx_buffer->skb);
+ /* Free all the Tx ring sk_buffs or xdp frames */
+ if (tx_buffer->type == IGB_TYPE_SKB)
+ dev_kfree_skb_any(tx_buffer->skb);
+ else
+ xdp_return_frame(tx_buffer->xdpf);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -5505,7 +5592,8 @@ static void igb_watchdog_task(struct work_struct *work)
break;
}
- if (adapter->link_speed != SPEED_1000)
+ if (adapter->link_speed != SPEED_1000 ||
+ !hw->phy.ops.read_reg)
goto no_wait;
/* wait for Remote receiver status OK */
@@ -6256,74 +6344,108 @@ int igb_xmit_xdp_ring(struct igb_adapter *adapter,
struct igb_ring *tx_ring,
struct xdp_frame *xdpf)
{
- union e1000_adv_tx_desc *tx_desc;
- u32 len, cmd_type, olinfo_status;
- struct igb_tx_buffer *tx_buffer;
- dma_addr_t dma;
- u16 i;
-
- len = xdpf->len;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 count, i, index = tx_ring->next_to_use;
+ struct igb_tx_buffer *tx_head = &tx_ring->tx_buffer_info[index];
+ struct igb_tx_buffer *tx_buffer = tx_head;
+ union e1000_adv_tx_desc *tx_desc = IGB_TX_DESC(tx_ring, index);
+ u32 len = xdpf->len, cmd_type, olinfo_status;
+ void *data = xdpf->data;
- if (unlikely(!igb_desc_unused(tx_ring)))
- return IGB_XDP_CONSUMED;
+ count = TXD_USE_COUNT(len);
+ for (i = 0; i < nr_frags; i++)
+ count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
- dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
+ if (igb_maybe_stop_tx(tx_ring, count + 3))
return IGB_XDP_CONSUMED;
+ i = 0;
/* record the location of the first descriptor for this packet */
- tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
- tx_buffer->bytecount = len;
- tx_buffer->gso_segs = 1;
- tx_buffer->protocol = 0;
+ tx_head->bytecount = xdp_get_frame_len(xdpf);
+ tx_head->type = IGB_TYPE_XDP;
+ tx_head->gso_segs = 1;
+ tx_head->xdpf = xdpf;
- i = tx_ring->next_to_use;
- tx_desc = IGB_TX_DESC(tx_ring, i);
+ olinfo_status = tx_head->bytecount << E1000_ADVTXD_PAYLEN_SHIFT;
+ /* 82575 requires a unique index per ring */
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ olinfo_status |= tx_ring->reg_idx << 4;
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- dma_unmap_len_set(tx_buffer, len, len);
- dma_unmap_addr_set(tx_buffer, dma, dma);
- tx_buffer->type = IGB_TYPE_XDP;
- tx_buffer->xdpf = xdpf;
+ for (;;) {
+ dma_addr_t dma;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto unmap;
- /* put descriptor type bits */
- cmd_type = E1000_ADVTXD_DTYP_DATA |
- E1000_ADVTXD_DCMD_DEXT |
- E1000_ADVTXD_DCMD_IFCS;
- cmd_type |= len | IGB_TXD_DCMD;
- tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, len);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
- olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
- /* 82575 requires a unique index per ring */
- if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
- olinfo_status |= tx_ring->reg_idx << 4;
+ /* put descriptor type bits */
+ cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS | len;
- tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
+ tx_buffer->protocol = 0;
+ if (++index == tx_ring->count)
+ index = 0;
+
+ if (i == nr_frags)
+ break;
+
+ tx_buffer = &tx_ring->tx_buffer_info[index];
+ tx_desc = IGB_TX_DESC(tx_ring, index);
+ tx_desc->read.olinfo_status = 0;
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ len = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
+ tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_TXD_DCMD);
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), tx_head->bytecount);
/* set the timestamp */
- tx_buffer->time_stamp = jiffies;
+ tx_head->time_stamp = jiffies;
/* Avoid any potential race with xdp_xmit and cleanup */
smp_wmb();
/* set next_to_watch value indicating a packet is present */
- i++;
- if (i == tx_ring->count)
- i = 0;
-
- tx_buffer->next_to_watch = tx_desc;
- tx_ring->next_to_use = i;
+ tx_head->next_to_watch = tx_desc;
+ tx_ring->next_to_use = index;
/* Make sure there is space in the ring for the next send. */
igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
- writel(i, tx_ring->tail);
+ writel(index, tx_ring->tail);
return IGB_XDP_TX;
+
+unmap:
+ for (;;) {
+ tx_buffer = &tx_ring->tx_buffer_info[index];
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ if (tx_buffer == tx_head)
+ break;
+
+ if (!index)
+ index += tx_ring->count;
+ index--;
+ }
+
+ return IGB_XDP_CONSUMED;
}
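A standalone sketch of the descriptor accounting used in the rewritten igb_xmit_xdp_ring() above: each linear buffer or fragment needs DIV_ROUND_UP(len, per-descriptor limit) data descriptors, and the driver keeps a little headroom on top (the "+ 3" passed to igb_maybe_stop_tx()). The 32 KiB limit below is an illustrative assumption and may not match IGB_MAX_DATA_PER_TXD exactly.

        #include <stdio.h>

        #define MAX_DATA_PER_TXD (1u << 15)     /* assumed 32 KiB per data descriptor */
        #define TXD_USE_COUNT(s) (((s) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

        int main(void)
        {
                unsigned int head_len = 1500, frag_len[2] = { 4096, 40000 };
                unsigned int count = TXD_USE_COUNT(head_len), i;

                for (i = 0; i < 2; i++)
                        count += TXD_USE_COUNT(frag_len[i]);

                /* 1 + 1 + 2 = 4 descriptors; the driver additionally reserves a
                 * few slots for the context descriptor and ring wrap-around. */
                printf("descriptors needed: %u\n", count);
                return 0;
        }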
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
@@ -6588,10 +6710,10 @@ void igb_update_stats(struct igb_adapter *adapter)
}
do {
- start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+ start = u64_stats_fetch_begin(&ring->rx_syncp);
_bytes = ring->rx_stats.bytes;
_packets = ring->rx_stats.packets;
- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
bytes += _bytes;
packets += _packets;
}
@@ -6604,10 +6726,10 @@ void igb_update_stats(struct igb_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *ring = adapter->tx_ring[i];
do {
- start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+ start = u64_stats_fetch_begin(&ring->tx_syncp);
_bytes = ring->tx_stats.bytes;
_packets = ring->tx_stats.packets;
- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
bytes += _bytes;
packets += _packets;
}
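The _irq-less u64_stats helpers keep the same reader contract as before; below is a simplified, single-threaded sketch of that seqcount-style retry loop (the real helpers add READ_ONCE and memory barriers, this is illustration only).

        #include <stdint.h>
        #include <stdio.h>

        struct ring_stats {
                unsigned int seq;       /* even: stable, odd: writer active */
                uint64_t bytes, packets;
        };

        static unsigned int fetch_begin(struct ring_stats *s)
        {
                unsigned int seq;

                do {
                        seq = s->seq;   /* kernel: READ_ONCE() plus barriers */
                } while (seq & 1);      /* writer in progress, try again */
                return seq;
        }

        static int fetch_retry(struct ring_stats *s, unsigned int start)
        {
                return s->seq != start; /* writer bumped seq: snapshot is torn */
        }

        int main(void)
        {
                struct ring_stats s = { .seq = 2, .bytes = 1500, .packets = 1 };
                uint64_t bytes, packets;
                unsigned int start;

                do {
                        start = fetch_begin(&s);
                        bytes = s.bytes;
                        packets = s.packets;
                } while (fetch_retry(&s, start));

                printf("%llu packets / %llu bytes\n",
                       (unsigned long long)packets, (unsigned long long)bytes);
                return 0;
        }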
@@ -6739,12 +6861,119 @@ void igb_update_stats(struct igb_adapter *adapter)
}
}
-static void igb_tsync_interrupt(struct igb_adapter *adapter)
+static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
{
+ int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_PEROUT, tsintr_tt);
+ struct e1000_hw *hw = &adapter->hw;
+ struct timespec64 ts;
+ u32 tsauxc;
+
+ if (pin < 0 || pin >= IGB_N_SDP)
+ return;
+
+ spin_lock(&adapter->tmreg_lock);
+
+ if (hw->mac.type == e1000_82580 ||
+ hw->mac.type == e1000_i354 ||
+ hw->mac.type == e1000_i350) {
+ s64 ns = timespec64_to_ns(&adapter->perout[tsintr_tt].period);
+ u32 systiml, systimh, level_mask, level, rem;
+ u64 systim, now;
+
+ /* read systim registers in sequence */
+ rd32(E1000_SYSTIMR);
+ systiml = rd32(E1000_SYSTIML);
+ systimh = rd32(E1000_SYSTIMH);
+ systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml);
+ now = timecounter_cyc2time(&adapter->tc, systim);
+
+ if (pin < 2) {
+ level_mask = (tsintr_tt == 1) ? 0x80000 : 0x40000;
+ level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0;
+ } else {
+ level_mask = (tsintr_tt == 1) ? 0x80 : 0x40;
+ level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0;
+ }
+
+ div_u64_rem(now, ns, &rem);
+ systim = systim + (ns - rem);
+
+ /* synchronize pin level with rising/falling edges */
+ div_u64_rem(now, ns << 1, &rem);
+ if (rem < ns) {
+ /* first half of period */
+ if (level == 0) {
+ /* output is already low, skip this period */
+ systim += ns;
+ pr_notice("igb: periodic output on %s missed falling edge\n",
+ adapter->sdp_config[pin].name);
+ }
+ } else {
+ /* second half of period */
+ if (level == 1) {
+ /* output is already high, skip this period */
+ systim += ns;
+ pr_notice("igb: periodic output on %s missed rising edge\n",
+ adapter->sdp_config[pin].name);
+ }
+ }
+
+ /* for this chip family, tv_sec holds the upper bits of the raw
+ * SYSTIM value rather than seconds
+ */
+ ts.tv_nsec = (u32)systim;
+ ts.tv_sec = ((u32)(systim >> 32)) & 0xFF;
+ } else {
+ ts = timespec64_add(adapter->perout[tsintr_tt].start,
+ adapter->perout[tsintr_tt].period);
+ }
+
+ /* u32 conversion of tv_sec is safe until y2106 */
+ wr32((tsintr_tt == 1) ? E1000_TRGTTIML1 : E1000_TRGTTIML0, ts.tv_nsec);
+ wr32((tsintr_tt == 1) ? E1000_TRGTTIMH1 : E1000_TRGTTIMH0, (u32)ts.tv_sec);
+ tsauxc = rd32(E1000_TSAUXC);
+ tsauxc |= TSAUXC_EN_TT0;
+ wr32(E1000_TSAUXC, tsauxc);
+ adapter->perout[tsintr_tt].start = ts;
+
+ spin_unlock(&adapter->tmreg_lock);
+}
+
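A plain-C sketch of the arithmetic in igb_perout() above: the remainder of now divided by the programmed interval is used to round the next target time up to an interval boundary, and now modulo twice the interval tells whether the output should currently sit in the first or second half of its cycle. Example numbers only, without the driver's register reads.

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t ns = 500000000ULL;        /* programmed interval, ns */
                uint64_t now = 1234567890ULL;      /* current time, ns */
                uint64_t rem = now % ns;
                uint64_t next_edge = now + (ns - rem);
                int first_half = (now % (ns * 2)) < ns;

                printf("next edge at %llu ns, %s half of the cycle\n",
                       (unsigned long long)next_edge,
                       first_half ? "first" : "second");
                return 0;
        }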
+static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
+{
+ int pin = ptp_find_pin(adapter->ptp_clock, PTP_PF_EXTTS, tsintr_tt);
+ int auxstmpl = (tsintr_tt == 1) ? E1000_AUXSTMPL1 : E1000_AUXSTMPL0;
+ int auxstmph = (tsintr_tt == 1) ? E1000_AUXSTMPH1 : E1000_AUXSTMPH0;
struct e1000_hw *hw = &adapter->hw;
struct ptp_clock_event event;
struct timespec64 ts;
- u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
+
+ if (pin < 0 || pin >= IGB_N_SDP)
+ return;
+
+ if (hw->mac.type == e1000_82580 ||
+ hw->mac.type == e1000_i354 ||
+ hw->mac.type == e1000_i350) {
+ s64 ns = rd32(auxstmpl);
+
+ ns += ((s64)(rd32(auxstmph) & 0xFF)) << 32;
+ ts = ns_to_timespec64(ns);
+ } else {
+ ts.tv_nsec = rd32(auxstmpl);
+ ts.tv_sec = rd32(auxstmph);
+ }
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = tsintr_tt;
+ event.timestamp = ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+ ptp_clock_event(adapter->ptp_clock, &event);
+}
+
+static void igb_tsync_interrupt(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ack = 0, tsicr = rd32(E1000_TSICR);
+ struct ptp_clock_event event;
if (tsicr & TSINTR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
@@ -6760,51 +6989,22 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter)
}
if (tsicr & TSINTR_TT0) {
- spin_lock(&adapter->tmreg_lock);
- ts = timespec64_add(adapter->perout[0].start,
- adapter->perout[0].period);
- /* u32 conversion of tv_sec is safe until y2106 */
- wr32(E1000_TRGTTIML0, ts.tv_nsec);
- wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
- tsauxc = rd32(E1000_TSAUXC);
- tsauxc |= TSAUXC_EN_TT0;
- wr32(E1000_TSAUXC, tsauxc);
- adapter->perout[0].start = ts;
- spin_unlock(&adapter->tmreg_lock);
+ igb_perout(adapter, 0);
ack |= TSINTR_TT0;
}
if (tsicr & TSINTR_TT1) {
- spin_lock(&adapter->tmreg_lock);
- ts = timespec64_add(adapter->perout[1].start,
- adapter->perout[1].period);
- wr32(E1000_TRGTTIML1, ts.tv_nsec);
- wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
- tsauxc = rd32(E1000_TSAUXC);
- tsauxc |= TSAUXC_EN_TT1;
- wr32(E1000_TSAUXC, tsauxc);
- adapter->perout[1].start = ts;
- spin_unlock(&adapter->tmreg_lock);
+ igb_perout(adapter, 1);
ack |= TSINTR_TT1;
}
if (tsicr & TSINTR_AUTT0) {
- nsec = rd32(E1000_AUXSTMPL0);
- sec = rd32(E1000_AUXSTMPH0);
- event.type = PTP_CLOCK_EXTTS;
- event.index = 0;
- event.timestamp = sec * 1000000000ULL + nsec;
- ptp_clock_event(adapter->ptp_clock, &event);
+ igb_extts(adapter, 0);
ack |= TSINTR_AUTT0;
}
if (tsicr & TSINTR_AUTT1) {
- nsec = rd32(E1000_AUXSTMPL1);
- sec = rd32(E1000_AUXSTMPH1);
- event.type = PTP_CLOCK_EXTTS;
- event.index = 1;
- event.timestamp = sec * 1000000000ULL + nsec;
- ptp_clock_event(adapter->ptp_clock, &event);
+ igb_extts(adapter, 1);
ack |= TSINTR_AUTT1;
}
@@ -7399,7 +7599,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
struct e1000_hw *hw = &adapter->hw;
unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
- u32 reg, msgbuf[3];
+ u32 reg, msgbuf[3] = {};
u8 *addr = (u8 *)(&msgbuf[1]);
/* process all the same items cleared in a function level reset */
@@ -7842,8 +8042,10 @@ unlock:
static void igb_msg_task(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
/* process any reset requests */
if (!igb_check_for_rst(hw, vf))
@@ -7857,6 +8059,7 @@ static void igb_msg_task(struct igb_adapter *adapter)
if (!igb_check_for_ack(hw, vf))
igb_rcv_ack_from_vf(adapter, vf);
}
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
/**
@@ -8367,7 +8570,7 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
@@ -8422,7 +8625,7 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
result = IGB_XDP_REDIR;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(adapter->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
@@ -8736,6 +8939,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ xdp_buff_clear_frags_flag(&xdp);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
@@ -9254,7 +9458,7 @@ static int __maybe_unused igb_suspend(struct device *dev)
return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
-static int __maybe_unused igb_resume(struct device *dev)
+static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -9297,17 +9501,24 @@ static int __maybe_unused igb_resume(struct device *dev)
wr32(E1000_WUS, ~0);
- rtnl_lock();
+ if (!rpm)
+ rtnl_lock();
if (!err && netif_running(netdev))
err = __igb_open(netdev, true);
if (!err)
netif_device_attach(netdev);
- rtnl_unlock();
+ if (!rpm)
+ rtnl_unlock();
return err;
}
+static int __maybe_unused igb_resume(struct device *dev)
+{
+ return __igb_resume(dev, false);
+}
+
static int __maybe_unused igb_runtime_idle(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
@@ -9326,7 +9537,7 @@ static int __maybe_unused igb_runtime_suspend(struct device *dev)
static int __maybe_unused igb_runtime_resume(struct device *dev)
{
- return igb_resume(dev);
+ return __igb_resume(dev, true);
}
static void igb_shutdown(struct pci_dev *pdev)
@@ -9341,71 +9552,17 @@ static void igb_shutdown(struct pci_dev *pdev)
}
}
-#ifdef CONFIG_PCI_IOV
-static int igb_sriov_reinit(struct pci_dev *dev)
-{
- struct net_device *netdev = pci_get_drvdata(dev);
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
-
- rtnl_lock();
-
- if (netif_running(netdev))
- igb_close(netdev);
- else
- igb_reset(adapter);
-
- igb_clear_interrupt_scheme(adapter);
-
- igb_init_queue_configuration(adapter);
-
- if (igb_init_interrupt_scheme(adapter, true)) {
- rtnl_unlock();
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
- return -ENOMEM;
- }
-
- if (netif_running(netdev))
- igb_open(netdev);
-
- rtnl_unlock();
-
- return 0;
-}
-
-static int igb_pci_disable_sriov(struct pci_dev *dev)
-{
- int err = igb_disable_sriov(dev);
-
- if (!err)
- err = igb_sriov_reinit(dev);
-
- return err;
-}
-
-static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
-{
- int err = igb_enable_sriov(dev, num_vfs);
-
- if (err)
- goto out;
-
- err = igb_sriov_reinit(dev);
- if (!err)
- return num_vfs;
-
-out:
- return err;
-}
-
-#endif
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
- if (num_vfs == 0)
- return igb_pci_disable_sriov(dev);
- else
- return igb_pci_enable_sriov(dev, num_vfs);
+ int err;
+
+ if (num_vfs == 0) {
+ return igb_disable_sriov(dev, true);
+ } else {
+ err = igb_enable_sriov(dev, num_vfs, true);
+ return err ? err : num_vfs;
+ }
#endif
return 0;
}
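The rewritten igb_pci_sriov_configure() follows the usual .sriov_configure contract: negative errno on failure, 0 when SR-IOV is disabled, otherwise the number of VFs actually enabled. A hypothetical stub illustrating those return values (igb itself caps the total at the 7 VFs set via pci_sriov_set_totalvfs() above):

        #include <stdio.h>

        static int example_sriov_configure(int num_vfs)
        {
                if (num_vfs == 0)
                        return 0;       /* SR-IOV disabled */
                if (num_vfs > 7)
                        return -22;     /* -EINVAL: more VFs than supported */
                return num_vfs;         /* number of VFs enabled */
        }

        int main(void)
        {
                /* prints "0 4 -22" */
                printf("%d %d %d\n", example_sriov_configure(0),
                       example_sriov_configure(4), example_sriov_configure(8));
                return 0;
        }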
@@ -9433,7 +9590,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
igb_down(adapter);
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -9442,7 +9599,7 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the igb_resume routine.
+ * resembles the first-half of the __igb_resume routine.
**/
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
@@ -9482,7 +9639,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
*
* This callback is called when the error recovery driver tells us that
* its OK to resume normal operation. Implementation resembles the
- * second-half of the igb_resume routine.
+ * second-half of the __igb_resume routine.
*/
static void igb_io_resume(struct pci_dev *pdev)
{
@@ -9812,11 +9969,10 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
struct e1000_hw *hw = &adapter->hw;
u32 dmac_thr;
u16 hwm;
+ u32 reg;
if (hw->mac.type > e1000_82580) {
if (adapter->flags & IGB_FLAG_DMAC) {
- u32 reg;
-
/* force threshold to 0. */
wr32(E1000_DMCTXTH, 0);
@@ -9849,7 +10005,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
/* Disable BMC-to-OS Watchdog Enable */
if (hw->mac.type != e1000_i354)
reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
-
wr32(E1000_DMACR, reg);
/* no lower threshold to disable
@@ -9866,12 +10021,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
*/
wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
(IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
+ }
- /* make low power state decision controlled
- * by DMA coal
- */
+ if (hw->mac.type >= e1000_i210 ||
+ (adapter->flags & IGB_FLAG_DMAC)) {
reg = rd32(E1000_PCIEMISC);
- reg &= ~E1000_PCIEMISC_LX_DECISION;
+ reg |= E1000_PCIEMISC_LX_DECISION;
wr32(E1000_PCIEMISC, reg);
} /* endif adapter->dmac is not disabled */
} else if (hw->mac.type == e1000_82580) {
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 0011b15e678c..405886ee5261 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -67,8 +67,10 @@
#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
#define INCVALUE_82576 (16u << IGB_82576_TSYNC_SHIFT)
#define IGB_NBITS_82580 40
+#define IGB_82580_BASE_PERIOD 0x800000000
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+static void igb_ptp_sdp_init(struct igb_adapter *adapter);
/* SYSTIM read access for the 82576 */
static u64 igb_ptp_read_82576(const struct cyclecounter *cc)
@@ -164,23 +166,21 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
unsigned long flags;
u64 ns;
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+
switch (adapter->hw.mac.type) {
case e1000_82576:
case e1000_82580:
case e1000_i354:
case e1000_i350:
spin_lock_irqsave(&adapter->tmreg_lock, flags);
-
ns = timecounter_cyc2time(&adapter->tc, systim);
-
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
- memset(hwtstamps, 0, sizeof(*hwtstamps));
hwtstamps->hwtstamp = ns_to_ktime(ns);
break;
case e1000_i210:
case e1000_i211:
- memset(hwtstamps, 0, sizeof(*hwtstamps));
/* Upper 32 bits contain s, lower 32 bits contain ns. */
hwtstamps->hwtstamp = ktime_set(systim >> 32,
systim & 0xFFFFFFFF);
@@ -191,29 +191,14 @@ static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
}
/* PTP clock operations */
-static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb)
+static int igb_ptp_adjfine_82576(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
struct e1000_hw *hw = &igb->hw;
- int neg_adj = 0;
- u64 rate;
- u32 incvalue;
+ u64 incvalue;
- if (ppb < 0) {
- neg_adj = 1;
- ppb = -ppb;
- }
- rate = ppb;
- rate <<= 14;
- rate = div_u64(rate, 1953125);
-
- incvalue = 16 << IGB_82576_TSYNC_SHIFT;
-
- if (neg_adj)
- incvalue -= rate;
- else
- incvalue += rate;
+ incvalue = adjust_by_scaled_ppm(INCVALUE_82576, scaled_ppm);
wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK));
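A userspace approximation of the conversion adjust_by_scaled_ppm() performs for the hunk above: scaled_ppm is ppm scaled by 2^16, so the increment becomes base plus or minus base * |scaled_ppm| / (10^6 * 2^16). The INCVALUE_82576 value used below assumes IGB_82576_TSYNC_SHIFT is 19, and the kernel helper may round slightly differently; this is a sketch of the math, not the kernel implementation.

        #include <inttypes.h>
        #include <stdio.h>

        static uint64_t adjust_by_scaled_ppm_approx(uint64_t base, long scaled_ppm)
        {
                int neg = scaled_ppm < 0;
                unsigned long long abs_ppm = neg ? -(long long)scaled_ppm : scaled_ppm;
                /* __int128 (GCC/Clang) avoids overflow of the intermediate product */
                unsigned long long delta =
                        (unsigned long long)((__int128)base * abs_ppm / (1000000ULL << 16));

                return neg ? base - delta : base + delta;
        }

        int main(void)
        {
                uint64_t incvalue = 16u << 19;  /* assumed INCVALUE_82576 */
                long scaled_ppm = 65536000;     /* +1000 ppm requested */

                printf("incvalue: %" PRIu64 " -> %" PRIu64 "\n",
                       incvalue, adjust_by_scaled_ppm_approx(incvalue, scaled_ppm));
                return 0;
        }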
@@ -225,17 +210,11 @@ static int igb_ptp_adjfine_82580(struct ptp_clock_info *ptp, long scaled_ppm)
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
struct e1000_hw *hw = &igb->hw;
- int neg_adj = 0;
+ bool neg_adj;
u64 rate;
u32 inca;
- if (scaled_ppm < 0) {
- neg_adj = 1;
- scaled_ppm = -scaled_ppm;
- }
- rate = scaled_ppm;
- rate <<= 13;
- rate = div_u64(rate, 15625);
+ neg_adj = diff_by_scaled_ppm(IGB_82580_BASE_PERIOD, scaled_ppm, &rate);
inca = rate & INCVALUE_MASK;
if (neg_adj)
@@ -507,6 +486,158 @@ static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin, int freq)
wr32(E1000_CTRL_EXT, ctrl_ext);
}
+static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct igb_adapter *igb =
+ container_of(ptp, struct igb_adapter, ptp_caps);
+ u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh, systiml,
+ systimh, level_mask, level, rem;
+ struct e1000_hw *hw = &igb->hw;
+ struct timespec64 ts, start;
+ unsigned long flags;
+ u64 systim, now;
+ int pin = -1;
+ s64 ns;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ /* Reject requests with unsupported flags */
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
+ if (on) {
+ pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ }
+ if (rq->extts.index == 1) {
+ tsauxc_mask = TSAUXC_EN_TS1;
+ tsim_mask = TSINTR_AUTT1;
+ } else {
+ tsauxc_mask = TSAUXC_EN_TS0;
+ tsim_mask = TSINTR_AUTT0;
+ }
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+ tsauxc = rd32(E1000_TSAUXC);
+ tsim = rd32(E1000_TSIM);
+ if (on) {
+ igb_pin_extts(igb, rq->extts.index, pin);
+ tsauxc |= tsauxc_mask;
+ tsim |= tsim_mask;
+ } else {
+ tsauxc &= ~tsauxc_mask;
+ tsim &= ~tsim_mask;
+ }
+ wr32(E1000_TSAUXC, tsauxc);
+ wr32(E1000_TSIM, tsim);
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+ return 0;
+
+ case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
+ if (on) {
+ pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+ }
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ ns = timespec64_to_ns(&ts);
+ ns = ns >> 1;
+ if (on && ns < 8LL)
+ return -EINVAL;
+ ts = ns_to_timespec64(ns);
+ if (rq->perout.index == 1) {
+ tsauxc_mask = TSAUXC_EN_TT1;
+ tsim_mask = TSINTR_TT1;
+ trgttiml = E1000_TRGTTIML1;
+ trgttimh = E1000_TRGTTIMH1;
+ } else {
+ tsauxc_mask = TSAUXC_EN_TT0;
+ tsim_mask = TSINTR_TT0;
+ trgttiml = E1000_TRGTTIML0;
+ trgttimh = E1000_TRGTTIMH0;
+ }
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+ tsauxc = rd32(E1000_TSAUXC);
+ tsim = rd32(E1000_TSIM);
+ if (rq->perout.index == 1) {
+ tsauxc &= ~(TSAUXC_EN_TT1 | TSAUXC_EN_CLK1 | TSAUXC_ST1);
+ tsim &= ~TSINTR_TT1;
+ } else {
+ tsauxc &= ~(TSAUXC_EN_TT0 | TSAUXC_EN_CLK0 | TSAUXC_ST0);
+ tsim &= ~TSINTR_TT0;
+ }
+ if (on) {
+ int i = rq->perout.index;
+
+ /* read systim registers in sequence */
+ rd32(E1000_SYSTIMR);
+ systiml = rd32(E1000_SYSTIML);
+ systimh = rd32(E1000_SYSTIMH);
+ systim = (((u64)(systimh & 0xFF)) << 32) | ((u64)systiml);
+ now = timecounter_cyc2time(&igb->tc, systim);
+
+ if (pin < 2) {
+ level_mask = (i == 1) ? 0x80000 : 0x40000;
+ level = (rd32(E1000_CTRL) & level_mask) ? 1 : 0;
+ } else {
+ level_mask = (i == 1) ? 0x80 : 0x40;
+ level = (rd32(E1000_CTRL_EXT) & level_mask) ? 1 : 0;
+ }
+
+ div_u64_rem(now, ns, &rem);
+ systim = systim + (ns - rem);
+
+ /* synchronize pin level with rising/falling edges */
+ div_u64_rem(now, ns << 1, &rem);
+ if (rem < ns) {
+ /* first half of period */
+ if (level == 0) {
+ /* output is already low, skip this period */
+ systim += ns;
+ }
+ } else {
+ /* second half of period */
+ if (level == 1) {
+ /* output is already high, skip this period */
+ systim += ns;
+ }
+ }
+
+ start = ns_to_timespec64(systim + (ns - rem));
+ igb_pin_perout(igb, i, pin, 0);
+ igb->perout[i].start.tv_sec = start.tv_sec;
+ igb->perout[i].start.tv_nsec = start.tv_nsec;
+ igb->perout[i].period.tv_sec = ts.tv_sec;
+ igb->perout[i].period.tv_nsec = ts.tv_nsec;
+
+ wr32(trgttiml, (u32)systim);
+ wr32(trgttimh, ((u32)(systim >> 32)) & 0xFF);
+ tsauxc |= tsauxc_mask;
+ tsim |= tsim_mask;
+ }
+ wr32(E1000_TSAUXC, tsauxc);
+ wr32(E1000_TSIM, tsim);
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+ return 0;
+
+ case PTP_CLK_REQ_PPS:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
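One detail worth noting in igb_ptp_feature_enable_82580() above: the requested perout period is halved (ns >> 1) because each programmed target time produces a single pin toggle, so two target times, one rising and one falling edge, make up one full period. Trivial numeric sketch:

        #include <stdio.h>

        int main(void)
        {
                long long period_ns = 1000000000;   /* 1 s requested period */
                long long toggle_ns = period_ns >> 1;

                printf("pin toggles every %lld ns -> full cycle %lld ns\n",
                       toggle_ns, toggle_ns * 2);
                return 0;
        }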
static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
@@ -1015,10 +1146,6 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
bool is_l2 = false;
u32 regval;
- /* reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
@@ -1192,7 +1319,6 @@ void igb_ptp_init(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- int i;
switch (hw->mac.type) {
case e1000_82576:
@@ -1201,7 +1327,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.max_adj = 999999881;
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.pps = 0;
- adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
+ adapter->ptp_caps.adjfine = igb_ptp_adjfine_82576;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82576;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
@@ -1215,16 +1341,21 @@ void igb_ptp_init(struct igb_adapter *adapter)
case e1000_82580:
case e1000_i354:
case e1000_i350:
+ igb_ptp_sdp_init(adapter);
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 62499999;
- adapter->ptp_caps.n_ext_ts = 0;
+ adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
+ adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
+ adapter->ptp_caps.n_pins = IGB_N_SDP;
adapter->ptp_caps.pps = 0;
+ adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82580;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
- adapter->ptp_caps.enable = igb_ptp_feature_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable_82580;
+ adapter->ptp_caps.verify = igb_ptp_verify_pin;
adapter->cc.read = igb_ptp_read_82580;
adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
adapter->cc.mult = 1;
@@ -1233,13 +1364,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
break;
case e1000_i210:
case e1000_i211:
- for (i = 0; i < IGB_N_SDP; i++) {
- struct ptp_pin_desc *ppd = &adapter->sdp_config[i];
-
- snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
- ppd->index = i;
- ppd->func = PTP_PF_NONE;
- }
+ igb_ptp_sdp_init(adapter);
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 62499999;
@@ -1285,6 +1410,23 @@ void igb_ptp_init(struct igb_adapter *adapter)
}
/**
+ * igb_ptp_sdp_init - utility function which inits the SDP config structs
+ * @adapter: Board private structure.
+ **/
+void igb_ptp_sdp_init(struct igb_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < IGB_N_SDP; i++) {
+ struct ptp_pin_desc *ppd = &adapter->sdp_config[i];
+
+ snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
+ ppd->index = i;
+ ppd->func = PTP_PF_NONE;
+ }
+}
+
+/**
* igb_ptp_suspend - Disable PTP work items and prepare for suspend
* @adapter: Board private structure
*
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 06e5bd646a0e..83b97989a6bd 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -169,13 +169,15 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
static void igbvf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct igbvf_ring *tx_ring = adapter->tx_ring;
@@ -188,7 +190,9 @@ static void igbvf_get_ringparam(struct net_device *netdev,
}
static int igbvf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct igbvf_ring *temp_ring;
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 975eb47ee04d..57d39ee00b58 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -227,7 +227,7 @@ struct igbvf_adapter {
/* The VF counters don't clear on read so we have to get a base
* count on driver start up and always subtract that base on
- * on the first update, thus the flag..
+ * the first update, thus the flag..
*/
struct e1000_vf_stats stats;
u64 zero_base;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 4d988da68394..7ff2752dd763 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1074,7 +1074,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
netdev);
if (err)
- goto out;
+ goto free_irq_tx;
adapter->rx_ring->itr_register = E1000_EITR(vector);
adapter->rx_ring->itr_val = adapter->current_itr;
@@ -1083,10 +1083,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
err = request_irq(adapter->msix_entries[vector].vector,
igbvf_msix_other, 0, netdev->name, netdev);
if (err)
- goto out;
+ goto free_irq_rx;
igbvf_configure_msix(adapter);
return 0;
+free_irq_rx:
+ free_irq(adapter->msix_entries[--vector].vector, netdev);
+free_irq_tx:
+ free_irq(adapter->msix_entries[--vector].vector, netdev);
out:
return err;
}
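The free_irq_tx/free_irq_rx labels added above follow the usual unwind-on-error pattern: each successfully acquired resource gets its own label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A self-contained sketch with hypothetical resources, not the igbvf IRQ vectors:

        #include <stdio.h>

        static int acquire(const char *name, int fail)
        {
                printf("acquire %s\n", name);
                return fail ? -1 : 0;
        }

        static void release(const char *name)
        {
                printf("release %s\n", name);
        }

        static int setup(void)
        {
                int err;

                err = acquire("tx", 0);
                if (err)
                        goto out;
                err = acquire("rx", 0);
                if (err)
                        goto free_tx;
                err = acquire("other", 1);      /* simulate a failure */
                if (err)
                        goto free_rx;
                return 0;

        free_rx:
                release("rx");
        free_tx:
                release("tx");
        out:
                return err;
        }

        int main(void) { return setup() ? 1 : 0; }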
@@ -1109,7 +1113,7 @@ static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
return -ENOMEM;
}
- netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
+ netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll);
return 0;
}
@@ -1520,7 +1524,7 @@ static void igbvf_reset(struct igbvf_adapter *adapter)
/* Allow time for pending master requests to run */
if (mac->ops.reset_hw(hw))
- dev_warn(&adapter->pdev->dev, "PF still resetting\n");
+ dev_info(&adapter->pdev->dev, "PF still resetting\n");
mac->ops.init_hw(hw);
@@ -2537,7 +2541,7 @@ static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
igbvf_down(adapter);
pci_disable_device(pdev);
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -2589,6 +2593,33 @@ static void igbvf_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
}
+/**
+ * igbvf_io_prepare - prepare device driver for PCI reset
+ * @pdev: PCI device information struct
+ */
+static void igbvf_io_prepare(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+ igbvf_down(adapter);
+}
+
+/**
+ * igbvf_io_reset_done - PCI reset done, device driver reset can begin
+ * @pdev: PCI device information struct
+ */
+static void igbvf_io_reset_done(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igbvf_adapter *adapter = netdev_priv(netdev);
+
+ igbvf_up(adapter);
+ clear_bit(__IGBVF_RESETTING, &adapter->state);
+}
+
static void igbvf_print_device_info(struct igbvf_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -2684,25 +2715,18 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct igbvf_adapter *adapter;
struct e1000_hw *hw;
const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
-
static int cards_found;
- int err, pci_using_dac;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_regions(pdev, igbvf_driver_name);
@@ -2783,10 +2807,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_GSO_PARTIAL |
IGBVF_GSO_PARTIAL_FEATURES;
- netdev->features = netdev->hw_features;
-
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
@@ -2926,6 +2947,8 @@ static const struct pci_error_handlers igbvf_err_handler = {
.error_detected = igbvf_io_error_detected,
.slot_reset = igbvf_io_slot_reset,
.resume = igbvf_io_resume,
+ .reset_prepare = igbvf_io_prepare,
+ .reset_done = igbvf_io_reset_done,
};
static const struct pci_device_id igbvf_pci_tbl[] = {
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index b8ba3f94c363..a47a2e3e548c 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2018 Intel Corporation. */
+#include <linux/etherdevice.h>
+
#include "vf.h"
static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
@@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
/* set our "perm_addr" based on info provided by PF */
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
if (!ret_val) {
- if (msgbuf[0] == (E1000_VF_RESET |
- E1000_VT_MSGTYPE_ACK))
+ switch (msgbuf[0]) {
+ case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK:
memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
- else
+ break;
+ case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK:
+ eth_zero_addr(hw->mac.perm_addr);
+ break;
+ default:
ret_val = -E1000_ERR_MAC_INIT;
+ }
}
}
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 3e386c38d016..34aebf00a512 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -94,9 +94,12 @@ struct igc_ring {
u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */
bool launchtime_enable; /* true if LaunchTime is enabled */
+ ktime_t last_tx_cycle; /* end of the cycle with a launchtime transmission */
+ ktime_t last_ff_cycle; /* Last cycle with an active first flag */
u32 start_time;
u32 end_time;
+ u32 max_sdu;
/* CBS parameters */
bool cbs_enable; /* indicates if CBS is enabled */
@@ -182,6 +185,8 @@ struct igc_adapter {
ktime_t base_time;
ktime_t cycle_time;
+ bool qbv_enable;
+ u32 qbv_config_change_errors;
/* OS defined structs */
struct pci_dev *pdev;
@@ -264,7 +269,6 @@ int igc_reinit_queues(struct igc_adapter *adapter);
void igc_write_rss_indir_tbl(struct igc_adapter *adapter);
bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
-int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
void igc_update_stats(struct igc_adapter *adapter);
void igc_disable_rx_ring(struct igc_ring *ring);
void igc_enable_rx_ring(struct igc_ring *ring);
@@ -290,8 +294,6 @@ extern char igc_driver_name[];
#define IGC_FLAG_PTP BIT(8)
#define IGC_FLAG_WOL_SUPPORTED BIT(8)
#define IGC_FLAG_NEED_LINK_UPDATE BIT(9)
-#define IGC_FLAG_MEDIA_RESET BIT(10)
-#define IGC_FLAG_MAS_ENABLE BIT(12)
#define IGC_FLAG_HAS_MSIX BIT(13)
#define IGC_FLAG_EEE BIT(14)
#define IGC_FLAG_VLAN_PROMISC BIT(15)
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index f068b66b8025..a1d815af507d 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -182,8 +182,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
igc_check_for_copper_link(hw);
- phy->type = igc_phy_i225;
-
out:
return ret_val;
}
@@ -398,6 +396,35 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw)
rd32(IGC_MPC);
}
+bool igc_is_device_id_i225(struct igc_hw *hw)
+{
+ switch (hw->device_id) {
+ case IGC_DEV_ID_I225_LM:
+ case IGC_DEV_ID_I225_V:
+ case IGC_DEV_ID_I225_I:
+ case IGC_DEV_ID_I225_K:
+ case IGC_DEV_ID_I225_K2:
+ case IGC_DEV_ID_I225_LMVP:
+ case IGC_DEV_ID_I225_IT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool igc_is_device_id_i226(struct igc_hw *hw)
+{
+ switch (hw->device_id) {
+ case IGC_DEV_ID_I226_LM:
+ case IGC_DEV_ID_I226_V:
+ case IGC_DEV_ID_I226_K:
+ case IGC_DEV_ID_I226_IT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static struct igc_mac_operations igc_mac_ops_base = {
.init_hw = igc_init_hw_base,
.check_for_link = igc_check_for_copper_link,
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index ce530f5fd7bd..9f3827eda157 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -7,6 +7,8 @@
/* forward declaration */
void igc_rx_fifo_flush_base(struct igc_hw *hw);
void igc_power_down_phy_copper_base(struct igc_hw *hw);
+bool igc_is_device_id_i225(struct igc_hw *hw);
+bool igc_is_device_id_i226(struct igc_hw *hw);
/* Transmit Descriptor - Advanced */
union igc_adv_tx_desc {
@@ -85,8 +87,13 @@ union igc_adv_rx_desc {
#define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */
/* SRRCTL bit definitions */
-#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
-#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
-#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IGC_SRRCTL_BSIZEPKT_MASK GENMASK(6, 0)
+#define IGC_SRRCTL_BSIZEPKT(x) FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \
+ (x) / 1024) /* in 1 KB resolution */
+#define IGC_SRRCTL_BSIZEHDR_MASK GENMASK(13, 8)
+#define IGC_SRRCTL_BSIZEHDR(x) FIELD_PREP(IGC_SRRCTL_BSIZEHDR_MASK, \
+ (x) / 64) /* in 64 bytes resolution */
+#define IGC_SRRCTL_DESCTYPE_MASK GENMASK(27, 25)
+#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF FIELD_PREP(IGC_SRRCTL_DESCTYPE_MASK, 1)
#endif /* _IGC_BASE_H */
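A worked example of what the new FIELD_PREP()-based SRRCTL helpers above evaluate to, using userspace re-implementations of GENMASK()/FIELD_PREP() for illustration only; the 256-byte header size is an assumption based on IGC_RX_HDR_LEN.

        #include <stdint.h>
        #include <stdio.h>

        #define GENMASK(h, l)     (((~0u) << (l)) & (~0u >> (31 - (h))))
        #define FIELD_PREP(m, v)  (((uint32_t)(v) << __builtin_ctz(m)) & (m))

        int main(void)
        {
                uint32_t bsizepkt = FIELD_PREP(GENMASK(6, 0), 2048 / 1024); /* 2 KB buffer */
                uint32_t bsizehdr = FIELD_PREP(GENMASK(13, 8), 256 / 64);   /* assumed 256 B header */

                /* prints "BSIZEPKT field: 0x2, BSIZEHDR field: 0x400" */
                printf("BSIZEPKT field: 0x%x, BSIZEHDR field: 0x%x\n", bsizepkt, bsizehdr);
                return 0;
        }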
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index c7fe61509d5b..44a507029946 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -85,9 +85,6 @@
#define IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8)
-/* Physical Func Reset Done Indication */
-#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-
/* Loop limit on how long we wait for auto-negotiation to complete */
#define COPPER_LINK_UP_LIMIT 10
#define PHY_AUTO_NEG_LIMIT 45
@@ -324,6 +321,8 @@
#define IGC_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IGC_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+#define IGC_ADVTXD_TSN_CNTX_FIRST 0x00000080
+
/* Transmit Control */
#define IGC_TCTL_EN 0x00000002 /* enable Tx */
#define IGC_TCTL_PSP 0x00000008 /* pad short packets */
@@ -403,6 +402,15 @@
#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
+/* Transmit Scheduling Latency */
+/* Latency between transmission scheduling (LaunchTime) and the time
+ * the packet is transmitted to the network, in nanoseconds.
+ */
+#define IGC_TXOFFSET_SPEED_10 0x000034BC
+#define IGC_TXOFFSET_SPEED_100 0x00000578
+#define IGC_TXOFFSET_SPEED_1000 0x0000012C
+#define IGC_TXOFFSET_SPEED_2500 0x00000578
+
/* Time Sync Interrupt Causes */
#define IGC_TSICR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */
#define IGC_TSICR_TXTS BIT(1) /* Transmit Timestamp. */
@@ -467,7 +475,9 @@
#define IGC_TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */
#define IGC_TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */
#define IGC_TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */
+#define IGC_TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */
#define IGC_TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */
+#define IGC_TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */
#define IGC_TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */
#define IGC_TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */
#define IGC_TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 1. */
@@ -514,6 +524,7 @@
/* Transmit Scheduling */
#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001
#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008
+#define IGC_TQAVCTRL_FUTSCDDIS 0x00000080
#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001
#define IGC_TXQCTL_STRICT_CYCLE 0x00000002
@@ -584,7 +595,6 @@
#define IGC_GEN_POLL_TIMEOUT 1920
/* PHY Control Register */
-#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
@@ -605,9 +615,6 @@
#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
-/* Bit definitions for valid PHY IDs. I = Integrated E = External */
-#define I225_I_PHY_ID 0x67C9DC00
-
/* MDI Control */
#define IGC_MDIC_DATA_MASK 0x0000FFFF
#define IGC_MDIC_REG_MASK 0x001F0000
@@ -617,7 +624,6 @@
#define IGC_MDIC_OP_WRITE 0x04000000
#define IGC_MDIC_OP_READ 0x08000000
#define IGC_MDIC_READY 0x10000000
-#define IGC_MDIC_INT_EN 0x20000000
#define IGC_MDIC_ERROR 0x40000000
#define IGC_N0_QUEUE -1
@@ -656,9 +662,6 @@
*/
#define IGC_TW_SYSTEM_100_MASK 0x0000FF00
#define IGC_TW_SYSTEM_100_SHIFT 8
-#define IGC_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
-#define IGC_DMACR_DMACTHR_MASK 0x00FF0000
-#define IGC_DMACR_DMACTHR_SHIFT 16
/* Reg val to set scale to 1024 nsec */
#define IGC_LTRMINV_SCALE_1024 2
/* Reg val to set scale to 32768 nsec */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index e0a76ac1bbbc..0e2cb00622d1 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -67,6 +67,7 @@ static const struct igc_stats igc_gstrings_stats[] = {
IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
IGC_STAT("tx_lpi_counter", stats.tlpic),
IGC_STAT("rx_lpi_counter", stats.rlpic),
+ IGC_STAT("qbv_config_change_errors", qbv_config_change_errors),
};
#define IGC_NETDEV_STAT(_net_stat) { \
@@ -567,8 +568,11 @@ static int igc_ethtool_set_eeprom(struct net_device *netdev,
return ret_val;
}
-static void igc_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static void
+igc_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
@@ -578,8 +582,11 @@ static void igc_ethtool_get_ringparam(struct net_device *netdev,
ring->tx_pending = adapter->tx_ring_count;
}
-static int igc_ethtool_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static int
+igc_ethtool_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct igc_adapter *adapter = netdev_priv(netdev);
struct igc_ring *temp_ring;
@@ -833,15 +840,15 @@ static void igc_ethtool_get_stats(struct net_device *netdev,
ring = adapter->tx_ring[j];
do {
- start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+ start = u64_stats_fetch_begin(&ring->tx_syncp);
data[i] = ring->tx_stats.packets;
data[i + 1] = ring->tx_stats.bytes;
data[i + 2] = ring->tx_stats.restart_queue;
- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
do {
- start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
+ start = u64_stats_fetch_begin(&ring->tx_syncp2);
restart2 = ring->tx_stats.restart_queue2;
- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
+ } while (u64_stats_fetch_retry(&ring->tx_syncp2, start));
data[i + 2] += restart2;
i += IGC_TX_QUEUE_STATS_LEN;
@@ -849,13 +856,13 @@ static void igc_ethtool_get_stats(struct net_device *netdev,
for (j = 0; j < adapter->num_rx_queues; j++) {
ring = adapter->rx_ring[j];
do {
- start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+ start = u64_stats_fetch_begin(&ring->rx_syncp);
data[i] = ring->rx_stats.packets;
data[i + 1] = ring->rx_stats.bytes;
data[i + 2] = ring->rx_stats.drops;
data[i + 3] = ring->rx_stats.csum_err;
data[i + 4] = ring->rx_stats.alloc_failed;
- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
i += IGC_RX_QUEUE_STATS_LEN;
}
spin_unlock(&adapter->stats64_lock);
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 587db7483f25..e1c572e0d4ef 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -53,12 +53,6 @@ enum igc_mac_type {
igc_num_macs /* List is 1-based, so subtract 1 for true count. */
};
-enum igc_phy_type {
- igc_phy_unknown = 0,
- igc_phy_none,
- igc_phy_i225,
-};
-
enum igc_media_type {
igc_media_type_unknown = 0,
igc_media_type_copper = 1,
@@ -68,8 +62,6 @@ enum igc_media_type {
enum igc_nvm_type {
igc_nvm_unknown = 0,
igc_nvm_eeprom_spi,
- igc_nvm_flash_hw,
- igc_nvm_invm,
};
struct igc_info {
@@ -97,8 +89,6 @@ struct igc_mac_info {
u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
- u8 forced_speed_duplex;
-
bool asf_firmware_present;
bool arc_subsystem_valid;
@@ -141,8 +131,6 @@ struct igc_nvm_info {
struct igc_phy_info {
struct igc_phy_operations ops;
- enum igc_phy_type type;
-
u32 addr;
u32 id;
u32 reset_delay_us; /* in usec */
@@ -285,6 +273,7 @@ struct igc_hw_stats {
u64 o2bspc;
u64 b2ospc;
u64 b2ogprc;
+ u64 txdrop;
};
struct net_device *igc_get_hw_dev(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
index b6807e16eea9..17546a035ab1 100644
--- a/drivers/net/ethernet/intel/igc/igc_i225.c
+++ b/drivers/net/ethernet/intel/igc/igc_i225.c
@@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igc_get_hw_semaphore_i225(hw))
- ; /* Empty */
+ /* Releasing the resource requires first getting the HW semaphore.
+ * If we fail to get the semaphore, there is nothing we can do,
+ * except log an error and quit. We are not allowed to hang here
+ * indefinitely, as it may cause denial of service or system crash.
+ */
+ if (igc_get_hw_semaphore_i225(hw)) {
+ hw_dbg("Failed to release SW_FW_SYNC.\n");
+ return;
+ }
swfw_sync = rd32(IGC_SW_FW_SYNC);
swfw_sync &= ~mask;
@@ -473,13 +480,11 @@ s32 igc_init_nvm_params_i225(struct igc_hw *hw)
/* NVM Function Pointers */
if (igc_get_flash_presence_i225(hw)) {
- hw->nvm.type = igc_nvm_flash_hw;
nvm->ops.read = igc_read_nvm_srrd_i225;
nvm->ops.write = igc_write_nvm_srwr_i225;
nvm->ops.validate = igc_validate_nvm_checksum_i225;
nvm->ops.update = igc_update_nvm_checksum_i225;
} else {
- hw->nvm.type = igc_nvm_invm;
nvm->ops.read = igc_read_nvm_eerd;
nvm->ops.write = NULL;
nvm->ops.validate = NULL;
@@ -588,20 +593,11 @@ s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
size = rd32(IGC_RXPBS) &
IGC_RXPBS_SIZE_I225_MASK;
- /* Calculations vary based on DMAC settings. */
- if (rd32(IGC_DMACR) & IGC_DMACR_DMAC_EN) {
- size -= (rd32(IGC_DMACR) &
- IGC_DMACR_DMACTHR_MASK) >>
- IGC_DMACR_DMACTHR_SHIFT;
- /* Convert size to bits. */
- size *= 1024 * 8;
- } else {
- /* Convert size to bytes, subtract the MTU, and then
- * convert the size to bits.
- */
- size *= 1024;
- size *= 8;
- }
+ /* Convert size to bytes, subtract the MTU, and then
+ * convert the size to bits.
+ */
+ size *= 1024;
+ size *= 8;
if (size < 0) {
hw_dbg("Invalid effective Rx buffer size %d\n",
diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c
index 67b8ffd21d8a..a5c4b19d71a2 100644
--- a/drivers/net/ethernet/intel/igc/igc_mac.c
+++ b/drivers/net/ethernet/intel/igc/igc_mac.c
@@ -193,7 +193,7 @@ s32 igc_force_mac_fc(struct igc_hw *hw)
* 1: Rx flow control is enabled (we can receive pause
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
- * frames but we do not receive pause frames).
+ * but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 8e448288ee26..1c4676882082 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -4,7 +4,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
-#include <linux/aer.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
@@ -505,6 +504,9 @@ int igc_setup_rx_resources(struct igc_ring *rx_ring)
u8 index = rx_ring->queue_index;
int size, desc_len, res;
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
rx_ring->q_vector->napi.napi_id);
if (res < 0) {
@@ -638,8 +640,11 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
else
buf_size = IGC_RXBUFFER_2048;
- srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
- srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl = rd32(IGC_SRRCTL(reg_idx));
+ srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK |
+ IGC_SRRCTL_DESCTYPE_MASK);
+ srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN);
+ srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size);
srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
wr32(IGC_SRRCTL(reg_idx), srrctl);
@@ -997,25 +1002,118 @@ static int igc_write_mc_addr_list(struct net_device *netdev)
return netdev_mc_count(netdev);
}
-static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
+static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
+ bool *first_flag, bool *insert_empty)
{
+ struct igc_adapter *adapter = netdev_priv(ring->netdev);
ktime_t cycle_time = adapter->cycle_time;
ktime_t base_time = adapter->base_time;
+ ktime_t now = ktime_get_clocktai();
+ ktime_t baset_est, end_of_cycle;
u32 launchtime;
+ s64 n;
- /* FIXME: when using ETF together with taprio, we may have a
- * case where 'delta' is larger than the cycle_time, this may
- * cause problems if we don't read the current value of
- * IGC_BASET, as the value writen into the launchtime
- * descriptor field may be misinterpreted.
+ n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
+
+ baset_est = ktime_add_ns(base_time, cycle_time * (n));
+ end_of_cycle = ktime_add_ns(baset_est, cycle_time);
+
+ if (ktime_compare(txtime, end_of_cycle) >= 0) {
+ if (baset_est != ring->last_ff_cycle) {
+ *first_flag = true;
+ ring->last_ff_cycle = baset_est;
+
+ if (ktime_compare(txtime, ring->last_tx_cycle) > 0)
+ *insert_empty = true;
+ }
+ }
+
+ /* Introduce a window at the end of the cycle in which packets
+ * may not honor their launchtime. A window of 5 usec is chosen to
+ * account for the time software needs to update the tail pointer
+ * and for the packets to be DMA'ed to the packet buffer.
*/
- div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
+ if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
+ netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
+ txtime);
+
+ ring->last_tx_cycle = end_of_cycle;
+
+ launchtime = ktime_sub_ns(txtime, baset_est);
+ if (launchtime > 0)
+ div_s64_rem(launchtime, cycle_time, &launchtime);
+ else
+ launchtime = 0;
return cpu_to_le32(launchtime);
}
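A plain-C walk-through of the launchtime arithmetic in igc_tx_launchtime() above: derive the current cycle index n from (now - base_time) / cycle_time, estimate that cycle's base and end, then express the launchtime as an offset within one cycle. Example values only, without the driver's ktime types.

        #include <stdio.h>

        int main(void)
        {
                long long base_time = 0;
                long long cycle_time = 1000000;              /* 1 ms cycle */
                long long now = 12345678;                    /* current TAI time, ns */
                long long txtime = 13500000;                 /* requested launch, ns */

                long long n = (now - base_time) / cycle_time;        /* 12 */
                long long baset_est = base_time + n * cycle_time;    /* 12000000 */
                long long end_of_cycle = baset_est + cycle_time;     /* 13000000 */

                long long launchtime = txtime - baset_est;
                if (launchtime > 0)
                        launchtime %= cycle_time;            /* offset within a cycle */
                else
                        launchtime = 0;

                /* prints "cycle 12, end 13000000, launchtime offset 500000" */
                printf("cycle %lld, end %lld, launchtime offset %lld\n",
                       n, end_of_cycle, launchtime);
                return 0;
        }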
+static int igc_init_empty_frame(struct igc_ring *ring,
+ struct igc_tx_buffer *buffer,
+ struct sk_buff *skb)
+{
+ unsigned int size;
+ dma_addr_t dma;
+
+ size = skb_headlen(skb);
+
+ dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring->dev, dma)) {
+ netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+ return -ENOMEM;
+ }
+
+ buffer->skb = skb;
+ buffer->protocol = 0;
+ buffer->bytecount = skb->len;
+ buffer->gso_segs = 1;
+ buffer->time_stamp = jiffies;
+ dma_unmap_len_set(buffer, len, skb->len);
+ dma_unmap_addr_set(buffer, dma, dma);
+
+ return 0;
+}
+
+static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
+ struct sk_buff *skb,
+ struct igc_tx_buffer *first)
+{
+ union igc_adv_tx_desc *desc;
+ u32 cmd_type, olinfo_status;
+ int err;
+
+ if (!igc_desc_unused(ring))
+ return -EBUSY;
+
+ err = igc_init_empty_frame(ring, first, skb);
+ if (err)
+ return err;
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+ first->bytecount;
+ olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+
+ desc = IGC_TX_DESC(ring, ring->next_to_use);
+ desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));
+
+ netdev_tx_sent_queue(txring_txq(ring), skb->len);
+
+ first->next_to_watch = desc;
+
+ ring->next_to_use++;
+ if (ring->next_to_use == ring->count)
+ ring->next_to_use = 0;
+
+ return 0;
+}
+
+#define IGC_EMPTY_FRAME_SIZE 60
+
static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
- struct igc_tx_buffer *first,
+ __le32 launch_time, bool first_flag,
u32 vlan_macip_lens, u32 type_tucmd,
u32 mss_l4len_idx)
{
@@ -1034,26 +1132,17 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
mss_l4len_idx |= tx_ring->reg_idx << 4;
+ if (first_flag)
+ mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;
+
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
- /* We assume there is always a valid Tx time available. Invalid times
- * should have been handled by the upper layers.
- */
- if (tx_ring->launchtime_enable) {
- struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
- ktime_t txtime = first->skb->tstamp;
-
- skb_txtime_consumed(first->skb);
- context_desc->launch_time = igc_tx_launchtime(adapter,
- txtime);
- } else {
- context_desc->launch_time = 0;
- }
+ context_desc->launch_time = launch_time;
}
-static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
+static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
+ __le32 launch_time, bool first_flag)
{
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
@@ -1093,7 +1182,8 @@ no_csum:
vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
- igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
+ igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
+ vlan_macip_lens, type_tucmd, 0);
}
static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
@@ -1317,6 +1407,7 @@ dma_error:
static int igc_tso(struct igc_ring *tx_ring,
struct igc_tx_buffer *first,
+ __le32 launch_time, bool first_flag,
u8 *hdr_len)
{
u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
@@ -1403,8 +1494,8 @@ static int igc_tso(struct igc_ring *tx_ring,
vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
- igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
- type_tucmd, mss_l4len_idx);
+ igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
+ vlan_macip_lens, type_tucmd, mss_l4len_idx);
return 1;
}
@@ -1412,11 +1503,15 @@ static int igc_tso(struct igc_ring *tx_ring,
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
struct igc_ring *tx_ring)
{
+ struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+ bool first_flag = false, insert_empty = false;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol = vlan_get_protocol(skb);
struct igc_tx_buffer *first;
+ __le32 launch_time = 0;
u32 tx_flags = 0;
unsigned short f;
+ ktime_t txtime;
u8 hdr_len = 0;
int tso = 0;
@@ -1430,11 +1525,40 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
count += TXD_USE_COUNT(skb_frag_size(
&skb_shinfo(skb)->frags[f]));
- if (igc_maybe_stop_tx(tx_ring, count + 3)) {
+ if (igc_maybe_stop_tx(tx_ring, count + 5)) {
/* this is a hard error */
return NETDEV_TX_BUSY;
}
+ if (!tx_ring->launchtime_enable)
+ goto done;
+
+ txtime = skb->tstamp;
+ skb->tstamp = ktime_set(0, 0);
+ launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
+
+ if (insert_empty) {
+ struct igc_tx_buffer *empty_info;
+ struct sk_buff *empty;
+ void *data;
+
+ empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
+ if (!empty)
+ goto done;
+
+ data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
+ memset(data, 0, IGC_EMPTY_FRAME_SIZE);
+
+ igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
+
+ if (igc_init_tx_empty_descriptor(tx_ring,
+ empty,
+ empty_info) < 0)
+ dev_kfree_skb_any(empty);
+ }
+
+done:
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
first->type = IGC_TX_BUFFER_TYPE_SKB;
@@ -1442,9 +1566,19 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+ if (tx_ring->max_sdu > 0) {
+ u32 max_sdu = 0;
+
+ max_sdu = tx_ring->max_sdu +
+ (skb_vlan_tagged(first->skb) ? VLAN_HLEN : 0);
+
+ if (first->bytecount > max_sdu) {
+ adapter->stats.txdrop++;
+ goto out_drop;
+ }
+ }
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
/* FIXME: add support for retrieving timestamps from
* the other timer registers before skipping the
* timestamping request.
@@ -1471,11 +1605,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
first->tx_flags = tx_flags;
first->protocol = protocol;
- tso = igc_tso(tx_ring, first, &hdr_len);
+ tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
if (tso < 0)
goto out_drop;
else if (!tso)
- igc_tx_csum(tx_ring, first);
+ igc_tx_csum(tx_ring, first, launch_time, first_flag);
igc_tx_map(tx_ring, first, hdr_len);
@@ -1718,24 +1852,26 @@ static void igc_add_rx_frag(struct igc_ring *rx_ring,
static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
struct igc_rx_buffer *rx_buffer,
- union igc_adv_rx_desc *rx_desc,
- unsigned int size)
+ struct xdp_buff *xdp)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+ unsigned int size = xdp->data_end - xdp->data;
unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
+ unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- net_prefetch(va);
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(va - IGC_SKB_PAD, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, IGC_SKB_PAD);
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
__skb_put(skb, size);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
igc_rx_buffer_flip(rx_buffer, truesize);
return skb;
@@ -1746,6 +1882,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct xdp_buff *xdp,
ktime_t timestamp)
{
+ unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int size = xdp->data_end - xdp->data;
unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
void *va = xdp->data;
@@ -1753,10 +1890,11 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- net_prefetch(va);
+ net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi,
+ IGC_RX_HDR_LEN + metasize);
if (unlikely(!skb))
return NULL;
@@ -1769,7 +1907,13 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+ memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
+ ALIGN(headlen + metasize, sizeof(long)));
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
/* update all of the pointers */
size -= headlen;
@@ -2116,65 +2260,102 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
return ok;
}
-static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
- struct xdp_frame *xdpf,
- struct igc_ring *ring)
-{
- dma_addr_t dma;
-
- dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
- if (dma_mapping_error(ring->dev, dma)) {
- netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
- return -ENOMEM;
- }
-
- buffer->type = IGC_TX_BUFFER_TYPE_XDP;
- buffer->xdpf = xdpf;
- buffer->protocol = 0;
- buffer->bytecount = xdpf->len;
- buffer->gso_segs = 1;
- buffer->time_stamp = jiffies;
- dma_unmap_len_set(buffer, len, xdpf->len);
- dma_unmap_addr_set(buffer, dma, dma);
- return 0;
-}
-
/* This function requires __netif_tx_lock is held by the caller. */
static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
struct xdp_frame *xdpf)
{
- struct igc_tx_buffer *buffer;
- union igc_adv_tx_desc *desc;
- u32 cmd_type, olinfo_status;
- int err;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 count, index = ring->next_to_use;
+ struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
+ struct igc_tx_buffer *buffer = head;
+ union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
+ u32 olinfo_status, len = xdpf->len, cmd_type;
+ void *data = xdpf->data;
+ u16 i;
- if (!igc_desc_unused(ring))
- return -EBUSY;
+ count = TXD_USE_COUNT(len);
+ for (i = 0; i < nr_frags; i++)
+ count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
- buffer = &ring->tx_buffer_info[ring->next_to_use];
- err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
- if (err)
- return err;
+ if (igc_maybe_stop_tx(ring, count + 3)) {
+ /* this is a hard error */
+ return -EBUSY;
+ }
- cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
- IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
- buffer->bytecount;
- olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+ i = 0;
+ head->bytecount = xdp_get_frame_len(xdpf);
+ head->type = IGC_TX_BUFFER_TYPE_XDP;
+ head->gso_segs = 1;
+ head->xdpf = xdpf;
- desc = IGC_TX_DESC(ring, ring->next_to_use);
- desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
- netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
+ for (;;) {
+ dma_addr_t dma;
- buffer->next_to_watch = desc;
+ dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring->dev, dma)) {
+ netdev_err_once(ring->netdev,
+ "Failed to map DMA for TX\n");
+ goto unmap;
+ }
- ring->next_to_use++;
- if (ring->next_to_use == ring->count)
- ring->next_to_use = 0;
+ dma_unmap_len_set(buffer, len, len);
+ dma_unmap_addr_set(buffer, dma, dma);
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | len;
+
+ desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ desc->read.buffer_addr = cpu_to_le64(dma);
+
+ buffer->protocol = 0;
+
+ if (++index == ring->count)
+ index = 0;
+
+ if (i == nr_frags)
+ break;
+
+ buffer = &ring->tx_buffer_info[index];
+ desc = IGC_TX_DESC(ring, index);
+ desc->read.olinfo_status = 0;
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ len = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
+ desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
+
+ netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
+ /* set the timestamp */
+ head->time_stamp = jiffies;
+ /* set next_to_watch value indicating a packet is present */
+ head->next_to_watch = desc;
+ ring->next_to_use = index;
return 0;
+
+unmap:
+ for (;;) {
+ buffer = &ring->tx_buffer_info[index];
+ if (dma_unmap_len(buffer, len))
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(buffer, dma),
+ dma_unmap_len(buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(buffer, len, 0);
+ if (buffer == head)
+ break;
+
+ if (!index)
+ index += ring->count;
+ index--;
+ }
+
+ return -ENOMEM;
}
static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
@@ -2231,7 +2412,7 @@ static int __igc_xdp_run_prog(struct igc_adapter *adapter,
return IGC_XDP_REDIRECT;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(adapter->netdev, prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
@@ -2354,7 +2535,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
if (!skb) {
xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
- igc_rx_offset(rx_ring) + pkt_offset, size, false);
+ igc_rx_offset(rx_ring) + pkt_offset,
+ size, true);
+ xdp_buff_clear_frags_flag(&xdp);
skb = igc_xdp_run_prog(adapter, &xdp);
}
@@ -2378,7 +2561,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
} else if (skb)
igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
- skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
+ skb = igc_build_skb(rx_ring, rx_buffer, &xdp);
else
skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
timestamp);
@@ -2435,21 +2618,24 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
struct xdp_buff *xdp)
{
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
- unsigned int datasize = xdp->data_end - xdp->data;
- unsigned int totalsize = metasize + datasize;
struct sk_buff *skb;
- skb = __napi_alloc_skb(&ring->q_vector->napi,
- xdp->data_end - xdp->data_hard_start,
+ net_prefetch(xdp->data_meta);
+
+ skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
- memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
- if (metasize)
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
return skb;
}
@@ -2769,7 +2955,9 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
if (tx_buffer->next_to_watch &&
time_after(jiffies, tx_buffer->time_stamp +
(adapter->tx_timeout_factor * HZ)) &&
- !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
+ !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
+ (rd32(IGC_TDH(tx_ring->reg_idx)) !=
+ readl(tx_ring->tail))) {
/* detected Tx unit hang */
netdev_err(tx_ring->netdev,
"Detected Tx Unit Hang\n"
@@ -4339,8 +4527,7 @@ static int igc_alloc_q_vector(struct igc_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- igc_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -4628,10 +4815,10 @@ void igc_update_stats(struct igc_adapter *adapter)
}
do {
- start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+ start = u64_stats_fetch_begin(&ring->rx_syncp);
_bytes = ring->rx_stats.bytes;
_packets = ring->rx_stats.packets;
- } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
bytes += _bytes;
packets += _packets;
}
@@ -4645,10 +4832,10 @@ void igc_update_stats(struct igc_adapter *adapter)
struct igc_ring *ring = adapter->tx_ring[i];
do {
- start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+ start = u64_stats_fetch_begin(&ring->tx_syncp);
_bytes = ring->tx_stats.bytes;
_packets = ring->tx_stats.packets;
- } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+ } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
bytes += _bytes;
packets += _packets;
}
@@ -4746,7 +4933,8 @@ void igc_update_stats(struct igc_adapter *adapter)
net_stats->tx_window_errors = adapter->stats.latecol;
net_stats->tx_carrier_errors = adapter->stats.tncrs;
- /* Tx Dropped needs to be maintained elsewhere */
+ /* Tx Dropped */
+ net_stats->tx_dropped = adapter->stats.txdrop;
/* Management Stats */
adapter->stats.mgptc += rd32(IGC_MGTPTC);
@@ -4897,6 +5085,24 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * igc_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ * @txqueue: queue number that timed out
+ **/
+static void igc_tx_timeout(struct net_device *netdev,
+ unsigned int __always_unused txqueue)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+
+ /* Do the reset outside of interrupt context */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ wr32(IGC_EICS,
+ (adapter->eims_enable_mask & ~adapter->eims_other));
+}
+
+/**
* igc_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
* @stats: rtnl_link_stats64 pointer
@@ -5323,10 +5529,17 @@ static void igc_watchdog_task(struct work_struct *work)
case SPEED_100:
case SPEED_1000:
case SPEED_2500:
- adapter->tx_timeout_factor = 7;
+ adapter->tx_timeout_factor = 1;
break;
}
+ /* Once the launch time has been set on the wire, there
+ * is a delay before the link speed can be determined
+ * based on link-up activity. Write into the register
+ * as soon as we know the correct link speed.
+ */
+ igc_tsn_adjust_txtime_offset(adapter);
+
if (adapter->link_speed != SPEED_1000)
goto no_wait;
@@ -5367,25 +5580,8 @@ no_wait:
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
- /* link is down, time to check for alternate media */
- if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
- if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
- schedule_work(&adapter->reset_task);
- /* return immediately */
- return;
- }
- }
pm_schedule_suspend(netdev->dev.parent,
MSEC_PER_SEC * 5);
-
- /* also check for alternate media here */
- } else if (!netif_carrier_ok(netdev) &&
- (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
- if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
- schedule_work(&adapter->reset_task);
- /* return immediately */
- return;
- }
}
}
@@ -5467,6 +5663,9 @@ static irqreturn_t igc_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ if (icr & IGC_ICR_TS)
+ igc_tsync_interrupt(adapter);
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -5510,6 +5709,9 @@ static irqreturn_t igc_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
+ if (icr & IGC_ICR_TS)
+ igc_tsync_interrupt(adapter);
+
napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
@@ -5773,6 +5975,7 @@ static bool validate_schedule(struct igc_adapter *adapter,
const struct tc_taprio_qopt_offload *qopt)
{
int queue_uses[IGC_MAX_TX_QUEUES] = { };
+ struct igc_hw *hw = &adapter->hw;
struct timespec64 now;
size_t n;
@@ -5785,14 +5988,17 @@ static bool validate_schedule(struct igc_adapter *adapter,
* in the future, it will hold all the packets until that
* time, causing a lot of TX Hangs, so to avoid that, we
* reject schedules that would start in the future.
+ * Note: Limitation above is no longer in i226.
*/
- if (!is_base_time_past(qopt->base_time, &now))
+ if (!is_base_time_past(qopt->base_time, &now) &&
+ igc_is_device_id_i225(hw))
return false;
for (n = 0; n < qopt->num_entries; n++) {
- const struct tc_taprio_sched_entry *e;
+ const struct tc_taprio_sched_entry *e, *prev;
int i;
+ prev = n ? &qopt->entries[n - 1] : NULL;
e = &qopt->entries[n];
/* i225 only supports "global" frame preemption
@@ -5801,13 +6007,18 @@ static bool validate_schedule(struct igc_adapter *adapter,
if (e->command != TC_TAPRIO_CMD_SET_GATES)
return false;
- for (i = 0; i < adapter->num_tx_queues; i++) {
- if (e->gate_mask & BIT(i))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (e->gate_mask & BIT(i)) {
queue_uses[i]++;
- if (queue_uses[i] > 1)
- return false;
- }
+ /* There are limitations: A single queue cannot
+ * be opened and closed multiple times per cycle
+ * unless the gate stays open. Check for it.
+ */
+ if (queue_uses[i] > 1 &&
+ !(prev->gate_mask & BIT(i)))
+ return false;
+ }
}
return true;
@@ -5835,12 +6046,14 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
adapter->base_time = 0;
adapter->cycle_time = NSEC_PER_SEC;
+ adapter->qbv_config_change_errors = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
ring->start_time = 0;
ring->end_time = NSEC_PER_SEC;
+ ring->max_sdu = 0;
}
return 0;
@@ -5849,13 +6062,21 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct tc_taprio_qopt_offload *qopt)
{
+ bool queue_configured[IGC_MAX_TX_QUEUES] = { };
+ struct igc_hw *hw = &adapter->hw;
u32 start_time = 0, end_time = 0;
size_t n;
+ int i;
+
+ adapter->qbv_enable = qopt->enable;
if (!qopt->enable)
return igc_tsn_clear_schedule(adapter);
- if (adapter->base_time)
+ if (qopt->base_time < 0)
+ return -ERANGE;
+
+ if (igc_is_device_id_i225(hw) && adapter->base_time)
return -EALREADY;
if (!validate_schedule(adapter, qopt))
@@ -5864,28 +6085,68 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
adapter->cycle_time = qopt->cycle_time;
adapter->base_time = qopt->base_time;
- /* FIXME: be a little smarter about cases when the gate for a
- * queue stays open for more than one entry.
- */
for (n = 0; n < qopt->num_entries; n++) {
struct tc_taprio_sched_entry *e = &qopt->entries[n];
- int i;
end_time += e->interval;
+ /* If any of the conditions below are true, we need to manually
+ * control the end time of the cycle.
+ * 1. Qbv users can specify a cycle time that is not equal
+ * to the total GCL intervals. Hence, recalculation is
+ * necessary here to exclude the time interval that
+ * exceeds the cycle time.
+ * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
+ * once the end of the list is reached, it will switch
+ * to the END_OF_CYCLE state and leave the gates in the
+ * same state until the next cycle is started.
+ */
+ if (end_time > adapter->cycle_time ||
+ n + 1 == qopt->num_entries)
+ end_time = adapter->cycle_time;
+
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
if (!(e->gate_mask & BIT(i)))
continue;
- ring->start_time = start_time;
+ /* Check whether a queue stays open for more than one
+ * entry. If so, keep the start and advance the end
+ * time.
+ */
+ if (!queue_configured[i])
+ ring->start_time = start_time;
ring->end_time = end_time;
+
+ queue_configured[i] = true;
}
start_time += e->interval;
}
+ /* Check whether a queue gets configured.
+ * If not, set the start and end time to the end time.
+ */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ if (!queue_configured[i]) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ ring->start_time = end_time;
+ ring->end_time = end_time;
+ }
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+ struct net_device *dev = adapter->netdev;
+
+ if (qopt->max_sdu[i])
+ ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len;
+ else
+ ring->max_sdu = 0;
+ }
+
return 0;
}
@@ -5973,12 +6234,37 @@ static int igc_tsn_enable_cbs(struct igc_adapter *adapter,
return igc_tsn_offload_apply(adapter);
}
+static int igc_tc_query_caps(struct igc_adapter *adapter,
+ struct tc_query_caps_base *base)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->broken_mqprio = true;
+
+ if (hw->mac.type == igc_i225) {
+ caps->supports_queue_max_sdu = true;
+ caps->gate_mask_per_txq = true;
+ }
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct igc_adapter *adapter = netdev_priv(dev);
switch (type) {
+ case TC_QUERY_CAPS:
+ return igc_tc_query_caps(adapter, type_data);
case TC_SETUP_QDISC_TAPRIO:
return igc_tsn_enable_qbv_scheduling(adapter, type_data);
@@ -6092,6 +6378,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_rx_mode = igc_set_rx_mode,
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
+ .ndo_tx_timeout = igc_tx_timeout,
.ndo_get_stats64 = igc_get_stats64,
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
@@ -6148,6 +6435,9 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
u32 value = 0;
+ if (IGC_REMOVED(hw_addr))
+ return ~value;
+
value = readl(&hw_addr[reg]);
/* reads should not return all F's */
@@ -6164,56 +6454,6 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
return value;
}
-int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
-{
- struct igc_mac_info *mac = &adapter->hw.mac;
-
- mac->autoneg = false;
-
- /* Make sure dplx is at most 1 bit and lsb of speed is not set
- * for the switch() below to work
- */
- if ((spd & 1) || (dplx & ~1))
- goto err_inval;
-
- switch (spd + dplx) {
- case SPEED_10 + DUPLEX_HALF:
- mac->forced_speed_duplex = ADVERTISE_10_HALF;
- break;
- case SPEED_10 + DUPLEX_FULL:
- mac->forced_speed_duplex = ADVERTISE_10_FULL;
- break;
- case SPEED_100 + DUPLEX_HALF:
- mac->forced_speed_duplex = ADVERTISE_100_HALF;
- break;
- case SPEED_100 + DUPLEX_FULL:
- mac->forced_speed_duplex = ADVERTISE_100_FULL;
- break;
- case SPEED_1000 + DUPLEX_FULL:
- mac->autoneg = true;
- adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
- break;
- case SPEED_1000 + DUPLEX_HALF: /* not supported */
- goto err_inval;
- case SPEED_2500 + DUPLEX_FULL:
- mac->autoneg = true;
- adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
- break;
- case SPEED_2500 + DUPLEX_HALF: /* not supported */
- default:
- goto err_inval;
- }
-
- /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
- adapter->hw.phy.mdix = AUTO_ALL_MODES;
-
- return 0;
-
-err_inval:
- netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n");
- return -EINVAL;
-}
-
/**
* igc_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -6232,31 +6472,23 @@ static int igc_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct igc_hw *hw;
const struct igc_info *ei = igc_info_tbl[ent->driver_data];
- int err, pci_using_dac;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, igc_driver_name);
if (err)
goto err_pci_reg;
- pci_enable_pcie_error_reporting(pdev);
-
err = pci_enable_ptm(pdev, NULL);
if (err < 0)
dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");
@@ -6348,13 +6580,15 @@ static int igc_probe(struct pci_dev *pdev,
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= netdev->features;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= netdev->vlan_features;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
@@ -6462,7 +6696,6 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -6510,8 +6743,6 @@ static void igc_remove(struct pci_dev *pdev)
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
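
The per-queue Qbv window computation done by the igc_save_qbv_schedule() hunk above reduces to a small amount of bookkeeping: accumulate the entry intervals, clamp the running end time to the cycle time (and to END_OF_CYCLE on the last entry), keep the original start time for a queue whose gate stays open across consecutive entries, and park unconfigured queues in a zero-length window at the end of the cycle. A minimal user-space sketch of that logic, with invented types and a toy schedule rather than the driver's structures:

/* sketch only; mirrors the start/end-time bookkeeping of the
 * igc_save_qbv_schedule() hunk above with made-up types
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define NQUEUES 4

struct entry { uint32_t gate_mask; uint64_t interval; };

int main(void)
{
	uint64_t cycle = 1000000;		/* 1 ms cycle */
	struct entry gcl[] = {
		{ 0x1, 300000 },		/* queue 0 open */
		{ 0x3, 300000 },		/* queues 0 and 1 open */
		{ 0x2, 500000 },		/* queue 1 open; exceeds the cycle */
	};
	uint64_t start[NQUEUES] = { 0 }, end[NQUEUES] = { 0 };
	bool configured[NQUEUES] = { false };
	uint64_t start_time = 0, end_time = 0;
	size_t n, nentries = sizeof(gcl) / sizeof(gcl[0]);
	int i;

	for (n = 0; n < nentries; n++) {
		end_time += gcl[n].interval;

		/* clamp to the cycle; the last entry runs to END_OF_CYCLE */
		if (end_time > cycle || n + 1 == nentries)
			end_time = cycle;

		for (i = 0; i < NQUEUES; i++) {
			if (!(gcl[n].gate_mask & (1u << i)))
				continue;
			if (!configured[i])	/* keep start if the gate stays open */
				start[i] = start_time;
			end[i] = end_time;
			configured[i] = true;
		}
		start_time += gcl[n].interval;
	}

	/* unused queues get a zero-length window at the end of the cycle */
	for (i = 0; i < NQUEUES; i++)
		if (!configured[i])
			start[i] = end[i] = end_time;

	for (i = 0; i < NQUEUES; i++)
		printf("queue %d: start=%llu end=%llu\n", i,
		       (unsigned long long)start[i], (unsigned long long)end[i]);
	return 0;
}

Run on this toy schedule, queue 0 stays open across the first two entries and gets a single 0..600000 ns window, queue 1 runs from 300000 ns to the end of the 1 ms cycle, and queues 2 and 3 are parked at the cycle end.
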
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 5cad31c3c7b0..53b77c969c85 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -141,24 +141,14 @@ void igc_power_down_phy_copper(struct igc_hw *hw)
* igc_check_downshift - Checks whether a downshift in speed occurred
* @hw: pointer to the HW structure
*
- * Success returns 0, Failure returns 1
- *
* A downshift is detected by querying the PHY link health.
*/
-s32 igc_check_downshift(struct igc_hw *hw)
+void igc_check_downshift(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
- s32 ret_val;
-
- switch (phy->type) {
- case igc_phy_i225:
- default:
- /* speed downshift not supported */
- phy->speed_downgraded = false;
- ret_val = 0;
- }
- return ret_val;
+ /* speed downshift not supported */
+ phy->speed_downgraded = false;
}
/**
@@ -581,7 +571,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
- usleep_range(500, 1000);
+ udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
@@ -638,7 +628,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
* the lower time out
*/
for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
- usleep_range(500, 1000);
+ udelay(50);
mdic = rd32(IGC_MDIC);
if (mdic & IGC_MDIC_READY)
break;
@@ -746,8 +736,6 @@ s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data)
if (ret_val)
return ret_val;
ret_val = igc_write_phy_reg_mdic(hw, offset, data);
- if (ret_val)
- return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_write_xmdio_reg(hw, (u16)offset, dev_addr,
@@ -779,8 +767,6 @@ s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
if (ret_val)
return ret_val;
ret_val = igc_read_phy_reg_mdic(hw, offset, data);
- if (ret_val)
- return ret_val;
hw->phy.ops.release(hw);
} else {
ret_val = igc_read_xmdio_reg(hw, (u16)offset, dev_addr,
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.h b/drivers/net/ethernet/intel/igc/igc_phy.h
index 1b031372d206..832a7e359f18 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.h
+++ b/drivers/net/ethernet/intel/igc/igc_phy.h
@@ -11,7 +11,7 @@ s32 igc_phy_hw_reset(struct igc_hw *hw);
s32 igc_get_phy_id(struct igc_hw *hw);
s32 igc_phy_has_link(struct igc_hw *hw, u32 iterations,
u32 usec_interval, bool *success);
-s32 igc_check_downshift(struct igc_hw *hw);
+void igc_check_downshift(struct igc_hw *hw);
s32 igc_setup_copper_link(struct igc_hw *hw);
void igc_power_up_phy_copper(struct igc_hw *hw);
void igc_power_down_phy_copper(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 30568e3544cd..4e10ced736db 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -15,7 +15,6 @@
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
-#define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
#define IGC_PTP_TX_TIMEOUT (HZ * 15)
#define IGC_PTM_STAT_SLEEP 2
@@ -323,7 +322,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
ts = ns_to_timespec64(ns);
if (rq->perout.index == 1) {
if (use_freq) {
- tsauxc_mask = IGC_TSAUXC_EN_CLK1;
+ tsauxc_mask = IGC_TSAUXC_EN_CLK1 | IGC_TSAUXC_ST1;
tsim_mask = 0;
} else {
tsauxc_mask = IGC_TSAUXC_EN_TT1;
@@ -334,7 +333,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
freqout = IGC_FREQOUT1;
} else {
if (use_freq) {
- tsauxc_mask = IGC_TSAUXC_EN_CLK0;
+ tsauxc_mask = IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0;
tsim_mask = 0;
} else {
tsauxc_mask = IGC_TSAUXC_EN_TT0;
@@ -348,10 +347,12 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
tsauxc = rd32(IGC_TSAUXC);
tsim = rd32(IGC_TSIM);
if (rq->perout.index == 1) {
- tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1);
+ tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1 |
+ IGC_TSAUXC_ST1);
tsim &= ~IGC_TSICR_TT1;
} else {
- tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0);
+ tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0 |
+ IGC_TSAUXC_ST0);
tsim &= ~IGC_TSICR_TT0;
}
if (on) {
@@ -416,10 +417,12 @@ static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
*
* We need to convert the system time value stored in the RX/TXSTMP registers
* into a hwtstamp which can be used by the upper level timestamping functions.
+ *
+ * Returns 0 on success.
**/
-static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
- struct skb_shared_hwtstamps *hwtstamps,
- u64 systim)
+static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 systim)
{
switch (adapter->hw.mac.type) {
case igc_i225:
@@ -429,8 +432,9 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
systim & 0xFFFFFFFF);
break;
default:
- break;
+ return -EINVAL;
}
+ return 0;
}
/**
@@ -560,10 +564,6 @@ static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter)
static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
struct hwtstamp_config *config)
{
- /* reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
igc_ptp_disable_tx_timestamp(adapter);
@@ -655,7 +655,8 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
regval = rd32(IGC_TXSTMPL);
regval |= (u64)rd32(IGC_TXSTMPH) << 32;
- igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval))
+ return;
switch (adapter->link_speed) {
case SPEED_10:
@@ -768,7 +769,20 @@ int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
*/
static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
{
- return IS_ENABLED(CONFIG_X86_TSC) ? pcie_ptm_enabled(adapter->pdev) : false;
+ if (!IS_ENABLED(CONFIG_X86_TSC))
+ return false;
+
+ /* FIXME: it was noticed that enabling support for PCIe PTM in
+ * some i225-V models could cause lockups when bringing the
+ * interface up/down. There should be no downsides to
+ * disabling crosstimestamping support for i225-V, as it
+ * doesn't have any PTP support. That way we gain some time
+ * while root causing the issue.
+ */
+ if (adapter->pdev->device == IGC_DEV_ID_I225_V)
+ return false;
+
+ return pcie_ptm_enabled(adapter->pdev);
}
static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
@@ -983,6 +997,17 @@ static void igc_ptp_time_restore(struct igc_adapter *adapter)
igc_ptp_write_i225(adapter, &ts);
}
+static void igc_ptm_stop(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ ctrl = rd32(IGC_PTM_CTRL);
+ ctrl &= ~IGC_PTM_CTRL_EN;
+
+ wr32(IGC_PTM_CTRL, ctrl);
+}
+
/**
* igc_ptp_suspend - Disable PTP work items and prepare for suspend
* @adapter: Board private structure
@@ -1000,8 +1025,10 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
- if (pci_device_is_present(adapter->pdev))
+ if (pci_device_is_present(adapter->pdev)) {
igc_ptp_time_save(adapter);
+ igc_ptm_stop(adapter);
+ }
}
/**
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index e197a33d93a0..dba5a5759b1c 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -59,9 +59,6 @@
#define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
#define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */
-/* MSI-X Table Register Descriptions */
-#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
-
/* RSS registers */
#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */
@@ -227,6 +224,7 @@
/* Transmit Scheduling Registers */
#define IGC_TQAVCTRL 0x3570
#define IGC_TXQCTL(_n) (0x3344 + 0x4 * (_n))
+#define IGC_GTXOFFSET 0x3310
#define IGC_BASET_L 0x3314
#define IGC_BASET_H 0x3318
#define IGC_QBVCYCLET 0x331C
@@ -294,7 +292,6 @@
/* LTR registers */
#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */
-#define IGC_DMACR 0x02508 /* DMA Coalescing Control Register */
#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */
#define IGC_LTRMAXV 0x5BB4 /* LTR Maximum Value */
@@ -306,7 +303,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg);
#define wr32(reg, val) \
do { \
u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
- writel((val), &hw_addr[(reg)]); \
+ if (!IGC_REMOVED(hw_addr)) \
+ writel((val), &hw_addr[(reg)]); \
} while (0)
#define rd32(reg) (igc_rd32(hw, reg))
@@ -318,4 +316,6 @@ do { \
#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2)))
+#define IGC_REMOVED(h) unlikely(!(h))
+
#endif
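
The igc_rd32() and wr32() changes above guard register access against surprise removal: once hw_addr has been cleared, reads return all Fs and writes are silently dropped instead of touching a stale mapping. A minimal stand-alone sketch of the same guard pattern, with invented names and a plain array standing in for the MMIO mapping:

/* sketch only; same "surprise removal" guards as the igc_rd32()/wr32()
 * changes above, with invented names
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t guarded_read32(volatile uint32_t *base, unsigned int reg)
{
	if (!base)			/* device gone: reads return all Fs */
		return ~0U;
	return base[reg / 4];
}

static void guarded_write32(volatile uint32_t *base, unsigned int reg, uint32_t val)
{
	if (!base)			/* device gone: drop the write */
		return;
	base[reg / 4] = val;
}

int main(void)
{
	uint32_t regs[16] = { 0 };

	guarded_write32(regs, 0x8, 0xabcd);
	printf("0x%x\n", (unsigned)guarded_read32(regs, 0x8));	/* 0xabcd */
	printf("0x%x\n", (unsigned)guarded_read32(NULL, 0x8));	/* 0xffffffff */
	return 0;
}
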
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index 0fce22de2ab8..94a2b0dfb54d 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Intel Corporation */
#include "igc.h"
+#include "igc_hw.h"
#include "igc_tsn.h"
static bool is_any_launchtime(struct igc_adapter *adapter)
@@ -36,7 +37,7 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
{
unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED;
- if (adapter->base_time)
+ if (adapter->qbv_enable)
new_flags |= IGC_FLAG_TSN_QBV_ENABLED;
if (is_any_launchtime(adapter))
@@ -48,6 +49,35 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
return new_flags;
}
+void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u16 txoffset;
+
+ if (!is_any_launchtime(adapter))
+ return;
+
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ txoffset = IGC_TXOFFSET_SPEED_10;
+ break;
+ case SPEED_100:
+ txoffset = IGC_TXOFFSET_SPEED_100;
+ break;
+ case SPEED_1000:
+ txoffset = IGC_TXOFFSET_SPEED_1000;
+ break;
+ case SPEED_2500:
+ txoffset = IGC_TXOFFSET_SPEED_2500;
+ break;
+ default:
+ txoffset = 0;
+ break;
+ }
+
+ wr32(IGC_GTXOFFSET, txoffset);
+}
+
/* Returns the TSN specific registers to their default values after
* the adapter is reset.
*/
@@ -57,12 +87,14 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
u32 tqavctrl;
int i;
+ wr32(IGC_GTXOFFSET, 0);
wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
- IGC_TQAVCTRL_ENHANCED_QAV);
+ IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
+
wr32(IGC_TQAVCTRL, tqavctrl);
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -82,25 +114,16 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
+ bool tsn_mode_reconfig = false;
u32 tqavctrl, baset_l, baset_h;
u32 sec, nsec, cycle;
ktime_t base_time, systim;
int i;
- cycle = adapter->cycle_time;
- base_time = adapter->base_time;
-
wr32(IGC_TSAUXC, 0);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
- tqavctrl = rd32(IGC_TQAVCTRL);
- tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
- wr32(IGC_TQAVCTRL, tqavctrl);
-
- wr32(IGC_QBVCYCLET_S, cycle);
- wr32(IGC_QBVCYCLET, cycle);
-
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
u32 txqctl = 0;
@@ -110,15 +133,8 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
wr32(IGC_STQT(i), ring->start_time);
wr32(IGC_ENDQT(i), ring->end_time);
- if (adapter->base_time) {
- /* If we have a base_time we are in "taprio"
- * mode and we need to be strict about the
- * cycles: only transmit a packet if it can be
- * completed during that cycle.
- */
- txqctl |= IGC_TXQCTL_STRICT_CYCLE |
- IGC_TXQCTL_STRICT_END;
- }
+ txqctl |= IGC_TXQCTL_STRICT_CYCLE |
+ IGC_TXQCTL_STRICT_END;
if (ring->launchtime_enable)
txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT;
@@ -210,21 +226,57 @@ skip_cbs:
wr32(IGC_TXQCTL(i), txqctl);
}
+ tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS;
+
+ if (tqavctrl & IGC_TQAVCTRL_TRANSMIT_MODE_TSN)
+ tsn_mode_reconfig = true;
+
+ tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
+
+ cycle = adapter->cycle_time;
+ base_time = adapter->base_time;
+
nsec = rd32(IGC_SYSTIML);
sec = rd32(IGC_SYSTIMH);
systim = ktime_set(sec, nsec);
-
if (ktime_compare(systim, base_time) > 0) {
- s64 n;
+ s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
- n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
base_time = ktime_add_ns(base_time, (n + 1) * cycle);
+
+ /* Increase the counter if scheduling into the past while
+ * Gate Control List (GCL) is running.
+ */
+ if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
+ tsn_mode_reconfig)
+ adapter->qbv_config_change_errors++;
+ } else {
+ /* According to datasheet section 7.5.2.9.3.3, FutScdDis bit
+ * has to be configured before the cycle time and base time.
+ * Tx won't hang if a GCL is already running,
+ * so in this case we don't need to set FutScdDis.
+ */
+ if (igc_is_device_id_i226(hw) &&
+ !(rd32(IGC_BASET_H) || rd32(IGC_BASET_L)))
+ tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS;
}
- baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
+ wr32(IGC_TQAVCTRL, tqavctrl);
+ wr32(IGC_QBVCYCLET_S, cycle);
+ wr32(IGC_QBVCYCLET, cycle);
+
+ baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
wr32(IGC_BASET_H, baset_h);
+
+ /* In i226, Future base time is only supported when FutScdDis bit
+ * is enabled and only active for re-configuration.
+ * In this case, initialize the base time with zero to create
+ * "re-configuration" scenario then only set the desired base time.
+ */
+ if (tqavctrl & IGC_TQAVCTRL_FUTSCDDIS)
+ wr32(IGC_BASET_L, 0);
wr32(IGC_BASET_L, baset_l);
return 0;
@@ -251,17 +303,14 @@ int igc_tsn_reset(struct igc_adapter *adapter)
int igc_tsn_offload_apply(struct igc_adapter *adapter)
{
- int err;
+ struct igc_hw *hw = &adapter->hw;
- if (netif_running(adapter->netdev)) {
+ if (netif_running(adapter->netdev) && igc_is_device_id_i225(hw)) {
schedule_work(&adapter->reset_task);
return 0;
}
- err = igc_tsn_enable_offload(adapter);
- if (err < 0)
- return err;
+ igc_tsn_reset(adapter);
- adapter->flags = igc_tsn_new_flags(adapter);
return 0;
}
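
The base-time handling in the igc_tsn_enable_offload() hunk above is simple cycle arithmetic: when SYSTIM has already passed the requested base time, the base time is advanced by a whole number of cycles so that it lands on the first cycle boundary in the future. A stand-alone sketch of the same calculation, with names invented for the example:

/* sketch only; same arithmetic as the systim/base_time block above */
#include <stdio.h>
#include <stdint.h>

/* Return a base time that lies on a cycle boundary strictly after `now`. */
static int64_t advance_base_time(int64_t base, int64_t cycle, int64_t now)
{
	if (now > base) {
		int64_t n = (now - base) / cycle;	/* whole cycles already elapsed */

		base += (n + 1) * cycle;		/* next boundary in the future */
	}
	return base;
}

int main(void)
{
	/* base 0, 1 ms cycle, now = 2.3 ms -> new base = 3 ms */
	printf("%lld\n", (long long)advance_base_time(0, 1000000, 2300000));
	return 0;
}

With base 0, a 1 ms cycle and SYSTIM at 2.3 ms, two whole cycles have elapsed, so the new base time becomes 3 ms, matching the (n + 1) * cycle adjustment performed in the hunk.
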
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
index 1512307f5a52..b53e6af560b7 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -6,5 +6,6 @@
int igc_tsn_offload_apply(struct igc_adapter *adapter);
int igc_tsn_reset(struct igc_adapter *adapter);
+void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);
#endif /* _IGC_BASE_H */
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index a8cf5374be47..e27af72aada8 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */
+#include <linux/if_vlan.h>
#include <net/xdp_sock_drv.h>
#include "igc.h"
@@ -28,6 +29,11 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
if (old_prog)
bpf_prog_put(old_prog);
+ if (prog)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+
if (if_running)
igc_open(dev);
diff --git a/drivers/net/ethernet/intel/ixgb/Makefile b/drivers/net/ethernet/intel/ixgb/Makefile
deleted file mode 100644
index 2433e9300a33..000000000000
--- a/drivers/net/ethernet/intel/ixgb/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Copyright(c) 1999 - 2008 Intel Corporation.
-#
-# Makefile for the Intel(R) PRO/10GbE ethernet driver
-#
-
-obj-$(CONFIG_IXGB) += ixgb.o
-
-ixgb-objs := ixgb_main.o ixgb_hw.o ixgb_ee.o ixgb_ethtool.o ixgb_param.o
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h
deleted file mode 100644
index 81ac39576803..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-#ifndef _IXGB_H_
-#define _IXGB_H_
-
-#include <linux/stddef.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/string.h>
-#include <linux/pagemap.h>
-#include <linux/dma-mapping.h>
-#include <linux/bitops.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <linux/capability.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#include <net/pkt_sched.h>
-#include <linux/list.h>
-#include <linux/reboot.h>
-#include <net/checksum.h>
-
-#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
-
-#define BAR_0 0
-#define BAR_1 1
-
-struct ixgb_adapter;
-#include "ixgb_hw.h"
-#include "ixgb_ee.h"
-#include "ixgb_ids.h"
-
-/* TX/RX descriptor defines */
-#define DEFAULT_TXD 256
-#define MAX_TXD 4096
-#define MIN_TXD 64
-
-/* hardware cannot reliably support more than 512 descriptors owned by
- * hardware descriptor cache otherwise an unreliable ring under heavy
- * receive load may result */
-#define DEFAULT_RXD 512
-#define MAX_RXD 512
-#define MIN_RXD 64
-
-/* Supported Rx Buffer Sizes */
-#define IXGB_RXBUFFER_2048 2048
-#define IXGB_RXBUFFER_4096 4096
-#define IXGB_RXBUFFER_8192 8192
-#define IXGB_RXBUFFER_16384 16384
-
-/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define IXGB_RX_BUFFER_WRITE 8 /* Must be power of 2 */
-
-/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
-struct ixgb_buffer {
- struct sk_buff *skb;
- dma_addr_t dma;
- unsigned long time_stamp;
- u16 length;
- u16 next_to_watch;
- u16 mapped_as_page;
-};
-
-struct ixgb_desc_ring {
- /* pointer to the descriptor ring memory */
- void *desc;
- /* physical address of the descriptor ring */
- dma_addr_t dma;
- /* length of descriptor ring in bytes */
- unsigned int size;
- /* number of descriptors in the ring */
- unsigned int count;
- /* next descriptor to associate a buffer with */
- unsigned int next_to_use;
- /* next descriptor to check for DD status bit */
- unsigned int next_to_clean;
- /* array of buffer information structs */
- struct ixgb_buffer *buffer_info;
-};
-
-#define IXGB_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
-
-#define IXGB_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
-#define IXGB_RX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_rx_desc)
-#define IXGB_TX_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_tx_desc)
-#define IXGB_CONTEXT_DESC(R, i) IXGB_GET_DESC(R, i, ixgb_context_desc)
-
-/* board specific private data structure */
-
-struct ixgb_adapter {
- struct timer_list watchdog_timer;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- u32 bd_number;
- u32 rx_buffer_len;
- u32 part_num;
- u16 link_speed;
- u16 link_duplex;
- struct work_struct tx_timeout_task;
-
- /* TX */
- struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
- unsigned int restart_queue;
- unsigned long timeo_start;
- u32 tx_cmd_type;
- u64 hw_csum_tx_good;
- u64 hw_csum_tx_error;
- u32 tx_int_delay;
- u32 tx_timeout_count;
- bool tx_int_delay_enable;
- bool detect_tx_hung;
-
- /* RX */
- struct ixgb_desc_ring rx_ring;
- u64 hw_csum_rx_error;
- u64 hw_csum_rx_good;
- u32 rx_int_delay;
- bool rx_csum;
-
- /* OS defined structs */
- struct napi_struct napi;
- struct net_device *netdev;
- struct pci_dev *pdev;
-
- /* structs defined in ixgb_hw.h */
- struct ixgb_hw hw;
- u16 msg_enable;
- struct ixgb_hw_stats stats;
- u32 alloc_rx_buff_failed;
- bool have_msi;
- unsigned long flags;
-};
-
-enum ixgb_state_t {
- /* TBD
- __IXGB_TESTING,
- __IXGB_RESETTING,
- */
- __IXGB_DOWN
-};
-
-/* Exported from other modules */
-void ixgb_check_options(struct ixgb_adapter *adapter);
-void ixgb_set_ethtool_ops(struct net_device *netdev);
-extern char ixgb_driver_name[];
-
-void ixgb_set_speed_duplex(struct net_device *netdev);
-
-int ixgb_up(struct ixgb_adapter *adapter);
-void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
-void ixgb_reset(struct ixgb_adapter *adapter);
-int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
-int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
-void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
-void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
-void ixgb_update_stats(struct ixgb_adapter *adapter);
-
-
-#endif /* _IXGB_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c b/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
deleted file mode 100644
index 129286fc1634..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.c
+++ /dev/null
@@ -1,580 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include "ixgb_hw.h"
-#include "ixgb_ee.h"
-/* Local prototypes */
-static u16 ixgb_shift_in_bits(struct ixgb_hw *hw);
-
-static void ixgb_shift_out_bits(struct ixgb_hw *hw,
- u16 data,
- u16 count);
-static void ixgb_standby_eeprom(struct ixgb_hw *hw);
-
-static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw);
-
-static void ixgb_cleanup_eeprom(struct ixgb_hw *hw);
-
-/******************************************************************************
- * Raises the EEPROM's clock input.
- *
- * hw - Struct containing variables accessed by shared code
- * eecd_reg - EECD's current value
- *****************************************************************************/
-static void
-ixgb_raise_clock(struct ixgb_hw *hw,
- u32 *eecd_reg)
-{
- /* Raise the clock input to the EEPROM (by setting the SK bit), and then
- * wait 50 microseconds.
- */
- *eecd_reg = *eecd_reg | IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, *eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-}
-
-/******************************************************************************
- * Lowers the EEPROM's clock input.
- *
- * hw - Struct containing variables accessed by shared code
- * eecd_reg - EECD's current value
- *****************************************************************************/
-static void
-ixgb_lower_clock(struct ixgb_hw *hw,
- u32 *eecd_reg)
-{
- /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
- * wait 50 microseconds.
- */
- *eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, *eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-}
-
-/******************************************************************************
- * Shift data bits out to the EEPROM.
- *
- * hw - Struct containing variables accessed by shared code
- * data - data to send to the EEPROM
- * count - number of bits to shift out
- *****************************************************************************/
-static void
-ixgb_shift_out_bits(struct ixgb_hw *hw,
- u16 data,
- u16 count)
-{
- u32 eecd_reg;
- u32 mask;
-
- /* We need to shift "count" bits out to the EEPROM. So, value in the
- * "data" parameter will be shifted out to the EEPROM one bit at a time.
- * In order to do this, "data" must be broken down into bits.
- */
- mask = 0x01 << (count - 1);
- eecd_reg = IXGB_READ_REG(hw, EECD);
- eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
- do {
- /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
- * and then raising and then lowering the clock (the SK bit controls
- * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
- * by setting "DI" to "0" and then raising and then lowering the clock.
- */
- eecd_reg &= ~IXGB_EECD_DI;
-
- if (data & mask)
- eecd_reg |= IXGB_EECD_DI;
-
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- IXGB_WRITE_FLUSH(hw);
-
- udelay(50);
-
- ixgb_raise_clock(hw, &eecd_reg);
- ixgb_lower_clock(hw, &eecd_reg);
-
- mask = mask >> 1;
-
- } while (mask);
-
- /* We leave the "DI" bit set to "0" when we leave this routine. */
- eecd_reg &= ~IXGB_EECD_DI;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
-}
-
-/******************************************************************************
- * Shift data bits in from the EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static u16
-ixgb_shift_in_bits(struct ixgb_hw *hw)
-{
- u32 eecd_reg;
- u32 i;
- u16 data;
-
- /* In order to read a register from the EEPROM, we need to shift 16 bits
- * in from the EEPROM. Bits are "shifted in" by raising the clock input to
- * the EEPROM (setting the SK bit), and then reading the value of the "DO"
- * bit. During this "shifting in" process the "DI" bit should always be
- * clear..
- */
-
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
- data = 0;
-
- for (i = 0; i < 16; i++) {
- data = data << 1;
- ixgb_raise_clock(hw, &eecd_reg);
-
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- eecd_reg &= ~(IXGB_EECD_DI);
- if (eecd_reg & IXGB_EECD_DO)
- data |= 1;
-
- ixgb_lower_clock(hw, &eecd_reg);
- }
-
- return data;
-}
-
-/******************************************************************************
- * Prepares EEPROM for access
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
- * function should be called before issuing a command to the EEPROM.
- *****************************************************************************/
-static void
-ixgb_setup_eeprom(struct ixgb_hw *hw)
-{
- u32 eecd_reg;
-
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- /* Clear SK and DI */
- eecd_reg &= ~(IXGB_EECD_SK | IXGB_EECD_DI);
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
-
- /* Set CS */
- eecd_reg |= IXGB_EECD_CS;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
-}
-
-/******************************************************************************
- * Returns EEPROM to a "standby" state
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void
-ixgb_standby_eeprom(struct ixgb_hw *hw)
-{
- u32 eecd_reg;
-
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- /* Deselect EEPROM */
- eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_SK);
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-
- /* Clock high */
- eecd_reg |= IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-
- /* Select EEPROM */
- eecd_reg |= IXGB_EECD_CS;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-
- /* Clock low */
- eecd_reg &= ~IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-}
-
-/******************************************************************************
- * Raises then lowers the EEPROM's clock pin
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void
-ixgb_clock_eeprom(struct ixgb_hw *hw)
-{
- u32 eecd_reg;
-
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- /* Rising edge of clock */
- eecd_reg |= IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-
- /* Falling edge of clock */
- eecd_reg &= ~IXGB_EECD_SK;
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
- IXGB_WRITE_FLUSH(hw);
- udelay(50);
-}
-
-/******************************************************************************
- * Terminates a command by lowering the EEPROM's chip select pin
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void
-ixgb_cleanup_eeprom(struct ixgb_hw *hw)
-{
- u32 eecd_reg;
-
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- eecd_reg &= ~(IXGB_EECD_CS | IXGB_EECD_DI);
-
- IXGB_WRITE_REG(hw, EECD, eecd_reg);
-
- ixgb_clock_eeprom(hw);
-}
-
-/******************************************************************************
- * Waits for the EEPROM to finish the current command.
- *
- * hw - Struct containing variables accessed by shared code
- *
- * The command is done when the EEPROM's data out pin goes high.
- *
- * Returns:
- * true: EEPROM data pin is high before timeout.
- * false: Time expired.
- *****************************************************************************/
-static bool
-ixgb_wait_eeprom_command(struct ixgb_hw *hw)
-{
- u32 eecd_reg;
- u32 i;
-
- /* Toggle the CS line. This in effect tells to EEPROM to actually execute
- * the command in question.
- */
- ixgb_standby_eeprom(hw);
-
- /* Now read DO repeatedly until is high (equal to '1'). The EEPROM will
- * signal that the command has been completed by raising the DO signal.
- * If DO does not go high in 10 milliseconds, then error out.
- */
- for (i = 0; i < 200; i++) {
- eecd_reg = IXGB_READ_REG(hw, EECD);
-
- if (eecd_reg & IXGB_EECD_DO)
- return true;
-
- udelay(50);
- }
- ASSERT(0);
- return false;
-}
-
-/******************************************************************************
- * Verifies that the EEPROM has a valid checksum
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Reads the first 64 16 bit words of the EEPROM and sums the values read.
- * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
- * valid.
- *
- * Returns:
- * true: Checksum is valid
- * false: Checksum is not valid.
- *****************************************************************************/
-bool
-ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
-{
- u16 checksum = 0;
- u16 i;
-
- for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
- checksum += ixgb_read_eeprom(hw, i);
-
- if (checksum == (u16) EEPROM_SUM)
- return true;
- else
- return false;
-}
-
-/******************************************************************************
- * Calculates the EEPROM checksum and writes it to the EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
- * Writes the difference to word offset 63 of the EEPROM.
- *****************************************************************************/
-void
-ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
-{
- u16 checksum = 0;
- u16 i;
-
- for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
- checksum += ixgb_read_eeprom(hw, i);
-
- checksum = (u16) EEPROM_SUM - checksum;
-
- ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
-}
-
-/******************************************************************************
- * Writes a 16 bit word to a given offset in the EEPROM.
- *
- * hw - Struct containing variables accessed by shared code
- * reg - offset within the EEPROM to be written to
- * data - 16 bit word to be written to the EEPROM
- *
- * If ixgb_update_eeprom_checksum is not called after this function, the
- * EEPROM will most likely contain an invalid checksum.
- *
- *****************************************************************************/
-void
-ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data)
-{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- /* Prepare the EEPROM for writing */
- ixgb_setup_eeprom(hw);
-
- /* Send the 9-bit EWEN (write enable) command to the EEPROM (5-bit opcode
- * plus 4-bit dummy). This puts the EEPROM into write/erase mode.
- */
- ixgb_shift_out_bits(hw, EEPROM_EWEN_OPCODE, 5);
- ixgb_shift_out_bits(hw, 0, 4);
-
- /* Prepare the EEPROM */
- ixgb_standby_eeprom(hw);
-
- /* Send the Write command (3-bit opcode + 6-bit addr) */
- ixgb_shift_out_bits(hw, EEPROM_WRITE_OPCODE, 3);
- ixgb_shift_out_bits(hw, offset, 6);
-
- /* Send the data */
- ixgb_shift_out_bits(hw, data, 16);
-
- ixgb_wait_eeprom_command(hw);
-
- /* Recover from write */
- ixgb_standby_eeprom(hw);
-
- /* Send the 9-bit EWDS (write disable) command to the EEPROM (5-bit
- * opcode plus 4-bit dummy). This takes the EEPROM out of write/erase
- * mode.
- */
- ixgb_shift_out_bits(hw, EEPROM_EWDS_OPCODE, 5);
- ixgb_shift_out_bits(hw, 0, 4);
-
- /* Done with writing */
- ixgb_cleanup_eeprom(hw);
-
- /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
- ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
-}
-
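Because ixgb_write_eeprom() leaves the stored checksum stale, a caller that rewrites a single word is expected to follow the write with ixgb_update_eeprom_checksum(). A minimal sketch of such a caller, assuming the driver's ixgb_hw.h and ixgb_ee.h declarations; ixgb_update_word() is a hypothetical helper, not a function in this driver:

/* Hypothetical helper: rewrite one EEPROM word and keep word 63 consistent. */
static void ixgb_update_word(struct ixgb_hw *hw, u16 offset, u16 data)
{
        if (ixgb_read_eeprom(hw, offset) == data)
                return;                         /* unchanged, skip the EEPROM write cycle */

        ixgb_write_eeprom(hw, offset, data);
        ixgb_update_eeprom_checksum(hw);        /* recompute word 63 */
}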
-/******************************************************************************
- * Reads a 16 bit word from the EEPROM.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - offset of 16 bit word in the EEPROM to read
- *
- * Returns:
- * The 16-bit value read from the eeprom
- *****************************************************************************/
-u16
-ixgb_read_eeprom(struct ixgb_hw *hw,
- u16 offset)
-{
- u16 data;
-
- /* Prepare the EEPROM for reading */
- ixgb_setup_eeprom(hw);
-
- /* Send the READ command (opcode + addr) */
- ixgb_shift_out_bits(hw, EEPROM_READ_OPCODE, 3);
- /*
- * We have a 64-word EEPROM, so there are 6 address bits
- */
- ixgb_shift_out_bits(hw, offset, 6);
-
- /* Read the data */
- data = ixgb_shift_in_bits(hw);
-
- /* End this read operation */
- ixgb_standby_eeprom(hw);
-
- return data;
-}
-
-/******************************************************************************
- * Reads eeprom and stores data in shared structure.
- * Validates eeprom checksum and eeprom signature.
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * true: if eeprom read is successful
- * false: otherwise.
- *****************************************************************************/
-bool
-ixgb_get_eeprom_data(struct ixgb_hw *hw)
-{
- u16 i;
- u16 checksum = 0;
- struct ixgb_ee_map_type *ee_map;
-
- ENTER();
-
- ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- pr_debug("Reading eeprom data\n");
- for (i = 0; i < IXGB_EEPROM_SIZE ; i++) {
- u16 ee_data;
- ee_data = ixgb_read_eeprom(hw, i);
- checksum += ee_data;
- hw->eeprom[i] = cpu_to_le16(ee_data);
- }
-
- if (checksum != (u16) EEPROM_SUM) {
- pr_debug("Checksum invalid\n");
- /* clear the init_ctrl_reg_1 to signify that the cache is
- * invalidated */
- ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
- return false;
- }
-
- if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
- != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
- pr_debug("Signature invalid\n");
- return false;
- }
-
- return true;
-}
-
-/******************************************************************************
- * Local function to check if the eeprom signature is good.
- * If the signature is not valid, calls ixgb_get_eeprom_data to re-read
- * and validate the EEPROM.
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * true: eeprom signature was good and the eeprom read was successful
- * false: otherwise.
- ******************************************************************************/
-static bool
-ixgb_check_and_get_eeprom_data(struct ixgb_hw *hw)
-{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
- == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
- return true;
- } else {
- return ixgb_get_eeprom_data(hw);
- }
-}
-
-/******************************************************************************
- * return a word from the eeprom
- *
- * hw - Struct containing variables accessed by shared code
- * index - Offset of eeprom word
- *
- * Returns:
- * Word at indexed offset in eeprom, if valid, 0 otherwise.
- ******************************************************************************/
-__le16
-ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index)
-{
-
- if (index < IXGB_EEPROM_SIZE && ixgb_check_and_get_eeprom_data(hw))
- return hw->eeprom[index];
-
- return 0;
-}
-
-/******************************************************************************
- * return the mac address from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- * mac_addr - Ethernet Address if EEPROM contents are valid, 0 otherwise
- *
- * Returns: None.
- ******************************************************************************/
-void
-ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
- u8 *mac_addr)
-{
- int i;
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- ENTER();
-
- if (ixgb_check_and_get_eeprom_data(hw)) {
- for (i = 0; i < ETH_ALEN; i++) {
- mac_addr[i] = ee_map->mac_addr[i];
- }
- pr_debug("eeprom mac address = %pM\n", mac_addr);
- }
-}
-
-
-/******************************************************************************
- * return the Printed Board Assembly number from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * PBA number if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-u32
-ixgb_get_ee_pba_number(struct ixgb_hw *hw)
-{
- if (ixgb_check_and_get_eeprom_data(hw))
- return le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
- | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16);
-
- return 0;
-}
-
-
-/******************************************************************************
- * return the Device Id from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- * Device Id if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-u16
-ixgb_get_ee_device_id(struct ixgb_hw *hw)
-{
- struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
- if (ixgb_check_and_get_eeprom_data(hw))
- return le16_to_cpu(ee_map->device_id);
-
- return 0;
-}
-
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h b/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
deleted file mode 100644
index 3ee0a09e5d0a..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ee.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-#ifndef _IXGB_EE_H_
-#define _IXGB_EE_H_
-
-#define IXGB_EEPROM_SIZE 64 /* Size in words */
-
-/* EEPROM Commands */
-#define EEPROM_READ_OPCODE 0x6 /* EEPROM read opcode */
-#define EEPROM_WRITE_OPCODE 0x5 /* EEPROM write opcode */
-#define EEPROM_ERASE_OPCODE 0x7 /* EEPROM erase opcode */
-#define EEPROM_EWEN_OPCODE 0x13 /* EEPROM erase/write enable */
-#define EEPROM_EWDS_OPCODE 0x10 /* EEPROM erase/write disable */
-
-/* EEPROM MAP (Word Offsets) */
-#define EEPROM_IA_1_2_REG 0x0000
-#define EEPROM_IA_3_4_REG 0x0001
-#define EEPROM_IA_5_6_REG 0x0002
-#define EEPROM_COMPATIBILITY_REG 0x0003
-#define EEPROM_PBA_1_2_REG 0x0008
-#define EEPROM_PBA_3_4_REG 0x0009
-#define EEPROM_INIT_CONTROL1_REG 0x000A
-#define EEPROM_SUBSYS_ID_REG 0x000B
-#define EEPROM_SUBVEND_ID_REG 0x000C
-#define EEPROM_DEVICE_ID_REG 0x000D
-#define EEPROM_VENDOR_ID_REG 0x000E
-#define EEPROM_INIT_CONTROL2_REG 0x000F
-#define EEPROM_SWDPINS_REG 0x0020
-#define EEPROM_CIRCUIT_CTRL_REG 0x0021
-#define EEPROM_D0_D3_POWER_REG 0x0022
-#define EEPROM_FLASH_VERSION 0x0032
-#define EEPROM_CHECKSUM_REG 0x003F
-
-/* Mask bits for fields in Word 0x0a of the EEPROM */
-
-#define EEPROM_ICW1_SIGNATURE_MASK 0xC000
-#define EEPROM_ICW1_SIGNATURE_VALID 0x4000
-#define EEPROM_ICW1_SIGNATURE_CLEAR 0x0000
-
-/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
-#define EEPROM_SUM 0xBABA
-
-/* EEPROM Map Sizes (Byte Counts) */
-#define PBA_SIZE 4
-
-/* EEPROM Map defines (WORD OFFSETS)*/
-
-/* EEPROM structure */
-struct ixgb_ee_map_type {
- u8 mac_addr[ETH_ALEN];
- __le16 compatibility;
- __le16 reserved1[4];
- __le32 pba_number;
- __le16 init_ctrl_reg_1;
- __le16 subsystem_id;
- __le16 subvendor_id;
- __le16 device_id;
- __le16 vendor_id;
- __le16 init_ctrl_reg_2;
- __le16 oem_reserved[16];
- __le16 swdpins_reg;
- __le16 circuit_ctrl_reg;
- u8 d3_power;
- u8 d0_power;
- __le16 reserved2[28];
- __le16 checksum;
-};
-
-/* EEPROM Functions */
-u16 ixgb_read_eeprom(struct ixgb_hw *hw, u16 reg);
-
-bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw);
-
-void ixgb_update_eeprom_checksum(struct ixgb_hw *hw);
-
-void ixgb_write_eeprom(struct ixgb_hw *hw, u16 reg, u16 data);
-
-#endif /* IXGB_EE_H */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
deleted file mode 100644
index 582099a5ad41..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ /dev/null
@@ -1,638 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-/* ethtool support for ixgb */
-
-#include "ixgb.h"
-
-#include <linux/uaccess.h>
-
-#define IXGB_ALL_RAR_ENTRIES 16
-
-enum {NETDEV_STATS, IXGB_STATS};
-
-struct ixgb_stats {
- char stat_string[ETH_GSTRING_LEN];
- int type;
- int sizeof_stat;
- int stat_offset;
-};
-
-#define IXGB_STAT(m) IXGB_STATS, \
- sizeof_field(struct ixgb_adapter, m), \
- offsetof(struct ixgb_adapter, m)
-#define IXGB_NETDEV_STAT(m) NETDEV_STATS, \
- sizeof_field(struct net_device, m), \
- offsetof(struct net_device, m)
-
-static struct ixgb_stats ixgb_gstrings_stats[] = {
- {"rx_packets", IXGB_NETDEV_STAT(stats.rx_packets)},
- {"tx_packets", IXGB_NETDEV_STAT(stats.tx_packets)},
- {"rx_bytes", IXGB_NETDEV_STAT(stats.rx_bytes)},
- {"tx_bytes", IXGB_NETDEV_STAT(stats.tx_bytes)},
- {"rx_errors", IXGB_NETDEV_STAT(stats.rx_errors)},
- {"tx_errors", IXGB_NETDEV_STAT(stats.tx_errors)},
- {"rx_dropped", IXGB_NETDEV_STAT(stats.rx_dropped)},
- {"tx_dropped", IXGB_NETDEV_STAT(stats.tx_dropped)},
- {"multicast", IXGB_NETDEV_STAT(stats.multicast)},
- {"collisions", IXGB_NETDEV_STAT(stats.collisions)},
-
-/* { "rx_length_errors", IXGB_NETDEV_STAT(stats.rx_length_errors) }, */
- {"rx_over_errors", IXGB_NETDEV_STAT(stats.rx_over_errors)},
- {"rx_crc_errors", IXGB_NETDEV_STAT(stats.rx_crc_errors)},
- {"rx_frame_errors", IXGB_NETDEV_STAT(stats.rx_frame_errors)},
- {"rx_no_buffer_count", IXGB_STAT(stats.rnbc)},
- {"rx_fifo_errors", IXGB_NETDEV_STAT(stats.rx_fifo_errors)},
- {"rx_missed_errors", IXGB_NETDEV_STAT(stats.rx_missed_errors)},
- {"tx_aborted_errors", IXGB_NETDEV_STAT(stats.tx_aborted_errors)},
- {"tx_carrier_errors", IXGB_NETDEV_STAT(stats.tx_carrier_errors)},
- {"tx_fifo_errors", IXGB_NETDEV_STAT(stats.tx_fifo_errors)},
- {"tx_heartbeat_errors", IXGB_NETDEV_STAT(stats.tx_heartbeat_errors)},
- {"tx_window_errors", IXGB_NETDEV_STAT(stats.tx_window_errors)},
- {"tx_deferred_ok", IXGB_STAT(stats.dc)},
- {"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
- {"tx_restart_queue", IXGB_STAT(restart_queue) },
- {"rx_long_length_errors", IXGB_STAT(stats.roc)},
- {"rx_short_length_errors", IXGB_STAT(stats.ruc)},
- {"tx_tcp_seg_good", IXGB_STAT(stats.tsctc)},
- {"tx_tcp_seg_failed", IXGB_STAT(stats.tsctfc)},
- {"rx_flow_control_xon", IXGB_STAT(stats.xonrxc)},
- {"rx_flow_control_xoff", IXGB_STAT(stats.xoffrxc)},
- {"tx_flow_control_xon", IXGB_STAT(stats.xontxc)},
- {"tx_flow_control_xoff", IXGB_STAT(stats.xofftxc)},
- {"rx_csum_offload_good", IXGB_STAT(hw_csum_rx_good)},
- {"rx_csum_offload_errors", IXGB_STAT(hw_csum_rx_error)},
- {"tx_csum_offload_good", IXGB_STAT(hw_csum_tx_good)},
- {"tx_csum_offload_errors", IXGB_STAT(hw_csum_tx_error)}
-};
-
-#define IXGB_STATS_LEN ARRAY_SIZE(ixgb_gstrings_stats)
-
-static int
-ixgb_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *cmd)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- ethtool_link_ksettings_zero_link_mode(cmd, supported);
- ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-
- ethtool_link_ksettings_zero_link_mode(cmd, advertising);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
-
- cmd->base.port = PORT_FIBRE;
-
- if (netif_carrier_ok(adapter->netdev)) {
- cmd->base.speed = SPEED_10000;
- cmd->base.duplex = DUPLEX_FULL;
- } else {
- cmd->base.speed = SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
- }
-
- cmd->base.autoneg = AUTONEG_DISABLE;
- return 0;
-}
-
-void ixgb_set_speed_duplex(struct net_device *netdev)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- /* be optimistic about our link, since we were up before */
- adapter->link_speed = 10000;
- adapter->link_duplex = FULL_DUPLEX;
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
-}
-
-static int
-ixgb_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- u32 speed = cmd->base.speed;
-
- if (cmd->base.autoneg == AUTONEG_ENABLE ||
- (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
- return -EINVAL;
-
- if (netif_running(adapter->netdev)) {
- ixgb_down(adapter, true);
- ixgb_reset(adapter);
- ixgb_up(adapter);
- ixgb_set_speed_duplex(netdev);
- } else
- ixgb_reset(adapter);
-
- return 0;
-}
-
-static void
-ixgb_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_hw *hw = &adapter->hw;
-
- pause->autoneg = AUTONEG_DISABLE;
-
- if (hw->fc.type == ixgb_fc_rx_pause)
- pause->rx_pause = 1;
- else if (hw->fc.type == ixgb_fc_tx_pause)
- pause->tx_pause = 1;
- else if (hw->fc.type == ixgb_fc_full) {
- pause->rx_pause = 1;
- pause->tx_pause = 1;
- }
-}
-
-static int
-ixgb_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_hw *hw = &adapter->hw;
-
- if (pause->autoneg == AUTONEG_ENABLE)
- return -EINVAL;
-
- if (pause->rx_pause && pause->tx_pause)
- hw->fc.type = ixgb_fc_full;
- else if (pause->rx_pause && !pause->tx_pause)
- hw->fc.type = ixgb_fc_rx_pause;
- else if (!pause->rx_pause && pause->tx_pause)
- hw->fc.type = ixgb_fc_tx_pause;
- else if (!pause->rx_pause && !pause->tx_pause)
- hw->fc.type = ixgb_fc_none;
-
- if (netif_running(adapter->netdev)) {
- ixgb_down(adapter, true);
- ixgb_up(adapter);
- ixgb_set_speed_duplex(netdev);
- } else
- ixgb_reset(adapter);
-
- return 0;
-}
-
-static u32
-ixgb_get_msglevel(struct net_device *netdev)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- return adapter->msg_enable;
-}
-
-static void
-ixgb_set_msglevel(struct net_device *netdev, u32 data)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- adapter->msg_enable = data;
-}
-#define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_
-
-static int
-ixgb_get_regs_len(struct net_device *netdev)
-{
-#define IXGB_REG_DUMP_LEN 136*sizeof(u32)
- return IXGB_REG_DUMP_LEN;
-}
-
-static void
-ixgb_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_hw *hw = &adapter->hw;
- u32 *reg = p;
- u32 *reg_start = reg;
- u8 i;
-
- /* the 1 (one) below indicates an attempt at versioning; if the
- * interface in ethtool or the driver changes, this 1 should be
- * incremented */
- regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id;
-
- /* General Registers */
- *reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */
- *reg++ = IXGB_READ_REG(hw, CTRL1); /* 1 */
- *reg++ = IXGB_READ_REG(hw, STATUS); /* 2 */
- *reg++ = IXGB_READ_REG(hw, EECD); /* 3 */
- *reg++ = IXGB_READ_REG(hw, MFS); /* 4 */
-
- /* Interrupt */
- *reg++ = IXGB_READ_REG(hw, ICR); /* 5 */
- *reg++ = IXGB_READ_REG(hw, ICS); /* 6 */
- *reg++ = IXGB_READ_REG(hw, IMS); /* 7 */
- *reg++ = IXGB_READ_REG(hw, IMC); /* 8 */
-
- /* Receive */
- *reg++ = IXGB_READ_REG(hw, RCTL); /* 9 */
- *reg++ = IXGB_READ_REG(hw, FCRTL); /* 10 */
- *reg++ = IXGB_READ_REG(hw, FCRTH); /* 11 */
- *reg++ = IXGB_READ_REG(hw, RDBAL); /* 12 */
- *reg++ = IXGB_READ_REG(hw, RDBAH); /* 13 */
- *reg++ = IXGB_READ_REG(hw, RDLEN); /* 14 */
- *reg++ = IXGB_READ_REG(hw, RDH); /* 15 */
- *reg++ = IXGB_READ_REG(hw, RDT); /* 16 */
- *reg++ = IXGB_READ_REG(hw, RDTR); /* 17 */
- *reg++ = IXGB_READ_REG(hw, RXDCTL); /* 18 */
- *reg++ = IXGB_READ_REG(hw, RAIDC); /* 19 */
- *reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
-
- /* there are 16 RAR entries in hardware, but we only use 3 */
- for (i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
- *reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
- *reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
- }
-
- /* Transmit */
- *reg++ = IXGB_READ_REG(hw, TCTL); /* 53 */
- *reg++ = IXGB_READ_REG(hw, TDBAL); /* 54 */
- *reg++ = IXGB_READ_REG(hw, TDBAH); /* 55 */
- *reg++ = IXGB_READ_REG(hw, TDLEN); /* 56 */
- *reg++ = IXGB_READ_REG(hw, TDH); /* 57 */
- *reg++ = IXGB_READ_REG(hw, TDT); /* 58 */
- *reg++ = IXGB_READ_REG(hw, TIDV); /* 59 */
- *reg++ = IXGB_READ_REG(hw, TXDCTL); /* 60 */
- *reg++ = IXGB_READ_REG(hw, TSPMT); /* 61 */
- *reg++ = IXGB_READ_REG(hw, PAP); /* 62 */
-
- /* Physical */
- *reg++ = IXGB_READ_REG(hw, PCSC1); /* 63 */
- *reg++ = IXGB_READ_REG(hw, PCSC2); /* 64 */
- *reg++ = IXGB_READ_REG(hw, PCSS1); /* 65 */
- *reg++ = IXGB_READ_REG(hw, PCSS2); /* 66 */
- *reg++ = IXGB_READ_REG(hw, XPCSS); /* 67 */
- *reg++ = IXGB_READ_REG(hw, UCCR); /* 68 */
- *reg++ = IXGB_READ_REG(hw, XPCSTC); /* 69 */
- *reg++ = IXGB_READ_REG(hw, MACA); /* 70 */
- *reg++ = IXGB_READ_REG(hw, APAE); /* 71 */
- *reg++ = IXGB_READ_REG(hw, ARD); /* 72 */
- *reg++ = IXGB_READ_REG(hw, AIS); /* 73 */
- *reg++ = IXGB_READ_REG(hw, MSCA); /* 74 */
- *reg++ = IXGB_READ_REG(hw, MSRWD); /* 75 */
-
- /* Statistics */
- *reg++ = IXGB_GET_STAT(adapter, tprl); /* 76 */
- *reg++ = IXGB_GET_STAT(adapter, tprh); /* 77 */
- *reg++ = IXGB_GET_STAT(adapter, gprcl); /* 78 */
- *reg++ = IXGB_GET_STAT(adapter, gprch); /* 79 */
- *reg++ = IXGB_GET_STAT(adapter, bprcl); /* 80 */
- *reg++ = IXGB_GET_STAT(adapter, bprch); /* 81 */
- *reg++ = IXGB_GET_STAT(adapter, mprcl); /* 82 */
- *reg++ = IXGB_GET_STAT(adapter, mprch); /* 83 */
- *reg++ = IXGB_GET_STAT(adapter, uprcl); /* 84 */
- *reg++ = IXGB_GET_STAT(adapter, uprch); /* 85 */
- *reg++ = IXGB_GET_STAT(adapter, vprcl); /* 86 */
- *reg++ = IXGB_GET_STAT(adapter, vprch); /* 87 */
- *reg++ = IXGB_GET_STAT(adapter, jprcl); /* 88 */
- *reg++ = IXGB_GET_STAT(adapter, jprch); /* 89 */
- *reg++ = IXGB_GET_STAT(adapter, gorcl); /* 90 */
- *reg++ = IXGB_GET_STAT(adapter, gorch); /* 91 */
- *reg++ = IXGB_GET_STAT(adapter, torl); /* 92 */
- *reg++ = IXGB_GET_STAT(adapter, torh); /* 93 */
- *reg++ = IXGB_GET_STAT(adapter, rnbc); /* 94 */
- *reg++ = IXGB_GET_STAT(adapter, ruc); /* 95 */
- *reg++ = IXGB_GET_STAT(adapter, roc); /* 96 */
- *reg++ = IXGB_GET_STAT(adapter, rlec); /* 97 */
- *reg++ = IXGB_GET_STAT(adapter, crcerrs); /* 98 */
- *reg++ = IXGB_GET_STAT(adapter, icbc); /* 99 */
- *reg++ = IXGB_GET_STAT(adapter, ecbc); /* 100 */
- *reg++ = IXGB_GET_STAT(adapter, mpc); /* 101 */
- *reg++ = IXGB_GET_STAT(adapter, tptl); /* 102 */
- *reg++ = IXGB_GET_STAT(adapter, tpth); /* 103 */
- *reg++ = IXGB_GET_STAT(adapter, gptcl); /* 104 */
- *reg++ = IXGB_GET_STAT(adapter, gptch); /* 105 */
- *reg++ = IXGB_GET_STAT(adapter, bptcl); /* 106 */
- *reg++ = IXGB_GET_STAT(adapter, bptch); /* 107 */
- *reg++ = IXGB_GET_STAT(adapter, mptcl); /* 108 */
- *reg++ = IXGB_GET_STAT(adapter, mptch); /* 109 */
- *reg++ = IXGB_GET_STAT(adapter, uptcl); /* 110 */
- *reg++ = IXGB_GET_STAT(adapter, uptch); /* 111 */
- *reg++ = IXGB_GET_STAT(adapter, vptcl); /* 112 */
- *reg++ = IXGB_GET_STAT(adapter, vptch); /* 113 */
- *reg++ = IXGB_GET_STAT(adapter, jptcl); /* 114 */
- *reg++ = IXGB_GET_STAT(adapter, jptch); /* 115 */
- *reg++ = IXGB_GET_STAT(adapter, gotcl); /* 116 */
- *reg++ = IXGB_GET_STAT(adapter, gotch); /* 117 */
- *reg++ = IXGB_GET_STAT(adapter, totl); /* 118 */
- *reg++ = IXGB_GET_STAT(adapter, toth); /* 119 */
- *reg++ = IXGB_GET_STAT(adapter, dc); /* 120 */
- *reg++ = IXGB_GET_STAT(adapter, plt64c); /* 121 */
- *reg++ = IXGB_GET_STAT(adapter, tsctc); /* 122 */
- *reg++ = IXGB_GET_STAT(adapter, tsctfc); /* 123 */
- *reg++ = IXGB_GET_STAT(adapter, ibic); /* 124 */
- *reg++ = IXGB_GET_STAT(adapter, rfc); /* 125 */
- *reg++ = IXGB_GET_STAT(adapter, lfc); /* 126 */
- *reg++ = IXGB_GET_STAT(adapter, pfrc); /* 127 */
- *reg++ = IXGB_GET_STAT(adapter, pftc); /* 128 */
- *reg++ = IXGB_GET_STAT(adapter, mcfrc); /* 129 */
- *reg++ = IXGB_GET_STAT(adapter, mcftc); /* 130 */
- *reg++ = IXGB_GET_STAT(adapter, xonrxc); /* 131 */
- *reg++ = IXGB_GET_STAT(adapter, xontxc); /* 132 */
- *reg++ = IXGB_GET_STAT(adapter, xoffrxc); /* 133 */
- *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */
- *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */
-
- regs->len = (reg - reg_start) * sizeof(u32);
-}
-
-static int
-ixgb_get_eeprom_len(struct net_device *netdev)
-{
- /* return size in bytes */
- return IXGB_EEPROM_SIZE << 1;
-}
-
-static int
-ixgb_get_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_hw *hw = &adapter->hw;
- __le16 *eeprom_buff;
- int i, max_len, first_word, last_word;
- int ret_val = 0;
-
- if (eeprom->len == 0) {
- ret_val = -EINVAL;
- goto geeprom_error;
- }
-
- eeprom->magic = hw->vendor_id | (hw->device_id << 16);
-
- max_len = ixgb_get_eeprom_len(netdev);
-
- if (eeprom->offset > eeprom->offset + eeprom->len) {
- ret_val = -EINVAL;
- goto geeprom_error;
- }
-
- if ((eeprom->offset + eeprom->len) > max_len)
- eeprom->len = (max_len - eeprom->offset);
-
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
-
- eeprom_buff = kmalloc_array(last_word - first_word + 1,
- sizeof(__le16),
- GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
-
- /* note the eeprom was good because the driver loaded */
- for (i = 0; i <= (last_word - first_word); i++)
- eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i));
-
- memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
- kfree(eeprom_buff);
-
-geeprom_error:
- return ret_val;
-}
-
-static int
-ixgb_set_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_hw *hw = &adapter->hw;
- u16 *eeprom_buff;
- void *ptr;
- int max_len, first_word, last_word;
- u16 i;
-
- if (eeprom->len == 0)
- return -EINVAL;
-
- if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
- return -EFAULT;
-
- max_len = ixgb_get_eeprom_len(netdev);
-
- if (eeprom->offset > eeprom->offset + eeprom->len)
- return -EINVAL;
-
- if ((eeprom->offset + eeprom->len) > max_len)
- eeprom->len = (max_len - eeprom->offset);
-
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- eeprom_buff = kmalloc(max_len, GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
-
- ptr = (void *)eeprom_buff;
-
- if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word */
- /* only the second byte of the word is being modified */
- eeprom_buff[0] = ixgb_read_eeprom(hw, first_word);
- ptr++;
- }
- if ((eeprom->offset + eeprom->len) & 1) {
- /* need read/modify/write of last changed EEPROM word */
- /* only the first byte of the word is being modified */
- eeprom_buff[last_word - first_word]
- = ixgb_read_eeprom(hw, last_word);
- }
-
- memcpy(ptr, bytes, eeprom->len);
- for (i = 0; i <= (last_word - first_word); i++)
- ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]);
-
- /* Update the checksum over the first part of the EEPROM if needed */
- if (first_word <= EEPROM_CHECKSUM_REG)
- ixgb_update_eeprom_checksum(hw);
-
- kfree(eeprom_buff);
- return 0;
-}
-
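A worked example of the offset arithmetic above (illustrative numbers only): a request with eeprom->offset = 3 and eeprom->len = 4 covers bytes 3..6, so first_word = 3 >> 1 = 1 and last_word = (3 + 4 - 1) >> 1 = 3. The request starts on an odd byte and ends mid-word, so word 1 and word 3 are each only half overwritten; both are pre-read into eeprom_buff by the two read/modify/write branches before the memcpy().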
-static void
-ixgb_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- strlcpy(drvinfo->driver, ixgb_driver_name,
- sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info));
-}
-
-static void
-ixgb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_desc_ring *txdr = &adapter->tx_ring;
- struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
-
- ring->rx_max_pending = MAX_RXD;
- ring->tx_max_pending = MAX_TXD;
- ring->rx_pending = rxdr->count;
- ring->tx_pending = txdr->count;
-}
-
-static int
-ixgb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_desc_ring *txdr = &adapter->tx_ring;
- struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
- struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new;
- int err;
-
- tx_old = adapter->tx_ring;
- rx_old = adapter->rx_ring;
-
- if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
- return -EINVAL;
-
- if (netif_running(adapter->netdev))
- ixgb_down(adapter, true);
-
- rxdr->count = max(ring->rx_pending, (u32)MIN_RXD);
- rxdr->count = min(rxdr->count, (u32)MAX_RXD);
- rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
-
- txdr->count = max(ring->tx_pending, (u32)MIN_TXD);
- txdr->count = min(txdr->count, (u32)MAX_TXD);
- txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
-
- if (netif_running(adapter->netdev)) {
- /* Try to get new resources before deleting old */
- if ((err = ixgb_setup_rx_resources(adapter)))
- goto err_setup_rx;
- if ((err = ixgb_setup_tx_resources(adapter)))
- goto err_setup_tx;
-
- /* save the new, restore the old in order to free it,
- * then restore the new back again */
-
- rx_new = adapter->rx_ring;
- tx_new = adapter->tx_ring;
- adapter->rx_ring = rx_old;
- adapter->tx_ring = tx_old;
- ixgb_free_rx_resources(adapter);
- ixgb_free_tx_resources(adapter);
- adapter->rx_ring = rx_new;
- adapter->tx_ring = tx_new;
- if ((err = ixgb_up(adapter)))
- return err;
- ixgb_set_speed_duplex(netdev);
- }
-
- return 0;
-err_setup_tx:
- ixgb_free_rx_resources(adapter);
-err_setup_rx:
- adapter->rx_ring = rx_old;
- adapter->tx_ring = tx_old;
- ixgb_up(adapter);
- return err;
-}
-
-static int
-ixgb_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- switch (state) {
- case ETHTOOL_ID_ACTIVE:
- return 2;
-
- case ETHTOOL_ID_ON:
- ixgb_led_on(&adapter->hw);
- break;
-
- case ETHTOOL_ID_OFF:
- case ETHTOOL_ID_INACTIVE:
- ixgb_led_off(&adapter->hw);
- }
-
- return 0;
-}
-
-static int
-ixgb_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return IXGB_STATS_LEN;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void
-ixgb_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- int i;
- char *p = NULL;
-
- ixgb_update_stats(adapter);
- for (i = 0; i < IXGB_STATS_LEN; i++) {
- switch (ixgb_gstrings_stats[i].type) {
- case NETDEV_STATS:
- p = (char *) netdev +
- ixgb_gstrings_stats[i].stat_offset;
- break;
- case IXGB_STATS:
- p = (char *) adapter +
- ixgb_gstrings_stats[i].stat_offset;
- break;
- }
-
- data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
-}
-
-static void
-ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
- int i;
-
- switch(stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < IXGB_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- ixgb_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- }
- break;
- }
-}
-
-static const struct ethtool_ops ixgb_ethtool_ops = {
- .get_drvinfo = ixgb_get_drvinfo,
- .get_regs_len = ixgb_get_regs_len,
- .get_regs = ixgb_get_regs,
- .get_link = ethtool_op_get_link,
- .get_eeprom_len = ixgb_get_eeprom_len,
- .get_eeprom = ixgb_get_eeprom,
- .set_eeprom = ixgb_set_eeprom,
- .get_ringparam = ixgb_get_ringparam,
- .set_ringparam = ixgb_set_ringparam,
- .get_pauseparam = ixgb_get_pauseparam,
- .set_pauseparam = ixgb_set_pauseparam,
- .get_msglevel = ixgb_get_msglevel,
- .set_msglevel = ixgb_set_msglevel,
- .get_strings = ixgb_get_strings,
- .set_phys_id = ixgb_set_phys_id,
- .get_sset_count = ixgb_get_sset_count,
- .get_ethtool_stats = ixgb_get_ethtool_stats,
- .get_link_ksettings = ixgb_get_link_ksettings,
- .set_link_ksettings = ixgb_set_link_ksettings,
-};
-
-void ixgb_set_ethtool_ops(struct net_device *netdev)
-{
- netdev->ethtool_ops = &ixgb_ethtool_ops;
-}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
deleted file mode 100644
index c8d1e815ec6b..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ /dev/null
@@ -1,1229 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-/* ixgb_hw.c
- * Shared functions for accessing and configuring the adapter
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci_ids.h>
-#include "ixgb_hw.h"
-#include "ixgb_ids.h"
-
-#include <linux/etherdevice.h>
-
-/* Local function prototypes */
-
-static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr);
-
-static void ixgb_mta_set(struct ixgb_hw *hw, u32 hash_value);
-
-static void ixgb_get_bus_info(struct ixgb_hw *hw);
-
-static bool ixgb_link_reset(struct ixgb_hw *hw);
-
-static void ixgb_optics_reset(struct ixgb_hw *hw);
-
-static void ixgb_optics_reset_bcm(struct ixgb_hw *hw);
-
-static ixgb_phy_type ixgb_identify_phy(struct ixgb_hw *hw);
-
-static void ixgb_clear_hw_cntrs(struct ixgb_hw *hw);
-
-static void ixgb_clear_vfta(struct ixgb_hw *hw);
-
-static void ixgb_init_rx_addrs(struct ixgb_hw *hw);
-
-static u16 ixgb_read_phy_reg(struct ixgb_hw *hw,
- u32 reg_address,
- u32 phy_address,
- u32 device_type);
-
-static bool ixgb_setup_fc(struct ixgb_hw *hw);
-
-static bool mac_addr_valid(u8 *mac_addr);
-
-static u32 ixgb_mac_reset(struct ixgb_hw *hw)
-{
- u32 ctrl_reg;
-
- ctrl_reg = IXGB_CTRL0_RST |
- IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */
- IXGB_CTRL0_SDP2_DIR |
- IXGB_CTRL0_SDP1_DIR |
- IXGB_CTRL0_SDP0_DIR |
- IXGB_CTRL0_SDP3 | /* Initial value 1101 */
- IXGB_CTRL0_SDP2 |
- IXGB_CTRL0_SDP0;
-
-#ifdef HP_ZX1
- /* Workaround for 82597EX reset errata */
- IXGB_WRITE_REG_IO(hw, CTRL0, ctrl_reg);
-#else
- IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
-#endif
-
- /* Delay a few ms just to allow the reset to complete */
- msleep(IXGB_DELAY_AFTER_RESET);
- ctrl_reg = IXGB_READ_REG(hw, CTRL0);
-#ifdef DBG
- /* Make sure the self-clearing global reset bit did self clear */
- ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
-#endif
-
- if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN) {
- ctrl_reg = /* Enable interrupt from XFP and SerDes */
- IXGB_CTRL1_GPI0_EN |
- IXGB_CTRL1_SDP6_DIR |
- IXGB_CTRL1_SDP7_DIR |
- IXGB_CTRL1_SDP6 |
- IXGB_CTRL1_SDP7;
- IXGB_WRITE_REG(hw, CTRL1, ctrl_reg);
- ixgb_optics_reset_bcm(hw);
- }
-
- if (hw->phy_type == ixgb_phy_type_txn17401)
- ixgb_optics_reset(hw);
-
- return ctrl_reg;
-}
-
-/******************************************************************************
- * Reset the transmit and receive units; mask and clear all interrupts.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-bool
-ixgb_adapter_stop(struct ixgb_hw *hw)
-{
- u32 ctrl_reg;
-
- ENTER();
-
- /* If we are stopped or resetting, exit gracefully and wait to be
- * started again before accessing the hardware.
- */
- if (hw->adapter_stopped) {
- pr_debug("Exiting because the adapter is already stopped!!!\n");
- return false;
- }
-
- /* Set the Adapter Stopped flag so other driver functions stop
- * touching the Hardware.
- */
- hw->adapter_stopped = true;
-
- /* Clear interrupt mask to stop board from generating interrupts */
- pr_debug("Masking off all interrupts\n");
- IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);
-
- /* Disable the Transmit and Receive units. Then delay to allow
- * any pending transactions to complete before we hit the MAC with
- * the global reset.
- */
- IXGB_WRITE_REG(hw, RCTL, IXGB_READ_REG(hw, RCTL) & ~IXGB_RCTL_RXEN);
- IXGB_WRITE_REG(hw, TCTL, IXGB_READ_REG(hw, TCTL) & ~IXGB_TCTL_TXEN);
- IXGB_WRITE_FLUSH(hw);
- msleep(IXGB_DELAY_BEFORE_RESET);
-
- /* Issue a global reset to the MAC. This will reset the chip's
- * transmit, receive, DMA, and link units. It will not affect
- * the current PCI configuration. The global reset bit is self-
- * clearing, and should clear within a microsecond.
- */
- pr_debug("Issuing a global reset to MAC\n");
-
- ctrl_reg = ixgb_mac_reset(hw);
-
- /* Clear interrupt mask to stop board from generating interrupts */
- pr_debug("Masking off all interrupts\n");
- IXGB_WRITE_REG(hw, IMC, 0xffffffff);
-
- /* Clear any pending interrupt events. */
- IXGB_READ_REG(hw, ICR);
-
- return ctrl_reg & IXGB_CTRL0_RST;
-}
-
-
-/******************************************************************************
- * Identifies the vendor of the optics module on the adapter. The SR adapters
- * support two different types of XPAK optics, so it is necessary to determine
- * which optics are present before applying any optics-specific workarounds.
- *
- * hw - Struct containing variables accessed by shared code.
- *
- * Returns: the vendor of the XPAK optics module.
- *****************************************************************************/
-static ixgb_xpak_vendor
-ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
-{
- u32 i;
- u16 vendor_name[5];
- ixgb_xpak_vendor xpak_vendor;
-
- ENTER();
-
- /* Read the first few bytes of the vendor string from the XPAK NVR
- * registers. These are standard XENPAK/XPAK registers, so all XPAK
- * devices should implement them. */
- for (i = 0; i < 5; i++) {
- vendor_name[i] = ixgb_read_phy_reg(hw,
- MDIO_PMA_PMD_XPAK_VENDOR_NAME
- + i, IXGB_PHY_ADDRESS,
- MDIO_MMD_PMAPMD);
- }
-
- /* Determine the actual vendor */
- if (vendor_name[0] == 'I' &&
- vendor_name[1] == 'N' &&
- vendor_name[2] == 'T' &&
- vendor_name[3] == 'E' && vendor_name[4] == 'L') {
- xpak_vendor = ixgb_xpak_vendor_intel;
- } else {
- xpak_vendor = ixgb_xpak_vendor_infineon;
- }
-
- return xpak_vendor;
-}
-
-/******************************************************************************
- * Determine the physical layer module on the adapter.
- *
- * hw - Struct containing variables accessed by shared code. The device_id
- * field must be (correctly) populated before calling this routine.
- *
- * Returns: the phy type of the adapter.
- *****************************************************************************/
-static ixgb_phy_type
-ixgb_identify_phy(struct ixgb_hw *hw)
-{
- ixgb_phy_type phy_type;
- ixgb_xpak_vendor xpak_vendor;
-
- ENTER();
-
- /* Infer the transceiver/phy type from the device id */
- switch (hw->device_id) {
- case IXGB_DEVICE_ID_82597EX:
- pr_debug("Identified TXN17401 optics\n");
- phy_type = ixgb_phy_type_txn17401;
- break;
-
- case IXGB_DEVICE_ID_82597EX_SR:
- /* The SR adapters carry two different types of XPAK optics
- * modules; read the vendor identifier to determine the exact
- * type of optics. */
- xpak_vendor = ixgb_identify_xpak_vendor(hw);
- if (xpak_vendor == ixgb_xpak_vendor_intel) {
- pr_debug("Identified TXN17201 optics\n");
- phy_type = ixgb_phy_type_txn17201;
- } else {
- pr_debug("Identified G6005 optics\n");
- phy_type = ixgb_phy_type_g6005;
- }
- break;
- case IXGB_DEVICE_ID_82597EX_LR:
- pr_debug("Identified G6104 optics\n");
- phy_type = ixgb_phy_type_g6104;
- break;
- case IXGB_DEVICE_ID_82597EX_CX4:
- pr_debug("Identified CX4\n");
- xpak_vendor = ixgb_identify_xpak_vendor(hw);
- if (xpak_vendor == ixgb_xpak_vendor_intel) {
- pr_debug("Identified TXN17201 optics\n");
- phy_type = ixgb_phy_type_txn17201;
- } else {
- pr_debug("Identified G6005 optics\n");
- phy_type = ixgb_phy_type_g6005;
- }
- break;
- default:
- pr_debug("Unknown physical layer module\n");
- phy_type = ixgb_phy_type_unknown;
- break;
- }
-
- /* update phy type for sun specific board */
- if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN)
- phy_type = ixgb_phy_type_bcm;
-
- return phy_type;
-}
-
-/******************************************************************************
- * Performs basic configuration of the adapter.
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Resets the controller.
- * Reads and validates the EEPROM.
- * Initializes the receive address registers.
- * Initializes the multicast table.
- * Clears all on-chip counters.
- * Calls routine to setup flow control settings.
- * Leaves the transmit and receive units disabled and uninitialized.
- *
- * Returns:
- * true if successful,
- * false if unrecoverable problems were encountered.
- *****************************************************************************/
-bool
-ixgb_init_hw(struct ixgb_hw *hw)
-{
- u32 i;
- bool status;
-
- ENTER();
-
- /* Issue a global reset to the MAC. This will reset the chip's
- * transmit, receive, DMA, and link units. It will not affect
- * the current PCI configuration. The global reset bit is self-
- * clearing, and should clear within a microsecond.
- */
- pr_debug("Issuing a global reset to MAC\n");
-
- ixgb_mac_reset(hw);
-
- pr_debug("Issuing an EE reset to MAC\n");
-#ifdef HP_ZX1
- /* Workaround for 82597EX reset errata */
- IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
-#else
- IXGB_WRITE_REG(hw, CTRL1, IXGB_CTRL1_EE_RST);
-#endif
-
- /* Delay a few ms just to allow the reset to complete */
- msleep(IXGB_DELAY_AFTER_EE_RESET);
-
- if (!ixgb_get_eeprom_data(hw))
- return false;
-
- /* Use the device id to determine the type of phy/transceiver. */
- hw->device_id = ixgb_get_ee_device_id(hw);
- hw->phy_type = ixgb_identify_phy(hw);
-
- /* Setup the receive addresses.
- * Receive Address Registers (RARs 0 - 15).
- */
- ixgb_init_rx_addrs(hw);
-
- /*
- * Check that a valid MAC address has been set.
- * If it is not valid, we fail hardware init.
- */
- if (!mac_addr_valid(hw->curr_mac_addr)) {
- pr_debug("MAC address invalid after ixgb_init_rx_addrs\n");
- return false;
- }
-
- /* tell the routines in this file they can access hardware again */
- hw->adapter_stopped = false;
-
- /* Fill in the bus_info structure */
- ixgb_get_bus_info(hw);
-
- /* Zero out the Multicast HASH table */
- pr_debug("Zeroing the MTA\n");
- for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
- IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
-
- /* Zero out the VLAN Filter Table Array */
- ixgb_clear_vfta(hw);
-
- /* Zero all of the hardware counters */
- ixgb_clear_hw_cntrs(hw);
-
- /* Call a subroutine to setup flow control. */
- status = ixgb_setup_fc(hw);
-
- /* 82597EX errata: Call check-for-link in case lane deskew is locked */
- ixgb_check_for_link(hw);
-
- return status;
-}
-
-/******************************************************************************
- * Initializes receive address filters.
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Places the MAC address in receive address register 0 and clears the rest
- * of the receive address registers. Clears the multicast table. Assumes
- * the receiver is in reset when the routine is called.
- *****************************************************************************/
-static void
-ixgb_init_rx_addrs(struct ixgb_hw *hw)
-{
- u32 i;
-
- ENTER();
-
- /*
- * If the current mac address is valid, assume it is a software override
- * to the permanent address.
- * Otherwise, use the permanent address from the eeprom.
- */
- if (!mac_addr_valid(hw->curr_mac_addr)) {
-
- /* Get the MAC address from the eeprom for later reference */
- ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
-
- pr_debug("Keeping Permanent MAC Addr = %pM\n",
- hw->curr_mac_addr);
- } else {
-
- /* Setup the receive address. */
- pr_debug("Overriding MAC Address in RAR[0]\n");
- pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr);
-
- ixgb_rar_set(hw, hw->curr_mac_addr, 0);
- }
-
- /* Zero out the other 15 receive addresses. */
- pr_debug("Clearing RAR[1-15]\n");
- for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
- /* Write high reg first to disable the AV bit first */
- IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
- IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
- }
-}
-
-/******************************************************************************
- * Updates the MAC's list of multicast addresses.
- *
- * hw - Struct containing variables accessed by shared code
- * mc_addr_list - the list of new multicast addresses
- * mc_addr_count - number of addresses
- * pad - number of bytes between addresses in the list
- *
- * The given list replaces any existing list. Clears the last 15 receive
- * address registers and the multicast table. Uses receive address registers
- * for the first 15 multicast addresses, and hashes the rest into the
- * multicast table.
- *****************************************************************************/
-void
-ixgb_mc_addr_list_update(struct ixgb_hw *hw,
- u8 *mc_addr_list,
- u32 mc_addr_count,
- u32 pad)
-{
- u32 hash_value;
- u32 i;
- u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */
- u8 *mca;
-
- ENTER();
-
- /* Set the new number of MC addresses that we are being requested to use. */
- hw->num_mc_addrs = mc_addr_count;
-
- /* Clear RAR[1-15] */
- pr_debug("Clearing RAR[1-15]\n");
- for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
- IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
- IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
- }
-
- /* Clear the MTA */
- pr_debug("Clearing MTA\n");
- for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
- IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
-
- /* Add the new addresses */
- mca = mc_addr_list;
- for (i = 0; i < mc_addr_count; i++) {
- pr_debug("Adding the multicast addresses:\n");
- pr_debug("MC Addr #%d = %pM\n", i, mca);
-
- /* Place this multicast address in the RAR if there is room,
- * else put it in the MTA
- */
- if (rar_used_count < IXGB_RAR_ENTRIES) {
- ixgb_rar_set(hw, mca, rar_used_count);
- pr_debug("Added a multicast address to RAR[%d]\n", i);
- rar_used_count++;
- } else {
- hash_value = ixgb_hash_mc_addr(hw, mca);
-
- pr_debug("Hash value = 0x%03X\n", hash_value);
-
- ixgb_mta_set(hw, hash_value);
- }
-
- mca += ETH_ALEN + pad;
- }
-
- pr_debug("MC Update Complete\n");
-}
-
-/******************************************************************************
- * Hashes an address to determine its location in the multicast table
- *
- * hw - Struct containing variables accessed by shared code
- * mc_addr - the multicast address to hash
- *
- * Returns:
- * The hash value
- *****************************************************************************/
-static u32
-ixgb_hash_mc_addr(struct ixgb_hw *hw,
- u8 *mc_addr)
-{
- u32 hash_value = 0;
-
- ENTER();
-
- /* The portion of the address that is used for the hash table is
- * determined by the mc_filter_type setting.
- */
- switch (hw->mc_filter_type) {
- /* [0] [1] [2] [3] [4] [5]
- * 01 AA 00 12 34 56
- * LSB MSB - According to H/W docs */
- case 0:
- /* [47:36] i.e. 0x563 for above example address */
- hash_value =
- ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
- break;
- case 1: /* [46:35] i.e. 0xAC6 for above example address */
- hash_value =
- ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
- break;
- case 2: /* [45:34] i.e. 0x5D8 for above example address */
- hash_value =
- ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
- break;
- case 3: /* [43:32] i.e. 0x634 for above example address */
- hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
- break;
- default:
- /* Invalid mc_filter_type, what should we do? */
- pr_debug("MC filter type param set incorrectly\n");
- ASSERT(0);
- break;
- }
-
- hash_value &= 0xFFF;
- return hash_value;
-}
-
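The example in the comment above can be reproduced standalone (not driver code): for address 01:AA:00:12:34:56 with mc_filter_type 0, the hash comes out to 0x563.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t mc_addr[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
        /* filter type 0: bits [47:36] of the address */
        uint16_t hash = ((mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4)) & 0xFFF;

        printf("hash = 0x%03X\n", hash);        /* prints hash = 0x563 */
        return 0;
}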
-/******************************************************************************
- * Sets the bit in the multicast table corresponding to the hash value.
- *
- * hw - Struct containing variables accessed by shared code
- * hash_value - Multicast address hash value
- *****************************************************************************/
-static void
-ixgb_mta_set(struct ixgb_hw *hw,
- u32 hash_value)
-{
- u32 hash_bit, hash_reg;
- u32 mta_reg;
-
- /* The MTA is a register array of 128 32-bit registers.
- * It is treated like an array of 4096 bits. We want to set
- * bit BitArray[hash_value]. So we figure out what register
- * the bit is in, read it, OR in the new bit, then write
- * back the new value. The register is determined by the
- * upper 7 bits of the hash value and the bit within that
- * register is determined by the lower 5 bits of the value.
- */
- hash_reg = (hash_value >> 5) & 0x7F;
- hash_bit = hash_value & 0x1F;
-
- mta_reg = IXGB_READ_REG_ARRAY(hw, MTA, hash_reg);
-
- mta_reg |= (1 << hash_bit);
-
- IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg);
-}
-
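Continuing the example above: hash_value 0x563 gives hash_reg = (0x563 >> 5) & 0x7F = 0x2B and hash_bit = 0x563 & 0x1F = 0x3, so the routine ORs bit 3 into MTA register 43.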
-/******************************************************************************
- * Puts an ethernet address into a receive address register.
- *
- * hw - Struct containing variables accessed by shared code
- * addr - Address to put into receive address register
- * index - Receive address register to write
- *****************************************************************************/
-void
-ixgb_rar_set(struct ixgb_hw *hw,
- const u8 *addr,
- u32 index)
-{
- u32 rar_low, rar_high;
-
- ENTER();
-
- /* HW expects these in little endian so we reverse the byte order
- * from network order (big endian) to little endian
- */
- rar_low = ((u32) addr[0] |
- ((u32)addr[1] << 8) |
- ((u32)addr[2] << 16) |
- ((u32)addr[3] << 24));
-
- rar_high = ((u32) addr[4] |
- ((u32)addr[5] << 8) |
- IXGB_RAH_AV);
-
- IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
- IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
-}
-
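For example, the (made-up) address 00:A0:C9:12:34:56 packs into rar_low = 0x12C9A000 and rar_high = 0x00005634 | IXGB_RAH_AV: the first four bytes land in the low register in reversed byte order, and the last two in the high register along with the Address Valid bit.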
-/******************************************************************************
- * Writes a value to the specified offset in the VLAN filter table.
- *
- * hw - Struct containing variables accessed by shared code
- * offset - Offset in VLAN filter table to write
- * value - Value to write into VLAN filter table
- *****************************************************************************/
-void
-ixgb_write_vfta(struct ixgb_hw *hw,
- u32 offset,
- u32 value)
-{
- IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
-}
-
-/******************************************************************************
- * Clears the VLAN filter table
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void
-ixgb_clear_vfta(struct ixgb_hw *hw)
-{
- u32 offset;
-
- for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
- IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
-}
-
-/******************************************************************************
- * Configures the flow control settings based on SW configuration.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-
-static bool
-ixgb_setup_fc(struct ixgb_hw *hw)
-{
- u32 ctrl_reg;
- u32 pap_reg = 0; /* by default, assume no pause time */
- bool status = true;
-
- ENTER();
-
- /* Get the current control reg 0 settings */
- ctrl_reg = IXGB_READ_REG(hw, CTRL0);
-
- /* Clear the Receive Pause Enable and Transmit Pause Enable bits */
- ctrl_reg &= ~(IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
-
- /* The possible values of the "flow_control" parameter are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames
- * but not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames
- * but we do not support receiving pause frames).
- * 3: Both Rx and TX flow control (symmetric) are enabled.
- * other: Invalid.
- */
- switch (hw->fc.type) {
- case ixgb_fc_none: /* 0 */
- /* Set CMDC bit to disable Rx Flow control */
- ctrl_reg |= (IXGB_CTRL0_CMDC);
- break;
- case ixgb_fc_rx_pause: /* 1 */
- /* RX Flow control is enabled, and TX Flow control is
- * disabled.
- */
- ctrl_reg |= (IXGB_CTRL0_RPE);
- break;
- case ixgb_fc_tx_pause: /* 2 */
- /* TX Flow control is enabled, and RX Flow control is
- * disabled by a software override.
- */
- ctrl_reg |= (IXGB_CTRL0_TPE);
- pap_reg = hw->fc.pause_time;
- break;
- case ixgb_fc_full: /* 3 */
- /* Flow control (both RX and TX) is enabled by a software
- * override.
- */
- ctrl_reg |= (IXGB_CTRL0_RPE | IXGB_CTRL0_TPE);
- pap_reg = hw->fc.pause_time;
- break;
- default:
- /* We should never get here. The value should be 0-3. */
- pr_debug("Flow control param set incorrectly\n");
- ASSERT(0);
- break;
- }
-
- /* Write the new settings */
- IXGB_WRITE_REG(hw, CTRL0, ctrl_reg);
-
- if (pap_reg != 0)
- IXGB_WRITE_REG(hw, PAP, pap_reg);
-
- /* Set the flow control receive threshold registers. Normally,
- * these registers will be set to a default threshold that may be
- * adjusted later by the driver's runtime code. However, if the
- * ability to transmit pause frames is not enabled, then these
- * registers will be set to 0.
- */
- if (!(hw->fc.type & ixgb_fc_tx_pause)) {
- IXGB_WRITE_REG(hw, FCRTL, 0);
- IXGB_WRITE_REG(hw, FCRTH, 0);
- } else {
- /* We need to set up the Receive Threshold high and low water
- * marks as well as (optionally) enabling the transmission of XON
- * frames. */
- if (hw->fc.send_xon) {
- IXGB_WRITE_REG(hw, FCRTL,
- (hw->fc.low_water | IXGB_FCRTL_XONE));
- } else {
- IXGB_WRITE_REG(hw, FCRTL, hw->fc.low_water);
- }
- IXGB_WRITE_REG(hw, FCRTH, hw->fc.high_water);
- }
- return status;
-}
-
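The fc structure consumed above is expected to be filled in by the driver before ixgb_setup_fc() runs (via ixgb_init_hw()). A minimal sketch of such a setup, assuming the driver's ixgb_hw.h declarations; the threshold values are placeholders, not the driver's defaults:

/* Illustrative flow-control configuration; all values are example placeholders. */
static void example_configure_fc(struct ixgb_hw *hw)
{
        hw->fc.type = ixgb_fc_full;     /* symmetric RX/TX pause */
        hw->fc.pause_time = 0x0680;     /* written to PAP when TX pause is enabled */
        hw->fc.send_xon = 1;            /* sets XONE in FCRTL */
        hw->fc.high_water = 0x30000;    /* FCRTH threshold */
        hw->fc.low_water = 0x28000;     /* FCRTL threshold */
}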
-/******************************************************************************
- * Reads a word from a device over the Management Data Interface (MDI) bus.
- * This interface is used to manage Physical layer devices.
- *
- * hw - Struct containing variables accessed by hw code
- * reg_address - Offset of device register being read.
- * phy_address - Address of device on MDI.
- *
- * Returns: Data word (16 bits) from MDI device.
- *
- * The 82597EX has support for several MDI access methods. This routine
- * uses the new protocol MDI Single Command and Address Operation.
- * This requires that first an address cycle command is sent, followed by a
- * read command.
- *****************************************************************************/
-static u16
-ixgb_read_phy_reg(struct ixgb_hw *hw,
- u32 reg_address,
- u32 phy_address,
- u32 device_type)
-{
- u32 i;
- u32 data;
- u32 command = 0;
-
- ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
- ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
- ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
-
- /* Setup and write the address cycle command */
- command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
- (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
- (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
-
- IXGB_WRITE_REG(hw, MSCA, command);
-
- /**************************************************************
- ** Check every 10 usec to see if the address cycle completed
- ** The COMMAND bit will clear when the operation is complete.
- ** This may take as long as 64 usecs (we'll wait 100 usecs max)
- ** from the CPU Write to the Ready bit assertion.
- **************************************************************/
-
- for (i = 0; i < 10; i++)
- {
- udelay(10);
-
- command = IXGB_READ_REG(hw, MSCA);
-
- if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
-
- /* Address cycle complete, setup and write the read command */
- command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
- (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
- (IXGB_MSCA_READ | IXGB_MSCA_MDI_COMMAND));
-
- IXGB_WRITE_REG(hw, MSCA, command);
-
- /**************************************************************
- ** Check every 10 usec to see if the read command completed
- ** The COMMAND bit will clear when the operation is complete.
- ** The read may take as long as 64 usecs (we'll wait 100 usecs max)
- ** from the CPU Write to the Ready bit assertion.
- **************************************************************/
-
- for (i = 0; i < 10; i++)
- {
- udelay(10);
-
- command = IXGB_READ_REG(hw, MSCA);
-
- if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
-
- /* Operation is complete, get the data from the MDIO Read/Write Data
- * register and return.
- */
- data = IXGB_READ_REG(hw, MSRWD);
- data >>= IXGB_MSRWD_READ_DATA_SHIFT;
- return (u16)data;
-}
-
-/******************************************************************************
- * Writes a word to a device over the Management Data Interface (MDI) bus.
- * This interface is used to manage Physical layer devices.
- *
- * hw - Struct containing variables accessed by hw code
- * reg_address - Offset of device register being read.
- * phy_address - Address of device on MDI.
- * device_type - Also known as the Device ID or DID.
- * data - 16-bit value to be written
- *
- * Returns: void.
- *
- * The 82597EX has support for several MDI access methods. This routine
- * uses the new protocol MDI Single Command and Address Operation.
- * This requires that first an address cycle command is sent, followed by a
- * write command.
- *****************************************************************************/
-static void
-ixgb_write_phy_reg(struct ixgb_hw *hw,
- u32 reg_address,
- u32 phy_address,
- u32 device_type,
- u16 data)
-{
- u32 i;
- u32 command = 0;
-
- ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
- ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
- ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
-
- /* Put the data in the MDIO Read/Write Data register */
- IXGB_WRITE_REG(hw, MSRWD, (u32)data);
-
- /* Setup and write the address cycle command */
- command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
- (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
- (IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
-
- IXGB_WRITE_REG(hw, MSCA, command);
-
- /**************************************************************
- ** Check every 10 usec to see if the address cycle completed
- ** The COMMAND bit will clear when the operation is complete.
- ** This may take as long as 64 usecs (we'll wait 100 usecs max)
- ** from the CPU Write to the Ready bit assertion.
- **************************************************************/
-
- for (i = 0; i < 10; i++)
- {
- udelay(10);
-
- command = IXGB_READ_REG(hw, MSCA);
-
- if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
-
- /* Address cycle complete, setup and write the write command */
- command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
- (phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
- (IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND));
-
- IXGB_WRITE_REG(hw, MSCA, command);
-
- /**************************************************************
- ** Check every 10 usec to see if the read command completed
- ** The COMMAND bit will clear when the operation is complete.
- ** The write may take as long as 64 usecs (we'll wait 100 usecs max)
- ** from the CPU Write to the Ready bit assertion.
- **************************************************************/
-
- for (i = 0; i < 10; i++)
- {
- udelay(10);
-
- command = IXGB_READ_REG(hw, MSCA);
-
- if ((command & IXGB_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
-
- /* Operation is complete, return. */
-}
-
-/******************************************************************************
- * Checks to see if the link status of the hardware has changed.
- *
- * hw - Struct containing variables accessed by hw code
- *
- * Called by any function that needs to check the link status of the adapter.
- *****************************************************************************/
-void
-ixgb_check_for_link(struct ixgb_hw *hw)
-{
- u32 status_reg;
- u32 xpcss_reg;
-
- ENTER();
-
- xpcss_reg = IXGB_READ_REG(hw, XPCSS);
- status_reg = IXGB_READ_REG(hw, STATUS);
-
- if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
- (status_reg & IXGB_STATUS_LU)) {
- hw->link_up = true;
- } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
- (status_reg & IXGB_STATUS_LU)) {
- pr_debug("XPCSS Not Aligned while Status:LU is set\n");
- hw->link_up = ixgb_link_reset(hw);
- } else {
- /*
- * 82597EX errata. Since the lane deskew problem may prevent
- * link, reset the link before reporting link down.
- */
- hw->link_up = ixgb_link_reset(hw);
- }
- /* Anything else for 10 Gig?? */
-}
-
-/******************************************************************************
- * Check for a bad link condition that may have occurred.
- * The indication is that the RFC / LFC registers may be incrementing
- * continually. A full adapter reset is required to recover.
- *
- * hw - Struct containing variables accessed by hw code
- *
- * Called by any function that needs to check the link status of the adapter.
- *****************************************************************************/
-bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
-{
- u32 newLFC, newRFC;
- bool bad_link_returncode = false;
-
- if (hw->phy_type == ixgb_phy_type_txn17401) {
- newLFC = IXGB_READ_REG(hw, LFC);
- newRFC = IXGB_READ_REG(hw, RFC);
- if ((hw->lastLFC + 250 < newLFC)
- || (hw->lastRFC + 250 < newRFC)) {
- pr_debug("BAD LINK! too many LFC/RFC since last check\n");
- bad_link_returncode = true;
- }
- hw->lastLFC = newLFC;
- hw->lastRFC = newRFC;
- }
-
- return bad_link_returncode;
-}
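
The routine above only detects the condition; as its header comment says, recovery needs a full adapter reset. A minimal hedged sketch of the intended calling pattern, where check_for_bad_link() and full_reset() are hypothetical stand-ins for ixgb_check_for_bad_link() and the driver's reset path:

#include <stdbool.h>

extern bool check_for_bad_link(void);  /* wraps ixgb_check_for_bad_link(hw) */
extern void full_reset(void);          /* full adapter reset, e.g. down/up  */

/* Hypothetical periodic service routine (not part of the original driver). */
void link_service_tick(void)
{
	/* RFC/LFC climbing by more than 250 since the last check means the
	 * link is unusable; only a full reset recovers the adapter. */
	if (check_for_bad_link())
		full_reset();
}
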
-
-/******************************************************************************
- * Clears all hardware statistics counters.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void
-ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
-{
- ENTER();
-
- /* if we are stopped or resetting exit gracefully */
- if (hw->adapter_stopped) {
- pr_debug("Exiting because the adapter is stopped!!!\n");
- return;
- }
-
- IXGB_READ_REG(hw, TPRL);
- IXGB_READ_REG(hw, TPRH);
- IXGB_READ_REG(hw, GPRCL);
- IXGB_READ_REG(hw, GPRCH);
- IXGB_READ_REG(hw, BPRCL);
- IXGB_READ_REG(hw, BPRCH);
- IXGB_READ_REG(hw, MPRCL);
- IXGB_READ_REG(hw, MPRCH);
- IXGB_READ_REG(hw, UPRCL);
- IXGB_READ_REG(hw, UPRCH);
- IXGB_READ_REG(hw, VPRCL);
- IXGB_READ_REG(hw, VPRCH);
- IXGB_READ_REG(hw, JPRCL);
- IXGB_READ_REG(hw, JPRCH);
- IXGB_READ_REG(hw, GORCL);
- IXGB_READ_REG(hw, GORCH);
- IXGB_READ_REG(hw, TORL);
- IXGB_READ_REG(hw, TORH);
- IXGB_READ_REG(hw, RNBC);
- IXGB_READ_REG(hw, RUC);
- IXGB_READ_REG(hw, ROC);
- IXGB_READ_REG(hw, RLEC);
- IXGB_READ_REG(hw, CRCERRS);
- IXGB_READ_REG(hw, ICBC);
- IXGB_READ_REG(hw, ECBC);
- IXGB_READ_REG(hw, MPC);
- IXGB_READ_REG(hw, TPTL);
- IXGB_READ_REG(hw, TPTH);
- IXGB_READ_REG(hw, GPTCL);
- IXGB_READ_REG(hw, GPTCH);
- IXGB_READ_REG(hw, BPTCL);
- IXGB_READ_REG(hw, BPTCH);
- IXGB_READ_REG(hw, MPTCL);
- IXGB_READ_REG(hw, MPTCH);
- IXGB_READ_REG(hw, UPTCL);
- IXGB_READ_REG(hw, UPTCH);
- IXGB_READ_REG(hw, VPTCL);
- IXGB_READ_REG(hw, VPTCH);
- IXGB_READ_REG(hw, JPTCL);
- IXGB_READ_REG(hw, JPTCH);
- IXGB_READ_REG(hw, GOTCL);
- IXGB_READ_REG(hw, GOTCH);
- IXGB_READ_REG(hw, TOTL);
- IXGB_READ_REG(hw, TOTH);
- IXGB_READ_REG(hw, DC);
- IXGB_READ_REG(hw, PLT64C);
- IXGB_READ_REG(hw, TSCTC);
- IXGB_READ_REG(hw, TSCTFC);
- IXGB_READ_REG(hw, IBIC);
- IXGB_READ_REG(hw, RFC);
- IXGB_READ_REG(hw, LFC);
- IXGB_READ_REG(hw, PFRC);
- IXGB_READ_REG(hw, PFTC);
- IXGB_READ_REG(hw, MCFRC);
- IXGB_READ_REG(hw, MCFTC);
- IXGB_READ_REG(hw, XONRXC);
- IXGB_READ_REG(hw, XONTXC);
- IXGB_READ_REG(hw, XOFFRXC);
- IXGB_READ_REG(hw, XOFFTXC);
- IXGB_READ_REG(hw, RJC);
-}
-
-/******************************************************************************
- * Turns on the software controllable LED
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-void
-ixgb_led_on(struct ixgb_hw *hw)
-{
- u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
-
- /* To turn on the LED, clear software-definable pin 0 (SDP0). */
- ctrl0_reg &= ~IXGB_CTRL0_SDP0;
- IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
-}
-
-/******************************************************************************
- * Turns off the software controllable LED
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-void
-ixgb_led_off(struct ixgb_hw *hw)
-{
- u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
-
- /* To turn off the LED, set software-definable pin 0 (SDP0). */
- ctrl0_reg |= IXGB_CTRL0_SDP0;
- IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
-}
-
-/******************************************************************************
- * Gets the current PCI bus type, speed, and width of the hardware
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void
-ixgb_get_bus_info(struct ixgb_hw *hw)
-{
- u32 status_reg;
-
- status_reg = IXGB_READ_REG(hw, STATUS);
-
- hw->bus.type = (status_reg & IXGB_STATUS_PCIX_MODE) ?
- ixgb_bus_type_pcix : ixgb_bus_type_pci;
-
- if (hw->bus.type == ixgb_bus_type_pci) {
- hw->bus.speed = (status_reg & IXGB_STATUS_PCI_SPD) ?
- ixgb_bus_speed_66 : ixgb_bus_speed_33;
- } else {
- switch (status_reg & IXGB_STATUS_PCIX_SPD_MASK) {
- case IXGB_STATUS_PCIX_SPD_66:
- hw->bus.speed = ixgb_bus_speed_66;
- break;
- case IXGB_STATUS_PCIX_SPD_100:
- hw->bus.speed = ixgb_bus_speed_100;
- break;
- case IXGB_STATUS_PCIX_SPD_133:
- hw->bus.speed = ixgb_bus_speed_133;
- break;
- default:
- hw->bus.speed = ixgb_bus_speed_reserved;
- break;
- }
- }
-
- hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ?
- ixgb_bus_width_64 : ixgb_bus_width_32;
-}
-
-/******************************************************************************
- * Tests a MAC address to ensure it is a valid Individual Address
- *
- * mac_addr - pointer to MAC address.
- *
- *****************************************************************************/
-static bool
-mac_addr_valid(u8 *mac_addr)
-{
- bool is_valid = true;
- ENTER();
-
- /* Make sure it is not a multicast address */
- if (is_multicast_ether_addr(mac_addr)) {
- pr_debug("MAC address is multicast\n");
- is_valid = false;
- }
- /* Not a broadcast address */
- else if (is_broadcast_ether_addr(mac_addr)) {
- pr_debug("MAC address is broadcast\n");
- is_valid = false;
- }
- /* Reject the zero address */
- else if (is_zero_ether_addr(mac_addr)) {
- pr_debug("MAC address is all zeros\n");
- is_valid = false;
- }
- return is_valid;
-}
-
-/******************************************************************************
- * Resets the 10GbE link. Waits the settle time and returns the state of
- * the link.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static bool
-ixgb_link_reset(struct ixgb_hw *hw)
-{
- bool link_status = false;
- u8 wait_retries = MAX_RESET_ITERATIONS;
- u8 lrst_retries = MAX_RESET_ITERATIONS;
-
- do {
- /* Reset the link */
- IXGB_WRITE_REG(hw, CTRL0,
- IXGB_READ_REG(hw, CTRL0) | IXGB_CTRL0_LRST);
-
- /* Wait for link-up and lane re-alignment */
- do {
- udelay(IXGB_DELAY_USECS_AFTER_LINK_RESET);
- link_status =
- ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU)
- && (IXGB_READ_REG(hw, XPCSS) &
- IXGB_XPCSS_ALIGN_STATUS)) ? true : false;
- } while (!link_status && --wait_retries);
-
- } while (!link_status && --lrst_retries);
-
- return link_status;
-}
-
-/******************************************************************************
- * Resets the 10GbE optics module.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-static void
-ixgb_optics_reset(struct ixgb_hw *hw)
-{
- if (hw->phy_type == ixgb_phy_type_txn17401) {
- ixgb_write_phy_reg(hw,
- MDIO_CTRL1,
- IXGB_PHY_ADDRESS,
- MDIO_MMD_PMAPMD,
- MDIO_CTRL1_RESET);
-
- ixgb_read_phy_reg(hw, MDIO_CTRL1, IXGB_PHY_ADDRESS, MDIO_MMD_PMAPMD);
- }
-}
-
-/******************************************************************************
- * Resets the 10GbE optics module for Sun variant NIC.
- *
- * hw - Struct containing variables accessed by shared code
- *****************************************************************************/
-
-#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG 0xC803
-#define IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL 0x0164
-#define IXGB_BCM8704_USER_CTRL_REG 0xC800
-#define IXGB_BCM8704_USER_CTRL_REG_VAL 0x7FBF
-#define IXGB_BCM8704_USER_DEV3_ADDR 0x0003
-#define IXGB_SUN_PHY_ADDRESS 0x0000
-#define IXGB_SUN_PHY_RESET_DELAY 305
-
-static void
-ixgb_optics_reset_bcm(struct ixgb_hw *hw)
-{
- u32 ctrl = IXGB_READ_REG(hw, CTRL0);
- ctrl &= ~IXGB_CTRL0_SDP2;
- ctrl |= IXGB_CTRL0_SDP3;
- IXGB_WRITE_REG(hw, CTRL0, ctrl);
- IXGB_WRITE_FLUSH(hw);
-
- /* SerDes needs extra delay */
- msleep(IXGB_SUN_PHY_RESET_DELAY);
-
- /* Broadcom 7408L configuration */
- /* Reference clock config */
- ixgb_write_phy_reg(hw,
- IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
- IXGB_SUN_PHY_ADDRESS,
- IXGB_BCM8704_USER_DEV3_ADDR,
- IXGB_BCM8704_USER_PMD_TX_CTRL_REG_VAL);
- /* we must read the registers twice */
- ixgb_read_phy_reg(hw,
- IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
- IXGB_SUN_PHY_ADDRESS,
- IXGB_BCM8704_USER_DEV3_ADDR);
- ixgb_read_phy_reg(hw,
- IXGB_BCM8704_USER_PMD_TX_CTRL_REG,
- IXGB_SUN_PHY_ADDRESS,
- IXGB_BCM8704_USER_DEV3_ADDR);
-
- ixgb_write_phy_reg(hw,
- IXGB_BCM8704_USER_CTRL_REG,
- IXGB_SUN_PHY_ADDRESS,
- IXGB_BCM8704_USER_DEV3_ADDR,
- IXGB_BCM8704_USER_CTRL_REG_VAL);
- ixgb_read_phy_reg(hw,
- IXGB_BCM8704_USER_CTRL_REG,
- IXGB_SUN_PHY_ADDRESS,
- IXGB_BCM8704_USER_DEV3_ADDR);
- ixgb_read_phy_reg(hw,
- IXGB_BCM8704_USER_CTRL_REG,
- IXGB_SUN_PHY_ADDRESS,
- IXGB_BCM8704_USER_DEV3_ADDR);
-
- /* SerDes needs extra delay */
- msleep(IXGB_SUN_PHY_RESET_DELAY);
-}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
deleted file mode 100644
index 70bcff5fb3db..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
+++ /dev/null
@@ -1,767 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-#ifndef _IXGB_HW_H_
-#define _IXGB_HW_H_
-
-#include <linux/mdio.h>
-
-#include "ixgb_osdep.h"
-
-/* Enums */
-typedef enum {
- ixgb_mac_unknown = 0,
- ixgb_82597,
- ixgb_num_macs
-} ixgb_mac_type;
-
-/* Types of physical layer modules */
-typedef enum {
- ixgb_phy_type_unknown = 0,
- ixgb_phy_type_g6005, /* 850nm, MM fiber, XPAK transceiver */
- ixgb_phy_type_g6104, /* 1310nm, SM fiber, XPAK transceiver */
- ixgb_phy_type_txn17201, /* 850nm, MM fiber, XPAK transceiver */
- ixgb_phy_type_txn17401, /* 1310nm, SM fiber, XENPAK transceiver */
- ixgb_phy_type_bcm /* SUN specific board */
-} ixgb_phy_type;
-
-/* XPAK transceiver vendors, for the SR adapters */
-typedef enum {
- ixgb_xpak_vendor_intel,
- ixgb_xpak_vendor_infineon
-} ixgb_xpak_vendor;
-
-/* Media Types */
-typedef enum {
- ixgb_media_type_unknown = 0,
- ixgb_media_type_fiber = 1,
- ixgb_media_type_copper = 2,
- ixgb_num_media_types
-} ixgb_media_type;
-
-/* Flow Control Settings */
-typedef enum {
- ixgb_fc_none = 0,
- ixgb_fc_rx_pause = 1,
- ixgb_fc_tx_pause = 2,
- ixgb_fc_full = 3,
- ixgb_fc_default = 0xFF
-} ixgb_fc_type;
-
-/* PCI bus types */
-typedef enum {
- ixgb_bus_type_unknown = 0,
- ixgb_bus_type_pci,
- ixgb_bus_type_pcix
-} ixgb_bus_type;
-
-/* PCI bus speeds */
-typedef enum {
- ixgb_bus_speed_unknown = 0,
- ixgb_bus_speed_33,
- ixgb_bus_speed_66,
- ixgb_bus_speed_100,
- ixgb_bus_speed_133,
- ixgb_bus_speed_reserved
-} ixgb_bus_speed;
-
-/* PCI bus widths */
-typedef enum {
- ixgb_bus_width_unknown = 0,
- ixgb_bus_width_32,
- ixgb_bus_width_64
-} ixgb_bus_width;
-
-#define IXGB_EEPROM_SIZE 64 /* Size in words */
-
-#define SPEED_10000 10000
-#define FULL_DUPLEX 2
-
-#define MIN_NUMBER_OF_DESCRIPTORS 8
-#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 /* 13 bits in RDLEN/TDLEN, 128B aligned */
-
-#define IXGB_DELAY_BEFORE_RESET 10 /* allow 10ms after idling rx/tx units */
-#define IXGB_DELAY_AFTER_RESET 1 /* allow 1ms after the reset */
-#define IXGB_DELAY_AFTER_EE_RESET 10 /* allow 10ms after the EEPROM reset */
-
-#define IXGB_DELAY_USECS_AFTER_LINK_RESET 13 /* allow 13 microseconds after the reset */
- /* NOTE: this is MICROSECONDS */
-#define MAX_RESET_ITERATIONS 8 /* number of iterations to get things right */
-
-/* General Registers */
-#define IXGB_CTRL0 0x00000 /* Device Control Register 0 - RW */
-#define IXGB_CTRL1 0x00008 /* Device Control Register 1 - RW */
-#define IXGB_STATUS 0x00010 /* Device Status Register - RO */
-#define IXGB_EECD 0x00018 /* EEPROM/Flash Control/Data Register - RW */
-#define IXGB_MFS 0x00020 /* Maximum Frame Size - RW */
-
-/* Interrupt */
-#define IXGB_ICR 0x00080 /* Interrupt Cause Read - R/clr */
-#define IXGB_ICS 0x00088 /* Interrupt Cause Set - RW */
-#define IXGB_IMS 0x00090 /* Interrupt Mask Set/Read - RW */
-#define IXGB_IMC 0x00098 /* Interrupt Mask Clear - WO */
-
-/* Receive */
-#define IXGB_RCTL 0x00100 /* RX Control - RW */
-#define IXGB_FCRTL 0x00108 /* Flow Control Receive Threshold Low - RW */
-#define IXGB_FCRTH 0x00110 /* Flow Control Receive Threshold High - RW */
-#define IXGB_RDBAL 0x00118 /* RX Descriptor Base Low - RW */
-#define IXGB_RDBAH 0x0011C /* RX Descriptor Base High - RW */
-#define IXGB_RDLEN 0x00120 /* RX Descriptor Length - RW */
-#define IXGB_RDH 0x00128 /* RX Descriptor Head - RW */
-#define IXGB_RDT 0x00130 /* RX Descriptor Tail - RW */
-#define IXGB_RDTR 0x00138 /* RX Delay Timer Ring - RW */
-#define IXGB_RXDCTL 0x00140 /* Receive Descriptor Control - RW */
-#define IXGB_RAIDC 0x00148 /* Receive Adaptive Interrupt Delay Control - RW */
-#define IXGB_RXCSUM 0x00158 /* Receive Checksum Control - RW */
-#define IXGB_RA 0x00180 /* Receive Address Array Base - RW */
-#define IXGB_RAL 0x00180 /* Receive Address Low [0:15] - RW */
-#define IXGB_RAH 0x00184 /* Receive Address High [0:15] - RW */
-#define IXGB_MTA 0x00200 /* Multicast Table Array [0:127] - RW */
-#define IXGB_VFTA 0x00400 /* VLAN Filter Table Array [0:127] - RW */
-#define IXGB_REQ_RX_DESCRIPTOR_MULTIPLE 8
-
-/* Transmit */
-#define IXGB_TCTL 0x00600 /* TX Control - RW */
-#define IXGB_TDBAL 0x00608 /* TX Descriptor Base Low - RW */
-#define IXGB_TDBAH 0x0060C /* TX Descriptor Base High - RW */
-#define IXGB_TDLEN 0x00610 /* TX Descriptor Length - RW */
-#define IXGB_TDH 0x00618 /* TX Descriptor Head - RW */
-#define IXGB_TDT 0x00620 /* TX Descriptor Tail - RW */
-#define IXGB_TIDV 0x00628 /* TX Interrupt Delay Value - RW */
-#define IXGB_TXDCTL 0x00630 /* Transmit Descriptor Control - RW */
-#define IXGB_TSPMT 0x00638 /* TCP Segmentation PAD & Min Threshold - RW */
-#define IXGB_PAP 0x00640 /* Pause and Pace - RW */
-#define IXGB_REQ_TX_DESCRIPTOR_MULTIPLE 8
-
-/* Physical */
-#define IXGB_PCSC1 0x00700 /* PCS Control 1 - RW */
-#define IXGB_PCSC2 0x00708 /* PCS Control 2 - RW */
-#define IXGB_PCSS1 0x00710 /* PCS Status 1 - RO */
-#define IXGB_PCSS2 0x00718 /* PCS Status 2 - RO */
-#define IXGB_XPCSS 0x00720 /* 10GBASE-X PCS Status (or XGXS Lane Status) - RO */
-#define IXGB_UCCR 0x00728 /* Unilink Circuit Control Register */
-#define IXGB_XPCSTC 0x00730 /* 10GBASE-X PCS Test Control */
-#define IXGB_MACA 0x00738 /* MDI Autoscan Command and Address - RW */
-#define IXGB_APAE 0x00740 /* Autoscan PHY Address Enable - RW */
-#define IXGB_ARD 0x00748 /* Autoscan Read Data - RO */
-#define IXGB_AIS 0x00750 /* Autoscan Interrupt Status - RO */
-#define IXGB_MSCA 0x00758 /* MDI Single Command and Address - RW */
-#define IXGB_MSRWD 0x00760 /* MDI Single Read and Write Data - RW, RO */
-
-/* Wake-up */
-#define IXGB_WUFC 0x00808 /* Wake Up Filter Control - RW */
-#define IXGB_WUS 0x00810 /* Wake Up Status - RO */
-#define IXGB_FFLT 0x01000 /* Flexible Filter Length Table - RW */
-#define IXGB_FFMT 0x01020 /* Flexible Filter Mask Table - RW */
-#define IXGB_FTVT 0x01420 /* Flexible Filter Value Table - RW */
-
-/* Statistics */
-#define IXGB_TPRL 0x02000 /* Total Packets Received (Low) */
-#define IXGB_TPRH 0x02004 /* Total Packets Received (High) */
-#define IXGB_GPRCL 0x02008 /* Good Packets Received Count (Low) */
-#define IXGB_GPRCH 0x0200C /* Good Packets Received Count (High) */
-#define IXGB_BPRCL 0x02010 /* Broadcast Packets Received Count (Low) */
-#define IXGB_BPRCH 0x02014 /* Broadcast Packets Received Count (High) */
-#define IXGB_MPRCL 0x02018 /* Multicast Packets Received Count (Low) */
-#define IXGB_MPRCH 0x0201C /* Multicast Packets Received Count (High) */
-#define IXGB_UPRCL 0x02020 /* Unicast Packets Received Count (Low) */
-#define IXGB_UPRCH 0x02024 /* Unicast Packets Received Count (High) */
-#define IXGB_VPRCL 0x02028 /* VLAN Packets Received Count (Low) */
-#define IXGB_VPRCH 0x0202C /* VLAN Packets Received Count (High) */
-#define IXGB_JPRCL 0x02030 /* Jumbo Packets Received Count (Low) */
-#define IXGB_JPRCH 0x02034 /* Jumbo Packets Received Count (High) */
-#define IXGB_GORCL 0x02038 /* Good Octets Received Count (Low) */
-#define IXGB_GORCH 0x0203C /* Good Octets Received Count (High) */
-#define IXGB_TORL 0x02040 /* Total Octets Received (Low) */
-#define IXGB_TORH 0x02044 /* Total Octets Received (High) */
-#define IXGB_RNBC 0x02048 /* Receive No Buffers Count */
-#define IXGB_RUC 0x02050 /* Receive Undersize Count */
-#define IXGB_ROC 0x02058 /* Receive Oversize Count */
-#define IXGB_RLEC 0x02060 /* Receive Length Error Count */
-#define IXGB_CRCERRS 0x02068 /* CRC Error Count */
-#define IXGB_ICBC 0x02070 /* Illegal control byte in mid-packet Count */
-#define IXGB_ECBC 0x02078 /* Error Control byte in mid-packet Count */
-#define IXGB_MPC 0x02080 /* Missed Packets Count */
-#define IXGB_TPTL 0x02100 /* Total Packets Transmitted (Low) */
-#define IXGB_TPTH 0x02104 /* Total Packets Transmitted (High) */
-#define IXGB_GPTCL 0x02108 /* Good Packets Transmitted Count (Low) */
-#define IXGB_GPTCH 0x0210C /* Good Packets Transmitted Count (High) */
-#define IXGB_BPTCL 0x02110 /* Broadcast Packets Transmitted Count (Low) */
-#define IXGB_BPTCH 0x02114 /* Broadcast Packets Transmitted Count (High) */
-#define IXGB_MPTCL 0x02118 /* Multicast Packets Transmitted Count (Low) */
-#define IXGB_MPTCH 0x0211C /* Multicast Packets Transmitted Count (High) */
-#define IXGB_UPTCL 0x02120 /* Unicast Packets Transmitted Count (Low) */
-#define IXGB_UPTCH 0x02124 /* Unicast Packets Transmitted Count (High) */
-#define IXGB_VPTCL 0x02128 /* VLAN Packets Transmitted Count (Low) */
-#define IXGB_VPTCH 0x0212C /* VLAN Packets Transmitted Count (High) */
-#define IXGB_JPTCL 0x02130 /* Jumbo Packets Transmitted Count (Low) */
-#define IXGB_JPTCH 0x02134 /* Jumbo Packets Transmitted Count (High) */
-#define IXGB_GOTCL 0x02138 /* Good Octets Transmitted Count (Low) */
-#define IXGB_GOTCH 0x0213C /* Good Octets Transmitted Count (High) */
-#define IXGB_TOTL 0x02140 /* Total Octets Transmitted Count (Low) */
-#define IXGB_TOTH 0x02144 /* Total Octets Transmitted Count (High) */
-#define IXGB_DC 0x02148 /* Defer Count */
-#define IXGB_PLT64C 0x02150 /* Packet Transmitted was less than 64 bytes Count */
-#define IXGB_TSCTC 0x02170 /* TCP Segmentation Context Transmitted Count */
-#define IXGB_TSCTFC 0x02178 /* TCP Segmentation Context Tx Fail Count */
-#define IXGB_IBIC 0x02180 /* Illegal byte during Idle stream count */
-#define IXGB_RFC 0x02188 /* Remote Fault Count */
-#define IXGB_LFC 0x02190 /* Local Fault Count */
-#define IXGB_PFRC 0x02198 /* Pause Frame Receive Count */
-#define IXGB_PFTC 0x021A0 /* Pause Frame Transmit Count */
-#define IXGB_MCFRC 0x021A8 /* MAC Control Frames (non-Pause) Received Count */
-#define IXGB_MCFTC 0x021B0 /* MAC Control Frames (non-Pause) Transmitted Count */
-#define IXGB_XONRXC 0x021B8 /* XON Received Count */
-#define IXGB_XONTXC 0x021C0 /* XON Transmitted Count */
-#define IXGB_XOFFRXC 0x021C8 /* XOFF Received Count */
-#define IXGB_XOFFTXC 0x021D0 /* XOFF Transmitted Count */
-#define IXGB_RJC 0x021D8 /* Receive Jabber Count */
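
The counters above are 64-bit values exposed as low/high register pairs (for example GORCL/GORCH). A minimal sketch of combining one such pair, with read_reg() as a hypothetical stand-in for IXGB_READ_REG():

#include <stdint.h>

extern uint32_t read_reg(uint32_t offset);   /* stand-in for IXGB_READ_REG() */

#define IXGB_GORCL 0x02038  /* Good Octets Received Count (Low)  */
#define IXGB_GORCH 0x0203C  /* Good Octets Received Count (High) */

/* Combine a low/high counter pair into one 64-bit value. */
static uint64_t read_stat64(uint32_t low_off, uint32_t high_off)
{
	uint64_t lo = read_reg(low_off);
	uint64_t hi = read_reg(high_off);

	return (hi << 32) | lo;
}

/* Example: good octets received since the counters were last cleared. */
uint64_t good_octets_received(void)
{
	return read_stat64(IXGB_GORCL, IXGB_GORCH);
}
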
-
-/* CTRL0 Bit Masks */
-#define IXGB_CTRL0_LRST 0x00000008
-#define IXGB_CTRL0_JFE 0x00000010
-#define IXGB_CTRL0_XLE 0x00000020
-#define IXGB_CTRL0_MDCS 0x00000040
-#define IXGB_CTRL0_CMDC 0x00000080
-#define IXGB_CTRL0_SDP0 0x00040000
-#define IXGB_CTRL0_SDP1 0x00080000
-#define IXGB_CTRL0_SDP2 0x00100000
-#define IXGB_CTRL0_SDP3 0x00200000
-#define IXGB_CTRL0_SDP0_DIR 0x00400000
-#define IXGB_CTRL0_SDP1_DIR 0x00800000
-#define IXGB_CTRL0_SDP2_DIR 0x01000000
-#define IXGB_CTRL0_SDP3_DIR 0x02000000
-#define IXGB_CTRL0_RST 0x04000000
-#define IXGB_CTRL0_RPE 0x08000000
-#define IXGB_CTRL0_TPE 0x10000000
-#define IXGB_CTRL0_VME 0x40000000
-
-/* CTRL1 Bit Masks */
-#define IXGB_CTRL1_GPI0_EN 0x00000001
-#define IXGB_CTRL1_GPI1_EN 0x00000002
-#define IXGB_CTRL1_GPI2_EN 0x00000004
-#define IXGB_CTRL1_GPI3_EN 0x00000008
-#define IXGB_CTRL1_SDP4 0x00000010
-#define IXGB_CTRL1_SDP5 0x00000020
-#define IXGB_CTRL1_SDP6 0x00000040
-#define IXGB_CTRL1_SDP7 0x00000080
-#define IXGB_CTRL1_SDP4_DIR 0x00000100
-#define IXGB_CTRL1_SDP5_DIR 0x00000200
-#define IXGB_CTRL1_SDP6_DIR 0x00000400
-#define IXGB_CTRL1_SDP7_DIR 0x00000800
-#define IXGB_CTRL1_EE_RST 0x00002000
-#define IXGB_CTRL1_RO_DIS 0x00020000
-#define IXGB_CTRL1_PCIXHM_MASK 0x00C00000
-#define IXGB_CTRL1_PCIXHM_1_2 0x00000000
-#define IXGB_CTRL1_PCIXHM_5_8 0x00400000
-#define IXGB_CTRL1_PCIXHM_3_4 0x00800000
-#define IXGB_CTRL1_PCIXHM_7_8 0x00C00000
-
-/* STATUS Bit Masks */
-#define IXGB_STATUS_LU 0x00000002
-#define IXGB_STATUS_AIP 0x00000004
-#define IXGB_STATUS_TXOFF 0x00000010
-#define IXGB_STATUS_XAUIME 0x00000020
-#define IXGB_STATUS_RES 0x00000040
-#define IXGB_STATUS_RIS 0x00000080
-#define IXGB_STATUS_RIE 0x00000100
-#define IXGB_STATUS_RLF 0x00000200
-#define IXGB_STATUS_RRF 0x00000400
-#define IXGB_STATUS_PCI_SPD 0x00000800
-#define IXGB_STATUS_BUS64 0x00001000
-#define IXGB_STATUS_PCIX_MODE 0x00002000
-#define IXGB_STATUS_PCIX_SPD_MASK 0x0000C000
-#define IXGB_STATUS_PCIX_SPD_66 0x00000000
-#define IXGB_STATUS_PCIX_SPD_100 0x00004000
-#define IXGB_STATUS_PCIX_SPD_133 0x00008000
-#define IXGB_STATUS_REV_ID_MASK 0x000F0000
-#define IXGB_STATUS_REV_ID_SHIFT 16
-
-/* EECD Bit Masks */
-#define IXGB_EECD_SK 0x00000001
-#define IXGB_EECD_CS 0x00000002
-#define IXGB_EECD_DI 0x00000004
-#define IXGB_EECD_DO 0x00000008
-#define IXGB_EECD_FWE_MASK 0x00000030
-#define IXGB_EECD_FWE_DIS 0x00000010
-#define IXGB_EECD_FWE_EN 0x00000020
-
-/* MFS */
-#define IXGB_MFS_SHIFT 16
-
-/* Interrupt Register Bit Masks (used for ICR, ICS, IMS, and IMC) */
-#define IXGB_INT_TXDW 0x00000001
-#define IXGB_INT_TXQE 0x00000002
-#define IXGB_INT_LSC 0x00000004
-#define IXGB_INT_RXSEQ 0x00000008
-#define IXGB_INT_RXDMT0 0x00000010
-#define IXGB_INT_RXO 0x00000040
-#define IXGB_INT_RXT0 0x00000080
-#define IXGB_INT_AUTOSCAN 0x00000200
-#define IXGB_INT_GPI0 0x00000800
-#define IXGB_INT_GPI1 0x00001000
-#define IXGB_INT_GPI2 0x00002000
-#define IXGB_INT_GPI3 0x00004000
-
-/* RCTL Bit Masks */
-#define IXGB_RCTL_RXEN 0x00000002
-#define IXGB_RCTL_SBP 0x00000004
-#define IXGB_RCTL_UPE 0x00000008
-#define IXGB_RCTL_MPE 0x00000010
-#define IXGB_RCTL_RDMTS_MASK 0x00000300
-#define IXGB_RCTL_RDMTS_1_2 0x00000000
-#define IXGB_RCTL_RDMTS_1_4 0x00000100
-#define IXGB_RCTL_RDMTS_1_8 0x00000200
-#define IXGB_RCTL_MO_MASK 0x00003000
-#define IXGB_RCTL_MO_47_36 0x00000000
-#define IXGB_RCTL_MO_46_35 0x00001000
-#define IXGB_RCTL_MO_45_34 0x00002000
-#define IXGB_RCTL_MO_43_32 0x00003000
-#define IXGB_RCTL_MO_SHIFT 12
-#define IXGB_RCTL_BAM 0x00008000
-#define IXGB_RCTL_BSIZE_MASK 0x00030000
-#define IXGB_RCTL_BSIZE_2048 0x00000000
-#define IXGB_RCTL_BSIZE_4096 0x00010000
-#define IXGB_RCTL_BSIZE_8192 0x00020000
-#define IXGB_RCTL_BSIZE_16384 0x00030000
-#define IXGB_RCTL_VFE 0x00040000
-#define IXGB_RCTL_CFIEN 0x00080000
-#define IXGB_RCTL_CFI 0x00100000
-#define IXGB_RCTL_RPDA_MASK 0x00600000
-#define IXGB_RCTL_RPDA_MC_MAC 0x00000000
-#define IXGB_RCTL_MC_ONLY 0x00400000
-#define IXGB_RCTL_CFF 0x00800000
-#define IXGB_RCTL_SECRC 0x04000000
-#define IXGB_RDT_FPDB 0x80000000
-
-#define IXGB_RCTL_IDLE_RX_UNIT 0
-
-/* FCRTL Bit Masks */
-#define IXGB_FCRTL_XONE 0x80000000
-
-/* RXDCTL Bit Masks */
-#define IXGB_RXDCTL_PTHRESH_MASK 0x000001FF
-#define IXGB_RXDCTL_PTHRESH_SHIFT 0
-#define IXGB_RXDCTL_HTHRESH_MASK 0x0003FE00
-#define IXGB_RXDCTL_HTHRESH_SHIFT 9
-#define IXGB_RXDCTL_WTHRESH_MASK 0x07FC0000
-#define IXGB_RXDCTL_WTHRESH_SHIFT 18
-
-/* RAIDC Bit Masks */
-#define IXGB_RAIDC_HIGHTHRS_MASK 0x0000003F
-#define IXGB_RAIDC_DELAY_MASK 0x000FF800
-#define IXGB_RAIDC_DELAY_SHIFT 11
-#define IXGB_RAIDC_POLL_MASK 0x1FF00000
-#define IXGB_RAIDC_POLL_SHIFT 20
-#define IXGB_RAIDC_RXT_GATE 0x40000000
-#define IXGB_RAIDC_EN 0x80000000
-
-#define IXGB_RAIDC_POLL_1000_INTERRUPTS_PER_SECOND 1220
-#define IXGB_RAIDC_POLL_5000_INTERRUPTS_PER_SECOND 244
-#define IXGB_RAIDC_POLL_10000_INTERRUPTS_PER_SECOND 122
-#define IXGB_RAIDC_POLL_20000_INTERRUPTS_PER_SECOND 61
-
-/* RXCSUM Bit Masks */
-#define IXGB_RXCSUM_IPOFL 0x00000100
-#define IXGB_RXCSUM_TUOFL 0x00000200
-
-/* RAH Bit Masks */
-#define IXGB_RAH_ASEL_MASK 0x00030000
-#define IXGB_RAH_ASEL_DEST 0x00000000
-#define IXGB_RAH_ASEL_SRC 0x00010000
-#define IXGB_RAH_AV 0x80000000
-
-/* TCTL Bit Masks */
-#define IXGB_TCTL_TCE 0x00000001
-#define IXGB_TCTL_TXEN 0x00000002
-#define IXGB_TCTL_TPDE 0x00000004
-
-#define IXGB_TCTL_IDLE_TX_UNIT 0
-
-/* TXDCTL Bit Masks */
-#define IXGB_TXDCTL_PTHRESH_MASK 0x0000007F
-#define IXGB_TXDCTL_HTHRESH_MASK 0x00007F00
-#define IXGB_TXDCTL_HTHRESH_SHIFT 8
-#define IXGB_TXDCTL_WTHRESH_MASK 0x007F0000
-#define IXGB_TXDCTL_WTHRESH_SHIFT 16
-
-/* TSPMT Bit Masks */
-#define IXGB_TSPMT_TSMT_MASK 0x0000FFFF
-#define IXGB_TSPMT_TSPBP_MASK 0xFFFF0000
-#define IXGB_TSPMT_TSPBP_SHIFT 16
-
-/* PAP Bit Masks */
-#define IXGB_PAP_TXPC_MASK 0x0000FFFF
-#define IXGB_PAP_TXPV_MASK 0x000F0000
-#define IXGB_PAP_TXPV_10G 0x00000000
-#define IXGB_PAP_TXPV_1G 0x00010000
-#define IXGB_PAP_TXPV_2G 0x00020000
-#define IXGB_PAP_TXPV_3G 0x00030000
-#define IXGB_PAP_TXPV_4G 0x00040000
-#define IXGB_PAP_TXPV_5G 0x00050000
-#define IXGB_PAP_TXPV_6G 0x00060000
-#define IXGB_PAP_TXPV_7G 0x00070000
-#define IXGB_PAP_TXPV_8G 0x00080000
-#define IXGB_PAP_TXPV_9G 0x00090000
-#define IXGB_PAP_TXPV_WAN 0x000F0000
-
-/* PCSC1 Bit Masks */
-#define IXGB_PCSC1_LOOPBACK 0x00004000
-
-/* PCSC2 Bit Masks */
-#define IXGB_PCSC2_PCS_TYPE_MASK 0x00000003
-#define IXGB_PCSC2_PCS_TYPE_10GBX 0x00000001
-
-/* PCSS1 Bit Masks */
-#define IXGB_PCSS1_LOCAL_FAULT 0x00000080
-#define IXGB_PCSS1_RX_LINK_STATUS 0x00000004
-
-/* PCSS2 Bit Masks */
-#define IXGB_PCSS2_DEV_PRES_MASK 0x0000C000
-#define IXGB_PCSS2_DEV_PRES 0x00004000
-#define IXGB_PCSS2_TX_LF 0x00000800
-#define IXGB_PCSS2_RX_LF 0x00000400
-#define IXGB_PCSS2_10GBW 0x00000004
-#define IXGB_PCSS2_10GBX 0x00000002
-#define IXGB_PCSS2_10GBR 0x00000001
-
-/* XPCSS Bit Masks */
-#define IXGB_XPCSS_ALIGN_STATUS 0x00001000
-#define IXGB_XPCSS_PATTERN_TEST 0x00000800
-#define IXGB_XPCSS_LANE_3_SYNC 0x00000008
-#define IXGB_XPCSS_LANE_2_SYNC 0x00000004
-#define IXGB_XPCSS_LANE_1_SYNC 0x00000002
-#define IXGB_XPCSS_LANE_0_SYNC 0x00000001
-
-/* XPCSTC Bit Masks */
-#define IXGB_XPCSTC_BERT_TRIG 0x00200000
-#define IXGB_XPCSTC_BERT_SST 0x00100000
-#define IXGB_XPCSTC_BERT_PSZ_MASK 0x000C0000
-#define IXGB_XPCSTC_BERT_PSZ_SHIFT 17
-#define IXGB_XPCSTC_BERT_PSZ_INF 0x00000003
-#define IXGB_XPCSTC_BERT_PSZ_68 0x00000001
-#define IXGB_XPCSTC_BERT_PSZ_1028 0x00000000
-
-/* MSCA bit Masks */
-/* New Protocol Address */
-#define IXGB_MSCA_NP_ADDR_MASK 0x0000FFFF
-#define IXGB_MSCA_NP_ADDR_SHIFT 0
-/* Either Device Type or Register Address, depending on ST_CODE */
-#define IXGB_MSCA_DEV_TYPE_MASK 0x001F0000
-#define IXGB_MSCA_DEV_TYPE_SHIFT 16
-#define IXGB_MSCA_PHY_ADDR_MASK 0x03E00000
-#define IXGB_MSCA_PHY_ADDR_SHIFT 21
-#define IXGB_MSCA_OP_CODE_MASK 0x0C000000
-/* OP_CODE == 00, Address cycle, New Protocol */
-/* OP_CODE == 01, Write operation */
-/* OP_CODE == 10, Read operation */
-/* OP_CODE == 11, Read, auto increment, New Protocol */
-#define IXGB_MSCA_ADDR_CYCLE 0x00000000
-#define IXGB_MSCA_WRITE 0x04000000
-#define IXGB_MSCA_READ 0x08000000
-#define IXGB_MSCA_READ_AUTOINC 0x0C000000
-#define IXGB_MSCA_OP_CODE_SHIFT 26
-#define IXGB_MSCA_ST_CODE_MASK 0x30000000
-/* ST_CODE == 00, New Protocol */
-/* ST_CODE == 01, Old Protocol */
-#define IXGB_MSCA_NEW_PROTOCOL 0x00000000
-#define IXGB_MSCA_OLD_PROTOCOL 0x10000000
-#define IXGB_MSCA_ST_CODE_SHIFT 28
-/* Initiate command, self-clearing when command completes */
-#define IXGB_MSCA_MDI_COMMAND 0x40000000
-/* MDI In Progress Enable. */
-#define IXGB_MSCA_MDI_IN_PROG_EN 0x80000000
-
-/* MSRWD bit masks */
-#define IXGB_MSRWD_WRITE_DATA_MASK 0x0000FFFF
-#define IXGB_MSRWD_WRITE_DATA_SHIFT 0
-#define IXGB_MSRWD_READ_DATA_MASK 0xFFFF0000
-#define IXGB_MSRWD_READ_DATA_SHIFT 16
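
To make the MSCA/MSRWD field layout concrete, a "New Protocol" register read can be composed from these masks roughly as below. This is a sketch only: do_msca() and read_msrwd() are hypothetical helpers standing in for writing MSCA, waiting for the self-clearing IXGB_MSCA_MDI_COMMAND bit, and reading MSRWD, much as ixgb_write_phy_reg() above does for the write path.

#include <stdint.h>

/* Field positions/masks, mirroring the MSCA/MSRWD definitions above. */
#define MSCA_NP_ADDR_SHIFT    0
#define MSCA_DEV_TYPE_SHIFT   16
#define MSCA_PHY_ADDR_SHIFT   21
#define MSCA_ADDR_CYCLE       0x00000000u
#define MSCA_READ             0x08000000u
#define MSCA_NEW_PROTOCOL     0x00000000u
#define MSCA_MDI_COMMAND      0x40000000u
#define MSRWD_READ_DATA_SHIFT 16

/* Hypothetical helpers: issue an MSCA command, wait for the self-clearing
 * MDI_COMMAND bit to drop, and fetch the MSRWD register. */
extern void do_msca(uint32_t command);
extern uint32_t read_msrwd(void);

/* Sketch of a "New Protocol" (address cycle, then read) register read. */
uint16_t mdio_read_sketch(uint16_t reg, uint8_t dev_type, uint8_t phy_addr)
{
	/* Address cycle: latch the 16-bit register address first. */
	do_msca((reg << MSCA_NP_ADDR_SHIFT) |
		(dev_type << MSCA_DEV_TYPE_SHIFT) |
		(phy_addr << MSCA_PHY_ADDR_SHIFT) |
		MSCA_NEW_PROTOCOL | MSCA_ADDR_CYCLE | MSCA_MDI_COMMAND);

	/* Read cycle: same addressing fields, READ op-code. */
	do_msca((reg << MSCA_NP_ADDR_SHIFT) |
		(dev_type << MSCA_DEV_TYPE_SHIFT) |
		(phy_addr << MSCA_PHY_ADDR_SHIFT) |
		MSCA_NEW_PROTOCOL | MSCA_READ | MSCA_MDI_COMMAND);

	/* The result comes back in the upper half of MSRWD. */
	return (uint16_t)(read_msrwd() >> MSRWD_READ_DATA_SHIFT);
}
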
-
-/* Definitions for the optics devices on the MDIO bus. */
-#define IXGB_PHY_ADDRESS 0x0 /* Single PHY, multiple "Devices" */
-
-#define MDIO_PMA_PMD_XPAK_VENDOR_NAME 0x803A /* XPAK/XENPAK devices only */
-
-/* Vendor-specific MDIO registers */
-#define G6XXX_PMA_PMD_VS1 0xC001 /* Vendor-specific register */
-#define G6XXX_XGXS_XAUI_VS2 0x18 /* Vendor-specific register */
-
-#define G6XXX_PMA_PMD_VS1_PLL_RESET 0x80
-#define G6XXX_PMA_PMD_VS1_REMOVE_PLL_RESET 0x00
-#define G6XXX_XGXS_XAUI_VS2_INPUT_MASK 0x0F /* XAUI lanes synchronized */
-
-/* Layout of a single receive descriptor. The controller assumes that this
- * structure is packed into 16 bytes, which is a safe assumption with most
- * compilers. However, some compilers may insert padding between the fields,
- * in which case the structure must be packed in some compiler-specific
- * manner. */
-struct ixgb_rx_desc {
- __le64 buff_addr;
- __le16 length;
- __le16 reserved;
- u8 status;
- u8 errors;
- __le16 special;
-};
-
-#define IXGB_RX_DESC_STATUS_DD 0x01
-#define IXGB_RX_DESC_STATUS_EOP 0x02
-#define IXGB_RX_DESC_STATUS_IXSM 0x04
-#define IXGB_RX_DESC_STATUS_VP 0x08
-#define IXGB_RX_DESC_STATUS_TCPCS 0x20
-#define IXGB_RX_DESC_STATUS_IPCS 0x40
-#define IXGB_RX_DESC_STATUS_PIF 0x80
-
-#define IXGB_RX_DESC_ERRORS_CE 0x01
-#define IXGB_RX_DESC_ERRORS_SE 0x02
-#define IXGB_RX_DESC_ERRORS_P 0x08
-#define IXGB_RX_DESC_ERRORS_TCPE 0x20
-#define IXGB_RX_DESC_ERRORS_IPE 0x40
-#define IXGB_RX_DESC_ERRORS_RXE 0x80
-
-#define IXGB_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
-#define IXGB_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
-#define IXGB_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority is in upper 3 of 16 */
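
Given the descriptor layout and bits above, a receive-completion check can be sketched as follows. The struct mirrors ixgb_rx_desc with plain fixed-width types so it compiles outside the kernel (the real fields are little-endian __le* types and need conversion), and process_frame() is a hypothetical callback.

#include <stdint.h>
#include <assert.h>

/* Host-side mirror of struct ixgb_rx_desc; the hardware requires the
 * 16-byte packing the comment above describes. */
struct rx_desc {
	uint64_t buff_addr;
	uint16_t length;
	uint16_t reserved;
	uint8_t  status;
	uint8_t  errors;
	uint16_t special;
};
static_assert(sizeof(struct rx_desc) == 16, "descriptor must pack to 16 bytes");

#define RX_STATUS_DD         0x01    /* descriptor done */
#define RX_STATUS_EOP        0x02    /* end of packet   */
#define RX_SPECIAL_VLAN_MASK 0x0FFF  /* VLAN ID in low 12 bits */

extern void process_frame(uint16_t length, uint16_t vlan_id);

/* Consume one descriptor if the hardware has finished with it. */
int poll_rx_desc(const struct rx_desc *d)
{
	if (!(d->status & RX_STATUS_DD))
		return 0;                       /* still owned by hardware */

	if ((d->status & RX_STATUS_EOP) && !d->errors)
		process_frame(d->length, d->special & RX_SPECIAL_VLAN_MASK);

	return 1;                               /* descriptor consumed */
}
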
-
-/* Layout of a single transmit descriptor. The controller assumes that this
- * structure is packed into 16 bytes, which is a safe assumption with most
- * compilers. However, some compilers may insert padding between the fields,
- * in which case the structure must be packed in some compiler-specific
- * manner. */
-struct ixgb_tx_desc {
- __le64 buff_addr;
- __le32 cmd_type_len;
- u8 status;
- u8 popts;
- __le16 vlan;
-};
-
-#define IXGB_TX_DESC_LENGTH_MASK 0x000FFFFF
-#define IXGB_TX_DESC_TYPE_MASK 0x00F00000
-#define IXGB_TX_DESC_TYPE_SHIFT 20
-#define IXGB_TX_DESC_CMD_MASK 0xFF000000
-#define IXGB_TX_DESC_CMD_SHIFT 24
-#define IXGB_TX_DESC_CMD_EOP 0x01000000
-#define IXGB_TX_DESC_CMD_TSE 0x04000000
-#define IXGB_TX_DESC_CMD_RS 0x08000000
-#define IXGB_TX_DESC_CMD_VLE 0x40000000
-#define IXGB_TX_DESC_CMD_IDE 0x80000000
-
-#define IXGB_TX_DESC_TYPE 0x00100000
-
-#define IXGB_TX_DESC_STATUS_DD 0x01
-
-#define IXGB_TX_DESC_POPTS_IXSM 0x01
-#define IXGB_TX_DESC_POPTS_TXSM 0x02
-#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */
-
-struct ixgb_context_desc {
- u8 ipcss;
- u8 ipcso;
- __le16 ipcse;
- u8 tucss;
- u8 tucso;
- __le16 tucse;
- __le32 cmd_type_len;
- u8 status;
- u8 hdr_len;
- __le16 mss;
-};
-
-#define IXGB_CONTEXT_DESC_CMD_TCP 0x01000000
-#define IXGB_CONTEXT_DESC_CMD_IP 0x02000000
-#define IXGB_CONTEXT_DESC_CMD_TSE 0x04000000
-#define IXGB_CONTEXT_DESC_CMD_RS 0x08000000
-#define IXGB_CONTEXT_DESC_CMD_IDE 0x80000000
-
-#define IXGB_CONTEXT_DESC_TYPE 0x00000000
-
-#define IXGB_CONTEXT_DESC_STATUS_DD 0x01
-
-/* Filters */
-#define IXGB_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */
-#define IXGB_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
-#define IXGB_RAR_ENTRIES 3 /* Number of entries in Rx Address array */
-
-#define IXGB_MEMORY_REGISTER_BASE_ADDRESS 0
-#define ENET_HEADER_SIZE 14
-#define ENET_FCS_LENGTH 4
-#define IXGB_MAX_NUM_MULTICAST_ADDRESSES 128
-#define IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS 60
-#define IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS 1514
-#define IXGB_MAX_JUMBO_FRAME_SIZE 0x3F00
-
-/* Phy Addresses */
-#define IXGB_OPTICAL_PHY_ADDR 0x0 /* Optical Module phy address */
-#define IXGB_XAUII_PHY_ADDR 0x1 /* Xauii transceiver phy address */
-#define IXGB_DIAG_PHY_ADDR 0x1F /* Diagnostic Device phy address */
-
-/* This structure takes a 64k flash and maps it for identification commands */
-struct ixgb_flash_buffer {
- u8 manufacturer_id;
- u8 device_id;
- u8 filler1[0x2AA8];
- u8 cmd2;
- u8 filler2[0x2AAA];
- u8 cmd1;
- u8 filler3[0xAAAA];
-};
-
-/* Flow control parameters */
-struct ixgb_fc {
- u32 high_water; /* Flow Control High-water */
- u32 low_water; /* Flow Control Low-water */
- u16 pause_time; /* Flow Control Pause timer */
- bool send_xon; /* Flow control send XON */
- ixgb_fc_type type; /* Type of flow control */
-};
-
-/* The historical defaults for the flow control values are given below. */
-#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */
-#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */
-#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */
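
A small sketch of loading those historical defaults into the flow-control block; the struct is a host-side mirror of ixgb_fc (which lives inside struct ixgb_hw), and choosing full flow control is only an illustrative assumption.

#include <stdbool.h>
#include <stdint.h>

/* Host-side mirror of struct ixgb_fc from ixgb_hw.h. */
enum fc_type { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

struct fc_params {
	uint32_t high_water;  /* upper Rx threshold (FC_DEFAULT_HI_THRESH) */
	uint32_t low_water;   /* lower Rx threshold (FC_DEFAULT_LO_THRESH) */
	uint16_t pause_time;  /* value carried in transmitted PAUSE frames */
	bool     send_xon;
	enum fc_type type;
};

/* Populate the block with the historical defaults defined above. */
static void fc_set_defaults(struct fc_params *fc)
{
	fc->high_water = 0x8000;  /* 32 KB */
	fc->low_water  = 0x4000;  /* 16 KB */
	fc->pause_time = 0x100;   /* ~130 us */
	fc->send_xon   = true;
	fc->type       = FC_FULL; /* assumption: full flow control */
}
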
-
-/* Phy definitions */
-#define IXGB_MAX_PHY_REG_ADDRESS 0xFFFF
-#define IXGB_MAX_PHY_ADDRESS 31
-#define IXGB_MAX_PHY_DEV_TYPE 31
-
-/* Bus parameters */
-struct ixgb_bus {
- ixgb_bus_speed speed;
- ixgb_bus_width width;
- ixgb_bus_type type;
-};
-
-struct ixgb_hw {
- u8 __iomem *hw_addr;/* Base Address of the hardware */
- void *back; /* Pointer to OS-dependent struct */
- struct ixgb_fc fc; /* Flow control parameters */
- struct ixgb_bus bus; /* Bus parameters */
- u32 phy_id; /* Phy Identifier */
- u32 phy_addr; /* XGMII address of Phy */
- ixgb_mac_type mac_type; /* Identifier for MAC controller */
- ixgb_phy_type phy_type; /* Transceiver/phy identifier */
- u32 max_frame_size; /* Maximum frame size supported */
- u32 mc_filter_type; /* Multicast filter hash type */
- u32 num_mc_addrs; /* Number of current Multicast addrs */
- u8 curr_mac_addr[ETH_ALEN]; /* Individual address currently programmed in MAC */
- u32 num_tx_desc; /* Number of Transmit descriptors */
- u32 num_rx_desc; /* Number of Receive descriptors */
- u32 rx_buffer_size; /* Size of Receive buffer */
- bool link_up; /* true if link is valid */
- bool adapter_stopped; /* State of adapter */
- u16 device_id; /* device id from PCI configuration space */
- u16 vendor_id; /* vendor id from PCI configuration space */
- u8 revision_id; /* revision id from PCI configuration space */
- u16 subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */
- u16 subsystem_id; /* subsystem id from PCI configuration space */
- u32 bar0; /* Base Address registers */
- u32 bar1;
- u32 bar2;
- u32 bar3;
- u16 pci_cmd_word; /* PCI command register id from PCI configuration space */
- __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */
- unsigned long io_base; /* Our I/O mapped location */
- u32 lastLFC;
- u32 lastRFC;
-};
-
-/* Statistics reported by the hardware */
-struct ixgb_hw_stats {
- u64 tprl;
- u64 tprh;
- u64 gprcl;
- u64 gprch;
- u64 bprcl;
- u64 bprch;
- u64 mprcl;
- u64 mprch;
- u64 uprcl;
- u64 uprch;
- u64 vprcl;
- u64 vprch;
- u64 jprcl;
- u64 jprch;
- u64 gorcl;
- u64 gorch;
- u64 torl;
- u64 torh;
- u64 rnbc;
- u64 ruc;
- u64 roc;
- u64 rlec;
- u64 crcerrs;
- u64 icbc;
- u64 ecbc;
- u64 mpc;
- u64 tptl;
- u64 tpth;
- u64 gptcl;
- u64 gptch;
- u64 bptcl;
- u64 bptch;
- u64 mptcl;
- u64 mptch;
- u64 uptcl;
- u64 uptch;
- u64 vptcl;
- u64 vptch;
- u64 jptcl;
- u64 jptch;
- u64 gotcl;
- u64 gotch;
- u64 totl;
- u64 toth;
- u64 dc;
- u64 plt64c;
- u64 tsctc;
- u64 tsctfc;
- u64 ibic;
- u64 rfc;
- u64 lfc;
- u64 pfrc;
- u64 pftc;
- u64 mcfrc;
- u64 mcftc;
- u64 xonrxc;
- u64 xontxc;
- u64 xoffrxc;
- u64 xofftxc;
- u64 rjc;
-};
-
-/* Function Prototypes */
-bool ixgb_adapter_stop(struct ixgb_hw *hw);
-bool ixgb_init_hw(struct ixgb_hw *hw);
-bool ixgb_adapter_start(struct ixgb_hw *hw);
-void ixgb_check_for_link(struct ixgb_hw *hw);
-bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
-
-void ixgb_rar_set(struct ixgb_hw *hw, const u8 *addr, u32 index);
-
-/* Filters (multicast, vlan, receive) */
-void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, u32 pad);
-
-/* Vfta functions */
-void ixgb_write_vfta(struct ixgb_hw *hw, u32 offset, u32 value);
-
-/* Access functions to eeprom data */
-void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
-u32 ixgb_get_ee_pba_number(struct ixgb_hw *hw);
-u16 ixgb_get_ee_device_id(struct ixgb_hw *hw);
-bool ixgb_get_eeprom_data(struct ixgb_hw *hw);
-__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index);
-
-/* Everything else */
-void ixgb_led_on(struct ixgb_hw *hw);
-void ixgb_led_off(struct ixgb_hw *hw);
-void ixgb_write_pci_cfg(struct ixgb_hw *hw,
- u32 reg,
- u16 * value);
-
-
-#endif /* _IXGB_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
deleted file mode 100644
index 9695b8215f01..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-#ifndef _IXGB_IDS_H_
-#define _IXGB_IDS_H_
-
-/**********************************************************************
-** The Device and Vendor IDs for 10 Gigabit MACs
-**********************************************************************/
-
-#define IXGB_DEVICE_ID_82597EX 0x1048
-#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
-#define IXGB_DEVICE_ID_82597EX_LR 0x1B48
-#define IXGB_SUBDEVICE_ID_A11F 0xA11F
-#define IXGB_SUBDEVICE_ID_A01F 0xA01F
-
-#define IXGB_DEVICE_ID_82597EX_CX4 0x109E
-#define IXGB_SUBDEVICE_ID_A00C 0xA00C
-#define IXGB_SUBDEVICE_ID_A01C 0xA01C
-#define IXGB_SUBDEVICE_ID_7036 0x7036
-
-#endif /* #ifndef _IXGB_IDS_H_ */
-/* End of File */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
deleted file mode 100644
index 99d481904ce6..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ /dev/null
@@ -1,2295 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/prefetch.h>
-#include "ixgb.h"
-
-char ixgb_driver_name[] = "ixgb";
-static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
-
-static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
-
-#define IXGB_CB_LENGTH 256
-static unsigned int copybreak __read_mostly = IXGB_CB_LENGTH;
-module_param(copybreak, uint, 0644);
-MODULE_PARM_DESC(copybreak,
- "Maximum size of packet that is copied to a new buffer on receive");
-
-/* ixgb_pci_tbl - PCI Device ID Table
- *
- * Wildcard entries (PCI_ANY_ID) should come last
- * Last entry must be all 0s
- *
- * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
- * Class, Class Mask, private data (not used) }
- */
-static const struct pci_device_id ixgb_pci_tbl[] = {
- {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
-
- /* required last entry */
- {0,}
-};
-
-MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
-
-/* Local Function Prototypes */
-static int ixgb_init_module(void);
-static void ixgb_exit_module(void);
-static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-static void ixgb_remove(struct pci_dev *pdev);
-static int ixgb_sw_init(struct ixgb_adapter *adapter);
-static int ixgb_open(struct net_device *netdev);
-static int ixgb_close(struct net_device *netdev);
-static void ixgb_configure_tx(struct ixgb_adapter *adapter);
-static void ixgb_configure_rx(struct ixgb_adapter *adapter);
-static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
-static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
-static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
-static void ixgb_set_multi(struct net_device *netdev);
-static void ixgb_watchdog(struct timer_list *t);
-static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev);
-static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
-static int ixgb_set_mac(struct net_device *netdev, void *p);
-static irqreturn_t ixgb_intr(int irq, void *data);
-static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
-
-static int ixgb_clean(struct napi_struct *, int);
-static bool ixgb_clean_rx_irq(struct ixgb_adapter *, int *, int);
-static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
-
-static void ixgb_tx_timeout(struct net_device *dev, unsigned int txqueue);
-static void ixgb_tx_timeout_task(struct work_struct *work);
-
-static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
-static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
-static int ixgb_vlan_rx_add_vid(struct net_device *netdev,
- __be16 proto, u16 vid);
-static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
- __be16 proto, u16 vid);
-static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
-
-static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
- pci_channel_state_t state);
-static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
-static void ixgb_io_resume (struct pci_dev *pdev);
-
-static const struct pci_error_handlers ixgb_err_handler = {
- .error_detected = ixgb_io_error_detected,
- .slot_reset = ixgb_io_slot_reset,
- .resume = ixgb_io_resume,
-};
-
-static struct pci_driver ixgb_driver = {
- .name = ixgb_driver_name,
- .id_table = ixgb_pci_tbl,
- .probe = ixgb_probe,
- .remove = ixgb_remove,
- .err_handler = &ixgb_err_handler
-};
-
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
-MODULE_LICENSE("GPL v2");
-
-#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
-static int debug = -1;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-
-/**
- * ixgb_init_module - Driver Registration Routine
- *
- * ixgb_init_module is the first routine called when the driver is
- * loaded. All it does is register with the PCI subsystem.
- **/
-
-static int __init
-ixgb_init_module(void)
-{
- pr_info("%s\n", ixgb_driver_string);
- pr_info("%s\n", ixgb_copyright);
-
- return pci_register_driver(&ixgb_driver);
-}
-
-module_init(ixgb_init_module);
-
-/**
- * ixgb_exit_module - Driver Exit Cleanup Routine
- *
- * ixgb_exit_module is called just before the driver is removed
- * from memory.
- **/
-
-static void __exit
-ixgb_exit_module(void)
-{
- pci_unregister_driver(&ixgb_driver);
-}
-
-module_exit(ixgb_exit_module);
-
-/**
- * ixgb_irq_disable - Mask off interrupt generation on the NIC
- * @adapter: board private structure
- **/
-
-static void
-ixgb_irq_disable(struct ixgb_adapter *adapter)
-{
- IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
- IXGB_WRITE_FLUSH(&adapter->hw);
- synchronize_irq(adapter->pdev->irq);
-}
-
-/**
- * ixgb_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
- **/
-
-static void
-ixgb_irq_enable(struct ixgb_adapter *adapter)
-{
- u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
- IXGB_INT_TXDW | IXGB_INT_LSC;
- if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
- val |= IXGB_INT_GPI0;
- IXGB_WRITE_REG(&adapter->hw, IMS, val);
- IXGB_WRITE_FLUSH(&adapter->hw);
-}
-
-int
-ixgb_up(struct ixgb_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- int err, irq_flags = IRQF_SHARED;
- int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
- struct ixgb_hw *hw = &adapter->hw;
-
- /* hardware has been reset, we need to reload some things */
-
- ixgb_rar_set(hw, netdev->dev_addr, 0);
- ixgb_set_multi(netdev);
-
- ixgb_restore_vlan(adapter);
-
- ixgb_configure_tx(adapter);
- ixgb_setup_rctl(adapter);
- ixgb_configure_rx(adapter);
- ixgb_alloc_rx_buffers(adapter, IXGB_DESC_UNUSED(&adapter->rx_ring));
-
- /* disable interrupts and get the hardware into a known state */
- IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
-
- /* only enable MSI if bus is in PCI-X mode */
- if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
- err = pci_enable_msi(adapter->pdev);
- if (!err) {
- adapter->have_msi = true;
- irq_flags = 0;
- }
- /* proceed to try to request regular interrupt */
- }
-
- err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
- netdev->name, netdev);
- if (err) {
- if (adapter->have_msi)
- pci_disable_msi(adapter->pdev);
- netif_err(adapter, probe, adapter->netdev,
- "Unable to allocate interrupt Error: %d\n", err);
- return err;
- }
-
- if ((hw->max_frame_size != max_frame) ||
- (hw->max_frame_size !=
- (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
-
- hw->max_frame_size = max_frame;
-
- IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
-
- if (hw->max_frame_size >
- IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
- u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
-
- if (!(ctrl0 & IXGB_CTRL0_JFE)) {
- ctrl0 |= IXGB_CTRL0_JFE;
- IXGB_WRITE_REG(hw, CTRL0, ctrl0);
- }
- }
- }
-
- clear_bit(__IXGB_DOWN, &adapter->flags);
-
- napi_enable(&adapter->napi);
- ixgb_irq_enable(adapter);
-
- netif_wake_queue(netdev);
-
- mod_timer(&adapter->watchdog_timer, jiffies);
-
- return 0;
-}
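
As a worked example of the frame-size handling in ixgb_up() (and ixgb_reset() below), the sketch here repeats the same arithmetic in isolation. The constants mirror ixgb_hw.h; MTU 9000 is only an example value.

#include <stdint.h>
#include <stdbool.h>

#define ENET_HEADER_SIZE          14
#define ENET_FCS_LENGTH           4
#define MAX_STD_FRAME_WITHOUT_FCS 1514  /* IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS */
#define MFS_SHIFT                 16    /* IXGB_MFS_SHIFT */

struct frame_cfg {
	uint32_t mfs_reg;   /* value to program into the MFS register */
	bool     jumbo;     /* whether CTRL0.JFE must be set */
};

/* MTU 9000 -> max_frame 9018, which exceeds 1514 + 4, so jumbo frames
 * are enabled and MFS carries 9018 in its upper 16 bits. */
static struct frame_cfg frame_cfg_for_mtu(uint32_t mtu)
{
	uint32_t max_frame = mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	struct frame_cfg cfg = {
		.mfs_reg = max_frame << MFS_SHIFT,
		.jumbo = max_frame > MAX_STD_FRAME_WITHOUT_FCS + ENET_FCS_LENGTH,
	};

	return cfg;
}
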
-
-void
-ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
-{
- struct net_device *netdev = adapter->netdev;
-
- /* prevent the interrupt handler from restarting watchdog */
- set_bit(__IXGB_DOWN, &adapter->flags);
-
- netif_carrier_off(netdev);
-
- napi_disable(&adapter->napi);
- /* waiting for NAPI to complete can re-enable interrupts */
- ixgb_irq_disable(adapter);
- free_irq(adapter->pdev->irq, netdev);
-
- if (adapter->have_msi)
- pci_disable_msi(adapter->pdev);
-
- if (kill_watchdog)
- del_timer_sync(&adapter->watchdog_timer);
-
- adapter->link_speed = 0;
- adapter->link_duplex = 0;
- netif_stop_queue(netdev);
-
- ixgb_reset(adapter);
- ixgb_clean_tx_ring(adapter);
- ixgb_clean_rx_ring(adapter);
-}
-
-void
-ixgb_reset(struct ixgb_adapter *adapter)
-{
- struct ixgb_hw *hw = &adapter->hw;
-
- ixgb_adapter_stop(hw);
- if (!ixgb_init_hw(hw))
- netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");
-
- /* restore frame size information */
- IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
- if (hw->max_frame_size >
- IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
- u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
- if (!(ctrl0 & IXGB_CTRL0_JFE)) {
- ctrl0 |= IXGB_CTRL0_JFE;
- IXGB_WRITE_REG(hw, CTRL0, ctrl0);
- }
- }
-}
-
-static netdev_features_t
-ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
-{
- /*
- * Tx VLAN insertion does not work per HW design when Rx stripping is
- * disabled.
- */
- if (!(features & NETIF_F_HW_VLAN_CTAG_RX))
- features &= ~NETIF_F_HW_VLAN_CTAG_TX;
-
- return features;
-}
-
-static int
-ixgb_set_features(struct net_device *netdev, netdev_features_t features)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- netdev_features_t changed = features ^ netdev->features;
-
- if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_CTAG_RX)))
- return 0;
-
- adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
-
- if (netif_running(netdev)) {
- ixgb_down(adapter, true);
- ixgb_up(adapter);
- ixgb_set_speed_duplex(netdev);
- } else
- ixgb_reset(adapter);
-
- return 0;
-}
-
-
-static const struct net_device_ops ixgb_netdev_ops = {
- .ndo_open = ixgb_open,
- .ndo_stop = ixgb_close,
- .ndo_start_xmit = ixgb_xmit_frame,
- .ndo_set_rx_mode = ixgb_set_multi,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = ixgb_set_mac,
- .ndo_change_mtu = ixgb_change_mtu,
- .ndo_tx_timeout = ixgb_tx_timeout,
- .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
- .ndo_fix_features = ixgb_fix_features,
- .ndo_set_features = ixgb_set_features,
-};
-
-/**
- * ixgb_probe - Device Initialization Routine
- * @pdev: PCI device information struct
- * @ent: entry in ixgb_pci_tbl
- *
- * Returns 0 on success, negative on failure
- *
- * ixgb_probe initializes an adapter identified by a pci_dev structure.
- * The OS initialization, configuring of the adapter private structure,
- * and a hardware reset occur.
- **/
-
-static int
-ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- struct net_device *netdev = NULL;
- struct ixgb_adapter *adapter;
- static int cards_found = 0;
- int pci_using_dac;
- u8 addr[ETH_ALEN];
- int i;
- int err;
-
- err = pci_enable_device(pdev);
- if (err)
- return err;
-
- pci_using_dac = 0;
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (!err) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- pr_err("No usable DMA configuration, aborting\n");
- goto err_dma_mask;
- }
- }
-
- err = pci_request_regions(pdev, ixgb_driver_name);
- if (err)
- goto err_request_regions;
-
- pci_set_master(pdev);
-
- netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
- if (!netdev) {
- err = -ENOMEM;
- goto err_alloc_etherdev;
- }
-
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- pci_set_drvdata(pdev, netdev);
- adapter = netdev_priv(netdev);
- adapter->netdev = netdev;
- adapter->pdev = pdev;
- adapter->hw.back = adapter;
- adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
-
- adapter->hw.hw_addr = pci_ioremap_bar(pdev, BAR_0);
- if (!adapter->hw.hw_addr) {
- err = -EIO;
- goto err_ioremap;
- }
-
- for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
- if (pci_resource_len(pdev, i) == 0)
- continue;
- if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
- adapter->hw.io_base = pci_resource_start(pdev, i);
- break;
- }
- }
-
- netdev->netdev_ops = &ixgb_netdev_ops;
- ixgb_set_ethtool_ops(netdev);
- netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
-
- strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
-
- adapter->bd_number = cards_found;
- adapter->link_speed = 0;
- adapter->link_duplex = 0;
-
- /* setup the private structure */
-
- err = ixgb_sw_init(adapter);
- if (err)
- goto err_sw_init;
-
- netdev->hw_features = NETIF_F_SG |
- NETIF_F_TSO |
- NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_FILTER;
- netdev->hw_features |= NETIF_F_RXCSUM;
-
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
-
- /* MTU range: 68 - 16114 */
- netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = IXGB_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
-
- /* make sure the EEPROM is good */
-
- if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
- netif_err(adapter, probe, adapter->netdev,
- "The EEPROM Checksum Is Not Valid\n");
- err = -EIO;
- goto err_eeprom;
- }
-
- ixgb_get_ee_mac_addr(&adapter->hw, addr);
- eth_hw_addr_set(netdev, addr);
-
- if (!is_valid_ether_addr(netdev->dev_addr)) {
- netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
- err = -EIO;
- goto err_eeprom;
- }
-
- adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
-
- timer_setup(&adapter->watchdog_timer, ixgb_watchdog, 0);
-
- INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task);
-
- strcpy(netdev->name, "eth%d");
- err = register_netdev(netdev);
- if (err)
- goto err_register;
-
- /* carrier off reporting is important to ethtool even BEFORE open */
- netif_carrier_off(netdev);
-
- netif_info(adapter, probe, adapter->netdev,
- "Intel(R) PRO/10GbE Network Connection\n");
- ixgb_check_options(adapter);
- /* reset the hardware with the new settings */
-
- ixgb_reset(adapter);
-
- cards_found++;
- return 0;
-
-err_register:
-err_sw_init:
-err_eeprom:
- iounmap(adapter->hw.hw_addr);
-err_ioremap:
- free_netdev(netdev);
-err_alloc_etherdev:
- pci_release_regions(pdev);
-err_request_regions:
-err_dma_mask:
- pci_disable_device(pdev);
- return err;
-}
-
-/**
- * ixgb_remove - Device Removal Routine
- * @pdev: PCI device information struct
- *
- * ixgb_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device. This could be caused by a
- * Hot-Plug event, or because the driver is going to be removed from
- * memory.
- **/
-
-static void
-ixgb_remove(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- cancel_work_sync(&adapter->tx_timeout_task);
-
- unregister_netdev(netdev);
-
- iounmap(adapter->hw.hw_addr);
- pci_release_regions(pdev);
-
- free_netdev(netdev);
- pci_disable_device(pdev);
-}
-
-/**
- * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
- * @adapter: board private structure to initialize
- *
- * ixgb_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
- **/
-
-static int
-ixgb_sw_init(struct ixgb_adapter *adapter)
-{
- struct ixgb_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
-
- /* PCI config space info */
-
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_id = pdev->subsystem_device;
-
- hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
- adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
-
- if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
- (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
- (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
- (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
- hw->mac_type = ixgb_82597;
- else {
- /* should never have loaded on this device */
- netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
- }
-
- /* enable flow control to be programmed */
- hw->fc.send_xon = 1;
-
- set_bit(__IXGB_DOWN, &adapter->flags);
- return 0;
-}
-
-/**
- * ixgb_open - Called when a network interface is made active
- * @netdev: network interface device structure
- *
- * Returns 0 on success, negative value on failure
- *
- * The open entry point is called when a network interface is made
- * active by the system (IFF_UP). At this point all resources needed
- * for transmit and receive operations are allocated, the interrupt
- * handler is registered with the OS, the watchdog timer is started,
- * and the stack is notified that the interface is ready.
- **/
-
-static int
-ixgb_open(struct net_device *netdev)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- int err;
-
- /* allocate transmit descriptors */
- err = ixgb_setup_tx_resources(adapter);
- if (err)
- goto err_setup_tx;
-
- netif_carrier_off(netdev);
-
- /* allocate receive descriptors */
-
- err = ixgb_setup_rx_resources(adapter);
- if (err)
- goto err_setup_rx;
-
- err = ixgb_up(adapter);
- if (err)
- goto err_up;
-
- netif_start_queue(netdev);
-
- return 0;
-
-err_up:
- ixgb_free_rx_resources(adapter);
-err_setup_rx:
- ixgb_free_tx_resources(adapter);
-err_setup_tx:
- ixgb_reset(adapter);
-
- return err;
-}
-
-/**
- * ixgb_close - Disables a network interface
- * @netdev: network interface device structure
- *
- * Returns 0, this is not allowed to fail
- *
- * The close entry point is called when an interface is de-activated
- * by the OS. The hardware is still under the drivers control, but
- * needs to be disabled. A global MAC reset is issued to stop the
- * hardware, and all transmit and receive resources are freed.
- **/
-
-static int
-ixgb_close(struct net_device *netdev)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- ixgb_down(adapter, true);
-
- ixgb_free_tx_resources(adapter);
- ixgb_free_rx_resources(adapter);
-
- return 0;
-}
-
-/**
- * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
- *
- * Return 0 on success, negative on failure
- **/
-
-int
-ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
-{
- struct ixgb_desc_ring *txdr = &adapter->tx_ring;
- struct pci_dev *pdev = adapter->pdev;
- int size;
-
- size = sizeof(struct ixgb_buffer) * txdr->count;
- txdr->buffer_info = vzalloc(size);
- if (!txdr->buffer_info)
- return -ENOMEM;
-
- /* round up to nearest 4K */
-
- txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
- txdr->size = ALIGN(txdr->size, 4096);
-
- txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL);
- if (!txdr->desc) {
- vfree(txdr->buffer_info);
- return -ENOMEM;
- }
-
- txdr->next_to_use = 0;
- txdr->next_to_clean = 0;
-
- return 0;
-}
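
The "round up to nearest 4K" step uses the kernel's ALIGN() macro; a quick stand-alone sketch of the same arithmetic, with round_up_4k() as a local stand-in:

#include <stddef.h>
#include <stdio.h>

/* Local stand-in for the kernel's ALIGN(x, 4096). */
static size_t round_up_4k(size_t size)
{
	return (size + 4095) & ~(size_t)4095;
}

int main(void)
{
	/* e.g. 256 descriptors of 16 bytes fit exactly in one 4 KB chunk,
	 * while 300 descriptors (4800 bytes) round up to 8192 bytes. */
	printf("%zu\n", round_up_4k(256 * 16));   /* 4096 */
	printf("%zu\n", round_up_4k(300 * 16));   /* 8192 */
	return 0;
}
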
-
-/**
- * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
- * @adapter: board private structure
- *
- * Configure the Tx unit of the MAC after a reset.
- **/
-
-static void
-ixgb_configure_tx(struct ixgb_adapter *adapter)
-{
- u64 tdba = adapter->tx_ring.dma;
- u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
- u32 tctl;
- struct ixgb_hw *hw = &adapter->hw;
-
- /* Setup the Base and Length of the Tx Descriptor Ring
- * tx_ring.dma can be either a 32 or 64 bit value
- */
-
- IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
- IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));
-
- IXGB_WRITE_REG(hw, TDLEN, tdlen);
-
- /* Setup the HW Tx Head and Tail descriptor pointers */
-
- IXGB_WRITE_REG(hw, TDH, 0);
- IXGB_WRITE_REG(hw, TDT, 0);
-
- /* don't set up txdctl; it induces performance problems if configured
- * incorrectly */
- /* Set the Tx Interrupt Delay register */
-
- IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
-
- /* Program the Transmit Control Register */
-
- tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
- IXGB_WRITE_REG(hw, TCTL, tctl);
-
- /* Setup Transmit Descriptor Settings for this adapter */
- adapter->tx_cmd_type =
- IXGB_TX_DESC_TYPE |
- (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
-}
-
-/**
- * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
- *
- * Returns 0 on success, negative on failure
- **/
-
-int
-ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
-{
- struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
- struct pci_dev *pdev = adapter->pdev;
- int size;
-
- size = sizeof(struct ixgb_buffer) * rxdr->count;
- rxdr->buffer_info = vzalloc(size);
- if (!rxdr->buffer_info)
- return -ENOMEM;
-
- /* Round up to nearest 4K */
-
- rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
- rxdr->size = ALIGN(rxdr->size, 4096);
-
- rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL);
-
- if (!rxdr->desc) {
- vfree(rxdr->buffer_info);
- return -ENOMEM;
- }
-
- rxdr->next_to_clean = 0;
- rxdr->next_to_use = 0;
-
- return 0;
-}
-
-/**
- * ixgb_setup_rctl - configure the receive control register
- * @adapter: Board private structure
- **/
-
-static void
-ixgb_setup_rctl(struct ixgb_adapter *adapter)
-{
- u32 rctl;
-
- rctl = IXGB_READ_REG(&adapter->hw, RCTL);
-
- rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
-
- rctl |=
- IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
- IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
- (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
-
- rctl |= IXGB_RCTL_SECRC;
-
- if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
- rctl |= IXGB_RCTL_BSIZE_2048;
- else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
- rctl |= IXGB_RCTL_BSIZE_4096;
- else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
- rctl |= IXGB_RCTL_BSIZE_8192;
- else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
- rctl |= IXGB_RCTL_BSIZE_16384;
-
- IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
-}
-
-/**
- * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
- * @adapter: board private structure
- *
- * Configure the Rx unit of the MAC after a reset.
- **/
-
-static void
-ixgb_configure_rx(struct ixgb_adapter *adapter)
-{
- u64 rdba = adapter->rx_ring.dma;
- u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
- struct ixgb_hw *hw = &adapter->hw;
- u32 rctl;
- u32 rxcsum;
-
- /* make sure receives are disabled while setting up the descriptors */
-
- rctl = IXGB_READ_REG(hw, RCTL);
- IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);
-
- /* set the Receive Delay Timer Register */
-
- IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
-
- /* Setup the Base and Length of the Rx Descriptor Ring */
-
- IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
- IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));
-
- IXGB_WRITE_REG(hw, RDLEN, rdlen);
-
- /* Setup the HW Rx Head and Tail Descriptor Pointers */
- IXGB_WRITE_REG(hw, RDH, 0);
- IXGB_WRITE_REG(hw, RDT, 0);
-
- /* due to the hardware errata with RXDCTL, we are unable to use any of
- * the performance enhancing features of it without causing other
- * subtle bugs, some of the bugs could include receive length
- * corruption at high data rates (WTHRESH > 0) and/or receive
- * descriptor ring irregularities (particularly in hardware cache) */
- IXGB_WRITE_REG(hw, RXDCTL, 0);
-
- /* Enable Receive Checksum Offload for TCP and UDP */
- if (adapter->rx_csum) {
- rxcsum = IXGB_READ_REG(hw, RXCSUM);
- rxcsum |= IXGB_RXCSUM_TUOFL;
- IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
- }
-
- /* Enable Receives */
-
- IXGB_WRITE_REG(hw, RCTL, rctl);
-}
-
-/**
- * ixgb_free_tx_resources - Free Tx Resources
- * @adapter: board private structure
- *
- * Free all transmit software resources
- **/
-
-void
-ixgb_free_tx_resources(struct ixgb_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
-
- ixgb_clean_tx_ring(adapter);
-
- vfree(adapter->tx_ring.buffer_info);
- adapter->tx_ring.buffer_info = NULL;
-
- dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
- adapter->tx_ring.desc, adapter->tx_ring.dma);
-
- adapter->tx_ring.desc = NULL;
-}
-
-static void
-ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
- struct ixgb_buffer *buffer_info)
-{
- if (buffer_info->dma) {
- if (buffer_info->mapped_as_page)
- dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
- buffer_info->length, DMA_TO_DEVICE);
- else
- dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
- buffer_info->length, DMA_TO_DEVICE);
- buffer_info->dma = 0;
- }
-
- if (buffer_info->skb) {
- dev_kfree_skb_any(buffer_info->skb);
- buffer_info->skb = NULL;
- }
- buffer_info->time_stamp = 0;
- /* these fields must always be initialized in tx
- * buffer_info->length = 0;
- * buffer_info->next_to_watch = 0; */
-}
-
-/**
- * ixgb_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
- **/
-
-static void
-ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
-{
- struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
- struct ixgb_buffer *buffer_info;
- unsigned long size;
- unsigned int i;
-
- /* Free all the Tx ring sk_buffs */
-
- for (i = 0; i < tx_ring->count; i++) {
- buffer_info = &tx_ring->buffer_info[i];
- ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
- }
-
- size = sizeof(struct ixgb_buffer) * tx_ring->count;
- memset(tx_ring->buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
-
- memset(tx_ring->desc, 0, tx_ring->size);
-
- tx_ring->next_to_use = 0;
- tx_ring->next_to_clean = 0;
-
- IXGB_WRITE_REG(&adapter->hw, TDH, 0);
- IXGB_WRITE_REG(&adapter->hw, TDT, 0);
-}
-
-/**
- * ixgb_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
- *
- * Free all receive software resources
- **/
-
-void
-ixgb_free_rx_resources(struct ixgb_adapter *adapter)
-{
- struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
- struct pci_dev *pdev = adapter->pdev;
-
- ixgb_clean_rx_ring(adapter);
-
- vfree(rx_ring->buffer_info);
- rx_ring->buffer_info = NULL;
-
- dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
- rx_ring->dma);
-
- rx_ring->desc = NULL;
-}
-
-/**
- * ixgb_clean_rx_ring - Free Rx Buffers
- * @adapter: board private structure
- **/
-
-static void
-ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
-{
- struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
- struct ixgb_buffer *buffer_info;
- struct pci_dev *pdev = adapter->pdev;
- unsigned long size;
- unsigned int i;
-
- /* Free all the Rx ring sk_buffs */
-
- for (i = 0; i < rx_ring->count; i++) {
- buffer_info = &rx_ring->buffer_info[i];
- if (buffer_info->dma) {
- dma_unmap_single(&pdev->dev,
- buffer_info->dma,
- buffer_info->length,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
- buffer_info->length = 0;
- }
-
- if (buffer_info->skb) {
- dev_kfree_skb(buffer_info->skb);
- buffer_info->skb = NULL;
- }
- }
-
- size = sizeof(struct ixgb_buffer) * rx_ring->count;
- memset(rx_ring->buffer_info, 0, size);
-
- /* Zero out the descriptor ring */
-
- memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
-
- IXGB_WRITE_REG(&adapter->hw, RDH, 0);
- IXGB_WRITE_REG(&adapter->hw, RDT, 0);
-}
-
-/**
- * ixgb_set_mac - Change the Ethernet Address of the NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- **/
-
-static int
-ixgb_set_mac(struct net_device *netdev, void *p)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct sockaddr *addr = p;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(netdev, addr->sa_data);
-
- ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
-
- return 0;
-}
-
-/**
- * ixgb_set_multi - Multicast and Promiscuous mode set
- * @netdev: network interface device structure
- *
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper multicast,
- * promiscuous mode, and all-multi behavior.
- **/
-
-static void
-ixgb_set_multi(struct net_device *netdev)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_hw *hw = &adapter->hw;
- struct netdev_hw_addr *ha;
- u32 rctl;
-
- /* Check for Promiscuous and All Multicast modes */
-
- rctl = IXGB_READ_REG(hw, RCTL);
-
- if (netdev->flags & IFF_PROMISC) {
- rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
- /* disable VLAN filtering */
- rctl &= ~IXGB_RCTL_CFIEN;
- rctl &= ~IXGB_RCTL_VFE;
- } else {
- if (netdev->flags & IFF_ALLMULTI) {
- rctl |= IXGB_RCTL_MPE;
- rctl &= ~IXGB_RCTL_UPE;
- } else {
- rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
- }
- /* enable VLAN filtering */
- rctl |= IXGB_RCTL_VFE;
- rctl &= ~IXGB_RCTL_CFIEN;
- }
-
- if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
- rctl |= IXGB_RCTL_MPE;
- IXGB_WRITE_REG(hw, RCTL, rctl);
- } else {
- u8 *mta = kmalloc_array(ETH_ALEN,
- IXGB_MAX_NUM_MULTICAST_ADDRESSES,
- GFP_ATOMIC);
- u8 *addr;
- if (!mta)
- goto alloc_failed;
-
- IXGB_WRITE_REG(hw, RCTL, rctl);
-
- addr = mta;
- netdev_for_each_mc_addr(ha, netdev) {
- memcpy(addr, ha->addr, ETH_ALEN);
- addr += ETH_ALEN;
- }
-
- ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
- kfree(mta);
- }
-
-alloc_failed:
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
- ixgb_vlan_strip_enable(adapter);
- else
- ixgb_vlan_strip_disable(adapter);
-
-}
-
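The temporary list handed to ixgb_mc_addr_list_update() above is nothing more than the multicast addresses packed back to back, ETH_ALEN bytes apiece. A standalone sketch of that packing, using made-up multicast addresses, is:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

int main(void)
{
        const unsigned char mc[2][ETH_ALEN] = {
                { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },  /* hypothetical entries */
                { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
        };
        unsigned char mta[2 * ETH_ALEN];
        unsigned char *p = mta;

        for (int i = 0; i < 2; i++) {          /* same pattern as the mc_addr loop */
                memcpy(p, mc[i], ETH_ALEN);
                p += ETH_ALEN;
        }
        printf("packed %zu bytes\n", sizeof(mta));
        return 0;
}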
-/**
- * ixgb_watchdog - Timer Call-back
- * @t: pointer to timer_list containing our private info pointer
- **/
-
-static void
-ixgb_watchdog(struct timer_list *t)
-{
- struct ixgb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
- struct net_device *netdev = adapter->netdev;
- struct ixgb_desc_ring *txdr = &adapter->tx_ring;
-
- ixgb_check_for_link(&adapter->hw);
-
- if (ixgb_check_for_bad_link(&adapter->hw)) {
- /* force the reset path */
- netif_stop_queue(netdev);
- }
-
- if (adapter->hw.link_up) {
- if (!netif_carrier_ok(netdev)) {
- netdev_info(netdev,
- "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
- (adapter->hw.fc.type == ixgb_fc_full) ?
- "RX/TX" :
- (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
- "RX" :
- (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
- "TX" : "None");
- adapter->link_speed = 10000;
- adapter->link_duplex = FULL_DUPLEX;
- netif_carrier_on(netdev);
- }
- } else {
- if (netif_carrier_ok(netdev)) {
- adapter->link_speed = 0;
- adapter->link_duplex = 0;
- netdev_info(netdev, "NIC Link is Down\n");
- netif_carrier_off(netdev);
- }
- }
-
- ixgb_update_stats(adapter);
-
- if (!netif_carrier_ok(netdev)) {
- if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
- /* We've lost link, so the controller stops DMA,
- * but we've got queued Tx work that's never going
- * to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context). */
- schedule_work(&adapter->tx_timeout_task);
- /* return immediately since reset is imminent */
- return;
- }
- }
-
- /* Force detection of hung controller every watchdog period */
- adapter->detect_tx_hung = true;
-
- /* generate an interrupt to force clean up of any stragglers */
- IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
-
- /* Reset the timer */
- mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
-}
-
-#define IXGB_TX_FLAGS_CSUM 0x00000001
-#define IXGB_TX_FLAGS_VLAN 0x00000002
-#define IXGB_TX_FLAGS_TSO 0x00000004
-
-static int
-ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
-{
- struct ixgb_context_desc *context_desc;
- unsigned int i;
- u8 ipcss, ipcso, tucss, tucso, hdr_len;
- u16 ipcse, tucse, mss;
-
- if (likely(skb_is_gso(skb))) {
- struct ixgb_buffer *buffer_info;
- struct iphdr *iph;
- int err;
-
- err = skb_cow_head(skb, 0);
- if (err < 0)
- return err;
-
- hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- mss = skb_shinfo(skb)->gso_size;
- iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP, 0);
- ipcss = skb_network_offset(skb);
- ipcso = (void *)&(iph->check) - (void *)skb->data;
- ipcse = skb_transport_offset(skb) - 1;
- tucss = skb_transport_offset(skb);
- tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
- tucse = 0;
-
- i = adapter->tx_ring.next_to_use;
- context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
- buffer_info = &adapter->tx_ring.buffer_info[i];
- WARN_ON(buffer_info->dma != 0);
-
- context_desc->ipcss = ipcss;
- context_desc->ipcso = ipcso;
- context_desc->ipcse = cpu_to_le16(ipcse);
- context_desc->tucss = tucss;
- context_desc->tucso = tucso;
- context_desc->tucse = cpu_to_le16(tucse);
- context_desc->mss = cpu_to_le16(mss);
- context_desc->hdr_len = hdr_len;
- context_desc->status = 0;
- context_desc->cmd_type_len = cpu_to_le32(
- IXGB_CONTEXT_DESC_TYPE
- | IXGB_CONTEXT_DESC_CMD_TSE
- | IXGB_CONTEXT_DESC_CMD_IP
- | IXGB_CONTEXT_DESC_CMD_TCP
- | IXGB_CONTEXT_DESC_CMD_IDE
- | (skb->len - (hdr_len)));
-
-
- if (++i == adapter->tx_ring.count) i = 0;
- adapter->tx_ring.next_to_use = i;
-
- return 1;
- }
-
- return 0;
-}
-
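To make the offset bookkeeping in ixgb_tso() concrete, here is a worked example as a hedged userspace sketch (not driver code) for an untagged Ethernet frame carrying IPv4 and TCP with 20-byte headers; the 10- and 16-byte checksum-field offsets follow from the standard header layouts:

#include <stdio.h>

int main(void)
{
        /* assumed frame layout: untagged Ethernet, 20-byte IPv4, 20-byte TCP */
        unsigned int eth_hlen = 14;
        unsigned int ip_hlen  = 20;
        unsigned int tcp_hlen = 20;

        unsigned int net_off   = eth_hlen;            /* skb_network_offset()   */
        unsigned int trans_off = eth_hlen + ip_hlen;  /* skb_transport_offset() */

        unsigned int ipcss = net_off;                 /* 14 */
        unsigned int ipcso = net_off + 10;            /* iph->check sits 10 bytes in: 24 */
        unsigned int ipcse = trans_off - 1;           /* 33 */
        unsigned int tucss = trans_off;               /* 34 */
        unsigned int tucso = trans_off + 16;          /* tcp->check sits 16 bytes in: 50 */
        unsigned int hdr_len = trans_off + tcp_hlen;  /* 54 */

        printf("ipcss=%u ipcso=%u ipcse=%u tucss=%u tucso=%u hdr_len=%u\n",
               ipcss, ipcso, ipcse, tucss, tucso, hdr_len);
        return 0;
}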
-static bool
-ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
-{
- struct ixgb_context_desc *context_desc;
- unsigned int i;
- u8 css, cso;
-
- if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
- struct ixgb_buffer *buffer_info;
- css = skb_checksum_start_offset(skb);
- cso = css + skb->csum_offset;
-
- i = adapter->tx_ring.next_to_use;
- context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
- buffer_info = &adapter->tx_ring.buffer_info[i];
- WARN_ON(buffer_info->dma != 0);
-
- context_desc->tucss = css;
- context_desc->tucso = cso;
- context_desc->tucse = 0;
- /* zero out any previously existing data in one instruction */
- *(u32 *)&(context_desc->ipcss) = 0;
- context_desc->status = 0;
- context_desc->hdr_len = 0;
- context_desc->mss = 0;
- context_desc->cmd_type_len =
- cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
- | IXGB_TX_DESC_CMD_IDE);
-
- if (++i == adapter->tx_ring.count) i = 0;
- adapter->tx_ring.next_to_use = i;
-
- return true;
- }
-
- return false;
-}
-
-#define IXGB_MAX_TXD_PWR 14
-#define IXGB_MAX_DATA_PER_TXD (1<<IXGB_MAX_TXD_PWR)
-
-static int
-ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
- unsigned int first)
-{
- struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
- struct pci_dev *pdev = adapter->pdev;
- struct ixgb_buffer *buffer_info;
- int len = skb_headlen(skb);
- unsigned int offset = 0, size, count = 0, i;
- unsigned int mss = skb_shinfo(skb)->gso_size;
- unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
- unsigned int f;
-
- i = tx_ring->next_to_use;
-
- while (len) {
- buffer_info = &tx_ring->buffer_info[i];
- size = min(len, IXGB_MAX_DATA_PER_TXD);
- /* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (unlikely(mss && !nr_frags && size == len && size > 8))
- size -= 4;
-
- buffer_info->length = size;
- WARN_ON(buffer_info->dma != 0);
- buffer_info->time_stamp = jiffies;
- buffer_info->mapped_as_page = false;
- buffer_info->dma = dma_map_single(&pdev->dev,
- skb->data + offset,
- size, DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, buffer_info->dma))
- goto dma_error;
- buffer_info->next_to_watch = 0;
-
- len -= size;
- offset += size;
- count++;
- if (len) {
- i++;
- if (i == tx_ring->count)
- i = 0;
- }
- }
-
- for (f = 0; f < nr_frags; f++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- len = skb_frag_size(frag);
- offset = 0;
-
- while (len) {
- i++;
- if (i == tx_ring->count)
- i = 0;
-
- buffer_info = &tx_ring->buffer_info[i];
- size = min(len, IXGB_MAX_DATA_PER_TXD);
-
- /* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (unlikely(mss && (f == (nr_frags - 1))
- && size == len && size > 8))
- size -= 4;
-
- buffer_info->length = size;
- buffer_info->time_stamp = jiffies;
- buffer_info->mapped_as_page = true;
- buffer_info->dma =
- skb_frag_dma_map(&pdev->dev, frag, offset, size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, buffer_info->dma))
- goto dma_error;
- buffer_info->next_to_watch = 0;
-
- len -= size;
- offset += size;
- count++;
- }
- }
- tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[first].next_to_watch = i;
-
- return count;
-
-dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
- buffer_info->dma = 0;
- if (count)
- count--;
-
- while (count--) {
- if (i == 0)
- i += tx_ring->count;
- i--;
- buffer_info = &tx_ring->buffer_info[i];
- ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
- }
-
- return 0;
-}
-
-static void
-ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
-{
- struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
- struct ixgb_tx_desc *tx_desc = NULL;
- struct ixgb_buffer *buffer_info;
- u32 cmd_type_len = adapter->tx_cmd_type;
- u8 status = 0;
- u8 popts = 0;
- unsigned int i;
-
- if (tx_flags & IXGB_TX_FLAGS_TSO) {
- cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
- popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
- }
-
- if (tx_flags & IXGB_TX_FLAGS_CSUM)
- popts |= IXGB_TX_DESC_POPTS_TXSM;
-
- if (tx_flags & IXGB_TX_FLAGS_VLAN)
- cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
-
- i = tx_ring->next_to_use;
-
- while (count--) {
- buffer_info = &tx_ring->buffer_info[i];
- tx_desc = IXGB_TX_DESC(*tx_ring, i);
- tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
- tx_desc->cmd_type_len =
- cpu_to_le32(cmd_type_len | buffer_info->length);
- tx_desc->status = status;
- tx_desc->popts = popts;
- tx_desc->vlan = cpu_to_le16(vlan_id);
-
- if (++i == tx_ring->count) i = 0;
- }
-
- tx_desc->cmd_type_len |=
- cpu_to_le32(IXGB_TX_DESC_CMD_EOP | IXGB_TX_DESC_CMD_RS);
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64). */
- wmb();
-
- tx_ring->next_to_use = i;
- IXGB_WRITE_REG(&adapter->hw, TDT, i);
-}
-
-static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
-
- netif_stop_queue(netdev);
- /* Herbert's original patch had:
- * smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it. */
- smp_mb();
-
- /* We need to check again in a case another CPU has just
- * made room available. */
- if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
- return -EBUSY;
-
- /* A reprieve! */
- netif_start_queue(netdev);
- ++adapter->restart_queue;
- return 0;
-}
-
-static int ixgb_maybe_stop_tx(struct net_device *netdev,
- struct ixgb_desc_ring *tx_ring, int size)
-{
- if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
- return 0;
- return __ixgb_maybe_stop_tx(netdev, size);
-}
-
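IXGB_DESC_UNUSED() is defined elsewhere in the driver, but the idea it captures is plain ring arithmetic: with a producer index (next_to_use) and a consumer index (next_to_clean) over count slots, the free space is the distance from producer back to consumer, minus one guard slot so the ring never fills completely. The helper below is an illustrative stand-in, not the driver's macro:

#include <stdio.h>

/* free slots in a circular ring, keeping one slot as a guard */
static unsigned int ring_unused(unsigned int next_to_clean,
                                unsigned int next_to_use,
                                unsigned int count)
{
        unsigned int base = (next_to_clean > next_to_use) ? 0 : count;

        return base + next_to_clean - next_to_use - 1;
}

int main(void)
{
        printf("%u\n", ring_unused(0, 0, 256));    /* empty ring: 255 usable   */
        printf("%u\n", ring_unused(10, 200, 256)); /* wrapped case: 65 free    */
        return 0;
}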
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
- (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
- MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
- + 1 /* one more needed for sentinel TSO workaround */
-
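Plugging numbers into the two macros above gives the worst case the driver guards against before queueing a frame. The sketch below assumes 4 KiB pages and MAX_SKB_FRAGS of 17, which are typical but configuration dependent:

#include <stdio.h>

#define MAX_TXD_PWR      14
#define MAX_DATA_PER_TXD (1 << MAX_TXD_PWR)          /* 16384 bytes per descriptor */
#define TXD_USE_COUNT(S) (((S) >> MAX_TXD_PWR) + \
                          (((S) & (MAX_DATA_PER_TXD - 1)) ? 1 : 0))

int main(void)
{
        unsigned int max_skb_frags = 17;   /* assumed; depends on kernel config */
        unsigned int page_size = 4096;     /* assumed 4 KiB pages */
        unsigned int needed;

        needed = TXD_USE_COUNT(MAX_DATA_PER_TXD)            /* linear data   */
               + max_skb_frags * TXD_USE_COUNT(page_size)   /* one per frag  */
               + 1                                          /* context desc  */
               + 1;                                         /* TSO sentinel  */

        printf("worst-case descriptors per packet: %u\n", needed); /* 20 */
        return 0;
}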
-static netdev_tx_t
-ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- unsigned int first;
- unsigned int tx_flags = 0;
- int vlan_id = 0;
- int count = 0;
- int tso;
-
- if (test_bit(__IXGB_DOWN, &adapter->flags)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (skb->len <= 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
- DESC_NEEDED)))
- return NETDEV_TX_BUSY;
-
- if (skb_vlan_tag_present(skb)) {
- tx_flags |= IXGB_TX_FLAGS_VLAN;
- vlan_id = skb_vlan_tag_get(skb);
- }
-
- first = adapter->tx_ring.next_to_use;
-
- tso = ixgb_tso(adapter, skb);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (likely(tso))
- tx_flags |= IXGB_TX_FLAGS_TSO;
- else if (ixgb_tx_csum(adapter, skb))
- tx_flags |= IXGB_TX_FLAGS_CSUM;
-
- count = ixgb_tx_map(adapter, skb, first);
-
- if (count) {
- ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
- /* Make sure there is space in the ring for the next send. */
- ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
-
- } else {
- dev_kfree_skb_any(skb);
- adapter->tx_ring.buffer_info[first].time_stamp = 0;
- adapter->tx_ring.next_to_use = first;
- }
-
- return NETDEV_TX_OK;
-}
-
-/**
- * ixgb_tx_timeout - Respond to a Tx Hang
- * @netdev: network interface device structure
- * @txqueue: queue hanging (unused)
- **/
-
-static void
-ixgb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- /* Do the reset outside of interrupt context */
- schedule_work(&adapter->tx_timeout_task);
-}
-
-static void
-ixgb_tx_timeout_task(struct work_struct *work)
-{
- struct ixgb_adapter *adapter =
- container_of(work, struct ixgb_adapter, tx_timeout_task);
-
- adapter->tx_timeout_count++;
- ixgb_down(adapter, true);
- ixgb_up(adapter);
-}
-
-/**
- * ixgb_change_mtu - Change the Maximum Transfer Unit
- * @netdev: network interface device structure
- * @new_mtu: new value for maximum frame size
- *
- * Returns 0 on success, negative on failure
- **/
-
-static int
-ixgb_change_mtu(struct net_device *netdev, int new_mtu)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
-
- if (netif_running(netdev))
- ixgb_down(adapter, true);
-
- adapter->rx_buffer_len = max_frame + 8; /* + 8 for errata */
-
- netdev->mtu = new_mtu;
-
- if (netif_running(netdev))
- ixgb_up(adapter);
-
- return 0;
-}
-
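ENET_HEADER_SIZE and ENET_FCS_LENGTH come from ixgb.h and are not shown in this hunk; assuming the usual 14-byte Ethernet header and 4-byte FCS, the buffer sizing for a standard MTU works out as follows:

#include <stdio.h>

int main(void)
{
        /* assumed values for the constants defined in ixgb.h */
        unsigned int enet_header_size = 14;
        unsigned int enet_fcs_length  = 4;
        unsigned int new_mtu = 1500;

        unsigned int max_frame = new_mtu + enet_header_size + enet_fcs_length;
        unsigned int rx_buffer_len = max_frame + 8;  /* + 8 for the errata pad */

        printf("max_frame=%u rx_buffer_len=%u\n", max_frame, rx_buffer_len);
        /* 1518 and 1526 for a standard 1500-byte MTU */
        return 0;
}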
-/**
- * ixgb_update_stats - Update the board statistics counters.
- * @adapter: board private structure
- **/
-
-void
-ixgb_update_stats(struct ixgb_adapter *adapter)
-{
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
-
- /* Prevent stats update while adapter is being reset */
- if (pci_channel_offline(pdev))
- return;
-
- if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
- (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
- u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
- u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
- u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
- u64 bcast = ((u64)bcast_h << 32) | bcast_l;
-
- multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
- /* fix up multicast stats by removing broadcasts */
- if (multi >= bcast)
- multi -= bcast;
-
- adapter->stats.mprcl += (multi & 0xFFFFFFFF);
- adapter->stats.mprch += (multi >> 32);
- adapter->stats.bprcl += bcast_l;
- adapter->stats.bprch += bcast_h;
- } else {
- adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
- adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
- adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
- adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
- }
- adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
- adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
- adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
- adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
- adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
- adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
- adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
- adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
- adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
- adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
- adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
- adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
- adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
- adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
- adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
- adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
- adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
- adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
- adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
- adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
- adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
- adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
- adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
- adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
- adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
- adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
- adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
- adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
- adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
- adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
- adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
- adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
- adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
- adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
- adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
- adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
- adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
- adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
- adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
- adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
- adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
- adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
- adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
- adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
- adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
- adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
- adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
- adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
- adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
- adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
- adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
- adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
- adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
- adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
- adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
- adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
-
- /* Fill out the OS statistics structure */
-
- netdev->stats.rx_packets = adapter->stats.gprcl;
- netdev->stats.tx_packets = adapter->stats.gptcl;
- netdev->stats.rx_bytes = adapter->stats.gorcl;
- netdev->stats.tx_bytes = adapter->stats.gotcl;
- netdev->stats.multicast = adapter->stats.mprcl;
- netdev->stats.collisions = 0;
-
- /* ignore RLEC as it reports errors for padded (<64bytes) frames
- * with a length in the type/len field */
- netdev->stats.rx_errors =
- /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
- adapter->stats.ruc +
- adapter->stats.roc /*+ adapter->stats.rlec */ +
- adapter->stats.icbc +
- adapter->stats.ecbc + adapter->stats.mpc;
-
- /* see above
- * netdev->stats.rx_length_errors = adapter->stats.rlec;
- */
-
- netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
- netdev->stats.rx_fifo_errors = adapter->stats.mpc;
- netdev->stats.rx_missed_errors = adapter->stats.mpc;
- netdev->stats.rx_over_errors = adapter->stats.mpc;
-
- netdev->stats.tx_errors = 0;
- netdev->stats.rx_frame_errors = 0;
- netdev->stats.tx_aborted_errors = 0;
- netdev->stats.tx_carrier_errors = 0;
- netdev->stats.tx_fifo_errors = 0;
- netdev->stats.tx_heartbeat_errors = 0;
- netdev->stats.tx_window_errors = 0;
-}
-
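The multicast fixup near the top of ixgb_update_stats() stitches two 32-bit counter registers into one 64-bit value before subtracting broadcasts. A small illustration of that stitching, with made-up register snapshots, is:

#include <stdio.h>
#include <stdint.h>

/* combine a split low/high register pair into one 64-bit count */
static uint64_t combine32(uint32_t lo, uint32_t hi)
{
        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        /* hypothetical register snapshots */
        uint32_t mprcl = 0xFFFFFFF0, mprch = 0x00000002;
        uint32_t bprcl = 0x00000010, bprch = 0x00000000;

        uint64_t multi = combine32(mprcl, mprch);
        uint64_t bcast = combine32(bprcl, bprch);

        if (multi >= bcast)          /* same fixup as above: drop broadcasts */
                multi -= bcast;

        printf("multicast (minus broadcast): %llu\n",
               (unsigned long long)multi);
        return 0;
}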
-#define IXGB_MAX_INTR 10
-/**
- * ixgb_intr - Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-
-static irqreturn_t
-ixgb_intr(int irq, void *data)
-{
- struct net_device *netdev = data;
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- struct ixgb_hw *hw = &adapter->hw;
- u32 icr = IXGB_READ_REG(hw, ICR);
-
- if (unlikely(!icr))
- return IRQ_NONE; /* Not our interrupt */
-
- if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
- if (!test_bit(__IXGB_DOWN, &adapter->flags))
- mod_timer(&adapter->watchdog_timer, jiffies);
-
- if (napi_schedule_prep(&adapter->napi)) {
-
- /* Disable interrupts and register for poll. The flush
- of the posted write is intentionally left out.
- */
-
- IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
- __napi_schedule(&adapter->napi);
- }
- return IRQ_HANDLED;
-}
-
-/**
- * ixgb_clean - NAPI Rx polling callback
- * @napi: napi struct pointer
- * @budget: max number of receives to clean
- **/
-
-static int
-ixgb_clean(struct napi_struct *napi, int budget)
-{
- struct ixgb_adapter *adapter = container_of(napi, struct ixgb_adapter, napi);
- int work_done = 0;
-
- ixgb_clean_tx_irq(adapter);
- ixgb_clean_rx_irq(adapter, &work_done, budget);
-
- /* If budget not fully consumed, exit the polling mode */
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- if (!test_bit(__IXGB_DOWN, &adapter->flags))
- ixgb_irq_enable(adapter);
- }
-
- return work_done;
-}
-
-/**
- * ixgb_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
- **/
-
-static bool
-ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
-{
- struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
- struct net_device *netdev = adapter->netdev;
- struct ixgb_tx_desc *tx_desc, *eop_desc;
- struct ixgb_buffer *buffer_info;
- unsigned int i, eop;
- bool cleaned = false;
-
- i = tx_ring->next_to_clean;
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IXGB_TX_DESC(*tx_ring, eop);
-
- while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
-
- rmb(); /* read buffer_info after eop_desc */
- for (cleaned = false; !cleaned; ) {
- tx_desc = IXGB_TX_DESC(*tx_ring, i);
- buffer_info = &tx_ring->buffer_info[i];
-
- if (tx_desc->popts &
- (IXGB_TX_DESC_POPTS_TXSM |
- IXGB_TX_DESC_POPTS_IXSM))
- adapter->hw_csum_tx_good++;
-
- ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
-
- *(u32 *)&(tx_desc->status) = 0;
-
- cleaned = (i == eop);
- if (++i == tx_ring->count) i = 0;
- }
-
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IXGB_TX_DESC(*tx_ring, eop);
- }
-
- tx_ring->next_to_clean = i;
-
- if (unlikely(cleaned && netif_carrier_ok(netdev) &&
- IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)) {
- /* Make sure that anybody stopping the queue after this
- * sees the new next_to_clean. */
- smp_mb();
-
- if (netif_queue_stopped(netdev) &&
- !(test_bit(__IXGB_DOWN, &adapter->flags))) {
- netif_wake_queue(netdev);
- ++adapter->restart_queue;
- }
- }
-
- if (adapter->detect_tx_hung) {
- /* detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of i */
- adapter->detect_tx_hung = false;
- if (tx_ring->buffer_info[eop].time_stamp &&
- time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
- && !(IXGB_READ_REG(&adapter->hw, STATUS) &
- IXGB_STATUS_TXOFF)) {
- /* detected Tx unit hang */
- netif_err(adapter, drv, adapter->netdev,
- "Detected Tx Unit Hang\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%x>\n"
- " jiffies <%lx>\n"
- " next_to_watch.status <%x>\n",
- IXGB_READ_REG(&adapter->hw, TDH),
- IXGB_READ_REG(&adapter->hw, TDT),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
- eop,
- jiffies,
- eop_desc->status);
- netif_stop_queue(netdev);
- }
- }
-
- return cleaned;
-}
-
-/**
- * ixgb_rx_checksum - Receive Checksum Offload for 82597.
- * @adapter: board private structure
- * @rx_desc: receive descriptor
- * @skb: socket buffer with received data
- **/
-
-static void
-ixgb_rx_checksum(struct ixgb_adapter *adapter,
- struct ixgb_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- /* Ignore Checksum bit is set OR
- * TCP Checksum has not been calculated
- */
- if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
- (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
- skb_checksum_none_assert(skb);
- return;
- }
-
- /* At this point we know the hardware did the TCP checksum */
- /* now look at the TCP checksum error bit */
- if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
- /* let the stack verify checksum errors */
- skb_checksum_none_assert(skb);
- adapter->hw_csum_rx_error++;
- } else {
- /* TCP checksum is good */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- adapter->hw_csum_rx_good++;
- }
-}
-
-/*
- * this should improve performance for small packets with large amounts
- * of reassembly being done in the stack
- */
-static void ixgb_check_copybreak(struct napi_struct *napi,
- struct ixgb_buffer *buffer_info,
- u32 length, struct sk_buff **skb)
-{
- struct sk_buff *new_skb;
-
- if (length > copybreak)
- return;
-
- new_skb = napi_alloc_skb(napi, length);
- if (!new_skb)
- return;
-
- skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
- (*skb)->data - NET_IP_ALIGN,
- length + NET_IP_ALIGN);
- /* save the skb in buffer_info as good */
- buffer_info->skb = *skb;
- *skb = new_skb;
-}
-
-/**
- * ixgb_clean_rx_irq - Send received data up the network stack
- * @adapter: board private structure
- * @work_done: output pointer to number of packets cleaned
- * @work_to_do: how much work we can complete
- **/
-
-static bool
-ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
-{
- struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- struct ixgb_rx_desc *rx_desc, *next_rxd;
- struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
- u32 length;
- unsigned int i, j;
- int cleaned_count = 0;
- bool cleaned = false;
-
- i = rx_ring->next_to_clean;
- rx_desc = IXGB_RX_DESC(*rx_ring, i);
- buffer_info = &rx_ring->buffer_info[i];
-
- while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
- struct sk_buff *skb;
- u8 status;
-
- if (*work_done >= work_to_do)
- break;
-
- (*work_done)++;
- rmb(); /* read descriptor and rx_buffer_info after status DD */
- status = rx_desc->status;
- skb = buffer_info->skb;
- buffer_info->skb = NULL;
-
- prefetch(skb->data - NET_IP_ALIGN);
-
- if (++i == rx_ring->count)
- i = 0;
- next_rxd = IXGB_RX_DESC(*rx_ring, i);
- prefetch(next_rxd);
-
- j = i + 1;
- if (j == rx_ring->count)
- j = 0;
- next2_buffer = &rx_ring->buffer_info[j];
- prefetch(next2_buffer);
-
- next_buffer = &rx_ring->buffer_info[i];
-
- cleaned = true;
- cleaned_count++;
-
- dma_unmap_single(&pdev->dev,
- buffer_info->dma,
- buffer_info->length,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
-
- length = le16_to_cpu(rx_desc->length);
- rx_desc->length = 0;
-
- if (unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
-
- /* All receives must fit into a single buffer */
-
- pr_debug("Receive packet consumed multiple buffers length<%x>\n",
- length);
-
- dev_kfree_skb_irq(skb);
- goto rxdesc_done;
- }
-
- if (unlikely(rx_desc->errors &
- (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
- IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {
- dev_kfree_skb_irq(skb);
- goto rxdesc_done;
- }
-
- ixgb_check_copybreak(&adapter->napi, buffer_info, length, &skb);
-
- /* Good Receive */
- skb_put(skb, length);
-
- /* Receive Checksum Offload */
- ixgb_rx_checksum(adapter, rx_desc, skb);
-
- skb->protocol = eth_type_trans(skb, netdev);
- if (status & IXGB_RX_DESC_STATUS_VP)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- le16_to_cpu(rx_desc->special));
-
- netif_receive_skb(skb);
-
-rxdesc_done:
- /* clean up descriptor, might be written over by hw */
- rx_desc->status = 0;
-
- /* return some buffers to hardware, one at a time is too slow */
- if (unlikely(cleaned_count >= IXGB_RX_BUFFER_WRITE)) {
- ixgb_alloc_rx_buffers(adapter, cleaned_count);
- cleaned_count = 0;
- }
-
- /* use prefetched values */
- rx_desc = next_rxd;
- buffer_info = next_buffer;
- }
-
- rx_ring->next_to_clean = i;
-
- cleaned_count = IXGB_DESC_UNUSED(rx_ring);
- if (cleaned_count)
- ixgb_alloc_rx_buffers(adapter, cleaned_count);
-
- return cleaned;
-}
-
-/**
- * ixgb_alloc_rx_buffers - Replace used receive buffers
- * @adapter: address of board private structure
- * @cleaned_count: how many buffers to allocate
- **/
-
-static void
-ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
-{
- struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
- struct ixgb_rx_desc *rx_desc;
- struct ixgb_buffer *buffer_info;
- struct sk_buff *skb;
- unsigned int i;
- long cleancount;
-
- i = rx_ring->next_to_use;
- buffer_info = &rx_ring->buffer_info[i];
- cleancount = IXGB_DESC_UNUSED(rx_ring);
-
-
- /* leave three descriptors unused */
- while (--cleancount > 2 && cleaned_count--) {
- /* recycle! it's good for you */
- skb = buffer_info->skb;
- if (skb) {
- skb_trim(skb, 0);
- goto map_skb;
- }
-
- skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
- if (unlikely(!skb)) {
- /* Better luck next round */
- adapter->alloc_rx_buff_failed++;
- break;
- }
-
- buffer_info->skb = skb;
- buffer_info->length = adapter->rx_buffer_len;
-map_skb:
- buffer_info->dma = dma_map_single(&pdev->dev,
- skb->data,
- adapter->rx_buffer_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- adapter->alloc_rx_buff_failed++;
- break;
- }
-
- rx_desc = IXGB_RX_DESC(*rx_ring, i);
- rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
- /* guarantee DD bit not set now before h/w gets descriptor
- * this is the rest of the workaround for h/w double
- * writeback. */
- rx_desc->status = 0;
-
-
- if (++i == rx_ring->count)
- i = 0;
- buffer_info = &rx_ring->buffer_info[i];
- }
-
- if (likely(rx_ring->next_to_use != i)) {
- rx_ring->next_to_use = i;
- if (unlikely(i-- == 0))
- i = (rx_ring->count - 1);
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs, such
- * as IA-64). */
- wmb();
- IXGB_WRITE_REG(&adapter->hw, RDT, i);
- }
-}
-
-static void
-ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
-{
- u32 ctrl;
-
- /* enable VLAN tag insert/strip */
- ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
- ctrl |= IXGB_CTRL0_VME;
- IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
-}
-
-static void
-ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
-{
- u32 ctrl;
-
- /* disable VLAN tag insert/strip */
- ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
- ctrl &= ~IXGB_CTRL0_VME;
- IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
-}
-
-static int
-ixgb_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- u32 vfta, index;
-
- /* add VID to filter table */
-
- index = (vid >> 5) & 0x7F;
- vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
- vfta |= (1 << (vid & 0x1F));
- ixgb_write_vfta(&adapter->hw, index, vfta);
- set_bit(vid, adapter->active_vlans);
-
- return 0;
-}
-
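The indexing above is a simple bitmap split: the upper bits of the VLAN ID select one of 128 32-bit VFTA words and the low five bits select the bit inside it, covering all 4096 possible VIDs. For example:

#include <stdio.h>

int main(void)
{
        unsigned int vid = 100;  /* example VLAN ID */

        unsigned int index = (vid >> 5) & 0x7F; /* which 32-bit VFTA word (0..127) */
        unsigned int bit   = vid & 0x1F;        /* which bit inside that word      */

        printf("VID %u -> VFTA[%u], bit %u\n", vid, index, bit);
        /* VID 100 -> word 3, bit 4 */
        return 0;
}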
-static int
-ixgb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
-{
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- u32 vfta, index;
-
- /* remove VID from filter table */
-
- index = (vid >> 5) & 0x7F;
- vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
- vfta &= ~(1 << (vid & 0x1F));
- ixgb_write_vfta(&adapter->hw, index, vfta);
- clear_bit(vid, adapter->active_vlans);
-
- return 0;
-}
-
-static void
-ixgb_restore_vlan(struct ixgb_adapter *adapter)
-{
- u16 vid;
-
- for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
- ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
-}
-
-/**
- * ixgb_io_error_detected - called when PCI error is detected
- * @pdev: pointer to pci device with error
- * @state: pci channel state after error
- *
- * This callback is called by the PCI subsystem whenever
- * a PCI bus error is detected.
- */
-static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
- return PCI_ERS_RESULT_DISCONNECT;
-
- if (netif_running(netdev))
- ixgb_down(adapter, true);
-
- pci_disable_device(pdev);
-
- /* Request a slot reset. */
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-/**
- * ixgb_io_slot_reset - called after the pci bus has been reset.
- * @pdev: pointer to pci device with error
- *
- * This callback is called after the PCI bus has been reset.
- * Basically, this tries to restart the card from scratch.
- * This is a shortened version of the device probe/discovery code;
- * it resembles the first half of the ixgb_probe() routine.
- */
-static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgb_adapter *adapter = netdev_priv(netdev);
- u8 addr[ETH_ALEN];
-
- if (pci_enable_device(pdev)) {
- netif_err(adapter, probe, adapter->netdev,
- "Cannot re-enable PCI device after reset\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- /* Perform card reset only on one instance of the card */
- if (0 != PCI_FUNC (pdev->devfn))
- return PCI_ERS_RESULT_RECOVERED;
-
- pci_set_master(pdev);
-
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- ixgb_reset(adapter);
-
- /* Make sure the EEPROM is good */
- if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
- netif_err(adapter, probe, adapter->netdev,
- "After reset, the EEPROM checksum is not valid\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
- ixgb_get_ee_mac_addr(&adapter->hw, addr);
- eth_hw_addr_set(netdev, addr);
- memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
-
- if (!is_valid_ether_addr(netdev->perm_addr)) {
- netif_err(adapter, probe, adapter->netdev,
- "After reset, invalid MAC address\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_RECOVERED;
-}
-
-/**
- * ixgb_io_resume - called when it's OK to resume normal operations
- * @pdev: pointer to pci device with error
- *
- * The error recovery driver tells us that it's OK to resume
- * normal operation. Implementation resembles the second half
- * of the ixgb_probe() routine.
- */
-static void ixgb_io_resume(struct pci_dev *pdev)
-{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct ixgb_adapter *adapter = netdev_priv(netdev);
-
- pci_set_master(pdev);
-
- if (netif_running(netdev)) {
- if (ixgb_up(adapter)) {
- pr_err("can't bring device back up after reset\n");
- return;
- }
- }
-
- netif_device_attach(netdev);
- mod_timer(&adapter->watchdog_timer, jiffies);
-}
-
-/* ixgb_main.c */
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h b/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
deleted file mode 100644
index 7bd54efa698d..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_osdep.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-/* glue for the OS independent part of ixgb
- * includes register access macros
- */
-
-#ifndef _IXGB_OSDEP_H_
-#define _IXGB_OSDEP_H_
-
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/if_ether.h>
-
-#undef ASSERT
-#define ASSERT(x) BUG_ON(!(x))
-
-#define ENTER() pr_debug("%s\n", __func__);
-
-#define IXGB_WRITE_REG(a, reg, value) ( \
- writel((value), ((a)->hw_addr + IXGB_##reg)))
-
-#define IXGB_READ_REG(a, reg) ( \
- readl((a)->hw_addr + IXGB_##reg))
-
-#define IXGB_WRITE_REG_ARRAY(a, reg, offset, value) ( \
- writel((value), ((a)->hw_addr + IXGB_##reg + ((offset) << 2))))
-
-#define IXGB_READ_REG_ARRAY(a, reg, offset) ( \
- readl((a)->hw_addr + IXGB_##reg + ((offset) << 2)))
-
-#define IXGB_WRITE_FLUSH(a) IXGB_READ_REG(a, STATUS)
-
-#define IXGB_MEMCPY memcpy
-
-#endif /* _IXGB_OSDEP_H_ */
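The helpers above lean on token pasting: IXGB_READ_REG(hw, STATUS) expands to readl(hw->hw_addr + IXGB_STATUS). A hedged userspace analogue of the same pattern, with invented register offsets and an ordinary array standing in for the mapped BAR, looks like this:

#include <stdio.h>
#include <stdint.h>

/* hypothetical register offsets, in bytes */
#define DEMO_CTRL0  0x00
#define DEMO_STATUS 0x08

struct demo_hw {
        volatile uint8_t *hw_addr;
};

#define DEMO_READ_REG(a, reg) \
        (*(volatile uint32_t *)((a)->hw_addr + DEMO_##reg))
#define DEMO_WRITE_REG(a, reg, value) \
        (*(volatile uint32_t *)((a)->hw_addr + DEMO_##reg) = (value))

int main(void)
{
        uint32_t fake_bar[4] = { 0 };   /* stands in for the mapped PCI BAR */
        struct demo_hw hw = { .hw_addr = (volatile uint8_t *)fake_bar };

        DEMO_WRITE_REG(&hw, CTRL0, 0x12345678u);
        printf("CTRL0  = 0x%08x\n", (unsigned)DEMO_READ_REG(&hw, CTRL0));
        printf("STATUS = 0x%08x\n", (unsigned)DEMO_READ_REG(&hw, STATUS));
        return 0;
}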
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
deleted file mode 100644
index f0cadd532c53..000000000000
--- a/drivers/net/ethernet/intel/ixgb/ixgb_param.c
+++ /dev/null
@@ -1,444 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 1999 - 2008 Intel Corporation. */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include "ixgb.h"
-
-/* This is the only thing that needs to be changed to adjust the
- * maximum number of ports that the driver can manage.
- */
-
-#define IXGB_MAX_NIC 8
-
-#define OPTION_UNSET -1
-#define OPTION_DISABLED 0
-#define OPTION_ENABLED 1
-
-/* All parameters are treated the same, as an integer array of values.
- * This macro just reduces the need to repeat the same declaration code
- * over and over (plus this helps to avoid typo bugs).
- */
-
-#define IXGB_PARAM_INIT { [0 ... IXGB_MAX_NIC] = OPTION_UNSET }
-#define IXGB_PARAM(X, desc) \
- static int X[IXGB_MAX_NIC+1] \
- = IXGB_PARAM_INIT; \
- static unsigned int num_##X = 0; \
- module_param_array_named(X, X, int, &num_##X, 0); \
- MODULE_PARM_DESC(X, desc);
-
-/* Transmit Descriptor Count
- *
- * Valid Range: 64-4096
- *
- * Default Value: 256
- */
-
-IXGB_PARAM(TxDescriptors, "Number of transmit descriptors");
-
-/* Receive Descriptor Count
- *
- * Valid Range: 64-4096
- *
- * Default Value: 1024
- */
-
-IXGB_PARAM(RxDescriptors, "Number of receive descriptors");
-
-/* User Specified Flow Control Override
- *
- * Valid Range: 0-3
- * - 0 - No Flow Control
- * - 1 - Rx only, respond to PAUSE frames but do not generate them
- * - 2 - Tx only, generate PAUSE frames but ignore them on receive
- * - 3 - Full Flow Control Support
- *
- * Default Value: 2 - Tx only (silicon bug avoidance)
- */
-
-IXGB_PARAM(FlowControl, "Flow Control setting");
-
-/* XsumRX - Receive Checksum Offload Enable/Disable
- *
- * Valid Range: 0, 1
- * - 0 - disables all checksum offload
- * - 1 - enables receive IP/TCP/UDP checksum offload
- * on 82597 based NICs
- *
- * Default Value: 1
- */
-
-IXGB_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
-
-/* Transmit Interrupt Delay in units of 0.8192 microseconds
- *
- * Valid Range: 0-65535
- *
- * Default Value: 32
- */
-
-IXGB_PARAM(TxIntDelay, "Transmit Interrupt Delay");
-
-/* Receive Interrupt Delay in units of 0.8192 microseconds
- *
- * Valid Range: 0-65535
- *
- * Default Value: 72
- */
-
-IXGB_PARAM(RxIntDelay, "Receive Interrupt Delay");
-
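Both delay parameters above are programmed in 0.8192 microsecond ticks, so the defaults of 32 and 72 correspond to roughly 26 us and 59 us of interrupt coalescing. A trivial conversion sketch:

#include <stdio.h>

int main(void)
{
        double unit_us = 0.8192;      /* one delay tick, per the comments above */

        printf("TxIntDelay 32 ~= %.1f us\n", 32 * unit_us);  /* ~26.2 us */
        printf("RxIntDelay 72 ~= %.1f us\n", 72 * unit_us);  /* ~59.0 us */
        return 0;
}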
-/* Receive Flow control high threshold (when we send a pause frame)
- * (FCRTH)
- *
- * Valid Range: 1,536 - 262,136 (0x600 - 0x3FFF8, 8 byte granularity)
- *
- * Default Value: 196,608 (0x30000)
- */
-
-IXGB_PARAM(RxFCHighThresh, "Receive Flow Control High Threshold");
-
-/* Receive Flow control low threshold (when we send a resume frame)
- * (FCRTL)
- *
- * Valid Range: 64 - 262,136 (0x40 - 0x3FFF8, 8 byte granularity)
- * must be less than high threshold by at least 8 bytes
- *
- * Default Value: 163,840 (0x28000)
- */
-
-IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold");
-
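The two thresholds form a hysteresis band: a pause frame goes out when the receive FIFO fills past the high-water mark and a resume (XON) when it drains below the low-water mark, which is why the low value must sit at least 8 bytes under the high one. A quick sanity check with the stated defaults:

#include <stdio.h>

int main(void)
{
        unsigned int fcrth = 0x30000;  /* default high-water mark, 196608 bytes */
        unsigned int fcrtl = 0x28000;  /* default low-water mark,  163840 bytes */

        /* both registers have 8-byte granularity */
        if (fcrth % 8 || fcrtl % 8)
                printf("thresholds must be 8-byte aligned\n");

        /* the driver later enforces high >= low + 8 before using them */
        if (fcrth < fcrtl + 8)
                printf("invalid: high-water must exceed low-water by >= 8\n");
        else
                printf("hysteresis window: %u bytes\n", fcrth - fcrtl); /* 32768 */
        return 0;
}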
-/* Flow control request timeout (how long to pause the link partner's tx)
- * (PAP 15:0)
- *
- * Valid Range: 1 - 65535
- *
- * Default Value: 65535 (0xffff) (we'll send an xon if we recover)
- */
-
-IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout");
-
-/* Interrupt Delay Enable
- *
- * Valid Range: 0, 1
- *
- * - 0 - disables transmit interrupt delay
- * - 1 - enables transmit interrupt delay
- *
- * Default Value: 1
- */
-
-IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
-
-
-#define DEFAULT_TIDV 32
-#define MAX_TIDV 0xFFFF
-#define MIN_TIDV 0
-
-#define DEFAULT_RDTR 72
-#define MAX_RDTR 0xFFFF
-#define MIN_RDTR 0
-
-#define XSUMRX_DEFAULT OPTION_ENABLED
-
-#define DEFAULT_FCRTL 0x28000
-#define DEFAULT_FCRTH 0x30000
-#define MIN_FCRTL 0
-#define MAX_FCRTL 0x3FFE8
-#define MIN_FCRTH 8
-#define MAX_FCRTH 0x3FFF0
-
-#define MIN_FCPAUSE 1
-#define MAX_FCPAUSE 0xffff
-#define DEFAULT_FCPAUSE 0xFFFF /* this may be too long */
-
-struct ixgb_option {
- enum { enable_option, range_option, list_option } type;
- const char *name;
- const char *err;
- int def;
- union {
- struct { /* range_option info */
- int min;
- int max;
- } r;
- struct { /* list_option info */
- int nr;
- const struct ixgb_opt_list {
- int i;
- const char *str;
- } *p;
- } l;
- } arg;
-};
-
-static int
-ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
-{
- if (*value == OPTION_UNSET) {
- *value = opt->def;
- return 0;
- }
-
- switch (opt->type) {
- case enable_option:
- switch (*value) {
- case OPTION_ENABLED:
- pr_info("%s Enabled\n", opt->name);
- return 0;
- case OPTION_DISABLED:
- pr_info("%s Disabled\n", opt->name);
- return 0;
- }
- break;
- case range_option:
- if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- pr_info("%s set to %i\n", opt->name, *value);
- return 0;
- }
- break;
- case list_option: {
- int i;
- const struct ixgb_opt_list *ent;
-
- for (i = 0; i < opt->arg.l.nr; i++) {
- ent = &opt->arg.l.p[i];
- if (*value == ent->i) {
- if (ent->str[0] != '\0')
- pr_info("%s\n", ent->str);
- return 0;
- }
- }
- }
- break;
- default:
- BUG();
- }
-
- pr_info("Invalid %s specified (%i) %s\n", opt->name, *value, opt->err);
- *value = opt->def;
- return -1;
-}
-
-/**
- * ixgb_check_options - Range Checking for Command Line Parameters
- * @adapter: board private structure
- *
- * This routine checks all command line parameters for valid user
- * input. If an invalid value is given, or if no user specified
- * value exists, a default value is used. The final value is stored
- * in a variable in the adapter structure.
- **/
-
-void
-ixgb_check_options(struct ixgb_adapter *adapter)
-{
- int bd = adapter->bd_number;
- if (bd >= IXGB_MAX_NIC) {
- pr_notice("Warning: no configuration for board #%i\n", bd);
- pr_notice("Using defaults for all values\n");
- }
-
- { /* Transmit Descriptor Count */
- static const struct ixgb_option opt = {
- .type = range_option,
- .name = "Transmit Descriptors",
- .err = "using default of " __MODULE_STRING(DEFAULT_TXD),
- .def = DEFAULT_TXD,
- .arg = { .r = { .min = MIN_TXD,
- .max = MAX_TXD}}
- };
- struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
-
- if (num_TxDescriptors > bd) {
- tx_ring->count = TxDescriptors[bd];
- ixgb_validate_option(&tx_ring->count, &opt);
- } else {
- tx_ring->count = opt.def;
- }
- tx_ring->count = ALIGN(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
- }
- { /* Receive Descriptor Count */
- static const struct ixgb_option opt = {
- .type = range_option,
- .name = "Receive Descriptors",
- .err = "using default of " __MODULE_STRING(DEFAULT_RXD),
- .def = DEFAULT_RXD,
- .arg = { .r = { .min = MIN_RXD,
- .max = MAX_RXD}}
- };
- struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
-
- if (num_RxDescriptors > bd) {
- rx_ring->count = RxDescriptors[bd];
- ixgb_validate_option(&rx_ring->count, &opt);
- } else {
- rx_ring->count = opt.def;
- }
- rx_ring->count = ALIGN(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
- }
- { /* Receive Checksum Offload Enable */
- static const struct ixgb_option opt = {
- .type = enable_option,
- .name = "Receive Checksum Offload",
- .err = "defaulting to Enabled",
- .def = OPTION_ENABLED
- };
-
- if (num_XsumRX > bd) {
- unsigned int rx_csum = XsumRX[bd];
- ixgb_validate_option(&rx_csum, &opt);
- adapter->rx_csum = rx_csum;
- } else {
- adapter->rx_csum = opt.def;
- }
- }
- { /* Flow Control */
-
- static const struct ixgb_opt_list fc_list[] = {
- { ixgb_fc_none, "Flow Control Disabled" },
- { ixgb_fc_rx_pause, "Flow Control Receive Only" },
- { ixgb_fc_tx_pause, "Flow Control Transmit Only" },
- { ixgb_fc_full, "Flow Control Enabled" },
- { ixgb_fc_default, "Flow Control Hardware Default" }
- };
-
- static const struct ixgb_option opt = {
- .type = list_option,
- .name = "Flow Control",
- .err = "reading default settings from EEPROM",
- .def = ixgb_fc_tx_pause,
- .arg = { .l = { .nr = ARRAY_SIZE(fc_list),
- .p = fc_list }}
- };
-
- if (num_FlowControl > bd) {
- unsigned int fc = FlowControl[bd];
- ixgb_validate_option(&fc, &opt);
- adapter->hw.fc.type = fc;
- } else {
- adapter->hw.fc.type = opt.def;
- }
- }
- { /* Receive Flow Control High Threshold */
- static const struct ixgb_option opt = {
- .type = range_option,
- .name = "Rx Flow Control High Threshold",
- .err = "using default of " __MODULE_STRING(DEFAULT_FCRTH),
- .def = DEFAULT_FCRTH,
- .arg = { .r = { .min = MIN_FCRTH,
- .max = MAX_FCRTH}}
- };
-
- if (num_RxFCHighThresh > bd) {
- adapter->hw.fc.high_water = RxFCHighThresh[bd];
- ixgb_validate_option(&adapter->hw.fc.high_water, &opt);
- } else {
- adapter->hw.fc.high_water = opt.def;
- }
- if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
- pr_info("Ignoring RxFCHighThresh when no RxFC\n");
- }
- { /* Receive Flow Control Low Threshold */
- static const struct ixgb_option opt = {
- .type = range_option,
- .name = "Rx Flow Control Low Threshold",
- .err = "using default of " __MODULE_STRING(DEFAULT_FCRTL),
- .def = DEFAULT_FCRTL,
- .arg = { .r = { .min = MIN_FCRTL,
- .max = MAX_FCRTL}}
- };
-
- if (num_RxFCLowThresh > bd) {
- adapter->hw.fc.low_water = RxFCLowThresh[bd];
- ixgb_validate_option(&adapter->hw.fc.low_water, &opt);
- } else {
- adapter->hw.fc.low_water = opt.def;
- }
- if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
- pr_info("Ignoring RxFCLowThresh when no RxFC\n");
- }
- { /* Flow Control Pause Time Request*/
- static const struct ixgb_option opt = {
- .type = range_option,
- .name = "Flow Control Pause Time Request",
- .err = "using default of "__MODULE_STRING(DEFAULT_FCPAUSE),
- .def = DEFAULT_FCPAUSE,
- .arg = { .r = { .min = MIN_FCPAUSE,
- .max = MAX_FCPAUSE}}
- };
-
- if (num_FCReqTimeout > bd) {
- unsigned int pause_time = FCReqTimeout[bd];
- ixgb_validate_option(&pause_time, &opt);
- adapter->hw.fc.pause_time = pause_time;
- } else {
- adapter->hw.fc.pause_time = opt.def;
- }
- if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
- pr_info("Ignoring FCReqTimeout when no RxFC\n");
- }
- /* high low and spacing check for rx flow control thresholds */
- if (adapter->hw.fc.type & ixgb_fc_tx_pause) {
- /* high must be greater than low */
- if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
- /* set defaults */
- pr_info("RxFCHighThresh must be >= (RxFCLowThresh + 8), Using Defaults\n");
- adapter->hw.fc.high_water = DEFAULT_FCRTH;
- adapter->hw.fc.low_water = DEFAULT_FCRTL;
- }
- }
- { /* Receive Interrupt Delay */
- static const struct ixgb_option opt = {
- .type = range_option,
- .name = "Receive Interrupt Delay",
- .err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
- .def = DEFAULT_RDTR,
- .arg = { .r = { .min = MIN_RDTR,
- .max = MAX_RDTR}}
- };
-
- if (num_RxIntDelay > bd) {
- adapter->rx_int_delay = RxIntDelay[bd];
- ixgb_validate_option(&adapter->rx_int_delay, &opt);
- } else {
- adapter->rx_int_delay = opt.def;
- }
- }
- { /* Transmit Interrupt Delay */
- static const struct ixgb_option opt = {
- .type = range_option,
- .name = "Transmit Interrupt Delay",
- .err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
- .def = DEFAULT_TIDV,
- .arg = { .r = { .min = MIN_TIDV,
- .max = MAX_TIDV}}
- };
-
- if (num_TxIntDelay > bd) {
- adapter->tx_int_delay = TxIntDelay[bd];
- ixgb_validate_option(&adapter->tx_int_delay, &opt);
- } else {
- adapter->tx_int_delay = opt.def;
- }
- }
-
- { /* Transmit Interrupt Delay Enable */
- static const struct ixgb_option opt = {
- .type = enable_option,
- .name = "Tx Interrupt Delay Enable",
- .err = "defaulting to Enabled",
- .def = OPTION_ENABLED
- };
-
- if (num_IntDelayEnable > bd) {
- unsigned int ide = IntDelayEnable[bd];
- ixgb_validate_option(&ide, &opt);
- adapter->tx_int_delay_enable = ide;
- } else {
- adapter->tx_int_delay_enable = opt.def;
- }
- }
-}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 4a69823e6abd..63d4e32df029 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -9,7 +9,6 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/cpumask.h>
-#include <linux/aer.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/phy.h>
@@ -39,7 +38,10 @@
/* TX/RX descriptor defines */
#define IXGBE_DEFAULT_TXD 512
#define IXGBE_DEFAULT_TX_WORK 256
-#define IXGBE_MAX_TXD 4096
+#define IXGBE_MAX_TXD_82598 4096
+#define IXGBE_MAX_TXD_82599 8192
+#define IXGBE_MAX_TXD_X540 8192
+#define IXGBE_MAX_TXD_X550 32768
#define IXGBE_MIN_TXD 64
#if (PAGE_SIZE < 8192)
@@ -47,7 +49,10 @@
#else
#define IXGBE_DEFAULT_RXD 128
#endif
-#define IXGBE_MAX_RXD 4096
+#define IXGBE_MAX_RXD_82598 4096
+#define IXGBE_MAX_RXD_82599 8192
+#define IXGBE_MAX_RXD_X540 8192
+#define IXGBE_MAX_RXD_X550 32768
#define IXGBE_MIN_RXD 64
/* flow control */
@@ -67,6 +72,8 @@
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
+#define IXGBE_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
/* Attempt to maximize the headroom available for incoming frames. We
* use a 2K buffer for receives and need 1536/1534 to store the data for
* the frame. This leaves us with 512 bytes of room. From that we need
@@ -167,21 +174,58 @@ enum ixgbe_tx_flags {
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
+ { \
+ u32 current_counter = IXGBE_READ_REG(hw, reg); \
+ if (current_counter < last_counter) \
+ counter += 0x100000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFFF00000000LL; \
+ counter |= current_counter; \
+ }
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+ { \
+ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
+ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
+ u64 current_counter = (current_counter_msb << 32) | \
+ current_counter_lsb; \
+ if (current_counter < last_counter) \
+ counter += 0x1000000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFF000000000LL; \
+ counter |= current_counter; \
+ }
+
+struct vf_stats {
+ u64 gprc;
+ u64 gorc;
+ u64 gptc;
+ u64 gotc;
+ u64 mprc;
+};
+
struct vf_data_storage {
struct pci_dev *vfdev;
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
u16 num_vf_mc_hashes;
bool clear_to_send;
+ struct vf_stats vfstats;
+ struct vf_stats last_vfstats;
+ struct vf_stats saved_rst_vfstats;
bool pf_set_mac;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
+ int link_enable;
+ int link_state;
u8 spoofchk_enabled;
bool rss_query_enabled;
u8 trusted;
int xcast_mode;
unsigned int vf_api;
+ u8 primary_abort_count;
};
enum ixgbevf_xcast_modes {
@@ -556,6 +600,8 @@ struct ixgbe_mac_addr {
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */
+#define IXGBE_PRIMARY_ABORT_LIMIT 5
+
/* board specific private data structure */
struct ixgbe_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -614,6 +660,7 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_RX_LEGACY BIT(16)
#define IXGBE_FLAG2_IPSEC_ENABLED BIT(17)
#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18)
+#define IXGBE_FLAG2_AUTO_DISABLE_VF BIT(19)
/* Tx fast path data */
int num_tx_queues;
@@ -773,6 +820,7 @@ struct ixgbe_adapter {
#ifdef CONFIG_IXGBE_IPSEC
struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_IXGBE_IPSEC */
+ spinlock_t vfs_lock;
};
static inline int ixgbe_determine_xdp_q_idx(int cpu)
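The UPDATE_VF_COUNTER_36bit macro added above extends a 36-bit clear-on-read hardware counter into a monotonically growing 64-bit software value by detecting wrap-around. A minimal standalone sketch of the same rollover step (update_36bit_counter() is a hypothetical helper for illustration, not part of the driver):

/* Hypothetical illustration of the rollover handling done by
 * UPDATE_VF_COUNTER_36bit: hw_value is the raw 36-bit reading
 * (LSB | MSB << 32), *last the previous reading, *total the
 * accumulated 64-bit counter.
 */
static void update_36bit_counter(u64 hw_value, u64 *last, u64 *total)
{
	if (hw_value < *last)			/* hardware counter wrapped */
		*total += 0x1000000000ULL;	/* add 2^36 */
	*last = hw_value;
	*total &= 0xFFFFFFF000000000ULL;	/* keep the accumulated epochs */
	*total |= hw_value;			/* splice in the current value */
}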
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 95c92fe890a1..100388968e4d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -879,7 +879,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* ixgbe_clear_vfta_82598 - Clear VLAN filter table
* @hw: pointer to hardware structure
*
- * Clears the VLAN filer table, and the VMDq index associated with the filter
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
**/
static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index e90b5047e695..878dd8dff528 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -30,7 +30,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
-static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
/* Base table for registers values that change by MAC */
const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = {
@@ -746,10 +746,10 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
usleep_range(1000, 2000);
/*
- * Prevent the PCI-E bus from from hanging by disabling PCI-E master
+ * Prevent the PCI-E bus from hanging by disabling PCI-E primary
* access and verify no pending requests
*/
- return ixgbe_disable_pcie_master(hw);
+ return ixgbe_disable_pcie_primary(hw);
}
/**
@@ -2506,15 +2506,15 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
}
/**
- * ixgbe_disable_pcie_master - Disable PCI-express master access
+ * ixgbe_disable_pcie_primary - Disable PCI-express primary access
* @hw: pointer to hardware structure
*
- * Disables PCI-Express master access and verifies there are no pending
- * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
- * bit hasn't caused the master requests to be disabled, else 0
- * is returned signifying master requests disabled.
+ * Disables PCI-Express primary access and verifies there are no pending
+ * requests. IXGBE_ERR_PRIMARY_REQUESTS_PENDING is returned if primary disable
+ * bit hasn't caused the primary requests to be disabled, else 0
+ * is returned signifying primary requests disabled.
**/
-static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+static s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
{
u32 i, poll;
u16 value;
@@ -2523,23 +2523,23 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
/* Poll for bit to read as set */
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
if (IXGBE_READ_REG(hw, IXGBE_CTRL) & IXGBE_CTRL_GIO_DIS)
break;
usleep_range(100, 120);
}
- if (i >= IXGBE_PCI_MASTER_DISABLE_TIMEOUT) {
+ if (i >= IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT) {
hw_dbg(hw, "GIO disable did not set - requesting resets\n");
goto gio_disable_fail;
}
- /* Exit if master requests are blocked */
+ /* Exit if primary requests are blocked */
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
ixgbe_removed(hw->hw_addr))
return 0;
- /* Poll for master request bit to clear */
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ /* Poll for primary request bit to clear */
+ for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
udelay(100);
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
return 0;
@@ -2547,13 +2547,13 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
/*
* Two consecutive resets are required via CTRL.RST per datasheet
- * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
- * of this need. The first reset prevents new master requests from
+ * 5.2.5.3.2 Primary Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new primary requests from
* being issued by our device. We then must wait 1usec or more for any
* remaining completions from the PCIe bus to trickle in, and then reset
* again to clear out any effects they may have had on our device.
*/
- hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
+ hw_dbg(hw, "GIO Primary Disable bit didn't clear - requesting resets\n");
gio_disable_fail:
hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
@@ -2575,7 +2575,7 @@ gio_disable_fail:
}
hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
- return IXGBE_ERR_MASTER_REQUESTS_PENDING;
+ return IXGBE_ERR_PRIMARY_REQUESTS_PENDING;
}
/**
@@ -3237,7 +3237,7 @@ vfta_update:
* ixgbe_clear_vfta_generic - Clear VLAN filter table
* @hw: pointer to hardware structure
*
- * Clears the VLAN filer table, and the VMDq index associated with the filter
+ * Clears the VLAN filter table, and the VMDq index associated with the filter
**/
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
{
@@ -3292,13 +3292,14 @@ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete)
{
+ bool crosstalk_fix_active = ixgbe_need_crosstalk_fix(hw);
u32 links_reg, links_orig;
u32 i;
/* If Crosstalk fix enabled do the sanity check of making sure
* the SFP+ cage is full.
*/
- if (ixgbe_need_crosstalk_fix(hw)) {
+ if (crosstalk_fix_active) {
u32 sfp_cage_full;
switch (hw->mac.type) {
@@ -3346,10 +3347,24 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
}
} else {
- if (links_reg & IXGBE_LINKS_UP)
+ if (links_reg & IXGBE_LINKS_UP) {
+ if (crosstalk_fix_active) {
+ /* Check the link state again after a delay
+ * to filter out spurious link up
+ * notifications.
+ */
+ mdelay(5);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (!(links_reg & IXGBE_LINKS_UP)) {
+ *link_up = false;
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ return 0;
+ }
+ }
*link_up = true;
- else
+ } else {
*link_up = false;
+ }
}
switch (links_reg & IXGBE_LINKS_SPEED_82599) {
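The crosstalk-fix path added above re-reads the LINKS register after a short delay so a transient blip is not reported as link up. The same debounce, sketched in isolation (link_is_stable() is an illustrative name, not a driver function):

static bool link_is_stable(struct ixgbe_hw *hw)
{
	u32 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);

	if (!(links_reg & IXGBE_LINKS_UP))
		return false;
	mdelay(5);	/* give a spurious link-up time to decay */
	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	return !!(links_reg & IXGBE_LINKS_UP);
}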
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 72e6ebffea33..e85f7d2e8810 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -8,12 +8,10 @@
#include "ixgbe_sriov.h"
/* Callbacks for DCB netlink in the kernel */
-#define BIT_DCB_MODE 0x01
#define BIT_PFC 0x02
#define BIT_PG_RX 0x04
#define BIT_PG_TX 0x08
#define BIT_APP_UPCHG 0x10
-#define BIT_LINKSPEED 0x80
/* Responses for the DCB_C_SET_ALL command */
#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 8362822316a9..0bbad4a5cc2f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -18,8 +18,6 @@
#include "ixgbe_phy.h"
-#define IXGBE_ALL_RAR_ENTRIES 16
-
enum {NETDEV_STATS, IXGBE_STATS};
struct ixgbe_stats {
@@ -138,6 +136,8 @@ static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
"legacy-rx",
#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1)
"vf-ipsec",
+#define IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF BIT(2)
+ "mdd-disable-vf",
};
#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
@@ -1106,32 +1106,72 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, adapter->eeprom_id,
+ strscpy(drvinfo->fw_version, adapter->eeprom_id,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}
+static u32 ixgbe_get_max_rxd(struct ixgbe_adapter *adapter)
+{
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ return IXGBE_MAX_RXD_82598;
+ case ixgbe_mac_82599EB:
+ return IXGBE_MAX_RXD_82599;
+ case ixgbe_mac_X540:
+ return IXGBE_MAX_RXD_X540;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ return IXGBE_MAX_RXD_X550;
+ default:
+ return IXGBE_MAX_RXD_82598;
+ }
+}
+
+static u32 ixgbe_get_max_txd(struct ixgbe_adapter *adapter)
+{
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ return IXGBE_MAX_TXD_82598;
+ case ixgbe_mac_82599EB:
+ return IXGBE_MAX_TXD_82599;
+ case ixgbe_mac_X540:
+ return IXGBE_MAX_TXD_X540;
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ return IXGBE_MAX_TXD_X550;
+ default:
+ return IXGBE_MAX_TXD_82598;
+ }
+}
+
static void ixgbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
- ring->rx_max_pending = IXGBE_MAX_RXD;
- ring->tx_max_pending = IXGBE_MAX_TXD;
+ ring->rx_max_pending = ixgbe_get_max_rxd(adapter);
+ ring->tx_max_pending = ixgbe_get_max_txd(adapter);
ring->rx_pending = rx_ring->count;
ring->tx_pending = tx_ring->count;
}
static int ixgbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *temp_ring;
@@ -1142,11 +1182,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
return -EINVAL;
new_tx_count = clamp_t(u32, ring->tx_pending,
- IXGBE_MIN_TXD, IXGBE_MAX_TXD);
+ IXGBE_MIN_TXD, ixgbe_get_max_txd(adapter));
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
new_rx_count = clamp_t(u32, ring->rx_pending,
- IXGBE_MIN_RXD, IXGBE_MAX_RXD);
+ IXGBE_MIN_RXD, ixgbe_get_max_rxd(adapter));
new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
if ((new_tx_count == adapter->tx_ring_count) &&
@@ -1331,10 +1371,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
data[i] = ring->stats.packets;
data[i+1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
i += 2;
}
for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
@@ -1347,10 +1387,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
data[i] = ring->stats.packets;
data[i+1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
i += 2;
}
@@ -1956,20 +1996,13 @@ static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
unsigned int frame_size)
{
unsigned char *data;
- bool match = true;
frame_size >>= 1;
- data = kmap(rx_buffer->page) + rx_buffer->page_offset;
-
- if (data[3] != 0xFF ||
- data[frame_size + 10] != 0xBE ||
- data[frame_size + 12] != 0xAF)
- match = false;
-
- kunmap(rx_buffer->page);
+ data = page_address(rx_buffer->page) + rx_buffer->page_offset;
- return match;
+ return data[3] == 0xFF && data[frame_size + 10] == 0xBE &&
+ data[frame_size + 12] == 0xAF;
}
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
@@ -2632,6 +2665,14 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
return 0;
}
+static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
+{
+ if (adapter->hw.mac.type < ixgbe_mac_X550)
+ return 16;
+ else
+ return 64;
+}
+
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -2640,7 +2681,8 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = adapter->num_rx_queues;
+ cmd->data = min_t(int, adapter->num_rx_queues,
+ ixgbe_rss_indir_tbl_max(adapter));
ret = 0;
break;
case ETHTOOL_GRXCLSRLCNT:
@@ -3042,14 +3084,6 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
-static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
-{
- if (adapter->hw.mac.type < ixgbe_mac_X550)
- return 16;
- else
- return 64;
-}
-
static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
return IXGBE_RSS_KEY_SIZE;
@@ -3098,8 +3132,8 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
int i;
u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
- if (hfunc)
- return -EINVAL;
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
/* Fill out the redirection table */
if (indir) {
@@ -3506,6 +3540,9 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev)
if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;
+ if (adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF)
+ priv_flags |= IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF;
+
return priv_flags;
}
@@ -3513,6 +3550,7 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
unsigned int flags2 = adapter->flags2;
+ unsigned int i;
flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
@@ -3522,6 +3560,21 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;
+ flags2 &= ~IXGBE_FLAG2_AUTO_DISABLE_VF;
+ if (priv_flags & IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF) {
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ /* Reset primary abort counter */
+ for (i = 0; i < adapter->num_vfs; i++)
+ adapter->vfinfo[i].primary_abort_count = 0;
+
+ flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
+ } else {
+ e_info(probe,
+ "Cannot set private flags: Operation not supported\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
if (flags2 != adapter->flags2) {
adapter->flags2 = flags2;
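With the per-MAC maximums above, ixgbe_set_ringparam() clamps the requested descriptor count to the device limit and rounds it to the descriptor multiple. A small sketch of that step for the Tx side (the helper name is illustrative; the constants are the ones shown in the hunks):

static u32 sanitize_tx_ring_count(struct ixgbe_adapter *adapter, u32 requested)
{
	u32 count = clamp_t(u32, requested, IXGBE_MIN_TXD,
			    ixgbe_get_max_txd(adapter));

	/* e.g. a request of 100000 on an X550 is clamped to 32768 first,
	 * then rounded to the descriptor multiple
	 */
	return ALIGN(count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
}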
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 0fcd82036d4e..7311bd545acf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1004,7 +1004,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
ixgbe_driver_name,
UTS_RELEASE);
/* Firmware Version */
- strlcpy(info->firmware_version, adapter->eeprom_id,
+ strscpy(info->firmware_version, adapter->eeprom_id,
sizeof(info->firmware_version));
/* Model */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index e596e1a9fc75..13a6fca31004 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -557,8 +557,10 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
/**
* ixgbe_ipsec_add_sa - program device with a security association
* @xs: pointer to transformer state struct
+ * @extack: extack point to fill failure reason
**/
-static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
+static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
+ struct netlink_ext_ack *extack)
{
struct net_device *dev = xs->xso.real_dev;
struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -570,33 +572,37 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
int i;
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
- netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
- xs->id.proto);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for ipsec offload");
return -EINVAL;
}
if (xs->props.mode != XFRM_MODE_TRANSPORT) {
- netdev_err(dev, "Unsupported mode for ipsec offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for ipsec offload");
return -EINVAL;
}
if (ixgbe_ipsec_check_mgmt_ip(xs)) {
- netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
+ NL_SET_ERR_MSG_MOD(extack, "IPsec IP addr clash with mgmt filters");
return -EINVAL;
}
- if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported ipsec offload type");
+ return -EINVAL;
+ }
+
+ if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
struct rx_sa rsa;
if (xs->calg) {
- netdev_err(dev, "Compression offload not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "Compression offload not supported");
return -EINVAL;
}
/* find the first unused index */
ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
if (ret < 0) {
- netdev_err(dev, "No space for SA in Rx table!\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx table!");
return ret;
}
sa_idx = (u16)ret;
@@ -611,7 +617,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
/* get the key and salt */
ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
if (ret) {
- netdev_err(dev, "Failed to get key data for Rx SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
}
@@ -671,7 +677,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
} else {
/* no match and no empty slot */
- netdev_err(dev, "No space for SA in Rx IP SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx IP SA table");
memset(&rsa, 0, sizeof(rsa));
return -ENOSPC;
}
@@ -706,7 +712,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
/* find the first unused index */
ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
if (ret < 0) {
- netdev_err(dev, "No space for SA in Tx table\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Tx table");
return ret;
}
sa_idx = (u16)ret;
@@ -720,7 +726,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
if (ret) {
- netdev_err(dev, "Failed to get key data for Tx SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
return ret;
}
@@ -757,7 +763,7 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
u32 zerobuf[4] = {0, 0, 0, 0};
u16 sa_idx;
- if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
struct rx_sa *rsa;
u8 ipi;
@@ -903,7 +909,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
/* Tx IPsec offload doesn't seem to work on this
* device, so block these requests for now.
*/
- if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) {
+ if (sam->dir != XFRM_DEV_OFFLOAD_IN) {
err = -EOPNOTSUPP;
goto err_out;
}
@@ -914,7 +920,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
goto err_out;
}
- xs->xso.flags = sam->flags;
+ xs->xso.dir = sam->dir;
xs->id.spi = sam->spi;
xs->id.proto = sam->proto;
xs->props.family = sam->family;
@@ -945,7 +951,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));
/* set up the HW offload */
- err = ixgbe_ipsec_add_sa(xs);
+ err = ixgbe_ipsec_add_sa(xs, NULL);
if (err)
goto err_aead;
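The ipsec hunks above switch validation failures from netdev_err() to NL_SET_ERR_MSG_MOD(), so the reason travels back on the netlink request that configured the SA instead of only landing in the kernel log. The pattern in miniature (the specific check is illustrative):

static int example_validate_offload(struct xfrm_state *xs,
				    struct netlink_ext_ack *extack)
{
	if (xs->props.mode != XFRM_MODE_TRANSPORT) {
		/* reported to the requester via the netlink ack, not dmesg */
		NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for ipsec offload");
		return -EINVAL;
	}
	return 0;
}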
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
index d2b64ff8eb4e..809ab51a7842 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h
@@ -74,7 +74,7 @@ struct ixgbe_ipsec {
struct sa_mbx_msg {
__be32 spi;
- u8 flags;
+ u8 dir;
u8 proto;
u16 family;
__be32 addr[4];
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 86b11164655e..0ee943db3dc9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -874,8 +874,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
#endif
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- ixgbe_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -1036,9 +1035,6 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
adapter->q_vector[v_idx] = NULL;
__netif_napi_del(&q_vector->napi);
- if (static_key_enabled(&ixgbe_xdp_locking_key))
- static_branch_dec(&ixgbe_xdp_locking_key);
-
/*
* after a call to __netif_napi_del() napi may still be used and
* ixgbe_get_stats64() might access the rings on this vector,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 45e2ec4d264d..5d83c887a3fc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -36,6 +36,7 @@
#include <net/tc_act/tc_mirred.h>
#include <net/vxlan.h>
#include <net/mpls.h>
+#include <net/netdev_queues.h>
#include <net/xdp_sock_drv.h>
#include <net/xfrm.h>
@@ -151,8 +152,8 @@ MODULE_PARM_DESC(max_vfs,
"Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */
-static unsigned int allow_unsupported_sfp;
-module_param(allow_unsupported_sfp, uint, 0);
+static bool allow_unsupported_sfp;
+module_param(allow_unsupported_sfp, bool, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
"Allow unsupported and untested SFP+ modules on 82599-based adapters");
@@ -1119,6 +1120,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0;
unsigned int budget = q_vector->tx.work_limit;
unsigned int i = tx_ring->next_to_clean;
+ struct netdev_queue *txq;
if (test_bit(__IXGBE_DOWN, &adapter->state))
return true;
@@ -1249,24 +1251,14 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
if (ring_is_xdp(tx_ring))
return !!budget;
- netdev_tx_completed_queue(txring_txq(tx_ring),
- total_packets, total_bytes);
-
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
- (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
- /* Make sure that anybody stopping the queue after this
- * sees the new next_to_clean.
- */
- smp_mb();
- if (__netif_subqueue_stopped(tx_ring->netdev,
- tx_ring->queue_index)
- && !test_bit(__IXGBE_DOWN, &adapter->state)) {
- netif_wake_subqueue(tx_ring->netdev,
- tx_ring->queue_index);
- ++tx_ring->tx_stats.restart_queue;
- }
- }
+ txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+ if (!__netif_txq_completed_wake(txq, total_packets, total_bytes,
+ ixgbe_desc_unused(tx_ring),
+ TX_WAKE_THRESHOLD,
+					!netif_carrier_ok(tx_ring->netdev) ||
+ test_bit(__IXGBE_DOWN, &adapter->state)))
+ ++tx_ring->tx_stats.restart_queue;
return !!budget;
}
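__netif_txq_completed_wake() used above takes the completion counts, a free-descriptor getter, a wake threshold and a "down" condition under which the queue must stay stopped; the queue is woken only when enough descriptors are free and that condition is false. A simplified model of the decision (the real macro in include/net/netdev_queues.h also reports the completions to BQL and adds the required memory barriers):

static int completed_wake_model(struct netdev_queue *txq, unsigned int pkts,
				unsigned int free_desc,
				unsigned int start_thrs, bool down_cond)
{
	if (!pkts || free_desc <= start_thrs)
		return -1;		/* not enough progress yet */
	if (netif_tx_queue_stopped(txq) && !down_cond) {
		netif_tx_wake_queue(txq);
		return 0;		/* queue was restarted */
	}
	return 1;			/* queue was already running */
}

In the hunk above the down condition is "carrier is off or the interface is going down", matching the open-coded checks that the conversion removes.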
@@ -2170,7 +2162,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* build an skb to around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
@@ -2235,7 +2227,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
result = IXGBE_XDP_REDIR;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
@@ -2344,6 +2336,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
hard_start = page_address(rx_buffer->page) +
rx_buffer->page_offset - offset;
xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ xdp_buff_clear_frags_flag(&xdp);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
@@ -3247,8 +3240,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
/* If Flow Director is enabled, set interrupt affinity */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
/* assign the mask for this irq */
- irq_set_affinity_hint(entry->vector,
- &q_vector->affinity_mask);
+ irq_update_affinity_hint(entry->vector,
+ &q_vector->affinity_mask);
}
}
@@ -3264,8 +3257,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
free_queue_irqs:
while (vector) {
vector--;
- irq_set_affinity_hint(adapter->msix_entries[vector].vector,
- NULL);
+ irq_update_affinity_hint(adapter->msix_entries[vector].vector,
+ NULL);
free_irq(adapter->msix_entries[vector].vector,
adapter->q_vector[vector]);
}
@@ -3398,7 +3391,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
continue;
/* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(entry->vector, NULL);
+ irq_update_affinity_hint(entry->vector, NULL);
free_irq(entry->vector, q_vector);
}
@@ -5051,12 +5044,12 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
if (hw->mac.type == ixgbe_mac_82598EB)
- netif_set_gso_max_size(adapter->netdev, 65536);
+ netif_set_tso_max_size(adapter->netdev, 65536);
return;
}
if (hw->mac.type == ixgbe_mac_82598EB)
- netif_set_gso_max_size(adapter->netdev, 32768);
+ netif_set_tso_max_size(adapter->netdev, 32768);
#ifdef IXGBE_FCOE
if (adapter->netdev->features & NETIF_F_FCOE_MTU)
@@ -5160,7 +5153,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
}
/**
- * ixgbe_lpbthresh - calculate low water mark for for flow control
+ * ixgbe_lpbthresh - calculate low water mark for flow control
*
* @adapter: board private structure to calculate for
* @pb: packet buffer to calculate
@@ -5548,6 +5541,47 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
return ret;
}
+/**
+ * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset
+ * @adapter: board private structure
+ *
+ * On a reset we need to clear out the VF stats or accounting gets
+ * messed up because they're not clear on read.
+ **/
+static void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ adapter->vfinfo[i].last_vfstats.gprc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gprc +=
+ adapter->vfinfo[i].vfstats.gprc;
+ adapter->vfinfo[i].vfstats.gprc = 0;
+ adapter->vfinfo[i].last_vfstats.gptc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gptc +=
+ adapter->vfinfo[i].vfstats.gptc;
+ adapter->vfinfo[i].vfstats.gptc = 0;
+ adapter->vfinfo[i].last_vfstats.gorc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gorc +=
+ adapter->vfinfo[i].vfstats.gorc;
+ adapter->vfinfo[i].vfstats.gorc = 0;
+ adapter->vfinfo[i].last_vfstats.gotc =
+ IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i));
+ adapter->vfinfo[i].saved_rst_vfstats.gotc +=
+ adapter->vfinfo[i].vfstats.gotc;
+ adapter->vfinfo[i].vfstats.gotc = 0;
+ adapter->vfinfo[i].last_vfstats.mprc =
+ IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i));
+ adapter->vfinfo[i].saved_rst_vfstats.mprc +=
+ adapter->vfinfo[i].vfstats.mprc;
+ adapter->vfinfo[i].vfstats.mprc = 0;
+ }
+}
+
static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -5683,10 +5717,14 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
adapter->link_check_timeout = jiffies;
mod_timer(&adapter->service_timer, jiffies);
+ ixgbe_clear_vf_stats_counters(adapter);
/* Set PF Reset Done bit so PF/VF Mail Ops can work */
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+ /* update setting rx tx for all active vfs */
+ ixgbe_set_all_vfs(adapter);
}
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
@@ -5948,8 +5986,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
case IXGBE_ERR_SFP_NOT_PRESENT:
case IXGBE_ERR_SFP_NOT_SUPPORTED:
break;
- case IXGBE_ERR_MASTER_REQUESTS_PENDING:
- e_dev_err("master disable timed out\n");
+ case IXGBE_ERR_PRIMARY_REQUESTS_PENDING:
+ e_dev_err("primary disable timed out\n");
break;
case IXGBE_ERR_EEPROM_VERSION:
/* We are running on a pre-production device, log a warning */
@@ -6144,11 +6182,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
for (i = 0 ; i < adapter->num_vfs; i++)
adapter->vfinfo[i].clear_to_send = false;
- /* ping all the active vfs to let them know we are going down */
- ixgbe_ping_all_vfs(adapter);
-
- /* Disable all VFTE/VFRE TX/RX */
- ixgbe_disable_tx_rx(adapter);
+ /* update setting rx tx for all active vfs */
+ ixgbe_set_all_vfs(adapter);
}
/* disable transmits in the hardware now that interrupts are off */
@@ -6402,6 +6437,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
/* n-tuple support exists, always init our spinlock */
spin_lock_init(&adapter->fdir_perfect_lock);
+ /* init spinlock to avoid concurrency of VF resources */
+ spin_lock_init(&adapter->vfs_lock);
+
#ifdef CONFIG_IXGBE_DCB
ixgbe_init_dcb(adapter);
#endif
@@ -6449,6 +6487,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
set_bit(0, adapter->fwd_bitmask);
set_bit(__IXGBE_DOWN, &adapter->state);
+ /* enable locking for XDP_TX if we have more CPUs than queues */
+ if (nr_cpu_ids > IXGBE_MAX_XDP_QS)
+ static_branch_enable(&ixgbe_xdp_locking_key);
+
return 0;
}
@@ -6601,7 +6643,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)
goto err;
- rx_ring->xdp_prog = adapter->xdp_prog;
+ WRITE_ONCE(rx_ring->xdp_prog, adapter->xdp_prog);
return 0;
err:
@@ -6732,6 +6774,18 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @adapter: device handle, pointer to adapter
+ */
+static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
+{
+ if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+ return IXGBE_RXBUFFER_2K;
+ else
+ return IXGBE_RXBUFFER_3K;
+}
+
+/**
* ixgbe_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -6742,18 +6796,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (adapter->xdp_prog) {
- int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
- int i;
+ if (ixgbe_enabled_xdp_adapter(adapter)) {
+ int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
-
- if (new_frame_size > ixgbe_rx_bufsz(ring)) {
- e_warn(probe, "Requested MTU size is not supported with XDP\n");
- return -EINVAL;
- }
+ if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) {
+ e_warn(probe, "Requested MTU size is not supported with XDP\n");
+ return -EINVAL;
}
}
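For reference, IXGBE_PKT_HDR_PAD as defined earlier is ETH_HLEN (14) + ETH_FCS_LEN (4) + 2 * VLAN_HLEN (2 * 4) = 26 bytes, so assuming the usual IXGBE_RXBUFFER_3K of 3072 bytes, the check above accepts MTUs up to 3072 - 26 = 3046 when an XDP program is loaded and legacy-rx is off.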
@@ -7270,6 +7318,32 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
netdev->stats.rx_length_errors = hwstats->rlec;
netdev->stats.rx_crc_errors = hwstats->crcerrs;
netdev->stats.rx_missed_errors = total_mpc;
+
+ /* VF Stats Collection - skip while resetting because these
+ * are not clear on read and otherwise you'll sometimes get
+ * crazy values.
+ */
+ if (!test_bit(__IXGBE_RESETTING, &adapter->state)) {
+ for (i = 0; i < adapter->num_vfs; i++) {
+ UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i),
+ adapter->vfinfo[i].last_vfstats.gprc,
+ adapter->vfinfo[i].vfstats.gprc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i),
+ adapter->vfinfo[i].last_vfstats.gptc,
+ adapter->vfinfo[i].vfstats.gptc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i),
+ IXGBE_PVFGORC_MSB(i),
+ adapter->vfinfo[i].last_vfstats.gorc,
+ adapter->vfinfo[i].vfstats.gorc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i),
+ IXGBE_PVFGOTC_MSB(i),
+ adapter->vfinfo[i].last_vfstats.gotc,
+ adapter->vfinfo[i].vfstats.gotc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i),
+ adapter->vfinfo[i].last_vfstats.mprc,
+ adapter->vfinfo[i].vfstats.mprc);
+ }
+ }
}
/**
@@ -7613,6 +7687,27 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
}
#ifdef CONFIG_PCI_IOV
+static void ixgbe_bad_vf_abort(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
+ adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF) {
+ adapter->vfinfo[vf].primary_abort_count++;
+ if (adapter->vfinfo[vf].primary_abort_count ==
+ IXGBE_PRIMARY_ABORT_LIMIT) {
+ ixgbe_set_vf_link_state(adapter, vf,
+ IFLA_VF_LINK_STATE_DISABLE);
+ adapter->vfinfo[vf].primary_abort_count = 0;
+
+ e_info(drv,
+ "Malicious Driver Detection event detected on PF %d VF %d MAC: %pM mdd-disable-vf=on",
+ hw->bus.func, vf,
+ adapter->vfinfo[vf].vf_mac_addresses);
+ }
+ }
+}
+
static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -7644,8 +7739,10 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
continue;
pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
- status_reg & PCI_STATUS_REC_MASTER_ABORT)
+ status_reg & PCI_STATUS_REC_MASTER_ABORT) {
+ ixgbe_bad_vf_abort(adapter, vf);
pcie_flr(vfdev);
+ }
}
}
@@ -8169,22 +8266,10 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-
- /* Herbert's original patch had:
- * smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it.
- */
- smp_mb();
-
- /* We need to check again in a case another CPU has just
- * made room available.
- */
- if (likely(ixgbe_desc_unused(tx_ring) < size))
+ if (!netif_subqueue_try_stop(tx_ring->netdev, tx_ring->queue_index,
+ ixgbe_desc_unused(tx_ring), size))
return -EBUSY;
- /* A reprieve! - use start_queue because it doesn't call schedule */
- netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
return 0;
}
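netif_subqueue_try_stop() used above is the producer-side counterpart of the completed-wake helper in the Tx clean path: stop the subqueue, then re-check the free-descriptor count and restart it if room appeared in the meantime. A simplified model, with the memory barrier of the real macro omitted (the caller treats a zero return as "stay stopped"):

static int try_stop_model(struct net_device *dev, u16 idx,
			  unsigned int free_desc, unsigned int needed)
{
	netif_stop_subqueue(dev, idx);
	if (free_desc < needed)
		return 0;		/* still full: remain stopped */
	netif_start_subqueue(dev, idx);	/* room appeared: keep running */
	return 1;
}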
@@ -8548,57 +8633,83 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
struct xdp_frame *xdpf)
{
- struct ixgbe_tx_buffer *tx_buffer;
- union ixgbe_adv_tx_desc *tx_desc;
- u32 len, cmd_type;
- dma_addr_t dma;
- u16 i;
-
- len = xdpf->len;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 i = 0, index = ring->next_to_use;
+ struct ixgbe_tx_buffer *tx_head = &ring->tx_buffer_info[index];
+ struct ixgbe_tx_buffer *tx_buff = tx_head;
+ union ixgbe_adv_tx_desc *tx_desc = IXGBE_TX_DESC(ring, index);
+ u32 cmd_type, len = xdpf->len;
+ void *data = xdpf->data;
- if (unlikely(!ixgbe_desc_unused(ring)))
+ if (unlikely(ixgbe_desc_unused(ring) < 1 + nr_frags))
return IXGBE_XDP_CONSUMED;
- dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(ring->dev, dma))
- return IXGBE_XDP_CONSUMED;
+ tx_head->bytecount = xdp_get_frame_len(xdpf);
+ tx_head->gso_segs = 1;
+ tx_head->xdpf = xdpf;
- /* record the location of the first descriptor for this packet */
- tx_buffer = &ring->tx_buffer_info[ring->next_to_use];
- tx_buffer->bytecount = len;
- tx_buffer->gso_segs = 1;
- tx_buffer->protocol = 0;
+ tx_desc->read.olinfo_status =
+ cpu_to_le32(tx_head->bytecount << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ for (;;) {
+ dma_addr_t dma;
- i = ring->next_to_use;
- tx_desc = IXGBE_TX_DESC(ring, i);
+ dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring->dev, dma))
+ goto unmap;
- dma_unmap_len_set(tx_buffer, len, len);
- dma_unmap_addr_set(tx_buffer, dma, dma);
- tx_buffer->xdpf = xdpf;
+ dma_unmap_len_set(tx_buff, len, len);
+ dma_unmap_addr_set(tx_buff, dma, dma);
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT |
+ IXGBE_ADVTXD_DCMD_IFCS | len;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_buff->protocol = 0;
+
+ if (++index == ring->count)
+ index = 0;
+
+ if (i == nr_frags)
+ break;
+ tx_buff = &ring->tx_buffer_info[index];
+ tx_desc = IXGBE_TX_DESC(ring, index);
+ tx_desc->read.olinfo_status = 0;
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ len = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
/* put descriptor type bits */
- cmd_type = IXGBE_ADVTXD_DTYP_DATA |
- IXGBE_ADVTXD_DCMD_DEXT |
- IXGBE_ADVTXD_DCMD_IFCS;
- cmd_type |= len | IXGBE_TXD_CMD;
- tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
- tx_desc->read.olinfo_status =
- cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+ tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
/* Avoid any potential race with xdp_xmit and cleanup */
smp_wmb();
- /* set next_to_watch value indicating a packet is present */
- i++;
- if (i == ring->count)
- i = 0;
-
- tx_buffer->next_to_watch = tx_desc;
- ring->next_to_use = i;
+ tx_head->next_to_watch = tx_desc;
+ ring->next_to_use = index;
return IXGBE_XDP_TX;
+
+unmap:
+ for (;;) {
+ tx_buff = &ring->tx_buffer_info[index];
+ if (dma_unmap_len(tx_buff, len))
+ dma_unmap_page(ring->dev, dma_unmap_addr(tx_buff, dma),
+ dma_unmap_len(tx_buff, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buff, len, 0);
+ if (tx_buff == tx_head)
+ break;
+
+ if (!index)
+ index += ring->count;
+ index--;
+ }
+
+ return IXGBE_XDP_CONSUMED;
}
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
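The rewritten ixgbe_xmit_xdp_ring() above maps the head buffer and then each fragment of a multi-buffer xdp_frame. Just the iteration, sketched with a hypothetical per-buffer callback:

/* Illustrative walk over an xdp_frame and its fragments; handle() stands
 * in for whatever per-buffer work a driver performs.
 */
static void for_each_xdp_buffer(struct xdp_frame *xdpf,
				void (*handle)(void *data, unsigned int len))
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	u8 nr_frags = xdp_frame_has_frags(xdpf) ? sinfo->nr_frags : 0;
	u8 i;

	handle(xdpf->data, xdpf->len);		/* head buffer first */
	for (i = 0; i < nr_frags; i++)
		handle(skb_frag_address(&sinfo->frags[i]),
		       skb_frag_size(&sinfo->frags[i]));
}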
@@ -8691,7 +8802,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
if (skb_cow_head(skb, 0))
goto out_drop;
- vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr = skb_vlan_eth_hdr(skb);
vhdr->h_vlan_TCI = htons(tx_flags >>
IXGBE_TX_FLAGS_VLAN_SHIFT);
} else {
@@ -8816,7 +8927,8 @@ ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
int regnum = addr;
if (devad != MDIO_DEVAD_NONE)
- regnum |= (devad << 16) | MII_ADDR_C45;
+ return mdiobus_c45_read(adapter->mii_bus, prtad,
+ devad, regnum);
return mdiobus_read(adapter->mii_bus, prtad, regnum);
}
@@ -8839,7 +8951,8 @@ static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
int regnum = addr;
if (devad != MDIO_DEVAD_NONE)
- regnum |= (devad << 16) | MII_ADDR_C45;
+ return mdiobus_c45_write(adapter->mii_bus, prtad, devad,
+ regnum, value);
return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
}
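The mdio callbacks above now route clause-45 accesses through the dedicated mdiobus_c45_read()/mdiobus_c45_write() helpers instead of folding the device address into regnum with MII_ADDR_C45. The dispatch, condensed into an illustrative standalone reader:

static int example_mdio_read(struct mii_bus *bus, int prtad, int devad,
			     int regnum)
{
	if (devad != MDIO_DEVAD_NONE)			/* clause 45 */
		return mdiobus_c45_read(bus, prtad, devad, regnum);
	return mdiobus_read(bus, prtad, regnum);	/* clause 22 */
}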
@@ -8920,10 +9033,10 @@ static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
if (ring) {
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->tx_packets += packets;
stats->tx_bytes += bytes;
}
@@ -8943,10 +9056,10 @@ static void ixgbe_get_stats64(struct net_device *netdev,
if (ring) {
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
packets = ring->stats.packets;
bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
}
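The stats hunks drop the _irq suffix but keep the same lock-free reader loop: retry until a consistent snapshot of the 64-bit counters is observed. The reader side on its own, assuming the ring layout shown above (the helper name is illustrative):

static void read_ring_stats(struct ixgbe_ring *ring, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		*packets = ring->stats.packets;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));
}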
@@ -8972,6 +9085,23 @@ static void ixgbe_get_stats64(struct net_device *netdev,
stats->rx_missed_errors = netdev->stats.rx_missed_errors;
}
+static int ixgbe_ndo_get_vf_stats(struct net_device *netdev, int vf,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (vf < 0 || vf >= adapter->num_vfs)
+ return -EINVAL;
+
+ vf_stats->rx_packets = adapter->vfinfo[vf].vfstats.gprc;
+ vf_stats->rx_bytes = adapter->vfinfo[vf].vfstats.gorc;
+ vf_stats->tx_packets = adapter->vfinfo[vf].vfstats.gptc;
+ vf_stats->tx_bytes = adapter->vfinfo[vf].vfstats.gotc;
+ vf_stats->multicast = adapter->vfinfo[vf].vfstats.mprc;
+
+ return 0;
+}
+
#ifdef CONFIG_IXGBE_DCB
/**
* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
@@ -10144,8 +10274,6 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
*/
if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2)
return -ENOMEM;
- else if (nr_cpu_ids > IXGBE_MAX_XDP_QS)
- static_branch_inc(&ixgbe_xdp_locking_key);
old_prog = xchg(&adapter->xdp_prog, prog);
need_reset = (!!prog != !!old_prog);
@@ -10159,14 +10287,15 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
synchronize_rcu();
err = ixgbe_setup_tc(dev, adapter->hw_tcs);
- if (err) {
- rcu_assign_pointer(adapter->xdp_prog, old_prog);
+ if (err)
return -EINVAL;
- }
+ if (!prog)
+ xdp_features_clear_redirect_target(dev);
} else {
- for (i = 0; i < adapter->num_rx_queues; i++)
- (void)xchg(&adapter->rx_ring[i]->xdp_prog,
- adapter->xdp_prog);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ WRITE_ONCE(adapter->rx_ring[i]->xdp_prog,
+ adapter->xdp_prog);
+ }
}
if (old_prog)
@@ -10182,6 +10311,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
if (adapter->xdp_ring[i]->xsk_pool)
(void)ixgbe_xsk_wakeup(adapter->netdev, i,
XDP_WAKEUP_RX);
+ xdp_features_set_redirect_target(dev, true);
}
return 0;
@@ -10284,9 +10414,11 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
.ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
+ .ndo_set_vf_link_state = ixgbe_ndo_set_vf_link_state,
.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
.ndo_set_vf_trust = ixgbe_ndo_set_vf_trust,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
+ .ndo_get_vf_stats = ixgbe_ndo_get_vf_stats,
.ndo_get_stats64 = ixgbe_get_stats64,
.ndo_setup_tc = __ixgbe_setup_tc,
#ifdef IXGBE_FCOE
@@ -10632,9 +10764,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbe_adapter *adapter = NULL;
struct ixgbe_hw *hw;
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
- int i, err, pci_using_dac, expected_gts;
unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
+ int i, err, expected_gts;
bool disable_dev = false;
#ifdef IXGBE_FCOE
u16 device_caps;
@@ -10654,16 +10786,11 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev,
+ "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_mem_regions(pdev, ixgbe_driver_name);
@@ -10673,8 +10800,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
pci_save_state(pdev);
@@ -10714,7 +10839,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &ixgbe_netdev_ops;
ixgbe_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
/* Setup hw api */
hw->mac.ops = *ii->mac_ops;
@@ -10750,6 +10875,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ adapter->flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
+
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
@@ -10861,8 +10989,7 @@ skip_sriov:
netdev->hw_features |= NETIF_F_NTUPLE |
NETIF_F_HW_TC;
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->hw_enc_features |= netdev->vlan_features;
@@ -10880,6 +11007,9 @@ skip_sriov:
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
/* MTU range: 68 - 9710 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
@@ -11003,7 +11133,7 @@ skip_sriov:
err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
if (err)
- strlcpy(part_str, "Unknown", sizeof(part_str));
+ strscpy(part_str, "Unknown", sizeof(part_str));
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, hw->phy.sfp_type,
@@ -11100,7 +11230,6 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -11189,8 +11318,6 @@ static void ixgbe_remove(struct pci_dev *pdev)
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
-
if (disable_dev)
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index a148534d7256..8f4316b19278 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -85,6 +85,8 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_IPSEC_ADD 0x0d
#define IXGBE_VF_IPSEC_DEL 0x0e
+#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 24aa97f993ca..689470c1e8ad 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -680,14 +680,14 @@ static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
}
/**
- * ixgbe_mii_bus_read_generic - Read a clause 22/45 register with gssr flags
+ * ixgbe_mii_bus_read_generic_c22 - Read a clause 22 register with gssr flags
* @hw: pointer to hardware structure
* @addr: address
* @regnum: register number
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr,
- int regnum, u32 gssr)
+static s32 ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr,
+ int regnum, u32 gssr)
{
u32 hwaddr, cmd;
s32 data;
@@ -696,31 +696,52 @@ static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr,
return -EBUSY;
hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
- if (regnum & MII_ADDR_C45) {
- hwaddr |= regnum & GENMASK(21, 0);
- cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
- } else {
- hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
- cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL |
- IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND;
- }
+ hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
+ cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL |
+ IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND;
data = ixgbe_msca_cmd(hw, cmd);
if (data < 0)
goto mii_bus_read_done;
- /* For a clause 45 access the address cycle just completed, we still
- * need to do the read command, otherwise just get the data
- */
- if (!(regnum & MII_ADDR_C45))
- goto do_mii_bus_read;
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0);
+
+mii_bus_read_done:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return data;
+}
+
+/**
+ * ixgbe_mii_bus_read_generic_c45 - Read a clause 45 register with gssr flags
+ * @hw: pointer to hardware structure
+ * @addr: address
+ * @devad: device address to read
+ * @regnum: register number
+ * @gssr: semaphore flags to acquire
+ **/
+static s32 ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr,
+ int devad, int regnum, u32 gssr)
+{
+ u32 hwaddr, cmd;
+ s32 data;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+ return -EBUSY;
+
+ hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
+ hwaddr |= devad << 16 | regnum;
+ cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
+
+ data = ixgbe_msca_cmd(hw, cmd);
+ if (data < 0)
+ goto mii_bus_read_done;
cmd = hwaddr | IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND;
data = ixgbe_msca_cmd(hw, cmd);
if (data < 0)
goto mii_bus_read_done;
-do_mii_bus_read:
data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0);
@@ -730,15 +751,15 @@ mii_bus_read_done:
}
/**
- * ixgbe_mii_bus_write_generic - Write a clause 22/45 register with gssr flags
+ * ixgbe_mii_bus_write_generic_c22 - Write a clause 22 register with gssr flags
* @hw: pointer to hardware structure
* @addr: address
* @regnum: register number
* @val: value to write
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr,
- int regnum, u16 val, u32 gssr)
+static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
+ int regnum, u16 val, u32 gssr)
{
u32 hwaddr, cmd;
s32 err;
@@ -749,20 +770,43 @@ static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr,
IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val);
hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
- if (regnum & MII_ADDR_C45) {
- hwaddr |= regnum & GENMASK(21, 0);
- cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
- } else {
- hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
- cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
- IXGBE_MSCA_MDI_COMMAND;
- }
+ hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
+ cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ err = ixgbe_msca_cmd(hw, cmd);
+
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return err;
+}
+
+/**
+ * ixgbe_mii_bus_write_generic_c45 - Write a clause 45 register with gssr flags
+ * @hw: pointer to hardware structure
+ * @addr: address
+ * @devad: device address to read
+ * @regnum: register number
+ * @val: value to write
+ * @gssr: semaphore flags to acquire
+ **/
+static s32 ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr,
+ int devad, int regnum, u16 val,
+ u32 gssr)
+{
+ u32 hwaddr, cmd;
+ s32 err;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+ return -EBUSY;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val);
+
+ hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
+ hwaddr |= devad << 16 | regnum;
+ cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
- /* For clause 45 this is an address cycle, for clause 22 this is the
- * entire transaction
- */
err = ixgbe_msca_cmd(hw, cmd);
- if (err < 0 || !(regnum & MII_ADDR_C45))
+ if (err < 0)
goto mii_bus_write_done;
cmd = hwaddr | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND;
@@ -774,70 +818,144 @@ mii_bus_write_done:
}
/**
- * ixgbe_mii_bus_read - Read a clause 22/45 register
+ * ixgbe_mii_bus_read_c22 - Read a clause 22 register
* @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
+static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
u32 gssr = hw->phy.phy_semaphore_mask;
- return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
+ return ixgbe_mii_bus_read_generic_c22(hw, addr, regnum, gssr);
}
/**
- * ixgbe_mii_bus_write - Write a clause 22/45 register
+ * ixgbe_mii_bus_read_c45 - Read a clause 45 register
* @bus: pointer to mii_bus structure which points to our driver private
+ * @devad: device address to read
* @addr: address
* @regnum: register number
+ **/
+static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
+ int regnum)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ return ixgbe_mii_bus_read_generic_c45(hw, addr, devad, regnum, gssr);
+}
+
+/**
+ * ixgbe_mii_bus_write_c22 - Write a clause 22 register
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @regnum: register number
+ * @val: value to write
+ **/
+static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ return ixgbe_mii_bus_write_generic_c22(hw, addr, regnum, val, gssr);
+}
+
+/**
+ * ixgbe_mii_bus_write_c45 - Write a clause 45 register
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @devad: device address to read
+ * @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum,
- u16 val)
+static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
+ int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
u32 gssr = hw->phy.phy_semaphore_mask;
- return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
+ return ixgbe_mii_bus_write_generic_c45(hw, addr, devad, regnum, val,
+ gssr);
}
/**
- * ixgbe_x550em_a_mii_bus_read - Read a clause 22/45 register on x550em_a
+ * ixgbe_x550em_a_mii_bus_read_c22 - Read a clause 22 register on x550em_a
* @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr,
- int regnum)
+static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
+ int regnum)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
u32 gssr = hw->phy.phy_semaphore_mask;
gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
- return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
+ return ixgbe_mii_bus_read_generic_c22(hw, addr, regnum, gssr);
}
/**
- * ixgbe_x550em_a_mii_bus_write - Write a clause 22/45 register on x550em_a
+ * ixgbe_x550em_a_mii_bus_read_c45 - Read a clause 45 register on x550em_a
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @devad: device address to read
+ * @regnum: register number
+ **/
+static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
+ int devad, int regnum)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
+ return ixgbe_mii_bus_read_generic_c45(hw, addr, devad, regnum, gssr);
+}
+
+/**
+ * ixgbe_x550em_a_mii_bus_write_c22 - Write a clause 22 register on x550em_a
* @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_x550em_a_mii_bus_write(struct mii_bus *bus, int addr,
- int regnum, u16 val)
+static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
+ int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
u32 gssr = hw->phy.phy_semaphore_mask;
gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
- return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
+ return ixgbe_mii_bus_write_generic_c22(hw, addr, regnum, val, gssr);
+}
+
+/**
+ * ixgbe_x550em_a_mii_bus_write_c45 - Write a clause 45 register on x550em_a
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @devad: device address to write
+ * @regnum: register number
+ * @val: value to write
+ **/
+static s32 ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr,
+ int devad, int regnum, u16 val)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
+ return ixgbe_mii_bus_write_generic_c45(hw, addr, devad, regnum, val,
+ gssr);
}
/**
@@ -855,9 +973,11 @@ static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)
rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
if (rp_pdev && rp_pdev->subordinate) {
bus = rp_pdev->subordinate->number;
+ pci_dev_put(rp_pdev);
return pci_get_domain_bus_and_slot(0, bus, 0);
}
+ pci_dev_put(rp_pdev);
return NULL;
}
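For context on the two pci_dev_put() calls added above: pci_get_domain_bus_and_slot() returns its device with an elevated reference count, so every successful lookup must be balanced with a put once the caller is finished (pci_dev_put() tolerates a NULL argument). A minimal sketch of the corrected pattern, with hypothetical naming:

static struct pci_dev *example_get_first_secondary(unsigned int devfn)
{
	struct pci_dev *rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
	struct pci_dev *child = NULL;

	if (rp_pdev && rp_pdev->subordinate)
		child = pci_get_domain_bus_and_slot(0,
						    rp_pdev->subordinate->number,
						    0);

	pci_dev_put(rp_pdev);	/* drop the root port reference on every path */
	return child;		/* the caller owns this reference, if any */
}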
@@ -874,6 +994,7 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
struct ixgbe_adapter *adapter = hw->back;
struct pci_dev *pdev = adapter->pdev;
struct pci_dev *func0_pdev;
+ bool has_mii = false;
/* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices
* are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0
@@ -884,15 +1005,16 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));
if (func0_pdev) {
if (func0_pdev == pdev)
- return true;
- else
- return false;
+ has_mii = true;
+ goto out;
}
func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));
if (func0_pdev == pdev)
- return true;
+ has_mii = true;
- return false;
+out:
+ pci_dev_put(func0_pdev);
+ return has_mii;
}
/**
@@ -905,8 +1027,11 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
**/
s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
{
- s32 (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
- s32 (*read)(struct mii_bus *bus, int addr, int regnum);
+ s32 (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ s32 (*read_c22)(struct mii_bus *bus, int addr, int regnum);
+ s32 (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum,
+ u16 val);
+ s32 (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum);
struct ixgbe_adapter *adapter = hw->back;
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &adapter->netdev->dev;
@@ -925,12 +1050,16 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
if (!ixgbe_x550em_a_has_mii(hw))
return 0;
- read = &ixgbe_x550em_a_mii_bus_read;
- write = &ixgbe_x550em_a_mii_bus_write;
+ read_c22 = ixgbe_x550em_a_mii_bus_read_c22;
+ write_c22 = ixgbe_x550em_a_mii_bus_write_c22;
+ read_c45 = ixgbe_x550em_a_mii_bus_read_c45;
+ write_c45 = ixgbe_x550em_a_mii_bus_write_c45;
break;
default:
- read = &ixgbe_mii_bus_read;
- write = &ixgbe_mii_bus_write;
+ read_c22 = ixgbe_mii_bus_read_c22;
+ write_c22 = ixgbe_mii_bus_write_c22;
+ read_c45 = ixgbe_mii_bus_read_c45;
+ write_c45 = ixgbe_mii_bus_write_c45;
break;
}
@@ -938,8 +1067,10 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
if (!bus)
return -ENOMEM;
- bus->read = read;
- bus->write = write;
+ bus->read = read_c22;
+ bus->write = write_c22;
+ bus->read_c45 = read_c45;
+ bus->write_c45 = write_c45;
/* Use the position of the device in the PCI hierarchy as the id */
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name,
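With the split callbacks registered above, the MDIO core routes clause-22 accesses through bus->read/bus->write and clause-45 accesses through bus->read_c45/bus->write_c45; the old MII_ADDR_C45 flag in regnum is no longer involved. A hedged sketch of a consumer, assuming the generic mdiobus helpers and MII/MDIO register constants from the common headers (none of these names are defined by this driver):

static void example_mdio_reads(struct mii_bus *bus)
{
	/* clause 22: BMCR of the PHY at address 0, dispatched to bus->read */
	int c22 = mdiobus_read(bus, 0, MII_BMCR);

	/* clause 45: PMA/PMD control 1 (devad 1, reg 0), via bus->read_c45 */
	int c45 = mdiobus_c45_read(bus, 0, MDIO_MMD_PMAPMD, MDIO_CTRL1);

	(void)c22;
	(void)c45;
}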
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 23ddfd79fc8b..0310af851086 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -113,12 +113,16 @@
* the sign bit. This register enables software to calculate frequency
* adjustments and apply them directly to the clock rate.
*
- * The math for converting ppb into TIMINCA values is fairly straightforward.
- * TIMINCA value = ( Base_Frequency * ppb ) / 1000000000ULL
+ * The math for converting scaled_ppm into TIMINCA values is fairly
+ * straightforward.
*
- * This assumes that ppb is never high enough to create a value bigger than
- * TIMINCA's 31 bits can store. This is ensured by the stack. Calculating this
- * value is also simple.
+ * TIMINCA value = ( Base_Frequency * scaled_ppm ) / ( 1000000ULL << 16 )
+ *
+ * To avoid overflow, we simply use mul_u64_u64_div_u64.
+ *
+ * This assumes that scaled_ppm is never high enough to create a value bigger
+ * than TIMINCA's 31 bits can store. This is ensured by the stack. The maximum
+ * adjustment, expressed in parts per billion, is also simple to calculate:
* Max ppb = ( Max Adjustment / Base Frequency ) * 1000000000ULL
*
* For the X550, the Max adjustment is +/- 0.5 ns, and the base frequency is
@@ -138,7 +142,6 @@
#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL
#define INCVALUE_MASK 0x7FFFFFFF
#define ISGN 0x80000000
-#define MAX_TIMADJ 0x7FFFFFFF
/**
* ixgbe_ptp_setup_sdp_X540
@@ -434,45 +437,35 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
}
/**
- * ixgbe_ptp_adjfreq_82599
+ * ixgbe_ptp_adjfine_82599
* @ptp: the ptp clock structure
- * @ppb: parts per billion adjustment from base
+ * @scaled_ppm: scaled parts per million adjustment from base
*
- * adjust the frequency of the ptp cycle counter by the
- * indicated ppb from the base frequency.
+ * Adjust the frequency of the ptp cycle counter by the
+ * indicated scaled_ppm from the base frequency.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/
-static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
+static int ixgbe_ptp_adjfine_82599(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
- u64 freq, incval;
- u32 diff;
- int neg_adj = 0;
-
- if (ppb < 0) {
- neg_adj = 1;
- ppb = -ppb;
- }
+ u64 incval;
smp_mb();
incval = READ_ONCE(adapter->base_incval);
-
- freq = incval;
- freq *= ppb;
- diff = div_u64(freq, 1000000000ULL);
-
- incval = neg_adj ? (incval - diff) : (incval + diff);
+ incval = adjust_by_scaled_ppm(incval, scaled_ppm);
switch (hw->mac.type) {
case ixgbe_mac_X540:
if (incval > 0xFFFFFFFFULL)
- e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
+ e_dev_warn("PTP scaled_ppm adjusted SYSTIME rate overflowed!\n");
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, (u32)incval);
break;
case ixgbe_mac_82599EB:
if (incval > 0x00FFFFFFULL)
- e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
+ e_dev_warn("PTP scaled_ppm adjusted SYSTIME rate overflowed!\n");
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
BIT(IXGBE_INCPER_SHIFT_82599) |
((u32)incval & 0x00FFFFFFUL));
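As a worked illustration of the conversion described in the comment block earlier in this file: scaled_ppm carries ppm with a 16-bit binary fraction, so a request of +100 ppm arrives as 100 << 16 = 6553600, and the increment delta is base * scaled_ppm / (1000000 * 2^16). The sketch below spells that out with mul_u64_u64_div_u64() for the non-negative case only (the sign merely selects whether the delta is added to or subtracted from the base, which is what adjust_by_scaled_ppm() does internally); it is illustrative, not the in-tree helper.

static u64 example_incval_delta(u64 base_incval, long scaled_ppm)
{
	/* assumes scaled_ppm >= 0; e.g. +100 ppm arrives as 6553600 */
	return mul_u64_u64_div_u64(base_incval, (u64)scaled_ppm,
				   1000000ULL << 16);
}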
@@ -485,32 +478,29 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
}
/**
- * ixgbe_ptp_adjfreq_X550
+ * ixgbe_ptp_adjfine_X550
* @ptp: the ptp clock structure
- * @ppb: parts per billion adjustment from base
+ * @scaled_ppm: scaled parts per million adjustment from base
*
- * adjust the frequency of the SYSTIME registers by the indicated ppb from base
- * frequency
+ * Adjust the frequency of the SYSTIME registers by the indicated scaled_ppm
+ * from base frequency.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
*/
-static int ixgbe_ptp_adjfreq_X550(struct ptp_clock_info *ptp, s32 ppb)
+static int ixgbe_ptp_adjfine_X550(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
struct ixgbe_hw *hw = &adapter->hw;
- int neg_adj = 0;
- u64 rate = IXGBE_X550_BASE_PERIOD;
+ bool neg_adj;
+ u64 rate;
u32 inca;
- if (ppb < 0) {
- neg_adj = 1;
- ppb = -ppb;
- }
- rate *= ppb;
- rate = div_u64(rate, 1000000000ULL);
+ neg_adj = diff_by_scaled_ppm(IXGBE_X550_BASE_PERIOD, scaled_ppm, &rate);
/* warn if rate is too large */
if (rate >= INCVALUE_MASK)
- e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
+ e_dev_warn("PTP scaled_ppm adjusted SYSTIME rate overflowed!\n");
inca = rate & INCVALUE_MASK;
if (neg_adj)
@@ -992,10 +982,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
bool is_l2 = false;
u32 regval;
- /* reserved for future extensions */
- if (config->flags)
- return -EINVAL;
-
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
@@ -1212,7 +1198,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
struct cyclecounter cc;
unsigned long flags;
u32 incval = 0;
- u32 tsauxc = 0;
u32 fuse0 = 0;
/* For some of the boards below this mask is technically incorrect.
@@ -1247,18 +1232,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
cc.read = ixgbe_ptp_read_X550;
-
- /* enable SYSTIME counter */
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
- tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
- tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
- IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
-
- IXGBE_WRITE_FLUSH(hw);
break;
case ixgbe_mac_X540:
cc.read = ixgbe_ptp_read_82599;
@@ -1291,6 +1264,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
+ * @adapter: the ixgbe private board structure
+ *
+ * Initialize and start the SYSTIME registers.
+ */
+static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 tsauxc;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ case ixgbe_mac_X550:
+ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+
+ /* Reset interrupt settings */
+ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
+
+ /* Activate the SYSTIME counter */
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
+ tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB:
+ /* Reset SYSTIME registers to 0 */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+ break;
+ default:
+ /* Other devices aren't supported */
+ return;
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
* ixgbe_ptp_reset
* @adapter: the ixgbe private board structure
*
@@ -1316,6 +1333,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
ixgbe_ptp_start_cyclecounter(adapter);
+ ixgbe_ptp_init_systime(adapter);
+
spin_lock_irqsave(&adapter->tmreg_lock, flags);
timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
ktime_to_ns(ktime_get_real()));
@@ -1360,7 +1379,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
adapter->ptp_caps.pps = 1;
- adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
+ adapter->ptp_caps.adjfine = ixgbe_ptp_adjfine_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
@@ -1377,7 +1396,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
adapter->ptp_caps.pps = 0;
- adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
+ adapter->ptp_caps.adjfine = ixgbe_ptp_adjfine_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
@@ -1393,7 +1412,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.n_ext_ts = 0;
adapter->ptp_caps.n_per_out = 0;
adapter->ptp_caps.pps = 1;
- adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550;
+ adapter->ptp_caps.adjfine = ixgbe_ptp_adjfine_X550;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 214a38de3f41..29cc60988071 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -77,7 +77,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
adapter->bridge_mode = BRIDGE_MODE_VEB;
- /* limit trafffic classes based on VFs enabled */
+ /* limit traffic classes based on VFs enabled */
if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && (num_vfs < 16)) {
adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
@@ -96,6 +96,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
for (i = 0; i < num_vfs; i++) {
/* enable spoof checking for all VFs */
adapter->vfinfo[i].spoofchk_enabled = true;
+ adapter->vfinfo[i].link_enable = true;
/* We support VF RSS querying only for 82599 and x540
* devices at the moment. These devices share RSS
@@ -204,10 +205,13 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs)
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
unsigned int num_vfs = adapter->num_vfs, vf;
+ unsigned long flags;
int rss;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
/* set num VFs to 0 to prevent access to vfinfo */
adapter->num_vfs = 0;
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
/* put the reference to all of the vf devices */
for (vf = 0; vf < num_vfs; ++vf) {
@@ -820,6 +824,57 @@ static inline void ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf,
}
}
+/**
+ * ixgbe_set_vf_rx_tx - Set VF Rx/Tx enable state
+ * @adapter: Pointer to adapter struct
+ * @vf: VF identifier
+ *
+ * Set or reset the VF's transmit and receive enable bits to match its
+ * link_enable setting
+ **/
+static void ixgbe_set_vf_rx_tx(struct ixgbe_adapter *adapter, int vf)
+{
+ u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg_offset, vf_shift;
+
+ vf_shift = vf % 32;
+ reg_offset = vf / 32;
+
+ reg_cur_tx = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+ reg_cur_rx = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+
+ if (adapter->vfinfo[vf].link_enable) {
+ reg_req_tx = reg_cur_tx | 1 << vf_shift;
+ reg_req_rx = reg_cur_rx | 1 << vf_shift;
+ } else {
+ reg_req_tx = reg_cur_tx & ~(1 << vf_shift);
+ reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
+ }
+
+ /* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
+ * For more info take a look at ixgbe_set_vf_lpe
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ struct net_device *dev = adapter->netdev;
+ int pf_max_frame = dev->mtu + ETH_HLEN;
+
+#if IS_ENABLED(CONFIG_FCOE)
+ if (dev->features & NETIF_F_FCOE_MTU)
+ pf_max_frame = max_t(int, pf_max_frame,
+ IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif /* CONFIG_FCOE */
+
+ if (pf_max_frame > ETH_FRAME_LEN)
+ reg_req_rx = reg_cur_rx & ~(1 << vf_shift);
+ }
+
+ /* Enable/Disable particular VF */
+ if (reg_cur_tx != reg_req_tx)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg_req_tx);
+ if (reg_cur_rx != reg_req_rx)
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg_req_rx);
+}
+
static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
@@ -845,11 +900,6 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
vf_shift = vf % 32;
reg_offset = vf / 32;
- /* enable transmit for vf */
- reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
- reg |= BIT(vf_shift);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
-
/* force drop enable for all VF Rx queues */
reg = IXGBE_QDE_ENABLE;
if (adapter->vfinfo[vf].pf_vlan)
@@ -857,27 +907,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_write_qde(adapter, vf, reg);
- /* enable receive for vf */
- reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
- reg |= BIT(vf_shift);
- /*
- * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
- * For more info take a look at ixgbe_set_vf_lpe
- */
- if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- struct net_device *dev = adapter->netdev;
- int pf_max_frame = dev->mtu + ETH_HLEN;
-
-#ifdef CONFIG_FCOE
- if (dev->features & NETIF_F_FCOE_MTU)
- pf_max_frame = max_t(int, pf_max_frame,
- IXGBE_FCOE_JUMBO_FRAME_SIZE);
-
-#endif /* CONFIG_FCOE */
- if (pf_max_frame > ETH_FRAME_LEN)
- reg &= ~BIT(vf_shift);
- }
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+ ixgbe_set_vf_rx_tx(adapter, vf);
/* enable VF mailbox for further messages */
adapter->vfinfo[vf].clear_to_send = true;
@@ -1157,9 +1187,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
switch (xcast_mode) {
case IXGBEVF_XCAST_MODE_NONE:
- disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
+ disable = IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
- enable = 0;
+ enable = IXGBE_VMOLR_BAM;
break;
case IXGBEVF_XCAST_MODE_MULTI:
disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
@@ -1181,9 +1211,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
return -EPERM;
}
- disable = 0;
+ disable = IXGBE_VMOLR_VPE;
enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
+ IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE;
break;
default:
return -EOPNOTSUPP;
@@ -1202,6 +1232,26 @@ out:
return 0;
}
+static int ixgbe_get_vf_link_state(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ u32 *link_state = &msgbuf[1];
+
+ /* verify the PF is supporting the correct API */
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ *link_state = adapter->vfinfo[vf].link_enable;
+
+ return 0;
+}
+
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
@@ -1267,6 +1317,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
case IXGBE_VF_UPDATE_XCAST_MODE:
retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf);
break;
+ case IXGBE_VF_GET_LINK_STATE:
+ retval = ixgbe_get_vf_link_state(adapter, msgbuf, vf);
+ break;
case IXGBE_VF_IPSEC_ADD:
retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf);
break;
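The IXGBE_VF_GET_LINK_STATE handler added above answers a mailbox query from the VF driver; the VF-side request is not part of this excerpt. The sketch below is a hypothetical counterpart built from the ixgbevf_write_mbx()/ixgbevf_poll_mbx() helpers declared later in this series, assuming the IXGBE_VF_GET_LINK_STATE opcode is visible to the VF through the shared mailbox header. It only illustrates the expected message layout (opcode in msgbuf[0], link state returned in msgbuf[1]).

static s32 example_vf_get_link_state(struct ixgbe_hw *hw, bool *enabled)
{
	u32 msgbuf[2] = { IXGBE_VF_GET_LINK_STATE, 0 };
	s32 err;

	err = ixgbevf_write_mbx(hw, msgbuf, 2);
	if (err)
		return err;

	err = ixgbevf_poll_mbx(hw, msgbuf, 2);
	if (err)
		return err;

	if (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)
		return IXGBE_ERR_MBX;

	*enabled = !!msgbuf[1];
	return 0;
}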
@@ -1305,8 +1358,10 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
void ixgbe_msg_task(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ unsigned long flags;
u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags);
for (vf = 0; vf < adapter->num_vfs; vf++) {
/* process any reset requests */
if (!ixgbe_check_for_rst(hw, vf))
@@ -1320,18 +1375,7 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter)
if (!ixgbe_check_for_ack(hw, vf))
ixgbe_rcv_ack_from_vf(adapter, vf);
}
-}
-
-void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
-
- /* disable transmit and receive for all vfs */
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
-
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+ spin_unlock_irqrestore(&adapter->vfs_lock, flags);
}
static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf)
@@ -1359,6 +1403,21 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
}
}
+/**
+ * ixgbe_set_all_vfs - Update Rx/Tx state for all VFs
+ * @adapter: Pointer to adapter struct
+ *
+ * Re-apply the configured link state, and thus the transmit and receive
+ * enable bits, for every VF
+ **/
+void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_vfs; i++)
+ ixgbe_set_vf_link_state(adapter, i,
+ adapter->vfinfo[i].link_state);
+}
+
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -1656,6 +1715,84 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
return 0;
}
+/**
+ * ixgbe_set_vf_link_state - Set link state
+ * @adapter: Pointer to adapter struct
+ * @vf: VF identifier
+ * @state: required link state
+ *
+ * Force the link state on or off for a single VF
+ **/
+void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state)
+{
+ adapter->vfinfo[vf].link_state = state;
+
+ switch (state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ adapter->vfinfo[vf].link_enable = false;
+ else
+ adapter->vfinfo[vf].link_enable = true;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ adapter->vfinfo[vf].link_enable = true;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ adapter->vfinfo[vf].link_enable = false;
+ break;
+ }
+
+ ixgbe_set_vf_rx_tx(adapter, vf);
+
+ /* restart the VF */
+ adapter->vfinfo[vf].clear_to_send = false;
+ ixgbe_ping_vf(adapter, vf);
+}
+
+/**
+ * ixgbe_ndo_set_vf_link_state - Set link state
+ * @netdev: network interface device structure
+ * @vf: VF identifier
+ * @state: required link state
+ *
+ * Set the link state of a specified VF, regardless of physical link state
+ **/
+int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int ret = 0;
+
+ if (vf < 0 || vf >= adapter->num_vfs) {
+ dev_err(&adapter->pdev->dev,
+ "NDO set VF link - invalid VF identifier %d\n", vf);
+ return -EINVAL;
+ }
+
+ switch (state) {
+ case IFLA_VF_LINK_STATE_ENABLE:
+ dev_info(&adapter->pdev->dev,
+ "NDO set VF %d link state %d - not supported\n",
+ vf, state);
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ dev_info(&adapter->pdev->dev,
+ "NDO set VF %d link state disable\n", vf);
+ ixgbe_set_vf_link_state(adapter, vf, state);
+ break;
+ case IFLA_VF_LINK_STATE_AUTO:
+ dev_info(&adapter->pdev->dev,
+ "NDO set VF %d link state auto\n", vf);
+ ixgbe_set_vf_link_state(adapter, vf, state);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "NDO set VF %d - invalid link state %d\n", vf, state);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
bool setting)
{
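The new ixgbe_ndo_set_vf_link_state() handler is reached through the standard .ndo_set_vf_link_state callback; the actual hookup in ixgbe_main.c is outside this excerpt, so the fragment below is only a hypothetical sketch of how such a handler is typically wired, shown for orientation.

static const struct net_device_ops example_netdev_ops = {
	/* existing callbacks elided */
	.ndo_set_vf_link_state	= ixgbe_ndo_set_vf_link_state,
};

Userspace then reaches it through the usual rtnetlink path, for example "ip link set <pf> vf <n> state disable".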
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 3ec21923c89c..0690ecb8dfa3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -17,8 +17,8 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
#endif
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
-void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
+void ixgbe_set_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos, __be16 vlan_proto);
@@ -31,7 +31,9 @@ int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf,
int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
+int ixgbe_ndo_set_vf_link_state(struct net_device *netdev, int vf, int state);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
+void ixgbe_set_vf_link_state(struct ixgbe_adapter *adapter, int vf, int state);
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index a82533f21d36..f1f69ce67420 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -8,6 +8,7 @@
#define IXGBE_XDP_CONSUMED BIT(0)
#define IXGBE_XDP_TX BIT(1)
#define IXGBE_XDP_REDIR BIT(2)
+#define IXGBE_XDP_EXIT BIT(3)
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
IXGBE_TXD_CMD_RS)
@@ -35,8 +36,6 @@ int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
struct xsk_buff_pool *pool,
u16 qid);
-void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
-
bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count);
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 2647937f7f4d..2b00db92b08f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1247,7 +1247,7 @@ struct ixgbe_nvm_version {
#define IXGBE_PSRTYPE_RQPL_SHIFT 29
/* CTRL Bit Masks */
-#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Primary Disable bit */
#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
@@ -1811,7 +1811,7 @@ enum {
/* STATUS Bit Masks */
#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
-#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Primary Enable Status */
#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
@@ -2193,8 +2193,8 @@ enum {
#define IXGBE_PCIDEVCTRL2_4_8s 0xd
#define IXGBE_PCIDEVCTRL2_17_34s 0xe
-/* Number of 100 microseconds we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+/* Number of 100 microsecond intervals we wait for PCI Express primary disable */
+#define IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT 800
/* RAH */
#define IXGBE_RAH_VIND_MASK 0x003C0000
@@ -2533,6 +2533,13 @@ enum {
#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P)))
#define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P)))
#define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P)))
+#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x)))
+#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x)))
+#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x)))
+#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x)))
+#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x)))
+#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x)))
+#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x)))
#define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \
(IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index)))
@@ -3671,7 +3678,7 @@ struct ixgbe_info {
#define IXGBE_ERR_ADAPTER_STOPPED -9
#define IXGBE_ERR_INVALID_MAC_ADDR -10
#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
-#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
+#define IXGBE_ERR_PRIMARY_REQUESTS_PENDING -12
#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
#define IXGBE_ERR_RESET_FAILED -15
@@ -3705,7 +3712,9 @@ struct ixgbe_info {
#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238)
#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918)
#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
@@ -3715,6 +3724,7 @@ struct ixgbe_info {
#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054)
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
+#define IXGBE_KRM_FLX_TMRS_CTRL_ST31(P) ((P) ? 0x9180 : 0x5180)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index e4b50c7781ff..aa4bf6c9a2f7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1721,9 +1721,59 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
return IXGBE_ERR_LINK_SETUP;
}
- status = mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* change mode enforcement rules to hybrid */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x0400;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* manually control the config */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x20002240;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* move the AN base page values */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x1;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* set the AN37 over CB mode */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x20000000;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* restart AN manually */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
/* Toggle port SW reset by AN reset. */
status = ixgbe_restart_an_internal_phy_x550em(hw);
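The block of KRM register pokes added above repeats the same read-modify-write shape five times through the IOSF sideband ops. Purely as a readability sketch, not a proposed refactor of the driver, the pattern condenses to a helper like the one below, using the same mac->ops signatures and discarding the return codes exactly as the hunk does.

static void example_krm_set_bits(struct ixgbe_hw *hw, u32 reg, u32 bits)
{
	u32 reg_val;

	/* read, OR in the requested bits, write back over the IOSF sideband */
	(void)hw->mac.ops.read_iosf_sb_reg(hw, reg,
					   IXGBE_SB_IOSF_TARGET_KR_PHY,
					   &reg_val);
	reg_val |= bits;
	(void)hw->mac.ops.write_iosf_sb_reg(hw, reg,
					    IXGBE_SB_IOSF_TARGET_KR_PHY,
					    reg_val);
}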
@@ -1737,7 +1787,7 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
* @speed: link speed
* @autoneg_wait_to_complete: unused
*
- * Configure the the integrated PHY for native SFP support.
+ * Configure the integrated PHY for native SFP support.
*/
static s32
ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
@@ -1786,7 +1836,7 @@ ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
* @speed: link speed
* @autoneg_wait_to_complete: unused
*
- * Configure the the integrated PHY for SFP support.
+ * Configure the integrated PHY for SFP support.
*/
static s32
ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index db2bc58dfcfd..1703c640a434 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -109,9 +109,13 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
- goto out_failure;
- return IXGBE_XDP_REDIR;
+ if (!err)
+ return IXGBE_XDP_REDIR;
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = IXGBE_XDP_EXIT;
+ else
+ result = IXGBE_XDP_CONSUMED;
+ goto out_failure;
}
switch (act) {
@@ -130,16 +134,16 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
if (result == IXGBE_XDP_CONSUMED)
goto out_failure;
break;
+ case XDP_DROP:
+ result = IXGBE_XDP_CONSUMED;
+ break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
+ result = IXGBE_XDP_CONSUMED;
out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- fallthrough; /* handle aborts by dropping packet */
- case XDP_DROP:
- result = IXGBE_XDP_CONSUMED;
- break;
}
return result;
}
@@ -207,26 +211,28 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
}
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
- struct ixgbe_rx_buffer *bi)
+ const struct xdp_buff *xdp)
{
- unsigned int metasize = bi->xdp->data - bi->xdp->data_meta;
- unsigned int datasize = bi->xdp->data_end - bi->xdp->data;
+ unsigned int totalsize = xdp->data_end - xdp->data_meta;
+ unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
+ net_prefetch(xdp->data_meta);
+
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- bi->xdp->data_end - bi->xdp->data_hard_start,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize);
- if (metasize)
+ memcpy(__skb_put(skb, totalsize), xdp->data_meta,
+ ALIGN(totalsize, sizeof(long)));
+
+ if (metasize) {
skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
- xsk_buff_free(bi->xdp);
- bi->xdp = NULL;
return skb;
}
@@ -301,28 +307,36 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
- if (xdp_res) {
- if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))
- xdp_xmit |= xdp_res;
- else
- xsk_buff_free(bi->xdp);
+ if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
+ xdp_xmit |= xdp_res;
+ } else if (xdp_res == IXGBE_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == IXGBE_XDP_CONSUMED) {
+ xsk_buff_free(bi->xdp);
+ } else if (xdp_res == IXGBE_XDP_PASS) {
+ goto construct_skb;
+ }
- bi->xdp = NULL;
- total_rx_packets++;
- total_rx_bytes += size;
+ bi->xdp = NULL;
+ total_rx_packets++;
+ total_rx_bytes += size;
- cleaned_count++;
- ixgbe_inc_ntc(rx_ring);
- continue;
- }
+ cleaned_count++;
+ ixgbe_inc_ntc(rx_ring);
+ continue;
+construct_skb:
/* XDP_PASS path */
- skb = ixgbe_construct_skb_zc(rx_ring, bi);
+ skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp);
if (!skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
break;
}
+ xsk_buff_free(bi->xdp);
+ bi->xdp = NULL;
+
cleaned_count++;
ixgbe_inc_ntc(rx_ring);
@@ -390,12 +404,14 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
u32 cmd_type;
while (budget-- > 0) {
- if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
- !netif_carrier_ok(xdp_ring->netdev)) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
work_done = false;
break;
}
+ if (!netif_carrier_ok(xdp_ring->netdev))
+ break;
+
if (!xsk_tx_peek_desc(pool, &desc))
break;
@@ -509,10 +525,10 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
return -ENETDOWN;
if (!READ_ONCE(adapter->xdp_prog))
- return -ENXIO;
+ return -EINVAL;
if (qid >= adapter->num_xdp_queues)
- return -ENXIO;
+ return -EINVAL;
ring = adapter->xdp_ring[qid];
@@ -520,7 +536,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
return -ENETDOWN;
if (!ring->xsk_pool)
- return -ENXIO;
+ return -EINVAL;
if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
u64 eics = BIT_ULL(ring->q_vector->v_idx);
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 6bace746eaac..5f08779c0e4e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -281,6 +281,10 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ERR_INVALID_MAC_ADDR -1
#define IXGBE_ERR_RESET_FAILED -2
#define IXGBE_ERR_INVALID_ARGUMENT -3
+#define IXGBE_ERR_CONFIG -4
+#define IXGBE_ERR_MBX -5
+#define IXGBE_ERR_TIMEOUT -6
+#define IXGBE_ERR_PARAM -7
/* Transmit Config masks */
#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 8380f905e708..296915414a7c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -17,8 +17,6 @@
#include "ixgbevf.h"
-#define IXGBE_ALL_RAR_ENTRIES 16
-
enum {NETDEV_STATS, IXGBEVF_STATS};
struct ixgbe_stats {
@@ -130,8 +128,6 @@ static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
adapter->msg_enable = data;
}
-#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
-
static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
@@ -217,15 +213,17 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
}
static void ixgbevf_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -236,7 +234,9 @@ static void ixgbevf_get_ringparam(struct net_device *netdev,
}
static int ixgbevf_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
@@ -458,10 +458,10 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
data[i] = ring->stats.packets;
data[i + 1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
i += 2;
}
@@ -475,10 +475,10 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
data[i] = ring->stats.packets;
data[i + 1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
i += 2;
}
@@ -492,10 +492,10 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
}
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
data[i] = ring->stats.packets;
data[i + 1] = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
i += 2;
}
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index e3e4676af9e4..66cf17f19408 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -25,7 +25,7 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
/* send the important bits to the PF */
sam = (struct sa_mbx_msg *)(&msgbuf[1]);
- sam->flags = xs->xso.flags;
+ sam->dir = xs->xso.dir;
sam->spi = xs->id.spi;
sam->proto = xs->id.proto;
sam->family = xs->props.family;
@@ -40,16 +40,16 @@ static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
spin_lock_bh(&adapter->mbx_lock);
- ret = hw->mbx.ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+ ret = ixgbevf_write_mbx(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
if (ret)
goto out;
- ret = hw->mbx.ops.read_posted(hw, msgbuf, 2);
+ ret = ixgbevf_poll_mbx(hw, msgbuf, 2);
if (ret)
goto out;
ret = (int)msgbuf[1];
- if (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK && ret >= 0)
+ if (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE && ret >= 0)
ret = -1;
out:
@@ -77,11 +77,11 @@ static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)
spin_lock_bh(&adapter->mbx_lock);
- err = hw->mbx.ops.write_posted(hw, msgbuf, 2);
+ err = ixgbevf_write_mbx(hw, msgbuf, 2);
if (err)
goto out;
- err = hw->mbx.ops.read_posted(hw, msgbuf, 2);
+ err = ixgbevf_poll_mbx(hw, msgbuf, 2);
if (err)
goto out;
@@ -257,8 +257,10 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
/**
* ixgbevf_ipsec_add_sa - program device with a security association
* @xs: pointer to transformer state struct
+ * @extack: extack pointer used to report the failure reason
**/
-static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
+static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
+ struct netlink_ext_ack *extack)
{
struct net_device *dev = xs->xso.real_dev;
struct ixgbevf_adapter *adapter;
@@ -270,28 +272,32 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
ipsec = adapter->ipsec;
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
- netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
- xs->id.proto);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload");
return -EINVAL;
}
if (xs->props.mode != XFRM_MODE_TRANSPORT) {
- netdev_err(dev, "Unsupported mode for ipsec offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for ipsec offload");
return -EINVAL;
}
- if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported ipsec offload type");
+ return -EINVAL;
+ }
+
+ if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
struct rx_sa rsa;
if (xs->calg) {
- netdev_err(dev, "Compression offload not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "Compression offload not supported");
return -EINVAL;
}
/* find the first unused index */
ret = ixgbevf_ipsec_find_empty_idx(ipsec, true);
if (ret < 0) {
- netdev_err(dev, "No space for SA in Rx table!\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx table!");
return ret;
}
sa_idx = (u16)ret;
@@ -306,7 +312,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
/* get the key and salt */
ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
if (ret) {
- netdev_err(dev, "Failed to get key data for Rx SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
}
@@ -345,7 +351,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
/* find the first unused index */
ret = ixgbevf_ipsec_find_empty_idx(ipsec, false);
if (ret < 0) {
- netdev_err(dev, "No space for SA in Tx table\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Tx table");
return ret;
}
sa_idx = (u16)ret;
@@ -359,7 +365,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
if (ret) {
- netdev_err(dev, "Failed to get key data for Tx SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
return ret;
}
@@ -394,7 +400,7 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
adapter = netdev_priv(dev);
ipsec = adapter->ipsec;
- if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
if (!ipsec->rx_tbl[sa_idx].used) {
@@ -623,6 +629,7 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
switch (adapter->hw.api_version) {
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
break;
default:
return;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.h b/drivers/net/ethernet/intel/ixgbevf/ipsec.h
index 3740725041c3..d22990165353 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.h
@@ -57,7 +57,7 @@ struct ixgbevf_ipsec {
struct sa_mbx_msg {
__be32 spi;
- u8 flags;
+ u8 dir;
u8 proto;
u16 family;
__be32 addr[4];
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index a0e325774819..149c733fcc2b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -387,6 +387,8 @@ struct ixgbevf_adapter {
u32 *rss_key;
u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
u32 flags;
+ bool link_state;
+
#define IXGBEVF_FLAGS_LEGACY_RX BIT(1)
#ifdef CONFIG_XFRM
@@ -430,6 +432,7 @@ extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
+extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy;
extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
@@ -491,4 +494,8 @@ void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
#define hw_dbg(hw, format, arg...) \
netdev_dbg(ixgbevf_hw_to_netdev(hw), format, ## arg)
+
+s32 ixgbevf_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size);
+s32 ixgbevf_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size);
+
#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d81811ab4ec4..a44e4bd56142 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -944,7 +944,7 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
@@ -1070,7 +1070,7 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
goto out_failure;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
out_failure:
@@ -1984,14 +1984,15 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
return;
- set_ring_build_skb_enabled(rx_ring);
+ if (PAGE_SIZE < 8192)
+ if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB)
+ set_ring_uses_large_buffer(rx_ring);
- if (PAGE_SIZE < 8192) {
- if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
- return;
+ /* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */
+ if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring))
+ return;
- set_ring_uses_large_buffer(rx_ring);
- }
+ set_ring_build_skb_enabled(rx_ring);
}
/**
@@ -2043,12 +2044,16 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
spin_unlock_bh(&adapter->mbx_lock);
- /* translate error return types so error makes sense */
- if (err == IXGBE_ERR_MBX)
- return -EIO;
+ if (err) {
+ netdev_err(netdev, "VF could not set VLAN %d\n", vid);
+
+ /* translate error return types so error makes sense */
+ if (err == IXGBE_ERR_MBX)
+ return -EIO;
- if (err == IXGBE_ERR_INVALID_ARGUMENT)
- return -EACCES;
+ if (err == IXGBE_ERR_INVALID_ARGUMENT)
+ return -EACCES;
+ }
set_bit(vid, adapter->active_vlans);
@@ -2069,6 +2074,9 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
spin_unlock_bh(&adapter->mbx_lock);
+ if (err)
+ netdev_err(netdev, "Could not remove VLAN %d\n", vid);
+
clear_bit(vid, adapter->active_vlans);
return err;
@@ -2266,6 +2274,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
static const int api[] = {
+ ixgbe_mbox_api_15,
ixgbe_mbox_api_14,
ixgbe_mbox_api_13,
ixgbe_mbox_api_12,
@@ -2284,13 +2293,21 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
idx++;
}
+ if (hw->api_version >= ixgbe_mbox_api_15) {
+ hw->mbx.ops.init_params(hw);
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ sizeof(struct ixgbe_mbx_operations));
+ }
+
spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
struct ixgbe_hw *hw = &adapter->hw;
+ bool state;
ixgbevf_configure_msix(adapter);
@@ -2303,6 +2320,11 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
spin_unlock_bh(&adapter->mbx_lock);
+ state = adapter->link_state;
+ hw->mac.ops.get_link_state(hw, &adapter->link_state);
+ if (state && state != adapter->link_state)
+ dev_info(&pdev->dev, "VF is administratively disabled\n");
+
smp_mb__before_atomic();
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -2627,6 +2649,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
if (adapter->xdp_prog &&
hw->mac.max_tx_queues == rss)
rss = rss > 3 ? 2 : 1;
@@ -2717,7 +2740,7 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -2744,7 +2767,7 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
ring->reg_idx = reg_idx;
/* assign ring to adapter */
- adapter->tx_ring[txr_idx] = ring;
+ adapter->tx_ring[txr_idx] = ring;
/* update count and index */
txr_count--;
@@ -3072,6 +3095,8 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
+ adapter->link_state = true;
+
set_bit(__IXGBEVF_DOWN, &adapter->state);
return 0;
@@ -3304,7 +3329,7 @@ static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
ixgbevf_watchdog_update_link(adapter);
- if (adapter->link_up)
+ if (adapter->link_up && adapter->link_state)
ixgbevf_watchdog_link_is_up(adapter);
else
ixgbevf_watchdog_link_is_down(adapter);
@@ -4332,10 +4357,10 @@ static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats,
if (ring) {
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
bytes = ring->stats.bytes;
packets = ring->stats.packets;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->tx_bytes += bytes;
stats->tx_packets += packets;
}
@@ -4358,10 +4383,10 @@ static void ixgbevf_get_stats(struct net_device *netdev,
for (i = 0; i < adapter->num_rx_queues; i++) {
ring = adapter->rx_ring[i];
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
+ start = u64_stats_fetch_begin(&ring->syncp);
bytes = ring->stats.bytes;
packets = ring->stats.packets;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
stats->rx_bytes += bytes;
stats->rx_packets += packets;
}
@@ -4503,22 +4528,17 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbevf_adapter *adapter = NULL;
struct ixgbe_hw *hw = NULL;
const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
- int err, pci_using_dac;
bool disable_dev = false;
+ int err;
err = pci_enable_device(pdev);
if (err)
return err;
- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
- goto err_dma;
- }
- pci_using_dac = 0;
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+ goto err_dma;
}
err = pci_request_regions(pdev, ixgbevf_driver_name);
@@ -4565,7 +4585,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
hw->mac.type = ii->mac;
- memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
sizeof(struct ixgbe_mbx_operations));
/* setup the private structure */
@@ -4598,10 +4618,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_GSO_PARTIAL |
IXGBEVF_GSO_PARTIAL_FEATURES;
- netdev->features = netdev->hw_features;
-
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features | NETIF_F_HIGHDMA;
netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_SG |
@@ -4617,6 +4634,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_CTAG_TX;
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
/* MTU range: 68 - 1504 or 9710 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -4625,6 +4643,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
(ETH_HLEN + ETH_FCS_LEN);
break;
@@ -4776,7 +4795,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
pci_disable_device(pdev);
rtnl_unlock();
- /* Request a slot slot reset. */
+ /* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
@@ -4858,6 +4877,8 @@ static struct pci_driver ixgbevf_driver = {
**/
static int __init ixgbevf_init_module(void)
{
+ int err;
+
pr_info("%s\n", ixgbevf_driver_string);
pr_info("%s\n", ixgbevf_copyright);
ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
@@ -4866,7 +4887,13 @@ static int __init ixgbevf_init_module(void)
return -ENOMEM;
}
- return pci_register_driver(&ixgbevf_driver);
+ err = pci_register_driver(&ixgbevf_driver);
+ if (err) {
+ destroy_workqueue(ixgbevf_wq);
+ return err;
+ }
+
+ return 0;
}
module_init(ixgbevf_init_module);
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 6bc1953263b9..a55dd978f7ca 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -15,16 +15,15 @@ static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
+ if (!countdown || !mbx->ops.check_for_msg)
+ return IXGBE_ERR_CONFIG;
+
while (countdown && mbx->ops.check_for_msg(hw)) {
countdown--;
udelay(mbx->udelay);
}
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
-
- return countdown ? 0 : IXGBE_ERR_MBX;
+ return countdown ? 0 : IXGBE_ERR_TIMEOUT;
}
/**
@@ -38,87 +37,82 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
+ if (!countdown || !mbx->ops.check_for_ack)
+ return IXGBE_ERR_CONFIG;
+
while (countdown && mbx->ops.check_for_ack(hw)) {
countdown--;
udelay(mbx->udelay);
}
- /* if we failed, all future posted messages fail until reset */
- if (!countdown)
- mbx->timeout = 0;
-
- return countdown ? 0 : IXGBE_ERR_MBX;
+ return countdown ? 0 : IXGBE_ERR_TIMEOUT;
}
/**
- * ixgbevf_read_posted_mbx - Wait for message notification and receive message
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
+ * ixgbevf_read_mailbox_vf - read VF's mailbox register
+ * @hw: pointer to the HW structure
*
- * returns 0 if it successfully received a message notification and
- * copied it into the receive buffer.
+ * This function is used to read the mailbox register dedicated to the VF
+ * without losing the read-to-clear status bits.
**/
-static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+static u32 ixgbevf_read_mailbox_vf(struct ixgbe_hw *hw)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
+ u32 vf_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
- if (!mbx->ops.read)
- goto out;
+ vf_mailbox |= hw->mbx.vf_mailbox;
+ hw->mbx.vf_mailbox |= vf_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
- ret_val = ixgbevf_poll_for_msg(hw);
-
- /* if ack received read message, otherwise we timed out */
- if (!ret_val)
- ret_val = mbx->ops.read(hw, msg, size);
-out:
- return ret_val;
+ return vf_mailbox;
}
/**
- * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
+ * ixgbevf_clear_msg_vf - clear PF status bit
+ * @hw: pointer to the HW structure
*
- * returns 0 if it successfully copied message into the buffer and
- * received an ack to that message within delay * timeout period
+ * This function is used to clear the PFSTS bit in the VFMAILBOX register
**/
-static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+static void ixgbevf_clear_msg_vf(struct ixgbe_hw *hw)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
- /* exit if either we can't write or there isn't a defined timeout */
- if (!mbx->ops.write || !mbx->timeout)
- goto out;
+ if (vf_mailbox & IXGBE_VFMAILBOX_PFSTS) {
+ hw->mbx.stats.reqs++;
+ hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFSTS;
+ }
+}
- /* send msg */
- ret_val = mbx->ops.write(hw, msg, size);
+/**
+ * ixgbevf_clear_ack_vf - clear PF ACK bit
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to clear the PFACK bit in the VFMAILBOX register
+ **/
+static void ixgbevf_clear_ack_vf(struct ixgbe_hw *hw)
+{
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
- /* if msg sent wait until we receive an ack */
- if (!ret_val)
- ret_val = ixgbevf_poll_for_ack(hw);
-out:
- return ret_val;
+ if (vf_mailbox & IXGBE_VFMAILBOX_PFACK) {
+ hw->mbx.stats.acks++;
+ hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFACK;
+ }
}
/**
- * ixgbevf_read_v2p_mailbox - read v2p mailbox
- * @hw: pointer to the HW structure
+ * ixgbevf_clear_rst_vf - clear PF reset bit
+ * @hw: pointer to the HW structure
*
- * This function is used to read the v2p mailbox without losing the read to
- * clear status bits.
+ * This function is used to clear the reset indication and reset done bits in
+ * the VFMAILBOX register after the shared resources have been reset and the
+ * reset sequence has completed.
**/
-static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
+static void ixgbevf_clear_rst_vf(struct ixgbe_hw *hw)
{
- u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
-
- v2p_mailbox |= hw->mbx.v2p_mailbox;
- hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
- return v2p_mailbox;
+ if (vf_mailbox & (IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD)) {
+ hw->mbx.stats.rsts++;
+ hw->mbx.vf_mailbox &= ~(IXGBE_VFMAILBOX_RSTI |
+ IXGBE_VFMAILBOX_RSTD);
+ }
}
/**
@@ -131,14 +125,12 @@ static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
**/
static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
{
- u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
+ u32 vf_mailbox = ixgbevf_read_mailbox_vf(hw);
s32 ret_val = IXGBE_ERR_MBX;
- if (v2p_mailbox & mask)
+ if (vf_mailbox & mask)
ret_val = 0;
- hw->mbx.v2p_mailbox &= ~mask;
-
return ret_val;
}
@@ -172,6 +164,7 @@ static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
ret_val = 0;
+ ixgbevf_clear_ack_vf(hw);
hw->mbx.stats.acks++;
}
@@ -191,6 +184,7 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
IXGBE_VFMAILBOX_RSTI))) {
ret_val = 0;
+ ixgbevf_clear_rst_vf(hw);
hw->mbx.stats.rsts++;
}
@@ -205,19 +199,59 @@ static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
**/
static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
{
- s32 ret_val = IXGBE_ERR_MBX;
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+ int countdown = mbx->timeout;
+ u32 vf_mailbox;
- /* Take ownership of the buffer */
- IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+ if (!mbx->timeout)
+ return ret_val;
- /* reserve mailbox for VF use */
- if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
- ret_val = 0;
+ while (countdown--) {
+ /* Reserve mailbox for VF use */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_VFU;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* Verify that VF is the owner of the lock */
+ if (ixgbevf_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU) {
+ ret_val = 0;
+ break;
+ }
+
+ /* Wait a bit before trying again */
+ udelay(mbx->udelay);
+ }
+
+ if (ret_val)
+ ret_val = IXGBE_ERR_TIMEOUT;
return ret_val;
}
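Every transaction in the reworked mailbox flow brackets its register accesses with this VFU lock; a minimal caller-side sketch (the same sequence ixgbevf_write_mbx_vf() follows below, details elided):

	/* take VF ownership of the mailbox, do the work, then release it */
	ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
	if (ret_val)
		return ret_val;
	/* ... access IXGBE_VFMBMEM / IXGBE_VFMAILBOX ... */
	hw->mbx.ops.release(hw);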
/**
+ * ixgbevf_release_mbx_lock_vf - release mailbox lock
+ * @hw: pointer to the HW structure
+ **/
+static void ixgbevf_release_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ u32 vf_mailbox;
+
+ /* Return ownership of the buffer */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox &= ~IXGBE_VFMAILBOX_VFU;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+}
+
+/**
+ * ixgbevf_release_mbx_lock_vf_legacy - release mailbox lock
+ * @hw: pointer to the HW structure
+ **/
+static void ixgbevf_release_mbx_lock_vf_legacy(struct ixgbe_hw *__always_unused hw)
+{
+}
+
+/**
* ixgbevf_write_mbx_vf - Write a message to the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
@@ -227,6 +261,50 @@ static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
**/
static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
+ u32 vf_mailbox;
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent PF/VF race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbevf_clear_msg_vf(hw);
+ ixgbevf_clear_ack_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* interrupt the PF to tell it a message has been sent */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* if msg sent wait until we receive an ack */
+ ret_val = ixgbevf_poll_for_ack(hw);
+
+out_no_write:
+ hw->mbx.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx_vf_legacy - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer
+ **/
+static s32 ixgbevf_write_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
s32 ret_val;
u16 i;
@@ -237,7 +315,9 @@ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
/* flush msg and acks as we are overwriting the message buffer */
ixgbevf_check_for_msg_vf(hw);
+ ixgbevf_clear_msg_vf(hw);
ixgbevf_check_for_ack_vf(hw);
+ ixgbevf_clear_ack_vf(hw);
/* copy the caller specified message to the mailbox memory buffer */
for (i = 0; i < size; i++)
@@ -263,6 +343,42 @@ out_no_write:
**/
static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
+ u32 vf_mailbox;
+ s32 ret_val;
+ u16 i;
+
+ /* check if there is a message from PF */
+ ret_val = ixgbevf_check_for_msg_vf(hw);
+ if (ret_val)
+ return ret_val;
+
+ ixgbevf_clear_msg_vf(hw);
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt */
+ vf_mailbox = ixgbevf_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_ACK;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_mbx_vf_legacy - Reads a message from the inbox intended for VF
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully read message from buffer
+ **/
+static s32 ixgbevf_read_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
s32 ret_val = 0;
u16 i;
@@ -298,7 +414,7 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
/* start mailbox as timed out and let the reset_hw call set the timeout
* value to begin communications
*/
- mbx->timeout = 0;
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
mbx->size = IXGBE_VFMAILBOX_SIZE;
@@ -312,12 +428,79 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
return 0;
}
+/**
+ * ixgbevf_poll_mbx - Wait for message and read it from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully read message from buffer
+ **/
+s32 ixgbevf_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ if (!mbx->ops.read || !mbx->ops.check_for_msg || !mbx->timeout)
+ return ret_val;
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ ret_val = ixgbevf_poll_for_msg(hw);
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx - Write a message to the mailbox and wait for ACK
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer and
+ * received an ACK to that message within specified period
+ **/
+s32 ixgbevf_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+	/*
+	 * exit if we can't write or release the mailbox,
+	 * or there is no timeout defined
+	 */
+ if (!mbx->ops.write || !mbx->ops.check_for_ack || !mbx->ops.release ||
+ !mbx->timeout)
+ return ret_val;
+
+ if (size > mbx->size)
+ ret_val = IXGBE_ERR_PARAM;
+ else
+ ret_val = mbx->ops.write(hw, msg, size);
+
+ return ret_val;
+}
+
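With the read_posted/write_posted ops removed, callers pair these helpers for a request/response exchange; a minimal sketch of the pattern (it mirrors ixgbevf_write_msg_read_ack() later in this patch):

	/* send the request, then wait for and read the PF's reply */
	err = ixgbevf_write_mbx(hw, msgbuf, 2);
	if (!err)
		err = ixgbevf_poll_mbx(hw, msgbuf, 2);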
const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
.init_params = ixgbevf_init_mbx_params_vf,
+ .release = ixgbevf_release_mbx_lock_vf,
.read = ixgbevf_read_mbx_vf,
.write = ixgbevf_write_mbx_vf,
- .read_posted = ixgbevf_read_posted_mbx,
- .write_posted = ixgbevf_write_posted_mbx,
+ .check_for_msg = ixgbevf_check_for_msg_vf,
+ .check_for_ack = ixgbevf_check_for_ack_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
+};
+
+const struct ixgbe_mbx_operations ixgbevf_mbx_ops_legacy = {
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .release = ixgbevf_release_mbx_lock_vf_legacy,
+ .read = ixgbevf_read_mbx_vf_legacy,
+ .write = ixgbevf_write_mbx_vf_legacy,
.check_for_msg = ixgbevf_check_for_msg_vf,
.check_for_ack = ixgbevf_check_for_ack_vf,
.check_for_rst = ixgbevf_check_for_rst_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 853796c8ef0e..835bbcc5cc8e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -7,7 +7,6 @@
#include "vf.h"
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX -100
#define IXGBE_VFMAILBOX 0x002FC
#define IXGBE_VFMBMEM 0x00200
@@ -39,14 +38,17 @@
/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is true if it is IXGBE_PF_*.
- * Message ACK's are the value or'd with 0xF0000000
+ * Message results are the value or'd with 0xF0000000
*/
-/* Messages below or'd with this are the ACK */
-#define IXGBE_VT_MSGTYPE_ACK 0x80000000
-/* Messages below or'd with this are the NACK */
-#define IXGBE_VT_MSGTYPE_NACK 0x40000000
-/* Indicates that VF is still clear to send requests */
-#define IXGBE_VT_MSGTYPE_CTS 0x20000000
+#define IXGBE_VT_MSGTYPE_SUCCESS 0x80000000 /* Messages or'd with this
+ * have succeeded
+ */
+#define IXGBE_VT_MSGTYPE_FAILURE 0x40000000 /* Messages or'd with this
+ * have failed
+ */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests
+ */
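A reply carries the original command or'd with one of these result bits, so callers mask off CTS and compare against the expected command; a short sketch of the check (the same idiom used in the vf.c hunks below):

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
		;	/* PF accepted the request */
	else if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
		;	/* PF refused the request */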
#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
@@ -63,6 +65,7 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
+ ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -97,6 +100,8 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_IPSEC_ADD 0x0d
#define IXGBE_VF_IPSEC_DEL 0x0e
+#define IXGBE_VF_GET_LINK_STATE 0x10 /* get vf link state */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index d459f5c8e98f..1641d00d8ed3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -13,13 +13,12 @@
static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
u32 *retmsg, u16 size)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 retval = mbx->ops.write_posted(hw, msg, size);
+ s32 retval = ixgbevf_write_mbx(hw, msg, size);
if (retval)
return retval;
- return mbx->ops.read_posted(hw, retmsg, size);
+ return ixgbevf_poll_mbx(hw, retmsg, size);
}
/**
@@ -75,6 +74,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
/* reset the api version */
hw->api_version = ixgbe_mbox_api_10;
+ hw->mbx.ops.init_params(hw);
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
+ sizeof(struct ixgbe_mbx_operations));
IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
IXGBE_WRITE_FLUSH(hw);
@@ -92,7 +94,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
msgbuf[0] = IXGBE_VF_RESET;
- mbx->ops.write_posted(hw, msgbuf, 1);
+ ixgbevf_write_mbx(hw, msgbuf, 1);
mdelay(10);
@@ -100,7 +102,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
* also set up the mc_filter_type which is piggy backed
* on the mac address in word 3
*/
- ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
+ ret_val = ixgbevf_poll_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
if (ret_val)
return ret_val;
@@ -108,11 +110,11 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
* to indicate that no MAC address has yet been assigned for
* the VF.
*/
- if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
- msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
+ msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_INVALID_MAC_ADDR;
- if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
ether_addr_copy(hw->mac.perm_addr, addr);
hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
@@ -269,7 +271,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
if (!ret_val) {
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
return -ENOMEM;
}
@@ -311,6 +313,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
* is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
@@ -323,12 +326,12 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
msgbuf[0] = IXGBE_VF_GET_RETA;
- err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
+ err = ixgbevf_write_mbx(hw, msgbuf, 1);
if (err)
return err;
- err = hw->mbx.ops.read_posted(hw, msgbuf, dwords + 1);
+ err = ixgbevf_poll_mbx(hw, msgbuf, dwords + 1);
if (err)
return err;
@@ -336,14 +339,14 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* If the operation has been refused by a PF return -EPERM */
- if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE))
return -EPERM;
/* If we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such.
*/
- if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_ERR_MBX;
/* ixgbevf doesn't support more than 2 queues at the moment */
@@ -379,6 +382,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
* or if the operation is not supported for this device type.
*/
switch (hw->api_version) {
+ case ixgbe_mbox_api_15:
case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_12:
@@ -390,12 +394,12 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
}
msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
- err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
+ err = ixgbevf_write_mbx(hw, msgbuf, 1);
if (err)
return err;
- err = hw->mbx.ops.read_posted(hw, msgbuf, 11);
+ err = ixgbevf_poll_mbx(hw, msgbuf, 11);
if (err)
return err;
@@ -403,14 +407,14 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* If the operation has been refused by a PF return -EPERM */
- if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_FAILURE))
return -EPERM;
/* If we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such.
*/
- if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_ERR_MBX;
memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);
@@ -442,7 +446,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
/* if nacked the address was rejected, use "perm_addr" */
if (!ret_val &&
- (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
return IXGBE_ERR_MBX;
}
@@ -545,8 +549,9 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
return -EOPNOTSUPP;
fallthrough;
- case ixgbe_mbox_api_14:
case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
break;
default:
return -EOPNOTSUPP;
@@ -561,7 +566,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
return err;
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
return -EPERM;
return 0;
@@ -580,6 +585,46 @@ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
}
/**
+ * ixgbevf_get_link_state_vf - Get VF link state from PF
+ * @hw: pointer to the HW structure
+ * @link_state: link state storage
+ *
+ * Returns 0 on success, or an error code on failure.
+ */
+static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
+{
+ u32 msgbuf[2];
+ s32 ret_val;
+ s32 err;
+
+ msgbuf[0] = IXGBE_VF_GET_LINK_STATE;
+ msgbuf[1] = 0x0;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+
+ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+ ret_val = IXGBE_ERR_MBX;
+ } else {
+ ret_val = 0;
+ *link_state = msgbuf[1];
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_hv_get_link_state_vf - Hyper-V variant - just a stub.
+ * @hw: unused
+ * @link_state: unused
+ *
+ * Hyper-V variant; there is no mailbox communication.
+ */
+static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
@@ -606,7 +651,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);
- if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_SUCCESS))
err = IXGBE_ERR_INVALID_ARGUMENT;
mbx_err:
@@ -705,12 +750,15 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error
*/
- if (mbx->ops.read(hw, &in_msg, 1))
+ if (mbx->ops.read(hw, &in_msg, 1)) {
+ if (hw->api_version >= ixgbe_mbox_api_15)
+ mac->get_link_status = false;
goto out;
+ }
if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
/* msg is not CTS and is NACK we must have lost CTS status */
- if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+ if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
ret_val = -1;
goto out;
}
@@ -816,7 +864,7 @@ static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
if (ret_val)
return ret_val;
if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
- (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
+ (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_MBX;
return 0;
@@ -863,7 +911,8 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* Store value and return 0 on success */
- if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+ if (msg[0] == (IXGBE_VF_API_NEGOTIATE |
+ IXGBE_VT_MSGTYPE_SUCCESS)) {
hw->api_version = api;
return 0;
}
@@ -901,6 +950,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
case ixgbe_mbox_api_14:
+ case ixgbe_mbox_api_15:
break;
default:
return 0;
@@ -914,11 +964,11 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- /* if we we didn't get an ACK there must have been
+ /* if we didn't get an ACK there must have been
* some sort of mailbox error so we should treat it
* as such
*/
- if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
+ if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_ERR_MBX;
/* record and validate values from message */
@@ -958,6 +1008,7 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_update_xcast_mode,
+ .get_link_state = ixgbevf_get_link_state_vf,
.set_uc_addr = ixgbevf_set_uc_addr_vf,
.set_vfta = ixgbevf_set_vfta_vf,
.set_rlpml = ixgbevf_set_rlpml_vf,
@@ -975,6 +1026,7 @@ static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
.set_rar = ixgbevf_hv_set_rar_vf,
.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_hv_update_xcast_mode,
+ .get_link_state = ixgbevf_hv_get_link_state_vf,
.set_uc_addr = ixgbevf_hv_set_uc_addr_vf,
.set_vfta = ixgbevf_hv_set_vfta_vf,
.set_rlpml = ixgbevf_hv_set_rlpml_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 1d8209df4162..b4eef5b6c172 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -39,6 +39,7 @@ struct ixgbe_mac_operations {
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
+ s32 (*get_link_state)(struct ixgbe_hw *hw, bool *link_state);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
@@ -73,10 +74,9 @@ struct ixgbe_mac_info {
struct ixgbe_mbx_operations {
s32 (*init_params)(struct ixgbe_hw *hw);
+ void (*release)(struct ixgbe_hw *hw);
s32 (*read)(struct ixgbe_hw *, u32 *, u16);
s32 (*write)(struct ixgbe_hw *, u32 *, u16);
- s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
- s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
s32 (*check_for_msg)(struct ixgbe_hw *);
s32 (*check_for_ack)(struct ixgbe_hw *);
s32 (*check_for_rst)(struct ixgbe_hw *);
@@ -96,7 +96,7 @@ struct ixgbe_mbx_info {
struct ixgbe_mbx_stats stats;
u32 timeout;
u32 udelay;
- u32 v2p_mailbox;
+ u32 vf_mailbox;
u16 size;
};
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 439674fc9765..1732ec3c3dbd 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -28,6 +28,7 @@
#include <linux/udp.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
+#include <linux/jiffies.h>
#include <net/ip6_checksum.h>
#include "jme.h"
@@ -2179,7 +2180,7 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
}
if (unlikely(txbi->start_xmit &&
- (jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
+ time_is_before_eq_jiffies(txbi->start_xmit + TX_TIMEOUT) &&
txbi->skb)) {
netif_stop_queue(jme->dev);
netif_info(jme, tx_queued, jme->dev,
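time_is_before_eq_jiffies() from <linux/jiffies.h> expands to time_after_eq(jiffies, a), which compares with signed arithmetic and therefore stays correct across a jiffies wrap; a minimal sketch of the equivalent check:

	/* true once at least TX_TIMEOUT ticks have passed since start_xmit */
	if (time_is_before_eq_jiffies(txbi->start_xmit + TX_TIMEOUT))
		;	/* descriptor has been pending too long */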
@@ -2331,9 +2332,9 @@ jme_get_drvinfo(struct net_device *netdev,
{
struct jme_adapter *jme = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
}
static int
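strscpy() always NUL-terminates the destination and returns -E2BIG on truncation, whereas strlcpy() returns strlen(src) and so reads the whole source string even when it does not fit the destination; a one-line sketch of the replacement idiom:

	if (strscpy(info->driver, DRV_NAME, sizeof(info->driver)) < 0)
		;	/* source was truncated to fit the destination */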
@@ -3008,7 +3009,7 @@ jme_init_one(struct pci_dev *pdev,
jwrite32(jme, JME_APMC, apmc);
}
- NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT)
+ netif_napi_add(netdev, &jme->napi, jme_poll);
spin_lock_init(&jme->phy_lock);
spin_lock_init(&jme->macaddr_lock);
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 2af76329b4a2..860494ff3714 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -379,8 +379,6 @@ struct jme_ring {
#define DECLARE_NET_DEVICE_STATS
#define DECLARE_NAPI_STRUCT struct napi_struct napi;
-#define NETIF_NAPI_SET(dev, napis, pollfn, q) \
- netif_napi_add(dev, napis, pollfn, q);
#define JME_NAPI_HOLDER(holder) struct napi_struct *holder
#define JME_NAPI_WEIGHT(w) int w
#define JME_NAPI_WEIGHT_VAL(w) w
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index df9a8eefa007..2b9335cb4bb3 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -416,7 +416,8 @@ static void korina_abort_rx(struct net_device *dev)
}
/* transmit packet */
-static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t korina_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
{
struct korina_private *lp = netdev_priv(dev);
u32 chain_prev, chain_next;
@@ -938,9 +939,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct korina_private *lp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
@@ -1354,7 +1355,7 @@ static int korina_probe(struct platform_device *pdev)
dev->netdev_ops = &korina_netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &lp->napi, korina_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &lp->napi, korina_poll);
lp->mii_if.dev = dev;
lp->mii_if.mdio_read = korina_mdio_read;
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 072391c494ce..f5961bdcc480 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -65,8 +65,8 @@
/* use 2 static channels for TX/RX */
#define LTQ_ETOP_TX_CHANNEL 1
#define LTQ_ETOP_RX_CHANNEL 6
-#define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL)
-#define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL)
+#define IS_TX(x) ((x) == LTQ_ETOP_TX_CHANNEL)
+#define IS_RX(x) ((x) == LTQ_ETOP_RX_CHANNEL)
#define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x))
#define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y))
@@ -111,9 +111,9 @@ ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
if (!ch->skb[ch->dma.desc])
return -ENOMEM;
- ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(&priv->pdev->dev,
- ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
- DMA_FROM_DEVICE);
+ ch->dma.desc_base[ch->dma.desc].addr =
+ dma_map_single(&priv->pdev->dev, ch->skb[ch->dma.desc]->data,
+ MAX_DMA_DATA_LEN, DMA_FROM_DEVICE);
ch->dma.desc_base[ch->dma.desc].addr =
CPHYSADDR(ch->skb[ch->dma.desc]->data);
ch->dma.desc_base[ch->dma.desc].ctl =
@@ -135,7 +135,7 @@ ltq_etop_hw_receive(struct ltq_etop_chan *ch)
spin_lock_irqsave(&priv->lock, flags);
if (ltq_etop_alloc_skb(ch)) {
netdev_err(ch->netdev,
- "failed to allocate new rx buffer, stopping DMA\n");
+ "failed to allocate new rx buffer, stopping DMA\n");
ltq_dma_close(&ch->dma);
}
ch->dma.desc++;
@@ -185,7 +185,7 @@ ltq_etop_poll_tx(struct napi_struct *napi, int budget)
dev_kfree_skb_any(ch->skb[ch->tx_free]);
ch->skb[ch->tx_free] = NULL;
memset(&ch->dma.desc_base[ch->tx_free], 0,
- sizeof(struct ltq_dma_desc));
+ sizeof(struct ltq_dma_desc));
ch->tx_free++;
ch->tx_free %= LTQ_DESC_NUM;
}
@@ -218,6 +218,7 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
free_irq(ch->dma.irq, priv);
if (IS_RX(ch->idx)) {
int desc;
+
for (desc = 0; desc < LTQ_DESC_NUM; desc++)
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
}
@@ -246,18 +247,18 @@ ltq_etop_hw_init(struct net_device *dev)
switch (priv->pldata->mii_mode) {
case PHY_INTERFACE_MODE_RMII:
- ltq_etop_w32_mask(ETOP_MII_MASK,
- ETOP_MII_REVERSE, LTQ_ETOP_CFG);
+ ltq_etop_w32_mask(ETOP_MII_MASK, ETOP_MII_REVERSE,
+ LTQ_ETOP_CFG);
break;
case PHY_INTERFACE_MODE_MII:
- ltq_etop_w32_mask(ETOP_MII_MASK,
- ETOP_MII_NORMAL, LTQ_ETOP_CFG);
+ ltq_etop_w32_mask(ETOP_MII_MASK, ETOP_MII_NORMAL,
+ LTQ_ETOP_CFG);
break;
default:
netdev_err(dev, "unknown mii mode %d\n",
- priv->pldata->mii_mode);
+ priv->pldata->mii_mode);
return -ENOTSUPP;
}
@@ -270,7 +271,8 @@ ltq_etop_hw_init(struct net_device *dev)
int irq = LTQ_DMA_CH0_INT + i;
struct ltq_etop_chan *ch = &priv->ch[i];
- ch->idx = ch->dma.nr = i;
+ ch->dma.nr = i;
+ ch->idx = ch->dma.nr;
ch->dma.dev = &priv->pdev->dev;
if (IS_TX(i)) {
@@ -305,9 +307,9 @@ ltq_etop_hw_init(struct net_device *dev)
static void
ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "Lantiq ETOP", sizeof(info->driver));
- strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, "Lantiq ETOP", sizeof(info->driver));
+ strscpy(info->bus_info, "internal", sizeof(info->bus_info));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static const struct ethtool_ops ltq_etop_ethtool_ops = {
@@ -399,7 +401,7 @@ ltq_etop_mdio_init(struct net_device *dev)
priv->mii_bus->write = ltq_etop_mdio_wr;
priv->mii_bus->name = "ltq_mii";
snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- priv->pdev->name, priv->pdev->id);
+ priv->pdev->name, priv->pdev->id);
if (mdiobus_register(priv->mii_bus)) {
err = -ENXIO;
goto err_out_free_mdiobus;
@@ -468,7 +470,7 @@ ltq_etop_stop(struct net_device *dev)
return 0;
}
-static int
+static netdev_tx_t
ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
{
int queue = skb_get_queue_mapping(skb);
@@ -483,7 +485,6 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
- dev_kfree_skb_any(skb);
netdev_err(dev, "tx ring full\n");
netif_tx_stop_queue(txq);
return NETDEV_TX_BUSY;
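Returning NETDEV_TX_BUSY tells the core to requeue and later resubmit the same skb, so the driver must not free it first; the dropped dev_kfree_skb_any() would have let the stack retransmit an already-freed buffer. A minimal sketch of the ring-full path (ring_full stands in for the descriptor-ownership test above):

	if (ring_full) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;	/* core keeps ownership of the skb */
	}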
@@ -496,8 +497,9 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
netif_trans_update(dev);
spin_lock_irqsave(&priv->lock, flags);
- desc->addr = ((unsigned int) dma_map_single(&priv->pdev->dev, skb->data, len,
+ desc->addr = ((unsigned int)dma_map_single(&priv->pdev->dev, skb->data, len,
DMA_TO_DEVICE)) - byte_offset;
+ /* Make sure the address is written before we give it to HW */
wmb();
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
@@ -539,7 +541,7 @@ ltq_etop_set_mac_address(struct net_device *dev, void *p)
spin_lock_irqsave(&priv->lock, flags);
ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0);
ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16,
- LTQ_ETOP_MAC_DA1);
+ LTQ_ETOP_MAC_DA1);
spin_unlock_irqrestore(&priv->lock, flags);
}
return ret;
@@ -652,15 +654,15 @@ ltq_etop_probe(struct platform_device *pdev)
}
res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
+ resource_size(res), dev_name(&pdev->dev));
if (!res) {
dev_err(&pdev->dev, "failed to request etop resource\n");
err = -EBUSY;
goto err_out;
}
- ltq_etop_membase = devm_ioremap(&pdev->dev,
- res->start, resource_size(res));
+ ltq_etop_membase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
if (!ltq_etop_membase) {
dev_err(&pdev->dev, "failed to remap etop engine %d\n",
pdev->id);
@@ -687,22 +689,22 @@ ltq_etop_probe(struct platform_device *pdev)
err = device_property_read_u32(&pdev->dev, "lantiq,tx-burst-length", &priv->tx_burst_len);
if (err < 0) {
dev_err(&pdev->dev, "unable to read tx-burst-length property\n");
- return err;
+ goto err_free;
}
err = device_property_read_u32(&pdev->dev, "lantiq,rx-burst-length", &priv->rx_burst_len);
if (err < 0) {
dev_err(&pdev->dev, "unable to read rx-burst-length property\n");
- return err;
+ goto err_free;
}
for (i = 0; i < MAX_DMA_CHAN; i++) {
if (IS_TX(i))
- netif_napi_add(dev, &priv->ch[i].napi,
- ltq_etop_poll_tx, 8);
+ netif_napi_add_weight(dev, &priv->ch[i].napi,
+ ltq_etop_poll_tx, 8);
else if (IS_RX(i))
- netif_napi_add(dev, &priv->ch[i].napi,
- ltq_etop_poll_rx, 32);
+ netif_napi_add_weight(dev, &priv->ch[i].napi,
+ ltq_etop_poll_rx, 32);
priv->ch[i].netdev = dev;
}
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 0da09ea81980..8d646c7f8c82 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -27,6 +27,9 @@
#define XRX200_DMA_TX 1
#define XRX200_DMA_BURST_LEN 8
+#define XRX200_DMA_PACKET_COMPLETE 0
+#define XRX200_DMA_PACKET_IN_PROGRESS 1
+
/* cpu port mac */
#define PMAC_RX_IPG 0x0024
#define PMAC_RX_IPG_MASK 0xf
@@ -60,7 +63,14 @@ struct xrx200_chan {
struct napi_struct napi;
struct ltq_dma_channel dma;
- struct sk_buff *skb[LTQ_DESC_NUM];
+
+ union {
+ struct sk_buff *skb[LTQ_DESC_NUM];
+ void *rx_buff[LTQ_DESC_NUM];
+ };
+
+ struct sk_buff *skb_head;
+ struct sk_buff *skb_tail;
struct xrx200_priv *priv;
};
@@ -71,6 +81,9 @@ struct xrx200_priv {
struct xrx200_chan chan_tx;
struct xrx200_chan chan_rx;
+ u16 rx_buf_size;
+ u16 rx_skb_size;
+
struct net_device *net_dev;
struct device *dev;
@@ -97,6 +110,22 @@ static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
xrx200_pmac_w32(priv, val, offset);
}
+static int xrx200_max_frame_len(int mtu)
+{
+ return VLAN_ETH_HLEN + mtu;
+}
+
+static int xrx200_buffer_size(int mtu)
+{
+ return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
+}
+
+static int xrx200_skb_size(u16 buf_size)
+{
+ return SKB_DATA_ALIGN(buf_size + NET_SKB_PAD + NET_IP_ALIGN) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
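A worked example of the two helpers for the default MTU, assuming NET_SKB_PAD == 64, NET_IP_ALIGN == 2 and 64-byte cache lines (all arch-dependent):

	/* mtu = 1500 (ETH_DATA_LEN)                                        */
	/* max_frame   = VLAN_ETH_HLEN + 1500       = 18 + 1500 = 1518      */
	/* rx_buf_size = round_up(1518, 4 * 8)      = 1536                  */
	/* rx_skb_size = SKB_DATA_ALIGN(1536 + 64 + 2)                      */
	/*               + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))   */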
+
/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
@@ -109,8 +138,7 @@ static void xrx200_flush_dma(struct xrx200_chan *ch)
break;
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
- (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
- ETH_FCS_LEN);
+ ch->priv->rx_buf_size;
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
}
@@ -156,35 +184,35 @@ static int xrx200_close(struct net_device *net_dev)
return 0;
}
-static int xrx200_alloc_skb(struct xrx200_chan *ch)
+static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int size))
{
- int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
- struct sk_buff *skb = ch->skb[ch->dma.desc];
+ void *buf = ch->rx_buff[ch->dma.desc];
+ struct xrx200_priv *priv = ch->priv;
dma_addr_t mapping;
int ret = 0;
- ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
- len);
- if (!ch->skb[ch->dma.desc]) {
+ ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
+ if (!ch->rx_buff[ch->dma.desc]) {
+ ch->rx_buff[ch->dma.desc] = buf;
ret = -ENOMEM;
goto skip;
}
- mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
- len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
- dev_kfree_skb_any(ch->skb[ch->dma.desc]);
- ch->skb[ch->dma.desc] = skb;
+ mapping = dma_map_single(priv->dev, ch->rx_buff[ch->dma.desc],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, mapping))) {
+ skb_free_frag(ch->rx_buff[ch->dma.desc]);
+ ch->rx_buff[ch->dma.desc] = buf;
ret = -ENOMEM;
goto skip;
}
- ch->dma.desc_base[ch->dma.desc].addr = mapping;
+ ch->dma.desc_base[ch->dma.desc].addr = mapping + NET_SKB_PAD + NET_IP_ALIGN;
/* Make sure the address is written before we give it to HW */
wmb();
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
- LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
+ LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;
return ret;
}
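The allocator is passed in because the two refill sites run in different contexts; the call sites later in this patch are:

	xrx200_alloc_buf(ch, napi_alloc_frag);		/* xrx200_hw_receive(), NAPI/softirq context */
	xrx200_alloc_buf(ch, netdev_alloc_frag);	/* xrx200_dma_init() and xrx200_change_mtu(), process context */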
@@ -193,12 +221,14 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
{
struct xrx200_priv *priv = ch->priv;
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
- struct sk_buff *skb = ch->skb[ch->dma.desc];
- int len = (desc->ctl & LTQ_DMA_SIZE_MASK);
+ void *buf = ch->rx_buff[ch->dma.desc];
+ u32 ctl = desc->ctl;
+ int len = (ctl & LTQ_DMA_SIZE_MASK);
struct net_device *net_dev = priv->net_dev;
+ struct sk_buff *skb;
int ret;
- ret = xrx200_alloc_skb(ch);
+ ret = xrx200_alloc_buf(ch, napi_alloc_frag);
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
@@ -209,13 +239,45 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
return ret;
}
+ skb = build_skb(buf, priv->rx_skb_size);
+ if (!skb) {
+ skb_free_frag(buf);
+ net_dev->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, NET_SKB_PAD);
skb_put(skb, len);
- skb->protocol = eth_type_trans(skb, net_dev);
- netif_receive_skb(skb);
- net_dev->stats.rx_packets++;
- net_dev->stats.rx_bytes += len - ETH_FCS_LEN;
- return 0;
+ /* add buffers to skb via skb->frag_list */
+ if (ctl & LTQ_DMA_SOP) {
+ ch->skb_head = skb;
+ ch->skb_tail = skb;
+ skb_reserve(skb, NET_IP_ALIGN);
+ } else if (ch->skb_head) {
+ if (ch->skb_head == ch->skb_tail)
+ skb_shinfo(ch->skb_tail)->frag_list = skb;
+ else
+ ch->skb_tail->next = skb;
+ ch->skb_tail = skb;
+ ch->skb_head->len += skb->len;
+ ch->skb_head->data_len += skb->len;
+ ch->skb_head->truesize += skb->truesize;
+ }
+
+ if (ctl & LTQ_DMA_EOP) {
+ ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
+ net_dev->stats.rx_packets++;
+ net_dev->stats.rx_bytes += ch->skb_head->len;
+ netif_receive_skb(ch->skb_head);
+ ch->skb_head = NULL;
+ ch->skb_tail = NULL;
+ ret = XRX200_DMA_PACKET_COMPLETE;
+ } else {
+ ret = XRX200_DMA_PACKET_IN_PROGRESS;
+ }
+
+ return ret;
}
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
@@ -230,8 +292,10 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget)
if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
ret = xrx200_hw_receive(ch);
- if (ret)
- return ret;
+ if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
+ continue;
+ if (ret != XRX200_DMA_PACKET_COMPLETE)
+ break;
rx++;
} else {
break;
@@ -351,11 +415,13 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
struct xrx200_chan *ch_rx = &priv->chan_rx;
int old_mtu = net_dev->mtu;
bool running = false;
- struct sk_buff *skb;
+ void *buff;
int curr_desc;
int ret = 0;
net_dev->mtu = new_mtu;
+ priv->rx_buf_size = xrx200_buffer_size(new_mtu);
+ priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
if (new_mtu <= old_mtu)
return ret;
@@ -371,13 +437,15 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
ch_rx->dma.desc++) {
- skb = ch_rx->skb[ch_rx->dma.desc];
- ret = xrx200_alloc_skb(ch_rx);
+ buff = ch_rx->rx_buff[ch_rx->dma.desc];
+ ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
if (ret) {
net_dev->mtu = old_mtu;
+ priv->rx_buf_size = xrx200_buffer_size(old_mtu);
+ priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
break;
}
- dev_kfree_skb_any(skb);
+ skb_free_frag(buff);
}
ch_rx->dma.desc = curr_desc;
@@ -430,7 +498,7 @@ static int xrx200_dma_init(struct xrx200_priv *priv)
ltq_dma_alloc_rx(&ch_rx->dma);
for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
ch_rx->dma.desc++) {
- ret = xrx200_alloc_skb(ch_rx);
+ ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
if (ret)
goto rx_free;
}
@@ -465,7 +533,7 @@ rx_ring_free:
/* free the allocated RX ring */
for (i = 0; i < LTQ_DESC_NUM; i++) {
if (priv->chan_rx.skb[i])
- dev_kfree_skb_any(priv->chan_rx.skb[i]);
+ skb_free_frag(priv->chan_rx.rx_buff[i]);
}
rx_free:
@@ -482,7 +550,7 @@ static void xrx200_hw_cleanup(struct xrx200_priv *priv)
/* free the allocated RX ring */
for (i = 0; i < LTQ_DESC_NUM; i++)
- dev_kfree_skb_any(priv->chan_rx.skb[i]);
+ skb_free_frag(priv->chan_rx.rx_buff[i]);
}
static int xrx200_probe(struct platform_device *pdev)
@@ -505,7 +573,9 @@ static int xrx200_probe(struct platform_device *pdev)
net_dev->netdev_ops = &xrx200_netdev_ops;
SET_NETDEV_DEV(net_dev, dev);
net_dev->min_mtu = ETH_ZLEN;
- net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
+ priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
+ priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
/* load the memory ranges */
priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
@@ -550,8 +620,9 @@ static int xrx200_probe(struct platform_device *pdev)
PMAC_HD_CTL);
/* setup NAPI */
- netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32);
- netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32);
+ netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx);
+ netif_napi_add_tx(net_dev, &priv->chan_tx.napi,
+ xrx200_tx_housekeeping);
platform_set_drvdata(pdev, priv);
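This follows the netif_napi_add() signature change: the weight argument is gone and defaults to NAPI_POLL_WEIGHT, netif_napi_add_weight() remains for drivers that still need an explicit weight, and netif_napi_add_tx() replaces netif_tx_napi_add() for TX completion polling. The three forms as used in this patch:

	netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx);
	netif_napi_add_tx(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping);
	netif_napi_add_weight(dev, &priv->ch[i].napi, ltq_etop_poll_rx, 32);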
diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig
index f99adbf26ab4..04345b929d8e 100644
--- a/drivers/net/ethernet/litex/Kconfig
+++ b/drivers/net/ethernet/litex/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_LITEX
config LITEX_LITEETH
tristate "LiteX Ethernet support"
- depends on OF
+ depends on OF && HAS_IOMEM
help
If you wish to compile a kernel for hardware with a LiteX LiteEth
device then you should answer Y to this.
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index fdd99f0de424..35f24e0f0934 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -152,7 +152,8 @@ static int liteeth_stop(struct net_device *netdev)
return 0;
}
-static int liteeth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t liteeth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct liteeth *priv = netdev_priv(netdev);
void __iomem *txbuffer;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index fe0989c0fc25..884d64114bff 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -34,6 +34,7 @@ config MV643XX_ETH
config MVMDIO
tristate "Marvell MDIO interface support"
depends on HAS_IOMEM
+ select MDIO_DEVRES
select PHYLIB
help
This driver supports the MDIO interface found in the network
@@ -62,6 +63,7 @@ config MVNETA
select MVMDIO
select PHYLINK
select PAGE_POOL
+ select PAGE_POOL_STATS
help
This driver supports the network interface units in the
Marvell ARMADA XP, ARMADA 370, ARMADA 38x and
@@ -177,6 +179,7 @@ config SKY2_DEBUG
source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+source "drivers/net/ethernet/marvell/octeon_ep/Kconfig"
source "drivers/net/ethernet/marvell/prestera/Kconfig"
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 9f88fe822555..ceba4aa4f026 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -11,5 +11,6 @@ obj-$(CONFIG_MVPP2) += mvpp2/
obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
+obj-y += octeon_ep/
obj-y += octeontx2/
obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index bb14fa2241a3..3b129a1c3381 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -108,6 +108,7 @@ static char mv643xx_eth_driver_version[] = "1.4";
#define TXQ_COMMAND 0x0048
#define TXQ_FIX_PRIO_CONF 0x004c
#define PORT_SERIAL_CONTROL1 0x004c
+#define RGMII_EN 0x00000008
#define CLK125_BYPASS_EN 0x00000010
#define TX_BW_RATE 0x0050
#define TX_BW_MTU 0x0058
@@ -775,7 +776,7 @@ txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
u32 *first_cmd_sts, bool first_desc)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int hdr_len = skb_tcp_all_headers(skb);
int tx_index;
struct tx_desc *desc;
int ret;
@@ -1603,12 +1604,12 @@ mv643xx_eth_set_link_ksettings(struct net_device *dev,
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
+ strscpy(drvinfo->driver, mv643xx_eth_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, mv643xx_eth_driver_version,
+ strscpy(drvinfo->version, mv643xx_eth_driver_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
static int mv643xx_eth_get_coalesce(struct net_device *dev,
@@ -1638,7 +1639,9 @@ static int mv643xx_eth_set_coalesce(struct net_device *dev,
}
static void
-mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
+mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
+ struct kernel_ethtool_ringparam *kernel_er,
+ struct netlink_ext_ack *extack)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
@@ -1650,14 +1653,16 @@ mv643xx_eth_get_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
}
static int
-mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
+mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er,
+ struct kernel_ethtool_ringparam *kernel_er,
+ struct netlink_ext_ack *extack)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
if (er->rx_mini_pending || er->rx_jumbo_pending)
return -EINVAL;
- mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
+ mp->rx_ring_size = min(er->rx_pending, 4096U);
mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
MV643XX_MAX_SKB_DESCS * 2, 4096);
if (mp->tx_ring_size != er->tx_pending)
@@ -2477,6 +2482,7 @@ out_free:
for (i = 0; i < mp->rxq_count; i++)
rxq_deinit(mp->rxq + i);
out:
+ napi_disable(&mp->napi);
free_irq(dev->irq, dev);
return err;
@@ -2700,6 +2706,16 @@ MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
static struct platform_device *port_platdev[3];
+static void mv643xx_eth_shared_of_remove(void)
+{
+ int n;
+
+ for (n = 0; n < 3; n++) {
+ platform_device_del(port_platdev[n]);
+ port_platdev[n] = NULL;
+ }
+}
+
static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
struct device_node *pnp)
{
@@ -2736,7 +2752,9 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
return -EINVAL;
}
- of_get_mac_address(pnp, ppd.mac_addr);
+ ret = of_get_mac_address(pnp, ppd.mac_addr);
+ if (ret == -EPROBE_DEFER)
+ return ret;
mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2745,6 +2763,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
mv643xx_eth_property(pnp, "rx-sram-addr", ppd.rx_sram_addr);
mv643xx_eth_property(pnp, "rx-sram-size", ppd.rx_sram_size);
+ of_get_phy_mode(pnp, &ppd.interface);
+
ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);
if (!ppd.phy_node) {
ppd.phy_addr = MV643XX_ETH_PHY_NONE;
@@ -2800,21 +2820,13 @@ static int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_add_port(pdev, pnp);
if (ret) {
of_node_put(pnp);
+ mv643xx_eth_shared_of_remove();
return ret;
}
}
return 0;
}
-static void mv643xx_eth_shared_of_remove(void)
-{
- int n;
-
- for (n = 0; n < 3; n++) {
- platform_device_del(port_platdev[n]);
- port_platdev[n] = NULL;
- }
-}
#else
static inline int mv643xx_eth_shared_of_probe(struct platform_device *pdev)
{
@@ -3084,8 +3096,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
struct mv643xx_eth_private *mp;
struct net_device *dev;
struct phy_device *phydev = NULL;
- struct resource *res;
- int err;
+ u32 psc1r;
+ int err, irq;
pd = dev_get_platdata(&pdev->dev);
if (pd == NULL) {
@@ -3112,14 +3124,45 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mp->dev = dev;
- /* Kirkwood resets some registers on gated clocks. Especially
- * CLK125_BYPASS_EN must be cleared but is not available on
- * all other SoCs/System Controllers using this driver.
- */
if (of_device_is_compatible(pdev->dev.of_node,
- "marvell,kirkwood-eth-port"))
- wrlp(mp, PORT_SERIAL_CONTROL1,
- rdlp(mp, PORT_SERIAL_CONTROL1) & ~CLK125_BYPASS_EN);
+ "marvell,kirkwood-eth-port")) {
+ psc1r = rdlp(mp, PORT_SERIAL_CONTROL1);
+
+ /* Kirkwood resets some registers on gated clocks. Especially
+ * CLK125_BYPASS_EN must be cleared but is not available on
+ * all other SoCs/System Controllers using this driver.
+ */
+ psc1r &= ~CLK125_BYPASS_EN;
+
+ /* On Kirkwood with two Ethernet controllers, if both of them
+ * have RGMII_EN disabled, the first controller will be in GMII
+ * mode and the second one is effectively disabled, instead of
+ * two MII interfaces.
+ *
+ * To enable GMII in the first controller, the second one must
+ * also be configured (and may be enabled) with RGMII_EN
+ * disabled too, even though it cannot be used at all.
+ */
+ switch (pd->interface) {
+ /* Use internal to denote second controller being disabled */
+ case PHY_INTERFACE_MODE_INTERNAL:
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ psc1r &= ~RGMII_EN;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ psc1r |= RGMII_EN;
+ break;
+ default:
+ /* Unknown; don't touch */
+ break;
+ }
+
+ wrlp(mp, PORT_SERIAL_CONTROL1, psc1r);
+ }
/*
* Start with a default rate, and if there is a clock, allow
@@ -3176,14 +3219,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
- netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &mp->napi, mv643xx_eth_poll);
timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- BUG_ON(!res);
- dev->irq = res->start;
+ irq = platform_get_irq(pdev, 0);
+ if (WARN_ON(irq < 0)) {
+ err = irq;
+ goto out;
+ }
+ dev->irq = irq;
dev->netdev_ops = &mv643xx_eth_netdev_ops;
@@ -3197,7 +3243,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->hw_features = dev->features;
dev->priv_flags |= IFF_UNICAST_FLT;
- dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
+ netif_set_tso_max_segs(dev, MV643XX_MAX_TSO_SEGS);
/* MTU range: 64 - 9500 */
dev->min_mtu = 64;
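platform_get_irq() resolves the interrupt through the normal platform/OF lookup (IRQ resources are not statically populated for devicetree-probed devices), so it works where the old platform_get_resource(pdev, IORESOURCE_IRQ, 0) plus BUG_ON() did not; a minimal sketch of the lookup:

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER */
	dev->irq = irq;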
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index ef878973b859..8662543ca5c8 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -146,9 +146,6 @@ static int orion_mdio_smi_read(struct mii_bus *bus, int mii_id,
u32 val;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
if (ret < 0)
return ret;
@@ -177,9 +174,6 @@ static int orion_mdio_smi_write(struct mii_bus *bus, int mii_id,
struct orion_mdio_dev *dev = bus->priv;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
if (ret < 0)
return ret;
@@ -204,21 +198,17 @@ static const struct orion_mdio_ops orion_mdio_xsmi_ops = {
.poll_interval_max = MVMDIO_XSMI_POLL_INTERVAL_MAX,
};
-static int orion_mdio_xsmi_read(struct mii_bus *bus, int mii_id,
- int regnum)
+static int orion_mdio_xsmi_read_c45(struct mii_bus *bus, int mii_id,
+ int dev_addr, int regnum)
{
struct orion_mdio_dev *dev = bus->priv;
- u16 dev_addr = (regnum >> 16) & GENMASK(4, 0);
int ret;
- if (!(regnum & MII_ADDR_C45))
- return -EOPNOTSUPP;
-
ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
if (ret < 0)
return ret;
- writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG);
+ writel(regnum, dev->regs + MVMDIO_XSMI_ADDR_REG);
writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) |
(dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) |
MVMDIO_XSMI_READ_OPERATION,
@@ -237,21 +227,17 @@ static int orion_mdio_xsmi_read(struct mii_bus *bus, int mii_id,
return readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & GENMASK(15, 0);
}
-static int orion_mdio_xsmi_write(struct mii_bus *bus, int mii_id,
- int regnum, u16 value)
+static int orion_mdio_xsmi_write_c45(struct mii_bus *bus, int mii_id,
+ int dev_addr, int regnum, u16 value)
{
struct orion_mdio_dev *dev = bus->priv;
- u16 dev_addr = (regnum >> 16) & GENMASK(4, 0);
int ret;
- if (!(regnum & MII_ADDR_C45))
- return -EOPNOTSUPP;
-
ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
if (ret < 0)
return ret;
- writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG);
+ writel(regnum, dev->regs + MVMDIO_XSMI_ADDR_REG);
writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) |
(dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) |
MVMDIO_XSMI_WRITE_OPERATION | value,
@@ -302,8 +288,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
bus->write = orion_mdio_smi_write;
break;
case BUS_TYPE_XSMI:
- bus->read = orion_mdio_xsmi_read;
- bus->write = orion_mdio_xsmi_write;
+ bus->read_c45 = orion_mdio_xsmi_read_c45;
+ bus->write_c45 = orion_mdio_xsmi_write_c45;
break;
}
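Before the split, Clause 45 accesses were funnelled through the Clause 22 .read/.write ops with the device address packed into regnum, as the removed lines above show; a sketch of the old encoding next to the new dedicated ops:

	/* old: regnum carried MII_ADDR_C45 plus the device address in bits 20:16 */
	bus->read(bus, phy_addr, MII_ADDR_C45 | (dev_addr << 16) | reg);

	/* new: device address and register are separate arguments */
	bus->read_c45(bus, phy_addr, dev_addr, reg);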
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5a7bdca22a63..2cad76d0a50e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -38,6 +38,7 @@
#include <net/ipv6.h>
#include <net/tso.h>
#include <net/page_pool.h>
+#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
/* Registers */
@@ -75,6 +76,8 @@
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
+#define MVNETA_AC5_CNM_DDR_TARGET 0x2
+#define MVNETA_AC5_CNM_DDR_ATTR 0xb
#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
@@ -247,12 +250,39 @@
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
#define MVNETA_PORT_TX_RESET 0x3cf0
#define MVNETA_PORT_TX_DMA_RESET BIT(0)
+#define MVNETA_TXQ_CMD1_REG 0x3e00
+#define MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 BIT(3)
+#define MVNETA_TXQ_CMD1_BW_LIM_EN BIT(0)
+#define MVNETA_REFILL_NUM_CLK_REG 0x3e08
+#define MVNETA_REFILL_MAX_NUM_CLK 0x0000ffff
#define MVNETA_TX_MTU 0x3e0c
#define MVNETA_TX_TOKEN_SIZE 0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
+#define MVNETA_TXQ_BUCKET_REFILL_REG(q) (0x3e20 + ((q) << 2))
+#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK 0x3ff00000
+#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT 20
+#define MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX 0x0007ffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
+/* The values of the bucket refill base period and refill period are taken from
+ * the reference manual and add up to a base resolution of 10Kbps. This allows
+ * covering all rate-limit values from 10Kbps up to 5Gbps.
+ */
+
+/* Base period for the rate limit algorithm */
+#define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS 100
+
+/* Number of base periods to wait between each bucket refill */
+#define MVNETA_TXQ_BUCKET_REFILL_PERIOD 1000
+
+/* The base resolution for rate limiting, in bps. Any max_rate value should be
+ * a multiple of that value.
+ */
+#define MVNETA_TXQ_RATE_LIMIT_RESOLUTION (NSEC_PER_SEC / \
+ (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \
+ MVNETA_TXQ_BUCKET_REFILL_PERIOD))
+
#define MVNETA_LPI_CTRL_0 0x2cc0
#define MVNETA_LPI_CTRL_1 0x2cc4
#define MVNETA_LPI_REQUEST_ENABLE BIT(0)
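As a quick check of the arithmetic the new comment describes, the sketch below recomputes the base resolution and the maximum programmable rate in plain userspace C. The local defines only mirror the driver macros for illustration; nothing here is driver code.

#include <stdio.h>
#include <stdint.h>

/* Standalone sanity check of the rate-limit constants above; the defines are
 * local copies for illustration only, not the kernel definitions.
 */
#define NSEC_PER_SEC		1000000000ULL
#define BASE_PERIOD_NS		100	/* MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS */
#define REFILL_PERIOD		1000	/* MVNETA_TXQ_BUCKET_REFILL_PERIOD */
#define REFILL_VALUE_MAX	0x0007ffffULL	/* MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX */

int main(void)
{
	/* One token-bucket refill every 100 ns * 1000 = 100 us -> 10000 refills/s */
	uint64_t resolution = NSEC_PER_SEC / (BASE_PERIOD_NS * REFILL_PERIOD);

	printf("resolution : %llu bps (10 Kbps)\n",
	       (unsigned long long)resolution);
	printf("max rate   : %llu bps (~5 Gbps)\n",
	       (unsigned long long)(resolution * REFILL_VALUE_MAX));
	return 0;
}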
@@ -492,13 +522,13 @@ struct mvneta_port {
u8 mcast_count[256];
u16 tx_ring_size;
u16 rx_ring_size;
- u8 prio_tc_map[8];
phy_interface_t phy_interface;
struct device_node *dn;
unsigned int tx_csum_limit;
struct phylink *phylink;
struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
struct phy *comphy;
struct mvneta_bm *bm_priv;
@@ -516,6 +546,7 @@ struct mvneta_port {
/* Flags for special SoC configurations */
bool neta_armada3700;
+ bool neta_ac5;
u16 rx_offset_correction;
const struct mbus_dram_target_info *dram_target_info;
};
@@ -782,14 +813,14 @@ mvneta_get_stats64(struct net_device *dev,
cpu_stats = per_cpu_ptr(pp->stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
rx_packets = cpu_stats->es.ps.rx_packets;
rx_bytes = cpu_stats->es.ps.rx_bytes;
rx_dropped = cpu_stats->rx_dropped;
rx_errors = cpu_stats->rx_errors;
tx_packets = cpu_stats->es.ps.tx_packets;
tx_bytes = cpu_stats->es.ps.tx_bytes;
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -1856,8 +1887,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
bytes_compl += buf->skb->len;
pkts_compl++;
dev_kfree_skb_any(buf->skb);
- } else if (buf->type == MVNETA_TYPE_XDP_TX ||
- buf->type == MVNETA_TYPE_XDP_NDO) {
+ } else if ((buf->type == MVNETA_TYPE_XDP_TX ||
+ buf->type == MVNETA_TYPE_XDP_NDO) && buf->xdpf) {
if (napi && buf->type == MVNETA_TYPE_XDP_TX)
xdp_return_frame_rx_napi(buf->xdpf);
else
@@ -2032,61 +2063,104 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
static void
mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
- struct xdp_buff *xdp, struct skb_shared_info *sinfo,
- int sync_len)
+ struct xdp_buff *xdp, int sync_len)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i;
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
for (i = 0; i < sinfo->nr_frags; i++)
page_pool_put_full_page(rxq->page_pool,
skb_frag_page(&sinfo->frags[i]), true);
+
+out:
page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
sync_len, true);
}
static int
mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
- struct xdp_frame *xdpf, bool dma_map)
+ struct xdp_frame *xdpf, int *nxmit_byte, bool dma_map)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ struct device *dev = pp->dev->dev.parent;
struct mvneta_tx_desc *tx_desc;
- struct mvneta_tx_buf *buf;
- dma_addr_t dma_addr;
+ int i, num_frames = 1;
+ struct page *page;
- if (txq->count >= txq->tx_stop_threshold)
+ if (unlikely(xdp_frame_has_frags(xdpf)))
+ num_frames += sinfo->nr_frags;
+
+ if (txq->count + num_frames >= txq->size)
return MVNETA_XDP_DROPPED;
- tx_desc = mvneta_txq_next_desc_get(txq);
+ for (i = 0; i < num_frames; i++) {
+ struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ skb_frag_t *frag = NULL;
+ int len = xdpf->len;
+ dma_addr_t dma_addr;
- buf = &txq->buf[txq->txq_put_index];
- if (dma_map) {
- /* ndo_xdp_xmit */
- dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
- xdpf->len, DMA_TO_DEVICE);
- if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
- mvneta_txq_desc_put(txq);
- return MVNETA_XDP_DROPPED;
+ if (unlikely(i)) { /* paged area */
+ frag = &sinfo->frags[i - 1];
+ len = skb_frag_size(frag);
}
- buf->type = MVNETA_TYPE_XDP_NDO;
- } else {
- struct page *page = virt_to_page(xdpf->data);
- dma_addr = page_pool_get_dma_addr(page) +
- sizeof(*xdpf) + xdpf->headroom;
- dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
- xdpf->len, DMA_BIDIRECTIONAL);
- buf->type = MVNETA_TYPE_XDP_TX;
- }
- buf->xdpf = xdpf;
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ if (dma_map) {
+ /* ndo_xdp_xmit */
+ void *data;
+
+ data = unlikely(frag) ? skb_frag_address(frag)
+ : xdpf->data;
+ dma_addr = dma_map_single(dev, data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ mvneta_txq_desc_put(txq);
+ goto unmap;
+ }
- tx_desc->command = MVNETA_TXD_FLZ_DESC;
- tx_desc->buf_phys_addr = dma_addr;
- tx_desc->data_size = xdpf->len;
+ buf->type = MVNETA_TYPE_XDP_NDO;
+ } else {
+ page = unlikely(frag) ? skb_frag_page(frag)
+ : virt_to_page(xdpf->data);
+ dma_addr = page_pool_get_dma_addr(page);
+ if (unlikely(frag))
+ dma_addr += skb_frag_off(frag);
+ else
+ dma_addr += sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(dev, dma_addr, len,
+ DMA_BIDIRECTIONAL);
+ buf->type = MVNETA_TYPE_XDP_TX;
+ }
+ buf->xdpf = unlikely(i) ? NULL : xdpf;
- mvneta_txq_inc_put(txq);
- txq->pending++;
- txq->count++;
+ tx_desc->command = unlikely(i) ? 0 : MVNETA_TXD_F_DESC;
+ tx_desc->buf_phys_addr = dma_addr;
+ tx_desc->data_size = len;
+ *nxmit_byte += len;
+
+ mvneta_txq_inc_put(txq);
+ }
+ /* last descriptor */
+ tx_desc->command |= MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
+
+ txq->pending += num_frames;
+ txq->count += num_frames;
return MVNETA_XDP_TX;
+
+unmap:
+ for (i--; i >= 0; i--) {
+ mvneta_txq_desc_put(txq);
+ tx_desc = txq->descs + txq->next_desc_to_proc;
+ dma_unmap_single(dev, tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ }
+
+ return MVNETA_XDP_DROPPED;
}
static int
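For readers unfamiliar with the multi-buffer XDP helpers used by the rewritten submit path, the fragment walk reduces to the pattern below. This is a condensed kernel-context sketch assuming a valid xdp_frame; it is not a drop-in replacement for mvneta_xdp_submit_frame() and omits the DMA mapping and descriptor handling shown above.

#include <net/xdp.h>
#include <linux/skbuff.h>

/* Condensed sketch: visit the linear area of an xdp_frame plus any fragments. */
static void walk_xdp_frame(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	int i, num_bufs = 1;

	if (unlikely(xdp_frame_has_frags(xdpf)))
		num_bufs += sinfo->nr_frags;

	for (i = 0; i < num_bufs; i++) {
		void *data = xdpf->data;	/* buffer 0: linear area */
		unsigned int len = xdpf->len;

		if (i) {			/* buffers 1..n: paged fragments */
			skb_frag_t *frag = &sinfo->frags[i - 1];

			data = skb_frag_address(frag);
			len = skb_frag_size(frag);
		}
		/* map/queue (data, len) here; only the first buffer carries
		 * xdpf itself, mirroring buf->xdpf = unlikely(i) ? NULL : xdpf.
		 */
		(void)data;
		(void)len;
	}
}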
@@ -2095,8 +2169,8 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct mvneta_tx_queue *txq;
struct netdev_queue *nq;
+ int cpu, nxmit_byte = 0;
struct xdp_frame *xdpf;
- int cpu;
u32 ret;
xdpf = xdp_convert_buff_to_frame(xdp);
@@ -2108,10 +2182,10 @@ mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
nq = netdev_get_tx_queue(pp->dev, txq->id);
__netif_tx_lock(nq, cpu);
- ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
+ ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false);
if (ret == MVNETA_XDP_TX) {
u64_stats_update_begin(&stats->syncp);
- stats->es.ps.tx_bytes += xdpf->len;
+ stats->es.ps.tx_bytes += nxmit_byte;
stats->es.ps.tx_packets++;
stats->es.ps.xdp_tx++;
u64_stats_update_end(&stats->syncp);
@@ -2150,11 +2224,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
__netif_tx_lock(nq, cpu);
for (i = 0; i < num_frame; i++) {
- ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
+ ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte,
+ true);
if (ret != MVNETA_XDP_TX)
break;
- nxmit_byte += frames[i]->len;
nxmit++;
}
@@ -2177,7 +2251,6 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct bpf_prog *prog, struct xdp_buff *xdp,
u32 frame_sz, struct mvneta_stats *stats)
{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
unsigned int len, data_len, sync;
u32 ret, act;
@@ -2198,7 +2271,7 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
err = xdp_do_redirect(pp->dev, xdp, prog);
if (unlikely(err)) {
- mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sync);
ret = MVNETA_XDP_DROPPED;
} else {
ret = MVNETA_XDP_REDIR;
@@ -2209,16 +2282,16 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
case XDP_TX:
ret = mvneta_xdp_xmit_back(pp, xdp);
if (ret != MVNETA_XDP_TX)
- mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sync);
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(pp->dev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(pp->dev, prog, act);
fallthrough;
case XDP_DROP:
- mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
+ mvneta_xdp_put_buff(pp, rxq, xdp, sync);
ret = MVNETA_XDP_DROPPED;
stats->xdp_drop++;
break;
@@ -2241,7 +2314,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
int data_len = -MVNETA_MH_SIZE, len;
struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir;
- struct skb_shared_info *sinfo;
if (*size > MVNETA_MAX_RX_BUF_SIZE) {
len = MVNETA_MAX_RX_BUF_SIZE;
@@ -2261,11 +2333,9 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
/* Prefetch header */
prefetch(data);
+ xdp_buff_clear_frags_flag(xdp);
xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
data_len, false);
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
- sinfo->nr_frags = 0;
}
static void
@@ -2273,9 +2343,9 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc,
struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp, int *size,
- struct skb_shared_info *xdp_sinfo,
struct page *page)
{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir;
int data_len, len;
@@ -2293,25 +2363,25 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
len, dma_dir);
rx_desc->buf_phys_addr = 0;
- if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
- skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];
+ if (!xdp_buff_has_frags(xdp))
+ sinfo->nr_frags = 0;
+
+ if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
+ skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];
skb_frag_off_set(frag, pp->rx_offset_correction);
skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page);
+
+ if (!xdp_buff_has_frags(xdp)) {
+ sinfo->xdp_frags_size = *size;
+ xdp_buff_set_frags_flag(xdp);
+ }
+ if (page_is_pfmemalloc(page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
} else {
page_pool_put_full_page(rxq->page_pool, page, true);
}
-
- /* last fragment */
- if (len == *size) {
- struct skb_shared_info *sinfo;
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
- sinfo->nr_frags = xdp_sinfo->nr_frags;
- memcpy(sinfo->frags, xdp_sinfo->frags,
- sinfo->nr_frags * sizeof(skb_frag_t));
- }
*size -= len;
}
@@ -2320,8 +2390,11 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
struct xdp_buff *xdp, u32 desc_status)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
- int i, num_frags = sinfo->nr_frags;
struct sk_buff *skb;
+ u8 num_frags;
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ num_frags = sinfo->nr_frags;
skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
if (!skb)
@@ -2333,13 +2406,11 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
skb_put(skb, xdp->data_end - xdp->data);
skb->ip_summed = mvneta_rx_csum(pp, desc_status);
- for (i = 0; i < num_frags; i++) {
- skb_frag_t *frag = &sinfo->frags[i];
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- skb_frag_page(frag), skb_frag_off(frag),
- skb_frag_size(frag), PAGE_SIZE);
- }
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ xdp_update_skb_shared_info(skb, num_frags,
+ sinfo->xdp_frags_size,
+ num_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
@@ -2351,7 +2422,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
{
int rx_proc = 0, rx_todo, refill, size = 0;
struct net_device *dev = pp->dev;
- struct skb_shared_info sinfo;
struct mvneta_stats ps = {};
struct bpf_prog *xdp_prog;
u32 desc_status, frame_sz;
@@ -2360,8 +2430,6 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
xdp_buf.data_hard_start = NULL;
- sinfo.nr_frags = 0;
-
/* Get number of received packets */
rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
@@ -2403,7 +2471,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
}
mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
- &size, &sinfo, page);
+ &size, page);
} /* Middle or Last descriptor */
if (!(rx_status & MVNETA_RXD_LAST_DESC))
@@ -2411,7 +2479,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
continue;
if (size) {
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
goto next;
}
@@ -2423,7 +2491,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
if (IS_ERR(skb)) {
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
u64_stats_update_begin(&stats->syncp);
stats->es.skb_alloc_error++;
@@ -2440,11 +2508,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
napi_gro_receive(napi, skb);
next:
xdp_buf.data_hard_start = NULL;
- sinfo.nr_frags = 0;
}
if (xdp_buf.data_hard_start)
- mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
+ mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
if (ps.xdp_redirect)
xdp_do_flush_map();
@@ -2597,8 +2664,8 @@ err_drop_frame:
static inline void
mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq)
{
- int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+ int hdr_len = skb_tcp_all_headers(skb);
struct mvneta_tx_desc *tx_desc;
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2660,7 +2727,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
if ((txq->count + tso_count_descs(skb)) >= txq->size)
return 0;
- if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
+ if (skb_headlen(skb) < skb_tcp_all_headers(skb)) {
pr_info("*** Is this even possible?\n");
return 0;
}
@@ -3232,7 +3299,8 @@ static int mvneta_create_page_pool(struct mvneta_port *pp,
return err;
}
- err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0);
+ err = __xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0,
+ PAGE_SIZE);
if (err < 0)
goto err_free_pp;
@@ -3481,6 +3549,8 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
netdev_tx_reset_queue(nq);
+ txq->buf = NULL;
+ txq->tso_hdrs = NULL;
txq->descs = NULL;
txq->last_desc = 0;
txq->next_desc_to_proc = 0;
@@ -3712,6 +3782,7 @@ static void mvneta_percpu_disable(void *arg)
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
struct mvneta_port *pp = netdev_priv(dev);
+ struct bpf_prog *prog = pp->xdp_prog;
int ret;
if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
@@ -3720,8 +3791,11 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
}
- if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) {
- netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu);
+ if (prog && !prog->aux->xdp_has_frags &&
+ mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ netdev_info(dev, "Illegal MTU %d for XDP prog without frags\n",
+ mtu);
+
return -EINVAL;
}
@@ -3819,58 +3893,31 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
return 0;
}
-static void mvneta_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+static struct mvneta_port *mvneta_pcs_to_port(struct phylink_pcs *pcs)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ return container_of(pcs, struct mvneta_port, phylink_pcs);
+}
+static int mvneta_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
/* We only support QSGMII, SGMII, 802.3z and RGMII modes.
* When in 802.3z mode, we must have AN enabled:
* "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1."
*/
if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg)) {
- linkmode_zero(supported);
- return;
- }
-
- /* Allow all the expected bits */
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
-
- /* Asymmetric pause is unsupported */
- phylink_set(mask, Pause);
-
- /* Half-duplex at speeds higher than 100Mbit is unsupported */
- if (state->interface != PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
-
- if (state->interface == PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
-
- if (!phy_interface_mode_is_8023z(state->interface)) {
- /* 10M and 100M are only supported in non-802.3z mode */
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- }
+ !phylink_test(state->advertising, Autoneg))
+ return -EINVAL;
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ return 0;
}
-static void mvneta_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void mvneta_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct mvneta_port *pp = netdev_priv(ndev);
+ struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
u32 gmac_stat;
gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
@@ -3888,17 +3935,71 @@ static void mvneta_mac_pcs_get_state(struct phylink_config *config,
state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
- state->pause = 0;
if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
state->pause |= MLO_PAUSE_RX;
if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
state->pause |= MLO_PAUSE_TX;
}
-static void mvneta_mac_an_restart(struct phylink_config *config)
+static int mvneta_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode, phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct mvneta_port *pp = netdev_priv(ndev);
+ struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
+ u32 mask, val, an, old_an, changed;
+
+ mask = MVNETA_GMAC_INBAND_AN_ENABLE |
+ MVNETA_GMAC_INBAND_RESTART_AN |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_FLOW_CTRL_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN;
+
+ if (phylink_autoneg_inband(mode)) {
+ mask |= MVNETA_GMAC_CONFIG_MII_SPEED |
+ MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+ val = MVNETA_GMAC_INBAND_AN_ENABLE;
+
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ /* SGMII mode receives the speed and duplex from PHY */
+ val |= MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN;
+ } else {
+ /* 802.3z mode has fixed speed and duplex */
+ val |= MVNETA_GMAC_CONFIG_GMII_SPEED |
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX;
+
+ /* The FLOW_CTRL_EN bit selects whether the hardware
+ * automatically or the CONFIG_FLOW_CTRL bit manually
+ * controls the GMAC pause mode.
+ */
+ if (permit_pause_to_mac)
+ val |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
+
+ /* Update the advertisement bits */
+ mask |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
+ if (phylink_test(advertising, Pause))
+ val |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
+ }
+ } else {
+ /* Phy or fixed speed - disable in-band AN modes */
+ val = 0;
+ }
+
+ old_an = an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ an = (an & ~mask) | val;
+ changed = old_an ^ an;
+ if (changed)
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, an);
+
+ /* We are only interested in the advertisement bits changing */
+ return !!(changed & MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL);
+}
+
+static void mvneta_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct mvneta_port *pp = mvneta_pcs_to_port(pcs);
u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
@@ -3907,6 +4008,56 @@ static void mvneta_mac_an_restart(struct phylink_config *config)
gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
}
+static const struct phylink_pcs_ops mvneta_phylink_pcs_ops = {
+ .pcs_validate = mvneta_pcs_validate,
+ .pcs_get_state = mvneta_pcs_get_state,
+ .pcs_config = mvneta_pcs_config,
+ .pcs_an_restart = mvneta_pcs_an_restart,
+};
+
+static struct phylink_pcs *mvneta_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+
+ return &pp->phylink_pcs;
+}
+
+static int mvneta_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val;
+
+ if (pp->phy_interface != interface ||
+ phylink_autoneg_inband(mode)) {
+ /* Force the link down when changing the interface or if in
+ * in-band mode. According to Armada 370 documentation, we
+ * can only change the port mode and in-band enable when the
+ * link is down.
+ */
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
+ val |= MVNETA_GMAC_FORCE_LINK_DOWN;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ }
+
+ if (pp->phy_interface != interface)
+ WARN_ON(phy_power_off(pp->comphy));
+
+ /* Enable the 1ms clock */
+ if (phylink_autoneg_inband(mode)) {
+ unsigned long rate = clk_get_rate(pp->clk);
+
+ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER,
+ MVNETA_GMAC_1MS_CLOCK_ENABLE | (rate / 1000));
+ }
+
+ return 0;
+}
+
static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -3915,20 +4066,11 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
- u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
- u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
MVNETA_GMAC2_PORT_RESET);
new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
- new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
- new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
- MVNETA_GMAC_INBAND_RESTART_AN |
- MVNETA_GMAC_AN_SPEED_EN |
- MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
- MVNETA_GMAC_AN_FLOW_CTRL_EN |
- MVNETA_GMAC_AN_DUPLEX_EN);
/* Even though it might look weird, when we're configured in
* SGMII or QSGMII mode, the RGMII bit needs to be set.
@@ -3940,9 +4082,6 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
phy_interface_mode_is_8023z(state->interface))
new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
- if (phylink_test(state->advertising, Pause))
- new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
-
if (!phylink_autoneg_inband(mode)) {
/* Phy or fixed speed - nothing to do, leave the
* configured speed, duplex and flow control as-is.
@@ -3950,66 +4089,23 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
/* SGMII mode receives the state from the PHY */
new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
- new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
- new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
- MVNETA_GMAC_FORCE_LINK_PASS |
- MVNETA_GMAC_CONFIG_MII_SPEED |
- MVNETA_GMAC_CONFIG_GMII_SPEED |
- MVNETA_GMAC_CONFIG_FULL_DUPLEX)) |
- MVNETA_GMAC_INBAND_AN_ENABLE |
- MVNETA_GMAC_AN_SPEED_EN |
- MVNETA_GMAC_AN_DUPLEX_EN;
} else {
/* 802.3z negotiation - only 1000base-X */
new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
- new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
- new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
- MVNETA_GMAC_FORCE_LINK_PASS |
- MVNETA_GMAC_CONFIG_MII_SPEED)) |
- MVNETA_GMAC_INBAND_AN_ENABLE |
- MVNETA_GMAC_CONFIG_GMII_SPEED |
- /* The MAC only supports FD mode */
- MVNETA_GMAC_CONFIG_FULL_DUPLEX;
-
- if (state->pause & MLO_PAUSE_AN && state->an_enabled)
- new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
- }
-
- /* Armada 370 documentation says we can only change the port mode
- * and in-band enable when the link is down, so force it down
- * while making these changes. We also do this for GMAC_CTRL2
- */
- if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
- (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
- (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
- (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
- MVNETA_GMAC_FORCE_LINK_DOWN);
}
-
/* When at 2.5G, the link partner can send frames with shortened
* preambles.
*/
if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
- if (pp->phy_interface != state->interface) {
- if (pp->comphy)
- WARN_ON(phy_power_off(pp->comphy));
- WARN_ON(mvneta_config_interface(pp, state->interface));
- }
-
if (new_ctrl0 != gmac_ctrl0)
mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
if (new_ctrl2 != gmac_ctrl2)
mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
if (new_ctrl4 != gmac_ctrl4)
mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
- if (new_clk != gmac_clk)
- mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
- if (new_an != gmac_an)
- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
@@ -4018,6 +4114,36 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
}
}
+static int mvneta_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+ u32 val, clk;
+
+ /* Disable 1ms clock if not in in-band mode */
+ if (!phylink_autoneg_inband(mode)) {
+ clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+ clk &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, clk);
+ }
+
+ if (pp->phy_interface != interface)
+ /* Enable the Serdes PHY */
+ WARN_ON(mvneta_config_interface(pp, interface));
+
+ /* Allow the link to come up if in in-band mode, otherwise the
+ * link is forced via mac_link_down()/mac_link_up()
+ */
+ if (phylink_autoneg_inband(mode)) {
+ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+ val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ }
+
+ return 0;
+}
+
static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
{
u32 lpi_ctl1;
@@ -4098,16 +4224,16 @@ static void mvneta_mac_link_up(struct phylink_config *config,
mvneta_port_up(pp);
if (phy && pp->eee_enabled) {
- pp->eee_active = phy_init_eee(phy, 0) >= 0;
+ pp->eee_active = phy_init_eee(phy, false) >= 0;
mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
}
}
static const struct phylink_mac_ops mvneta_phylink_ops = {
- .validate = mvneta_validate,
- .mac_pcs_get_state = mvneta_mac_pcs_get_state,
- .mac_an_restart = mvneta_mac_an_restart,
+ .mac_select_pcs = mvneta_mac_select_pcs,
+ .mac_prepare = mvneta_mac_prepare,
.mac_config = mvneta_mac_config,
+ .mac_finish = mvneta_mac_finish,
.mac_link_down = mvneta_mac_link_down,
.mac_link_up = mvneta_mac_link_up,
};
@@ -4141,12 +4267,12 @@ static void mvneta_mdio_remove(struct mvneta_port *pp)
*/
static void mvneta_percpu_elect(struct mvneta_port *pp)
{
- int elected_cpu = 0, max_cpu, cpu, i = 0;
+ int elected_cpu = 0, max_cpu, cpu;
/* Use the cpu associated to the rxq when it is online, in all
* the other cases, use the cpu 0 which can't be offline.
*/
- if (cpu_online(pp->rxq_def))
+ if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
elected_cpu = pp->rxq_def;
max_cpu = num_present_cpus();
@@ -4181,8 +4307,6 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
*/
smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
pp, true);
- i++;
-
}
};
@@ -4419,8 +4543,9 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
struct mvneta_port *pp = netdev_priv(dev);
struct bpf_prog *old_prog;
- if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
- NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+ if (prog && !prog->aux->xdp_has_frags &&
+ dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "prog does not support XDP frags");
return -EOPNOTSUPP;
}
@@ -4530,17 +4655,20 @@ mvneta_ethtool_get_coalesce(struct net_device *dev,
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
+ strscpy(drvinfo->driver, MVNETA_DRIVER_NAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
+ strscpy(drvinfo->version, MVNETA_DRIVER_VERSION,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+ strscpy(drvinfo->bus_info, dev_name(&dev->dev),
sizeof(drvinfo->bus_info));
}
-static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+static void
+mvneta_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvneta_port *pp = netdev_priv(netdev);
@@ -4550,8 +4678,11 @@ static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
ring->tx_pending = pp->tx_ring_size;
}
-static int mvneta_ethtool_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+static int
+mvneta_ethtool_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvneta_port *pp = netdev_priv(dev);
@@ -4603,6 +4734,9 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
memcpy(data + i * ETH_GSTRING_LEN,
mvneta_statistics[i].name, ETH_GSTRING_LEN);
+
+ data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
+ page_pool_ethtool_stats_get_strings(data);
}
}
@@ -4627,7 +4761,7 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
stats = per_cpu_ptr(pp->stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
+ start = u64_stats_fetch_begin(&stats->syncp);
skb_alloc_error = stats->es.skb_alloc_error;
refill_error = stats->es.refill_error;
xdp_redirect = stats->es.ps.xdp_redirect;
@@ -4637,7 +4771,7 @@ mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
xdp_xmit_err = stats->es.ps.xdp_xmit_err;
xdp_tx = stats->es.ps.xdp_tx;
xdp_tx_err = stats->es.ps.xdp_tx_err;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
es->skb_alloc_error += skb_alloc_error;
es->refill_error += refill_error;
@@ -4715,6 +4849,17 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
}
}
+static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ int i;
+
+ for (i = 0; i < rxq_number; i++)
+ page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
+
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
static void mvneta_ethtool_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -4725,12 +4870,16 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
*data++ = pp->ethtool_stats[i];
+
+ mvneta_ethtool_pp_stats(pp, data);
}
static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
if (sset == ETH_SS_STATS)
- return ARRAY_SIZE(mvneta_statistics);
+ return ARRAY_SIZE(mvneta_statistics) +
+ page_pool_ethtool_stats_get_count();
+
return -EOPNOTSUPP;
}
@@ -4919,43 +5068,144 @@ static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
}
-static void mvneta_setup_rx_prio_map(struct mvneta_port *pp)
+static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
{
- u32 val = 0;
- int i;
+ u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ);
- for (i = 0; i < rxq_number; i++)
- val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]);
+ val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7);
+ val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq);
mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
}
+static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp)
+{
+ unsigned long core_clk_rate;
+ u32 refill_cycles;
+ u32 val;
+
+ core_clk_rate = clk_get_rate(pp->clk);
+ if (!core_clk_rate)
+ return -EINVAL;
+
+ refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS /
+ (NSEC_PER_SEC / core_clk_rate);
+
+ if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK)
+ return -EINVAL;
+
+ /* Enable bw limit algorithm version 3 */
+ val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
+ val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
+ mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
+
+ /* Set the base refill rate */
+ mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles);
+
+ return 0;
+}
+
+static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp)
+{
+ u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
+
+ val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
+ mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
+}
+
+static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue,
+ u64 min_rate, u64 max_rate)
+{
+ u32 refill_val, rem;
+ u32 val = 0;
+
+ /* Convert from Bps to bps */
+ max_rate *= 8;
+
+ if (min_rate)
+ return -EINVAL;
+
+ refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION,
+ &rem);
+
+ if (rem || !refill_val ||
+ refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX)
+ return -EINVAL;
+
+ val = refill_val;
+ val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD <<
+ MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT);
+
+ mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val);
+
+ return 0;
+}
+
static int mvneta_setup_mqprio(struct net_device *dev,
- struct tc_mqprio_qopt *qopt)
+ struct tc_mqprio_qopt_offload *mqprio)
{
struct mvneta_port *pp = netdev_priv(dev);
+ int rxq, txq, tc, ret;
u8 num_tc;
- int i;
- qopt->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- num_tc = qopt->num_tc;
+ if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
+ return 0;
+
+ num_tc = mqprio->qopt.num_tc;
if (num_tc > rxq_number)
return -EINVAL;
+ mvneta_clear_rx_prio_map(pp);
+
if (!num_tc) {
- mvneta_clear_rx_prio_map(pp);
+ mvneta_disable_per_queue_rate_limit(pp);
netdev_reset_tc(dev);
return 0;
}
- memcpy(pp->prio_tc_map, qopt->prio_tc_map, sizeof(pp->prio_tc_map));
+ netdev_set_num_tc(dev, mqprio->qopt.num_tc);
+
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
+ netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc],
+ mqprio->qopt.offset[tc]);
+
+ for (rxq = mqprio->qopt.offset[tc];
+ rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
+ rxq++) {
+ if (rxq >= rxq_number)
+ return -EINVAL;
+
+ mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
+ }
+ }
- mvneta_setup_rx_prio_map(pp);
+ if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
+ mvneta_disable_per_queue_rate_limit(pp);
+ return 0;
+ }
- netdev_set_num_tc(dev, qopt->num_tc);
- for (i = 0; i < qopt->num_tc; i++)
- netdev_set_tc_queue(dev, i, qopt->count[i], qopt->offset[i]);
+ if (mqprio->qopt.num_tc > txq_number)
+ return -EINVAL;
+
+ ret = mvneta_enable_per_queue_rate_limit(pp);
+ if (ret)
+ return ret;
+
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
+ for (txq = mqprio->qopt.offset[tc];
+ txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
+ txq++) {
+ if (txq >= txq_number)
+ return -EINVAL;
+
+ ret = mvneta_setup_queue_rates(pp, txq,
+ mqprio->min_rate[tc],
+ mqprio->max_rate[tc]);
+ if (ret)
+ return ret;
+ }
+ }
return 0;
}
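To make the two rate-limit helpers above concrete, the userspace sketch below walks through the numbers for one assumed configuration: a 250 MHz core clock (an example value, not a datasheet figure) and a per-TC max_rate of 100 Mbit/s, which the mqprio offload hands to the driver in bytes per second. The defines again just mirror the driver macros for illustration.

#include <stdio.h>
#include <stdint.h>

/* Standalone walk-through of mvneta_enable_per_queue_rate_limit() and
 * mvneta_setup_queue_rates(); all input values below are illustrative assumptions.
 */
#define NSEC_PER_SEC		1000000000ULL
#define BASE_PERIOD_NS		100
#define REFILL_PERIOD		1000
#define RATE_LIMIT_RESOLUTION	(NSEC_PER_SEC / (BASE_PERIOD_NS * REFILL_PERIOD))
#define REFILL_PERIOD_SHIFT	20

int main(void)
{
	uint64_t core_clk_rate = 250000000ULL;	/* assumed 250 MHz core clock */
	uint64_t max_rate = 12500000ULL;	/* 100 Mbit/s, expressed in Bps */

	/* enable path: core clock cycles per 100 ns base period -> 25 */
	uint64_t refill_cycles = BASE_PERIOD_NS / (NSEC_PER_SEC / core_clk_rate);

	/* per-queue path: convert to bps, divide by the 10 Kbps resolution -> 10000 */
	uint64_t refill_val = (max_rate * 8) / RATE_LIMIT_RESOLUTION;
	uint32_t reg = (uint32_t)(refill_val |
				  ((uint64_t)REFILL_PERIOD << REFILL_PERIOD_SHIFT));

	printf("refill_cycles = %llu\n", (unsigned long long)refill_cycles);
	printf("refill_val    = %llu\n", (unsigned long long)refill_val);
	printf("bucket reg    = 0x%08x\n", (unsigned int)reg);
	return 0;
}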
@@ -5094,6 +5344,10 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
win_protect |= 3 << (2 * i);
}
} else {
+ if (pp->neta_ac5)
+ mvreg_write(pp, MVNETA_WIN_BASE(0),
+ (MVNETA_AC5_CNM_DDR_ATTR << 8) |
+ MVNETA_AC5_CNM_DDR_TARGET);
/* For Armada3700 open default 4GB Mbus window, leaving
* arbitration of target/attribute to a different layer
* of configuration.
@@ -5143,29 +5397,72 @@ static int mvneta_probe(struct platform_device *pdev)
if (!dev)
return -ENOMEM;
- dev->irq = irq_of_parse_and_map(dn, 0);
- if (dev->irq == 0)
- return -EINVAL;
+ dev->tx_queue_len = MVNETA_MAX_TXD;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->netdev_ops = &mvneta_netdev_ops;
+ dev->ethtool_ops = &mvneta_eth_tool_ops;
+
+ pp = netdev_priv(dev);
+ spin_lock_init(&pp->lock);
+ pp->dn = dn;
+
+ pp->rxq_def = rxq_def;
+ pp->indir[0] = rxq_def;
err = of_get_phy_mode(dn, &phy_mode);
if (err) {
dev_err(&pdev->dev, "incorrect phy-mode\n");
- goto err_free_irq;
+ return err;
}
+ pp->phy_interface = phy_mode;
+
comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
- if (comphy == ERR_PTR(-EPROBE_DEFER)) {
- err = -EPROBE_DEFER;
- goto err_free_irq;
- } else if (IS_ERR(comphy)) {
+ if (comphy == ERR_PTR(-EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
+ if (IS_ERR(comphy))
comphy = NULL;
+
+ pp->comphy = comphy;
+
+ pp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(pp->base))
+ return PTR_ERR(pp->base);
+
+ /* Get special SoC configurations */
+ if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
+ pp->neta_armada3700 = true;
+ if (of_device_is_compatible(dn, "marvell,armada-ac5-neta")) {
+ pp->neta_armada3700 = true;
+ pp->neta_ac5 = true;
}
- pp = netdev_priv(dev);
- spin_lock_init(&pp->lock);
+ dev->irq = irq_of_parse_and_map(dn, 0);
+ if (dev->irq == 0)
+ return -EINVAL;
+
+ pp->clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(pp->clk))
+ pp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pp->clk)) {
+ err = PTR_ERR(pp->clk);
+ goto err_free_irq;
+ }
+
+ clk_prepare_enable(pp->clk);
+
+ pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
+ if (!IS_ERR(pp->clk_bus))
+ clk_prepare_enable(pp->clk_bus);
+
+ pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops;
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
+ pp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
+ MAC_100 | MAC_1000FD | MAC_2500FD;
+
phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_QSGMII,
pp->phylink_config.supported_interfaces);
@@ -5196,52 +5493,16 @@ static int mvneta_probe(struct platform_device *pdev)
phy_mode, &mvneta_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
- goto err_free_irq;
+ goto err_clk;
}
- dev->tx_queue_len = MVNETA_MAX_TXD;
- dev->watchdog_timeo = 5 * HZ;
- dev->netdev_ops = &mvneta_netdev_ops;
-
- dev->ethtool_ops = &mvneta_eth_tool_ops;
-
pp->phylink = phylink;
- pp->comphy = comphy;
- pp->phy_interface = phy_mode;
- pp->dn = dn;
-
- pp->rxq_def = rxq_def;
- pp->indir[0] = rxq_def;
-
- /* Get special SoC configurations */
- if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
- pp->neta_armada3700 = true;
-
- pp->clk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(pp->clk))
- pp->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(pp->clk)) {
- err = PTR_ERR(pp->clk);
- goto err_free_phylink;
- }
-
- clk_prepare_enable(pp->clk);
-
- pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
- if (!IS_ERR(pp->clk_bus))
- clk_prepare_enable(pp->clk_bus);
-
- pp->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(pp->base)) {
- err = PTR_ERR(pp->base);
- goto err_clk;
- }
/* Alloc per-cpu port structure */
pp->ports = alloc_percpu(struct mvneta_pcpu_port);
if (!pp->ports) {
err = -ENOMEM;
- goto err_clk;
+ goto err_free_phylink;
}
/* Alloc per-cpu stats */
@@ -5338,14 +5599,13 @@ static int mvneta_probe(struct platform_device *pdev)
* operation, so only single NAPI should be initialized.
*/
if (pp->neta_armada3700) {
- netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &pp->napi, mvneta_poll);
} else {
for_each_present_cpu(cpu) {
struct mvneta_pcpu_port *port =
per_cpu_ptr(pp->ports, cpu);
- netif_napi_add(dev, &port->napi, mvneta_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &port->napi, mvneta_poll);
port->pp = pp;
}
}
@@ -5354,8 +5614,14 @@ static int mvneta_probe(struct platform_device *pdev)
NETIF_F_TSO | NETIF_F_RXCSUM;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ if (!pp->bm_priv)
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
- dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
+ netif_set_tso_max_segs(dev, MVNETA_MAX_TSO_SEGS);
/* MTU range: 68 - 9676 */
dev->min_mtu = ETH_MIN_MTU;
@@ -5385,12 +5651,12 @@ err_netdev:
free_percpu(pp->stats);
err_free_ports:
free_percpu(pp->ports);
-err_clk:
- clk_disable_unprepare(pp->clk_bus);
- clk_disable_unprepare(pp->clk);
err_free_phylink:
if (pp->phylink)
phylink_destroy(pp->phylink);
+err_clk:
+ clk_disable_unprepare(pp->clk_bus);
+ clk_disable_unprepare(pp->clk);
err_free_irq:
irq_dispose_mapping(dev->irq);
return err;
@@ -5536,6 +5802,7 @@ static const struct of_device_id mvneta_match[] = {
{ .compatible = "marvell,armada-370-neta" },
{ .compatible = "marvell,armada-xp-neta" },
{ .compatible = "marvell,armada-3700-neta" },
+ { .compatible = "marvell,armada-ac5-neta" },
{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index cf8acabb90ac..11e603686a27 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1239,7 +1239,8 @@ struct mvpp2_port {
phy_interface_t phy_interface;
struct phylink *phylink;
struct phylink_config phylink_config;
- struct phylink_pcs phylink_pcs;
+ struct phylink_pcs pcs_gmac;
+ struct phylink_pcs pcs_xlg;
struct phy *comphy;
struct mvpp2_bm_pool *pool_long;
@@ -1529,6 +1530,7 @@ u32 mvpp2_read(struct mvpp2 *priv, u32 offset);
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+void mvpp2_dbgfs_exit(void);
void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 41d935d1aaf6..40aeaa7bd739 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -62,35 +62,38 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
- MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
- MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
- MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv4 flows, fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv4 flows, Not fragmented, no vlan tag */
@@ -132,35 +135,38 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
- MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
- MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
- MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv4 flows, fragmented, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
- MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
+ MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_IP_FRAG_TRUE |
+ MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv6 flows, not fragmented, no vlan tag */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 4a3baa7e0142..75e83ea2a926 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -691,6 +691,13 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent,
return 0;
}
+static struct dentry *mvpp2_root;
+
+void mvpp2_dbgfs_exit(void)
+{
+ debugfs_remove(mvpp2_root);
+}
+
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
{
debugfs_remove_recursive(priv->dbgfs_dir);
@@ -700,10 +707,9 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
{
- struct dentry *mvpp2_dir, *mvpp2_root;
+ struct dentry *mvpp2_dir;
int ret, i;
- mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
if (!mvpp2_root)
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 6da8a595026b..adc953611913 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1488,6 +1488,7 @@ static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
static bool mvpp2_is_xlg(phy_interface_t interface)
{
return interface == PHY_INTERFACE_MODE_10GBASER ||
+ interface == PHY_INTERFACE_MODE_5GBASER ||
interface == PHY_INTERFACE_MODE_XAUI;
}
@@ -1627,6 +1628,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port, phy_interface_t interface)
case PHY_INTERFACE_MODE_2500BASEX:
mvpp22_gop_init_sgmii(port);
break;
+ case PHY_INTERFACE_MODE_5GBASER:
case PHY_INTERFACE_MODE_10GBASER:
if (!mvpp2_port_supports_xlg(port))
goto invalid_conf;
@@ -1867,7 +1869,7 @@ static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
* design, incremented at different moments in the chain of packet processing,
* it is very likely that incoming packets could have been dropped after being
* counted by hardware but before reaching software statistics (most probably
- * multicast packets), and in the oppposite way, during transmission, FCS bytes
+ * multicast packets), and in the opposite way, during transmission, FCS bytes
* are added in between as well as TSO skb will be split and header bytes added.
* Hence, statistics gathered from userspace with ifconfig (software) and
* ethtool (hardware) cannot be compared.
@@ -2006,7 +2008,7 @@ mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
cpu_stats = per_cpu_ptr(port->stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
xdp_redirect = cpu_stats->xdp_redirect;
xdp_pass = cpu_stats->xdp_pass;
xdp_drop = cpu_stats->xdp_drop;
@@ -2014,7 +2016,7 @@ mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
xdp_xmit_err = cpu_stats->xdp_xmit_err;
xdp_tx = cpu_stats->xdp_tx;
xdp_tx_err = cpu_stats->xdp_tx_err;
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
xdp_stats->xdp_redirect += xdp_redirect;
xdp_stats->xdp_pass += xdp_pass;
@@ -2186,6 +2188,7 @@ static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port,
xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
switch (interface) {
+ case PHY_INTERFACE_MODE_5GBASER:
case PHY_INTERFACE_MODE_10GBASER:
val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
@@ -3820,7 +3823,7 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
}
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(port->dev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(port->dev, prog, act);
@@ -4993,6 +4996,14 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
for (i = 0; i < priv->port_count; i++) {
port = priv->port_list[i];
+ if (percpu && port->ntxqs >= num_possible_cpus() * 2)
+ xdp_set_features_flag(port->dev,
+ NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT);
+ else
+ xdp_clear_features_flag(port->dev);
+
mvpp2_swf_bm_pool_init(port);
if (status[i])
mvpp2_open(port->dev);
@@ -5112,12 +5123,12 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
cpu_stats = per_cpu_ptr(port->stats, cpu);
do {
- start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ start = u64_stats_fetch_begin(&cpu_stats->syncp);
rx_packets = cpu_stats->rx_packets;
rx_bytes = cpu_stats->rx_bytes;
tx_packets = cpu_stats->tx_packets;
tx_bytes = cpu_stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
@@ -5139,9 +5150,6 @@ static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- if (config.flags)
- return -EINVAL;
-
if (config.tx_type != HWTSTAMP_TX_OFF &&
config.tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
@@ -5425,16 +5433,19 @@ mvpp2_ethtool_get_coalesce(struct net_device *dev,
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
+ strscpy(drvinfo->driver, MVPP2_DRIVER_NAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
+ strscpy(drvinfo->version, MVPP2_DRIVER_VERSION,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+ strscpy(drvinfo->bus_info, dev_name(&dev->dev),
sizeof(drvinfo->bus_info));
}
-static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+static void
+mvpp2_ethtool_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -5444,8 +5455,11 @@ static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
ring->tx_pending = port->tx_ring_size;
}
-static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring)
+static int
+mvpp2_ethtool_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct mvpp2_port *port = netdev_priv(dev);
u16 prev_rx_ring_size = port->rx_ring_size;
@@ -5764,8 +5778,7 @@ static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
v->irq = irq_of_parse_and_map(port_node, 0);
if (v->irq <= 0)
return -EINVAL;
- netif_napi_add(port->dev, &v->napi, mvpp2_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(port->dev, &v->napi, mvpp2_poll);
port->nqvecs = 1;
@@ -5825,8 +5838,7 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
goto err;
}
- netif_napi_add(port->dev, &v->napi, mvpp2_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(port->dev, &v->napi, mvpp2_poll);
}
return 0;
@@ -6077,18 +6089,19 @@ static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
return true;
}
-static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
- struct fwnode_handle *fwnode,
- char **mac_from)
+static int mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
+ struct fwnode_handle *fwnode,
+ char **mac_from)
{
struct mvpp2_port *port = netdev_priv(dev);
char hw_mac_addr[ETH_ALEN] = {0};
char fw_mac_addr[ETH_ALEN];
+ int ret;
if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) {
*mac_from = "firmware node";
eth_hw_addr_set(dev, fw_mac_addr);
- return;
+ return 0;
}
if (priv->hw_version == MVPP21) {
@@ -6096,12 +6109,24 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
if (is_valid_ether_addr(hw_mac_addr)) {
*mac_from = "hardware";
eth_hw_addr_set(dev, hw_mac_addr);
- return;
+ return 0;
}
}
+ /* Only valid on OF enabled platforms */
+ ret = of_get_mac_address_nvmem(to_of_node(fwnode), fw_mac_addr);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ if (!ret) {
+ *mac_from = "nvmem cell";
+ eth_hw_addr_set(dev, fw_mac_addr);
+ return 0;
+ }
+
*mac_from = "random";
eth_hw_addr_random(dev);
+
+ return 0;
}
static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
@@ -6109,18 +6134,26 @@ static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
return container_of(config, struct mvpp2_port, phylink_config);
}
-static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs)
+static struct mvpp2_port *mvpp2_pcs_xlg_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mvpp2_port, pcs_xlg);
+}
+
+static struct mvpp2_port *mvpp2_pcs_gmac_to_port(struct phylink_pcs *pcs)
{
- return container_of(pcs, struct mvpp2_port, phylink_pcs);
+ return container_of(pcs, struct mvpp2_port, pcs_gmac);
}
static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
- struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ struct mvpp2_port *port = mvpp2_pcs_xlg_to_port(pcs);
u32 val;
- state->speed = SPEED_10000;
+ if (port->phy_interface == PHY_INTERFACE_MODE_5GBASER)
+ state->speed = SPEED_5000;
+ else
+ state->speed = SPEED_10000;
state->duplex = 1;
state->an_complete = 1;
@@ -6149,10 +6182,25 @@ static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
.pcs_config = mvpp2_xlg_pcs_config,
};
+static int mvpp2_gmac_pcs_validate(struct phylink_pcs *pcs,
+ unsigned long *supported,
+ const struct phylink_link_state *state)
+{
+ /* When in 802.3z mode, we must have AN enabled:
+ * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
+ * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
+ */
+ if (phy_interface_mode_is_8023z(state->interface) &&
+ !phylink_test(state->advertising, Autoneg))
+ return -EINVAL;
+
+ return 0;
+}
+
static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
- struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
u32 val;
val = readl(port->base + MVPP2_GMAC_STATUS0);
@@ -6189,7 +6237,7 @@ static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
- struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
u32 mask, val, an, old_an, changed;
mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
@@ -6243,7 +6291,7 @@ static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ struct mvpp2_port *port = mvpp2_pcs_gmac_to_port(pcs);
u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
@@ -6253,78 +6301,12 @@ static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
}
static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
+ .pcs_validate = mvpp2_gmac_pcs_validate,
.pcs_get_state = mvpp2_gmac_pcs_get_state,
.pcs_config = mvpp2_gmac_pcs_config,
.pcs_an_restart = mvpp2_gmac_pcs_an_restart,
};
-static void mvpp2_phylink_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- /* When in 802.3z mode, we must have AN enabled:
- * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
- * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
- */
- if (phy_interface_mode_is_8023z(state->interface) &&
- !phylink_test(state->advertising, Autoneg))
- goto empty_set;
-
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
-
- if (port->priv->global_tx_fc) {
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
- }
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_XAUI:
- if (mvpp2_port_supports_xlg(port)) {
- phylink_set_10g_modes(mask);
- phylink_set(mask, 10000baseKR_Full);
- }
- break;
-
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- case PHY_INTERFACE_MODE_SGMII:
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- break;
-
- case PHY_INTERFACE_MODE_1000BASEX:
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- break;
-
- case PHY_INTERFACE_MODE_2500BASEX:
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- break;
-
- default:
- goto empty_set;
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
- return;
-
-empty_set:
- linkmode_zero(supported);
-}
-
static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -6404,8 +6386,23 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
}
-static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface)
+static struct phylink_pcs *mvpp2_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ /* Select the appropriate PCS operations depending on the
+ * configured interface mode. We will only switch to a mode
+ * that the validate() checks have already passed.
+ */
+ if (mvpp2_is_xlg(interface))
+ return &port->pcs_xlg;
+ else
+ return &port->pcs_gmac;
+}
+
+static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
@@ -6454,31 +6451,9 @@ static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
}
}
- /* Select the appropriate PCS operations depending on the
- * configured interface mode. We will only switch to a mode
- * that the validate() checks have already passed.
- */
- if (mvpp2_is_xlg(interface))
- port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops;
- else
- port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops;
-
return 0;
}
-static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface)
-{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- int ret;
-
- ret = mvpp2__mac_prepare(config, mode, interface);
- if (ret == 0)
- phylink_set_pcs(port->phylink, &port->phylink_pcs);
-
- return ret;
-}
-
static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -6649,7 +6624,7 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
}
static const struct phylink_mac_ops mvpp2_phylink_ops = {
- .validate = mvpp2_phylink_validate,
+ .mac_select_pcs = mvpp2_select_pcs,
.mac_prepare = mvpp2_mac_prepare,
.mac_config = mvpp2_mac_config,
.mac_finish = mvpp2_mac_finish,
@@ -6667,12 +6642,15 @@ static void mvpp2_acpi_start(struct mvpp2_port *port)
struct phylink_link_state state = {
.interface = port->phy_interface,
};
- mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND,
- port->phy_interface);
+ struct phylink_pcs *pcs;
+
+ pcs = mvpp2_select_pcs(&port->phylink_config, port->phy_interface);
+
+ mvpp2_mac_prepare(&port->phylink_config, MLO_AN_INBAND,
+ port->phy_interface);
mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
- port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND,
- port->phy_interface,
- state.advertising, false);
+ pcs->ops->pcs_config(pcs, MLO_AN_INBAND, port->phy_interface,
+ state.advertising, false);
mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
port->phy_interface);
mvpp2_mac_link_up(&port->phylink_config, NULL,
@@ -6851,7 +6829,9 @@ static int mvpp2_port_probe(struct platform_device *pdev,
mutex_init(&port->gather_stats_lock);
INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
- mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
+ err = mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
+ if (err < 0)
+ goto err_free_stats;
port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
@@ -6899,9 +6879,14 @@ static int mvpp2_port_probe(struct platform_device *pdev,
if (!port->priv->percpu_pools)
mvpp2_set_hw_csum(port, port->pool_long->id);
+ else if (port->ntxqs >= num_possible_cpus() * 2)
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
dev->vlan_features |= features;
- dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
+ netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS);
+
dev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 68 - 9704 */
@@ -6910,15 +6895,50 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
dev->dev.of_node = port_node;
+ port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
+ port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops;
+
if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
port->phylink_config.dev = &dev->dev;
port->phylink_config.type = PHYLINK_NETDEV;
+ port->phylink_config.mac_capabilities =
+ MAC_2500FD | MAC_1000FD | MAC_100 | MAC_10;
+
+ if (port->priv->global_tx_fc)
+ port->phylink_config.mac_capabilities |=
+ MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
if (mvpp2_port_supports_xlg(port)) {
- __set_bit(PHY_INTERFACE_MODE_10GBASER,
- port->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_XAUI,
- port->phylink_config.supported_interfaces);
+ /* If a COMPHY is present, we can support any of
+ * the serdes modes and switch between them.
+ */
+ if (comphy) {
+ __set_bit(PHY_INTERFACE_MODE_5GBASER,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_XAUI,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_5GBASER) {
+ __set_bit(PHY_INTERFACE_MODE_5GBASER,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_10GBASER) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_XAUI) {
+ __set_bit(PHY_INTERFACE_MODE_XAUI,
+ port->phylink_config.supported_interfaces);
+ }
+
+ if (comphy)
+ port->phylink_config.mac_capabilities |=
+ MAC_10000FD | MAC_5000FD;
+ else if (phy_mode == PHY_INTERFACE_MODE_5GBASER)
+ port->phylink_config.mac_capabilities |=
+ MAC_5000FD;
+ else
+ port->phylink_config.mac_capabilities |=
+ MAC_10000FD;
}
if (mvpp2_port_supports_rgmii(port))
@@ -7357,6 +7377,7 @@ static int mvpp2_get_sram(struct platform_device *pdev,
struct mvpp2 *priv)
{
struct resource *res;
+ void __iomem *base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (!res) {
@@ -7367,9 +7388,12 @@ static int mvpp2_get_sram(struct platform_device *pdev,
return 0;
}
- priv->cm3_base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- return PTR_ERR_OR_ZERO(priv->cm3_base);
+ priv->cm3_base = base;
+ return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
@@ -7711,7 +7735,18 @@ static struct platform_driver mvpp2_driver = {
},
};
-module_platform_driver(mvpp2_driver);
+static int __init mvpp2_driver_init(void)
+{
+ return platform_driver_register(&mvpp2_driver);
+}
+module_init(mvpp2_driver_init);
+
+static void __exit mvpp2_driver_exit(void)
+{
+ platform_driver_unregister(&mvpp2_driver);
+ mvpp2_dbgfs_exit();
+}
+module_exit(mvpp2_driver_exit);
MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 75ba57bd1d46..9af22f497a40 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -1539,8 +1539,8 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
if (!priv->prs_double_vlans)
return -ENOMEM;
- /* Double VLAN: 0x8100, 0x88A8 */
- err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
+ /* Double VLAN: 0x88A8, 0x8100 */
+ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q,
MVPP2_PRS_PORT_MASK);
if (err)
return err;
@@ -1607,59 +1607,45 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
struct mvpp2_prs_entry pe;
- int tid;
-
- /* IPv4 over PPPoE with options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
-
- memset(&pe, 0, sizeof(pe));
- mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
- pe.index = tid;
-
- mvpp2_prs_match_etype(&pe, 0, PPP_IP);
-
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
- MVPP2_PRS_RI_L3_PROTO_MASK);
- /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
- sizeof(struct iphdr) - 4,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
- /* Set L3 offset */
- mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
- MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
-
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
- mvpp2_prs_hw_write(priv, &pe);
+ int tid, ihl;
- /* IPv4 over PPPoE without options */
- tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
- MVPP2_PE_LAST_FREE_TID);
- if (tid < 0)
- return tid;
+ /* IPv4 over PPPoE with header length >= 5 */
+ for (ihl = MVPP2_PRS_IPV4_IHL_MIN; ihl <= MVPP2_PRS_IPV4_IHL_MAX; ihl++) {
+ tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+ MVPP2_PE_LAST_FREE_TID);
+ if (tid < 0)
+ return tid;
- pe.index = tid;
+ memset(&pe, 0, sizeof(pe));
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+ pe.index = tid;
- mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
- MVPP2_PRS_IPV4_HEAD |
- MVPP2_PRS_IPV4_IHL_MIN,
- MVPP2_PRS_IPV4_HEAD_MASK |
- MVPP2_PRS_IPV4_IHL_MASK);
+ mvpp2_prs_match_etype(&pe, 0, PPP_IP);
+ mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_IPV4_HEAD | ihl,
+ MVPP2_PRS_IPV4_HEAD_MASK |
+ MVPP2_PRS_IPV4_IHL_MASK);
- /* Clear ri before updating */
- pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
- pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
- mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
- MVPP2_PRS_RI_L3_PROTO_MASK);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+ MVPP2_PRS_RI_L3_PROTO_MASK);
+ /* goto ipv4 dst-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Set L3 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+ MVPP2_ETH_TYPE_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+ /* Set L4 offset */
+ mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+ MVPP2_ETH_TYPE_LEN + (ihl * 4),
+ MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- /* Update shadow table and hw entry */
- mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
- mvpp2_prs_hw_write(priv, &pe);
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+ mvpp2_prs_hw_write(priv, &pe);
+ }
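For reference, a worked example of the offset arithmetic in the loop above; the IHL values below are illustrative, not additional patch content:

    /* IHL counts 32-bit words, so the L4 offset programmed above is
     * MVPP2_ETH_TYPE_LEN + ihl * 4:
     *   ihl = 5  (20-byte IPv4 header, no options)   -> ethertype length + 20
     *   ihl = 15 (60-byte IPv4 header, the protocol maximum) -> ethertype length + 60
     * One TCAM entry is allocated per IHL value in the range.
     */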
/* IPv6 over PPPoE */
tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Kconfig b/drivers/net/ethernet/marvell/octeon_ep/Kconfig
new file mode 100644
index 000000000000..0d7db815340e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell's Octeon PCI Endpoint NIC Driver Configuration
+#
+
+config OCTEON_EP
+ tristate "Marvell Octeon PCI Endpoint NIC Driver"
+ depends on 64BIT
+ depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ This driver supports networking functionality of Marvell's
+ Octeon PCI Endpoint NIC.
+
+ To know the list of devices supported by this driver, refer
+ documentation in
+ <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst>.
+
+ To compile this drivers as a module, choose M here. Name of the
+ module is octeon_ep.
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Makefile b/drivers/net/ethernet/marvell/octeon_ep/Makefile
new file mode 100644
index 000000000000..2026c8118158
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Network driver for Marvell's Octeon PCI Endpoint NIC
+#
+
+obj-$(CONFIG_OCTEON_EP) += octeon_ep.o
+
+octeon_ep-y := octep_main.o octep_cn9k_pf.o octep_tx.o octep_rx.o \
+ octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
new file mode 100644
index 000000000000..90c3a419932d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
@@ -0,0 +1,755 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_regs_cn9k_pf.h"
+
+#define CTRL_MBOX_MAX_PF 128
+#define CTRL_MBOX_SZ ((size_t)(0x400000 / CTRL_MBOX_MAX_PF))
+
+#define FW_HB_INTERVAL_IN_SECS 1
+#define FW_HB_MISS_COUNT 10
+
+/* Names of Hardware non-queue generic interrupts */
+static char *cn93_non_ioq_msix_names[] = {
+ "epf_ire_rint",
+ "epf_ore_rint",
+ "epf_vfire_rint0",
+ "epf_vfire_rint1",
+ "epf_vfore_rint0",
+ "epf_vfore_rint1",
+ "epf_mbox_rint0",
+ "epf_mbox_rint1",
+ "epf_oei_rint",
+ "epf_dma_rint",
+ "epf_dma_vf_rint0",
+ "epf_dma_vf_rint1",
+ "epf_pp_vf_rint0",
+ "epf_pp_vf_rint1",
+ "epf_misc_rint",
+ "epf_rsvd",
+};
+
+/* Dump useful hardware CSRs for debug purpose */
+static void cn93_dump_regs(struct octep_device *oct, int qno)
+{
+ struct device *dev = &oct->pdev->dev;
+
+ dev_info(dev, "IQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_DBELL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno)));
+ dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_CONTROL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno)));
+ dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_ENABLE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_BADDR(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno)));
+ dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INSTR_RSIZE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(qno)));
+ dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_CNTS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_CNTS(qno)));
+ dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_INT_LEVELS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_PKT_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_IN_BYTE_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(qno)));
+
+ dev_info(dev, "OQ-%d register dump\n", qno);
+ dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_DBELL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(qno)));
+ dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_CONTROL(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(qno)));
+ dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_ENABLE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_BADDR(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(qno)));
+ dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_SLIST_RSIZE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(qno)));
+ dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_CNTS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_CNTS(qno)));
+ dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_INT_LEVELS(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(qno)));
+ dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_PKT_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(qno)));
+ dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_OUT_BYTE_CNT(qno),
+ octep_read_csr64(oct, CN93_SDP_R_OUT_BYTE_CNT(qno)));
+ dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
+ qno, CN93_SDP_R_ERR_TYPE(qno),
+ octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(qno)));
+}
+
+/* Reset Hardware Tx queue */
+static int cn93_reset_iq(struct octep_device *oct, int q_no)
+{
+ struct octep_config *conf = oct->conf;
+ u64 val = 0ULL;
+
+ dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no);
+
+ /* Get absolute queue number */
+ q_no += conf->pf_ring_cfg.srn;
+
+ /* Disable the Tx/Instruction Ring */
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(q_no), val);
+
+ /* clear the Instruction Ring packet/byte counts and doorbell CSRs */
+ octep_write_csr64(oct, CN93_SDP_R_IN_CNTS(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_PKT_CNT(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(q_no), val);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(q_no), val);
+
+ val = 0xFFFFFFFF;
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(q_no), val);
+
+ return 0;
+}
+
+/* Reset Hardware Rx queue */
+static void cn93_reset_oq(struct octep_device *oct, int q_no)
+{
+ u64 val = 0ULL;
+
+ q_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ /* Disable Output (Rx) Ring */
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(q_no), val);
+
+ /* Clear count CSRs */
+ val = octep_read_csr(oct, CN93_SDP_R_OUT_CNTS(q_no));
+ octep_write_csr(oct, CN93_SDP_R_OUT_CNTS(q_no), val);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF);
+}
+
+/* Reset all hardware Tx/Rx queues */
+static void octep_reset_io_queues_cn93_pf(struct octep_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int q;
+
+ dev_dbg(&pdev->dev, "Reset OCTEP_CN93 PF IO Queues\n");
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ cn93_reset_iq(oct, q);
+ cn93_reset_oq(oct, q);
+ }
+}
+
+/* Initialize windowed addresses to access some hardware registers */
+static void octep_setup_pci_window_regs_cn93_pf(struct octep_device *oct)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+
+ oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_ADDR64);
+ oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_ADDR64);
+ oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_DATA64);
+ oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_DATA64);
+}
+
+/* Configure Hardware mapping: inform hardware which rings belong to PF. */
+static void octep_configure_ring_mapping_cn93_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ int q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) {
+ u64 regval = 0;
+
+ if (oct->pcie_port)
+ regval = 8 << CN93_SDP_FUNC_SEL_EPF_BIT_POS;
+
+ octep_write_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q), regval);
+
+ regval = octep_read_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q));
+ dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n",
+ CN93_SDP_EPVF_RING(pf_srn + q), regval);
+ }
+}
+
+/* Initialize configuration limits and initial active config 93xx PF. */
+static void octep_init_config_cn93_pf(struct octep_device *oct)
+{
+ struct octep_config *conf = oct->conf;
+ struct pci_dev *pdev = oct->pdev;
+ u8 link = 0;
+ u64 val;
+ int pos;
+
+ /* Read ring configuration:
+ * PF ring count, number of VFs and rings per VF supported
+ */
+ val = octep_read_csr64(oct, CN93_SDP_EPF_RINFO);
+ conf->sriov_cfg.max_rings_per_vf = CN93_SDP_EPF_RINFO_RPVF(val);
+ conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf;
+ conf->sriov_cfg.max_vfs = CN93_SDP_EPF_RINFO_NVFS(val);
+ conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs;
+ conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val);
+
+ val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port));
+ conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val);
+ conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val);
+ conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
+ dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n",
+ conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf,
+ conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings);
+
+ conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;
+ conf->iq.instr_type = OCTEP_64BYTE_INSTR;
+ conf->iq.pkind = 0;
+ conf->iq.db_min = OCTEP_DB_MIN;
+ conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;
+
+ conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS;
+ conf->oq.buf_size = OCTEP_OQ_BUF_SIZE;
+ conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD;
+ conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD;
+ conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD;
+
+ conf->msix_cfg.non_ioq_msix = CN93_NUM_NON_IOQ_INTR;
+ conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings;
+ conf->msix_cfg.non_ioq_msix_names = cn93_non_ioq_msix_names;
+
+ pos = pci_find_ext_capability(oct->pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (pos) {
+ pci_read_config_byte(oct->pdev,
+ pos + PCI_SRIOV_FUNC_LINK,
+ &link);
+ link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link);
+ }
+ conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr +
+ (0x400000ull * 7) +
+ (link * CTRL_MBOX_SZ);
+
+ conf->hb_interval = FW_HB_INTERVAL_IN_SECS;
+ conf->max_hb_miss_cnt = FW_HB_MISS_COUNT;
+
+
+/* Setup registers for a hardware Tx Queue */
+static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ struct octep_iq *iq = oct->iq[iq_no];
+ u32 reset_instr_cnt;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no));
+
+ /* wait for IDLE to set to 1 */
+ if (!(reg_val & CN93_R_IN_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no));
+ } while (!(reg_val & CN93_R_IN_CTL_IDLE));
+ }
+
+ reg_val |= CN93_R_IN_CTL_RDSIZE;
+ reg_val |= CN93_R_IN_CTL_IS_64B;
+ reg_val |= CN93_R_IN_CTL_ESR;
+ octep_write_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no), reg_val);
+
+ /* Write the start of the input queue's ring and its size */
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(iq_no),
+ iq->desc_ring_dma);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(iq_no),
+ iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_INSTR_DBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_CNTS(iq_no);
+ iq->intr_lvl_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_IN_INT_LEVELS(iq_no);
+
+ /* Store the current instruction counter (used in flush_iq calculation) */
+ reset_instr_cnt = readl(iq->inst_cnt_reg);
+ writel(reset_instr_cnt, iq->inst_cnt_reg);
+
+ /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */
+ reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff;
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+}
+
+/* Setup registers for a hardware Rx Queue */
+static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val;
+ u64 oq_ctl = 0ULL;
+ u32 time_threshold = 0;
+ struct octep_oq *oq = oct->oq[oq_no];
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+
+ /* wait for IDLE to set to 1 */
+ if (!(reg_val & CN93_R_OUT_CTL_IDLE)) {
+ do {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+ } while (!(reg_val & CN93_R_OUT_CTL_IDLE));
+ }
+
+ reg_val &= ~(CN93_R_OUT_CTL_IMODE);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_P);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_P);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_I);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_I);
+ reg_val &= ~(CN93_R_OUT_CTL_ES_I);
+ reg_val &= ~(CN93_R_OUT_CTL_ROR_D);
+ reg_val &= ~(CN93_R_OUT_CTL_NSR_D);
+ reg_val &= ~(CN93_R_OUT_CTL_ES_D);
+ reg_val |= (CN93_R_OUT_CTL_ES_P);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(oq_no),
+ oq->desc_ring_dma);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(oq_no),
+ oq->max_count);
+
+ oq_ctl = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no));
+ oq_ctl &= ~0x7fffffULL; //clear the ISIZE and BSIZE (22-0)
+ oq_ctl |= (oq->buffer_size & 0xffff); //populate the BSIZE (15-0)
+ octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ oq->pkts_sent_reg = oct->mmio[0].hw_addr + CN93_SDP_R_OUT_CNTS(oq_no);
+ oq->pkts_credit_reg = oct->mmio[0].hw_addr +
+ CN93_SDP_R_OUT_SLIST_DBELL(oq_no);
+
+ time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
+ reg_val = ((u64)time_threshold << 32) |
+ CFG_GET_OQ_INTR_PKT(oct->conf);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+}
+
+/* Setup registers for a PF mailbox */
+static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no)
+{
+ struct octep_mbox *mbox = oct->mbox[q_no];
+
+ mbox->q_no = q_no;
+
+ /* PF mbox interrupt reg */
+ mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0);
+
+ /* PF to VF DATA reg. PF writes into this reg */
+ mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no);
+
+ /* VF to PF DATA reg. PF reads from this reg */
+ mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no);
+}
+
+/* Process non-ioq interrupts required to keep pf interface running.
+ * OEI_RINT is needed for control mailbox
+ */
+static bool octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct)
+{
+ bool handled = false;
+ u64 reg0;
+
+ /* Check for OEI INTR */
+ reg0 = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
+ if (reg0) {
+ dev_info(&oct->pdev->dev,
+ "Received OEI_RINT intr: 0x%llx\n",
+ reg0);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg0);
+ if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX)
+ queue_work(octep_wq, &oct->ctrl_mbox_task);
+ else if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT)
+ atomic_set(&oct->hb_miss_cnt, 0);
+
+ handled = true;
+ }
+
+ return handled;
+}
+
+/* Interrupts handler for all non-queue generic interrupts. */
+static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+ int i = 0;
+
+ /* Check for IRERR INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_IRERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "received IRERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT, reg_val);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct,
+ CN93_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on IQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+ goto irq_handled;
+ }
+
+ /* Check for ORERR INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received ORERR_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT, reg_val);
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(i));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received err type on OQ-%d: 0x%llx\n",
+ i, reg_val);
+ octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i),
+ reg_val);
+ }
+ }
+
+ goto irq_handled;
+ }
+
+ /* Check for VFIRE INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFIRE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for VFORE INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received VFORE_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for MBOX INTR and OEI INTR */
+ if (octep_poll_non_ioq_interrupts_cn93_pf(oct))
+ goto irq_handled;
+
+ /* Check for DMA INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT);
+ if (reg_val) {
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for DMA VF INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received DMA_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for PPVF INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0));
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received PP_VF_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val);
+ goto irq_handled;
+ }
+
+ /* Check for MISC INTR */
+ reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT);
+ if (reg_val) {
+ dev_info(&pdev->dev,
+ "Received MISC_RINT intr: 0x%llx\n", reg_val);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val);
+ goto irq_handled;
+ }
+
+ dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n");
+irq_handled:
+ return IRQ_HANDLED;
+}
+
+/* Tx/Rx queue interrupt handler */
+static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data)
+{
+ struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data;
+ struct octep_oq *oq = vector->oq;
+
+ napi_schedule_irqoff(oq->napi);
+ return IRQ_HANDLED;
+}
+
+/* soft reset of 93xx */
+static int octep_soft_reset_cn93_pf(struct octep_device *oct)
+{
+ dev_info(&oct->pdev->dev, "CN93XX: Doing soft reset\n");
+
+ octep_write_csr64(oct, CN93_SDP_WIN_WR_MASK_REG, 0xFF);
+
+ /* Set core domain reset bit */
+ OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1S, 1);
+ /* Wait for 100ms as Octeon resets. */
+ mdelay(100);
+ /* clear core domain reset bit */
+ OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1C, 1);
+
+ return 0;
+}
+
+/* Re-initialize Octeon hardware registers */
+static void octep_reinit_regs_cn93_pf(struct octep_device *oct)
+{
+ u32 i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_iq_regs(oct, i);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ oct->hw_ops.setup_oq_regs(oct, i);
+
+ oct->hw_ops.enable_interrupts(oct);
+ oct->hw_ops.enable_io_queues(oct);
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/* Enable all interrupts */
+static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
+}
+
+/* Disable all interrupts */
+static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
+{
+ u64 intr_mask = 0ULL;
+ int srn, num_rings, i;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (i = 0; i < num_rings; i++)
+ intr_mask |= (0x1ULL << (srn + i));
+
+ octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
+}
+
+/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
+static u32 octep_update_iq_read_index_cn93_pf(struct octep_iq *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done, new_idx;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ new_idx = (iq->octep_read_index + last_done) % iq->max_count;
+
+ return new_idx;
+}
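The subtraction above relies on unsigned 32-bit wraparound of the hardware instruction counter; a small sketch with assumed values (illustration only, not part of the patch):

    u32 prev = 0xfffffff0;          /* snapshot from the previous poll      */
    u32 now  = 0x00000010;          /* counter has since wrapped past 2^32  */
    u32 done = now - prev;          /* unsigned wraparound yields 0x20 (32) */
    u32 idx  = (100 + done) % 1024; /* 1024-entry ring -> new index 132     */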
+
+/* Enable a hardware Tx Queue */
+static void octep_enable_iq_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ u64 loop = HZ;
+ u64 reg_val;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF);
+
+ while (octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no)) &&
+ loop--) {
+ schedule_timeout_interruptible(1);
+ }
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Enable a hardware Rx Queue */
+static void octep_enable_oq_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no));
+ reg_val |= (0x1ULL << 62);
+ octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
+
+ octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no));
+ reg_val |= 0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Enable all hardware Tx/Rx Queues assigned to PF */
+static void octep_enable_io_queues_cn93_pf(struct octep_device *oct)
+{
+ u8 q;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_enable_iq_cn93_pf(oct, q);
+ octep_enable_oq_cn93_pf(oct, q);
+ }
+}
+
+/* Disable a hardware Tx Queue assigned to PF */
+static void octep_disable_iq_cn93_pf(struct octep_device *oct, int iq_no)
+{
+ u64 reg_val = 0ULL;
+
+ iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val);
+}
+
+/* Disable a hardware Rx Queue assigned to PF */
+static void octep_disable_oq_cn93_pf(struct octep_device *oct, int oq_no)
+{
+ u64 reg_val = 0ULL;
+
+ oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
+ reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no));
+ reg_val &= ~0x1ULL;
+ octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val);
+}
+
+/* Disable all hardware Tx/Rx Queues assigned to PF */
+static void octep_disable_io_queues_cn93_pf(struct octep_device *oct)
+{
+ int q = 0;
+
+ for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
+ octep_disable_iq_cn93_pf(oct, q);
+ octep_disable_oq_cn93_pf(oct, q);
+ }
+}
+
+/* Dump hardware registers (including Tx/Rx queues) for debugging. */
+static void octep_dump_registers_cn93_pf(struct octep_device *oct)
+{
+ u8 srn, num_rings, q;
+
+ srn = CFG_GET_PORTS_PF_SRN(oct->conf);
+ num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ for (q = srn; q < srn + num_rings; q++)
+ cn93_dump_regs(oct, q);
+}
+
+/**
+ * octep_device_setup_cn93_pf() - Setup Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * - initialize hardware operations.
+ * - get target side pcie port number for the device.
+ * - setup window access to hardware registers.
+ * - set initial configuration and max limits.
+ * - setup hardware mapping of rings to the PF device.
+ */
+void octep_device_setup_cn93_pf(struct octep_device *oct)
+{
+ oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cn93_pf;
+ oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf;
+ oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf;
+
+ oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf;
+ oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf;
+ oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
+ oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf;
+
+ oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf;
+ oct->hw_ops.disable_interrupts = octep_disable_interrupts_cn93_pf;
+ oct->hw_ops.poll_non_ioq_interrupts = octep_poll_non_ioq_interrupts_cn93_pf;
+
+ oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cn93_pf;
+
+ oct->hw_ops.enable_iq = octep_enable_iq_cn93_pf;
+ oct->hw_ops.enable_oq = octep_enable_oq_cn93_pf;
+ oct->hw_ops.enable_io_queues = octep_enable_io_queues_cn93_pf;
+
+ oct->hw_ops.disable_iq = octep_disable_iq_cn93_pf;
+ oct->hw_ops.disable_oq = octep_disable_oq_cn93_pf;
+ oct->hw_ops.disable_io_queues = octep_disable_io_queues_cn93_pf;
+ oct->hw_ops.reset_io_queues = octep_reset_io_queues_cn93_pf;
+
+ oct->hw_ops.dump_registers = octep_dump_registers_cn93_pf;
+
+ octep_setup_pci_window_regs_cn93_pf(oct);
+
+ oct->pcie_port = octep_read_csr64(oct, CN93_SDP_MAC_NUMBER) & 0xff;
+ dev_info(&oct->pdev->dev,
+ "Octeon device using PCIE Port %d\n", oct->pcie_port);
+
+ octep_init_config_cn93_pf(oct);
+ octep_configure_ring_mapping_cn93_pf(oct);
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
new file mode 100644
index 000000000000..df7cd39d9fce
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_CONFIG_H_
+#define _OCTEP_CONFIG_H_
+
+/* Tx instruction types by length */
+#define OCTEP_32BYTE_INSTR 32
+#define OCTEP_64BYTE_INSTR 64
+
+/* Tx Queue: maximum descriptors per ring */
+#define OCTEP_IQ_MAX_DESCRIPTORS 1024
+/* Minimum input (Tx) requests to be enqueued to ring doorbell */
+#define OCTEP_DB_MIN 1
+/* Packet threshold for Tx queue interrupt */
+#define OCTEP_IQ_INTR_THRESHOLD 0x0
+
+/* Rx Queue: maximum descriptors per ring */
+#define OCTEP_OQ_MAX_DESCRIPTORS 1024
+
+/* Rx buffer size: Use page size buffers.
+ * Build the skb from the allocated page buffer once the packet is received.
+ * When a gathered packet is received, use the head page as the skb head and
+ * the page buffers in consecutive Rx descriptors as fragments.
+ */
+#define OCTEP_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE))
+#define OCTEP_OQ_PKTS_PER_INTR 128
+#define OCTEP_OQ_REFILL_THRESHOLD (OCTEP_OQ_MAX_DESCRIPTORS / 4)
+
+#define OCTEP_OQ_INTR_PKT_THRESHOLD 1
+#define OCTEP_OQ_INTR_TIME_THRESHOLD 10
+
+#define OCTEP_MSIX_NAME_SIZE (IFNAMSIZ + 32)
+
+/* Tx Queue wake threshold
+ * Wake up a stopped Tx queue if at least 2 descriptors are available.
+ * Even an skb with fragments consumes only one Tx queue descriptor entry.
+ */
+#define OCTEP_WAKE_QUEUE_THRESHOLD 2
+
+/* Minimum MTU supported by Octeon network interface */
+#define OCTEP_MIN_MTU ETH_MIN_MTU
+/* Maximum MTU supported by Octeon interface */
+#define OCTEP_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN))
+/* Default MTU */
+#define OCTEP_DEFAULT_MTU 1500
+
+/* Macros to get octeon config params */
+#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
+#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
+#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_PKIND(cfg) ((cfg)->iq.pkind)
+#define CFG_GET_IQ_INSTR_SIZE(cfg) (64)
+#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
+#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
+
+#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs)
+#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+
+#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.max_io_rings)
+#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.active_io_rings)
+#define CFG_GET_PORTS_PF_SRN(cfg) ((cfg)->pf_ring_cfg.srn)
+
+#define CFG_GET_DPI_PKIND(cfg) ((cfg)->core_cfg.dpi_pkind)
+#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us)
+#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us)
+
+#define CFG_GET_MAX_VFS(cfg) ((cfg)->sriov_cfg.max_vfs)
+#define CFG_GET_ACTIVE_VFS(cfg) ((cfg)->sriov_cfg.active_vfs)
+#define CFG_GET_MAX_RPVF(cfg) ((cfg)->sriov_cfg.max_rings_per_vf)
+#define CFG_GET_ACTIVE_RPVF(cfg) ((cfg)->sriov_cfg.active_rings_per_vf)
+#define CFG_GET_VF_SRN(cfg) ((cfg)->sriov_cfg.vf_srn)
+
+#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix)
+#define CFG_GET_NON_IOQ_MSIX(cfg) ((cfg)->msix_cfg.non_ioq_msix)
+#define CFG_GET_NON_IOQ_MSIX_NAMES(cfg) ((cfg)->msix_cfg.non_ioq_msix_names)
+
+#define CFG_GET_CTRL_MBOX_MEM_ADDR(cfg) ((cfg)->ctrl_mbox_cfg.barmem_addr)
+
+/* Hardware Tx Queue configuration. */
+struct octep_iq_config {
+ /* Size of the Input queue (number of commands) */
+ u16 num_descs;
+
+ /* Command size - 32 or 64 bytes */
+ u16 instr_type;
+
+ /* pkind for packets sent to Octeon */
+ u16 pkind;
+
+ /* Minimum number of commands pending to be posted to Octeon before the
+ * driver rings the Input queue doorbell.
+ */
+ u16 db_min;
+
+ /* Trigger the IQ interrupt when processed cmd count reaches
+ * this level.
+ */
+ u32 intr_threshold;
+};
+
+/* Hardware Rx Queue configuration. */
+struct octep_oq_config {
+ /* Size of Output queue (number of descriptors) */
+ u16 num_descs;
+
+ /* Size of buffer in this Output queue. */
+ u16 buf_size;
+
+ /* The number of buffers that were consumed during packet processing
+ * by the driver on this Output queue before the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ u16 refill_threshold;
+
+ /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only if it sent as many packets as specified by this field.
+ * The driver usually does not use packet count interrupt coalescing.
+ */
+ u32 oq_intr_pkt;
+
+ /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host
+ * if at least one packet was sent in the time interval specified by
+ * this field. The driver uses time interval interrupt coalescing by
+ * default. The time is specified in microseconds.
+ */
+ u32 oq_intr_time;
+};
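Taken together with the default thresholds defined earlier in this file (oq_intr_pkt = 1, oq_intr_time = 10), octep_setup_oq_regs_cn93_pf() in octep_cn9k_pf.c packs both fields into a single 64-bit CSR; the resulting value is shown below purely as an illustration:

    /* R_OUT_INT_LEVELS = ((u64)oq_intr_time << 32) | oq_intr_pkt
     * With the defaults (time = 10 us, pkt = 1) this evaluates to
     * 0x0000000a00000001.
     */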
+
+/* Tx/Rx configuration */
+struct octep_pf_ring_config {
+ /* Max number of IOQs */
+ u16 max_io_rings;
+
+ /* Number of active IOQs */
+ u16 active_io_rings;
+
+ /* Starting IOQ number: this changes based on which PEM is used */
+ u16 srn;
+};
+
+/* Octeon Hardware SRIOV config */
+struct octep_sriov_config {
+ /* Max number of VF devices supported */
+ u16 max_vfs;
+
+ /* Number of VF devices enabled */
+ u16 active_vfs;
+
+ /* Max number of rings assigned to VF */
+ u8 max_rings_per_vf;
+
+ /* Number of rings enabled per VF */
+ u8 active_rings_per_vf;
+
+ /* Starting ring number of the VFs: ring-0 of VF-0 of the PF */
+ u16 vf_srn;
+};
+
+/* Octeon MSI-x config. */
+struct octep_msix_config {
+ /* Number of IOQ interrupts */
+ u16 ioq_msix;
+
+ /* Number of Non IOQ interrupts */
+ u16 non_ioq_msix;
+
+ /* Names of Non IOQ interrupts */
+ char **non_ioq_msix_names;
+};
+
+struct octep_ctrl_mbox_config {
+ /* Barmem address for control mbox */
+ void __iomem *barmem_addr;
+};
+
+/* Data Structure to hold configuration limits and active config */
+struct octep_config {
+ /* Input Queue attributes. */
+ struct octep_iq_config iq;
+
+ /* Output Queue attributes. */
+ struct octep_oq_config oq;
+
+ /* NIC Port Configuration */
+ struct octep_pf_ring_config pf_ring_cfg;
+
+ /* SRIOV configuration of the PF */
+ struct octep_sriov_config sriov_cfg;
+
+ /* MSI-X interrupt config */
+ struct octep_msix_config msix_cfg;
+
+ /* ctrl mbox config */
+ struct octep_ctrl_mbox_config ctrl_mbox_cfg;
+
+ /* Configured maximum heartbeat miss count */
+ u32 max_hb_miss_cnt;
+
+ /* Configured firmware heartbeat interval in secs */
+ u32 hb_interval;
+};
+#endif /* _OCTEP_CONFIG_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
new file mode 100644
index 000000000000..035ead7935c7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "octep_ctrl_mbox.h"
+#include "octep_config.h"
+#include "octep_main.h"
+
+/* Timeout in msecs for message response */
+#define OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS 100
+/* Time in msecs to wait for message response */
+#define OCTEP_CTRL_MBOX_MSG_WAIT_MS 10
+
+/* Size of mbox info in bytes */
+#define OCTEP_CTRL_MBOX_INFO_SZ 256
+/* Size of mbox host to fw queue info in bytes */
+#define OCTEP_CTRL_MBOX_H2FQ_INFO_SZ 16
+/* Size of mbox fw to host queue info in bytes */
+#define OCTEP_CTRL_MBOX_F2HQ_INFO_SZ 16
+
+#define OCTEP_CTRL_MBOX_TOTAL_INFO_SZ (OCTEP_CTRL_MBOX_INFO_SZ + \
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + \
+ OCTEP_CTRL_MBOX_F2HQ_INFO_SZ)
+
+#define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM(m) (m)
+#define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(m) ((m) + 8)
+#define OCTEP_CTRL_MBOX_INFO_HOST_STATUS(m) ((m) + 24)
+#define OCTEP_CTRL_MBOX_INFO_FW_STATUS(m) ((m) + 144)
+
+#define OCTEP_CTRL_MBOX_H2FQ_INFO(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ)
+#define OCTEP_CTRL_MBOX_H2FQ_PROD(m) (OCTEP_CTRL_MBOX_H2FQ_INFO(m))
+#define OCTEP_CTRL_MBOX_H2FQ_CONS(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO(m)) + 4)
+#define OCTEP_CTRL_MBOX_H2FQ_SZ(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO(m)) + 8)
+
+#define OCTEP_CTRL_MBOX_F2HQ_INFO(m) ((m) + \
+ OCTEP_CTRL_MBOX_INFO_SZ + \
+ OCTEP_CTRL_MBOX_H2FQ_INFO_SZ)
+#define OCTEP_CTRL_MBOX_F2HQ_PROD(m) (OCTEP_CTRL_MBOX_F2HQ_INFO(m))
+#define OCTEP_CTRL_MBOX_F2HQ_CONS(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO(m)) + 4)
+#define OCTEP_CTRL_MBOX_F2HQ_SZ(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO(m)) + 8)
+
+static const u32 mbox_hdr_sz = sizeof(union octep_ctrl_mbox_msg_hdr);
+
+static u32 octep_ctrl_mbox_circq_inc(u32 index, u32 inc, u32 sz)
+{
+ return (index + inc) % sz;
+}
+
+static u32 octep_ctrl_mbox_circq_space(u32 pi, u32 ci, u32 sz)
+{
+ return sz - (abs(pi - ci) % sz);
+}
+
+static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 sz)
+{
+ return (abs(pi - ci) % sz);
+}
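A quick worked example of the producer/consumer helpers above, using assumed index values (illustration only, not part of the patch):

    u32 depth = octep_ctrl_mbox_circq_depth(300, 100, 1024); /* 200 bytes queued */
    u32 space = octep_ctrl_mbox_circq_space(300, 100, 1024); /* 824 bytes free   */
    /* For any producer/consumer pair, depth + space equals the queue size. */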
+
+int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox)
+{
+ u64 magic_num, status;
+
+ if (!mbox)
+ return -EINVAL;
+
+ if (!mbox->barmem) {
+ pr_info("octep_ctrl_mbox : Invalid barmem %p\n", mbox->barmem);
+ return -EINVAL;
+ }
+
+ magic_num = readq(OCTEP_CTRL_MBOX_INFO_MAGIC_NUM(mbox->barmem));
+ if (magic_num != OCTEP_CTRL_MBOX_MAGIC_NUMBER) {
+ pr_info("octep_ctrl_mbox : Invalid magic number %llx\n", magic_num);
+ return -EINVAL;
+ }
+
+ status = readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS(mbox->barmem));
+ if (status != OCTEP_CTRL_MBOX_STATUS_READY) {
+ pr_info("octep_ctrl_mbox : Firmware is not ready.\n");
+ return -EINVAL;
+ }
+
+ mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(mbox->barmem));
+
+ writeq(OCTEP_CTRL_MBOX_STATUS_INIT,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem));
+
+ mbox->h2fq.sz = readl(OCTEP_CTRL_MBOX_H2FQ_SZ(mbox->barmem));
+ mbox->h2fq.hw_prod = OCTEP_CTRL_MBOX_H2FQ_PROD(mbox->barmem);
+ mbox->h2fq.hw_cons = OCTEP_CTRL_MBOX_H2FQ_CONS(mbox->barmem);
+ mbox->h2fq.hw_q = mbox->barmem + OCTEP_CTRL_MBOX_TOTAL_INFO_SZ;
+
+ mbox->f2hq.sz = readl(OCTEP_CTRL_MBOX_F2HQ_SZ(mbox->barmem));
+ mbox->f2hq.hw_prod = OCTEP_CTRL_MBOX_F2HQ_PROD(mbox->barmem);
+ mbox->f2hq.hw_cons = OCTEP_CTRL_MBOX_F2HQ_CONS(mbox->barmem);
+ mbox->f2hq.hw_q = mbox->barmem +
+ OCTEP_CTRL_MBOX_TOTAL_INFO_SZ +
+ mbox->h2fq.sz;
+
+ /* ensure ready state is seen after everything is initialized */
+ wmb();
+ writeq(OCTEP_CTRL_MBOX_STATUS_READY,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem));
+
+ pr_info("Octep ctrl mbox : Init successful.\n");
+
+ return 0;
+}
+
+static void
+octep_write_mbox_data(struct octep_ctrl_mbox_q *q, u32 *pi, u32 ci, void *buf, u32 w_sz)
+{
+ u8 __iomem *qbuf;
+ u32 cp_sz;
+
+ /* Assumption: Caller has ensured enough write space */
+ qbuf = (q->hw_q + *pi);
+ if (*pi < ci) {
+ /* copy entire w_sz */
+ memcpy_toio(qbuf, buf, w_sz);
+ *pi = octep_ctrl_mbox_circq_inc(*pi, w_sz, q->sz);
+ } else {
+ /* copy up to end of queue */
+ cp_sz = min((q->sz - *pi), w_sz);
+ memcpy_toio(qbuf, buf, cp_sz);
+ w_sz -= cp_sz;
+ *pi = octep_ctrl_mbox_circq_inc(*pi, cp_sz, q->sz);
+ if (w_sz) {
+ /* roll over and copy remaining w_sz */
+ buf += cp_sz;
+ qbuf = (q->hw_q + *pi);
+ memcpy_toio(qbuf, buf, w_sz);
+ *pi = octep_ctrl_mbox_circq_inc(*pi, w_sz, q->sz);
+ }
+ }
+}
+
+int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg)
+{
+ struct octep_ctrl_mbox_msg_buf *sg;
+ struct octep_ctrl_mbox_q *q;
+ u32 pi, ci, buf_sz, w_sz;
+ int s;
+
+ if (!mbox || !msg)
+ return -EINVAL;
+
+ if (readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS(mbox->barmem)) != OCTEP_CTRL_MBOX_STATUS_READY)
+ return -EIO;
+
+ mutex_lock(&mbox->h2fq_lock);
+ q = &mbox->h2fq;
+ pi = readl(q->hw_prod);
+ ci = readl(q->hw_cons);
+
+ if (octep_ctrl_mbox_circq_space(pi, ci, q->sz) < (msg->hdr.s.sz + mbox_hdr_sz)) {
+ mutex_unlock(&mbox->h2fq_lock);
+ return -EAGAIN;
+ }
+
+ octep_write_mbox_data(q, &pi, ci, (void *)&msg->hdr, mbox_hdr_sz);
+ buf_sz = msg->hdr.s.sz;
+ for (s = 0; ((s < msg->sg_num) && (buf_sz > 0)); s++) {
+ sg = &msg->sg_list[s];
+ w_sz = (sg->sz <= buf_sz) ? sg->sz : buf_sz;
+ octep_write_mbox_data(q, &pi, ci, sg->msg, w_sz);
+ buf_sz -= w_sz;
+ }
+ writel(pi, q->hw_prod);
+ mutex_unlock(&mbox->h2fq_lock);
+
+ return 0;
+}
+
+static void
+octep_read_mbox_data(struct octep_ctrl_mbox_q *q, u32 pi, u32 *ci, void *buf, u32 r_sz)
+{
+ u8 __iomem *qbuf;
+ u32 cp_sz;
+
+ /* Assumption: Caller has ensured enough read space */
+ qbuf = (q->hw_q + *ci);
+ if (*ci < pi) {
+ /* copy entire r_sz */
+ memcpy_fromio(buf, qbuf, r_sz);
+ *ci = octep_ctrl_mbox_circq_inc(*ci, r_sz, q->sz);
+ } else {
+ /* copy up to end of queue */
+ cp_sz = min((q->sz - *ci), r_sz);
+ memcpy_fromio(buf, qbuf, cp_sz);
+ r_sz -= cp_sz;
+ *ci = octep_ctrl_mbox_circq_inc(*ci, cp_sz, q->sz);
+ if (r_sz) {
+ /* roll over and copy remaining r_sz */
+ buf += cp_sz;
+ qbuf = (q->hw_q + *ci);
+ memcpy_fromio(buf, qbuf, r_sz);
+ *ci = octep_ctrl_mbox_circq_inc(*ci, r_sz, q->sz);
+ }
+ }
+}
+
+int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg)
+{
+ struct octep_ctrl_mbox_msg_buf *sg;
+ u32 pi, ci, r_sz, buf_sz, q_depth;
+ struct octep_ctrl_mbox_q *q;
+ int s;
+
+ if (readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS(mbox->barmem)) != OCTEP_CTRL_MBOX_STATUS_READY)
+ return -EIO;
+
+ mutex_lock(&mbox->f2hq_lock);
+ q = &mbox->f2hq;
+ pi = readl(q->hw_prod);
+ ci = readl(q->hw_cons);
+
+ q_depth = octep_ctrl_mbox_circq_depth(pi, ci, q->sz);
+ if (q_depth < mbox_hdr_sz) {
+ mutex_unlock(&mbox->f2hq_lock);
+ return -EAGAIN;
+ }
+
+ octep_read_mbox_data(q, pi, &ci, (void *)&msg->hdr, mbox_hdr_sz);
+ buf_sz = msg->hdr.s.sz;
+ for (s = 0; ((s < msg->sg_num) && (buf_sz > 0)); s++) {
+ sg = &msg->sg_list[s];
+ r_sz = (sg->sz <= buf_sz) ? sg->sz : buf_sz;
+ octep_read_mbox_data(q, pi, &ci, sg->msg, r_sz);
+ buf_sz -= r_sz;
+ }
+ writel(ci, q->hw_cons);
+ mutex_unlock(&mbox->f2hq_lock);
+
+ return 0;
+}
+
+int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox)
+{
+ if (!mbox)
+ return -EINVAL;
+ if (!mbox->barmem)
+ return -EINVAL;
+
+ writeq(OCTEP_CTRL_MBOX_STATUS_INVALID,
+ OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem));
+ /* ensure uninit state is written before uninitialization */
+ wmb();
+
+ mutex_destroy(&mbox->h2fq_lock);
+ mutex_destroy(&mbox->f2hq_lock);
+
+ pr_info("Octep ctrl mbox : Uninit successful.\n");
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
new file mode 100644
index 000000000000..9c4ff0fba6a0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef __OCTEP_CTRL_MBOX_H__
+#define __OCTEP_CTRL_MBOX_H__
+
+/* barmem structure
+ * |===========================================|
+ * |Info (16 + 120 + 120 = 256 bytes) |
+ * |-------------------------------------------|
+ * |magic number (8 bytes) |
+ * |bar memory size (4 bytes) |
+ * |reserved (4 bytes) |
+ * |-------------------------------------------|
+ * |host version (8 bytes) |
+ * |host status (8 bytes) |
+ * |host reserved (104 bytes) |
+ * |-------------------------------------------|
+ * |fw version (8 bytes) |
+ * |fw status (8 bytes) |
+ * |fw reserved (104 bytes) |
+ * |===========================================|
+ * |Host to Fw Queue info (16 bytes) |
+ * |-------------------------------------------|
+ * |producer index (4 bytes) |
+ * |consumer index (4 bytes) |
+ * |max element size (4 bytes) |
+ * |reserved (4 bytes) |
+ * |===========================================|
+ * |Fw to Host Queue info (16 bytes) |
+ * |-------------------------------------------|
+ * |producer index (4 bytes) |
+ * |consumer index (4 bytes) |
+ * |max element size (4 bytes) |
+ * |reserved (4 bytes) |
+ * |===========================================|
+ * |Host to Fw Queue ((total size-288)/2 bytes)|
+ * |-------------------------------------------|
+ * | |
+ * |===========================================|
+ * |===========================================|
+ * |Fw to Host Queue ((total size-288)/2 bytes)|
+ * |-------------------------------------------|
+ * | |
+ * |===========================================|
+ */
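Working the diagram above through the offset macros in octep_ctrl_mbox.c gives the following absolute byte offsets; this is a derived illustration, not part of the patch:

    /* Byte offsets from the start of barmem:
     *     0  magic number              8  bar memory size
     *    24  host status             144  fw status
     *   256  h2fq prod/cons/size     272  f2hq prod/cons/size
     *   288  h2fq data; f2hq data follows at 288 + h2fq size
     */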
+
+#define OCTEP_CTRL_MBOX_MAGIC_NUMBER 0xdeaddeadbeefbeefull
+
+/* Valid request message */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ BIT(0)
+/* Valid response message */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP BIT(1)
+/* Valid notification, no response required */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY BIT(2)
+/* Valid custom message */
+#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_CUSTOM BIT(3)
+
+#define OCTEP_CTRL_MBOX_MSG_DESC_MAX 4
+
+enum octep_ctrl_mbox_status {
+ OCTEP_CTRL_MBOX_STATUS_INVALID = 0,
+ OCTEP_CTRL_MBOX_STATUS_INIT,
+ OCTEP_CTRL_MBOX_STATUS_READY,
+ OCTEP_CTRL_MBOX_STATUS_UNINIT
+};
+
+/* mbox message */
+union octep_ctrl_mbox_msg_hdr {
+ u64 words[2];
+ struct {
+ /* must be 0 */
+ u16 reserved1:15;
+ /* vf_idx is valid if 1 */
+ u16 is_vf:1;
+ /* sender vf index 0-(n-1), 0 if (is_vf==0) */
+ u16 vf_idx;
+ /* total size of message excluding header */
+ u32 sz;
+ /* OCTEP_CTRL_MBOX_MSG_HDR_FLAG_* */
+ u32 flags;
+ /* identifier to match responses */
+ u16 msg_id;
+ u16 reserved2;
+ } s;
+};
+
+/* mbox message buffer */
+struct octep_ctrl_mbox_msg_buf {
+ u32 reserved1;
+ u16 reserved2;
+ /* size of buffer */
+ u16 sz;
+ /* pointer to message buffer */
+ void *msg;
+};
+
+/* mbox message */
+struct octep_ctrl_mbox_msg {
+ /* mbox transaction header */
+ union octep_ctrl_mbox_msg_hdr hdr;
+ /* number of sg buffers */
+ int sg_num;
+ /* message buffers */
+ struct octep_ctrl_mbox_msg_buf sg_list[OCTEP_CTRL_MBOX_MSG_DESC_MAX];
+};
+
+/* Mbox queue */
+struct octep_ctrl_mbox_q {
+ /* size of queue buffer */
+ u32 sz;
+ /* producer address in bar mem */
+ u8 __iomem *hw_prod;
+ /* consumer address in bar mem */
+ u8 __iomem *hw_cons;
+ /* q base address in bar mem */
+ u8 __iomem *hw_q;
+};
+
+struct octep_ctrl_mbox {
+ /* size of bar memory */
+ u32 barmem_sz;
+ /* pointer to BAR memory */
+ u8 __iomem *barmem;
+ /* host-to-fw queue */
+ struct octep_ctrl_mbox_q h2fq;
+ /* fw-to-host queue */
+ struct octep_ctrl_mbox_q f2hq;
+ /* lock for h2fq */
+ struct mutex h2fq_lock;
+ /* lock for f2hq */
+ struct mutex f2hq_lock;
+};
+
+/* Initialize control mbox.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox);
+
+/* Send mbox message.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg.
+ * Caller should fill msg.hdr.s.sz and msg.sg_list[i].sz for each buffer.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);
+
+/* Retrieve mbox message.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ * @param msg: non-null pointer to struct octep_ctrl_mbox_msg.
+ * Caller should fill msg.hdr.s.sz and msg.sg_list[i].sz for each buffer.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg);
+
+/* Uninitialize control mbox.
+ *
+ * @param mbox: non-null pointer to struct octep_ctrl_mbox.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox);
+
+#endif /* __OCTEP_CTRL_MBOX_H__ */
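For orientation, here is a minimal usage sketch of the API declared above: package a single buffer into a message and send it as a request. This is illustrative only; example_send_one_buf() is a hypothetical caller, and the mbox is assumed to have already been set up with octep_ctrl_mbox_init().

    /* Hypothetical caller: send one buffer as a request over the mbox */
    static int example_send_one_buf(struct octep_ctrl_mbox *mbox,
                                    void *buf, u16 len)
    {
            struct octep_ctrl_mbox_msg msg = {0};

            msg.hdr.s.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
            msg.hdr.s.sz = len;          /* payload size, excluding the header */
            msg.sg_num = 1;              /* single scatter/gather buffer */
            msg.sg_list[0].sz = len;
            msg.sg_list[0].msg = buf;

            return octep_ctrl_mbox_send(mbox, &msg);
    }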
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
new file mode 100644
index 000000000000..1cc6af2feb38
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+static const u32 req_hdr_sz = sizeof(union octep_ctrl_net_req_hdr);
+static const u32 mtu_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mtu);
+static const u32 mac_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mac);
+static const u32 state_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_state);
+static const u32 link_info_sz = sizeof(struct octep_ctrl_net_link_info);
+static atomic_t ctrl_net_msg_id;
+
+static void init_send_req(struct octep_ctrl_mbox_msg *msg, void *buf,
+ u16 sz, int vfid)
+{
+ msg->hdr.s.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ;
+ msg->hdr.s.msg_id = atomic_inc_return(&ctrl_net_msg_id) &
+ GENMASK(sizeof(msg->hdr.s.msg_id) * BITS_PER_BYTE - 1, 0);
+ msg->hdr.s.sz = req_hdr_sz + sz;
+ msg->sg_num = 1;
+ msg->sg_list[0].msg = buf;
+ msg->sg_list[0].sz = msg->hdr.s.sz;
+ if (vfid != OCTEP_CTRL_NET_INVALID_VFID) {
+ msg->hdr.s.is_vf = 1;
+ msg->hdr.s.vf_idx = vfid;
+ }
+}
+
+static int octep_send_mbox_req(struct octep_device *oct,
+ struct octep_ctrl_net_wait_data *d,
+ bool wait_for_response)
+{
+ int err, ret;
+
+ err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &d->msg);
+ if (err < 0)
+ return err;
+
+ if (!wait_for_response)
+ return 0;
+
+ d->done = 0;
+ INIT_LIST_HEAD(&d->list);
+ list_add_tail(&d->list, &oct->ctrl_req_wait_list);
+ ret = wait_event_interruptible_timeout(oct->ctrl_req_wait_q,
+ (d->done != 0),
+ msecs_to_jiffies(500));
+ list_del(&d->list);
+ /* wait_event_interruptible_timeout() return values:
+ * ret == 0: condition still false and timeout elapsed
+ * ret == 1: condition became true right at the timeout
+ * ret > 1: condition became true before the timeout
+ * ret < 0: interrupted by a signal
+ */
+ if (ret == 0 || ret == 1)
+ return -EAGAIN;
+
+ if (d->data.resp.hdr.s.reply != OCTEP_CTRL_NET_REPLY_OK)
+ return -EAGAIN;
+
+ return 0;
+}
+
+int octep_ctrl_net_init(struct octep_device *oct)
+{
+ struct octep_ctrl_mbox *ctrl_mbox;
+ struct pci_dev *pdev = oct->pdev;
+ int ret;
+
+ init_waitqueue_head(&oct->ctrl_req_wait_q);
+ INIT_LIST_HEAD(&oct->ctrl_req_wait_list);
+
+ /* Initialize control mbox */
+ ctrl_mbox = &oct->ctrl_mbox;
+ ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf);
+ ret = octep_ctrl_mbox_init(ctrl_mbox);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize control mbox\n");
+ return ret;
+ }
+ oct->ctrl_mbox_ifstats_offset = ctrl_mbox->barmem_sz;
+
+ return 0;
+}
+
+int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+ int err;
+
+ init_send_req(&d.msg, (void *)req, state_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+ req->link.cmd = OCTEP_CTRL_NET_CMD_GET;
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
+ return err;
+
+ return d.data.resp.link.state;
+}
+
+int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up,
+ bool wait_for_response)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+
+ init_send_req(&d.msg, req, state_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
+ req->link.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req->link.state = (up) ? OCTEP_CTRL_NET_STATE_UP :
+ OCTEP_CTRL_NET_STATE_DOWN;
+
+ return octep_send_mbox_req(oct, &d, wait_for_response);
+}
+
+int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
+ bool wait_for_response)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+
+ init_send_req(&d.msg, req, state_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_RX_STATE;
+ req->link.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req->link.state = (up) ? OCTEP_CTRL_NET_STATE_UP :
+ OCTEP_CTRL_NET_STATE_DOWN;
+
+ return octep_send_mbox_req(oct, &d, wait_for_response);
+}
+
+int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+ int err;
+
+ init_send_req(&d.msg, req, mac_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
+ req->link.cmd = OCTEP_CTRL_NET_CMD_GET;
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
+ return err;
+
+ memcpy(addr, d.data.resp.mac.addr, ETH_ALEN);
+
+ return 0;
+}
+
+int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
+ bool wait_for_response)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+
+ init_send_req(&d.msg, req, mac_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC;
+ req->mac.cmd = OCTEP_CTRL_NET_CMD_SET;
+ memcpy(&req->mac.addr, addr, ETH_ALEN);
+
+ return octep_send_mbox_req(oct, &d, wait_for_response);
+}
+
+int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu,
+ bool wait_for_response)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+
+ init_send_req(&d.msg, req, mtu_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU;
+ req->mtu.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req->mtu.val = mtu;
+
+ return octep_send_mbox_req(oct, &d, wait_for_response);
+}
+
+int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
+ struct octep_iface_rx_stats *rx_stats,
+ struct octep_iface_tx_stats *tx_stats)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+ struct octep_ctrl_net_h2f_resp *resp;
+ int err;
+
+ init_send_req(&d.msg, req, 0, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS;
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
+ return err;
+
+ resp = &d.data.resp;
+ memcpy(rx_stats, &resp->if_stats.rx_stats, sizeof(struct octep_iface_rx_stats));
+ memcpy(tx_stats, &resp->if_stats.tx_stats, sizeof(struct octep_iface_tx_stats));
+ return 0;
+}
+
+int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid,
+ struct octep_iface_link_info *link_info)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+ struct octep_ctrl_net_h2f_resp *resp;
+ int err;
+
+ init_send_req(&d.msg, req, link_info_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
+ req->link_info.cmd = OCTEP_CTRL_NET_CMD_GET;
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
+ return err;
+
+ resp = &d.data.resp;
+ link_info->supported_modes = resp->link_info.supported_modes;
+ link_info->advertised_modes = resp->link_info.advertised_modes;
+ link_info->autoneg = resp->link_info.autoneg;
+ link_info->pause = resp->link_info.pause;
+ link_info->speed = resp->link_info.speed;
+
+ return 0;
+}
+
+int octep_ctrl_net_set_link_info(struct octep_device *oct, int vfid,
+ struct octep_iface_link_info *link_info,
+ bool wait_for_response)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_req *req = &d.data.req;
+
+ init_send_req(&d.msg, req, link_info_sz, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO;
+ req->link_info.cmd = OCTEP_CTRL_NET_CMD_SET;
+ req->link_info.info.advertised_modes = link_info->advertised_modes;
+ req->link_info.info.autoneg = link_info->autoneg;
+ req->link_info.info.pause = link_info->pause;
+ req->link_info.info.speed = link_info->speed;
+
+ return octep_send_mbox_req(oct, &d, wait_for_response);
+}
+
+static void process_mbox_resp(struct octep_device *oct,
+ struct octep_ctrl_mbox_msg *msg)
+{
+ struct octep_ctrl_net_wait_data *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &oct->ctrl_req_wait_list, list) {
+ if (pos->msg.hdr.s.msg_id == msg->hdr.s.msg_id) {
+ memcpy(&pos->data.resp,
+ msg->sg_list[0].msg,
+ msg->hdr.s.sz);
+ pos->done = 1;
+ wake_up_interruptible_all(&oct->ctrl_req_wait_q);
+ break;
+ }
+ }
+}
+
+static int process_mbox_notify(struct octep_device *oct,
+ struct octep_ctrl_mbox_msg *msg)
+{
+ struct net_device *netdev = oct->netdev;
+ struct octep_ctrl_net_f2h_req *req;
+
+ req = (struct octep_ctrl_net_f2h_req *)msg->sg_list[0].msg;
+ switch (req->hdr.s.cmd) {
+ case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS:
+ if (netif_running(netdev)) {
+ if (req->link.state) {
+ dev_info(&oct->pdev->dev, "netif_carrier_on\n");
+ netif_carrier_on(netdev);
+ } else {
+ dev_info(&oct->pdev->dev, "netif_carrier_off\n");
+ netif_carrier_off(netdev);
+ }
+ }
+ break;
+ default:
+ pr_info("Unknown mbox req : %u\n", req->hdr.s.cmd);
+ break;
+ }
+
+ return 0;
+}
+
+void octep_ctrl_net_recv_fw_messages(struct octep_device *oct)
+{
+ static u16 msg_sz = sizeof(union octep_ctrl_net_max_data);
+ union octep_ctrl_net_max_data data = {0};
+ struct octep_ctrl_mbox_msg msg = {0};
+ int ret;
+
+ msg.hdr.s.sz = msg_sz;
+ msg.sg_num = 1;
+ msg.sg_list[0].sz = msg_sz;
+ msg.sg_list[0].msg = &data;
+ while (true) {
+ /* mbox will overwrite msg.hdr.s.sz so initialize it */
+ msg.hdr.s.sz = msg_sz;
+ ret = octep_ctrl_mbox_recv(&oct->ctrl_mbox, (struct octep_ctrl_mbox_msg *)&msg);
+ if (ret < 0)
+ break;
+
+ if (msg.hdr.s.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP)
+ process_mbox_resp(oct, &msg);
+ else if (msg.hdr.s.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY)
+ process_mbox_notify(oct, &msg);
+ }
+}
+
+int octep_ctrl_net_uninit(struct octep_device *oct)
+{
+ struct octep_ctrl_net_wait_data *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &oct->ctrl_req_wait_list, list)
+ pos->done = 1;
+
+ wake_up_interruptible_all(&oct->ctrl_req_wait_q);
+
+ octep_ctrl_mbox_uninit(&oct->ctrl_mbox);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
new file mode 100644
index 000000000000..37880dd79116
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+#ifndef __OCTEP_CTRL_NET_H__
+#define __OCTEP_CTRL_NET_H__
+
+#define OCTEP_CTRL_NET_INVALID_VFID (-1)
+
+/* Supported commands */
+enum octep_ctrl_net_cmd {
+ OCTEP_CTRL_NET_CMD_GET = 0,
+ OCTEP_CTRL_NET_CMD_SET,
+};
+
+/* Supported states */
+enum octep_ctrl_net_state {
+ OCTEP_CTRL_NET_STATE_DOWN = 0,
+ OCTEP_CTRL_NET_STATE_UP,
+};
+
+/* Supported replies */
+enum octep_ctrl_net_reply {
+ OCTEP_CTRL_NET_REPLY_OK = 0,
+ OCTEP_CTRL_NET_REPLY_GENERIC_FAIL,
+ OCTEP_CTRL_NET_REPLY_INVALID_PARAM,
+};
+
+/* Supported host to fw commands */
+enum octep_ctrl_net_h2f_cmd {
+ OCTEP_CTRL_NET_H2F_CMD_INVALID = 0,
+ OCTEP_CTRL_NET_H2F_CMD_MTU,
+ OCTEP_CTRL_NET_H2F_CMD_MAC,
+ OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS,
+ OCTEP_CTRL_NET_H2F_CMD_GET_XSTATS,
+ OCTEP_CTRL_NET_H2F_CMD_GET_Q_STATS,
+ OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS,
+ OCTEP_CTRL_NET_H2F_CMD_RX_STATE,
+ OCTEP_CTRL_NET_H2F_CMD_LINK_INFO,
+};
+
+/* Supported fw to host commands */
+enum octep_ctrl_net_f2h_cmd {
+ OCTEP_CTRL_NET_F2H_CMD_INVALID = 0,
+ OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS,
+};
+
+union octep_ctrl_net_req_hdr {
+ u64 words[1];
+ struct {
+ /* sender id */
+ u16 sender;
+ /* receiver id */
+ u16 receiver;
+ /* enum octep_ctrl_net_h2f_cmd */
+ u16 cmd;
+ /* reserved */
+ u16 rsvd0;
+ } s;
+};
+
+/* get/set mtu request */
+struct octep_ctrl_net_h2f_req_cmd_mtu {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* 0-65535 */
+ u16 val;
+};
+
+/* get/set mac request */
+struct octep_ctrl_net_h2f_req_cmd_mac {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* xx:xx:xx:xx:xx:xx */
+ u8 addr[ETH_ALEN];
+};
+
+/* get/set link state, rx state */
+struct octep_ctrl_net_h2f_req_cmd_state {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* link info */
+struct octep_ctrl_net_link_info {
+ /* Bitmap of Supported link speeds/modes */
+ u64 supported_modes;
+ /* Bitmap of Advertised link speeds/modes */
+ u64 advertised_modes;
+ /* Autonegotiation state; OCTEP_LINK_MODE_AUTONEG_* flag bits */
+ u8 autoneg;
+ /* Pause frame settings; OCTEP_LINK_MODE_PAUSE_* flag bits */
+ u8 pause;
+ /* Negotiated link speed in Mbps */
+ u32 speed;
+};
+
+/* get/set link info */
+struct octep_ctrl_net_h2f_req_cmd_link_info {
+ /* enum octep_ctrl_net_cmd */
+ u16 cmd;
+ /* struct octep_ctrl_net_link_info */
+ struct octep_ctrl_net_link_info info;
+};
+
+/* Host to fw request data */
+struct octep_ctrl_net_h2f_req {
+ union octep_ctrl_net_req_hdr hdr;
+ union {
+ struct octep_ctrl_net_h2f_req_cmd_mtu mtu;
+ struct octep_ctrl_net_h2f_req_cmd_mac mac;
+ struct octep_ctrl_net_h2f_req_cmd_state link;
+ struct octep_ctrl_net_h2f_req_cmd_state rx;
+ struct octep_ctrl_net_h2f_req_cmd_link_info link_info;
+ };
+} __packed;
+
+union octep_ctrl_net_resp_hdr {
+ u64 words[1];
+ struct {
+ /* sender id */
+ u16 sender;
+ /* receiver id */
+ u16 receiver;
+ /* enum octep_ctrl_net_h2f_cmd */
+ u16 cmd;
+ /* octep_ctrl_net_reply */
+ u16 reply;
+ } s;
+};
+
+/* get mtu response */
+struct octep_ctrl_net_h2f_resp_cmd_mtu {
+ /* 0-65535 */
+ u16 val;
+};
+
+/* get mac response */
+struct octep_ctrl_net_h2f_resp_cmd_mac {
+ /* xx:xx:xx:xx:xx:xx */
+ u8 addr[ETH_ALEN];
+};
+
+ /* get if_stats, xstats, q_stats response */
+struct octep_ctrl_net_h2f_resp_cmd_get_stats {
+ struct octep_iface_rx_stats rx_stats;
+ struct octep_iface_tx_stats tx_stats;
+};
+
+/* get link state, rx state response */
+struct octep_ctrl_net_h2f_resp_cmd_state {
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* Host to fw response data */
+struct octep_ctrl_net_h2f_resp {
+ union octep_ctrl_net_resp_hdr hdr;
+ union {
+ struct octep_ctrl_net_h2f_resp_cmd_mtu mtu;
+ struct octep_ctrl_net_h2f_resp_cmd_mac mac;
+ struct octep_ctrl_net_h2f_resp_cmd_get_stats if_stats;
+ struct octep_ctrl_net_h2f_resp_cmd_state link;
+ struct octep_ctrl_net_h2f_resp_cmd_state rx;
+ struct octep_ctrl_net_link_info link_info;
+ };
+} __packed;
+
+ /* link state notification */
+struct octep_ctrl_net_f2h_req_cmd_state {
+ /* enum octep_ctrl_net_state */
+ u16 state;
+};
+
+/* Fw to host request data */
+struct octep_ctrl_net_f2h_req {
+ union octep_ctrl_net_req_hdr hdr;
+ union {
+ struct octep_ctrl_net_f2h_req_cmd_state link;
+ };
+};
+
+/* Fw to host response data */
+struct octep_ctrl_net_f2h_resp {
+ union octep_ctrl_net_resp_hdr hdr;
+};
+
+/* Max data size to be transferred over mbox */
+union octep_ctrl_net_max_data {
+ struct octep_ctrl_net_h2f_req h2f_req;
+ struct octep_ctrl_net_h2f_resp h2f_resp;
+ struct octep_ctrl_net_f2h_req f2h_req;
+ struct octep_ctrl_net_f2h_resp f2h_resp;
+};
+
+struct octep_ctrl_net_wait_data {
+ struct list_head list;
+ int done;
+ struct octep_ctrl_mbox_msg msg;
+ union {
+ struct octep_ctrl_net_h2f_req req;
+ struct octep_ctrl_net_h2f_resp resp;
+ } data;
+};
+
+/** Initialize data for ctrl net.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on error.
+ */
+int octep_ctrl_net_init(struct octep_device *oct);
+
+/** Get link status from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ *
+ * return value: link status 0=down, 1=up.
+ */
+int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid);
+
+/** Set link status in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param up: boolean status.
+ * @param wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure
+ */
+int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up,
+ bool wait_for_response);
+
+/** Set rx state in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param up: boolean status.
+ * @param wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up,
+ bool wait_for_response);
+
+/** Get mac address from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param addr: non-null pointer to mac address.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr);
+
+/** Set mac address in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param addr: non-null pointer to mac address.
+ * @param wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr,
+ bool wait_for_response);
+
+/** Set mtu in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param mtu: mtu.
+ * @param wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu,
+ bool wait_for_response);
+
+/** Get interface statistics from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param rx_stats: non-null pointer struct octep_iface_rx_stats.
+ * @param tx_stats: non-null pointer struct octep_iface_tx_stats.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid,
+ struct octep_iface_rx_stats *rx_stats,
+ struct octep_iface_tx_stats *tx_stats);
+
+/** Get link info from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param link_info: non-null pointer to struct octep_iface_link_info.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid,
+ struct octep_iface_link_info *link_info);
+
+/** Set link info in firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param link_info: non-null pointer to struct octep_iface_link_info.
+ * @param wait_for_response: poll for response.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_set_link_info(struct octep_device *oct,
+ int vfid,
+ struct octep_iface_link_info *link_info,
+ bool wait_for_response);
+
+/** Poll for firmware messages and process them.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ */
+void octep_ctrl_net_recv_fw_messages(struct octep_device *oct);
+
+/** Uninitialize data for ctrl net.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ *
+ * return value: 0 on success, -errno on error.
+ */
+int octep_ctrl_net_uninit(struct octep_device *oct);
+
+#endif /* __OCTEP_CTRL_NET_H__ */
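As a usage sketch of the interface above (illustrative only; it simply mirrors the sequence that the driver's open path uses later in this patch): enable Rx, report the link as administratively up, then query the firmware for the resulting link state.

    /* Hypothetical caller of the control-net API declared above */
    static int example_bring_link_up(struct octep_device *oct)
    {
            octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID,
                                        true, false);
            octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID,
                                           true, false);

            /* returns <0 on mbox failure, otherwise 0 = down / 1 = up */
            return octep_ctrl_net_get_link_status(oct,
                                                  OCTEP_CTRL_NET_INVALID_VFID);
    }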
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
new file mode 100644
index 000000000000..7d0124b283da
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+static const char octep_gstrings_global_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_alloc_errors",
+ "tx_busy_errors",
+ "rx_dropped",
+ "tx_dropped",
+ "tx_hw_pkts",
+ "tx_hw_octs",
+ "tx_hw_bcast",
+ "tx_hw_mcast",
+ "tx_hw_underflow",
+ "tx_hw_control",
+ "tx_less_than_64",
+ "tx_equal_64",
+ "tx_equal_65_to_127",
+ "tx_equal_128_to_255",
+ "tx_equal_256_to_511",
+ "tx_equal_512_to_1023",
+ "tx_equal_1024_to_1518",
+ "tx_greater_than_1518",
+ "rx_hw_pkts",
+ "rx_hw_bytes",
+ "rx_hw_bcast",
+ "rx_hw_mcast",
+ "rx_pause_pkts",
+ "rx_pause_bytes",
+ "rx_dropped_pkts_fifo_full",
+ "rx_dropped_bytes_fifo_full",
+ "rx_err_pkts",
+};
+
+#define OCTEP_GLOBAL_STATS_CNT (sizeof(octep_gstrings_global_stats) / ETH_GSTRING_LEN)
+
+static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
+ "tx_packets_posted[Q-%u]",
+ "tx_packets_completed[Q-%u]",
+ "tx_bytes[Q-%u]",
+ "tx_busy[Q-%u]",
+};
+
+#define OCTEP_TX_Q_STATS_CNT (sizeof(octep_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
+
+static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
+ "rx_packets[Q-%u]",
+ "rx_bytes[Q-%u]",
+ "rx_alloc_errors[Q-%u]",
+};
+
+#define OCTEP_RX_Q_STATS_CNT (sizeof(octep_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
+
+static void octep_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ strscpy(info->driver, OCTEP_DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info));
+}
+
+static void octep_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+ char *strings = (char *)data;
+ int i, j;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++) {
+ snprintf(strings, ETH_GSTRING_LEN, "%s",
+ octep_gstrings_global_stats[i]);
+ strings += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_TX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_gstrings_tx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ for (j = 0; j < OCTEP_RX_Q_STATS_CNT; j++) {
+ snprintf(strings, ETH_GSTRING_LEN,
+ octep_gstrings_rx_q_stats[j], i);
+ strings += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int octep_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return OCTEP_GLOBAL_STATS_CNT + (num_queues *
+ (OCTEP_TX_Q_STATS_CNT + OCTEP_RX_Q_STATS_CNT));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
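As a worked example of the count above: with the string tables defined in this file (31 global strings, 4 per Tx queue, 3 per Rx queue), a configuration with, say, 8 active rings would report 31 + 8 * (4 + 3) = 87 ETH_SS_STATS entries; the 8 here is only an example ring count, and the per-entry values are written by octep_get_ethtool_stats() below.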
+
+static void
+octep_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_tx_stats *iface_tx_stats;
+ struct octep_iface_rx_stats *iface_rx_stats;
+ u64 rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes;
+ u64 rx_alloc_errors, tx_busy_errors;
+ int q, i;
+
+ rx_packets = 0;
+ rx_bytes = 0;
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_alloc_errors = 0;
+ tx_busy_errors = 0;
+
+ iface_tx_stats = &oct->iface_tx_stats;
+ iface_rx_stats = &oct->iface_rx_stats;
+ octep_ctrl_net_get_if_stats(oct,
+ OCTEP_CTRL_NET_INVALID_VFID,
+ iface_rx_stats,
+ iface_tx_stats);
+
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+ struct octep_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ tx_busy_errors += iq->stats.tx_busy;
+
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ rx_alloc_errors += oq->stats.alloc_failures;
+ }
+ i = 0;
+ data[i++] = rx_packets;
+ data[i++] = tx_packets;
+ data[i++] = rx_bytes;
+ data[i++] = tx_bytes;
+ data[i++] = rx_alloc_errors;
+ data[i++] = tx_busy_errors;
+ data[i++] = iface_rx_stats->dropped_pkts_fifo_full +
+ iface_rx_stats->err_pkts;
+ data[i++] = iface_tx_stats->xscol +
+ iface_tx_stats->xsdef;
+ data[i++] = iface_tx_stats->pkts;
+ data[i++] = iface_tx_stats->octs;
+ data[i++] = iface_tx_stats->bcst;
+ data[i++] = iface_tx_stats->mcst;
+ data[i++] = iface_tx_stats->undflw;
+ data[i++] = iface_tx_stats->ctl;
+ data[i++] = iface_tx_stats->hist_lt64;
+ data[i++] = iface_tx_stats->hist_eq64;
+ data[i++] = iface_tx_stats->hist_65to127;
+ data[i++] = iface_tx_stats->hist_128to255;
+ data[i++] = iface_tx_stats->hist_256to511;
+ data[i++] = iface_tx_stats->hist_512to1023;
+ data[i++] = iface_tx_stats->hist_1024to1518;
+ data[i++] = iface_tx_stats->hist_gt1518;
+ data[i++] = iface_rx_stats->pkts;
+ data[i++] = iface_rx_stats->octets;
+ data[i++] = iface_rx_stats->mcast_pkts;
+ data[i++] = iface_rx_stats->bcast_pkts;
+ data[i++] = iface_rx_stats->pause_pkts;
+ data[i++] = iface_rx_stats->pause_octets;
+ data[i++] = iface_rx_stats->dropped_pkts_fifo_full;
+ data[i++] = iface_rx_stats->dropped_octets_fifo_full;
+ data[i++] = iface_rx_stats->err_pkts;
+
+ /* Per Tx Queue stats */
+ for (q = 0; q < oct->num_iqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+
+ data[i++] = iq->stats.instr_posted;
+ data[i++] = iq->stats.instr_completed;
+ data[i++] = iq->stats.bytes_sent;
+ data[i++] = iq->stats.tx_busy;
+ }
+
+ /* Per Rx Queue stats */
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_oq *oq = oct->oq[q];
+
+ data[i++] = oq->stats.packets;
+ data[i++] = oq->stats.bytes;
+ data[i++] = oq->stats.alloc_failures;
+ }
+}
+
+#define OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(octep_speeds, ksettings, name) \
+{ \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_T)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_R)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR2)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_LR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_CR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_KR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_LR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \
+ if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_SR4)) \
+ ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \
+}
+
+static int octep_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info *link_info;
+ u32 advertised_modes, supported_modes;
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+ link_info = &oct->link_info;
+ octep_ctrl_net_get_link_info(oct, OCTEP_CTRL_NET_INVALID_VFID, link_info);
+
+ advertised_modes = oct->link_info.advertised_modes;
+ supported_modes = oct->link_info.supported_modes;
+
+ OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported);
+ OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising);
+
+ if (link_info->autoneg) {
+ if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+ if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_ADVERTISED) {
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+ } else {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ }
+
+ if (link_info->pause) {
+ if (link_info->pause & OCTEP_LINK_MODE_PAUSE_SUPPORTED)
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ if (link_info->pause & OCTEP_LINK_MODE_PAUSE_ADVERTISED)
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+ }
+
+ cmd->base.port = PORT_FIBRE;
+ ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+ if (netif_carrier_ok(netdev)) {
+ cmd->base.speed = link_info->speed;
+ cmd->base.duplex = DUPLEX_FULL;
+ } else {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+ return 0;
+}
+
+static int octep_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info link_info_new;
+ struct octep_iface_link_info *link_info;
+ u64 advertised = 0;
+ u8 autoneg = 0;
+ int err;
+
+ link_info = &oct->link_info;
+ memcpy(&link_info_new, link_info, sizeof(struct octep_iface_link_info));
+
+ /* Only Full duplex is supported;
+ * Assume full duplex when duplex is unknown.
+ */
+ if (cmd->base.duplex != DUPLEX_FULL &&
+ cmd->base.duplex != DUPLEX_UNKNOWN)
+ return -EOPNOTSUPP;
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (!(link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED))
+ return -EOPNOTSUPP;
+ autoneg = 1;
+ }
+
+ if (!bitmap_subset(cmd->link_modes.advertising,
+ cmd->link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return -EINVAL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseT_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_T);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseR_FEC))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_R);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseLR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_LR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_10GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 25000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_25GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseCR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_CR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseKR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_KR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseLR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_LR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 40000baseSR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_40GBASE_SR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseCR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseKR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseSR2_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR2);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseCR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseKR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseLR_ER_FR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_LR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 50000baseSR_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseCR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_CR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseKR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_KR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseLR4_ER4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_LR4);
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100000baseSR4_Full))
+ advertised |= BIT(OCTEP_LINK_MODE_100GBASE_SR4);
+
+ if (advertised == link_info->advertised_modes &&
+ cmd->base.speed == link_info->speed &&
+ cmd->base.autoneg == link_info->autoneg)
+ return 0;
+
+ link_info_new.advertised_modes = advertised;
+ link_info_new.speed = cmd->base.speed;
+ link_info_new.autoneg = autoneg;
+
+ err = octep_ctrl_net_set_link_info(oct, OCTEP_CTRL_NET_INVALID_VFID,
+ &link_info_new, true);
+ if (err)
+ return err;
+
+ memcpy(link_info, &link_info_new, sizeof(struct octep_iface_link_info));
+ return 0;
+}
+
+static const struct ethtool_ops octep_ethtool_ops = {
+ .get_drvinfo = octep_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = octep_get_strings,
+ .get_sset_count = octep_get_sset_count,
+ .get_ethtool_stats = octep_get_ethtool_stats,
+ .get_link_ksettings = octep_get_link_ksettings,
+ .set_link_ksettings = octep_set_link_ksettings,
+};
+
+void octep_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &octep_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
new file mode 100644
index 000000000000..e1853da280f9
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -0,0 +1,1267 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+#include "octep_ctrl_net.h"
+
+#define OCTEP_INTR_POLL_TIME_MSECS 100
+struct workqueue_struct *octep_wq;
+
+/* Supported Devices */
+static const struct pci_device_id octep_pci_id_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)},
+ {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_PF)},
+ {0, },
+};
+MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl);
+
+MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>");
+MODULE_DESCRIPTION(OCTEP_DRV_STRING);
+MODULE_LICENSE("GPL");
+
+/**
+ * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate resources to hold per Tx/Rx queue interrupt info.
+ * This information is passed to the interrupt handler; it is used to schedule
+ * NAPI poll and gives quick access to the private data of the Tx/Rx queue
+ * corresponding to the interrupt being handled.
+ *
+ * Return: 0, on successful allocation of resources for all queue interrupts.
+ * -1, if failed to allocate any resource.
+ */
+static int octep_alloc_ioq_vectors(struct octep_device *oct)
+{
+ int i;
+ struct octep_ioq_vector *ioq_vector;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
+ if (!oct->ioq_vector[i])
+ goto free_ioq_vector;
+
+ ioq_vector = oct->ioq_vector[i];
+ ioq_vector->iq = oct->iq[i];
+ ioq_vector->oq = oct->oq[i];
+ ioq_vector->octep_dev = oct;
+ }
+
+ dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
+ return 0;
+
+free_ioq_vector:
+ while (i) {
+ i--;
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ return -1;
+}
+
+/**
+ * octep_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_free_ioq_vectors(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ if (oct->ioq_vector[i]) {
+ vfree(oct->ioq_vector[i]);
+ oct->ioq_vector[i] = NULL;
+ }
+ }
+ netdev_info(oct->netdev, "Freed IOQ Vectors\n");
+}
+
+/**
+ * octep_enable_msix_range() - enable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts)
+ * for the Octeon device.
+ *
+ * Return: 0, on successfully enabling all MSI-x interrupts.
+ * -1, if failed to enable any MSI-x interrupt.
+ */
+static int octep_enable_msix_range(struct octep_device *oct)
+{
+ int num_msix, msix_allocated;
+ int i;
+
+ /* Generic interrupts apart from input/output queues */
+ num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf);
+ oct->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!oct->msix_entries)
+ goto msix_alloc_err;
+
+ for (i = 0; i < num_msix; i++)
+ oct->msix_entries[i].entry = i;
+
+ msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
+ num_msix, num_msix);
+ if (msix_allocated != num_msix) {
+ dev_err(&oct->pdev->dev,
+ "Failed to enable %d msix irqs; got only %d\n",
+ num_msix, msix_allocated);
+ goto enable_msix_err;
+ }
+ oct->num_irqs = msix_allocated;
+ dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");
+
+ return 0;
+
+enable_msix_err:
+ if (msix_allocated > 0)
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+msix_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_disable_msix() - disable MSI-x interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Disable MSI-x on the Octeon device.
+ */
+static void octep_disable_msix(struct octep_device *oct)
+{
+ pci_disable_msix(oct->pdev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
+}
+
+/**
+ * octep_non_ioq_intr_handler() - common handler for all generic interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * This is the common handler for all non-queue (generic) interrupts.
+ */
+static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.non_ioq_intr_handler(oct);
+}
+
+/**
+ * octep_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data; contains pointers to Tx/Rx queue private data
+ * and the corresponding NAPI context.
+ *
+ * This is the common handler for all Tx/Rx queue (IOQ) interrupts.
+ */
+static irqreturn_t octep_ioq_intr_handler(int irq, void *data)
+{
+ struct octep_ioq_vector *ioq_vector = data;
+ struct octep_device *oct = ioq_vector->octep_dev;
+
+ return oct->hw_ops.ioq_intr_handler(ioq_vector);
+}
+
+/**
+ * octep_request_irqs() - Register interrupt handlers.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Register handlers for all queue and non-queue interrupts.
+ *
+ * Return: 0, on successful registration of all interrupt handlers.
+ * -1, on any error.
+ */
+static int octep_request_irqs(struct octep_device *oct)
+{
+ struct net_device *netdev = oct->netdev;
+ struct octep_ioq_vector *ioq_vector;
+ struct msix_entry *msix_entry;
+ char **non_ioq_msix_names;
+ int num_non_ioq_msix;
+ int ret, i, j;
+
+ num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf);
+ non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf);
+
+ oct->non_ioq_irq_names = kcalloc(num_non_ioq_msix,
+ OCTEP_MSIX_NAME_SIZE, GFP_KERNEL);
+ if (!oct->non_ioq_irq_names)
+ goto alloc_err;
+
+ /* First few MSI-X interrupts are non-queue interrupts */
+ for (i = 0; i < num_non_ioq_msix; i++) {
+ char *irq_name;
+
+ irq_name = &oct->non_ioq_irq_names[i * OCTEP_MSIX_NAME_SIZE];
+ msix_entry = &oct->msix_entries[i];
+
+ snprintf(irq_name, OCTEP_MSIX_NAME_SIZE,
+ "%s-%s", netdev->name, non_ioq_msix_names[i]);
+ ret = request_irq(msix_entry->vector,
+ octep_non_ioq_intr_handler, 0,
+ irq_name, oct);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for %s; err=%d",
+ irq_name, ret);
+ goto non_ioq_irq_err;
+ }
+ }
+
+ /* Request IRQs for Tx/Rx queues */
+ for (j = 0; j < oct->num_oqs; j++) {
+ ioq_vector = oct->ioq_vector[j];
+ msix_entry = &oct->msix_entries[j + num_non_ioq_msix];
+
+ snprintf(ioq_vector->name, sizeof(ioq_vector->name),
+ "%s-q%d", netdev->name, j);
+ ret = request_irq(msix_entry->vector,
+ octep_ioq_intr_handler, 0,
+ ioq_vector->name, ioq_vector);
+ if (ret) {
+ netdev_err(netdev,
+ "request_irq failed for Q-%d; err=%d",
+ j, ret);
+ goto ioq_irq_err;
+ }
+
+ cpumask_set_cpu(j % num_online_cpus(),
+ &ioq_vector->affinity_mask);
+ irq_set_affinity_hint(msix_entry->vector,
+ &ioq_vector->affinity_mask);
+ }
+
+ return 0;
+ioq_irq_err:
+ while (j) {
+ --j;
+ ioq_vector = oct->ioq_vector[j];
+ msix_entry = &oct->msix_entries[j + num_non_ioq_msix];
+
+ irq_set_affinity_hint(msix_entry->vector, NULL);
+ free_irq(msix_entry->vector, ioq_vector);
+ }
+non_ioq_irq_err:
+ while (i) {
+ --i;
+ free_irq(oct->msix_entries[i].vector, oct);
+ }
+ kfree(oct->non_ioq_irq_names);
+ oct->non_ioq_irq_names = NULL;
+alloc_err:
+ return -1;
+}
+
+/**
+ * octep_free_irqs() - free all registered interrupts.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free all queue and non-queue interrupts of the Octeon device.
+ */
+static void octep_free_irqs(struct octep_device *oct)
+{
+ int i;
+
+ /* First few MSI-X interrupts are non queue interrupts; free them */
+ for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++)
+ free_irq(oct->msix_entries[i].vector, oct);
+ kfree(oct->non_ioq_irq_names);
+
+ /* Free IRQs for Input/Output (Tx/Rx) queues */
+ for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) {
+ irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
+ free_irq(oct->msix_entries[i].vector,
+ oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]);
+ }
+ netdev_info(oct->netdev, "IRQs freed\n");
+}
+
+/**
+ * octep_setup_irqs() - setup interrupts for the Octeon device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Allocate data structures to hold per interrupt information, allocate/enable
+ * MSI-x interrupt and register interrupt handlers.
+ *
+ * Return: 0, on successful allocation and registration of all interrupts.
+ * -1, on any error.
+ */
+static int octep_setup_irqs(struct octep_device *oct)
+{
+ if (octep_alloc_ioq_vectors(oct))
+ goto ioq_vector_err;
+
+ if (octep_enable_msix_range(oct))
+ goto enable_msix_err;
+
+ if (octep_request_irqs(oct))
+ goto request_irq_err;
+
+ return 0;
+
+request_irq_err:
+ octep_disable_msix(oct);
+enable_msix_err:
+ octep_free_ioq_vectors(oct);
+ioq_vector_err:
+ return -1;
+}
+
+/**
+ * octep_clean_irqs() - free all interrupts and its resources.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_clean_irqs(struct octep_device *oct)
+{
+ octep_free_irqs(oct);
+ octep_disable_msix(oct);
+ octep_free_ioq_vectors(oct);
+}
+
+/**
+ * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
+{
+ u32 pkts_pend = oq->pkts_pending;
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+ if (iq->pkts_processed) {
+ writel(iq->pkts_processed, iq->inst_cnt_reg);
+ iq->pkt_in_done -= iq->pkts_processed;
+ iq->pkts_processed = 0;
+ }
+ if (oq->last_pkt_count - pkts_pend) {
+ writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+ oq->last_pkt_count = pkts_pend;
+ }
+
+ /* Flush the previous writes before writing to RESEND bit */
+ wmb();
+ writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+}
+
+/**
+ * octep_napi_poll() - NAPI poll function for Tx/Rx.
+ *
+ * @napi: pointer to napi context.
+ * @budget: max number of packets to be processed in single invocation.
+ */
+static int octep_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octep_ioq_vector *ioq_vector =
+ container_of(napi, struct octep_ioq_vector, napi);
+ u32 tx_pending, rx_done;
+
+ tx_pending = octep_iq_process_completions(ioq_vector->iq, budget);
+ rx_done = octep_oq_process_rx(ioq_vector->oq, budget);
+
+ /* More polling is needed if Tx completion processing is still pending
+ * or if at least 'budget' Rx packets were processed.
+ */
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+ napi_complete(napi);
+ octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+ return rx_done;
+}
+
+/**
+ * octep_napi_add() - Add NAPI poll for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_add(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
+ netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
+ octep_napi_poll);
+ oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
+ }
+}
+
+/**
+ * octep_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_delete(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
+ netif_napi_del(&oct->ioq_vector[i]->napi);
+ oct->oq[i]->napi = NULL;
+ }
+}
+
+/**
+ * octep_napi_enable() - enable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_enable(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
+ napi_enable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+/**
+ * octep_napi_disable() - disable NAPI for all Tx/Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+static void octep_napi_disable(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
+ napi_disable(&oct->ioq_vector[i]->napi);
+ }
+}
+
+static void octep_link_up(struct net_device *netdev)
+{
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+}
+
+/**
+ * octep_open() - start the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * Set up Tx/Rx queues and interrupts, and enable hardware operation of the
+ * Tx/Rx queues and interrupts.
+ *
+ * Return: 0, on successfully setting up device and bring it up.
+ * -1, on any error.
+ */
+static int octep_open(struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ int err, ret;
+
+ netdev_info(netdev, "Starting netdev ...\n");
+ netif_carrier_off(netdev);
+
+ oct->hw_ops.reset_io_queues(oct);
+
+ if (octep_setup_iqs(oct))
+ goto setup_iq_err;
+ if (octep_setup_oqs(oct))
+ goto setup_oq_err;
+ if (octep_setup_irqs(oct))
+ goto setup_irq_err;
+
+ err = netif_set_real_num_tx_queues(netdev, oct->num_oqs);
+ if (err)
+ goto set_queues_err;
+ err = netif_set_real_num_rx_queues(netdev, oct->num_iqs);
+ if (err)
+ goto set_queues_err;
+
+ octep_napi_add(oct);
+ octep_napi_enable(oct);
+
+ oct->link_info.admin_up = 1;
+ octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, true,
+ false);
+ octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, true,
+ false);
+ oct->poll_non_ioq_intr = false;
+
+ /* Enable the input and output queues for this Octeon device */
+ oct->hw_ops.enable_io_queues(oct);
+
+ /* Enable Octeon device interrupts */
+ oct->hw_ops.enable_interrupts(oct);
+
+ octep_oq_dbell_init(oct);
+
+ ret = octep_ctrl_net_get_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID);
+ if (ret > 0)
+ octep_link_up(netdev);
+
+ return 0;
+
+set_queues_err:
+ octep_clean_irqs(oct);
+setup_irq_err:
+ octep_free_oqs(oct);
+setup_oq_err:
+ octep_free_iqs(oct);
+setup_iq_err:
+ return -1;
+}
+
+/**
+ * octep_stop() - stop the octeon network device.
+ *
+ * @netdev: pointer to kernel network device.
+ *
+ * stop the device Tx/Rx operations, bring down the link and
+ * free up all resources allocated for Tx/Rx queues and interrupts.
+ */
+static int octep_stop(struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ netdev_info(netdev, "Stopping the device ...\n");
+
+ octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, false,
+ false);
+ octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, false,
+ false);
+
+ /* Stop Tx from stack */
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ oct->link_info.admin_up = 0;
+ oct->link_info.oper_up = 0;
+
+ oct->hw_ops.disable_interrupts(oct);
+ octep_napi_disable(oct);
+ octep_napi_delete(oct);
+
+ octep_clean_irqs(oct);
+ octep_clean_iqs(oct);
+
+ oct->hw_ops.disable_io_queues(oct);
+ oct->hw_ops.reset_io_queues(oct);
+ octep_free_oqs(oct);
+ octep_free_iqs(oct);
+
+ oct->poll_non_ioq_intr = true;
+ queue_delayed_work(octep_wq, &oct->intr_poll_task,
+ msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
+
+ netdev_info(netdev, "Device stopped !!\n");
+ return 0;
+}
+
+/**
+ * octep_iq_full_check() - check if a Tx queue is full.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Return: 0, if the Tx queue is not full.
+ * 1, if the Tx queue is full.
+ */
+static inline int octep_iq_full_check(struct octep_iq *iq)
+{
+ if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ OCTEP_WAKE_QUEUE_THRESHOLD))
+ return 0;
+
+ /* Stop the queue if unable to send */
+ netif_stop_subqueue(iq->netdev, iq->q_no);
+
+ /* check again and restart the queue, in case NAPI has just freed
+ * enough Tx ring entries.
+ */
+ if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >=
+ OCTEP_WAKE_QUEUE_THRESHOLD)) {
+ netif_start_subqueue(iq->netdev, iq->q_no);
+ iq->stats.restart_cnt++;
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * octep_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
+ *
+ * @skb: packet skbuff pointer.
+ * @netdev: kernel network device.
+ *
+ * Return: NETDEV_TX_BUSY, if Tx Queue is full.
+ * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
+ */
+static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_tx_sglist_desc *sglist;
+ struct octep_tx_buffer *tx_buffer;
+ struct octep_tx_desc_hw *hw_desc;
+ struct skb_shared_info *shinfo;
+ struct octep_instr_hdr *ih;
+ struct octep_iq *iq;
+ skb_frag_t *frag;
+ u16 nr_frags, si;
+ u16 q_no, wi;
+
+ q_no = skb_get_queue_mapping(skb);
+ if (q_no >= oct->num_iqs) {
+ netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
+ q_no = q_no % oct->num_iqs;
+ }
+
+ iq = oct->iq[q_no];
+ if (octep_iq_full_check(iq)) {
+ iq->stats.tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ shinfo = skb_shinfo(skb);
+ nr_frags = shinfo->nr_frags;
+
+ wi = iq->host_write_index;
+ hw_desc = &iq->desc_ring[wi];
+ hw_desc->ih64 = 0;
+
+ tx_buffer = iq->buff_info + wi;
+ tx_buffer->skb = skb;
+
+ ih = &hw_desc->ih;
+ ih->tlen = skb->len;
+ ih->pkind = oct->pkind;
+
+ if (!nr_frags) {
+ tx_buffer->gather = 0;
+ tx_buffer->dma = dma_map_single(iq->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, tx_buffer->dma))
+ goto dma_map_err;
+ hw_desc->dptr = tx_buffer->dma;
+ } else {
+ /* Scatter/Gather */
+ dma_addr_t dma;
+ u16 len;
+
+ sglist = tx_buffer->sglist;
+
+ ih->gsz = nr_frags + 1;
+ ih->gather = 1;
+ tx_buffer->gather = 1;
+
+ len = skb_headlen(skb);
+ dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_err;
+
+ dma_sync_single_for_cpu(iq->dev, tx_buffer->sglist_dma,
+ OCTEP_SGLIST_SIZE_PER_PKT,
+ DMA_TO_DEVICE);
+ memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT);
+ sglist[0].len[3] = len;
+ sglist[0].dma_ptr[0] = dma;
+
+ si = 1; /* entry 0 is main skb, mapped above */
+ frag = &shinfo->frags[0];
+ while (nr_frags--) {
+ len = skb_frag_size(frag);
+ dma = skb_frag_dma_map(iq->dev, frag, 0,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(iq->dev, dma))
+ goto dma_map_sg_err;
+
+ sglist[si >> 2].len[3 - (si & 3)] = len;
+ sglist[si >> 2].dma_ptr[si & 3] = dma;
+
+ frag++;
+ si++;
+ }
+ dma_sync_single_for_device(iq->dev, tx_buffer->sglist_dma,
+ OCTEP_SGLIST_SIZE_PER_PKT,
+ DMA_TO_DEVICE);
+
+ hw_desc->dptr = tx_buffer->sglist_dma;
+ }
+
+ /* Flush the hw descriptor before writing to doorbell */
+ wmb();
+
+ /* Ring Doorbell to notify the NIC there is a new packet */
+ writel(1, iq->doorbell_reg);
+ atomic_inc(&iq->instr_pending);
+ wi++;
+ if (wi == iq->max_count)
+ wi = 0;
+ iq->host_write_index = wi;
+
+ netdev_tx_sent_queue(iq->netdev_q, skb->len);
+ iq->stats.instr_posted++;
+ skb_tx_timestamp(skb);
+ return NETDEV_TX_OK;
+
+dma_map_sg_err:
+ if (si > 0) {
+ dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
+ sglist[0].len[3], DMA_TO_DEVICE);
+ sglist[0].len[3] = 0;
+ }
+ /* Unmap only the fragments that were successfully mapped (entries
+ * 1 .. si - 1); lengths were stored at len[3 - (i & 3)] above.
+ */
+ while (si > 1) {
+ si--;
+ dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
+ sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+ sglist[si >> 2].len[3 - (si & 3)] = 0;
+ }
+ tx_buffer->gather = 0;
+dma_map_err:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * octep_get_stats64() - Get Octeon network device statistics.
+ *
+ * @netdev: kernel network device.
+ * @stats: pointer to stats structure to be filled in.
+ */
+static void octep_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
+ struct octep_device *oct = netdev_priv(netdev);
+ int q;
+
+ if (netif_running(netdev))
+ octep_ctrl_net_get_if_stats(oct,
+ OCTEP_CTRL_NET_INVALID_VFID,
+ &oct->iface_rx_stats,
+ &oct->iface_tx_stats);
+
+ tx_packets = 0;
+ tx_bytes = 0;
+ rx_packets = 0;
+ rx_bytes = 0;
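+ /* Tx and Rx queues are set up in pairs, so num_oqs is used here to
+ * walk both the iq[] and oq[] arrays.
+ */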
+ for (q = 0; q < oct->num_oqs; q++) {
+ struct octep_iq *iq = oct->iq[q];
+ struct octep_oq *oq = oct->oq[q];
+
+ tx_packets += iq->stats.instr_completed;
+ tx_bytes += iq->stats.bytes_sent;
+ rx_packets += oq->stats.packets;
+ rx_bytes += oq->stats.bytes;
+ }
+ stats->tx_packets = tx_packets;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+ stats->rx_bytes = rx_bytes;
+ stats->multicast = oct->iface_rx_stats.mcast_pkts;
+ stats->rx_errors = oct->iface_rx_stats.err_pkts;
+ stats->collisions = oct->iface_tx_stats.xscol;
+ stats->tx_fifo_errors = oct->iface_tx_stats.undflw;
+}
+
+/**
+ * octep_tx_timeout_task() - work queue task to handle Tx queue timeout.
+ *
+ * @work: pointer to Tx queue timeout work_struct
+ *
+ * Stop and restart the device so that it frees up all queue resources
+ * and reinitializes the queues; this potentially clears a Tx queue
+ * timeout condition.
+ */
+static void octep_tx_timeout_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ tx_timeout_task);
+ struct net_device *netdev = oct->netdev;
+
+ rtnl_lock();
+ if (netif_running(netdev)) {
+ octep_stop(netdev);
+ octep_open(netdev);
+ }
+ rtnl_unlock();
+}
+
+/**
+ * octep_tx_timeout() - Handle Tx Queue timeout.
+ *
+ * @netdev: pointer to kernel network device.
+ * @txqueue: Timed out Tx queue number.
+ *
+ * Schedule a work to handle Tx queue timeout.
+ */
+static void octep_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+
+ queue_work(octep_wq, &oct->tx_timeout_task);
+}
+
+static int octep_set_mac(struct net_device *netdev, void *p)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct sockaddr *addr = (struct sockaddr *)p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ err = octep_ctrl_net_set_mac_addr(oct, OCTEP_CTRL_NET_INVALID_VFID,
+ addr->sa_data, true);
+ if (err)
+ return err;
+
+ memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ return 0;
+}
+
+static int octep_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct octep_device *oct = netdev_priv(netdev);
+ struct octep_iface_link_info *link_info;
+ int err = 0;
+
+ link_info = &oct->link_info;
+ if (link_info->mtu == new_mtu)
+ return 0;
+
+ err = octep_ctrl_net_set_mtu(oct, OCTEP_CTRL_NET_INVALID_VFID, new_mtu,
+ true);
+ if (!err) {
+ oct->link_info.mtu = new_mtu;
+ netdev->mtu = new_mtu;
+ }
+
+ return err;
+}
+
+static const struct net_device_ops octep_netdev_ops = {
+ .ndo_open = octep_open,
+ .ndo_stop = octep_stop,
+ .ndo_start_xmit = octep_start_xmit,
+ .ndo_get_stats64 = octep_get_stats64,
+ .ndo_tx_timeout = octep_tx_timeout,
+ .ndo_set_mac_address = octep_set_mac,
+ .ndo_change_mtu = octep_change_mtu,
+};
+
+/**
+ * octep_intr_poll_task - work queue task to process non-ioq interrupts.
+ *
+ * @work: pointer to mbox work_struct
+ *
+ * Process non-IOQ interrupts, such as the control mailbox and PF-VF
+ * mailbox interrupts.
+ */
+static void octep_intr_poll_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ intr_poll_task.work);
+
+ if (!oct->poll_non_ioq_intr) {
+ dev_info(&oct->pdev->dev, "Interrupt poll task stopped.\n");
+ return;
+ }
+
+ oct->hw_ops.poll_non_ioq_interrupts(oct);
+ queue_delayed_work(octep_wq, &oct->intr_poll_task,
+ msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
+}
+
+/**
+ * octep_hb_timeout_task - work queue task to check firmware heartbeat.
+ *
+ * @work: pointer to hb work_struct
+ *
+ * Check the heartbeat miss count and stop the network interface if the
+ * count exceeds the configured maximum heartbeat miss count.
+ */
+static void octep_hb_timeout_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ hb_task.work);
+ int miss_cnt;
+
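+ /* The heartbeat interrupt handler (not shown here) is assumed to
+ * reset hb_miss_cnt on each beat, so the count below only grows while
+ * heartbeats stop arriving.
+ */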
+ miss_cnt = atomic_inc_return(&oct->hb_miss_cnt);
+ if (miss_cnt < oct->conf->max_hb_miss_cnt) {
+ queue_delayed_work(octep_wq, &oct->hb_task,
+ msecs_to_jiffies(oct->conf->hb_interval * 1000));
+ return;
+ }
+
+ dev_err(&oct->pdev->dev, "Missed %u heartbeats. Uninitializing\n",
+ miss_cnt);
+ rtnl_lock();
+ if (netif_running(oct->netdev))
+ octep_stop(oct->netdev);
+ rtnl_unlock();
+}
+
+/**
+ * octep_ctrl_mbox_task - work queue task to handle ctrl mbox messages.
+ *
+ * @work: pointer to ctrl mbox work_struct
+ *
+ * Poll ctrl mbox message queue and handle control messages from firmware.
+ **/
+static void octep_ctrl_mbox_task(struct work_struct *work)
+{
+ struct octep_device *oct = container_of(work, struct octep_device,
+ ctrl_mbox_task);
+
+ octep_ctrl_net_recv_fw_messages(oct);
+}
+
+static const char *octep_devid_to_str(struct octep_device *oct)
+{
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_PF:
+ return "CN93XX";
+ case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
+ return "CNF95N";
+ default:
+ return "Unsupported";
+ }
+}
+
+/**
+ * octep_device_setup() - Setup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Setup Octeon device hardware operations, configuration, etc ...
+ */
+int octep_device_setup(struct octep_device *oct)
+{
+ struct pci_dev *pdev = oct->pdev;
+ int i, ret;
+
+ /* allocate memory for oct->conf */
+ oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
+ if (!oct->conf)
+ return -ENOMEM;
+
+ /* Map BAR regions */
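+ /* BAR0, BAR2 and BAR4 are 64-bit BARs and occupy two PCI resource
+ * slots each, hence the i * 2 resource index below.
+ */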
+ for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
+ oct->mmio[i].hw_addr =
+ ioremap(pci_resource_start(oct->pdev, i * 2),
+ pci_resource_len(oct->pdev, i * 2));
+ oct->mmio[i].mapped = 1;
+ }
+
+ oct->chip_id = pdev->device;
+ oct->rev_id = pdev->revision;
+ dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
+
+ switch (oct->chip_id) {
+ case OCTEP_PCI_DEVICE_ID_CN93_PF:
+ case OCTEP_PCI_DEVICE_ID_CNF95N_PF:
+ dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n",
+ octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct),
+ OCTEP_MINOR_REV(oct));
+ octep_device_setup_cn93_pf(oct);
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "%s: unsupported device\n", __func__);
+ goto unsupported_dev;
+ }
+
+ oct->pkind = CFG_GET_IQ_PKIND(oct->conf);
+
+ ret = octep_ctrl_net_init(oct);
+ if (ret)
+ return ret;
+
+ atomic_set(&oct->hb_miss_cnt, 0);
+ INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task);
+ queue_delayed_work(octep_wq, &oct->hb_task,
+ msecs_to_jiffies(oct->conf->hb_interval * 1000));
+ return 0;
+
+unsupported_dev:
+ for (i = 0; i < OCTEP_MMIO_REGIONS; i++)
+ iounmap(oct->mmio[i].hw_addr);
+
+ kfree(oct->conf);
+ return -1;
+}
+
+/**
+ * octep_device_cleanup() - Cleanup Octeon Device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Cleanup Octeon device allocated resources.
+ */
+static void octep_device_cleanup(struct octep_device *oct)
+{
+ int i;
+
+ dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
+
+ for (i = 0; i < OCTEP_MAX_VF; i++) {
+ vfree(oct->mbox[i]);
+ oct->mbox[i] = NULL;
+ }
+
+ octep_ctrl_net_uninit(oct);
+ cancel_delayed_work_sync(&oct->hb_task);
+
+ oct->hw_ops.soft_reset(oct);
+ for (i = 0; i < OCTEP_MMIO_REGIONS; i++) {
+ if (oct->mmio[i].mapped)
+ iounmap(oct->mmio[i].hw_addr);
+ }
+
+ kfree(oct->conf);
+ oct->conf = NULL;
+}
+
+static bool get_fw_ready_status(struct pci_dev *pdev)
+{
+ u32 pos = 0;
+ u16 vsec_id;
+ u8 status;
+
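+ /* Walk the PCI vendor-specific extended capabilities looking for the
+ * firmware-status VSEC: the VSEC ID is read 4 bytes past the
+ * capability header and the ready-status byte 8 bytes past it.
+ */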
+ while ((pos = pci_find_next_ext_capability(pdev, pos,
+ PCI_EXT_CAP_ID_VNDR))) {
+ pci_read_config_word(pdev, pos + 4, &vsec_id);
+#define FW_STATUS_VSEC_ID 0xA3
+ if (vsec_id != FW_STATUS_VSEC_ID)
+ continue;
+
+ pci_read_config_byte(pdev, (pos + 8), &status);
+ dev_info(&pdev->dev, "Firmware ready status = %u\n", status);
+ return status;
+ }
+ return false;
+}
+
+/**
+ * octep_probe() - Octeon PCI device probe handler.
+ *
+ * @pdev: PCI device structure.
+ * @ent: entry in Octeon PCI device ID table.
+ *
+ * Initializes and enables the Octeon PCI device for network operations.
+ * Initializes Octeon private data structure and registers a network device.
+ */
+static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct octep_device *octep_dev = NULL;
+ struct net_device *netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
+ goto err_dma_mask;
+ }
+
+ err = pci_request_mem_regions(pdev, OCTEP_DRV_NAME);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
+ goto err_pci_regions;
+ }
+
+ pci_set_master(pdev);
+
+ if (!get_fw_ready_status(pdev)) {
+ dev_notice(&pdev->dev, "Firmware not ready; defer probe.\n");
+ err = -EPROBE_DEFER;
+ goto err_alloc_netdev;
+ }
+
+ netdev = alloc_etherdev_mq(sizeof(struct octep_device),
+ OCTEP_MAX_QUEUES);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Failed to allocate netdev\n");
+ err = -ENOMEM;
+ goto err_alloc_netdev;
+ }
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ octep_dev = netdev_priv(netdev);
+ octep_dev->netdev = netdev;
+ octep_dev->pdev = pdev;
+ octep_dev->dev = &pdev->dev;
+ pci_set_drvdata(pdev, octep_dev);
+
+ err = octep_device_setup(octep_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Device setup failed\n");
+ goto err_octep_config;
+ }
+ INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task);
+ INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task);
+ INIT_DELAYED_WORK(&octep_dev->intr_poll_task, octep_intr_poll_task);
+ octep_dev->poll_non_ioq_intr = true;
+ queue_delayed_work(octep_wq, &octep_dev->intr_poll_task,
+ msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS));
+
+ netdev->netdev_ops = &octep_netdev_ops;
+ octep_set_ethtool_ops(netdev);
+ netif_carrier_off(netdev);
+
+ netdev->hw_features = NETIF_F_SG;
+ netdev->features |= netdev->hw_features;
+ netdev->min_mtu = OCTEP_MIN_MTU;
+ netdev->max_mtu = OCTEP_MAX_MTU;
+ netdev->mtu = OCTEP_DEFAULT_MTU;
+
+ err = octep_ctrl_net_get_mac_addr(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
+ octep_dev->mac_addr);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get mac address\n");
+ goto register_dev_err;
+ }
+ eth_hw_addr_set(netdev, octep_dev->mac_addr);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register netdev\n");
+ goto register_dev_err;
+ }
+ dev_info(&pdev->dev, "Device probe successful\n");
+ return 0;
+
+register_dev_err:
+ octep_device_cleanup(octep_dev);
+err_octep_config:
+ free_netdev(netdev);
+err_alloc_netdev:
+ pci_release_mem_regions(pdev);
+err_pci_regions:
+err_dma_mask:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * octep_remove() - Remove Octeon PCI device from driver control.
+ *
+ * @pdev: PCI device structure of the Octeon device.
+ *
+ * Cleanup all resources allocated for the Octeon device.
+ * Unregister from network device and disable the PCI device.
+ */
+static void octep_remove(struct pci_dev *pdev)
+{
+ struct octep_device *oct = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+
+ if (!oct)
+ return;
+
+ cancel_work_sync(&oct->tx_timeout_task);
+ cancel_work_sync(&oct->ctrl_mbox_task);
+ netdev = oct->netdev;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+
+ oct->poll_non_ioq_intr = false;
+ cancel_delayed_work_sync(&oct->intr_poll_task);
+ octep_device_cleanup(oct);
+ pci_release_mem_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver octep_driver = {
+ .name = OCTEP_DRV_NAME,
+ .id_table = octep_pci_id_tbl,
+ .probe = octep_probe,
+ .remove = octep_remove,
+};
+
+/**
+ * octep_init_module() - Module initialization.
+ *
+ * Create common resources for the driver and register the PCI driver.
+ */
+static int __init octep_init_module(void)
+{
+ int ret;
+
+ pr_info("%s: Loading %s ...\n", OCTEP_DRV_NAME, OCTEP_DRV_STRING);
+
+ /* work queue for all deferred tasks */
+ octep_wq = create_singlethread_workqueue(OCTEP_DRV_NAME);
+ if (!octep_wq) {
+ pr_err("%s: Failed to create common workqueue\n",
+ OCTEP_DRV_NAME);
+ return -ENOMEM;
+ }
+
+ ret = pci_register_driver(&octep_driver);
+ if (ret < 0) {
+ pr_err("%s: Failed to register PCI driver; err=%d\n",
+ OCTEP_DRV_NAME, ret);
+ destroy_workqueue(octep_wq);
+ return ret;
+ }
+
+ pr_info("%s: Loaded successfully !\n", OCTEP_DRV_NAME);
+
+ return ret;
+}
+
+/**
+ * octep_exit_module() - Module exit routine.
+ *
+ * Unregister the driver from the PCI subsystem and clean up common resources.
+ */
+static void __exit octep_exit_module(void)
+{
+ pr_info("%s: Unloading ...\n", OCTEP_DRV_NAME);
+
+ pci_unregister_driver(&octep_driver);
+ destroy_workqueue(octep_wq);
+
+ pr_info("%s: Unloading complete\n", OCTEP_DRV_NAME);
+}
+
+module_init(octep_init_module);
+module_exit(octep_exit_module);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
new file mode 100644
index 000000000000..e0907a719133
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_MAIN_H_
+#define _OCTEP_MAIN_H_
+
+#include "octep_tx.h"
+#include "octep_rx.h"
+#include "octep_ctrl_mbox.h"
+
+#define OCTEP_DRV_NAME "octeon_ep"
+#define OCTEP_DRV_STRING "Marvell Octeon EndPoint NIC Driver"
+
+#define OCTEP_PCIID_CN93_PF 0xB200177d
+#define OCTEP_PCIID_CN93_VF 0xB203177d
+
+#define OCTEP_PCI_DEVICE_ID_CN93_PF 0xB200
+#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203
+
+#define OCTEP_PCI_DEVICE_ID_CNF95N_PF 0xB400 /* 95N PF */
+
+#define OCTEP_MAX_QUEUES 63
+#define OCTEP_MAX_IQ OCTEP_MAX_QUEUES
+#define OCTEP_MAX_OQ OCTEP_MAX_QUEUES
+#define OCTEP_MAX_VF 64
+
+#define OCTEP_MAX_MSIX_VECTORS OCTEP_MAX_OQ
+
+/* Flags to disable and enable Interrupts */
+#define OCTEP_INPUT_INTR (1)
+#define OCTEP_OUTPUT_INTR (2)
+#define OCTEP_MBOX_INTR (4)
+#define OCTEP_ALL_INTR 0xff
+
+#define OCTEP_IQ_INTR_RESEND_BIT 59
+#define OCTEP_OQ_INTR_RESEND_BIT 59
+
+#define OCTEP_MMIO_REGIONS 3
+/* PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ * Octeon gets mapped to a different virtual address range in
+ * the kernel.
+ */
+struct octep_mmio {
+ /* Kernel virtual address (from ioremap()) at which the PCI address space is mapped. */
+ u8 __iomem *hw_addr;
+
+ /* Flag indicating the mapping was successful. */
+ int mapped;
+};
+
+struct octep_pci_win_regs {
+ u8 __iomem *pci_win_wr_addr;
+ u8 __iomem *pci_win_rd_addr;
+ u8 __iomem *pci_win_wr_data;
+ u8 __iomem *pci_win_rd_data;
+};
+
+struct octep_hw_ops {
+ void (*setup_iq_regs)(struct octep_device *oct, int q);
+ void (*setup_oq_regs)(struct octep_device *oct, int q);
+ void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
+
+ irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
+ int (*soft_reset)(struct octep_device *oct);
+ void (*reinit_regs)(struct octep_device *oct);
+ u32 (*update_iq_read_idx)(struct octep_iq *iq);
+
+ void (*enable_interrupts)(struct octep_device *oct);
+ void (*disable_interrupts)(struct octep_device *oct);
+ bool (*poll_non_ioq_interrupts)(struct octep_device *oct);
+
+ void (*enable_io_queues)(struct octep_device *oct);
+ void (*disable_io_queues)(struct octep_device *oct);
+ void (*enable_iq)(struct octep_device *oct, int q);
+ void (*disable_iq)(struct octep_device *oct, int q);
+ void (*enable_oq)(struct octep_device *oct, int q);
+ void (*disable_oq)(struct octep_device *oct, int q);
+ void (*reset_io_queues)(struct octep_device *oct);
+ void (*dump_registers)(struct octep_device *oct);
+};
+
+/* Octeon mailbox data */
+struct octep_mbox_data {
+ u32 cmd;
+ u32 total_len;
+ u32 recv_len;
+ u32 rsvd;
+ u64 *data;
+};
+
+/* Octeon device mailbox */
+struct octep_mbox {
+ /* A spinlock to protect access to this q_mbox. */
+ spinlock_t lock;
+
+ u32 q_no;
+ u32 state;
+
+ /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ u8 __iomem *mbox_int_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ u8 __iomem *mbox_write_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ u8 __iomem *mbox_read_reg;
+
+ struct octep_mbox_data mbox_data;
+};
+
+/* Tx/Rx queue vector per interrupt. */
+struct octep_ioq_vector {
+ char name[OCTEP_MSIX_NAME_SIZE];
+ struct napi_struct napi;
+ struct octep_device *octep_dev;
+ struct octep_iq *iq;
+ struct octep_oq *oq;
+ cpumask_t affinity_mask;
+};
+
+/* Octeon hardware/firmware offload capability flags. */
+#define OCTEP_CAP_TX_CHECKSUM BIT(0)
+#define OCTEP_CAP_RX_CHECKSUM BIT(1)
+#define OCTEP_CAP_TSO BIT(2)
+
+/* Link modes */
+enum octep_link_mode_bit_indices {
+ OCTEP_LINK_MODE_10GBASE_T = 0,
+ OCTEP_LINK_MODE_10GBASE_R,
+ OCTEP_LINK_MODE_10GBASE_CR,
+ OCTEP_LINK_MODE_10GBASE_KR,
+ OCTEP_LINK_MODE_10GBASE_LR,
+ OCTEP_LINK_MODE_10GBASE_SR,
+ OCTEP_LINK_MODE_25GBASE_CR,
+ OCTEP_LINK_MODE_25GBASE_KR,
+ OCTEP_LINK_MODE_25GBASE_SR,
+ OCTEP_LINK_MODE_40GBASE_CR4,
+ OCTEP_LINK_MODE_40GBASE_KR4,
+ OCTEP_LINK_MODE_40GBASE_LR4,
+ OCTEP_LINK_MODE_40GBASE_SR4,
+ OCTEP_LINK_MODE_50GBASE_CR2,
+ OCTEP_LINK_MODE_50GBASE_KR2,
+ OCTEP_LINK_MODE_50GBASE_SR2,
+ OCTEP_LINK_MODE_50GBASE_CR,
+ OCTEP_LINK_MODE_50GBASE_KR,
+ OCTEP_LINK_MODE_50GBASE_LR,
+ OCTEP_LINK_MODE_50GBASE_SR,
+ OCTEP_LINK_MODE_100GBASE_CR4,
+ OCTEP_LINK_MODE_100GBASE_KR4,
+ OCTEP_LINK_MODE_100GBASE_LR4,
+ OCTEP_LINK_MODE_100GBASE_SR4,
+ OCTEP_LINK_MODE_NBITS
+};
+
+/* Hardware interface link state information. */
+struct octep_iface_link_info {
+ /* Bitmap of Supported link speeds/modes. */
+ u64 supported_modes;
+
+ /* Bitmap of Advertised link speeds/modes. */
+ u64 advertised_modes;
+
+ /* Negotiated link speed in Mbps. */
+ u32 speed;
+
+ /* MTU */
+ u16 mtu;
+
+ /* Autonegotiation state. */
+#define OCTEP_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
+#define OCTEP_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
+ u8 autoneg;
+
+ /* Pause frames setting. */
+#define OCTEP_LINK_MODE_PAUSE_SUPPORTED BIT(0)
+#define OCTEP_LINK_MODE_PAUSE_ADVERTISED BIT(1)
+ u8 pause;
+
+ /* Admin state of the link (ifconfig <iface> up/down). */
+ u8 admin_up;
+
+ /* Operational state of the link: whether the physical link is up or down. */
+ u8 oper_up;
+};
+
+/* The Octeon device specific private data structure.
+ * Each Octeon device has this structure to represent all its components.
+ */
+struct octep_device {
+ struct octep_config *conf;
+
+ /* Octeon Chip type. */
+ u16 chip_id;
+ u16 rev_id;
+
+ /* Device capabilities enabled */
+ u64 caps_enabled;
+ /* Device capabilities supported */
+ u64 caps_supported;
+
+ /* Pointer to basic Linux device */
+ struct device *dev;
+ /* Linux PCI device pointer */
+ struct pci_dev *pdev;
+ /* Netdev corresponding to the Octeon device */
+ struct net_device *netdev;
+
+ /* memory mapped io range */
+ struct octep_mmio mmio[OCTEP_MMIO_REGIONS];
+
+ /* MAC address */
+ u8 mac_addr[ETH_ALEN];
+
+ /* Tx queues (IQ: Instruction Queue) */
+ u16 num_iqs;
+ /* pkind value to be used in every Tx hardware descriptor */
+ u8 pkind;
+ /* Pointers to Octeon Tx queues */
+ struct octep_iq *iq[OCTEP_MAX_IQ];
+
+ /* Rx queues (OQ: Output Queue) */
+ u16 num_oqs;
+ /* Pointers to Octeon Rx queues */
+ struct octep_oq *oq[OCTEP_MAX_OQ];
+
+ /* Hardware port number of the PCIe interface */
+ u16 pcie_port;
+
+ /* PCI Window registers to access some hardware CSRs */
+ struct octep_pci_win_regs pci_win_regs;
+ /* Hardware operations */
+ struct octep_hw_ops hw_ops;
+
+ /* IRQ info */
+ u16 num_irqs;
+ u16 num_non_ioq_irqs;
+ char *non_ioq_irq_names;
+ struct msix_entry *msix_entries;
+ /* IOQ information for its corresponding MSI-X interrupt. */
+ struct octep_ioq_vector *ioq_vector[OCTEP_MAX_QUEUES];
+
+ /* Hardware Interface Tx statistics */
+ struct octep_iface_tx_stats iface_tx_stats;
+ /* Hardware Interface Rx statistics */
+ struct octep_iface_rx_stats iface_rx_stats;
+
+ /* Hardware Interface Link info like supported modes, aneg support */
+ struct octep_iface_link_info link_info;
+
+ /* Mailbox to talk to VFs */
+ struct octep_mbox *mbox[OCTEP_MAX_VF];
+
+ /* Work entry to handle Tx timeout */
+ struct work_struct tx_timeout_task;
+
+ /* control mbox over pf */
+ struct octep_ctrl_mbox ctrl_mbox;
+
+ /* offset for iface stats */
+ u32 ctrl_mbox_ifstats_offset;
+
+ /* Work entry to handle ctrl mbox interrupt */
+ struct work_struct ctrl_mbox_task;
+ /* Wait queue for host to firmware requests */
+ wait_queue_head_t ctrl_req_wait_q;
+ /* List of objects waiting for h2f response */
+ struct list_head ctrl_req_wait_list;
+
+ /* Enable non-ioq interrupt polling */
+ bool poll_non_ioq_intr;
+ /* Work entry to poll non-ioq interrupts */
+ struct delayed_work intr_poll_task;
+
+ /* Firmware heartbeat timer */
+ struct timer_list hb_timer;
+ /* Firmware heartbeat miss count tracked by timer */
+ atomic_t hb_miss_cnt;
+ /* Task to reset device on heartbeat miss */
+ struct delayed_work hb_task;
+};
+
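+/* Major revision is encoded in bits 3:2 of rev_id; a raw value of 0 is
+ * reported as 1, presumably denoting first-pass silicon.
+ */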
+static inline u16 OCTEP_MAJOR_REV(struct octep_device *oct)
+{
+ u16 rev = (oct->rev_id & 0xC) >> 2;
+
+ return (rev == 0) ? 1 : rev;
+}
+
+static inline u16 OCTEP_MINOR_REV(struct octep_device *oct)
+{
+ return (oct->rev_id & 0x3);
+}
+
+/* Octeon CSR read/write access APIs */
+#define octep_write_csr(octep_dev, reg_off, value) \
+ writel(value, (octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_write_csr64(octep_dev, reg_off, val64) \
+ writeq(val64, (octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_read_csr(octep_dev, reg_off) \
+ readl((octep_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octep_read_csr64(octep_dev, reg_off) \
+ readq((octep_dev)->mmio[0].hw_addr + (reg_off))
+
+/* Read windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to read.
+ *
+ * This routine is called to read from the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return - 64 bit value read from the register.
+ */
+static inline u64
+OCTEP_PCI_WIN_READ(struct octep_device *oct, u64 addr)
+{
+ u64 val64;
+
+ addr |= 1ull << 53; /* read 8 bytes */
+ writeq(addr, oct->pci_win_regs.pci_win_rd_addr);
+ val64 = readq(oct->pci_win_regs.pci_win_rd_data);
+
+ dev_dbg(&oct->pdev->dev,
+ "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val64);
+
+ return val64;
+}
+
+/* Write windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to write
+ * @param val - Value to write
+ *
+ * This routine is called to write to the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return Nothing.
+ */
+static inline void
+OCTEP_PCI_WIN_WRITE(struct octep_device *oct, u64 addr, u64 val)
+{
+ writeq(addr, oct->pci_win_regs.pci_win_wr_addr);
+ writeq(val, oct->pci_win_regs.pci_win_wr_data);
+
+ dev_dbg(&oct->pdev->dev,
+ "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val);
+}
+
+extern struct workqueue_struct *octep_wq;
+
+int octep_device_setup(struct octep_device *oct);
+int octep_setup_iqs(struct octep_device *oct);
+void octep_free_iqs(struct octep_device *oct);
+void octep_clean_iqs(struct octep_device *oct);
+int octep_setup_oqs(struct octep_device *oct);
+void octep_free_oqs(struct octep_device *oct);
+void octep_oq_dbell_init(struct octep_device *oct);
+void octep_device_setup_cn93_pf(struct octep_device *oct);
+int octep_iq_process_completions(struct octep_iq *iq, u16 budget);
+int octep_oq_process_rx(struct octep_oq *oq, int budget);
+void octep_set_ethtool_ops(struct net_device *netdev);
+
+#endif /* _OCTEP_MAIN_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
new file mode 100644
index 000000000000..b25c3093dc7b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_REGS_CN9K_PF_H_
+#define _OCTEP_REGS_CN9K_PF_H_
+
+/* ############################ RST ######################### */
+#define CN93_RST_BOOT 0x000087E006001600ULL
+#define CN93_RST_CORE_DOMAIN_W1S 0x000087E006001820ULL
+#define CN93_RST_CORE_DOMAIN_W1C 0x000087E006001828ULL
+
+#define CN93_CONFIG_XPANSION_BAR 0x38
+#define CN93_CONFIG_PCIE_CAP 0x70
+#define CN93_CONFIG_PCIE_DEVCAP 0x74
+#define CN93_CONFIG_PCIE_DEVCTL 0x78
+#define CN93_CONFIG_PCIE_LINKCAP 0x7C
+#define CN93_CONFIG_PCIE_LINKCTL 0x80
+#define CN93_CONFIG_PCIE_SLOTCAP 0x84
+#define CN93_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CN93_PCIE_SRIOV_FDL 0x188 /* 0x98 */
+#define CN93_PCIE_SRIOV_FDL_BIT_POS 0x10
+#define CN93_PCIE_SRIOV_FDL_MASK 0xFF
+
+#define CN93_CONFIG_PCIE_FLTMSK 0x720
+
+/* ################# Offsets of RING, EPF, MAC ######################### */
+#define CN93_RING_OFFSET (0x1ULL << 17)
+#define CN93_EPF_OFFSET (0x1ULL << 25)
+#define CN93_MAC_OFFSET (0x1ULL << 4)
+#define CN93_BIT_ARRAY_OFFSET (0x1ULL << 4)
+#define CN93_EPVF_RING_OFFSET (0x1ULL << 4)
+
+/* ################# Scratch Registers ######################### */
+#define CN93_SDP_EPF_SCRATCH 0x205E0
+
+/* ################# Window Registers ######################### */
+#define CN93_SDP_WIN_WR_ADDR64 0x20000
+#define CN93_SDP_WIN_RD_ADDR64 0x20010
+#define CN93_SDP_WIN_WR_DATA64 0x20020
+#define CN93_SDP_WIN_WR_MASK_REG 0x20030
+#define CN93_SDP_WIN_RD_DATA64 0x20040
+
+#define CN93_SDP_MAC_NUMBER 0x2C100
+
+/* ################# Global Privileged registers ######################### */
+#define CN93_SDP_EPF_RINFO 0x205F0
+
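+/* Fields of CN93_SDP_EPF_RINFO: starting ring number (SRN), rings per
+ * VF (RPVF) and number of VFs (NVFS).
+ */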
+#define CN93_SDP_EPF_RINFO_SRN(val) ((val) & 0xFF)
+#define CN93_SDP_EPF_RINFO_RPVF(val) (((val) >> 32) & 0xF)
+#define CN93_SDP_EPF_RINFO_NVFS(val) (((val) >> 48) & 0xFF)
+
+/* SDP Function select */
+#define CN93_SDP_FUNC_SEL_EPF_BIT_POS 8
+#define CN93_SDP_FUNC_SEL_FUNC_BIT_POS 0
+
+/* ##### RING IN (Into device from PCI: Tx Ring) REGISTERS #### */
+#define CN93_SDP_R_IN_CONTROL_START 0x10000
+#define CN93_SDP_R_IN_ENABLE_START 0x10010
+#define CN93_SDP_R_IN_INSTR_BADDR_START 0x10020
+#define CN93_SDP_R_IN_INSTR_RSIZE_START 0x10030
+#define CN93_SDP_R_IN_INSTR_DBELL_START 0x10040
+#define CN93_SDP_R_IN_CNTS_START 0x10050
+#define CN93_SDP_R_IN_INT_LEVELS_START 0x10060
+#define CN93_SDP_R_IN_PKT_CNT_START 0x10080
+#define CN93_SDP_R_IN_BYTE_CNT_START 0x10090
+
+#define CN93_SDP_R_IN_CONTROL(ring) \
+ (CN93_SDP_R_IN_CONTROL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_ENABLE(ring) \
+ (CN93_SDP_R_IN_ENABLE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_BADDR(ring) \
+ (CN93_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_RSIZE(ring) \
+ (CN93_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_DBELL(ring) \
+ (CN93_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS(ring) \
+ (CN93_SDP_R_IN_CNTS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_LEVELS(ring) \
+ (CN93_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_PKT_CNT(ring) \
+ (CN93_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_BYTE_CNT(ring) \
+ (CN93_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+/* Rings per Virtual Function */
+#define CN93_R_IN_CTL_RPVF_MASK (0xF)
+#define CN93_R_IN_CTL_RPVF_POS (48)
+
+/* Number of instructions to be read in one MAC read request;
+ * set to the maximum value (4).
+ */
+#define CN93_R_IN_CTL_IDLE (0x1ULL << 28)
+#define CN93_R_IN_CTL_RDSIZE (0x3ULL << 25)
+#define CN93_R_IN_CTL_IS_64B (0x1ULL << 24)
+#define CN93_R_IN_CTL_D_NSR (0x1ULL << 8)
+#define CN93_R_IN_CTL_D_ESR (0x1ULL << 6)
+#define CN93_R_IN_CTL_D_ROR (0x1ULL << 5)
+#define CN93_R_IN_CTL_NSR (0x1ULL << 3)
+#define CN93_R_IN_CTL_ESR (0x1ULL << 1)
+#define CN93_R_IN_CTL_ROR (0x1ULL << 0)
+
+#define CN93_R_IN_CTL_MASK (CN93_R_IN_CTL_RDSIZE | CN93_R_IN_CTL_IS_64B)
+
+/* ##### RING OUT (out from device to PCI host: Rx Ring) REGISTERS #### */
+#define CN93_SDP_R_OUT_CNTS_START 0x10100
+#define CN93_SDP_R_OUT_INT_LEVELS_START 0x10110
+#define CN93_SDP_R_OUT_SLIST_BADDR_START 0x10120
+#define CN93_SDP_R_OUT_SLIST_RSIZE_START 0x10130
+#define CN93_SDP_R_OUT_SLIST_DBELL_START 0x10140
+#define CN93_SDP_R_OUT_CONTROL_START 0x10150
+#define CN93_SDP_R_OUT_ENABLE_START 0x10160
+#define CN93_SDP_R_OUT_PKT_CNT_START 0x10180
+#define CN93_SDP_R_OUT_BYTE_CNT_START 0x10190
+
+#define CN93_SDP_R_OUT_CONTROL(ring) \
+ (CN93_SDP_R_OUT_CONTROL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_ENABLE(ring) \
+ (CN93_SDP_R_OUT_ENABLE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_BADDR(ring) \
+ (CN93_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_RSIZE(ring) \
+ (CN93_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_DBELL(ring) \
+ (CN93_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_CNTS(ring) \
+ (CN93_SDP_R_OUT_CNTS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_LEVELS(ring) \
+ (CN93_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_PKT_CNT(ring) \
+ (CN93_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_BYTE_CNT(ring) \
+ (CN93_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CN93_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
+#define CN93_R_OUT_INT_LEVELS_TIMET (32)
+
+#define CN93_R_OUT_CTL_IDLE BIT_ULL(40)
+#define CN93_R_OUT_CTL_ES_I BIT_ULL(34)
+#define CN93_R_OUT_CTL_NSR_I BIT_ULL(33)
+#define CN93_R_OUT_CTL_ROR_I BIT_ULL(32)
+#define CN93_R_OUT_CTL_ES_D BIT_ULL(30)
+#define CN93_R_OUT_CTL_NSR_D BIT_ULL(29)
+#define CN93_R_OUT_CTL_ROR_D BIT_ULL(28)
+#define CN93_R_OUT_CTL_ES_P BIT_ULL(26)
+#define CN93_R_OUT_CTL_NSR_P BIT_ULL(25)
+#define CN93_R_OUT_CTL_ROR_P BIT_ULL(24)
+#define CN93_R_OUT_CTL_IMODE BIT_ULL(23)
+
+/* ############### Interrupt Moderation Registers ############### */
+#define CN93_SDP_R_IN_INT_MDRT_CTL0_START 0x10280
+#define CN93_SDP_R_IN_INT_MDRT_CTL1_START 0x102A0
+#define CN93_SDP_R_IN_INT_MDRT_DBG_START 0x102C0
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL0_START 0x10380
+#define CN93_SDP_R_OUT_INT_MDRT_CTL1_START 0x103A0
+#define CN93_SDP_R_OUT_INT_MDRT_DBG_START 0x103C0
+
+#define CN93_SDP_R_IN_INT_MDRT_CTL0(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_MDRT_CTL1(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_MDRT_DBG(ring) \
+ (CN93_SDP_R_IN_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL0(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL1(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_DBG(ring) \
+ (CN93_SDP_R_OUT_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))
+
+/* ##################### Mail Box Registers ########################## */
+/* INT register for VF. When an MBOX write from the PF happens to a VF,
+ * the corresponding bit is set in this register as well as in the
+ * PF_VF_INT register.
+ *
+ * This is a RO register; the interrupt can be cleared by writing 1 to PF_VF_INT.
+ */
+/* The first two registers below carry PF-to-VF data and interrupt status;
+ * the last carries data from VF to PF.
+ */
+#define CN93_SDP_R_MBOX_PF_VF_DATA_START 0x10210
+#define CN93_SDP_R_MBOX_PF_VF_INT_START 0x10220
+#define CN93_SDP_R_MBOX_VF_PF_DATA_START 0x10230
+
+#define CN93_SDP_R_MBOX_PF_VF_DATA(ring) \
+ (CN93_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_PF_VF_INT(ring) \
+ (CN93_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_VF_PF_DATA(ring) \
+ (CN93_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
+/* ##################### Interrupt Registers ########################## */
+#define CN93_SDP_R_ERR_TYPE_START 0x10400
+
+#define CN93_SDP_R_ERR_TYPE(ring) \
+ (CN93_SDP_R_ERR_TYPE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_ISM_START 0x10500
+#define CN93_SDP_R_OUT_CNTS_ISM_START 0x10510
+#define CN93_SDP_R_IN_CNTS_ISM_START 0x10520
+
+#define CN93_SDP_R_MBOX_ISM(ring) \
+ (CN93_SDP_R_MBOX_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_CNTS_ISM(ring) \
+ (CN93_SDP_R_OUT_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS_ISM(ring) \
+ (CN93_SDP_R_IN_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_EPF_MBOX_RINT_START 0x20100
+#define CN93_SDP_EPF_MBOX_RINT_W1S_START 0x20120
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START 0x20140
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START 0x20160
+
+#define CN93_SDP_EPF_VFIRE_RINT_START 0x20180
+#define CN93_SDP_EPF_VFIRE_RINT_W1S_START 0x201A0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START 0x201C0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START 0x201E0
+
+#define CN93_SDP_EPF_IRERR_RINT 0x20200
+#define CN93_SDP_EPF_IRERR_RINT_W1S 0x20210
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1C 0x20220
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1S 0x20230
+
+#define CN93_SDP_EPF_VFORE_RINT_START 0x20240
+#define CN93_SDP_EPF_VFORE_RINT_W1S_START 0x20260
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START 0x20280
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START 0x202A0
+
+#define CN93_SDP_EPF_ORERR_RINT 0x20320
+#define CN93_SDP_EPF_ORERR_RINT_W1S 0x20330
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1C 0x20340
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1S 0x20350
+
+#define CN93_SDP_EPF_OEI_RINT 0x20360
+#define CN93_SDP_EPF_OEI_RINT_W1S 0x20370
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1C 0x20380
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1S 0x20390
+
+#define CN93_SDP_EPF_DMA_RINT 0x20400
+#define CN93_SDP_EPF_DMA_RINT_W1S 0x20410
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1C 0x20420
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1S 0x20430
+
+#define CN93_SDP_EPF_DMA_INT_LEVEL_START 0x20440
+#define CN93_SDP_EPF_DMA_CNT_START 0x20460
+#define CN93_SDP_EPF_DMA_TIM_START 0x20480
+
+#define CN93_SDP_EPF_MISC_RINT 0x204A0
+#define CN93_SDP_EPF_MISC_RINT_W1S 0x204B0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1C 0x204C0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1S 0x204D0
+
+#define CN93_SDP_EPF_DMA_VF_RINT_START 0x204E0
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S_START 0x20500
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START 0x20520
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START 0x20540
+
+#define CN93_SDP_EPF_PP_VF_RINT_START 0x20560
+#define CN93_SDP_EPF_PP_VF_RINT_W1S_START 0x20580
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START 0x205A0
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START 0x205C0
+
+#define CN93_SDP_EPF_MBOX_RINT(index) \
+ (CN93_SDP_EPF_MBOX_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_W1S(index) \
+ (CN93_SDP_EPF_MBOX_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFIRE_RINT(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_W1S(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFORE_RINT(index) \
+ (CN93_SDP_EPF_VFORE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_W1S(index) \
+ (CN93_SDP_EPF_VFORE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_DMA_VF_RINT(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_PP_VF_RINT(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_W1S(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(index) \
+ (CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+/*------------------ Interrupt Masks ----------------*/
+#define CN93_INTR_R_SEND_ISM BIT_ULL(63)
+#define CN93_INTR_R_OUT_INT BIT_ULL(62)
+#define CN93_INTR_R_IN_INT BIT_ULL(61)
+#define CN93_INTR_R_MBOX_INT BIT_ULL(60)
+#define CN93_INTR_R_RESEND BIT_ULL(59)
+#define CN93_INTR_R_CLR_TIM BIT_ULL(58)
+
+/* ####################### Ring Mapping Registers ################################## */
+#define CN93_SDP_EPVF_RING_START 0x26000
+#define CN93_SDP_IN_RING_TB_MAP_START 0x28000
+#define CN93_SDP_IN_RATE_LIMIT_START 0x2A000
+#define CN93_SDP_MAC_PF_RING_CTL_START 0x2C000
+
+#define CN93_SDP_EPVF_RING(ring) \
+ (CN93_SDP_EPVF_RING_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RING_TB_MAP(ring) \
+ (CN93_SDP_IN_RING_TB_MAP_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RATE_LIMIT(ring) \
+ (CN93_SDP_IN_RATE_LIMIT_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_MAC_PF_RING_CTL(mac) \
+ (CN93_SDP_MAC_PF_RING_CTL_START + ((mac) * CN93_MAC_OFFSET))
+
+#define CN93_SDP_MAC_PF_RING_CTL_NPFS(val) ((val) & 0xF)
+#define CN93_SDP_MAC_PF_RING_CTL_SRN(val) (((val) >> 8) & 0xFF)
+#define CN93_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 16) & 0x3F)
+
+/* Number of non-queue interrupts in CN93xx */
+#define CN93_NUM_NON_IOQ_INTR 16
+
+/* bit 0 for control mbox interrupt */
+#define CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX BIT_ULL(0)
+/* bit 1 for firmware heartbeat interrupt */
+#define CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1)
+
+#endif /* _OCTEP_REGS_CN9K_PF_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
new file mode 100644
index 000000000000..392d9b0da0d7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+
+static void octep_oq_reset_indices(struct octep_oq *oq)
+{
+ oq->host_read_idx = 0;
+ oq->host_refill_idx = 0;
+ oq->refill_count = 0;
+ oq->last_pkt_count = 0;
+ oq->pkts_pending = 0;
+}
+
+/**
+ * octep_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: 0, if successfully filled receive buffers for all descriptors.
+ * -1, if failed to allocate a buffer or failed to map for DMA.
+ */
+static int octep_oq_fill_ring_buffers(struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 i;
+
+ for (i = 0; i < oq->max_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "Rx buffer alloc failed\n");
+ goto rx_buf_alloc_err;
+ }
+ desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer alloc: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ goto dma_map_err;
+ }
+ oq->buff_info[i].page = page;
+ }
+
+ return 0;
+
+dma_map_err:
+rx_buf_alloc_err:
+ while (i) {
+ i--;
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ }
+
+ return -1;
+}
+
+/**
+ * octep_oq_refill() - refill buffers for used Rx ring descriptors.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: number of descriptors successfully refilled with receive buffers.
+ */
+static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ struct page *page;
+ u32 refill_idx, i;
+
+ refill_idx = oq->host_refill_idx;
+ for (i = 0; i < oq->refill_count; i++) {
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ dev_err(oq->dev, "refill: rx buffer alloc failed\n");
+ oq->stats.alloc_failures++;
+ break;
+ }
+
+ desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) {
+ dev_err(oq->dev,
+ "OQ-%d buffer refill: DMA mapping error!\n",
+ oq->q_no);
+ put_page(page);
+ oq->stats.alloc_failures++;
+ break;
+ }
+ oq->buff_info[refill_idx].page = page;
+ refill_idx++;
+ if (refill_idx == oq->max_count)
+ refill_idx = 0;
+ }
+ oq->host_refill_idx = refill_idx;
+ oq->refill_count -= i;
+
+ return i;
+}
+
+/**
+ * octep_setup_oq() - Setup a Rx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Rx queue number to be setup.
+ *
+ * Allocate resources for a Rx queue.
+ */
+static int octep_setup_oq(struct octep_device *oct, int q_no)
+{
+ struct octep_oq *oq;
+ u32 desc_ring_size;
+
+ oq = vzalloc(sizeof(*oq));
+ if (!oq)
+ goto create_oq_fail;
+ oct->oq[q_no] = oq;
+
+ oq->octep_dev = oct;
+ oq->netdev = oct->netdev;
+ oq->dev = &oct->pdev->dev;
+ oq->q_no = q_no;
+ oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
+ oq->ring_size_mask = oq->max_count - 1;
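+ /* Note: ring_size_mask is only a valid index mask when the configured
+ * descriptor count is a power of two.
+ */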
+ oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
+ oq->max_single_buffer_size = oq->buffer_size - OCTEP_OQ_RESP_HW_SIZE;
+
+ /* When the hardware/firmware supports additional capabilities,
+ * an extended header is filled in by Octeon after the length field
+ * of Rx packets; this header carries extra packet metadata.
+ */
+ if (oct->caps_enabled)
+ oq->max_single_buffer_size -= OCTEP_OQ_RESP_HW_EXT_SIZE;
+
+ oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
+
+ desc_ring_size = oq->max_count * OCTEP_OQ_DESC_SIZE;
+ oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
+ &oq->desc_ring_dma, GFP_KERNEL);
+
+ if (unlikely(!oq->desc_ring)) {
+ dev_err(oq->dev,
+ "Failed to allocate DMA memory for OQ-%d !!\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ oq->buff_info = vzalloc(oq->max_count * OCTEP_OQ_RECVBUF_SIZE);
+ if (unlikely(!oq->buff_info)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to allocate buffer info for OQ-%d\n", q_no);
+ goto buf_list_err;
+ }
+
+ if (octep_oq_fill_ring_buffers(oq))
+ goto oq_fill_buff_err;
+
+ octep_oq_reset_indices(oq);
+ oct->hw_ops.setup_oq_regs(oct, q_no);
+ oct->num_oqs++;
+
+ return 0;
+
+oq_fill_buff_err:
+ vfree(oq->buff_info);
+ oq->buff_info = NULL;
+buf_list_err:
+ dma_free_coherent(oq->dev, desc_ring_size,
+ oq->desc_ring, oq->desc_ring_dma);
+ oq->desc_ring = NULL;
+desc_dma_alloc_err:
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+create_oq_fail:
+ return -1;
+}
+
+/**
+ * octep_oq_free_ring_buffers() - Free ring buffers.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free receive buffers in unused Rx queue descriptors.
+ */
+static void octep_oq_free_ring_buffers(struct octep_oq *oq)
+{
+ struct octep_oq_desc_hw *desc_ring = oq->desc_ring;
+ int i;
+
+ if (!oq->desc_ring || !oq->buff_info)
+ return;
+
+ for (i = 0; i < oq->max_count; i++) {
+ if (oq->buff_info[i].page) {
+ dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ put_page(oq->buff_info[i].page);
+ oq->buff_info[i].page = NULL;
+ desc_ring[i].buffer_ptr = 0;
+ }
+ }
+ octep_oq_reset_indices(oq);
+}
+
+/**
+ * octep_free_oq() - Free Rx queue resources.
+ *
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Free all resources of a Rx queue.
+ */
+static int octep_free_oq(struct octep_oq *oq)
+{
+ struct octep_device *oct = oq->octep_dev;
+ int q_no = oq->q_no;
+
+ octep_oq_free_ring_buffers(oq);
+
+ vfree(oq->buff_info);
+
+ if (oq->desc_ring)
+ dma_free_coherent(oq->dev,
+ oq->max_count * OCTEP_OQ_DESC_SIZE,
+ oq->desc_ring, oq->desc_ring_dma);
+
+ vfree(oq);
+ oct->oq[q_no] = NULL;
+ oct->num_oqs--;
+ return 0;
+}
+
+/**
+ * octep_setup_oqs() - setup resources for all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_setup_oqs(struct octep_device *oct)
+{
+ int i, retval = 0;
+
+ oct->num_oqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ retval = octep_setup_oq(oct, i);
+ if (retval) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup OQ(RxQ)-%d.\n", i);
+ goto oq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+oq_setup_err:
+ while (i) {
+ i--;
+ octep_free_oq(oct->oq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_oq_dbell_init() - Initialize Rx queue doorbell.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Write number of descriptors to Rx queue doorbell register.
+ */
+void octep_oq_dbell_init(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_oqs; i++)
+ writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
+}
+
+/**
+ * octep_free_oqs() - Free resources of all Rx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_free_oqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (!oct->oq[i])
+ continue;
+ octep_free_oq(oct->oq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully freed OQ(RxQ)-%d.\n", i);
+ }
+}
+
+/**
+ * octep_oq_check_hw_for_pkts() - Check for new Rx packets.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ *
+ * Return: packets received after previous check.
+ */
+static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ struct octep_oq *oq)
+{
+ u32 pkt_count, new_pkts;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts = pkt_count - oq->last_pkt_count;
+
+ /* Clear the hardware packet counter register if the Rx queue is
+ * being processed continuously within a single interrupt and the
+ * counter is approaching its maximum value.
+ * The counter is not cleared on every read, to save write cycles.
+ */
+ if (unlikely(pkt_count > 0xF0000000U)) {
+ writel(pkt_count, oq->pkts_sent_reg);
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+ oq->last_pkt_count = pkt_count;
+ oq->pkts_pending += new_pkts;
+ return new_pkts;
+}
+
+/**
+ * __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
+ *
+ * @oct: Octeon device private data structure.
+ * @oq: Octeon Rx queue data structure.
+ * @pkts_to_process: number of packets to be processed.
+ *
+ * Process the new packets in Rx queue.
+ * Packets larger than a single Rx buffer arrive in consecutive descriptors,
+ * but the count returned by the hardware accounts only for full packets,
+ * not fragments.
+ *
+ * Return: number of packets processed and pushed to stack.
+ */
+static int __octep_oq_process_rx(struct octep_device *oct,
+ struct octep_oq *oq, u16 pkts_to_process)
+{
+ struct octep_oq_resp_hw_ext *resp_hw_ext = NULL;
+ struct octep_rx_buffer *buff_info;
+ struct octep_oq_resp_hw *resp_hw;
+ u32 pkt, rx_bytes, desc_used;
+ struct sk_buff *skb;
+ u16 data_offset;
+ u32 read_idx;
+
+ read_idx = oq->host_read_idx;
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+ buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx];
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ resp_hw = page_address(buff_info->page);
+ buff_info->page = NULL;
+
+ /* Convert the length field from big-endian to CPU byte order */
+ buff_info->len = be64_to_cpu(resp_hw->length);
+ if (oct->caps_enabled & OCTEP_CAP_RX_CHECKSUM) {
+ /* Extended response header is immediately after
+ * response header (resp_hw)
+ */
+ resp_hw_ext = (struct octep_oq_resp_hw_ext *)
+ (resp_hw + 1);
+ buff_info->len -= OCTEP_OQ_RESP_HW_EXT_SIZE;
+ /* Packet Data is immediately after
+ * extended response header.
+ */
+ data_offset = OCTEP_OQ_RESP_HW_SIZE +
+ OCTEP_OQ_RESP_HW_EXT_SIZE;
+ } else {
+ /* Data is immediately after
+ * Hardware Rx response header.
+ */
+ data_offset = OCTEP_OQ_RESP_HW_SIZE;
+ }
+ rx_bytes += buff_info->len;
+
+ if (buff_info->len <= oq->max_single_buffer_size) {
+ skb = build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ skb_put(skb, buff_info->len);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ } else {
+ struct skb_shared_info *shinfo;
+ u16 data_len;
+
+ skb = build_skb((void *)resp_hw, PAGE_SIZE);
+ skb_reserve(skb, data_offset);
+ /* Head fragment includes response header(s);
+ * subsequent fragments contain only data.
+ */
+ skb_put(skb, oq->max_single_buffer_size);
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+
+ shinfo = skb_shinfo(skb);
+ data_len = buff_info->len - oq->max_single_buffer_size;
+ while (data_len) {
+ dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ buff_info = (struct octep_rx_buffer *)
+ &oq->buff_info[read_idx];
+ if (data_len < oq->buffer_size) {
+ buff_info->len = data_len;
+ data_len = 0;
+ } else {
+ buff_info->len = oq->buffer_size;
+ data_len -= oq->buffer_size;
+ }
+
+ skb_add_rx_frag(skb, shinfo->nr_frags,
+ buff_info->page, 0,
+ buff_info->len,
+ buff_info->len);
+ buff_info->page = NULL;
+ read_idx++;
+ desc_used++;
+ if (read_idx == oq->max_count)
+ read_idx = 0;
+ }
+ }
+
+ skb->dev = oq->netdev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (resp_hw_ext &&
+ resp_hw_ext->csum_verified == OCTEP_CSUM_VERIFIED)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ napi_gro_receive(oq->napi, skb);
+ }
+
+ oq->host_read_idx = read_idx;
+ oq->refill_count += desc_used;
+ oq->stats.packets += pkt;
+ oq->stats.bytes += rx_bytes;
+
+ return pkt;
+}
+
+/**
+ * octep_oq_process_rx() - Process Rx queue.
+ *
+ * @oq: Octeon Rx queue data structure.
+ * @budget: max number of packets can be processed in one invocation.
+ *
+ * Check for newly received packets and process them.
+ * Keeps checking for new packets until the budget is exhausted or no new packets are seen.
+ *
+ * Return: number of packets processed.
+ */
+int octep_oq_process_rx(struct octep_oq *oq, int budget)
+{
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_device *oct = oq->octep_dev;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update the pending count only when the current batch is exhausted */
+ if (oq->pkts_pending == 0)
+ octep_oq_check_hw_for_pkts(oct, oq);
+ pkts_available = min(budget - total_pkts_processed,
+ oq->pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_oq_process_rx(oct, oq,
+ pkts_available);
+ oq->pkts_pending -= pkts_processed;
+ total_pkts_processed += pkts_processed;
+ }
+
+ if (oq->refill_count >= oq->refill_threshold) {
+ u32 desc_refilled = octep_oq_refill(oct, oq);
+
+ /* flush pending writes before updating credits */
+ wmb();
+ writel(desc_refilled, oq->pkts_credit_reg);
+ }
+
+ return total_pkts_processed;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
new file mode 100644
index 000000000000..782a24f27f3e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_RX_H_
+#define _OCTEP_RX_H_
+
+/* struct octep_oq_desc_hw - Octeon Hardware OQ descriptor format.
+ *
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ *
+ * @buffer_ptr: DMA address of the receive buffer page.
+ * @info_ptr: DMA address of host memory, used by hardware to update the
+ * packet count. This is currently unused, to save PCI writes.
+ */
+struct octep_oq_desc_hw {
+ dma_addr_t buffer_ptr;
+ u64 info_ptr;
+};
+
+#define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw))
+
+#define OCTEP_CSUM_L4_VERIFIED 0x1
+#define OCTEP_CSUM_IP_VERIFIED 0x2
+#define OCTEP_CSUM_VERIFIED (OCTEP_CSUM_L4_VERIFIED | OCTEP_CSUM_IP_VERIFIED)
+
+/* Extended Response Header in packet data received from Hardware.
+ * Includes metadata like checksum status.
+ * This is valid only if the hardware/firmware has published support for it.
+ * This is at offset 0 of packet data (skb->data).
+ */
+struct octep_oq_resp_hw_ext {
+ /* Reserved. */
+ u64 reserved:62;
+
+ /* checksum verified. */
+ u64 csum_verified:2;
+};
+
+#define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext))
+
+/* Length of Rx packet DMA'ed by Octeon to Host.
+ * This is in big-endian and must be converted to CPU byte order.
+ * Octeon writes this at the beginning of Rx buffer (skb->data).
+ */
+struct octep_oq_resp_hw {
+ /* The Length of the packet. */
+ __be64 length;
+};
+
+#define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw))
+
+/* Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers. The fields are operated by
+ * OS-dependent routines.
+ */
+struct octep_rx_buffer {
+ struct page *page;
+
+ /* length from rx hardware descriptor after converting to cpu endian */
+ u64 len;
+};
+
+#define OCTEP_OQ_RECVBUF_SIZE (sizeof(struct octep_rx_buffer))
+
+/* Output Queue statistics. */
+struct octep_oq_stats {
+ /* Number of packets received from the Device. */
+ u64 packets;
+
+ /* Number of bytes received from the Device. */
+ u64 bytes;
+
+ /* Number of times failed to allocate buffers. */
+ u64 alloc_failures;
+};
+
+#define OCTEP_OQ_STATS_SIZE (sizeof(struct octep_oq_stats))
+
+/* Hardware interface Rx statistics */
+struct octep_iface_rx_stats {
+ /* Received packets */
+ u64 pkts;
+
+ /* Octets of received packets */
+ u64 octets;
+
+ /* Received PAUSE and Control packets */
+ u64 pause_pkts;
+
+ /* Received PAUSE and Control octets */
+ u64 pause_octets;
+
+ /* Filtered DMAC0 packets */
+ u64 dmac0_pkts;
+
+ /* Filtered DMAC0 octets */
+ u64 dmac0_octets;
+
+ /* Packets dropped due to RX FIFO full */
+ u64 dropped_pkts_fifo_full;
+
+ /* Octets dropped due to RX FIFO full */
+ u64 dropped_octets_fifo_full;
+
+ /* Error packets */
+ u64 err_pkts;
+
+ /* Filtered DMAC1 packets */
+ u64 dmac1_pkts;
+
+ /* Filtered DMAC1 octets */
+ u64 dmac1_octets;
+
+ /* NCSI-bound packets dropped */
+ u64 ncsi_dropped_pkts;
+
+ /* NCSI-bound octets dropped */
+ u64 ncsi_dropped_octets;
+
+ /* Multicast packets received. */
+ u64 mcast_pkts;
+
+ /* Broadcast packets received. */
+ u64 bcast_pkts;
+};
+
+/* The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement an
+ * Octeon OQ.
+ */
+struct octep_oq {
+ u32 q_no;
+
+ struct octep_device *octep_dev;
+ struct net_device *netdev;
+ struct device *dev;
+
+ struct napi_struct *napi;
+
+ /* The receive buffer list. This list has the virtual addresses
+ * of the buffers.
+ */
+ struct octep_rx_buffer *buff_info;
+
+ /* Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ u8 __iomem *pkts_credit_reg;
+
+ /* Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ u8 __iomem *pkts_sent_reg;
+
+ /* Statistics for this OQ. */
+ struct octep_oq_stats stats;
+
+ /* Packets pending to be processed */
+ u32 pkts_pending;
+ u32 last_pkt_count;
+
+ /* Index in the ring where the driver should read the next packet */
+ u32 host_read_idx;
+
+ /* Number of descriptors in this ring. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ /* The number of descriptors pending refill. */
+ u32 refill_count;
+
+ /* Index in the ring where the driver will refill the
+ * descriptor's buffer
+ */
+ u32 host_refill_idx;
+ u32 refill_threshold;
+
+ /* The size of each buffer pointed by the buffer pointer. */
+ u32 buffer_size;
+ u32 max_single_buffer_size;
+
+ /* The 8B aligned descriptor ring starts at this address. */
+ struct octep_oq_desc_hw *desc_ring;
+
+ /* DMA mapped address of the OQ descriptor ring. */
+ dma_addr_t desc_ring_dma;
+};
+
+#define OCTEP_OQ_SIZE (sizeof(struct octep_oq))
+#endif /* _OCTEP_RX_H_ */
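As a rough sketch (hypothetical helpers, not part of this patch), the Rx metadata declared above would typically be consumed by converting the big-endian length at the head of the buffer and checking the checksum flags:

/* Illustrative only; assumes the extended response header is present. */
static inline u64 octep_rx_pkt_len(const struct octep_oq_resp_hw *resp)
{
	return be64_to_cpu(resp->length);	/* hardware writes big-endian */
}

static inline bool octep_rx_csum_ok(const struct octep_oq_resp_hw_ext *ext)
{
	return ext->csum_verified == OCTEP_CSUM_VERIFIED;
}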
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
new file mode 100644
index 000000000000..5a520d37bea0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+
+/* Reset various index of Tx queue data structure. */
+static void octep_iq_reset_indices(struct octep_iq *iq)
+{
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->octep_read_index = 0;
+ iq->flush_index = 0;
+ iq->pkts_processed = 0;
+ iq->pkt_in_done = 0;
+ atomic_set(&iq->instr_pending, 0);
+}
+
+/**
+ * octep_iq_process_completions() - Process Tx queue completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @budget: max number of completions to be processed in one invocation.
+ */
+int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
+{
+ u32 compl_pkts, compl_bytes, compl_sg;
+ struct octep_device *oct = iq->octep_dev;
+ struct octep_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ compl_pkts = 0;
+ compl_sg = 0;
+ compl_bytes = 0;
+ iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq);
+
+ while (likely(budget && (fi != iq->octep_read_index))) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+ compl_bytes += skb->len;
+ compl_pkts++;
+ budget--;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+ compl_sg++;
+
+ dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[0], DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ iq->pkts_processed += compl_pkts;
+ atomic_sub(compl_pkts, &iq->instr_pending);
+ iq->stats.instr_completed += compl_pkts;
+ iq->stats.bytes_sent += compl_bytes;
+ iq->stats.sgentry_sent += compl_sg;
+ iq->flush_index = fi;
+
+ netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
+
+ if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
+ ((iq->max_count - atomic_read(&iq->instr_pending)) >
+ OCTEP_WAKE_QUEUE_THRESHOLD))
+ netif_wake_subqueue(iq->netdev, iq->q_no);
+ return !budget;
+}
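The (i >> 2, i & 3) indexing above follows from each octep_tx_sglist_desc packing four length/pointer pairs; a small helper (illustrative only, assuming that layout) makes the mapping explicit:

/* Fragment i lives in sglist entry i / 4, slot i % 4. */
static inline dma_addr_t octep_tx_frag_dma(const struct octep_tx_sglist_desc *sglist,
					   unsigned int i)
{
	return sglist[i >> 2].dma_ptr[i & 3];
}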
+
+/**
+ * octep_iq_free_pending() - Free Tx buffers for pending completions.
+ *
+ * @iq: Octeon Tx queue data structure.
+ */
+static void octep_iq_free_pending(struct octep_iq *iq)
+{
+ struct octep_tx_buffer *tx_buffer;
+ struct skb_shared_info *shinfo;
+ u32 fi = iq->flush_index;
+ struct sk_buff *skb;
+ u8 frags, i;
+
+ while (fi != iq->host_write_index) {
+ tx_buffer = iq->buff_info + fi;
+ skb = tx_buffer->skb;
+
+ fi++;
+ if (unlikely(fi == iq->max_count))
+ fi = 0;
+
+ if (!tx_buffer->gather) {
+ dma_unmap_single(iq->dev, tx_buffer->dma,
+ tx_buffer->skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ /* Scatter/Gather */
+ shinfo = skb_shinfo(skb);
+ frags = shinfo->nr_frags;
+
+ dma_unmap_single(iq->dev,
+ tx_buffer->sglist[0].dma_ptr[0],
+ tx_buffer->sglist[0].len[0],
+ DMA_TO_DEVICE);
+
+ i = 1; /* entry 0 is main skb, unmapped above */
+ while (frags--) {
+ dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
+ tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+ i++;
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ atomic_set(&iq->instr_pending, 0);
+ iq->flush_index = fi;
+ netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
+}
+
+/**
+ * octep_clean_iqs() - Clean Tx queues to shutdown the device.
+ *
+ * @oct: Octeon device private data structure.
+ *
+ * Free the buffers in Tx queue descriptors pending completion and
+ * reset queue indices
+ */
+void octep_clean_iqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ octep_iq_free_pending(oct->iq[i]);
+ octep_iq_reset_indices(oct->iq[i]);
+ }
+}
+
+/**
+ * octep_setup_iq() - Setup a Tx queue.
+ *
+ * @oct: Octeon device private data structure.
+ * @q_no: Tx queue number to be setup.
+ *
+ * Allocate resources for a Tx queue.
+ */
+static int octep_setup_iq(struct octep_device *oct, int q_no)
+{
+ u32 desc_ring_size, buff_info_size, sglist_size;
+ struct octep_iq *iq;
+ int i;
+
+ iq = vzalloc(sizeof(*iq));
+ if (!iq)
+ goto iq_alloc_err;
+ oct->iq[q_no] = iq;
+
+ iq->octep_dev = oct;
+ iq->netdev = oct->netdev;
+ iq->dev = &oct->pdev->dev;
+ iq->q_no = q_no;
+ iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->ring_size_mask = iq->max_count - 1;
+ iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
+ iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
+
+ /* Allocate memory for hardware queue descriptors */
+ desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
+ &iq->desc_ring_dma, GFP_KERNEL);
+ if (unlikely(!iq->desc_ring)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d\n", q_no);
+ goto desc_dma_alloc_err;
+ }
+
+ /* Allocate memory for hardware SGLIST descriptors */
+ sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
+ &iq->sglist_dma, GFP_KERNEL);
+ if (unlikely(!iq->sglist)) {
+ dev_err(iq->dev,
+ "Failed to allocate DMA memory for IQ-%d SGLIST\n",
+ q_no);
+ goto sglist_alloc_err;
+ }
+
+ /* allocate memory to manage Tx packets pending completion */
+ buff_info_size = OCTEP_IQ_TXBUFF_INFO_SIZE * iq->max_count;
+ iq->buff_info = vzalloc(buff_info_size);
+ if (!iq->buff_info) {
+ dev_err(iq->dev,
+ "Failed to allocate buff info for IQ-%d\n", q_no);
+ goto buff_info_err;
+ }
+
+ /* Setup sglist addresses in tx_buffer entries */
+ for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) {
+ struct octep_tx_buffer *tx_buffer;
+
+ tx_buffer = &iq->buff_info[i];
+ tx_buffer->sglist =
+ &iq->sglist[i * OCTEP_SGLIST_ENTRIES_PER_PKT];
+ tx_buffer->sglist_dma =
+ iq->sglist_dma + (i * OCTEP_SGLIST_SIZE_PER_PKT);
+ }
+
+ octep_iq_reset_indices(iq);
+ oct->hw_ops.setup_iq_regs(oct, q_no);
+
+ oct->num_iqs++;
+ return 0;
+
+buff_info_err:
+ dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
+sglist_alloc_err:
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+desc_dma_alloc_err:
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+iq_alloc_err:
+ return -1;
+}
+
+/**
+ * octep_free_iq() - Free Tx queue resources.
+ *
+ * @iq: Octeon Tx queue data structure.
+ *
+ * Free all the resources allocated for a Tx queue.
+ */
+static void octep_free_iq(struct octep_iq *iq)
+{
+ struct octep_device *oct = iq->octep_dev;
+ u64 desc_ring_size, sglist_size;
+ int q_no = iq->q_no;
+
+ desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+
+ vfree(iq->buff_info);
+
+ if (iq->desc_ring)
+ dma_free_coherent(iq->dev, desc_ring_size,
+ iq->desc_ring, iq->desc_ring_dma);
+
+ sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
+ CFG_GET_IQ_NUM_DESC(oct->conf);
+ if (iq->sglist)
+ dma_free_coherent(iq->dev, sglist_size,
+ iq->sglist, iq->sglist_dma);
+
+ vfree(iq);
+ oct->iq[q_no] = NULL;
+ oct->num_iqs--;
+}
+
+/**
+ * octep_setup_iqs() - setup resources for all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_setup_iqs(struct octep_device *oct)
+{
+ int i;
+
+ oct->num_iqs = 0;
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ if (octep_setup_iq(oct, i)) {
+ dev_err(&oct->pdev->dev,
+ "Failed to setup IQ(TxQ)-%d.\n", i);
+ goto iq_setup_err;
+ }
+ dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
+ }
+
+ return 0;
+
+iq_setup_err:
+ while (i) {
+ i--;
+ octep_free_iq(oct->iq[i]);
+ }
+ return -1;
+}
+
+/**
+ * octep_free_iqs() - Free resources of all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_free_iqs(struct octep_device *oct)
+{
+ int i;
+
+ for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+ octep_free_iq(oct->iq[i]);
+ dev_dbg(&oct->pdev->dev,
+ "Successfully destroyed IQ(TxQ)-%d.\n", i);
+ }
+ oct->num_iqs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
new file mode 100644
index 000000000000..2ef57980eb47
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_TX_H_
+#define _OCTEP_TX_H_
+
+#define IQ_SEND_OK 0
+#define IQ_SEND_STOP 1
+#define IQ_SEND_FAILED -1
+
+#define TX_BUFTYPE_NONE 0
+#define TX_BUFTYPE_NET 1
+#define TX_BUFTYPE_NET_SG 2
+#define NUM_TX_BUFTYPES 3
+
+/* Hardware format for Scatter/Gather list */
+struct octep_tx_sglist_desc {
+ u16 len[4];
+ dma_addr_t dma_ptr[4];
+};
+
+/* Each Scatter/Gather entry sent to hardware holds four pointers.
+ * So, the number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1'
+ * is for the main skb, which also goes as a gather buffer to Octeon hardware.
+ * To allocate sufficient SGLIST entries for a packet with max fragments,
+ * align by adding 3 before calculating the max SGLIST entries per packet.
+ */
+#define OCTEP_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4)
+#define OCTEP_SGLIST_SIZE_PER_PKT \
+ (OCTEP_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_tx_sglist_desc))
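For example, assuming MAX_SKB_FRAGS is 17 (typical with 4 KiB pages), OCTEP_SGLIST_ENTRIES_PER_PKT evaluates to (17 + 1 + 3) / 4 = 5 entries, i.e. room for up to 20 buffer pointers per packet.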
+
+struct octep_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct octep_tx_sglist_desc *sglist;
+ dma_addr_t sglist_dma;
+ u8 gather;
+};
+
+#define OCTEP_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_tx_buffer))
+
+/* Hardware interface Tx statistics */
+struct octep_iface_tx_stats {
+ /* Packets dropped due to excessive collisions */
+ u64 xscol;
+
+ /* Packets dropped due to excessive deferral */
+ u64 xsdef;
+
+ /* Packets sent that experienced multiple collisions before successful
+ * transmission
+ */
+ u64 mcol;
+
+ /* Packets sent that experienced a single collision before successful
+ * transmission
+ */
+ u64 scol;
+
+ /* Total octets sent on the interface */
+ u64 octs;
+
+ /* Total frames sent on the interface */
+ u64 pkts;
+
+ /* Packets sent with an octet count < 64 */
+ u64 hist_lt64;
+
+ /* Packets sent with an octet count == 64 */
+ u64 hist_eq64;
+
+	/* Packets sent with an octet count of 65-127 */
+	u64 hist_65to127;
+
+	/* Packets sent with an octet count of 128-255 */
+	u64 hist_128to255;
+
+	/* Packets sent with an octet count of 256-511 */
+	u64 hist_256to511;
+
+	/* Packets sent with an octet count of 512-1023 */
+	u64 hist_512to1023;
+
+ /* Packets sent with an octet count of 1024-1518 */
+ u64 hist_1024to1518;
+
+ /* Packets sent with an octet count of > 1518 */
+ u64 hist_gt1518;
+
+ /* Packets sent to a broadcast DMAC */
+ u64 bcst;
+
+ /* Packets sent to the multicast DMAC */
+ u64 mcst;
+
+ /* Packets sent that experienced a transmit underflow and were
+ * truncated
+ */
+ u64 undflw;
+
+ /* Control/PAUSE packets sent */
+ u64 ctl;
+};
+
+/* Input Queue statistics. Each input queue has seven stats fields. */
+struct octep_iq_stats {
+ /* Instructions posted to this queue. */
+ u64 instr_posted;
+
+ /* Instructions copied by hardware for processing. */
+ u64 instr_completed;
+
+ /* Instructions that could not be processed. */
+ u64 instr_dropped;
+
+ /* Bytes sent through this queue. */
+ u64 bytes_sent;
+
+ /* Gather entries sent through this queue. */
+ u64 sgentry_sent;
+
+ /* Number of transmit failures due to TX_BUSY */
+ u64 tx_busy;
+
+ /* Number of times the queue is restarted */
+ u64 restart_cnt;
+};
+
+/* The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to the Octeon device from the host. Each input queue (up to 4) of
+ * an Octeon device has one such structure to represent it.
+ */
+struct octep_iq {
+ u32 q_no;
+
+ struct octep_device *octep_dev;
+ struct net_device *netdev;
+ struct device *dev;
+ struct netdev_queue *netdev_q;
+
+ /* Index in input ring where driver should write the next packet */
+ u16 host_write_index;
+
+ /* Index in input ring where Octeon is expected to read next packet */
+ u16 octep_read_index;
+
+ /* This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ u16 flush_index;
+
+ /* Statistics for this input queue. */
+ struct octep_iq_stats stats;
+
+ /* This field keeps track of the instructions pending in this queue. */
+ atomic_t instr_pending;
+
+ /* Pointer to the Virtual Base addr of the input ring. */
+ struct octep_tx_desc_hw *desc_ring;
+
+ /* DMA mapped base address of the input descriptor ring. */
+ dma_addr_t desc_ring_dma;
+
+ /* Info of Tx buffers pending completion. */
+ struct octep_tx_buffer *buff_info;
+
+ /* Base pointer to Scatter/Gather lists for all ring descriptors. */
+ struct octep_tx_sglist_desc *sglist;
+
+ /* DMA mapped addr of Scatter Gather Lists */
+ dma_addr_t sglist_dma;
+
+ /* Octeon doorbell register for the ring. */
+ u8 __iomem *doorbell_reg;
+
+ /* Octeon instruction count register for this ring. */
+ u8 __iomem *inst_cnt_reg;
+
+ /* interrupt level register for this ring */
+ u8 __iomem *intr_lvl_reg;
+
+ /* Maximum no. of instructions in this queue. */
+ u32 max_count;
+ u32 ring_size_mask;
+
+ u32 pkt_in_done;
+ u32 pkts_processed;
+
+ u32 status;
+
+ /* Number of instructions pending to be posted to Octeon. */
+ u32 fill_cnt;
+
+ /* The max. number of instructions that can be held pending by the
+ * driver before ringing doorbell.
+ */
+ u32 fill_threshold;
+};
+
+/* Hardware Tx Instruction Header */
+struct octep_instr_hdr {
+ /* Data Len */
+ u64 tlen:16;
+
+ /* Reserved */
+ u64 rsvd:20;
+
+ /* PKIND for SDP */
+ u64 pkind:6;
+
+ /* Front Data size */
+ u64 fsz:6;
+
+ /* No. of entries in gather list */
+ u64 gsz:14;
+
+	/* Gather indicator 1=gather */
+ u64 gather:1;
+
+ /* Reserved3 */
+ u64 reserved3:1;
+};
+
+/* Hardware Tx completion response header */
+struct octep_instr_resp_hdr {
+ /* Request ID */
+ u64 rid:16;
+
+ /* PCIe port to use for response */
+ u64 pcie_port:3;
+
+ /* Scatter indicator 1=scatter */
+ u64 scatter:1;
+
+ /* Size of Expected result OR no. of entries in scatter list */
+ u64 rlenssz:14;
+
+ /* Desired destination port for result */
+ u64 dport:6;
+
+ /* Opcode Specific parameters */
+ u64 param:8;
+
+ /* Opcode for the return packet */
+ u64 opcode:16;
+};
+
+/* 64-byte Tx instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ *
+ * Only the first 16 bytes (dptr and ih) are mandatory; the rest are optional
+ * and filled by the driver based on firmware/hardware capabilities.
+ * These optional headers are collectively called Front Data, and their size
+ * is described by ih->fsz.
+ */
+struct octep_tx_desc_hw {
+ /* Pointer where the input data is available. */
+ u64 dptr;
+
+ /* Instruction Header. */
+ union {
+ struct octep_instr_hdr ih;
+ u64 ih64;
+ };
+
+ /* Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+ /* Input Instruction Response Header. */
+ struct octep_instr_resp_hdr irh;
+
+ /* Additional headers available in a 64-byte instruction. */
+ u64 exhdr[4];
+};
+
+#define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw))
+#endif /* _OCTEP_TX_H_ */
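Since dptr, ih, rptr and irh are 8 bytes each and exhdr[4] adds 32 bytes, the descriptor totals 64 bytes. A compile-time check along these lines (illustrative, not part of this patch) would catch accidental layout changes:

static_assert(OCTEP_IQ_DESC_SIZE == 64,
	      "64-byte instruction mode expects a 64-byte descriptor");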
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 639893d87055..993ac180a5db 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -33,8 +33,10 @@ config OCTEONTX2_PF
select OCTEONTX2_MBOX
select NET_DEVLINK
depends on (64BIT && COMPILE_TEST) || ARM64
+ select DIMLIB
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ depends on MACSEC || !MACSEC
help
This driver supports Marvell's OcteonTX2 NIC physical function.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 7f4a4ca9af78..3cf4c8285c90 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -11,4 +11,4 @@ rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
- rvu_sdp.o
+ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 186d00a9ab35..bd77152bb8d7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -64,6 +64,7 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
{ 0, } /* end of table */
};
@@ -73,12 +74,13 @@ static bool is_dev_rpm(void *cgxd)
{
struct cgx *cgx = cgxd;
- return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
+ return (cgx->pdev->device == PCI_DEVID_CN10K_RPM) ||
+ (cgx->pdev->device == PCI_DEVID_CN10KB_RPM);
}
bool is_lmac_valid(struct cgx *cgx, int lmac_id)
{
- if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
+ if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
return false;
return test_bit(lmac_id, &cgx->lmac_bmap);
}
@@ -90,7 +92,7 @@ static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
{
int tmp, id = 0;
- for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+ for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
if (tmp == lmac_id)
break;
id++;
@@ -121,7 +123,7 @@ u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
- if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
+ if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
return NULL;
return cgx->lmac_idmap[lmac_id];
@@ -485,7 +487,7 @@ int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
- cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
+ cgx_write(cgx, lmac_id, cgx->mac_ops->rxid_map_offset, (pkind & 0x3F));
return 0;
}
@@ -498,6 +500,32 @@ static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}
+static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u8 num_lmacs;
+ u32 fifo_len;
+
+ fifo_len = cgx->mac_ops->fifo_len;
+ num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);
+
+ switch (num_lmacs) {
+ case 1:
+ return fifo_len;
+ case 2:
+ return fifo_len / 2;
+ case 3:
+		/* LMAC0 gets half of the FIFO, the rest get 1/4th */
+ if (lmac_id == 0)
+ return fifo_len / 2;
+ return fifo_len / 4;
+ case 4:
+ default:
+ return fifo_len / 4;
+ }
+ return 0;
+}
+
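As a worked example, assuming a 64 KiB Rx FIFO: a single active LMAC gets the full 64 KiB, two LMACs get 32 KiB each, three give LMAC0 32 KiB and LMAC1/LMAC2 16 KiB each, and four LMACs get 16 KiB each.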
/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
@@ -578,31 +606,78 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
}
}
+static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (is_dev_rpm(cgx))
+ return 0;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
+ return 0;
+}
+
/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
+ u8 rx_pause, tx_pause;
+ bool is_pfc_enabled;
+ struct lmac *lmac;
u64 cfg;
if (!cgx)
return;
- if (enable) {
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return;
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+	/* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
+ cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
+ is_pfc_enabled = rx_pause ? false : true;
+
+ if (enable) {
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
} else {
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ if (!is_pfc_enabled) {
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+ }
}
}
@@ -667,6 +742,10 @@ int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
if (!cgx || lmac_id >= cgx->lmac_count)
return -ENODEV;
+
+ if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
+ return 0;
+
fec_stats_count =
cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
@@ -695,9 +774,9 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
if (enable)
- cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+ cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
else
- cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+ cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
return 0;
}
@@ -722,26 +801,6 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
return !!(last & DATA_PKT_TX_EN);
}
-static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause)
-{
- struct cgx *cgx = cgxd;
- u64 cfg;
-
- if (is_dev_rpm(cgx))
- return 0;
-
- if (!is_lmac_valid(cgx, lmac_id))
- return -ENODEV;
-
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);
-
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
- return 0;
-}
-
static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause)
{
@@ -782,21 +841,8 @@ static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
if (!is_lmac_valid(cgx, lmac_id))
return;
- if (enable) {
- /* Enable receive pause frames */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
-
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
-
- /* Enable pause frames transmission */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- cfg |= CGX_SMUX_TX_CTL_L2P_BP_CONV;
- cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+ if (enable) {
/* Set pause time and interval */
cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
DEFAULT_PAUSE_TIME);
@@ -813,21 +859,127 @@ static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
cfg &= ~0xFFFFULL;
cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
cfg | (DEFAULT_PAUSE_TIME / 2));
- } else {
- /* ALL pause frames received are completely ignored */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
- cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
- cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
- cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
- cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+ /* ALL pause frames received are completely ignored */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
+ cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+
+ cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
+ cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
+ cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
- /* Disable pause frames transmission */
- cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
- cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
- cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);
+ /* Disable all PFC classes by default */
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+}
+
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx)
+{
+ struct cgx *cgx = cgxd;
+ struct lmac *lmac;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ if (!rx_pause)
+ clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
+
+ if (!tx_pause)
+ clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+ else
+ set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
+
+ /* check if other pfvfs are using flow control */
+ if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+			 "Receive Flow control disable not permitted as it is used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
+ dev_warn(&cgx->pdev->dev,
+			 "Transmit Flow control disable not permitted as it is used by other PFVFs\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ /* Return as no traffic classes are requested */
+ if (tx_pause && !pfc_en)
+ return 0;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+ pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg);
+
+ if (rx_pause) {
+ cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
+ } else {
+ cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
+ CGXX_SMUX_CBFC_CTL_BCK_EN |
+ CGXX_SMUX_CBFC_CTL_DRP_EN);
+ }
+
+ if (tx_pause) {
+ cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
+ } else {
+ cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
+ cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
}
+
+ cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
+
+ /* Write source MAC address which will be filled into PFC packet */
+ cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
+ cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);
+
+ return 0;
+}
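A hypothetical caller (not part of this patch) enabling PFC in both directions on, say, traffic classes 0 and 3 of LMAC 2 would look roughly like:

	err = cgx_lmac_pfc_config(cgxd, 2, /* tx_pause */ 1,
				  /* rx_pause */ 1, BIT(0) | BIT(3));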
+
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
+
+ *rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
+ *tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);
+
+ return 0;
}
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
@@ -892,9 +1044,9 @@ int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
dev = &cgx->pdev->dev;
- dev_err(dev, "cgx port %d:%d cmd timeout\n",
- cgx->cgx_id, lmac->lmac_id);
- err = -EIO;
+ dev_err(dev, "cgx port %d:%d cmd %lld timeout\n",
+ cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req));
+ err = LMAC_AF_ERR_CMD_TIMEOUT;
goto unlock;
}
@@ -1078,7 +1230,15 @@ static inline void link_status_user_format(u64 lstat,
linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
- linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
+ linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);
+
+ if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
+ dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d",
+ linfo->lmac_type_id, cgx->cgx_id, lmac_id);
+ strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1);
+ return;
+ }
+
lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}
@@ -1249,7 +1409,7 @@ int cgx_get_fwdata_base(u64 *base)
if (!cgx)
return -ENXIO;
- first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
+ first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
if (!err)
@@ -1320,17 +1480,25 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
u64 req = 0;
u64 resp;
- if (enable)
+ if (enable) {
req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
- else
- req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+		/* On CN10K, firmware offloads link bring-up/down operations to ECP.
+		 * On OcteonTX2, link operations are handled by the firmware itself,
+		 * which can cause mbox errors, so configure the maximum time the
+		 * firmware polls for the link as 1000 ms.
+		 */
+ if (!is_dev_rpm(cgx))
+ req = FIELD_SET(LINKCFG_TIMEOUT, 1000, req);
+ } else {
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+ }
return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
- int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
+ int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
u64 req = 0;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
@@ -1368,7 +1536,7 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
int i, err;
/* Do Link up for all the enabled lmacs */
- for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+ for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
err = cgx_fwi_link_change(cgx, i, true);
if (err)
dev_info(dev, "cgx port %d:%d Link up command failed\n",
@@ -1388,14 +1556,6 @@ int cgx_lmac_linkup_start(void *cgxd)
return 0;
}
-static void cgx_lmac_get_fifolen(struct cgx *cgx)
-{
- u64 cfg;
-
- cfg = cgx_read(cgx, 0, CGX_CONST);
- cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
-}
-
static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
int cnt, bool req_free)
{
@@ -1450,17 +1610,20 @@ static int cgx_lmac_init(struct cgx *cgx)
u64 lmac_list;
int i, err;
- cgx_lmac_get_fifolen(cgx);
-
- cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
/* lmac_list specifies which lmacs are enabled
* when bit n is set to 1, LMAC[n] is enabled
*/
- if (cgx->mac_ops->non_contiguous_serdes_lane)
- lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
+ if (cgx->mac_ops->non_contiguous_serdes_lane) {
+ if (is_dev_rpm2(cgx))
+ lmac_list =
+ cgx_read(cgx, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL;
+ else
+ lmac_list =
+ cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
+ }
- if (cgx->lmac_count > MAX_LMAC_PER_CGX)
- cgx->lmac_count = MAX_LMAC_PER_CGX;
+ if (cgx->lmac_count > cgx->max_lmac_per_mac)
+ cgx->lmac_count = cgx->max_lmac_per_mac;
for (i = 0; i < cgx->lmac_count; i++) {
lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
@@ -1481,7 +1644,9 @@ static int cgx_lmac_init(struct cgx *cgx)
lmac->cgx = cgx;
lmac->mac_to_index_bmap.max =
- MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
+ cgx->mac_ops->dmac_filter_count /
+ cgx->lmac_count;
+
err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
if (err)
goto err_name_free;
@@ -1489,6 +1654,16 @@ static int cgx_lmac_init(struct cgx *cgx)
/* Reserve first entry for default MAC address */
set_bit(0, lmac->mac_to_index_bmap.bmap);
+ lmac->rx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
+ if (err)
+ goto err_dmac_bmap_free;
+
+ lmac->tx_fc_pfvf_bmap.max = 128;
+ err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
+ if (err)
+ goto err_rx_fc_bmap_free;
+
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
@@ -1505,6 +1680,10 @@ static int cgx_lmac_init(struct cgx *cgx)
return cgx_lmac_verify_fwi_version(cgx);
err_bitmap_free:
+ rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
+err_rx_fc_bmap_free:
+ rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
+err_dmac_bmap_free:
rvu_free_bitmap(&lmac->mac_to_index_bmap);
err_name_free:
kfree(lmac->name);
@@ -1524,7 +1703,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
}
/* Free all lmac related resources */
- for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+ for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
lmac = cgx->lmac_idmap[i];
if (!lmac)
continue;
@@ -1540,6 +1719,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
static void cgx_populate_features(struct cgx *cgx)
{
+ u64 cfg;
+
+ cfg = cgx_read(cgx, 0, CGX_CONST);
+ cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+ cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
+
if (is_dev_rpm(cgx))
cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
@@ -1548,6 +1733,15 @@ static void cgx_populate_features(struct cgx *cgx)
RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
}
+static u8 cgx_get_rxid_mapoffset(struct cgx *cgx)
+{
+ if (cgx->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10KB_RPM ||
+ is_dev_rpm2(cgx))
+ return 0x80;
+ else
+ return 0x60;
+}
+
static struct mac_ops cgx_mac_ops = {
.name = "cgx",
.csr_offset = 0,
@@ -1560,16 +1754,23 @@ static struct mac_ops cgx_mac_ops = {
.non_contiguous_serdes_lane = false,
.rx_stats_cnt = 9,
.tx_stats_cnt = 18,
+ .dmac_filter_count = 32,
.get_nr_lmacs = cgx_get_nr_lmacs,
.get_lmac_type = cgx_get_lmac_type,
+ .lmac_fifo_len = cgx_get_lmac_fifo_len,
.mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
.mac_get_rx_stats = cgx_get_rx_stats,
.mac_get_tx_stats = cgx_get_tx_stats,
+ .get_fec_stats = cgx_get_fec_stats,
.mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding,
.mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
.mac_pause_frm_config = cgx_lmac_pause_frm_config,
.mac_enadis_ptp_config = cgx_lmac_ptp_config,
+ .mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
+ .mac_tx_enable = cgx_lmac_tx_enable,
+ .pfc_config = cgx_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -1586,11 +1787,13 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, cgx);
/* Use mac_ops to get MAC specific features */
- if (pdev->device == PCI_DEVID_CN10K_RPM)
- cgx->mac_ops = rpm_get_mac_ops();
+ if (is_dev_rpm(cgx))
+ cgx->mac_ops = rpm_get_mac_ops(cgx);
else
cgx->mac_ops = &cgx_mac_ops;
+ cgx->mac_ops->rxid_map_offset = cgx_get_rxid_mapoffset(cgx);
+
err = pci_enable_device(pdev);
if (err) {
dev_err(dev, "Failed to enable PCI device\n");
@@ -1612,6 +1815,13 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
+ cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
+ if (!cgx->lmac_count) {
+ dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
+ err = -EOPNOTSUPP;
+ goto err_release_regions;
+ }
+
nvec = pci_msix_vec_count(cgx->pdev);
err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
if (err < 0 || err != nvec) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index ab1e4abdea38..5a20d93004c7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -18,11 +18,7 @@
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 0
-#define CGX_ID_MASK 0x7
-#define MAX_LMAC_PER_CGX 4
-#define MAX_DMAC_ENTRIES_PER_CGX 32
-#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
-#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
+#define CGX_ID_MASK 0xF
/* Registers */
#define CGXX_CMRX_CFG 0x00
@@ -30,7 +26,6 @@
#define CMR_P2X_SEL_SHIFT 59ULL
#define CMR_P2X_SEL_NIX0 1ULL
#define CMR_P2X_SEL_NIX1 2ULL
-#define CMR_EN BIT_ULL(55)
#define DATA_PKT_TX_EN BIT_ULL(53)
#define DATA_PKT_RX_EN BIT_ULL(54)
#define CGX_LMAC_TYPE_SHIFT 40
@@ -56,7 +51,8 @@
#define CGXX_SCRATCH0_REG 0x1050
#define CGXX_SCRATCH1_REG 0x1058
#define CGX_CONST 0x2000
-#define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(23, 0)
+#define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(55, 32)
+#define CGX_CONST_MAX_LMACS GENMASK_ULL(31, 24)
#define CGXX_SPUX_CONTROL1 0x10000
#define CGXX_SPUX_LNX_FEC_CORR_BLOCKS 0x10700
#define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800
@@ -76,6 +72,13 @@
#define CGXX_SMUX_TX_CTL 0x20178
#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
+#define CGXX_SMUX_SMAC 0x20108
+#define CGXX_SMUX_CBFC_CTL 0x20218
+#define CGXX_SMUX_CBFC_CTL_RX_EN BIT_ULL(0)
+#define CGXX_SMUX_CBFC_CTL_TX_EN BIT_ULL(1)
+#define CGXX_SMUX_CBFC_CTL_DRP_EN BIT_ULL(2)
+#define CGXX_SMUX_CBFC_CTL_BCK_EN BIT_ULL(3)
+#define CGX_PFC_CLASS_MASK GENMASK_ULL(47, 32)
#define CGXX_GMP_GMI_TX_PAUSE_PKT_TIME 0x38230
#define CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL 0x38248
#define CGX_SMUX_TX_CTL_L2P_BP_CONV BIT_ULL(7)
@@ -85,7 +88,7 @@
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
-#define CGX_CMD_TIMEOUT 2200 /* msecs */
+#define CGX_CMD_TIMEOUT 5000 /* msecs */
#define DEFAULT_PAUSE_TIME 0x7FF
#define CGX_LMAC_FWI 0
@@ -172,4 +175,10 @@ u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
u64 cgx_read_dmac_entry(void *cgxd, int index);
+int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ int pfvf_idx);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index f72ec0e2506f..d4a27c882a5b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -261,4 +261,6 @@ struct cgx_lnk_sts {
#define CMDMODECHANGE_PORT GENMASK_ULL(21, 14)
#define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22)
+/* LINK_BRING_UP command timeout */
+#define LINKCFG_TIMEOUT GENMASK_ULL(21, 8)
#endif /* __CGX_FW_INTF_H__ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index fc6e7423cbd8..39aaf0e4467d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -17,6 +17,8 @@
* @resp: command response
* @link_info: link related information
* @mac_to_index_bmap: Mac address to CGX table index mapping
+ * @rx_fc_pfvf_bmap: Receive flow control enabled netdev mapping
+ * @tx_fc_pfvf_bmap: Transmit flow control enabled netdev mapping
* @event_cb: callback for linkchange events
* @event_cb_lock: lock for serializing callback with unregister
* @cgx: parent cgx port
@@ -33,6 +35,8 @@ struct lmac {
u64 resp;
struct cgx_link_user_info link_info;
struct rsrc_bmap mac_to_index_bmap;
+ struct rsrc_bmap rx_fc_pfvf_bmap;
+ struct rsrc_bmap tx_fc_pfvf_bmap;
struct cgx_event_cb event_cb;
/* lock for serializing callback with unregister */
spinlock_t event_cb_lock;
@@ -71,11 +75,17 @@ struct mac_ops {
/* RPM & CGX differs in number of Receive/transmit stats */
u8 rx_stats_cnt;
u8 tx_stats_cnt;
+	/* Unlike CN10K, which shares the same CSR offset with CGX,
+	 * CNF10KB has a different CSR offset.
+	 */
+ u64 rxid_map_offset;
+ u8 dmac_filter_count;
/* Incase of RPM get number of lmacs from RPMX_CMR_RX_LMACS[LMAC_EXIST]
* number of setbits in lmac_exist tells number of lmacs
*/
int (*get_nr_lmacs)(void *cgx);
u8 (*get_lmac_type)(void *cgx, int lmac_id);
+ u32 (*lmac_fifo_len)(void *cgx, int lmac_id);
int (*mac_lmac_intl_lbk)(void *cgx, int lmac_id,
bool enable);
/* Register Stats related functions */
@@ -107,6 +117,18 @@ struct mac_ops {
void (*mac_enadis_ptp_config)(void *cgxd,
int lmac_id,
bool enable);
+
+ int (*mac_rx_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*mac_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*pfc_config)(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause, u16 pfc_en);
+
+ int (*mac_get_pfc_frm_cfg)(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause);
+
+ /* FEC stats */
+ int (*get_fec_stats)(void *cgxd, int lmac_id,
+ struct cgx_fec_stats_rsp *rsp);
};
struct cgx {
@@ -114,7 +136,10 @@ struct cgx {
struct pci_dev *pdev;
u8 cgx_id;
u8 lmac_count;
- struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+ /* number of LMACs per MAC could be 4 or 8 */
+ u8 max_lmac_per_mac;
+#define MAX_LMAC_COUNT 8
+ struct lmac *lmac_idmap[MAX_LMAC_COUNT];
struct work_struct cgx_cmd_work;
struct workqueue_struct *cgx_cmd_workq;
struct list_head cgx_list;
@@ -136,6 +161,6 @@ struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx);
int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac);
int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id);
bool is_lmac_valid(struct cgx *cgx, int lmac_id);
-struct mac_ops *rpm_get_mac_ops(void);
+struct mac_ops *rpm_get_mac_ops(struct cgx *cgx);
#endif /* LMAC_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 2898931d5260..9690ac01f02c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(otx2_mbox_init);
*/
int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
struct pci_dev *pdev, void *reg_base,
- int direction, int ndevs)
+ int direction, int ndevs, unsigned long *pf_bmap)
{
struct otx2_mbox_dev *mdev;
int devid, err;
@@ -169,6 +169,9 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
mbox->hwbase = hwbase[0];
for (devid = 0; devid < ndevs; devid++) {
+ if (!test_bit(devid, pf_bmap))
+ continue;
+
mdev = &mbox->dev[devid];
mdev->mbase = hwbase[devid];
mdev->hwbase = hwbase[devid];
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 4e79e918a161..6389ed83637d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -33,7 +33,7 @@
#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
-#define MBOX_RSP_TIMEOUT 3000 /* Time(ms) to wait for mbox response */
+#define MBOX_RSP_TIMEOUT 6000 /* Time(ms) to wait for mbox response */
#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
@@ -96,9 +96,10 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox);
int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
struct pci_dev *pdev, void __force *reg_base,
int direction, int ndevs);
+
int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
struct pci_dev *pdev, void __force *reg_base,
- int direction, int ndevs);
+ int direction, int ndevs, unsigned long *bmap);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
@@ -169,9 +170,12 @@ M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
cgx_features_info_msg) \
M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
-M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, cgx_mac_addr_reset_req, \
+ msg_rsp) \
M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \
- msg_rsp) \
+ cgx_mac_addr_update_rsp) \
+M(CGX_PRIO_FLOW_CTRL_CFG, 0x21F, cgx_prio_flow_ctrl_cfg, cgx_pfc_cfg, \
+ cgx_pfc_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -192,6 +196,9 @@ M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
msg_rsp) \
M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
+M(CPT_LF_RESET, 0xA08, cpt_lf_reset, cpt_lf_rst_req, msg_rsp) \
+M(CPT_FLT_ENG_INFO, 0xA09, cpt_flt_eng_info, cpt_flt_eng_info_req, \
+ cpt_flt_eng_info_rsp) \
/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
@@ -239,6 +246,12 @@ M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
npc_mcam_get_stats_req, \
npc_mcam_get_stats_rsp) \
+M(NPC_GET_FIELD_HASH_INFO, 0x6013, npc_get_field_hash_info, \
+ npc_get_field_hash_info_req, \
+ npc_get_field_hash_info_rsp) \
+M(NPC_GET_FIELD_STATUS, 0x6014, npc_get_field_status, \
+ npc_get_field_status_req, \
+ npc_get_field_status_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
nix_lf_alloc_req, nix_lf_alloc_rsp) \
@@ -287,20 +300,76 @@ M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
- nix_bandprof_get_hwinfo_rsp)
-
-/* Messages initiated by AF (range 0xC00 - 0xDFF) */
+ nix_bandprof_get_hwinfo_rsp) \
+M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
+ msg_req, nix_inline_ipsec_cfg) \
+/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
+M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
+ mcs_alloc_rsrc_rsp) \
+M(MCS_FREE_RESOURCES, 0xa001, mcs_free_resources, mcs_free_rsrc_req, msg_rsp) \
+M(MCS_FLOWID_ENTRY_WRITE, 0xa002, mcs_flowid_entry_write, mcs_flowid_entry_write_req, \
+ msg_rsp) \
+M(MCS_SECY_PLCY_WRITE, 0xa003, mcs_secy_plcy_write, mcs_secy_plcy_write_req, \
+ msg_rsp) \
+M(MCS_RX_SC_CAM_WRITE, 0xa004, mcs_rx_sc_cam_write, mcs_rx_sc_cam_write_req, \
+ msg_rsp) \
+M(MCS_SA_PLCY_WRITE, 0xa005, mcs_sa_plcy_write, mcs_sa_plcy_write_req, \
+ msg_rsp) \
+M(MCS_TX_SC_SA_MAP_WRITE, 0xa006, mcs_tx_sc_sa_map_write, mcs_tx_sc_sa_map, \
+ msg_rsp) \
+M(MCS_RX_SC_SA_MAP_WRITE, 0xa007, mcs_rx_sc_sa_map_write, mcs_rx_sc_sa_map, \
+ msg_rsp) \
+M(MCS_FLOWID_ENA_ENTRY, 0xa008, mcs_flowid_ena_entry, mcs_flowid_ena_dis_entry, \
+ msg_rsp) \
+M(MCS_PN_TABLE_WRITE, 0xa009, mcs_pn_table_write, mcs_pn_table_write_req, \
+ msg_rsp) \
+M(MCS_SET_ACTIVE_LMAC, 0xa00a, mcs_set_active_lmac, mcs_set_active_lmac, \
+ msg_rsp) \
+M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info) \
+M(MCS_GET_FLOWID_STATS, 0xa00c, mcs_get_flowid_stats, mcs_stats_req, \
+ mcs_flowid_stats) \
+M(MCS_GET_SECY_STATS, 0xa00d, mcs_get_secy_stats, mcs_stats_req, \
+ mcs_secy_stats) \
+M(MCS_GET_SC_STATS, 0xa00e, mcs_get_sc_stats, mcs_stats_req, mcs_sc_stats) \
+M(MCS_GET_SA_STATS, 0xa00f, mcs_get_sa_stats, mcs_stats_req, mcs_sa_stats) \
+M(MCS_GET_PORT_STATS, 0xa010, mcs_get_port_stats, mcs_stats_req, \
+ mcs_port_stats) \
+M(MCS_CLEAR_STATS, 0xa011, mcs_clear_stats, mcs_clear_stats, msg_rsp) \
+M(MCS_INTR_CFG, 0xa012, mcs_intr_cfg, mcs_intr_cfg, msg_rsp) \
+M(MCS_SET_LMAC_MODE, 0xa013, mcs_set_lmac_mode, mcs_set_lmac_mode, msg_rsp) \
+M(MCS_SET_PN_THRESHOLD, 0xa014, mcs_set_pn_threshold, mcs_set_pn_threshold, \
+ msg_rsp) \
+M(MCS_ALLOC_CTRL_PKT_RULE, 0xa015, mcs_alloc_ctrl_pkt_rule, \
+ mcs_alloc_ctrl_pkt_rule_req, \
+ mcs_alloc_ctrl_pkt_rule_rsp) \
+M(MCS_FREE_CTRL_PKT_RULE, 0xa016, mcs_free_ctrl_pkt_rule, \
+ mcs_free_ctrl_pkt_rule_req, msg_rsp) \
+M(MCS_CTRL_PKT_RULE_WRITE, 0xa017, mcs_ctrl_pkt_rule_write, \
+ mcs_ctrl_pkt_rule_write_req, msg_rsp) \
+M(MCS_PORT_RESET, 0xa018, mcs_port_reset, mcs_port_reset_req, msg_rsp) \
+M(MCS_PORT_CFG_SET, 0xa019, mcs_port_cfg_set, mcs_port_cfg_set_req, msg_rsp)\
+M(MCS_PORT_CFG_GET, 0xa020, mcs_port_cfg_get, mcs_port_cfg_get_req, \
+ mcs_port_cfg_get_rsp) \
+M(MCS_CUSTOM_TAG_CFG_GET, 0xa021, mcs_custom_tag_cfg_get, \
+ mcs_custom_tag_cfg_get_req, \
+ mcs_custom_tag_cfg_get_rsp)
+
+/* Messages initiated by AF (range 0xC00 - 0xEFF) */
#define MBOX_UP_CGX_MESSAGES \
M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
#define MBOX_UP_CPT_MESSAGES \
M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
+#define MBOX_UP_MCS_MESSAGES \
+M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp)
+
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
MBOX_UP_CPT_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
};
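For reference, each M() entry expands inside this enum to a single constant; the new MCS notification line, for instance, expands to:

	MBOX_MSG_MCS_INTR_NOTIFY = 0xE00,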
@@ -426,6 +495,7 @@ struct get_hw_cap_rsp {
struct mbox_msghdr hdr;
u8 nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */
u8 nix_shaping; /* Is shaping and coloring supported */
+ u8 npc_hash_extract; /* Is hash extract supported */
};
/* CGX mbox message formats */
@@ -449,6 +519,7 @@ struct cgx_fec_stats_rsp {
struct cgx_mac_addr_set_or_get {
struct mbox_msghdr hdr;
u8 mac_addr[ETH_ALEN];
+ u32 index;
};
/* Structure for requesting the operation to
@@ -464,7 +535,7 @@ struct cgx_mac_addr_add_req {
*/
struct cgx_mac_addr_add_rsp {
struct mbox_msghdr hdr;
- u8 index;
+ u32 index;
};
/* Structure for requesting the operation to
@@ -472,7 +543,7 @@ struct cgx_mac_addr_add_rsp {
*/
struct cgx_mac_addr_del_req {
struct mbox_msghdr hdr;
- u8 index;
+ u32 index;
};
/* Structure for response against the operation to
@@ -480,7 +551,7 @@ struct cgx_mac_addr_del_req {
*/
struct cgx_max_dmac_entries_get_rsp {
struct mbox_msghdr hdr;
- u8 max_dmac_filters;
+ u32 max_dmac_filters;
};
struct cgx_link_user_info {
@@ -581,10 +652,20 @@ struct cgx_set_link_mode_rsp {
int status;
};
+struct cgx_mac_addr_reset_req {
+ struct mbox_msghdr hdr;
+ u32 index;
+};
+
struct cgx_mac_addr_update_req {
struct mbox_msghdr hdr;
u8 mac_addr[ETH_ALEN];
- u8 index;
+ u32 index;
+};
+
+struct cgx_mac_addr_update_rsp {
+ struct mbox_msghdr hdr;
+ u32 index;
};
#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
@@ -609,6 +690,21 @@ struct rpm_stats_rsp {
u64 tx_stats[RPM_TX_STATS_COUNT];
};
+struct cgx_pfc_cfg {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+ u16 pfc_en; /* bitmap indicating pfc enabled traffic classes */
+};
+
+struct cgx_pfc_rsp {
+ struct mbox_msghdr hdr;
+ u8 rx_pause;
+ u8 tx_pause;
+};
+
+ /* NPA mbox message formats */
+
struct npc_set_pkind {
struct mbox_msghdr hdr;
#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
@@ -732,6 +828,7 @@ enum nix_af_status {
NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
+ NIX_AF_ERR_LINK_CREDITS = -431,
};
/* For NIX RX vtag action */
@@ -840,7 +937,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
- u64 prof;
+ struct nix_bandprof_s prof;
};
union {
struct nix_rq_ctx_s rq_mask;
@@ -848,7 +945,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
- u64 prof_mask;
+ struct nix_bandprof_s prof_mask;
};
};
@@ -1105,7 +1202,7 @@ struct nix_inline_ipsec_cfg {
u32 cpt_credit;
struct {
u8 egrp;
- u8 opcode;
+ u16 opcode;
u16 param1;
u16 param2;
} gen_cfg;
@@ -1114,6 +1211,8 @@ struct nix_inline_ipsec_cfg {
u8 cpt_slot;
} inst_qsel;
u8 enable;
+ u16 bpid;
+ u32 credit_th;
};
/* Per NIX LF inline IPSec configuration */
@@ -1349,6 +1448,10 @@ struct flow_msg {
u8 tc;
__be16 sport;
__be16 dport;
+ union {
+ u8 ip_flag;
+ u8 next_header;
+ };
};
struct npc_install_flow_req {
@@ -1422,11 +1525,28 @@ struct npc_mcam_get_stats_rsp {
u8 stat_ena; /* enabled */
};
+struct npc_get_field_hash_info_req {
+ struct mbox_msghdr hdr;
+ u8 intf;
+};
+
+struct npc_get_field_hash_info_rsp {
+ struct mbox_msghdr hdr;
+ u64 secret_key[3];
+#define NPC_MAX_HASH 2
+#define NPC_MAX_HASH_MASK 2
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_MASK(0..1) */
+ u64 hash_mask[NPC_MAX_INTF][NPC_MAX_HASH][NPC_MAX_HASH_MASK];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_RESULT_CTRL */
+ u64 hash_ctrl[NPC_MAX_INTF][NPC_MAX_HASH];
+};
+
enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
PTP_OP_GET_TSTMP = 2,
PTP_OP_SET_THRESH = 3,
+ PTP_OP_EXTTS_ON = 4,
};
struct ptp_req {
@@ -1434,6 +1554,7 @@ struct ptp_req {
u8 op;
s64 scaled_ppm;
u64 thresh;
+ int extts_on;
};
struct ptp_rsp {
@@ -1441,6 +1562,17 @@ struct ptp_rsp {
u64 clk;
};
+struct npc_get_field_status_req {
+ struct mbox_msghdr hdr;
+ u8 intf;
+ u8 field;
+};
+
+struct npc_get_field_status_rsp {
+ struct mbox_msghdr hdr;
+ u8 enable;
+};
+
struct set_vf_perm {
struct mbox_msghdr hdr;
u16 vf;
@@ -1491,6 +1623,8 @@ struct cpt_lf_alloc_req_msg {
u16 sso_pf_func;
u16 eng_grpmsk;
int blkaddr;
+ u8 ctx_ilen_valid : 1;
+ u8 ctx_ilen : 7;
};
#define CPT_INLINE_INBOUND 0
@@ -1574,6 +1708,28 @@ struct cpt_inst_lmtst_req {
u64 rsvd;
};
+/* Mailbox message format to request for CPT LF reset */
+struct cpt_lf_rst_req {
+ struct mbox_msghdr hdr;
+ u32 slot;
+ u32 rsvd;
+};
+
+/* Mailbox message format to request for CPT faulted engines */
+struct cpt_flt_eng_info_req {
+ struct mbox_msghdr hdr;
+ int blkaddr;
+ bool reset;
+ u32 rsvd;
+};
+
+struct cpt_flt_eng_info_rsp {
+ struct mbox_msghdr hdr;
+ u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU];
+ u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU];
+ u64 rsvd;
+};
+
struct sdp_node_info {
	/* Node to which this PF belongs */
u8 node_id;
@@ -1602,6 +1758,424 @@ enum cgx_af_status {
LMAC_AF_ERR_INVALID_PARAM = -1101,
LMAC_AF_ERR_PF_NOT_MAPPED = -1102,
LMAC_AF_ERR_PERM_DENIED = -1103,
+ LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED = -1104,
+ LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED = -1105,
+ LMAC_AF_ERR_CMD_TIMEOUT = -1106,
+ LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED = -1107,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED = -1108,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED = -1109,
+ LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED = -1110,
+};
+
+enum mcs_direction {
+ MCS_RX,
+ MCS_TX,
+};
+
+enum mcs_rsrc_type {
+ MCS_RSRC_TYPE_FLOWID,
+ MCS_RSRC_TYPE_SECY,
+ MCS_RSRC_TYPE_SC,
+ MCS_RSRC_TYPE_SA,
+};
+
+struct mcs_alloc_rsrc_req {
+ struct mbox_msghdr hdr;
+ u8 rsrc_type;
+ u8 rsrc_cnt; /* Resources count */
+ u8 mcs_id; /* MCS block ID */
+ u8 dir; /* Macsec ingress or egress side */
+ u8 all; /* Allocate one of each resource type */
+ u64 rsvd;
+};
+
+struct mcs_alloc_rsrc_rsp {
+ struct mbox_msghdr hdr;
+ u8 flow_ids[128]; /* Index of reserved entries */
+ u8 secy_ids[128];
+ u8 sc_ids[128];
+ u8 sa_ids[256];
+ u8 rsrc_type;
+ u8 rsrc_cnt; /* Number of entries reserved */
+ u8 mcs_id;
+ u8 dir;
+ u8 all;
+ u8 rsvd[256]; /* reserved fields for future expansion */
+};
+
+struct mcs_free_rsrc_req {
+ struct mbox_msghdr hdr;
+ u8 rsrc_id; /* Index of the entry to be freed */
+ u8 rsrc_type;
+ u8 mcs_id;
+ u8 dir;
+ u8 all; /* Free all the cam resources */
+ u64 rsvd;
+};
+
+struct mcs_flowid_entry_write_req {
+ struct mbox_msghdr hdr;
+ u64 data[4];
+ u64 mask[4];
+ u64 sci; /* CNF10K-B for tx_secy_mem_map */
+ u8 flow_id;
+ u8 secy_id; /* secyid for which flowid is mapped */
+ u8 sc_id; /* Valid if dir = MCS_TX, SC_CAM id mapped to flowid */
+ u8 ena; /* Enable tcam entry */
+ u8 ctrl_pkt;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_secy_plcy_write_req {
+ struct mbox_msghdr hdr;
+ u64 plcy;
+ u8 secy_id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+/* RX SC_CAM mapping */
+struct mcs_rx_sc_cam_write_req {
+ struct mbox_msghdr hdr;
+ u64 sci; /* SCI */
+ u64 secy_id; /* secy index mapped to SC */
+ u8 sc_id; /* SC CAM entry index */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_sa_plcy_write_req {
+ struct mbox_msghdr hdr;
+ u64 plcy[2][9]; /* Supports 2 SA policies */
+ u8 sa_index[2];
+ u8 sa_cnt;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_tx_sc_sa_map {
+ struct mbox_msghdr hdr;
+ u8 sa_index0;
+ u8 sa_index1;
+ u8 rekey_ena;
+ u8 sa_index0_vld;
+ u8 sa_index1_vld;
+ u8 tx_sa_active;
+ u64 sectag_sci;
+ u8 sc_id; /* used as index for SA_MEM_MAP */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_rx_sc_sa_map {
+ struct mbox_msghdr hdr;
+ u8 sa_index;
+ u8 sa_in_use;
+ u8 sc_id;
+ u8 an; /* value range 0-3, sc_id + an used as index SA_MEM_MAP */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_flowid_ena_dis_entry {
+ struct mbox_msghdr hdr;
+ u8 flow_id;
+ u8 ena;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_pn_table_write_req {
+ struct mbox_msghdr hdr;
+ u64 next_pn;
+ u8 pn_id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_hw_info {
+ struct mbox_msghdr hdr;
+ u8 num_mcs_blks; /* Number of MCS blocks */
+ u8 tcam_entries; /* RX/TX Tcam entries per mcs block */
+ u8 secy_entries; /* RX/TX SECY entries per mcs block */
+ u8 sc_entries; /* RX/TX SC CAM entries per mcs block */
+ u8 sa_entries; /* PN table entries = SA entries */
+ u64 rsvd[16];
+};
+
+struct mcs_set_active_lmac {
+ struct mbox_msghdr hdr;
+ u32 lmac_bmap; /* bitmap of active lmac per mcs block */
+ u8 mcs_id;
+ u16 chan_base; /* MCS channel base */
+ u64 rsvd;
+};
+
+struct mcs_set_lmac_mode {
+ struct mbox_msghdr hdr;
+ u8 mode; /* 1:Bypass 0:Operational */
+ u8 lmac_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_reset_req {
+ struct mbox_msghdr hdr;
+ u8 reset;
+ u8 mcs_id;
+ u8 port_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_set_req {
+ struct mbox_msghdr hdr;
+ u8 cstm_tag_rel_mode_sel;
+ u8 custom_hdr_enb;
+ u8 fifo_skid;
+ u8 port_mode;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_get_req {
+ struct mbox_msghdr hdr;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_get_rsp {
+ struct mbox_msghdr hdr;
+ u8 cstm_tag_rel_mode_sel;
+ u8 custom_hdr_enb;
+ u8 fifo_skid;
+ u8 port_mode;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_custom_tag_cfg_get_req {
+ struct mbox_msghdr hdr;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_custom_tag_cfg_get_rsp {
+ struct mbox_msghdr hdr;
+ u16 cstm_etype[8];
+ u8 cstm_indx[8];
+ u8 cstm_etype_en;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+/* MCS mailbox error codes
+ * Range 1201 - 1300.
+ */
+enum mcs_af_status {
+ MCS_AF_ERR_INVALID_MCSID = -1201,
+ MCS_AF_ERR_NOT_MAPPED = -1202,
+};
+
+struct mcs_set_pn_threshold {
+ struct mbox_msghdr hdr;
+ u64 threshold;
+ u8 xpn; /* '1' for setting xpn threshold */
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+enum mcs_ctrl_pkt_rulew_type {
+ MCS_CTRL_PKT_RULE_TYPE_ETH,
+ MCS_CTRL_PKT_RULE_TYPE_DA,
+ MCS_CTRL_PKT_RULE_TYPE_RANGE,
+ MCS_CTRL_PKT_RULE_TYPE_COMBO,
+ MCS_CTRL_PKT_RULE_TYPE_MAC,
+};
+
+struct mcs_alloc_ctrl_pkt_rule_req {
+ struct mbox_msghdr hdr;
+ u8 rule_type;
+ u8 mcs_id; /* MCS block ID */
+ u8 dir; /* Macsec ingress or egress side */
+ u64 rsvd;
+};
+
+struct mcs_alloc_ctrl_pkt_rule_rsp {
+ struct mbox_msghdr hdr;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_free_ctrl_pkt_rule_req {
+ struct mbox_msghdr hdr;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u8 all;
+ u64 rsvd;
+};
+
+struct mcs_ctrl_pkt_rule_write_req {
+ struct mbox_msghdr hdr;
+ u64 data0;
+ u64 data1;
+ u64 data2;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_stats_req {
+ struct mbox_msghdr hdr;
+ u8 id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_flowid_stats {
+ struct mbox_msghdr hdr;
+ u64 tcam_hit_cnt;
+ u64 rsvd;
+};
+
+struct mcs_secy_stats {
+ struct mbox_msghdr hdr;
+ u64 ctl_pkt_bcast_cnt;
+ u64 ctl_pkt_mcast_cnt;
+ u64 ctl_pkt_ucast_cnt;
+ u64 ctl_octet_cnt;
+ u64 unctl_pkt_bcast_cnt;
+ u64 unctl_pkt_mcast_cnt;
+ u64 unctl_pkt_ucast_cnt;
+ u64 unctl_octet_cnt;
+ /* Valid only for RX */
+ u64 octet_decrypted_cnt;
+ u64 octet_validated_cnt;
+ u64 pkt_port_disabled_cnt;
+ u64 pkt_badtag_cnt;
+ u64 pkt_nosa_cnt;
+ u64 pkt_nosaerror_cnt;
+ u64 pkt_tagged_ctl_cnt;
+ u64 pkt_untaged_cnt;
+ u64 pkt_ctl_cnt; /* CN10K-B */
+ u64 pkt_notag_cnt; /* CNF10K-B */
+ /* Valid only for TX */
+ u64 octet_encrypted_cnt;
+ u64 octet_protected_cnt;
+ u64 pkt_noactivesa_cnt;
+ u64 pkt_toolong_cnt;
+ u64 pkt_untagged_cnt;
+ u64 rsvd[4];
+};
+
+struct mcs_port_stats {
+ struct mbox_msghdr hdr;
+ u64 tcam_miss_cnt;
+ u64 parser_err_cnt;
+ u64 preempt_err_cnt; /* CNF10K-B */
+ u64 sectag_insert_err_cnt;
+ u64 rsvd[4];
+};
+
+/* Only for CN10K-B */
+struct mcs_sa_stats {
+ struct mbox_msghdr hdr;
+ /* RX */
+ u64 pkt_invalid_cnt;
+ u64 pkt_nosaerror_cnt;
+ u64 pkt_notvalid_cnt;
+ u64 pkt_ok_cnt;
+ u64 pkt_nosa_cnt;
+ /* TX */
+ u64 pkt_encrypt_cnt;
+ u64 pkt_protected_cnt;
+ u64 rsvd[4];
+};
+
+struct mcs_sc_stats {
+ struct mbox_msghdr hdr;
+ /* RX */
+ u64 hit_cnt;
+ u64 pkt_invalid_cnt;
+ u64 pkt_late_cnt;
+ u64 pkt_notvalid_cnt;
+ u64 pkt_unchecked_cnt;
+ u64 pkt_delay_cnt; /* CNF10K-B */
+ u64 pkt_ok_cnt; /* CNF10K-B */
+ u64 octet_decrypt_cnt; /* CN10K-B */
+ u64 octet_validate_cnt; /* CN10K-B */
+ /* TX */
+ u64 pkt_encrypt_cnt;
+ u64 pkt_protected_cnt;
+ u64 octet_encrypt_cnt; /* CN10K-B */
+ u64 octet_protected_cnt; /* CN10K-B */
+ u64 rsvd[4];
+};
+
+struct mcs_clear_stats {
+ struct mbox_msghdr hdr;
+#define MCS_FLOWID_STATS 0
+#define MCS_SECY_STATS 1
+#define MCS_SC_STATS 2
+#define MCS_SA_STATS 3
+#define MCS_PORT_STATS 4
+ u8 type; /* FLOWID, SECY, SC, SA, PORT */
+ u8 id; /* For type = PORT: if id is 0xFF (invalid), the port number is derived from pcifunc */
+ u8 mcs_id;
+ u8 dir;
+ u8 all; /* All resources stats mapped to PF are cleared */
+};
+
+struct mcs_intr_cfg {
+ struct mbox_msghdr hdr;
+#define MCS_CPM_RX_SECTAG_V_EQ1_INT BIT_ULL(0)
+#define MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT BIT_ULL(1)
+#define MCS_CPM_RX_SECTAG_SL_GTE48_INT BIT_ULL(2)
+#define MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT BIT_ULL(3)
+#define MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT BIT_ULL(4)
+#define MCS_CPM_RX_PACKET_XPN_EQ0_INT BIT_ULL(5)
+#define MCS_CPM_RX_PN_THRESH_REACHED_INT BIT_ULL(6)
+#define MCS_CPM_TX_PACKET_XPN_EQ0_INT BIT_ULL(7)
+#define MCS_CPM_TX_PN_THRESH_REACHED_INT BIT_ULL(8)
+#define MCS_CPM_TX_SA_NOT_VALID_INT BIT_ULL(9)
+#define MCS_BBE_RX_DFIFO_OVERFLOW_INT BIT_ULL(10)
+#define MCS_BBE_RX_PLFIFO_OVERFLOW_INT BIT_ULL(11)
+#define MCS_BBE_TX_DFIFO_OVERFLOW_INT BIT_ULL(12)
+#define MCS_BBE_TX_PLFIFO_OVERFLOW_INT BIT_ULL(13)
+#define MCS_PAB_RX_CHAN_OVERFLOW_INT BIT_ULL(14)
+#define MCS_PAB_TX_CHAN_OVERFLOW_INT BIT_ULL(15)
+ u64 intr_mask; /* Interrupt enable mask */
+ u8 mcs_id;
+ u8 lmac_id;
+ u64 rsvd;
+};
+
+struct mcs_intr_info {
+ struct mbox_msghdr hdr;
+ u64 intr_mask;
+ int sa_id;
+ u8 mcs_id;
+ u8 lmac_id;
+ u64 rsvd;
};
#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
new file mode 100644
index 000000000000..c43f19dfbd74
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
@@ -0,0 +1,1605 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mcs.h"
+#include "mcs_reg.h"
+
+#define DRV_NAME "Marvell MCS Driver"
+
+#define PCI_CFG_REG_BAR_NUM 0
+
+static const struct pci_device_id mcs_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) },
+ { 0, } /* end of table */
+};
+
+static LIST_HEAD(mcs_list);
+
+void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
+{
+ u64 reg;
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(id);
+ stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(id);
+ stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(id);
+ stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(id);
+ stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(id);
+ stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(id);
+ stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(id);
+ stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(id);
+ stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(id);
+ stats->octet_encrypted_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(id);
+ stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(id);
+ stats->pkt_noactivesa_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(id);
+ stats->pkt_toolong_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(id);
+ stats->pkt_untagged_cnt = mcs_reg_read(mcs, reg);
+}
+
+void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
+{
+ u64 reg;
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(id);
+ stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(id);
+ stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(id);
+ stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(id);
+ stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(id);
+ stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(id);
+ stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(id);
+ stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(id);
+ stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(id);
+ stats->octet_decrypted_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(id);
+ stats->octet_validated_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(id);
+ stats->pkt_port_disabled_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(id);
+ stats->pkt_badtag_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(id);
+ stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(id);
+ stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
+ stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
+ stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
+ stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg);
+
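+ /* CNF10K-B (mcs_blks > 1) additionally exposes a per-SecY NOTAG counter */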
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(id);
+ stats->pkt_notag_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(id);
+ else
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(id);
+
+ stats->tcam_hit_cnt = mcs_reg_read(mcs, reg);
+}
+
+void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(id);
+ stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(id);
+ stats->parser_err_cnt = mcs_reg_read(mcs, reg);
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(id);
+ stats->preempt_err_cnt = mcs_reg_read(mcs, reg);
+ }
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(id);
+ stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(id);
+ stats->parser_err_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(id);
+ stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(id);
+ stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(id);
+ stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(id);
+ stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(id);
+ stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(id);
+ stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(id);
+ stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(id);
+ stats->hit_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(id);
+ stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(id);
+ stats->pkt_late_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
+ stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(id);
+ stats->pkt_delay_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(id);
+ stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
+ }
+ if (mcs->hw->mcs_blks == 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(id);
+ stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(id);
+ stats->octet_validate_cnt = mcs_reg_read(mcs, reg);
+ }
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(id);
+ stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(id);
+ stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks == 1) {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(id);
+ stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(id);
+ stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
+ }
+ }
+}
+
+void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir)
+{
+ struct mcs_flowid_stats flowid_st;
+ struct mcs_port_stats port_st;
+ struct mcs_secy_stats secy_st;
+ struct mcs_sc_stats sc_st;
+ struct mcs_sa_stats sa_st;
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CSE_RX_SLAVE_CTRL;
+ else
+ reg = MCSX_CSE_TX_SLAVE_CTRL;
+
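+ /* Bit 0 of the CSE slave CTRL register presumably arms clear-on-read, so the reads below zero the selected counters */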
+ mcs_reg_write(mcs, reg, BIT_ULL(0));
+
+ switch (type) {
+ case MCS_FLOWID_STATS:
+ mcs_get_flowid_stats(mcs, &flowid_st, id, dir);
+ break;
+ case MCS_SECY_STATS:
+ if (dir == MCS_RX)
+ mcs_get_rx_secy_stats(mcs, &secy_st, id);
+ else
+ mcs_get_tx_secy_stats(mcs, &secy_st, id);
+ break;
+ case MCS_SC_STATS:
+ mcs_get_sc_stats(mcs, &sc_st, id, dir);
+ break;
+ case MCS_SA_STATS:
+ mcs_get_sa_stats(mcs, &sa_st, id, dir);
+ break;
+ case MCS_PORT_STATS:
+ mcs_get_port_stats(mcs, &port_st, id, dir);
+ break;
+ }
+
+ mcs_reg_write(mcs, reg, 0x0);
+}
+
+int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* Clear FLOWID stats */
+ for (id = 0; id < map->flow_ids.max; id++) {
+ if (map->flowid2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir);
+ }
+
+ /* Clear SECY stats */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->secy2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir);
+ }
+
+ /* Clear SC stats */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->sc2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SC_STATS, id, dir);
+ }
+
+ /* Clear SA stats */
+ for (id = 0; id < map->sa.max; id++) {
+ if (map->sa2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SA_STATS, id, dir);
+ }
+ return 0;
+}
+
+void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
+ else
+ reg = MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
+ mcs_reg_write(mcs, reg, next_pn);
+}
+
+void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
+{
+ u64 reg, val;
+
+ val = (map->sa_index0 & 0xFF) |
+ (map->sa_index1 & 0xFF) << 9 |
+ (map->rekey_ena & 0x1) << 18 |
+ (map->sa_index0_vld & 0x1) << 19 |
+ (map->sa_index1_vld & 0x1) << 20 |
+ (map->tx_sa_active & 0x1) << 21 |
+ map->sectag_sci << 22;
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+
+ val = map->sectag_sci >> 42;
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
+{
+ u64 val, reg;
+
+ val = (map->sa_index & 0xFF) | map->sa_in_use << 9;
+
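+ /* RX SA map is indexed per AN: entry = (sc_id * 4) + an */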
+ reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir)
+{
+ int reg_id;
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ for (reg_id = 0; reg_id < 8; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
+ mcs_reg_write(mcs, reg, plcy[reg_id]);
+ }
+ } else {
+ for (reg_id = 0; reg_id < 9; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
+ mcs_reg_write(mcs, reg, plcy[reg_id]);
+ }
+ }
+}
+
+void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena)
+{
+ u64 reg, val;
+
+ reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(0);
+ if (sc_id > 63)
+ reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(1);
+
+ if (ena)
+ val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id);
+ else
+ val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id);
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id)
+{
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci);
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy);
+ /* Enable SC CAM */
+ mcs_ena_dis_sc_cam_entry(mcs, sc_id, true);
+}
+
+void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(secy_id);
+ else
+ reg = MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(secy_id);
+
+ mcs_reg_write(mcs, reg, plcy);
+
+ if (mcs->hw->mcs_blks == 1 && dir == MCS_RX)
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull);
+}
+
+void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
+{
+ u64 reg, val;
+
+ val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8;
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
+ } else {
+ val |= (map->sc & 0x7F) << 9;
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
+ }
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena)
+{
+ u64 reg, val;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0;
+ if (flow_id > 63)
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1;
+ } else {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0;
+ if (flow_id > 63)
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1;
+ }
+
+ /* Enable/Disable the tcam entry */
+ if (ena)
+ val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id);
+ else
+ val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id);
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir)
+{
+ int reg_id;
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, data[reg_id]);
+ }
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, mask[reg_id]);
+ }
+ } else {
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, data[reg_id]);
+ }
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, mask[reg_id]);
+ }
+ }
+}
+
+int mcs_install_flowid_bypass_entry(struct mcs *mcs)
+{
+ int flow_id, secy_id, reg_id;
+ struct secy_mem_map map;
+ u64 reg, plcy = 0;
+
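+ /* Reserve the last TCAM and SecY entries as a default bypass rule; the all-ones TCAM masks presumably make every frame match it */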
+ /* Flow entry */
+ flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+ __set_bit(flow_id, mcs->rx.flow_ids.bmap);
+ __set_bit(flow_id, mcs->tx.flow_ids.bmap);
+
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+ }
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+ }
+ /* secy */
+ secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
+ __set_bit(secy_id, mcs->rx.secy.bmap);
+ __set_bit(secy_id, mcs->tx.secy.bmap);
+
+ /* Set validate frames to NULL and enable control port */
+ plcy = 0x7ull;
+ if (mcs->hw->mcs_blks > 1)
+ plcy = BIT_ULL(0) | 0x3ull << 4;
+ mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX);
+
+ /* Enable control port and set mtu to max */
+ plcy = BIT_ULL(0) | GENMASK_ULL(43, 28);
+ if (mcs->hw->mcs_blks > 1)
+ plcy = BIT_ULL(0) | GENMASK_ULL(63, 48);
+ mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX);
+
+ /* Map flowid to secy */
+ map.secy = secy_id;
+ map.ctrl_pkt = 0;
+ map.flow_id = flow_id;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX);
+ map.sc = secy_id;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX);
+
+ /* Enable Flowid entry */
+ mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
+ mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
+
+ return 0;
+}
+
+void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int flow_id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* Clear secy memory to zero */
+ mcs_secy_plcy_write(mcs, 0, secy_id, dir);
+
+ /* Disable the tcam entry using this secy */
+ for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) {
+ if (map->flowid2secy_map[flow_id] != secy_id)
+ continue;
+ mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false);
+ }
+}
+
+int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc)
+{
+ int rsrc_id;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ rsrc_id = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0);
+ if (rsrc_id >= rsrc->max)
+ return -ENOSPC;
+
+ bitmap_set(rsrc->bmap, rsrc_id, 1);
+ pf_map[rsrc_id] = pcifunc;
+
+ return rsrc_id;
+}
+
+int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ u64 dis, reg;
+ int id, rc;
+
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
+ map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
+
+ if (req->all) {
+ for (id = 0; id < map->ctrlpktrule.max; id++) {
+ if (map->ctrlpktrule2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc);
+ dis = mcs_reg_read(mcs, reg);
+ dis &= ~BIT_ULL(id);
+ mcs_reg_write(mcs, reg, dis);
+ }
+ return 0;
+ }
+
+ rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc);
+ dis = mcs_reg_read(mcs, reg);
+ dis &= ~BIT_ULL(req->rule_idx);
+ mcs_reg_write(mcs, reg, dis);
+
+ return rc;
+}
+
+int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req)
+{
+ u64 reg, enb;
+ u64 idx;
+
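+ /* rule_idx is global across rule types; each type's registers are indexed after subtracting that type's MCS_CTRLPKT_*_RULE_OFFSET */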
+ switch (req->rule_type) {
+ case MCS_CTRL_PKT_RULE_TYPE_ETH:
+ req->data0 &= GENMASK(15, 0);
+ if (req->data0 != ETH_P_PAE)
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_ETYPE_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(idx) :
+ MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(idx);
+
+ mcs_reg_write(mcs, reg, req->data0);
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_DA:
+ if (!(req->data0 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_DA_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_DAX(idx) :
+ MCSX_PEX_TX_SLAVE_RULE_DAX(idx);
+
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_RANGE:
+ if (!(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
+ if (req->dir == MCS_RX) {
+ reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ }
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_COMBO:
+ req->data2 &= GENMASK(15, 0);
+ if (req->data2 != ETH_P_PAE || !(req->data0 & BIT_ULL(40)) ||
+ !(req->data1 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_COMBO_RULE_OFFSET;
+ if (req->dir == MCS_RX) {
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(idx);
+ mcs_reg_write(mcs, reg, req->data2);
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(idx);
+ mcs_reg_write(mcs, reg, req->data2);
+ }
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_MAC:
+ if (!(req->data0 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_MAC :
+ MCSX_PEX_TX_SLAVE_RULE_MAC;
+
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ break;
+ }
+
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
+
+ enb = mcs_reg_read(mcs, reg);
+ enb |= BIT_ULL(req->rule_idx);
+ mcs_reg_write(mcs, reg, enb);
+
+ return 0;
+}
+
+int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc)
+{
+ /* Check if the rsrc_id is mapped to PF/VF */
+ if (pf_map[rsrc_id] != pcifunc)
+ return -EINVAL;
+
+ rvu_free_rsrc(rsrc, rsrc_id);
+ pf_map[rsrc_id] = 0;
+ return 0;
+}
+
+/* Free all the CAM resources mapped to the PF */
+int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* free tcam entries */
+ for (id = 0; id < map->flow_ids.max; id++) {
+ if (map->flowid2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map,
+ id, pcifunc);
+ mcs_ena_dis_flowid_entry(mcs, id, dir, false);
+ }
+
+ /* free secy entries */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->secy2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->secy, map->secy2pf_map,
+ id, pcifunc);
+ mcs_clear_secy_plcy(mcs, id, dir);
+ }
+
+ /* free sc entries */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->sc2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc);
+
+ /* Disable SC CAM only on RX side */
+ if (dir == MCS_RX)
+ mcs_ena_dis_sc_cam_entry(mcs, id, false);
+ }
+
+ /* free sa entries */
+ for (id = 0; id < map->sa.max; id++) {
+ if (map->sa2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc);
+ }
+ return 0;
+}
+
+int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc)
+{
+ int rsrc_id;
+
+ rsrc_id = rvu_alloc_rsrc(rsrc);
+ if (rsrc_id < 0)
+ return -ENOMEM;
+ pf_map[rsrc_id] = pcifunc;
+ return rsrc_id;
+}
+
+int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id,
+ u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *flow_id = id;
+
+ id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *secy_id = id;
+
+ id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sc_id = id;
+
+ id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sa1_id = id;
+
+ id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sa2_id = id;
+
+ return 0;
+}
+
+static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+
+ if (mcs->tx_sa_active[sc])
+ /* SA_index1 was used and got expired */
+ event.sa_id = (val >> 9) & 0xFF;
+ else
+ /* SA_index0 was used and got expired */
+ event.sa_id = val & 0xFF;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val, status;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
+
+ /* The TX SA interrupt is raised only if autorekey is enabled.
+ * The MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit is toggled when
+ * one of the two SAs mapped to the SC expires. tx_sa_active = 0 implies
+ * the SA in SA_index1 expired; otherwise the SA in SA_index0 expired.
+ */
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+ /* Skip SCs with auto rekey disabled */
+ if (!((val >> 18) & 0x1))
+ continue;
+
+ status = (val >> 21) & 0x1;
+
+ /* Check if tx_sa_active status had changed */
+ if (status == mcs->tx_sa_active[sc])
+ continue;
+ /* tx_sa_active set means SA_index0 expired; otherwise SA_index1 expired */
+ if (status)
+ event.sa_id = val & 0xFF;
+ else
+ event.sa_id = (val >> 9) & 0xFF;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ int sa, reg;
+ u64 intr;
+
+ /* Check expired SAs */
+ for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) {
+ /* A set bit in *PN_THRESH_REACHEDX means the
+ * corresponding SA has expired.
+ */
+ intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg));
+ for (sa = 0; sa < 64; sa++) {
+ if (!(intr & BIT_ULL(sa)))
+ continue;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_RX_PN_THRESH_REACHED_INT;
+ event.sa_id = sa + (reg * 64);
+ event.pcifunc = mcs->rx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+ }
+}
+
+static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr)
+{
+ struct mcs_intr_event event = { 0 };
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ if (intr & MCS_CPM_RX_INT_SECTAG_V_EQ1)
+ event.intr_mask = MCS_CPM_RX_SECTAG_V_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SL_GTE48)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_SL_GTE48_INT;
+ if (intr & MCS_CPM_RX_INT_ES_EQ1_SC_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_PACKET_XPN_EQ0)
+ event.intr_mask |= MCS_CPM_RX_PACKET_XPN_EQ0_INT;
+
+ mcs_add_intr_wq_entry(mcs, &event);
+}
+
+static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
+{
+ struct mcs_intr_event event = { 0 };
+
+ if (!(intr & MCS_CPM_TX_INT_SA_NOT_VALID))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ event.intr_mask = MCS_CPM_TX_SA_NOT_VALID_INT;
+
+ mcs_add_intr_wq_entry(mcs, &event);
+}
+
+void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
+{
+ u64 val, reg;
+ int lmac;
+
+ if (!(intr & 0x6ULL))
+ return;
+
+ if (intr & BIT_ULL(1))
+ reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 :
+ MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0;
+ else
+ reg = (dir == MCS_RX) ? MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 :
+ MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0;
+ val = mcs_reg_read(mcs, reg);
+
+ /* A policy or data FIFO overflow occurred; report the affected LMACs */
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+ if (!(val & BIT_ULL(lmac)))
+ continue;
+ dev_warn(mcs->dev, "BBE: Policy or data overflow occurred on lmac:%d\n", lmac);
+ }
+}
+
+void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
+{
+ int lmac;
+
+ if (!(intr & 0xFFFFFULL))
+ return;
+
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+ if (intr & BIT_ULL(lmac))
+ dev_warn(mcs->dev, "PAB: overflow occurred on lmac:%d\n", lmac);
+ }
+}
+
+static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+{
+ struct mcs *mcs = (struct mcs *)mcs_irq;
+ u64 intr, cpm_intr, bbe_intr, pab_intr;
+
+ /* Disable the interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
+
+ /* Check which block raised the interrupt */
+ intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
+
+ /* CPM RX */
+ if (intr & MCS_CPM_RX_INT_ENA) {
+ /* Check for PN thresh interrupt bit */
+ cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT);
+
+ if (cpm_intr & MCS_CPM_RX_INT_PN_THRESH_REACHED)
+ mcs_rx_pn_thresh_reached_handler(mcs);
+
+ if (cpm_intr & MCS_CPM_RX_INT_ALL)
+ mcs_rx_misc_intr_handler(mcs, cpm_intr);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr);
+ }
+
+ /* CPM TX */
+ if (intr & MCS_CPM_TX_INT_ENA) {
+ cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT);
+
+ if (cpm_intr & MCS_CPM_TX_INT_PN_THRESH_REACHED) {
+ if (mcs->hw->mcs_blks > 1)
+ cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs);
+ else
+ cn10kb_mcs_tx_pn_thresh_reached_handler(mcs);
+ }
+
+ if (cpm_intr & MCS_CPM_TX_INT_SA_NOT_VALID)
+ mcs_tx_misc_intr_handler(mcs, cpm_intr);
+
+ if (cpm_intr & MCS_CPM_TX_INT_PACKET_XPN_EQ0) {
+ if (mcs->hw->mcs_blks > 1)
+ cnf10kb_mcs_tx_pn_wrapped_handler(mcs);
+ else
+ cn10kb_mcs_tx_pn_wrapped_handler(mcs);
+ }
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr);
+ }
+
+ /* BBE RX */
+ if (intr & MCS_BBE_RX_INT_ENA) {
+ bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
+ mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr);
+ }
+
+ /* BBE TX */
+ if (intr & MCS_BBE_TX_INT_ENA) {
+ bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
+ mcs->mcs_ops->mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr);
+ }
+
+ /* PAB RX */
+ if (intr & MCS_PAB_RX_INT_ENA) {
+ pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
+ mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr);
+ }
+
+ /* PAB TX */
+ if (intr & MCS_PAB_TX_INT_ENA) {
+ pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
+ mcs->mcs_ops->mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
+ }
+
+ /* Clear and enable the interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+
+ return IRQ_HANDLED;
+}
+
+static void *alloc_mem(struct mcs *mcs, int n)
+{
+ return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
+}
+
+static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
+{
+ struct hwinfo *hw = mcs->hw;
+ int err;
+
+ res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
+ if (!res->flowid2pf_map)
+ return -ENOMEM;
+
+ res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
+ if (!res->secy2pf_map)
+ return -ENOMEM;
+
+ res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
+ if (!res->sc2pf_map)
+ return -ENOMEM;
+
+ res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
+ if (!res->sa2pf_map)
+ return -ENOMEM;
+
+ res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
+ if (!res->flowid2secy_map)
+ return -ENOMEM;
+
+ res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES);
+ if (!res->ctrlpktrule2pf_map)
+ return -ENOMEM;
+
+ res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+ err = rvu_alloc_bitmap(&res->flow_ids);
+ if (err)
+ return err;
+
+ res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT;
+ err = rvu_alloc_bitmap(&res->secy);
+ if (err)
+ return err;
+
+ res->sc.max = hw->sc_entries;
+ err = rvu_alloc_bitmap(&res->sc);
+ if (err)
+ return err;
+
+ res->sa.max = hw->sa_entries;
+ err = rvu_alloc_bitmap(&res->sa);
+ if (err)
+ return err;
+
+ res->ctrlpktrule.max = MCS_MAX_CTRLPKT_RULES;
+ err = rvu_alloc_bitmap(&res->ctrlpktrule);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mcs_register_interrupts(struct mcs *mcs)
+{
+ int ret = 0;
+
+ mcs->num_vec = pci_msix_vec_count(mcs->pdev);
+
+ ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec,
+ mcs->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(mcs->dev, "MCS Request for %d msix vector failed err:%d\n",
+ mcs->num_vec, ret);
+ return ret;
+ }
+
+ ret = request_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec),
+ mcs_ip_intr_handler, 0, "MCS_IP", mcs);
+ if (ret) {
+ dev_err(mcs->dev, "MCS IP irq registration failed\n");
+ goto exit;
+ }
+
+ /* Enable MCS IP interrupts */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+
+ /* Enable CPM, BBE and PAB RX/TX interrupts */
+ mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB,
+ MCS_CPM_RX_INT_ENA | MCS_CPM_TX_INT_ENA |
+ MCS_BBE_RX_INT_ENA | MCS_BBE_TX_INT_ENA |
+ MCS_PAB_RX_INT_ENA | MCS_PAB_TX_INT_ENA);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
+
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xFFULL);
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xFFULL);
+
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xFFFFFULL);
+
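+ /* Per-SC shadow of the tx_sa_active bit, used by the TX PN-threshold and PN-wrap handlers to tell which SA expired */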
+ mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
+ if (!mcs->tx_sa_active) {
+ ret = -ENOMEM;
+ goto free_irq;
+ }
+
+ return ret;
+
+free_irq:
+ free_irq(pci_irq_vector(mcs->pdev, mcs->hw->ip_vec), mcs);
+exit:
+ pci_free_irq_vectors(mcs->pdev);
+ mcs->num_vec = 0;
+ return ret;
+}
+
+int mcs_get_blkcnt(void)
+{
+ struct mcs *mcs;
+ int idmax = -ENODEV;
+
+ /* Check whether an MCS block is present in hardware */
+ if (!pci_dev_present(mcs_id_table))
+ return 0;
+
+ list_for_each_entry(mcs, &mcs_list, mcs_list)
+ if (mcs->mcs_id > idmax)
+ idmax = mcs->mcs_id;
+
+ if (idmax < 0)
+ return 0;
+
+ return idmax + 1;
+}
+
+struct mcs *mcs_get_pdata(int mcs_id)
+{
+ struct mcs *mcs_dev;
+
+ list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
+ if (mcs_dev->mcs_id == mcs_id)
+ return mcs_dev;
+ }
+ return NULL;
+}
+
+void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
+{
+ u64 val = 0;
+
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id),
+ req->port_mode & MCS_PORT_MODE_MASK);
+
+ req->cstm_tag_rel_mode_sel &= 0x3;
+
+ if (mcs->hw->mcs_blks > 1) {
+ req->fifo_skid &= MCS_PORT_FIFO_SKID_MASK;
+ val = (u32)req->fifo_skid << 0x10;
+ val |= req->fifo_skid;
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val);
+ mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id),
+ req->cstm_tag_rel_mode_sel);
+ val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
+
+ if (req->custom_hdr_enb)
+ val |= BIT_ULL(req->port_id);
+ else
+ val &= ~BIT_ULL(req->port_id);
+
+ mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val);
+ } else {
+ val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id));
+ val |= (req->cstm_tag_rel_mode_sel << 2);
+ mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val);
+ }
+}
+
+void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp)
+{
+ u64 reg = 0;
+
+ rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) &
+ MCS_PORT_MODE_MASK;
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id);
+ rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK;
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id);
+ rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3;
+ if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id))
+ rsp->custom_hdr_enb = 1;
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id);
+ rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2;
+ }
+
+ rsp->port_id = req->port_id;
+ rsp->mcs_id = req->mcs_id;
+}
+
+void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp)
+{
+ u64 reg = 0, val = 0;
+ u8 idx;
+
+ for (idx = 0; idx < MCS_MAX_CUSTOM_TAGS; idx++) {
+ if (mcs->hw->mcs_blks > 1)
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(idx) :
+ MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(idx);
+ else
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_VLAN_CFGX(idx) :
+ MCSX_PEX_TX_SLAVE_VLAN_CFGX(idx);
+
+ val = mcs_reg_read(mcs, reg);
+ if (mcs->hw->mcs_blks > 1) {
+ rsp->cstm_etype[idx] = val & GENMASK(15, 0);
+ rsp->cstm_indx[idx] = (val >> 0x16) & 0x3;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_ETYPE_ENABLE :
+ MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+ rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF;
+ } else {
+ rsp->cstm_etype[idx] = (val >> 0x1) & GENMASK(15, 0);
+ rsp->cstm_indx[idx] = (val >> 0x11) & 0x3;
+ rsp->cstm_etype_en |= (val & 0x1) << idx;
+ }
+ }
+
+ rsp->mcs_id = req->mcs_id;
+ rsp->dir = req->dir;
+}
+
+void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
+{
+ u64 reg = MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id);
+
+ mcs_reg_write(mcs, reg, reset & 0x1);
+}
+
+/* Set lmac to bypass/operational mode */
+void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
+{
+ u64 reg;
+ int id = lmac_id * 2;
+
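+ /* Each LMAC owns two consecutive channel-config entries; program both with the same mode */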
+ reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(id);
+ mcs_reg_write(mcs, reg, (u64)mode);
+ reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG((id + 1));
+ mcs_reg_write(mcs, reg, (u64)mode);
+}
+
+void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn)
+{
+ u64 reg;
+
+ if (pn->dir == MCS_RX)
+ reg = pn->xpn ? MCSX_CPM_RX_SLAVE_XPN_THRESHOLD : MCSX_CPM_RX_SLAVE_PN_THRESHOLD;
+ else
+ reg = pn->xpn ? MCSX_CPM_TX_SLAVE_XPN_THRESHOLD : MCSX_CPM_TX_SLAVE_PN_THRESHOLD;
+
+ mcs_reg_write(mcs, reg, pn->threshold);
+}
+
+void cn10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+ u64 reg, val;
+
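+ /* VLAN_CFG value (per the fields below): bit 0 = enable, bits [16:1] = TPID; bit 17 is set for the CTag entry and bit 18 for the STag entry */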
+ /* VLAN CTag */
+ val = BIT_ULL(0) | (0x8100ull & 0xFFFF) << 1 | BIT_ULL(17);
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* VLAN STag */
+ val = BIT_ULL(0) | (0x88a8ull & 0xFFFF) << 1 | BIT_ULL(18);
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(1);
+ mcs_reg_write(mcs, reg, val);
+}
+
+static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
+{
+ u64 reg;
+
+ /* Port mode 25GB */
+ reg = MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0xe000e);
+ return;
+ }
+
+ reg = MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0);
+}
+
+int mcs_set_lmac_channels(int mcs_id, u16 base)
+{
+ struct mcs *mcs;
+ int lmac;
+ u64 cfg;
+
+ mcs = mcs_get_pdata(mcs_id);
+ if (!mcs)
+ return -ENODEV;
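+ /* Give each LMAC a 16-channel window: range = ilog2(16) and the base advances by 16 per LMAC */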
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+ cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac));
+ cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK);
+ cfg |= FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16));
+ cfg |= FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base);
+ mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg);
+ base += 16;
+ }
+ return 0;
+}
+
+static int mcs_x2p_calibration(struct mcs *mcs)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(20000);
+ int i, err = 0;
+ u64 val;
+
+ /* set X2P calibration */
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ val |= BIT_ULL(5);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+ /* Wait for calibration to complete */
+ while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
+ if (time_before(jiffies, timeout)) {
+ usleep_range(80, 100);
+ continue;
+ } else {
+ err = -EBUSY;
+ dev_err(mcs->dev, "MCS X2P calibration failed..ignoring\n");
+ return err;
+ }
+ }
+
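+ /* Bits 1..mcs_x2p_intf of RX_GBL_STATUS report the per-interface calibration response */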
+ val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
+ for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) {
+ if (val & BIT_ULL(1 + i))
+ continue;
+ err = -EBUSY;
+ dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i);
+ }
+ /* Clear X2P calibration */
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));
+
+ return err;
+}
+
+static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
+{
+ u64 val;
+
+ /* Set MCS to external bypass */
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ if (bypass)
+ val |= BIT_ULL(6);
+ else
+ val &= ~BIT_ULL(6);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+}
+
+static void mcs_global_cfg(struct mcs *mcs)
+{
+ /* Disable external bypass */
+ mcs_set_external_bypass(mcs, false);
+
+ /* Reset TX/RX stats memory */
+ mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F);
+ mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F);
+
+ /* Set MCS to perform standard IEEE802.1AE macsec processing */
+ if (mcs->hw->mcs_blks == 1) {
+ mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
+ return;
+ }
+
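+ /* CNF10K-B only: CAL_ENTRY = 0xe4 (2-bit slots 3,2,1,0) with CAL_LEN = 4, presumably a simple round-robin calendar */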
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
+}
+
+void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+ struct hwinfo *hw = mcs->hw;
+
+ hw->tcam_entries = 128; /* TCAM entries */
+ hw->secy_entries = 128; /* SecY entries */
+ hw->sc_entries = 128; /* SC CAM entries */
+ hw->sa_entries = 256; /* SA entries */
+ hw->lmac_cnt = 20; /* lmacs/ports per mcs block */
+ hw->mcs_x2p_intf = 5; /* X2P calibration interfaces */
+ hw->mcs_blks = 1; /* MCS blocks */
+ hw->ip_vec = MCS_CN10KB_INT_VEC_IP; /* IP vector */
+}
+
+static struct mcs_ops cn10kb_mcs_ops = {
+ .mcs_set_hw_capabilities = cn10kb_mcs_set_hw_capabilities,
+ .mcs_parser_cfg = cn10kb_mcs_parser_cfg,
+ .mcs_tx_sa_mem_map_write = cn10kb_mcs_tx_sa_mem_map_write,
+ .mcs_rx_sa_mem_map_write = cn10kb_mcs_rx_sa_mem_map_write,
+ .mcs_flowid_secy_map = cn10kb_mcs_flowid_secy_map,
+ .mcs_bbe_intr_handler = cn10kb_mcs_bbe_intr_handler,
+ .mcs_pab_intr_handler = cn10kb_mcs_pab_intr_handler,
+};
+
+static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ int lmac, err = 0;
+ struct mcs *mcs;
+
+ mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
+ if (!mcs)
+ return -ENOMEM;
+
+ mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
+ if (!mcs->hw)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto exit;
+ }
+
+ mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!mcs->reg_base) {
+ dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ pci_set_drvdata(pdev, mcs);
+ mcs->pdev = pdev;
+ mcs->dev = &pdev->dev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
+ mcs->mcs_ops = &cn10kb_mcs_ops;
+ else
+ mcs->mcs_ops = cnf10kb_get_mac_ops();
+
+ /* Set hardware capabilities */
+ mcs->mcs_ops->mcs_set_hw_capabilities(mcs);
+
+ mcs_global_cfg(mcs);
+
+ /* Perform X2P calibration */
+ err = mcs_x2p_calibration(mcs);
+ if (err)
+ goto err_x2p;
+
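+ /* The MCS block ID is derived from bits [26:24] of the BAR0 address (MCS_ID_MASK is 0x7) */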
+ mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+ & MCS_ID_MASK;
+
+ /* Set mcs tx side resources */
+ err = mcs_alloc_struct_mem(mcs, &mcs->tx);
+ if (err)
+ goto err_x2p;
+
+ /* Set mcs rx side resources */
+ err = mcs_alloc_struct_mem(mcs, &mcs->rx);
+ if (err)
+ goto err_x2p;
+
+ /* Per-port configuration */
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
+ mcs_lmac_init(mcs, lmac);
+
+ /* Parser configuration */
+ mcs->mcs_ops->mcs_parser_cfg(mcs);
+
+ err = mcs_register_interrupts(mcs);
+ if (err)
+ goto exit;
+
+ list_add(&mcs->mcs_list, &mcs_list);
+ mutex_init(&mcs->stats_lock);
+
+ return 0;
+
+err_x2p:
+ /* Enable external bypass */
+ mcs_set_external_bypass(mcs, true);
+exit:
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void mcs_remove(struct pci_dev *pdev)
+{
+ struct mcs *mcs = pci_get_drvdata(pdev);
+
+ /* Set MCS to external bypass */
+ mcs_set_external_bypass(mcs, true);
+ free_irq(pci_irq_vector(pdev, mcs->hw->ip_vec), mcs);
+ pci_free_irq_vectors(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+struct pci_driver mcs_driver = {
+ .name = DRV_NAME,
+ .id_table = mcs_id_table,
+ .probe = mcs_probe,
+ .remove = mcs_remove,
+};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
new file mode 100644
index 000000000000..0f89dcb76465
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#ifndef MCS_H
+#define MCS_H
+
+#include <linux/bits.h>
+#include "rvu.h"
+
+#define PCI_DEVID_CN10K_MCS 0xA096
+
+#define MCSX_LINK_LMAC_RANGE_MASK GENMASK_ULL(19, 16)
+#define MCSX_LINK_LMAC_BASE_MASK GENMASK_ULL(11, 0)
+
+#define MCS_ID_MASK 0x7
+#define MCS_MAX_PFS 128
+
+#define MCS_PORT_MODE_MASK 0x3
+#define MCS_PORT_FIFO_SKID_MASK 0x3F
+#define MCS_MAX_CUSTOM_TAGS 0x8
+
+#define MCS_CTRLPKT_ETYPE_RULE_MAX 8
+#define MCS_CTRLPKT_DA_RULE_MAX 8
+#define MCS_CTRLPKT_DA_RANGE_RULE_MAX 4
+#define MCS_CTRLPKT_COMBO_RULE_MAX 4
+#define MCS_CTRLPKT_MAC_RULE_MAX 1
+
+#define MCS_MAX_CTRLPKT_RULES (MCS_CTRLPKT_ETYPE_RULE_MAX + \
+ MCS_CTRLPKT_DA_RULE_MAX + \
+ MCS_CTRLPKT_DA_RANGE_RULE_MAX + \
+ MCS_CTRLPKT_COMBO_RULE_MAX + \
+ MCS_CTRLPKT_MAC_RULE_MAX)
+
+#define MCS_CTRLPKT_ETYPE_RULE_OFFSET 0
+#define MCS_CTRLPKT_DA_RULE_OFFSET 8
+#define MCS_CTRLPKT_DA_RANGE_RULE_OFFSET 16
+#define MCS_CTRLPKT_COMBO_RULE_OFFSET 20
+#define MCS_CTRLPKT_MAC_EN_RULE_OFFSET 24
+
+/* Reserved resources for default bypass entry */
+#define MCS_RSRC_RSVD_CNT 1
+
+/* MCS Interrupt Vector */
+#define MCS_CNF10KB_INT_VEC_IP 0x13
+#define MCS_CN10KB_INT_VEC_IP 0x53
+
+#define MCS_MAX_BBE_INT 8ULL
+#define MCS_BBE_INT_MASK 0xFFULL
+
+#define MCS_MAX_PAB_INT 8ULL
+#define MCS_PAB_INT_MASK 0xFULL
+
+#define MCS_BBE_RX_INT_ENA BIT_ULL(0)
+#define MCS_BBE_TX_INT_ENA BIT_ULL(1)
+#define MCS_CPM_RX_INT_ENA BIT_ULL(2)
+#define MCS_CPM_TX_INT_ENA BIT_ULL(3)
+#define MCS_PAB_RX_INT_ENA BIT_ULL(4)
+#define MCS_PAB_TX_INT_ENA BIT_ULL(5)
+
+#define MCS_CPM_TX_INT_PACKET_XPN_EQ0 BIT_ULL(0)
+#define MCS_CPM_TX_INT_PN_THRESH_REACHED BIT_ULL(1)
+#define MCS_CPM_TX_INT_SA_NOT_VALID BIT_ULL(2)
+
+#define MCS_CPM_RX_INT_SECTAG_V_EQ1 BIT_ULL(0)
+#define MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 BIT_ULL(1)
+#define MCS_CPM_RX_INT_SL_GTE48 BIT_ULL(2)
+#define MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 BIT_ULL(3)
+#define MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 BIT_ULL(4)
+#define MCS_CPM_RX_INT_PACKET_XPN_EQ0 BIT_ULL(5)
+#define MCS_CPM_RX_INT_PN_THRESH_REACHED BIT_ULL(6)
+
+#define MCS_CPM_RX_INT_ALL (MCS_CPM_RX_INT_SECTAG_V_EQ1 | \
+ MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 | \
+ MCS_CPM_RX_INT_SL_GTE48 | \
+ MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 | \
+ MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 | \
+ MCS_CPM_RX_INT_PACKET_XPN_EQ0 | \
+ MCS_CPM_RX_INT_PN_THRESH_REACHED)
+
+struct mcs_pfvf {
+ u64 intr_mask; /* Enabled Interrupt mask */
+};
+
+struct mcs_intr_event {
+ u16 pcifunc;
+ u64 intr_mask;
+ u64 sa_id;
+ u8 mcs_id;
+ u8 lmac_id;
+};
+
+struct mcs_intrq_entry {
+ struct list_head node;
+ struct mcs_intr_event intr_event;
+};
+
+struct secy_mem_map {
+ u8 flow_id;
+ u8 secy;
+ u8 ctrl_pkt;
+ u8 sc;
+ u64 sci;
+};
+
+struct mcs_rsrc_map {
+ u16 *flowid2pf_map;
+ u16 *secy2pf_map;
+ u16 *sc2pf_map;
+ u16 *sa2pf_map;
+ u16 *flowid2secy_map; /* bitmap flowid mapped to secy*/
+ u16 *ctrlpktrule2pf_map;
+ struct rsrc_bmap flow_ids;
+ struct rsrc_bmap secy;
+ struct rsrc_bmap sc;
+ struct rsrc_bmap sa;
+ struct rsrc_bmap ctrlpktrule;
+};
+
+struct hwinfo {
+ u8 tcam_entries;
+ u8 secy_entries;
+ u8 sc_entries;
+ u16 sa_entries;
+ u8 mcs_x2p_intf;
+ u8 lmac_cnt;
+ u8 mcs_blks;
+ unsigned long lmac_bmap; /* bitmap of enabled mcs lmac */
+ u16 ip_vec;
+};
+
+struct mcs {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct hwinfo *hw;
+ struct mcs_rsrc_map tx;
+ struct mcs_rsrc_map rx;
+ u16 pf_map[MCS_MAX_PFS]; /* List of PCIFUNC mapped to MCS */
+ u8 mcs_id;
+ struct mcs_ops *mcs_ops;
+ struct list_head mcs_list;
+ /* Lock for mcs stats */
+ struct mutex stats_lock;
+ struct mcs_pfvf *pf;
+ struct mcs_pfvf *vf;
+ u16 num_vec;
+ void *rvu;
+ u16 *tx_sa_active;
+};
+
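+/* Silicon specific callbacks; CN10K-B and CNF10K-B register their own ops */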
+struct mcs_ops {
+ void (*mcs_set_hw_capabilities)(struct mcs *mcs);
+ void (*mcs_parser_cfg)(struct mcs *mcs);
+ void (*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+ void (*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+ void (*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
+ void (*mcs_bbe_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+ void (*mcs_pab_intr_handler)(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+};
+
+extern struct pci_driver mcs_driver;
+
+static inline void mcs_reg_write(struct mcs *mcs, u64 offset, u64 val)
+{
+ writeq(val, mcs->reg_base + offset);
+}
+
+static inline u64 mcs_reg_read(struct mcs *mcs, u64 offset)
+{
+ return readq(mcs->reg_base + offset);
+}
+
+/* MCS APIs */
+struct mcs *mcs_get_pdata(int mcs_id);
+int mcs_get_blkcnt(void);
+int mcs_set_lmac_channels(int mcs_id, u16 base);
+int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc);
+int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc);
+int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flowid, u8 *secy_id,
+ u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir);
+int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc);
+void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir);
+void mcs_ena_dis_flowid_entry(struct mcs *mcs, int id, int dir, int ena);
+void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int id, int ena);
+void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int id, int dir);
+void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int id, int dir);
+void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id);
+void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa, int dir);
+void mcs_map_sc_to_sa(struct mcs *mcs, u64 *sa_map, int sc, int dir);
+void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir);
+void mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn);
+int mcs_install_flowid_bypass_entry(struct mcs *mcs);
+void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode);
+void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset);
+void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req);
+void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp);
+void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp);
+int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
+int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
+int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
+
+/* CN10K-B APIs */
+void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void cn10kb_mcs_parser_cfg(struct mcs *mcs);
+void cn10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+void cn10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+
+/* CNF10K-B APIs */
+struct mcs_ops *cnf10kb_get_mac_ops(void);
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
+void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir);
+
+/* Stats APIs */
+void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
+void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir);
+void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats, int id, int dir);
+void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats, int id, int dir);
+void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir);
+int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir);
+int mcs_set_force_clk_en(struct mcs *mcs, bool set);
+
+int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event);
+
+#endif /* MCS_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
new file mode 100644
index 000000000000..9f9b904ab2cd
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include "mcs.h"
+#include "mcs_reg.h"
+
+static struct mcs_ops cnf10kb_mcs_ops = {
+ .mcs_set_hw_capabilities = cnf10kb_mcs_set_hw_capabilities,
+ .mcs_parser_cfg = cnf10kb_mcs_parser_cfg,
+ .mcs_tx_sa_mem_map_write = cnf10kb_mcs_tx_sa_mem_map_write,
+ .mcs_rx_sa_mem_map_write = cnf10kb_mcs_rx_sa_mem_map_write,
+ .mcs_flowid_secy_map = cnf10kb_mcs_flowid_secy_map,
+ .mcs_bbe_intr_handler = cnf10kb_mcs_bbe_intr_handler,
+ .mcs_pab_intr_handler = cnf10kb_mcs_pab_intr_handler,
+};
+
+struct mcs_ops *cnf10kb_get_mac_ops(void)
+{
+ return &cnf10kb_mcs_ops;
+}
+
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+ struct hwinfo *hw = mcs->hw;
+
+ hw->tcam_entries = 64; /* TCAM entries */
+ hw->secy_entries = 64; /* SecY entries */
+ hw->sc_entries = 64; /* SC CAM entries */
+ hw->sa_entries = 128; /* SA entries */
+ hw->lmac_cnt = 4; /* lmacs/ports per mcs block */
+ hw->mcs_x2p_intf = 1; /* x2p calibration intf */
+ hw->mcs_blks = 7; /* MCS blocks */
+ hw->ip_vec = MCS_CNF10KB_INT_VEC_IP; /* IP vector */
+}
+
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+ u64 reg, val;
+
+ /* VLAN Ctag */
+ val = (0x8100ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(22);
+
+ reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* VLAN STag */
+ val = (0x88a8ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(23);
+
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* Enable custom tags 0 and 1 and sectag */
+ val = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(12);
+
+ reg = MCSX_PEX_RX_SLAVE_ETYPE_ENABLE;
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
+{
+ u64 reg, val;
+
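+ /* SECY_MAP_MEM layout: secy[5:0], ctrl_pkt[6] and, on TX, sc[12:7] */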
+ val = (map->secy & 0x3F) | (map->ctrl_pkt & 0x1) << 6;
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
+ } else {
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
+ mcs_reg_write(mcs, reg, map->sci);
+ val |= (map->sc & 0x3F) << 7;
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(map->flow_id);
+ }
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
+{
+ u64 reg, val;
+
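+ /* SA_MAP_MEM_0 layout: sa_index0[6:0], sa_index1[13:7] */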
+ val = (map->sa_index0 & 0x7F) | (map->sa_index1 & 0x7F) << 7;
+
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0;
+ val = mcs_reg_read(mcs, reg);
+
+ if (map->rekey_ena)
+ val |= BIT_ULL(map->sc_id);
+ else
+ val &= ~BIT_ULL(map->sc_id);
+
+ mcs_reg_write(mcs, reg, val);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(map->sc_id), map->sa_index0_vld);
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(map->sc_id), map->sa_index1_vld);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(map->sc_id), map->tx_sa_active);
+}
+
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
+{
+ u64 val, reg;
+
+ val = (map->sa_index & 0x7F) | (map->sa_in_use << 7);
+
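+ /* RX SA map is indexed per (SC, AN) pair: four ANs per SC */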
+ reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
+ mcs_reg_write(mcs, reg, val);
+}
+
+int mcs_set_force_clk_en(struct mcs *mcs, bool set)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(2000);
+ u64 val;
+
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+
+ if (set) {
+ val |= BIT_ULL(4);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+ /* Poll till mcsx_mil_ip_gbl_status.mcs_ip_stats_ready value is 1 */
+ while (!(mcs_reg_read(mcs, MCSX_MIL_IP_GBL_STATUS) & BIT_ULL(0))) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(mcs->dev, "MCS set force clk enable failed\n");
+ break;
+ }
+ }
+ } else {
+ val &= ~BIT_ULL(4);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+ }
+
+ return 0;
+}
+
+/* TX SA interrupt is raised only if auto rekey is enabled.
+ * The MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit toggles when
+ * one of the two SAs mapped to the SC expires. tx_sa_active=0 implies
+ * the SA in SA_index1 expired, otherwise the SA in SA_index0 expired.
+ */
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event;
+ struct rsrc_bmap *sc_bmap;
+ unsigned long rekey_ena;
+ u64 val, sa_status;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
+
+ rekey_ena = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0);
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ /* Skip SCs for which auto rekey is not enabled */
+ if (!test_bit(sc, &rekey_ena))
+ continue;
+ sa_status = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(sc));
+ /* Check if tx_sa_active status has changed */
+ if (sa_status == mcs->tx_sa_active[sc])
+ continue;
+
+ /* If tx_sa_active is set, SA_index0 has expired; otherwise SA_index1 */
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+ if (sa_status)
+ event.sa_id = val & 0x7F;
+ else
+ event.sa_id = (val >> 7) & 0x7F;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+
+ if (mcs->tx_sa_active[sc])
+ /* SA_index1 was used and got expired */
+ event.sa_id = (val >> 7) & 0x7F;
+ else
+ /* SA_index0 was used and got expired */
+ event.sa_id = val & 0x7F;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+void cnf10kb_mcs_bbe_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_BBE_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_BBE_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ /* Lower nibble denotes data fifo overflow interrupts and
+ * upper nibble indicates policy fifo overflow interrupts.
+ */
+ if (intr & 0xFULL)
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_DFIFO_OVERFLOW_INT :
+ MCS_BBE_TX_DFIFO_OVERFLOW_INT;
+ else
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
+ MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
+
+ /* Report the lmac_id which ran into the BBE fatal error */
+ event.lmac_id = i & 0x3ULL;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+void cnf10kb_mcs_pab_intr_handler(struct mcs *mcs, u64 intr,
+ enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_PAB_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_PAB_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_PAB_RX_CHAN_OVERFLOW_INT :
+ MCS_PAB_TX_CHAN_OVERFLOW_INT;
+
+ /* Report the lmac_id which ran into the PAB fatal error */
+ event.lmac_id = i;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
new file mode 100644
index 000000000000..f3ab01fc363c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
@@ -0,0 +1,1106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#ifndef MCS_REG_H
+#define MCS_REG_H
+
+#include <linux/bits.h>
+
+/* Registers */
+#define MCSX_IP_MODE 0x900c8ull
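+/* Most register offsets differ between CN10K-B and CNF10K-B. The macros
+ * below are statement expressions which use the caller's local 'mcs'
+ * pointer: mcs->hw->mcs_blks > 1 selects the CNF10K-B offset.
+ */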
+#define MCSX_MCS_TOP_SLAVE_PORT_RESET(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x408ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa28ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+
+#define MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x808ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa68ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_MIL_GLOBAL ({ \
+ u64 offset; \
+ \
+ offset = 0x80000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60000ull; \
+ offset; })
+
+#define MCSX_MIL_RX_LMACX_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x900a8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x700a8ull; \
+ offset += (a) * 0x800ull; \
+ offset; })
+
+#define MCSX_HIL_GLOBAL ({ \
+ u64 offset; \
+ \
+ offset = 0xc0000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa0000ull; \
+ offset; })
+
+#define MCSX_LINK_LMACX_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x90000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x70000ull; \
+ offset += (a) * 0x800ull; \
+ offset; })
+
+#define MCSX_MIL_RX_GBL_STATUS ({ \
+ u64 offset; \
+ \
+ offset = 0x800c8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x600c8ull; \
+ offset; })
+
+#define MCSX_MIL_IP_GBL_STATUS ({ \
+ u64 offset; \
+ \
+ offset = 0x800d0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x600d0ull; \
+ offset; })
+
+/* PAB */
+#define MCSX_PAB_RX_SLAVE_PORT_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1718ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x280ull; \
+ offset += (a) * 0x40ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PORT_CFGX(a) (0x2930ull + (a) * 0x40ull)
+
+/* PEX registers */
+#define MCSX_PEX_RX_SLAVE_VLAN_CFGX(a) (0x3b58ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_VLAN_CFGX(a) (0x46f8ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(a) (0x788ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_PORT_CONFIG(a) (0x4738ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_PORT_CFGX(a) (0x3b98ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3fc0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x558ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x598ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4048ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5e0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4080ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x648ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4088ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x650ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4090ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x658ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_MAC ({ \
+ u64 offset; \
+ \
+ offset = 0x40e0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6d8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_ENABLE ({ \
+ u64 offset; \
+ \
+ offset = 0x40e8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6e0ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4b60ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x7d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4ba0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4be0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x858ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4be8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x860ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c20ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8c8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c28ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8d0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c30ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_MAC ({ \
+ u64 offset; \
+ \
+ offset = 0x4c80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x958ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_ENABLE ({ \
+ u64 offset; \
+ \
+ offset = 0x4c88ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x960ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION ({ \
+ u64 offset; \
+ \
+ offset = 0x3b50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4c0ull; \
+ offset; })
+
+/* CNF10K-B */
+#define MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(a) (0x4c8ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(a) (0x748ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_ETYPE_ENABLE 0x6e8ull
+#define MCSX_PEX_TX_SLAVE_ETYPE_ENABLE 0x968ull
+
+/* BBE */
+#define MCSX_BBE_RX_SLAVE_PADDING_CTL 0xe08ull
+#define MCSX_BBE_TX_SLAVE_PADDING_CTL 0x12f8ull
+#define MCSX_BBE_RX_SLAVE_CAL_ENTRY 0x180ull
+#define MCSX_BBE_RX_SLAVE_CAL_LEN 0x188ull
+#define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a) (0x290ull + (a) * 0x40ull)
+#define MCSX_BBE_RX_SLAVE_DFIFO_OVERFLOW_0 0xe20
+#define MCSX_BBE_TX_SLAVE_DFIFO_OVERFLOW_0 0x1298
+#define MCSX_BBE_RX_SLAVE_PLFIFO_OVERFLOW_0 0xe40
+#define MCSX_BBE_TX_SLAVE_PLFIFO_OVERFLOW_0 0x12b8
+#define MCSX_BBE_RX_SLAVE_BBE_INT ({ \
+ u64 offset; \
+ \
+ offset = 0xe00ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x160ull; \
+ offset; })
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0xe08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x168ull; \
+ offset; })
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0xe08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x178ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x1278ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1e0ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x1278ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1f8ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x1280ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1e8ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x16f0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x260ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x268ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x278ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x2908ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x380ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x2910ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x388ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x398ull; \
+ offset; })
+
+/* CPM registers */
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x30740ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x3bf8ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x34740ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x43f8ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0 ({ \
+ u64 offset; \
+ \
+ offset = 0x30700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x3bd8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SC_CAMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x38780ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4c08ull; \
+ offset += (a) * 0x8ull + (b) * 0x10ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SC_CAM_ENA(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x38740ull + (a) * 0x8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4bf8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23ee0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbd0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = (0x246e0ull + (a) * 0x10ull); \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = (0xdd0ull + (a) * 0x8ull); \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23E90ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbb0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_MAP_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x256e0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfd0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x27700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x17d8ull; \
+ offset += (a) * 0x8ull + (b) * 0x40ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x2f700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x37d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_XPN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x23e40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xb90ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_PN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x23e48ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xb98ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23e50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xba0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1 0x30708ull
+#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(a) (0x246e8ull + (a) * 0x10ull)
+
+/* TX registers */
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x51d50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa7c0ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x55d50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xafc0ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0 ({ \
+ u64 offset; \
+ \
+ offset = 0x51d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa7a0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3e508ull + (a) * 0x8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5550ull + (a) * 0x10ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3ed08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5950ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4c0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5538ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3fd10ull + (a) * 0x10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6150ull + (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x40d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x63a0ull; \
+ offset += (a) * 0x8ull + (b) * 0x80ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x50d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa3a0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_XPN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4b0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5528ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_PN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5530ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(a) (0x3fd18ull + (a) * 0x10ull)
+#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(a) (0x5558ull + (a) * 0x10ull)
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1 0x51d18ull
+#define MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(a) (0x5b50 + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(a) (0x5d50 + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(a) (0x5f50 + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0 0x5500ull
+
+/* CSE */
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x9e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc218ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x9680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc018ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x6e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x8e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbe18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x8680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xca18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x7e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x6680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x7680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc618ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x5e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xdc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(a)({ \
+ u64 offset; \
+ \
+ offset = 0x5680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xda18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xd680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xce18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16a80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec38ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16880ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xfe80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xde18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x10680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x10e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe218ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xae80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xc680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd618ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xce80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xbe80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xcc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_SLAVE_CTRL ({ \
+ u64 offset; \
+ \
+ offset = 0x52a0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x9c0ull; \
+ offset; })
+
+#define MCSX_CSE_RX_SLAVE_STATS_CLEAR ({ \
+ u64 offset; \
+ \
+ offset = 0x52b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x9d8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a) (0xe680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a) (0xde80ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(a) (0xa680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a) (0xd218 + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) (0xd018ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(a) (0xee80ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a) (0xb680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) (0xf680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a) (0x12680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a) (0x13680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(a) (0x11680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(a) (0x14680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(a) (0xec58ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(a) (0xea18ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(a) (0xe618ull + (a) * 0x8ull)
+
+/* CSE TX */
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCOMMONOCTETSX(a) (0x18440ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1c440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf478ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1bc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf278ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x19440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xee78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1b440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf078ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1ac40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfc78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1a440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfa78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x18c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf678ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x19c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf878ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x17c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10878ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x17440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10678ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSCTRLPORTDISABLEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1e440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfe78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23240ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10ed8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x22c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10e98ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x22e40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10e78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x20440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10c78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1fc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10a78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x110d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1dc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10278ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1d440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10478ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1cc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10078ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_SLAVE_CTRL ({ \
+ u64 offset; \
+ \
+ offset = 0x54a0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa00ull; \
+ offset; })
+
+#define MCSX_CSE_TX_SLAVE_STATS_CLEAR ({ \
+ u64 offset; \
+ \
+ offset = 0x54b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa18ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(a) (0x1f440ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(a) (0x1ec40ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSEARLYPREEMPTERRX(a) (0x10eb8ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(a) (0x21c40ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(a) (0x20c40ull + (a) * 0x8ull)
+
+#define MCSX_IP_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x80028ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60028ull; \
+ offset; })
+
+#define MCSX_IP_INT_ENA_W1S ({ \
+ u64 offset; \
+ \
+ offset = 0x80040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60040ull; \
+ offset; })
+
+#define MCSX_IP_INT_ENA_W1C ({ \
+ u64 offset; \
+ \
+ offset = 0x80038ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60038ull; \
+ offset; })
+
+#define MCSX_TOP_SLAVE_INT_SUM ({ \
+ u64 offset; \
+ \
+ offset = 0xc20ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xab8ull; \
+ offset; })
+
+#define MCSX_TOP_SLAVE_INT_SUM_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0xc28ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xac0ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_RX_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x23c00ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x0ad8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_RX_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x23c08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xae0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_TX_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x3d490ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x54a0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_TX_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x3d498ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x54a8ull; \
+ offset; })
+
+#endif /* MCS_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
new file mode 100644
index 000000000000..dfd23580e3b8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -0,0 +1,926 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mcs.h"
+#include "rvu.h"
+#include "mcs_reg.h"
+#include "lmac_common.h"
+
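+/* Expand MBOX_UP_MCS_MESSAGES into otx2_mbox_alloc_msg_<name>() helpers
+ * used to build AF -> PF "up" notifications.
+ */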
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_UP_MCS_MESSAGES
+#undef M
+
+void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena)
+{
+ struct mcs *mcs;
+ u64 cfg;
+ u8 port;
+
+ if (!rvu->mcs_blk_cnt)
+ return;
+
+ /* When PTP is enabled, RPM appends an 8B header to all
+ * RX packets. MCS PEX needs to be configured to skip
+ * these 8 bytes during packet parsing.
+ */
+
+ /* CNF10K-B */
+ if (rvu->mcs_blk_cnt > 1) {
+ mcs = mcs_get_pdata(rpm_id);
+ cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
+ if (ena)
+ cfg |= BIT_ULL(lmac_id);
+ else
+ cfg &= ~BIT_ULL(lmac_id);
+ mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, cfg);
+ return;
+ }
+ /* CN10KB */
+ mcs = mcs_get_pdata(0);
+ port = (rpm_id * rvu->hw->lmac_per_cgx) + lmac_id;
+ cfg = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port));
+ if (ena)
+ cfg |= BIT_ULL(0);
+ else
+ cfg &= ~BIT_ULL(0);
+ mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PORT_CFGX(port), cfg);
+}
+
+int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
+ struct mcs_set_lmac_mode *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap)
+ mcs_set_lmac_mode(mcs, req->lmac_id, req->mode);
+
+ return 0;
+}
+
+int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
+{
+ struct mcs_intrq_entry *qentry;
+ u16 pcifunc = event->pcifunc;
+ struct rvu *rvu = mcs->rvu;
+ struct mcs_pfvf *pfvf;
+
+ /* Check if it is PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+
+ event->intr_mask &= pfvf->intr_mask;
+
+ /* Check PF/VF interrupt notification is enabled */
+ if (!(pfvf->intr_mask && event->intr_mask))
+ return 0;
+
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+
+ qentry->intr_event = *event;
+ spin_lock(&rvu->mcs_intrq_lock);
+ list_add_tail(&qentry->node, &rvu->mcs_intrq_head);
+ spin_unlock(&rvu->mcs_intrq_lock);
+ queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work);
+
+ return 0;
+}
+
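+/* Forward an MCS interrupt event to the owning PF over the AF -> PF
+ * mailbox and wait for its response.
+ */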
+static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
+{
+ struct mcs_intr_info *req;
+ int err, pf;
+
+ pf = rvu_get_pf(event->pcifunc);
+
+ req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
+ if (!req)
+ return -ENOMEM;
+
+ req->mcs_id = event->mcs_id;
+ req->intr_mask = event->intr_mask;
+ req->sa_id = event->sa_id;
+ req->hdr.pcifunc = event->pcifunc;
+ req->lmac_id = event->lmac_id;
+
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+ if (err)
+ dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
+
+ return 0;
+}
+
+static void mcs_intr_handler_task(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, mcs_intr_work);
+ struct mcs_intrq_entry *qentry;
+ struct mcs_intr_event *event;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&rvu->mcs_intrq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->mcs_intrq_head,
+ struct mcs_intrq_entry,
+ node);
+ if (qentry)
+ list_del(&qentry->node);
+
+ spin_unlock_irqrestore(&rvu->mcs_intrq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->intr_event;
+
+ mcs_notify_pfvf(event, rvu);
+ kfree(qentry);
+ } while (1);
+}
+
+int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
+ struct mcs_intr_cfg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_pfvf *pfvf;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* Check if it is PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+
+ mcs->pf_map[0] = pcifunc;
+ pfvf->intr_mask = req->intr_mask;
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_hw_info(struct rvu *rvu,
+ struct msg_req *req,
+ struct mcs_hw_info *rsp)
+{
+ struct mcs *mcs;
+
+ if (!rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_NOT_MAPPED;
+
+ /* MCS resources are the same across all blocks */
+ mcs = mcs_get_pdata(0);
+ rsp->num_mcs_blks = rvu->mcs_blk_cnt;
+ rsp->tcam_entries = mcs->hw->tcam_entries;
+ rsp->secy_entries = mcs->hw->secy_entries;
+ rsp->sc_entries = mcs->hw->sc_entries;
+ rsp->sa_entries = mcs->hw->sa_entries;
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_reset(struct rvu *rvu, struct mcs_port_reset_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_reset_port(mcs, req->port_id, req->reset);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_clear_stats(struct rvu *rvu,
+ struct mcs_clear_stats *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mutex_lock(&mcs->stats_lock);
+ if (req->all)
+ mcs_clear_all_stats(mcs, pcifunc, req->dir);
+ else
+ mcs_clear_stats(mcs, req->type, req->id, req->dir);
+
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_flowid_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_flowid_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* In CNF10K-B, before reading the statistics,
+ * MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP needs to be set
+ * to get accurate statistics
+ */
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_flowid_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ /* Clear MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP after reading
+ * the statistics
+ */
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_secy_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_secy_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+
+ if (req->dir == MCS_RX)
+ mcs_get_rx_secy_stats(mcs, rsp, req->id);
+ else
+ mcs_get_tx_secy_stats(mcs, rsp, req->id);
+
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_sc_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_sc_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_sc_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_sa_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_sa_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_sa_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_port_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_port_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_port_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_set_active_lmac(struct rvu *rvu,
+ struct mcs_set_active_lmac *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ if (!mcs)
+ return MCS_AF_ERR_NOT_MAPPED;
+
+ mcs->hw->lmac_bmap = req->lmac_bmap;
+ mcs_set_lmac_channels(req->mcs_id, req->chan_base);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_cfg_set(struct rvu *rvu, struct mcs_port_cfg_set_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
+ return -EINVAL;
+
+ mcs_set_port_cfg(mcs, req);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_cfg_get(struct rvu *rvu, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
+ return -EINVAL;
+
+ mcs_get_port_cfg(mcs, req, rsp);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_custom_tag_cfg_get(struct rvu *rvu, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_get_custom_tag_cfg(mcs, req, rsp);
+
+ return 0;
+}
+
+int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+ struct mcs *mcs;
+ int mcs_id;
+
+ /* CNF10K-B mcs0-6 are mapped to RPM2-8 */
+ if (rvu->mcs_blk_cnt > 1) {
+ for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
+ mcs = mcs_get_pdata(mcs_id);
+ mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
+ mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
+ }
+ } else {
+ /* CN10K-B has only one mcs block */
+ mcs = mcs_get_pdata(0);
+ mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
+ mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_flowid_ena_entry(struct rvu *rvu,
+ struct mcs_flowid_ena_dis_entry *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, req->ena);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_pn_table_write(struct rvu *rvu,
+ struct mcs_pn_table_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_pn_table_write(mcs, req->pn_id, req->next_pn, req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_set_pn_threshold(struct rvu *rvu,
+ struct mcs_set_pn_threshold *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_pn_threshold_set(mcs, req);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_rx_sc_sa_map_write(struct rvu *rvu,
+ struct mcs_rx_sc_sa_map *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs->mcs_ops->mcs_rx_sa_mem_map_write(mcs, req);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_tx_sc_sa_map_write(struct rvu *rvu,
+ struct mcs_tx_sc_sa_map *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs->mcs_ops->mcs_tx_sa_mem_map_write(mcs, req);
+ mcs->tx_sa_active[req->sc_id] = req->tx_sa_active;
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_sa_plcy_write(struct rvu *rvu,
+ struct mcs_sa_plcy_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int i;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ for (i = 0; i < req->sa_cnt; i++)
+ mcs_sa_plcy_write(mcs, &req->plcy[i][0],
+ req->sa_index[i], req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_rx_sc_cam_write(struct rvu *rvu,
+ struct mcs_rx_sc_cam_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_rx_sc_cam_write(mcs, req->sci, req->secy_id, req->sc_id);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_secy_plcy_write(struct rvu *rvu,
+ struct mcs_secy_plcy_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_secy_plcy_write(mcs, req->plcy,
+ req->secy_id, req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_flowid_entry_write(struct rvu *rvu,
+ struct mcs_flowid_entry_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct secy_mem_map map;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* TODO validate the flowid */
+ mcs_flowid_entry_write(mcs, req->data, req->mask,
+ req->flow_id, req->dir);
+ map.secy = req->secy_id;
+ map.sc = req->sc_id;
+ map.ctrl_pkt = req->ctrl_pkt;
+ map.flow_id = req->flow_id;
+ map.sci = req->sci;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, req->dir);
+ if (req->ena)
+ mcs_ena_dis_flowid_entry(mcs, req->flow_id,
+ req->dir, true);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
+ struct mcs_free_rsrc_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rc = 0;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (req->dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free all the cam resources mapped to PF/VF */
+ if (req->all) {
+ rc = mcs_free_all_rsrc(mcs, req->dir, pcifunc);
+ goto exit;
+ }
+
+ switch (req->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ rc = mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, req->rsrc_id, pcifunc);
+ mcs_ena_dis_flowid_entry(mcs, req->rsrc_id, req->dir, false);
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ rc = mcs_free_rsrc(&map->secy, map->secy2pf_map, req->rsrc_id, pcifunc);
+ mcs_clear_secy_plcy(mcs, req->rsrc_id, req->dir);
+ break;
+ case MCS_RSRC_TYPE_SC:
+ rc = mcs_free_rsrc(&map->sc, map->sc2pf_map, req->rsrc_id, pcifunc);
+ /* Disable SC CAM only on RX side */
+ if (req->dir == MCS_RX)
+ mcs_ena_dis_sc_cam_entry(mcs, req->rsrc_id, false);
+ break;
+ case MCS_RSRC_TYPE_SA:
+ rc = mcs_free_rsrc(&map->sa, map->sa2pf_map, req->rsrc_id, pcifunc);
+ break;
+ }
+exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return rc;
+}
+
+int rvu_mbox_handler_mcs_alloc_resources(struct rvu *rvu,
+ struct mcs_alloc_rsrc_req *req,
+ struct mcs_alloc_rsrc_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rsrc_id, i;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (req->dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ if (req->all) {
+ rsrc_id = mcs_alloc_all_rsrc(mcs, &rsp->flow_ids[0],
+ &rsp->secy_ids[0],
+ &rsp->sc_ids[0],
+ &rsp->sa_ids[0],
+ &rsp->sa_ids[1],
+ pcifunc, req->dir);
+ goto exit;
+ }
+
+ switch (req->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->flow_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->secy_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SC:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->sc_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SA:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->sa_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ }
+
+ rsp->rsrc_type = req->rsrc_type;
+ rsp->dir = req->dir;
+ rsp->mcs_id = req->mcs_id;
+ rsp->all = req->all;
+
+exit:
+ if (rsrc_id < 0)
+ dev_err(rvu->dev, "Failed to allocate the mcs resources for PCIFUNC:%d\n", pcifunc);
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_alloc_ctrl_pkt_rule(struct rvu *rvu,
+ struct mcs_alloc_ctrl_pkt_rule_req *req,
+ struct mcs_alloc_ctrl_pkt_rule_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rsrc_id;
+ u16 offset;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+
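+ /* Each rule type owns a fixed region of the control packet rule table; pick its base offset */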
+ switch (req->rule_type) {
+ case MCS_CTRL_PKT_RULE_TYPE_ETH:
+ offset = MCS_CTRLPKT_ETYPE_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_DA:
+ offset = MCS_CTRLPKT_DA_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_RANGE:
+ offset = MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_COMBO:
+ offset = MCS_CTRLPKT_COMBO_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_MAC:
+ offset = MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
+ break;
+ }
+
+ rsrc_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule, map->ctrlpktrule2pf_map, offset,
+ pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+
+ rsp->rule_idx = rsrc_id;
+ rsp->rule_type = req->rule_type;
+ rsp->dir = req->dir;
+ rsp->mcs_id = req->mcs_id;
+
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+exit:
+ if (rsrc_id < 0)
+ dev_err(rvu->dev, "Failed to allocate the mcs ctrl pkt rule for PCIFUNC:%d\n",
+ pcifunc);
+ mutex_unlock(&rvu->rsrc_lock);
+ return rsrc_id;
+}
+
+int rvu_mbox_handler_mcs_free_ctrl_pkt_rule(struct rvu *rvu,
+ struct mcs_free_ctrl_pkt_rule_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ rc = mcs_free_ctrlpktrule(mcs, req);
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return rc;
+}
+
+int rvu_mbox_handler_mcs_ctrl_pkt_rule_write(struct rvu *rvu,
+ struct mcs_ctrl_pkt_rule_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ rc = mcs_ctrlpktrule_write(mcs, req);
+
+ return rc;
+}
+
+static void rvu_mcs_set_lmac_bmap(struct rvu *rvu)
+{
+ struct mcs *mcs = mcs_get_pdata(0);
+ unsigned long lmac_bmap = 0;
+ int cgx, lmac, port;
+
+ for (port = 0; port < mcs->hw->lmac_cnt; port++) {
+ cgx = port / rvu->hw->lmac_per_cgx;
+ lmac = port % rvu->hw->lmac_per_cgx;
+ if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac))
+ continue;
+ set_bit(port, &lmac_bmap);
+ }
+ mcs->hw->lmac_bmap = lmac_bmap;
+}
+
+int rvu_mcs_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lmac, err = 0, mcs_id;
+ struct mcs *mcs;
+
+ rvu->mcs_blk_cnt = mcs_get_blkcnt();
+
+ if (!rvu->mcs_blk_cnt)
+ return 0;
+
+ /* Needed only for CN10K-B */
+ if (rvu->mcs_blk_cnt == 1) {
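+ /* A single MCS block (CN10K-B) serves all CGX LMACs: set its channel mapping and active LMAC bitmap */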
+ err = mcs_set_lmac_channels(0, hw->cgx_chan_base);
+ if (err)
+ return err;
+ /* Set active lmacs */
+ rvu_mcs_set_lmac_bmap(rvu);
+ }
+
+ /* Install default tcam bypass entry and set port to operational mode */
+ for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
+ mcs = mcs_get_pdata(mcs_id);
+ mcs_install_flowid_bypass_entry(mcs);
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
+ mcs_set_lmac_mode(mcs, lmac, 0);
+
+ mcs->rvu = rvu;
+
+ /* Allocate memory for PFVF data */
+ mcs->pf = devm_kcalloc(mcs->dev, hw->total_pfs,
+ sizeof(struct mcs_pfvf), GFP_KERNEL);
+ if (!mcs->pf)
+ return -ENOMEM;
+
+ mcs->vf = devm_kcalloc(mcs->dev, hw->total_vfs,
+ sizeof(struct mcs_pfvf), GFP_KERNEL);
+ if (!mcs->vf)
+ return -ENOMEM;
+ }
+
+ /* Initialize the wq for handling mcs interrupts */
+ INIT_LIST_HEAD(&rvu->mcs_intrq_head);
+ INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
+ rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
+ if (!rvu->mcs_intr_wq) {
+ dev_err(rvu->dev, "mcs alloc workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ return err;
+}
+
+void rvu_mcs_exit(struct rvu *rvu)
+{
+ if (!rvu->mcs_intr_wq)
+ return;
+
+ flush_workqueue(rvu->mcs_intr_wq);
+ destroy_workqueue(rvu->mcs_intr_wq);
+ rvu->mcs_intr_wq = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 77fd39e2c8db..9beeead56d7b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -10,6 +10,14 @@
#define NPC_KEX_CHAN_MASK 0xFFFULL
+#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
+
+#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
+
enum NPC_LID_E {
NPC_LID_LA = 0,
NPC_LID_LB,
@@ -177,8 +185,10 @@ enum key_fields {
NPC_VLAN_ETYPE_STAG, /* 0x88A8 */
NPC_OUTER_VID,
NPC_TOS,
+ NPC_IPFRAG_IPV4,
NPC_SIP_IPV4,
NPC_DIP_IPV4,
+ NPC_IPFRAG_IPV6,
NPC_SIP_IPV6,
NPC_DIP_IPV6,
NPC_IPPROTO_TCP,
@@ -200,6 +210,7 @@ enum key_fields {
NPC_ERRLEV,
NPC_ERRCODE,
NPC_LXMB,
+ NPC_EXACT_RESULT,
NPC_LA,
NPC_LB,
NPC_LC,
@@ -381,6 +392,22 @@ struct nix_rx_action {
};
/* NPC_AF_INTFX_KEX_CFG field masks */
+#define NPC_EXACT_NIBBLE_START 40
+#define NPC_EXACT_NIBBLE_END 43
+#define NPC_EXACT_NIBBLE GENMASK_ULL(43, 40)
+
+/* NPC_EXACT_KEX_S nibble definitions for each field */
+#define NPC_EXACT_NIBBLE_HIT BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_OPC BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_WAY BIT_ULL(40)
+#define NPC_EXACT_NIBBLE_INDEX GENMASK_ULL(43, 41)
+
+#define NPC_EXACT_RESULT_HIT BIT_ULL(0)
+#define NPC_EXACT_RESULT_OPC GENMASK_ULL(2, 1)
+#define NPC_EXACT_RESULT_WAY GENMASK_ULL(4, 3)
+#define NPC_EXACT_RESULT_IDX GENMASK_ULL(15, 5)
+
+/* NPC_AF_INTFX_KEX_CFG field masks */
#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
/* NPC_PARSE_KEX_S nibble definitions for each field */
@@ -455,7 +482,7 @@ struct npc_coalesced_kpu_prfl {
u8 name[NPC_NAME_LEN]; /* KPU Profile name */
u64 version; /* KPU firmware/profile version */
u8 num_prfl; /* No of NPC profiles. */
- u16 prfl_sz[0];
+ u16 prfl_sz[];
};
struct npc_mcam_kex {
@@ -482,7 +509,7 @@ struct npc_kpu_fwdata {
* struct npc_kpu_profile_cam[entries];
* struct npc_kpu_profile_action[entries];
*/
- u8 data[0];
+ u8 data[];
} __packed;
struct npc_lt_def {
@@ -572,7 +599,7 @@ struct npc_kpu_profile_fwdata {
* Custom KPU CAM and ACTION configuration entries.
* struct npc_kpu_fwdata kpu[kpus];
*/
- u8 data[0];
+ u8 data[];
} __packed;
struct rvu_npc_mcam_rule {
@@ -595,6 +622,7 @@ struct rvu_npc_mcam_rule {
bool vfvlan_cfg;
u16 chan;
u16 chan_mask;
+ u8 lxmb;
};
#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 0fe7ad35e36f..a820bad3abb2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -155,7 +155,7 @@
/* Rx parse key extract nibble enable */
#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
- NPC_PARSE_NIBBLE_ERRCODE | \
+ NPC_PARSE_NIBBLE_L2L3_BCAST | \
NPC_PARSE_NIBBLE_LA_LTYPE | \
NPC_PARSE_NIBBLE_LB_LTYPE | \
NPC_PARSE_NIBBLE_LC_LTYPE | \
@@ -185,7 +185,6 @@ enum npc_kpu_parser_state {
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
NPC_S_KPU2_EXDSA,
- NPC_S_KPU2_NGIO,
NPC_S_KPU2_CPT_CTAG,
NPC_S_KPU2_CPT_QINQ,
NPC_S_KPU3_CTAG,
@@ -212,6 +211,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU5_NSH,
NPC_S_KPU5_CPT_IP,
NPC_S_KPU5_CPT_IP6,
+ NPC_S_KPU5_NGIO,
NPC_S_KPU6_IP6_EXT,
NPC_S_KPU6_IP6_HOP_DEST,
NPC_S_KPU6_IP6_ROUT,
@@ -1124,15 +1124,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_CTAG,
0xffff,
- NPC_ETYPE_NGIO,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
NPC_ETYPE_CTAG,
0xffff,
0x0000,
@@ -1968,6 +1959,15 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
{
NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_NGIO,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_PPPOE,
0xffff,
0x0000,
@@ -2750,15 +2750,6 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU2_NGIO, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU2_CPT_CTAG, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -5090,6 +5081,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU5_NGIO, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -8425,14 +8425,6 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 0, 0, 0,
- NPC_S_KPU2_NGIO, 12, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 12, 0, 0, 0,
NPC_S_KPU2_CTAG2, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
@@ -9196,6 +9188,14 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_NGIO, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
@@ -9892,14 +9892,6 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NGIO,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
NPC_S_KPU5_CPT_IP, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -11974,6 +11966,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NGIO,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -15123,7 +15123,8 @@ static struct npc_mcam_kex npc_mkex_default = {
.kpu_version = NPC_KPU_PROFILE_VER,
.keyx_cfg = {
/* nibble: LA..LE (ltype only) + L2/L3 broadcast + Channel + Exact match hit */
- [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX,
+ [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX |
+ (u64)NPC_EXACT_NIBBLE_HIT,
/* nibble: LA..LE (ltype only) */
[NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX,
},
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index d6321de3cc17..3411e2e47d46 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -9,6 +9,8 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
#include "ptp.h"
#include "mbox.h"
@@ -25,6 +27,9 @@
#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600
#define PCI_DEVID_OCTEONTX2_RST 0xA085
#define PCI_DEVID_CN10K_PTP 0xA09E
+#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00
#define PCI_PTP_BAR_NO 0
@@ -46,10 +51,153 @@
#define PTP_CLOCK_HI 0xF10ULL
#define PTP_CLOCK_COMP 0xF18ULL
#define PTP_TIMESTAMP 0xF20ULL
+#define PTP_CLOCK_SEC 0xFD0ULL
+#define PTP_SEC_ROLLOVER 0xFD8ULL
+
+#define CYCLE_MULT 1000
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
+static bool is_ptp_dev_cnf10kb(struct ptp *ptp)
+{
+ return (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_PTP) ? true : false;
+}
+
+static bool is_ptp_dev_cn10k(struct ptp *ptp)
+{
+ return (ptp->pdev->device == PCI_DEVID_CN10K_PTP) ? true : false;
+}
+
+static bool cn10k_ptp_errata(struct ptp *ptp)
+{
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
+
+static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
+{
+ if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
+ ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_A_PTP)
+ return true;
+ return false;
+}
+
+static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer)
+{
+ struct ptp *ptp = container_of(hrtimer, struct ptp, hrtimer);
+ ktime_t curr_ts = ktime_get();
+ ktime_t delta_ns, period_ns;
+ u64 ptp_clock_hi;
+
+ /* calculate the elapsed time since last restart */
+ delta_ns = ktime_to_ns(ktime_sub(curr_ts, ptp->last_ts));
+
+ /* If the PTP clock value has crossed 0.5 seconds, it is too late
+ * to update the PPS threshold value, so update the threshold
+ * after 1 second.
+ */
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ if (ptp_clock_hi > 500000000) {
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - ptp_clock_hi));
+ } else {
+ writeq(500000000, ptp->reg_base + PTP_PPS_THRESH_HI);
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - delta_ns));
+ }
+
+ hrtimer_forward_now(hrtimer, period_ns);
+ ptp->last_ts = curr_ts;
+
+ return HRTIMER_RESTART;
+}
+
+static void ptp_hrtimer_start(struct ptp *ptp, ktime_t start_ns)
+{
+ ktime_t period_ns;
+
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - start_ns));
+ hrtimer_start(&ptp->hrtimer, period_ns, HRTIMER_MODE_REL);
+ ptp->last_ts = ktime_get();
+}
+
+static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
+{
+ u64 sec, sec1, nsec;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ptp->ptp_lock, flags);
+ sec = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec1 = readq(ptp->reg_base + PTP_CLOCK_SEC) & 0xFFFFFFFFUL;
+ /* If the seconds counter advanced between the two reads, the
+ * nanoseconds value belongs to the previous second; re-read it
+ */
+ if (sec1 > sec) {
+ nsec = readq(ptp->reg_base + PTP_CLOCK_HI);
+ sec = sec1;
+ }
+ spin_unlock_irqrestore(&ptp->ptp_lock, flags);
+
+ return sec * NSEC_PER_SEC + nsec;
+}
+
+static u64 read_ptp_tstmp_nsec(struct ptp *ptp)
+{
+ return readq(ptp->reg_base + PTP_CLOCK_HI);
+}
+
+static u64 ptp_calc_adjusted_comp(u64 ptp_clock_freq)
+{
+ u64 comp, adj = 0, cycles_per_sec, ns_drift = 0;
+ u32 ptp_clock_nsec, cycle_time;
+ int cycle;
+
+ /* Errata:
+ * Issue #1: At the time of 1 sec rollover of the nano-second counter,
+ * the nano-second counter is set to 0. However, it should be set to
+ * (existing counter_value - 10^9).
+ *
+ * Issue #2: The nano-second counter rolls over at 0x3B9A_C9FF.
+ * It should roll over at 0x3B9A_CA00.
+ */
+
+ /* calculate ptp_clock_comp value */
+ comp = ((u64)1000000000ULL << 32) / ptp_clock_freq;
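+ /* comp is the number of nanoseconds added per clock cycle, in 32.32 fixed point */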
+ /* use CYCLE_MULT to avoid accuracy loss due to integer arithmetic */
+ cycle_time = NSEC_PER_SEC * CYCLE_MULT / ptp_clock_freq;
+ /* cycles per sec */
+ cycles_per_sec = ptp_clock_freq;
+
+ /* check whether ptp nanosecond counter rolls over early */
+ cycle = cycles_per_sec - 1;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ while (ptp_clock_nsec < NSEC_PER_SEC) {
+ if (ptp_clock_nsec == 0x3B9AC9FF)
+ goto calc_adj_comp;
+ cycle++;
+ ptp_clock_nsec = (cycle * comp) >> 32;
+ }
+ /* compute nanoseconds lost per second when nsec counter rolls over */
+ ns_drift = ptp_clock_nsec - NSEC_PER_SEC;
+ /* calculate ptp_clock_comp adjustment */
+ if (ns_drift > 0) {
+ adj = comp * ns_drift;
+ adj = adj / 1000000000ULL;
+ }
+ /* speed up the ptp clock to account for nanoseconds lost */
+ comp += adj;
+ return comp;
+
+calc_adj_comp:
+ /* slow down the ptp clock to not rollover early */
+ adj = comp * cycle_time;
+ adj = adj / 1000000000ULL;
+ adj = adj / CYCLE_MULT;
+ comp -= adj;
+
+ return comp;
+}
+
struct ptp *ptp_get(void)
{
struct ptp *ptp = first_ptp_block;
@@ -60,6 +208,8 @@ struct ptp *ptp_get(void)
/* Check driver is bound to PTP block */
if (!ptp)
ptp = ERR_PTR(-EPROBE_DEFER);
+ else
+ pci_dev_get(ptp->pdev);
return ptp;
}
@@ -75,8 +225,8 @@ void ptp_put(struct ptp *ptp)
static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
{
bool neg_adj = false;
- u64 comp;
- u64 adj;
+ u32 freq, freq_adj;
+ u64 comp, adj;
s64 ppb;
if (scaled_ppm < 0) {
@@ -98,15 +248,22 @@ static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
* where tbase is the basic compensation value calculated
* initially in the probe function.
*/
- comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
/* convert scaled_ppm to ppb */
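/* scaled_ppm is ppm with a 16-bit fractional part: ppb = scaled_ppm * 1000 / 2^16, computed here as * 125 >> 13 */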
ppb = 1 + scaled_ppm;
ppb *= 125;
ppb >>= 13;
- adj = comp * ppb;
- adj = div_u64(adj, 1000000000ull);
- comp = neg_adj ? comp - adj : comp + adj;
+ if (cn10k_ptp_errata(ptp)) {
+ /* calculate the new frequency based on ppb */
+ freq_adj = (ptp->clock_rate * ppb) / 1000000000ULL;
+ freq = neg_adj ? ptp->clock_rate + freq_adj : ptp->clock_rate - freq_adj;
+ comp = ptp_calc_adjusted_comp(freq);
+ } else {
+ comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+ }
writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
return 0;
@@ -115,7 +272,7 @@ static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
static int ptp_get_clock(struct ptp *ptp, u64 *clk)
{
/* Return the current PTP clock */
- *clk = readq(ptp->reg_base + PTP_CLOCK_HI);
+ *clk = ptp->read_ptp_tstmp(ptp);
return 0;
}
@@ -139,6 +296,10 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
/* sclk is in MHz */
ptp->clock_rate = sclk * 1000000;
+ /* Program the seconds rollover value to 1 second */
+ if (is_ptp_dev_cnf10kb(ptp))
+ writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER);
+
/* Enable PTP clock */
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
@@ -163,22 +324,63 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
/* Set 50% duty cycle for 1Hz output */
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
+ if (cn10k_ptp_errata(ptp)) {
+ /* The ptp_clock_hi counter rolls over to zero one clock cycle before it
+ * reaches the one second boundary. So, program pps_lo_incr in
+ * such a way that the PPS threshold comparison at the one
+ * second boundary succeeds and the PPS edge toggles. After each
+ * one second boundary, the hrtimer handler is invoked and
+ * reprograms the PPS threshold value.
+ */
+ ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
+ writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
+ ptp->reg_base + PTP_PPS_LO_INCR);
+ }
+
+ if (cn10k_ptp_errata(ptp))
+ clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
+ else
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
- clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
/* Initial compensation value to start the nanosecs counter */
writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
}
static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
{
- *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+ u64 timestamp;
+
+ if (is_ptp_dev_cn10k(ptp)) {
+ timestamp = readq(ptp->reg_base + PTP_TIMESTAMP);
+ *clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF);
+ } else {
+ *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+ }
return 0;
}
static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
{
- writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+ if (!cn10k_ptp_errata(ptp))
+ writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+
+ return 0;
+}
+
+static int ptp_extts_on(struct ptp *ptp, int on)
+{
+ u64 ptp_clock_hi;
+
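+ /* On silicon with the PPS errata the threshold must be refreshed from software:
+ * arm the hrtimer when EXTTS is enabled and cancel it when disabled.
+ */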
+ if (cn10k_ptp_errata(ptp)) {
+ if (on) {
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
+ } else {
+ if (hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+ }
+ }
return 0;
}
@@ -212,6 +414,17 @@ static int ptp_probe(struct pci_dev *pdev,
if (!first_ptp_block)
first_ptp_block = ptp;
+ spin_lock_init(&ptp->ptp_lock);
+ if (is_ptp_tsfmt_sec_nsec(ptp))
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_sec_nsec;
+ else
+ ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
+
+ if (cn10k_ptp_errata(ptp)) {
+ hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ptp->hrtimer.function = ptp_reset_thresh;
+ }
+
return 0;
error_free:
@@ -236,6 +449,9 @@ static void ptp_remove(struct pci_dev *pdev)
struct ptp *ptp = pci_get_drvdata(pdev);
u64 clock_cfg;
+ if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+
if (IS_ERR_OR_NULL(ptp))
return;
@@ -303,6 +519,9 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_SET_THRESH:
err = ptp_set_thresh(rvu->ptp, req->thresh);
break;
+ case PTP_OP_EXTTS_ON:
+ err = ptp_extts_on(rvu->ptp, req->extts_on);
+ break;
default:
err = -EINVAL;
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index 1b81a0493cd3..b9d92abc3844 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -15,7 +15,12 @@
struct ptp {
struct pci_dev *pdev;
void __iomem *reg_base;
+ u64 (*read_ptp_tstmp)(struct ptp *ptp);
+ spinlock_t ptp_lock; /* lock */
+ struct hrtimer hrtimer;
+ ktime_t last_ts;
u32 clock_rate;
+ u32 clock_period;
};
struct ptp *ptp_get(void);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index e695fa0e82a9..de0d88dd10d6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -8,7 +8,7 @@
#include "cgx.h"
#include "lmac_common.h"
-static struct mac_ops rpm_mac_ops = {
+static struct mac_ops rpm_mac_ops = {
.name = "rpm",
.csr_offset = 0x4e00,
.lmac_offset = 20,
@@ -20,21 +20,69 @@ static struct mac_ops rpm_mac_ops = {
.non_contiguous_serdes_lane = true,
.rx_stats_cnt = 43,
.tx_stats_cnt = 34,
+ .dmac_filter_count = 32,
.get_nr_lmacs = rpm_get_nr_lmacs,
.get_lmac_type = rpm_get_lmac_type,
+ .lmac_fifo_len = rpm_get_lmac_fifo_len,
.mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
.mac_get_rx_stats = rpm_get_rx_stats,
.mac_get_tx_stats = rpm_get_tx_stats,
+ .get_fec_stats = rpm_get_fec_stats,
.mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding,
.mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
.mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
.mac_pause_frm_config = rpm_lmac_pause_frm_config,
.mac_enadis_ptp_config = rpm_lmac_ptp_config,
+ .mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
+ .mac_tx_enable = rpm_lmac_tx_enable,
+ .pfc_config = rpm_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
};
-struct mac_ops *rpm_get_mac_ops(void)
+static struct mac_ops rpm2_mac_ops = {
+ .name = "rpm",
+ .csr_offset = RPM2_CSR_OFFSET,
+ .lmac_offset = 20,
+ .int_register = RPM2_CMRX_SW_INT,
+ .int_set_reg = RPM2_CMRX_SW_INT_ENA_W1S,
+ .irq_offset = 1,
+ .int_ena_bit = BIT_ULL(0),
+ .lmac_fwi = RPM_LMAC_FWI,
+ .non_contiguous_serdes_lane = true,
+ .rx_stats_cnt = 43,
+ .tx_stats_cnt = 34,
+ .dmac_filter_count = 64,
+ .get_nr_lmacs = rpm2_get_nr_lmacs,
+ .get_lmac_type = rpm_get_lmac_type,
+ .lmac_fifo_len = rpm2_get_lmac_fifo_len,
+ .mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
+ .mac_get_rx_stats = rpm_get_rx_stats,
+ .mac_get_tx_stats = rpm_get_tx_stats,
+ .get_fec_stats = rpm_get_fec_stats,
+ .mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding,
+ .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
+ .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
+ .mac_pause_frm_config = rpm_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = rpm_lmac_ptp_config,
+ .mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
+ .mac_tx_enable = rpm_lmac_tx_enable,
+ .pfc_config = rpm_lmac_pfc_config,
+ .mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
+};
+
+bool is_dev_rpm2(void *rpmd)
{
- return &rpm_mac_ops;
+ rpm_t *rpm = rpmd;
+
+ return (rpm->pdev->device == PCI_DEVID_CN10KB_RPM);
+}
+
+struct mac_ops *rpm_get_mac_ops(rpm_t *rpm)
+{
+ if (is_dev_rpm2(rpm))
+ return &rpm2_mac_ops;
+ else
+ return &rpm_mac_ops;
}
static void rpm_write(rpm_t *rpm, u64 lmac, u64 offset, u64 val)
@@ -47,6 +95,16 @@ static u64 rpm_read(rpm_t *rpm, u64 lmac, u64 offset)
return cgx_read(rpm, lmac, offset);
}
+/* Read the HW major version to determine the RPM
+ * MAC type: 100G or USX
+ */
+static bool is_mac_rpmusx(void *rpmd)
+{
+ rpm_t *rpm = rpmd;
+
+ return rpm_read(rpm, 0, RPMX_CONST1) & 0x700ULL;
+}
+
int rpm_get_nr_lmacs(void *rpmd)
{
rpm_t *rpm = rpmd;
@@ -54,14 +112,67 @@ int rpm_get_nr_lmacs(void *rpmd)
return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
}
+int rpm2_get_nr_lmacs(void *rpmd)
+{
+ rpm_t *rpm = rpmd;
+
+ return hweight8(rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL);
+}
+
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg, last;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ last = cfg;
+ if (enable)
+ cfg |= RPM_TX_EN;
+ else
+ cfg &= ~(RPM_TX_EN);
+
+ if (cfg != last)
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
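+ /* Return the previous TX enable state so the caller can restore it later */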
+ return !!(last & RPM_TX_EN);
+}
+
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (enable)
+ cfg |= RPM_RX_EN | RPM_TX_EN;
+ else
+ cfg &= ~(RPM_RX_EN | RPM_TX_EN);
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return 0;
+}
+
void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
+ struct lmac *lmac;
u64 cfg;
if (!rpm)
return;
+ lmac = lmac_pdata(lmac_id, rpm);
+ if (!lmac)
+ return;
+
+ /* Pause frames are not enabled, just return */
+ if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
+ return;
+
if (enable) {
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
@@ -83,13 +194,134 @@ int rpm_lmac_get_pause_frm_status(void *rpmd, int lmac_id,
return -ENODEV;
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ if (!(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE)) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
return 0;
}
+static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id,
+ unsigned long pfc_en,
+ bool enable)
+{
+ u64 quanta_offset = 0, quanta_thresh = 0, cfg;
+ int i, shift;
+
+ /* Set pause time and interval */
+ for_each_set_bit(i, &pfc_en, 16) {
+ switch (i) {
+ case 0:
+ case 1:
+ quanta_offset = RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL01_QUANTA_THRESH;
+ break;
+ case 2:
+ case 3:
+ quanta_offset = RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL23_QUANTA_THRESH;
+ break;
+ case 4:
+ case 5:
+ quanta_offset = RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL45_QUANTA_THRESH;
+ break;
+ case 6:
+ case 7:
+ quanta_offset = RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL67_QUANTA_THRESH;
+ break;
+ case 8:
+ case 9:
+ quanta_offset = RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL89_QUANTA_THRESH;
+ break;
+ case 10:
+ case 11:
+ quanta_offset = RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH;
+ break;
+ case 12:
+ case 13:
+ quanta_offset = RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH;
+ break;
+ case 14:
+ case 15:
+ quanta_offset = RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA;
+ quanta_thresh = RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH;
+ break;
+ }
+
+ if (!quanta_offset || !quanta_thresh)
+ continue;
+
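+ /* Each register holds two classes: even class in bits 15:0, odd class in bits 31:16 */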
+ shift = (i % 2) ? 1 : 0;
+ cfg = rpm_read(rpm, lmac_id, quanta_offset);
+ if (enable) {
+ cfg |= ((u64)RPM_DEFAULT_PAUSE_TIME << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_offset, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, quanta_thresh);
+ if (enable) {
+ cfg |= ((u64)(RPM_DEFAULT_PAUSE_TIME / 2) << shift * 16);
+ } else {
+ if (!shift)
+ cfg &= ~GENMASK_ULL(15, 0);
+ else
+ cfg &= ~GENMASK_ULL(31, 16);
+ }
+ rpm_write(rpm, lmac_id, quanta_thresh, cfg);
+ }
+}
+
+static void rpm2_lmac_cfg_bp(rpm_t *rpm, int lmac_id, u8 tx_pause, u8 rx_pause)
+{
+ u64 cfg;
+
+ cfg = rpm_read(rpm, lmac_id, RPM2_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ /* Configure CL0 Pause Quanta & threshold
+ * for 802.3X frames
+ */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
+ cfg &= ~RPM2_CMR_RX_OVR_BP_EN;
+ } else {
+ /* Disable all Pause Quanta & threshold values */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+ cfg |= RPM2_CMR_RX_OVR_BP_EN;
+ cfg &= ~RPM2_CMR_RX_OVR_BP_BP;
+ }
+ rpm_write(rpm, lmac_id, RPM2_CMR_RX_OVR_BP, cfg);
+}
+
+static void rpm_lmac_cfg_bp(rpm_t *rpm, int lmac_id, u8 tx_pause, u8 rx_pause)
+{
+ u64 cfg;
+
+ cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ /* Configure CL0 Pause Quanta & threshold for
+ * 802.3X frames
+ */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 1, true);
+ cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ /* Disable all Pause Quanta & threshold values */
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+ cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg);
+}
+
int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
u8 rx_pause)
{
@@ -111,14 +343,11 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
cfg |= tx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
- if (tx_pause) {
- cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
- } else {
- cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
- cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
- }
- rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg);
+ if (is_dev_rpm2(rpm))
+ rpm2_lmac_cfg_bp(rpm, lmac_id, tx_pause, rx_pause);
+ else
+ rpm_lmac_cfg_bp(rpm, lmac_id, tx_pause, rx_pause);
+
return 0;
}
@@ -127,56 +356,31 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
rpm_t *rpm = rpmd;
u64 cfg;
- if (enable) {
- /* Enable 802.3 pause frame mode */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Enable receive pause frames */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Enable forward pause to TX block */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
-
- /* Enable pause frames transmission */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ /* ALL pause frames received are completely ignored */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- /* Set pause time and interval */
- cfg = rpm_read(rpm, lmac_id,
- RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA);
- cfg &= ~0xFFFFULL;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA,
- cfg | RPM_DEFAULT_PAUSE_TIME);
- /* Set pause interval as the hardware default is too short */
- cfg = rpm_read(rpm, lmac_id,
- RPMX_MTI_MAC100X_CL01_QUANTA_THRESH);
- cfg &= ~0xFFFFULL;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_QUANTA_THRESH,
- cfg | (RPM_DEFAULT_PAUSE_TIME / 2));
+ /* Disable forward pause to TX block */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- } else {
- /* ALL pause frames received are completely ignored */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ /* Disable pause frames transmission */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- /* Disable forward pause to TX block */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ /* Enable channel mask for all LMACS */
+ if (is_dev_rpm2(rpm))
+ rpm_write(rpm, lmac_id, RPM2_CMR_CHAN_MSK_OR, 0xffff);
+ else
+ rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL);
- /* Disable pause frames transmission */
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
- cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
- rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
- }
+ /* Disable all PFC classes */
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
+ cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg);
+ rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg);
}
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
@@ -184,7 +388,7 @@ int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
rpm_t *rpm = rpmd;
u64 val_lo, val_hi;
- if (!rpm || lmac_id >= rpm->lmac_count)
+ if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
mutex_lock(&rpm->lock);
@@ -212,7 +416,7 @@ int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat)
rpm_t *rpm = rpmd;
u64 val_lo, val_hi;
- if (!rpm || lmac_id >= rpm->lmac_count)
+ if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
mutex_lock(&rpm->lock);
@@ -243,32 +447,119 @@ u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
return err;
}
+u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id)
+{
+ rpm_t *rpm = rpmd;
+ u64 hi_perf_lmac;
+ u8 num_lmacs;
+ u32 fifo_len;
+
+ fifo_len = rpm->mac_ops->fifo_len;
+ num_lmacs = rpm->mac_ops->get_nr_lmacs(rpm);
+
+ switch (num_lmacs) {
+ case 1:
+ return fifo_len;
+ case 2:
+ return fifo_len / 2;
+ case 3:
+ /* LMAC marked as hi_perf gets half of the FIFO; the rest get a quarter each */
+ hi_perf_lmac = rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS);
+ hi_perf_lmac = (hi_perf_lmac >> 4) & 0x3ULL;
+ if (lmac_id == hi_perf_lmac)
+ return fifo_len / 2;
+ return fifo_len / 4;
+ case 4:
+ default:
+ return fifo_len / 4;
+ }
+ return 0;
+}
+
+static int rpmusx_lmac_internal_loopback(rpm_t *rpm, int lmac_id, bool enable)
+{
+ u64 cfg;
+
+ cfg = rpm_read(rpm, lmac_id, RPM2_USX_PCSX_CONTROL1);
+
+ if (enable)
+ cfg |= RPM2_USX_PCS_LBK;
+ else
+ cfg &= ~RPM2_USX_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPM2_USX_PCSX_CONTROL1, cfg);
+
+ return 0;
+}
+
+u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id)
+{
+ u64 hi_perf_lmac, lmac_info;
+ rpm_t *rpm = rpmd;
+ u8 num_lmacs;
+ u32 fifo_len;
+
+ lmac_info = rpm_read(rpm, 0, RPM2_CMRX_RX_LMACS);
+ /* LMACs are divided into two groups and each group
+ * gets half of the FIFO
+ * Group0 lmac_id range {0..3}
+ * Group1 lmac_id range {4..7}
+ */
+ fifo_len = rpm->mac_ops->fifo_len / 2;
+
+ if (lmac_id < 4) {
+ num_lmacs = hweight8(lmac_info & 0xF);
+ hi_perf_lmac = (lmac_info >> 8) & 0x3ULL;
+ } else {
+ num_lmacs = hweight8(lmac_info & 0xF0);
+ hi_perf_lmac = (lmac_info >> 10) & 0x3ULL;
+ hi_perf_lmac += 4;
+ }
+
+ switch (num_lmacs) {
+ case 1:
+ return fifo_len;
+ case 2:
+ return fifo_len / 2;
+ case 3:
+ /* LMAC marked as hi_perf gets half of the FIFO;
+ * the rest get a quarter each
+ */
+ if (lmac_id == hi_perf_lmac)
+ return fifo_len / 2;
+ return fifo_len / 4;
+ case 4:
+ default:
+ return fifo_len / 4;
+ }
+ return 0;
+}
+
int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
u8 lmac_type;
u64 cfg;
- if (!rpm || lmac_id >= rpm->lmac_count)
+ if (!is_lmac_valid(rpm, lmac_id))
return -ENODEV;
lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
- if (lmac_type == LMAC_MODE_100G_R) {
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
-
- if (enable)
- cfg |= RPMX_MTI_PCS_LBK;
- else
- cfg &= ~RPMX_MTI_PCS_LBK;
- rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
- } else {
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1);
- if (enable)
- cfg |= RPMX_MTI_PCS_LBK;
- else
- cfg &= ~RPMX_MTI_PCS_LBK;
- rpm_write(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1, cfg);
+
+ if (lmac_type == LMAC_MODE_QSGMII || lmac_type == LMAC_MODE_SGMII) {
+ dev_err(&rpm->pdev->dev, "loopback not supported for LPC mode\n");
+ return 0;
}
+ if (is_dev_rpm2(rpm) && is_mac_rpmusx(rpm))
+ return rpmusx_lmac_internal_loopback(rpm, lmac_id, enable);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
+
+ if (enable)
+ cfg |= RPMX_MTI_PCS_LBK;
+ else
+ cfg &= ~RPMX_MTI_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
+
return 0;
}
@@ -281,9 +572,144 @@ void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
return;
cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG);
- if (enable)
+ if (enable) {
cfg |= RPMX_RX_TS_PREPEND;
- else
+ cfg |= RPMX_TX_PTP_1S_SUPPORT;
+ } else {
cfg &= ~RPMX_RX_TS_PREPEND;
+ cfg &= ~RPMX_TX_PTP_1S_SUPPORT;
+ }
+
rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE);
+
+ if (enable) {
+ cfg |= RPMX_ONESTEP_ENABLE;
+ cfg &= ~RPMX_TS_BINARY_MODE;
+ } else {
+ cfg &= ~RPMX_ONESTEP_ENABLE;
+ }
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE, cfg);
+}
+
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
+{
+ u64 cfg, class_en, pfc_class_mask_cfg;
+ rpm_t *rpm = rpmd;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ class_en = rpm_read(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL);
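+ /* Merge the requested PFC classes with those already enabled in hardware */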
+ pfc_en |= FIELD_GET(RPM_PFC_CLASS_MASK, class_en);
+
+ if (rx_pause) {
+ cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ } else {
+ cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE |
+ RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD);
+ }
+
+ if (tx_pause) {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, pfc_en, true);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ class_en = FIELD_SET(RPM_PFC_CLASS_MASK, pfc_en, class_en);
+ } else {
+ rpm_cfg_pfc_quanta_thresh(rpm, lmac_id, 0xffff, false);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ class_en = FIELD_SET(RPM_PFC_CLASS_MASK, 0, class_en);
+ }
+
+ if (!rx_pause && !tx_pause)
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+ else
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ pfc_class_mask_cfg = is_dev_rpm2(rpm) ? RPM2_CMRX_PRT_CBFC_CTL :
+ RPMX_CMRX_PRT_CBFC_CTL;
+
+ rpm_write(rpm, lmac_id, pfc_class_mask_cfg, class_en);
+
+ return 0;
+}
+
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause, u8 *rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE) {
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ }
+
+ return 0;
+}
+
+int rpm_get_fec_stats(void *rpmd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
+{
+ u64 val_lo, val_hi;
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
+ return 0;
+
+ if (rpm->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
+ val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_CCW_LO);
+ val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
+ rsp->fec_corr_blks = (val_hi << 16 | val_lo);
+
+ val_lo = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_VL0_NCCW_LO);
+ val_hi = rpm_read(rpm, lmac_id, RPMX_MTI_FCFECX_CW_HI);
+ rsp->fec_uncorr_blks = (val_hi << 16 | val_lo);
+
+ /* 50G uses 2 Physical serdes lines */
+ if (rpm->lmac_idmap[lmac_id]->link_info.lmac_type_id ==
+ LMAC_MODE_50G_R) {
+ val_lo = rpm_read(rpm, lmac_id,
+ RPMX_MTI_FCFECX_VL1_CCW_LO);
+ val_hi = rpm_read(rpm, lmac_id,
+ RPMX_MTI_FCFECX_CW_HI);
+ rsp->fec_corr_blks += (val_hi << 16 | val_lo);
+
+ val_lo = rpm_read(rpm, lmac_id,
+ RPMX_MTI_FCFECX_VL1_NCCW_LO);
+ val_hi = rpm_read(rpm, lmac_id,
+ RPMX_MTI_FCFECX_CW_HI);
+ rsp->fec_uncorr_blks += (val_hi << 16 | val_lo);
+ }
+ } else {
+ /* enable RS-FEC capture */
+ cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL);
+ cfg |= RPMX_RSFEC_RX_CAPTURE | BIT(lmac_id);
+ rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg);
+
+ val_lo = rpm_read(rpm, 0,
+ RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2);
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+ rsp->fec_corr_blks = (val_hi << 32 | val_lo);
+
+ val_lo = rpm_read(rpm, 0,
+ RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3);
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+ rsp->fec_uncorr_blks = (val_hi << 32 | val_lo);
+ }
+
+ return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index 57c8a687b488..22147b4c2137 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -12,16 +12,19 @@
/* PCI device IDs */
#define PCI_DEVID_CN10K_RPM 0xA060
+#define PCI_SUBSYS_DEVID_CNF10KB_RPM 0xBC00
+#define PCI_DEVID_CN10KB_RPM 0xA09F
/* Registers */
#define RPMX_CMRX_CFG 0x00
#define RPMX_RX_TS_PREPEND BIT_ULL(22)
+#define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17)
+#define RPMX_CMRX_RX_ID_MAP 0x80
#define RPMX_CMRX_SW_INT 0x180
#define RPMX_CMRX_SW_INT_W1S 0x188
#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
#define RPMX_CMRX_LINK_CFG 0x1070
#define RPMX_MTI_PCS100X_CONTROL1 0x20000
-#define RPMX_MTI_LPCSX_CONTROL1 0x30000
#define RPMX_MTI_PCS_LBK BIT_ULL(14)
#define RPMX_MTI_LPCSX_CONTROL(id) (0x30000 | ((id) * 0x100))
@@ -33,20 +36,82 @@
#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8)
#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE BIT_ULL(19)
#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
+#define RPMX_MTI_MAC100X_CL23_PAUSE_QUANTA 0x80B0
+#define RPMX_MTI_MAC100X_CL45_PAUSE_QUANTA 0x80B8
+#define RPMX_MTI_MAC100X_CL67_PAUSE_QUANTA 0x80C0
#define RPMX_MTI_MAC100X_CL01_QUANTA_THRESH 0x80C8
-#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
+#define RPMX_MTI_MAC100X_CL23_QUANTA_THRESH 0x80D0
+#define RPMX_MTI_MAC100X_CL45_QUANTA_THRESH 0x80D8
+#define RPMX_MTI_MAC100X_CL67_QUANTA_THRESH 0x80E0
+#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
+#define RPMX_MTI_MAC100X_CL1011_PAUSE_QUANTA 0x8110
+#define RPMX_MTI_MAC100X_CL1213_PAUSE_QUANTA 0x8118
+#define RPMX_MTI_MAC100X_CL1415_PAUSE_QUANTA 0x8120
+#define RPMX_MTI_MAC100X_CL89_QUANTA_THRESH 0x8128
+#define RPMX_MTI_MAC100X_CL1011_QUANTA_THRESH 0x8130
+#define RPMX_MTI_MAC100X_CL1213_QUANTA_THRESH 0x8138
+#define RPMX_MTI_MAC100X_CL1415_QUANTA_THRESH 0x8140
#define RPMX_CMR_RX_OVR_BP 0x4120
#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8)
#define RPMX_CMR_RX_OVR_BP_BP(x) BIT_ULL((x) + 4)
+#define RPMX_CMR_CHAN_MSK_OR 0x4118
#define RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX 0x12000
#define RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX 0x13000
#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
#define RPM_LMAC_FWI 0xa
+#define RPM_TX_EN BIT_ULL(0)
+#define RPM_RX_EN BIT_ULL(1)
+#define RPMX_CMRX_PRT_CBFC_CTL 0x5B08
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_RX_SHIFT 33
+#define RPMX_CMRX_PRT_CBFC_CTL_PHYS_BP_SHIFT 16
+#define RPMX_CMRX_PRT_CBFC_CTL_LOGL_EN_TX_SHIFT 0
+#define RPM_PFC_CLASS_MASK GENMASK_ULL(48, 33)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_PAD_EN BIT_ULL(11)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD BIT_ULL(7)
+#define RPM_DEFAULT_PAUSE_TIME 0x7FF
+
+#define RPMX_MTI_MAC100X_XIF_MODE 0x8100
+#define RPMX_ONESTEP_ENABLE BIT_ULL(5)
+#define RPMX_TS_BINARY_MODE BIT_ULL(11)
+#define RPMX_CONST1 0x2008
+
+/* FEC stats */
+#define RPMX_MTI_STAT_STATN_CONTROL 0x10018
+#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
+#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(27)
+#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2 0x40050
+#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3 0x40058
+#define RPMX_MTI_FCFECX_VL0_CCW_LO 0x38618
+#define RPMX_MTI_FCFECX_VL0_NCCW_LO 0x38620
+#define RPMX_MTI_FCFECX_VL1_CCW_LO 0x38628
+#define RPMX_MTI_FCFECX_VL1_NCCW_LO 0x38630
+#define RPMX_MTI_FCFECX_CW_HI 0x38638
+
+/* CN10KB CSR Declaration */
+#define RPM2_CMRX_SW_INT 0x1b0
+#define RPM2_CMRX_SW_INT_ENA_W1S 0x1b8
+#define RPM2_CMR_CHAN_MSK_OR 0x3120
+#define RPM2_CMR_RX_OVR_BP_EN BIT_ULL(2)
+#define RPM2_CMR_RX_OVR_BP_BP BIT_ULL(1)
+#define RPM2_CMR_RX_OVR_BP 0x3130
+#define RPM2_CSR_OFFSET 0x3e00
+#define RPM2_CMRX_PRT_CBFC_CTL 0x6510
+#define RPM2_CMRX_RX_LMACS 0x100
+#define RPM2_CMRX_RX_LOGL_XON 0x3100
+#define RPM2_CMRX_RX_STAT2 0x3010
+#define RPM2_USX_PCSX_CONTROL1 0x80000
+#define RPM2_USX_PCS_LBK BIT_ULL(14)
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
+u32 rpm_get_lmac_fifo_len(void *rpmd, int lmac_id);
+u32 rpm2_get_lmac_fifo_len(void *rpmd, int lmac_id);
int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable);
void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable);
int rpm_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause,
@@ -57,4 +122,13 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rpm_lmac_get_pfc_frm_cfg(void *rpmd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+int rpm2_get_nr_lmacs(void *rpmd);
+bool is_dev_rpm2(void *rpmd);
+int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 3ca6b942ebe2..9f673bda9dbd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -16,14 +16,14 @@
#include "rvu.h"
#include "rvu_reg.h"
#include "ptp.h"
+#include "mcs.h"
#include "rvu_trace.h"
+#include "rvu_npc_hash.h"
#define DRV_NAME "rvu_af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
-static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
-
static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
@@ -68,6 +68,8 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
hw->cap.nix_tx_link_bp = true;
hw->cap.nix_rx_multicast = true;
hw->cap.nix_shaper_toggle_wait = false;
+ hw->cap.npc_hash_extract = false;
+ hw->cap.npc_exact_match_enabled = false;
hw->rvu = rvu;
if (is_rvu_pre_96xx_C0(rvu)) {
@@ -85,6 +87,9 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
if (!is_rvu_otx2(rvu))
hw->cap.per_pf_mbox_regs = true;
+
+ if (is_rvu_npc_hash_extract_en(rvu))
+ hw->cap.npc_hash_extract = true;
}
/* Poll a RVU block's register 'offset', for a 'zero'
@@ -412,7 +417,7 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
*hwvf = cfg & 0xFFF;
}
-static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
int pf, func;
u64 cfg;
@@ -520,8 +525,11 @@ static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
- if (err)
- dev_err(rvu->dev, "HW block:%d reset failed\n", blkaddr);
+ if (err) {
+ dev_err(rvu->dev, "HW block:%d reset timeout retrying again\n", blkaddr);
+ while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
+ ;
+ }
}
static void rvu_reset_all_blocks(struct rvu *rvu)
@@ -1119,6 +1127,12 @@ cpt:
goto cgx_err;
}
+ err = rvu_npc_exact_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "failed to initialize exact match table\n");
+ return err;
+ }
+
/* Assign MACs for CGX mapped functions */
rvu_setup_pfvf_macaddress(rvu);
@@ -1144,8 +1158,22 @@ cpt:
rvu_program_channels(rvu);
+ err = rvu_mcs_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
+ goto nix_err;
+ }
+
+ err = rvu_cpt_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__);
+ goto mcs_err;
+ }
+
return 0;
+mcs_err:
+ rvu_mcs_exit(rvu);
nix_err:
rvu_nix_freemem(rvu);
npa_err:
@@ -1988,6 +2016,7 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
rsp->nix_fixed_txschq_mapping = hw->cap.nix_fixed_txschq_mapping;
rsp->nix_shaping = hw->cap.nix_shaping;
+ rsp->npc_hash_extract = hw->cap.npc_hash_extract;
return 0;
}
@@ -2253,7 +2282,7 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
}
static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
- int num, int type)
+ int num, int type, unsigned long *pf_bmap)
{
struct rvu_hwinfo *hw = rvu->hw;
int region;
@@ -2265,6 +2294,9 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
*/
if (type == TYPE_AFVF) {
for (region = 0; region < num; region++) {
+ if (!test_bit(region, pf_bmap))
+ continue;
+
if (hw->cap.per_pf_mbox_regs) {
bar4 = rvu_read64(rvu, BLKADDR_RVUM,
RVU_AF_PFX_BAR4_ADDR(0)) +
@@ -2286,6 +2318,9 @@ static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
* RVU_AF_PF_BAR4_ADDR register.
*/
for (region = 0; region < num; region++) {
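+ /* Skip the mbox region of a PF that is not enabled */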
+ if (!test_bit(region, pf_bmap))
+ continue;
+
if (hw->cap.per_pf_mbox_regs) {
bar4 = rvu_read64(rvu, BLKADDR_RVUM,
RVU_AF_PFX_BAR4_ADDR(region));
@@ -2314,20 +2349,41 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
int err = -EINVAL, i, dir, dir_up;
void __iomem *reg_base;
struct rvu_work *mwork;
+ unsigned long *pf_bmap;
void **mbox_regions;
const char *name;
+ u64 cfg;
- mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
- if (!mbox_regions)
+ pf_bmap = bitmap_zalloc(num, GFP_KERNEL);
+ if (!pf_bmap)
return -ENOMEM;
+ /* RVU VFs */
+ if (type == TYPE_AFVF)
+ bitmap_set(pf_bmap, 0, num);
+
+ if (type == TYPE_AFPF) {
+ /* Mark enabled PFs in bitmap */
+ for (i = 0; i < num; i++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(i));
+ if (cfg & BIT_ULL(20))
+ set_bit(i, pf_bmap);
+ }
+ }
+
+ mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+ if (!mbox_regions) {
+ err = -ENOMEM;
+ goto free_bitmap;
+ }
+
switch (type) {
case TYPE_AFPF:
name = "rvu_afpf_mailbox";
dir = MBOX_DIR_AFPF;
dir_up = MBOX_DIR_AFPF_UP;
reg_base = rvu->afreg_base;
- err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF, pf_bmap);
if (err)
goto free_regions;
break;
@@ -2336,7 +2392,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
dir = MBOX_DIR_PFVF;
dir_up = MBOX_DIR_PFVF_UP;
reg_base = rvu->pfreg_base;
- err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF, pf_bmap);
if (err)
goto free_regions;
break;
@@ -2367,16 +2423,19 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
}
err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
- reg_base, dir, num);
+ reg_base, dir, num, pf_bmap);
if (err)
goto exit;
err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
- reg_base, dir_up, num);
+ reg_base, dir_up, num, pf_bmap);
if (err)
goto exit;
for (i = 0; i < num; i++) {
+ if (!test_bit(i, pf_bmap))
+ continue;
+
mwork = &mw->mbox_wrk[i];
mwork->rvu = rvu;
INIT_WORK(&mwork->work, mbox_handler);
@@ -2385,8 +2444,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
mwork->rvu = rvu;
INIT_WORK(&mwork->work, mbox_up_handler);
}
- kfree(mbox_regions);
- return 0;
+ goto free_regions;
exit:
destroy_workqueue(mw->mbox_wq);
@@ -2395,6 +2453,8 @@ unmap_regions:
iounmap((void __iomem *)mbox_regions[num]);
free_regions:
kfree(mbox_regions);
+free_bitmap:
+ bitmap_free(pf_bmap);
return err;
}
@@ -2545,6 +2605,9 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
{
+ if (rvu_npc_exact_has_match_table(rvu))
+ rvu_npc_exact_reset(rvu, pcifunc);
+
mutex_lock(&rvu->flr_lock);
/* Reset order should reflect inter-block dependencies:
* 1. Reset any packet/work sources (NIX, CPT, TIM)
@@ -2561,6 +2624,12 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
rvu_reset_lmt_map_tbl(rvu, pcifunc);
rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ /* In scenarios where PF/VF drivers detach NIXLF without freeing MCAM
+ * entries, check and free the MCAM entries explicitly to avoid leak.
+ * Since LF is detached use LF number as -1.
+ */
+ rvu_npc_free_mcam_entries(rvu, pcifunc, -1);
+
mutex_unlock(&rvu->flr_lock);
}
@@ -3268,6 +3337,7 @@ err_mbox:
err_hwsetup:
rvu_cgx_exit(rvu);
rvu_fwdata_exit(rvu);
+ rvu_mcs_exit(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
@@ -3294,6 +3364,7 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
rvu_fwdata_exit(rvu);
+ rvu_mcs_exit(rvu);
rvu_mbox_destroy(&rvu->afpf_wq_info);
rvu_disable_sriov(rvu);
rvu_reset_all_blocks(rvu);
@@ -3329,12 +3400,18 @@ static int __init rvu_init_module(void)
if (err < 0)
goto ptp_err;
+ err = pci_register_driver(&mcs_driver);
+ if (err < 0)
+ goto mcs_err;
+
err = pci_register_driver(&rvu_driver);
if (err < 0)
goto rvu_err;
return 0;
rvu_err:
+ pci_unregister_driver(&mcs_driver);
+mcs_err:
pci_unregister_driver(&ptp_driver);
ptp_err:
pci_unregister_driver(&cgx_driver);
@@ -3345,6 +3422,7 @@ ptp_err:
static void __exit rvu_cleanup_module(void)
{
pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&mcs_driver);
pci_unregister_driver(&ptp_driver);
pci_unregister_driver(&cgx_driver);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 66e45d733824..d655bf04a483 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -25,6 +25,8 @@
/* Subsystem Device ID */
#define PCI_SUBSYS_DEVID_96XX 0xB200
#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
+#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
@@ -62,6 +64,10 @@ struct rvu_debugfs {
struct dentry *nix;
struct dentry *npc;
struct dentry *cpt;
+ struct dentry *mcs_root;
+ struct dentry *mcs;
+ struct dentry *mcs_rx;
+ struct dentry *mcs_tx;
struct dump_ctx npa_aura_ctx;
struct dump_ctx npa_pool_ctx;
struct dump_ctx nix_cq_ctx;
@@ -102,6 +108,8 @@ struct rvu_block {
u64 lfreset_reg;
unsigned char name[NAME_SIZE];
struct rvu *rvu;
+ u64 cpt_flt_eng_map[3];
+ u64 cpt_rcvrd_eng_map[3];
};
struct nix_mcast {
@@ -338,6 +346,8 @@ struct hw_cap {
bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
bool programmable_chans; /* Channels programmable ? */
bool ipolicer;
+ bool npc_hash_extract; /* Hash extract enabled ? */
+ bool npc_exact_match_enabled; /* Exact match supported ? */
};
struct rvu_hwinfo {
@@ -369,6 +379,7 @@ struct rvu_hwinfo {
struct rvu *rvu;
struct npc_pkind pkind;
struct npc_mcam mcam;
+ struct npc_exact_table *table;
};
struct mbox_wq_info {
@@ -401,9 +412,15 @@ struct rvu_fwdata {
u32 ptp_ext_tstamp;
#define FWDATA_RESERVED_MEM 1022
u64 reserved[FWDATA_RESERVED_MEM];
-#define CGX_MAX 5
+#define CGX_MAX 9
#define CGX_LMACS_MAX 4
- struct cgx_lmac_fwdata_s cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
+#define CGX_LMACS_USX 8
+ union {
+ struct cgx_lmac_fwdata_s
+ cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
+ struct cgx_lmac_fwdata_s
+ cgx_fw_data_usx[CGX_MAX][CGX_LMACS_USX];
+ };
/* Do not add new fields below this line */
};
@@ -419,6 +436,7 @@ struct npc_kpu_profile_adapter {
const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
const struct npc_kpu_profile *kpu; /* array[kpus] */
struct npc_mcam_kex *mkex;
+ struct npc_mcam_kex_hash *mkex_hash;
bool custom;
size_t pkinds;
size_t kpus;
@@ -443,6 +461,7 @@ struct rvu {
struct rvu_pfvf *pf;
struct rvu_pfvf *hwvf;
struct mutex rsrc_lock; /* Serialize resource alloc/free */
+ struct mutex alias_lock; /* Serialize bar2 alias access */
int vfs; /* Number of VFs attached to RVU */
int nix_blkaddr[MAX_NIX_BLKS];
@@ -468,7 +487,7 @@ struct rvu {
u8 cgx_mapped_pfs;
u8 cgx_cnt_max; /* CGX port count max */
u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
- u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
+ u64 *cgxlmac2pf_map; /* bitmap of mapped pfs for
* every cgx lmac port
*/
unsigned long pf_notify_bmap; /* Flags for PF notification */
@@ -493,6 +512,9 @@ struct rvu {
struct ptp *ptp;
+ int mcs_blk_cnt;
+ int cpt_pf_num;
+
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
@@ -500,6 +522,14 @@ struct rvu {
/* RVU switch implementation over NPC with DMAC rules */
struct rvu_switch rswitch;
+
+ struct work_struct mcs_intr_work;
+ struct workqueue_struct *mcs_intr_wq;
+ struct list_head mcs_intrq_head;
+ /* mcs interrupt queue lock */
+ spinlock_t mcs_intrq_lock;
+ /* CPT interrupt lock */
+ spinlock_t cpt_intr_lock;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -522,6 +552,17 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
return readq(rvu->pfreg_base + offset);
}
+static inline void rvu_bar2_sel_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+{
+ /* HW requires read back of RVU_AF_BAR2_SEL register to make sure completion of
+ * write operation.
+ */
+ rvu_write64(rvu, block, offset, val);
+ rvu_read64(rvu, block, offset);
+ /* Barrier to ensure read completes before accessing LF registers */
+ mb();
+}
+
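Editor's note: rvu_bar2_sel_write64() copes with posted writes by reading the BAR2_SEL register back and then issuing a full barrier before any LF register access. A userspace sketch of the same write/read-back/barrier pattern; the register array and accessors are stand-ins, not kernel APIs.

/* Editor's sketch, not part of this patch; 'regs' models MMIO. */
#include <stdint.h>
#include <stdio.h>

static volatile uint64_t regs[4];                 /* fake register file */

#define barrier() __asm__ __volatile__("" ::: "memory")

static void mmio_write64(volatile uint64_t *r, uint64_t v) { *r = v; }
static uint64_t mmio_read64(volatile uint64_t *r)          { return *r; }

static void bar2_sel_write64(volatile uint64_t *sel_reg, uint64_t val)
{
	mmio_write64(sel_reg, val);   /* posted write */
	(void)mmio_read64(sel_reg);   /* read back: forces write completion */
	barrier();                    /* order against later LF accesses */
}

int main(void)
{
	bar2_sel_write64(&regs[0], (1ULL << 16) | 0x42);   /* select a pcifunc */
	printf("BAR2_SEL = 0x%llx\n", (unsigned long long)regs[0]);
	return 0;
}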
/* Silicon revisions */
static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu)
{
@@ -575,6 +616,17 @@ static inline bool is_rvu_otx2(struct rvu *rvu)
midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
+static inline bool is_rvu_npc_hash_extract_en(struct rvu *rvu)
+{
+ u64 npc_const3;
+
+ npc_const3 = rvu_read64(rvu, BLKADDR_NPC, NPC_AF_CONST3);
+ if (!(npc_const3 & BIT_ULL(62)))
+ return false;
+
+ return true;
+}
+
static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid,
u8 lmacid, u8 chan)
{
@@ -754,7 +806,6 @@ u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
u32 convert_bytes_to_dwrr_mtu(u32 bytes);
/* NPC APIs */
-int rvu_npc_init(struct rvu *rvu);
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
@@ -773,14 +824,17 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u64 chan);
void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
bool enable);
+
void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, int type, bool enable);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable);
void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index);
+
void rvu_npc_get_mcam_entry_alloc_info(struct rvu *rvu, u16 pcifunc,
int blkaddr, int *alloc_cnt,
int *enable_cnt);
@@ -806,11 +860,21 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
-
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause,
+ u16 pfc_en);
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause);
+u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
int index);
+int rvu_npc_init(struct rvu *rvu);
+int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
+ u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
+ u64 bcast_mcast_val, u64 bcast_mcast_mask);
+void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx);
+bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf);
/* CPT APIs */
int rvu_cpt_register_interrupts(struct rvu *rvu);
@@ -818,11 +882,18 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu);
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
int slot);
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
+int rvu_cpt_init(struct rvu *rvu);
+
+#define NDC_AF_BANK_MASK GENMASK_ULL(7, 0)
+#define NDC_AF_BANK_LINE_MASK GENMASK_ULL(31, 16)
/* CN10K RVU */
int rvu_set_channels_base(struct rvu *rvu);
void rvu_program_channels(struct rvu *rvu);
+/* CN10K NIX */
+void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw);
+
/* CN10K RVU - LMT*/
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
@@ -834,6 +905,8 @@ static inline void rvu_dbg_init(struct rvu *rvu) {}
static inline void rvu_dbg_exit(struct rvu *rvu) {}
#endif
+int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr);
+
/* RVU Switch */
void rvu_switch_enable(struct rvu *rvu);
void rvu_switch_disable(struct rvu *rvu);
@@ -842,4 +915,12 @@ void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
u64 pkind, u8 var_len_off, u8 var_len_off_mask,
u8 shift_dir);
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
+
+/* CN10K MCS */
+int rvu_mcs_init(struct rvu *rvu);
+int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
+void rvu_mcs_ptp_cfg(struct rvu *rvu, u8 rpm_id, u8 lmac_id, bool ena);
+void rvu_mcs_exit(struct rvu *rvu);
+
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 2ca182a4ce82..83b342fa8d75 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -14,6 +14,7 @@
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
+#include "rvu_npc_hash.h"
struct cgx_evq_entry {
struct list_head evq_node;
@@ -54,8 +55,9 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
return (cgx_features_get(cgxd) & feature);
}
+#define CGX_OFFSET(x) ((x) * rvu->hw->lmac_per_cgx)
/* Returns bitmap of mapped PFs */
-static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
@@ -70,7 +72,8 @@ int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
if (!pfmap)
return -ENODEV;
else
- return find_first_bit(&pfmap, 16);
+ return find_first_bit(&pfmap,
+ rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
}
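Editor's aside: with cgxlmac2pf_map widened to u64, each LMAC entry is a bitmap of mapped PFs searched over cgx_cnt_max * lmac_per_cgx bits. A sketch of draining such a bitmap, with __builtin_ctzll standing in for find_first_bit()/clear_bit(); the PF ids and bit count are made up.

/* Editor's sketch, not part of this patch. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pfmap = (1ULL << 1) | (1ULL << 5) | (1ULL << 17);
	unsigned int nbits = 9 * 8;        /* e.g. cgx_cnt_max * lmac_per_cgx */

	while (pfmap) {
		unsigned int pfid = (unsigned int)__builtin_ctzll(pfmap);

		if (pfid >= nbits)
			break;
		pfmap &= ~(1ULL << pfid);          /* clear_bit() */
		printf("notify PF%u\n", pfid);     /* per-PF link event here */
	}
	return 0;
}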
static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
@@ -128,14 +131,14 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
if (!cgx_cnt_max)
return 0;
- if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
+ if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
return -EINVAL;
/* Alloc map table
* An additional entry is required since PF id starts from 1 and
* hence entry at offset 0 is invalid.
*/
- size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+ size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
if (!rvu->pf2cgxlmac_map)
return -ENOMEM;
@@ -144,9 +147,10 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
memset(rvu->pf2cgxlmac_map, 0xFF, size);
/* Reverse map table */
- rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
- cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
- GFP_KERNEL);
+ rvu->cgxlmac2pf_map =
+ devm_kzalloc(rvu->dev,
+ cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
+ GFP_KERNEL);
if (!rvu->cgxlmac2pf_map)
return -ENOMEM;
@@ -155,7 +159,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
if (!rvu_cgx_pdata(cgx, rvu))
continue;
lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
- for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+ for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
iter);
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@@ -234,7 +238,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
do {
- pfid = find_first_bit(&pfmap, 16);
+ pfid = find_first_bit(&pfmap,
+ rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
clear_bit(pfid, &pfmap);
/* check if notification is enabled */
@@ -309,7 +314,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
if (!cgxd)
continue;
lmac_bmap = cgx_get_lmac_bmap(cgxd);
- for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
+ for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
if (err)
dev_err(rvu->dev,
@@ -395,7 +400,7 @@ int rvu_cgx_exit(struct rvu *rvu)
if (!cgxd)
continue;
lmac_bmap = cgx_get_lmac_bmap(cgxd);
- for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
+ for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
cgx_lmac_evh_unregister(cgxd, lmac);
}
@@ -441,22 +446,33 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ void *cgxd;
if (!is_cgx_config_permitted(rvu, pcifunc))
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
- cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
+ return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
+}
- return 0;
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
+{
+ struct mac_ops *mac_ops;
+
+ mac_ops = get_mac_ops(cgxd);
+ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
int pf = rvu_get_pf(pcifunc);
int i = 0, lmac_count = 0;
+ struct mac_ops *mac_ops;
u8 max_dmac_filters;
u8 cgx_id, lmac_id;
void *cgx_dev;
@@ -464,10 +480,20 @@ void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
if (!is_cgx_config_permitted(rvu, pcifunc))
return;
+ if (rvu_npc_exact_has_match_table(rvu)) {
+ rvu_npc_exact_reset(rvu, pcifunc);
+ return;
+ }
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_dev = cgx_get_pdata(cgx_id);
lmac_count = cgx_get_lmac_cnt(cgx_dev);
- max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
+
+ mac_ops = get_mac_ops(cgx_dev);
+ if (!mac_ops)
+ return;
+
+ max_dmac_filters = mac_ops->dmac_filter_count / lmac_count;
for (i = 0; i < max_dmac_filters; i++)
cgx_lmac_addr_del(cgx_id, lmac_id, i);
@@ -553,6 +579,7 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
struct cgx_fec_stats_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_idx, lmac;
void *cgxd;
@@ -561,7 +588,8 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
- return cgx_get_fec_stats(cgxd, lmac, rsp);
+ mac_ops = get_mac_ops(cgxd);
+ return mac_ops->get_fec_stats(cgxd, lmac, rsp);
}
int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
@@ -574,6 +602,9 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_set(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
@@ -592,6 +623,9 @@ int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_add(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
if (rc >= 0) {
@@ -612,6 +646,9 @@ int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_del(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}
@@ -633,6 +670,11 @@ int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
return 0;
}
+ if (rvu_npc_exact_has_match_table(rvu)) {
+ rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
+ return 0;
+ }
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
return 0;
@@ -670,6 +712,10 @@ int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ /* Disable drop on non hit rule */
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_promisc_config(cgx_id, lmac_id, true);
@@ -685,6 +731,10 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return -EPERM;
+ /* Disable drop on non hit rule */
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgx_lmac_promisc_config(cgx_id, lmac_id, false);
@@ -723,6 +773,8 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
/* This flag is required to clean up CGX conf if app gets killed */
pfvf->hw_rx_tstamp_en = enable;
+ /* Inform MCS about 8B RX header */
+ rvu_mcs_ptp_cfg(rvu, cgx_id, lmac_id, enable);
return 0;
}
@@ -823,6 +875,22 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu)
return fifo_len;
}
+u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
+{
+ struct mac_ops *mac_ops;
+ void *cgxd;
+
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ return 0;
+
+ mac_ops = get_mac_ops(cgxd);
+ if (!mac_ops->lmac_fifo_len)
+ return 0;
+
+ return mac_ops->lmac_fifo_len(cgxd, lmac);
+}
+
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
@@ -853,6 +921,45 @@ int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_pfc = 0, tx_pfc = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
+ return 0;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
+ * if received from other PF/VF simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return LMAC_AF_ERR_PF_NOT_MAPPED;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
+ if (tx_pfc || rx_pfc) {
+ dev_warn(rvu->dev,
+ "Can not configure 802.3X flow control as PFC frames are enabled");
+ return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
+}
+
int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
struct cgx_pause_frm_cfg *req,
struct cgx_pause_frm_cfg *rsp)
@@ -860,11 +967,9 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
int pf = rvu_get_pf(req->hdr.pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ int err = 0;
void *cgxd;
- if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
- return 0;
-
/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
@@ -876,13 +981,11 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
mac_ops = get_mac_ops(cgxd);
if (req->set)
- mac_ops->mac_enadis_pause_frm(cgxd, lmac_id,
- req->tx_pause, req->rx_pause);
+ err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
else
- mac_ops->mac_get_pause_frm_status(cgxd, lmac_id,
- &rsp->tx_pause,
- &rsp->rx_pause);
- return 0;
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+
+ return err;
}
int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
@@ -1014,15 +1117,22 @@ int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
u8 cgx_id, lmac_id;
if (!rvu->fwdata)
- return -ENXIO;
+ return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;
if (!is_pf_cgxmapped(rvu, pf))
return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
- sizeof(struct cgx_lmac_fwdata_s));
+ if (rvu->hw->lmac_per_cgx == CGX_LMACS_USX)
+ memcpy(&rsp->fwdata,
+ &rvu->fwdata->cgx_fw_data_usx[cgx_id][lmac_id],
+ sizeof(struct cgx_lmac_fwdata_s));
+ else
+ memcpy(&rsp->fwdata,
+ &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
+ sizeof(struct cgx_lmac_fwdata_s));
+
return 0;
}
@@ -1043,7 +1153,7 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
return 0;
}
-int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
struct msg_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
@@ -1053,12 +1163,16 @@ int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);
+
return cgx_lmac_addr_reset(cgx_id, lmac_id);
}
int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
struct cgx_mac_addr_update_req *req,
- struct msg_rsp *rsp)
+ struct cgx_mac_addr_update_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
u8 cgx_id, lmac_id;
@@ -1066,6 +1180,73 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
return LMAC_AF_ERR_PERM_DENIED;
+ if (rvu_npc_exact_has_match_table(rvu))
+ return rvu_npc_exact_mac_addr_update(rvu, req, rsp);
+
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}
+
+int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
+ u8 rx_pause, u16 pfc_en)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 rx_8023 = 0, tx_8023 = 0;
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
+ * if received from other PF/VF simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
+ if (tx_8023 || rx_8023) {
+ dev_warn(rvu->dev,
+ "Can not configure PFC as 802.3X pause frames are enabled");
+ return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
+ pcifunc & RVU_PFVF_FUNC_MASK)) {
+ mutex_unlock(&rvu->rsrc_lock);
+ return LMAC_AF_ERR_PERM_DENIED;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
+}
+
+int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
+ struct cgx_pfc_cfg *req,
+ struct cgx_pfc_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+ int err;
+
+ /* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
+ * if received from other PF/VF simply ACK, nothing to do.
+ */
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
+ req->rx_pause, req->pfc_en);
+
+ mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
+ return err;
+}
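Editor's note: rvu_cgx_cfg_pause_frm() and rvu_cgx_prio_flow_ctrl_cfg() each refuse to touch one flow-control mode while the other is active on the LMAC. A sketch of that mutual-exclusion check, reduced to plain flags; the error codes in the comments are the ones used above.

/* Editor's sketch, not part of this patch. */
#include <stdbool.h>
#include <stdio.h>

struct lmac_fc_state { bool pause_8023; bool pfc; };

static int cfg_8023_pause(struct lmac_fc_state *st, bool enable)
{
	if (st->pfc)
		return -1;     /* LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED */
	st->pause_8023 = enable;
	return 0;
}

static int cfg_pfc(struct lmac_fc_state *st, bool enable)
{
	if (st->pause_8023)
		return -1;     /* LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED */
	st->pfc = enable;
	return 0;
}

int main(void)
{
	struct lmac_fc_state st = { 0 };

	printf("enable PFC        : %d\n", cfg_pfc(&st, true));
	printf("enable 802.3x     : %d\n", cfg_8023_pause(&st, true)); /* rejected */
	printf("disable PFC       : %d\n", cfg_pfc(&st, false));
	printf("enable 802.3x now : %d\n", cfg_8023_pause(&st, true));
	return 0;
}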
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7dbbc115cde4..0e74c5a2231e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -60,13 +60,14 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
u64 iova, u64 *lmt_addr)
{
u64 pa, val, pf;
- int err;
+ int err = 0;
if (!iova) {
dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__);
return -EINVAL;
}
+ mutex_lock(&rvu->rsrc_lock);
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
pf = rvu_get_pf(pcifunc) & 0x1F;
val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
@@ -76,12 +77,13 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
if (err) {
dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__);
- return err;
+ goto exit;
}
val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
if (val & ~0x1ULL) {
dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val);
- return -EIO;
+ err = -EIO;
+ goto exit;
}
/* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
* PA[11:0] = IOVA[11:0]
@@ -89,8 +91,9 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
pa &= GENMASK_ULL(39, 0);
*lmt_addr = (pa << 12) | (iova & 0xFFF);
-
- return 0;
+exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return err;
}
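Editor's note: once the poll succeeds, rvu_get_lmtaddr() rebuilds the physical LMT address from FLIT0 bits [57:18] (PA[51:12]) and the low 12 bits of the IOVA. A sketch of that reconstruction with a local GENMASK_ULL and invented register values.

/* Editor's sketch, not part of this patch; register values are fake. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t flit0 = 0x0000123456789ULL << 18;   /* fake RVU_AF_SMMU_TLN_FLIT0 */
	uint64_t iova  = 0xdeadb000ULL | 0xabc;      /* fake LMTLINE IOVA */
	uint64_t pa, lmt_addr;

	pa = flit0 >> 18;              /* PA[51:12] sits at FLIT0[57:18] */
	pa &= GENMASK_ULL(39, 0);      /* keep the 40-bit page frame */
	lmt_addr = (pa << 12) | (iova & 0xFFF);

	printf("lmt_addr = 0x%llx\n", (unsigned long long)lmt_addr);
	return 0;
}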
static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
@@ -538,3 +541,21 @@ void rvu_program_channels(struct rvu *rvu)
rvu_lbk_set_channels(rvu);
rvu_rpm_set_channels(rvu);
}
+
+void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ int blkaddr = nix_hw->blkaddr;
+ u64 cfg;
+
+ /* Set AF vWQE timer interval to a LF configurable range of
+ * 6.4us to 1.632ms.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_VWQE_TIMER, 0x3FULL);
+
+ /* Enable NIX RX stream and global conditional clock to
+ * avoild multiple free of NPA buffers.
+ * avoid multiple free of NPA buffers.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CFG);
+ cfg |= BIT_ULL(1) | BIT_ULL(2);
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 45357deecabb..f047185f38e0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -17,7 +17,7 @@
#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
/* Length of initial context fetch in 128 byte words */
-#define CPT_CTX_ILEN 2
+#define CPT_CTX_ILEN 1ULL
#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
({ \
@@ -37,34 +37,68 @@
(_rsp)->free_sts_##etype = free_sts; \
})
-static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
+static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
{
struct rvu_block *block = ptr;
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
- u64 reg0, reg1, reg2;
-
- reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
- reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
- if (!is_rvu_otx2(rvu)) {
- reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2));
- dev_err_ratelimited(rvu->dev,
- "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx",
- reg0, reg1, reg2);
- } else {
- dev_err_ratelimited(rvu->dev,
- "Received CPTAF FLT irq : 0x%llx, 0x%llx",
- reg0, reg1);
+ u64 reg, val;
+ int i, eng;
+ u8 grp;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(vec));
+ dev_err_ratelimited(rvu->dev, "Received CPTAF FLT%d irq : 0x%llx", vec, reg);
+
+ i = -1;
+ while ((i = find_next_bit((unsigned long *)&reg, 64, i + 1)) < 64) {
+ switch (vec) {
+ case 0:
+ eng = i;
+ break;
+ case 1:
+ eng = i + 64;
+ break;
+ case 2:
+ eng = i + 128;
+ break;
+ }
+ grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF;
+ /* Disable and enable the engine which triggers fault */
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0);
+ val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng));
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp);
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL);
+
+ spin_lock(&rvu->cpt_intr_lock);
+ block->cpt_flt_eng_map[vec] |= BIT_ULL(i);
+ val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng));
+ val = val & 0x3;
+ if (val == 0x1 || val == 0x2)
+ block->cpt_rcvrd_eng_map[vec] |= BIT_ULL(i);
+ spin_unlock(&rvu->cpt_intr_lock);
}
-
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
- if (!is_rvu_otx2(rvu))
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg);
return IRQ_HANDLED;
}
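Editor's note: the reworked handler runs once per FLT vector; each set bit i in CPT_AF_FLTX_INT(vec) identifies global engine vec * 64 + i, which is bounced via its EXE_CTL registers and recorded in cpt_flt_eng_map for the new fault-info mailbox. A sketch of the bit-to-engine mapping and bookkeeping, with no real register access.

/* Editor's sketch, not part of this patch. */
#include <stdint.h>
#include <stdio.h>

static uint64_t flt_eng_map[3];    /* faulted engines, per FLT vector */

static void handle_flt_vec(int vec, uint64_t reg)
{
	while (reg) {
		int i = __builtin_ctzll(reg);   /* next faulting bit */
		int eng = vec * 64 + i;         /* global engine number */

		reg &= ~(1ULL << i);
		flt_eng_map[vec] |= 1ULL << i;  /* remembered for the mbox */
		printf("FLT%d: engine %d faulted, bounce its EXE_CTL\n", vec, eng);
	}
}

int main(void)
{
	handle_flt_vec(0, 0x5);    /* engines 0 and 2 */
	handle_flt_vec(2, 0x1);    /* engine 128 */
	return 0;
}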
+static irqreturn_t rvu_cpt_af_flt0_intr_handler(int irq, void *ptr)
+{
+ return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT0, ptr);
+}
+
+static irqreturn_t rvu_cpt_af_flt1_intr_handler(int irq, void *ptr)
+{
+ return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT1, ptr);
+}
+
+static irqreturn_t rvu_cpt_af_flt2_intr_handler(int irq, void *ptr)
+{
+ return cpt_af_flt_intr_handler(CPT_10K_AF_INT_VEC_FLT2, ptr);
+}
+
static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
{
struct rvu_block *block = ptr;
@@ -119,8 +153,10 @@ static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
int i;
/* Disable all CPT AF interrupts */
- for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++)
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF);
+
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
@@ -151,7 +187,7 @@ static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
/* Disable all CPT AF interrupts */
for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), ~0ULL);
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
@@ -172,17 +208,31 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
{
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
- char irq_name[16];
+ irq_handler_t flt_fn;
int i, ret;
for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
- snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
+
+ switch (i) {
+ case CPT_10K_AF_INT_VEC_FLT0:
+ flt_fn = rvu_cpt_af_flt0_intr_handler;
+ break;
+ case CPT_10K_AF_INT_VEC_FLT1:
+ flt_fn = rvu_cpt_af_flt1_intr_handler;
+ break;
+ case CPT_10K_AF_INT_VEC_FLT2:
+ flt_fn = rvu_cpt_af_flt2_intr_handler;
+ break;
+ }
ret = rvu_cpt_do_register_interrupt(block, off + i,
- rvu_cpt_af_flt_intr_handler,
- irq_name);
+ flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]);
if (ret)
goto err;
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ if (i == CPT_10K_AF_INT_VEC_FLT2)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF);
+ else
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
}
ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
@@ -209,8 +259,8 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
+ irq_handler_t flt_fn;
int i, offs, ret = 0;
- char irq_name[16];
if (!is_block_implemented(rvu->hw, blkaddr))
return 0;
@@ -227,13 +277,20 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
return cpt_10k_register_interrupts(block, offs);
for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
- snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ sprintf(&rvu->irq_name[(offs + i) * NAME_SIZE], "CPTAF FLT%d", i);
+ switch (i) {
+ case CPT_AF_INT_VEC_FLT0:
+ flt_fn = rvu_cpt_af_flt0_intr_handler;
+ break;
+ case CPT_AF_INT_VEC_FLT1:
+ flt_fn = rvu_cpt_af_flt1_intr_handler;
+ break;
+ }
ret = rvu_cpt_do_register_interrupt(block, offs + i,
- rvu_cpt_af_flt_intr_handler,
- irq_name);
+ flt_fn, &rvu->irq_name[(offs + i) * NAME_SIZE]);
if (ret)
goto err;
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
}
ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
@@ -291,7 +348,7 @@ static int get_cpt_pf_num(struct rvu *rvu)
static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
{
- int cpt_pf_num = get_cpt_pf_num(rvu);
+ int cpt_pf_num = rvu->cpt_pf_num;
if (rvu_get_pf(pcifunc) != cpt_pf_num)
return false;
@@ -303,7 +360,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
{
- int cpt_pf_num = get_cpt_pf_num(rvu);
+ int cpt_pf_num = rvu->cpt_pf_num;
if (rvu_get_pf(pcifunc) != cpt_pf_num)
return false;
@@ -372,8 +429,12 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
/* Set CPT LF group and priority */
val = (u64)req->eng_grpmsk << 48 | 1;
- if (!is_rvu_otx2(rvu))
- val |= (CPT_CTX_ILEN << 17);
+ if (!is_rvu_otx2(rvu)) {
+ if (req->ctx_ilen_valid)
+ val |= (req->ctx_ilen << 17);
+ else
+ val |= (CPT_CTX_ILEN << 17);
+ }
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
@@ -481,7 +542,7 @@ static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
*/
if (!is_rvu_otx2(rvu)) {
val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
- val |= rvu->hw->cpt_chan_base;
+ val |= (u64)rvu->hw->cpt_chan_base;
rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
@@ -580,7 +641,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
if (blkaddr < 0)
- return blkaddr;
+ return false;
/* Registers that can be accessed from PF/VF */
if ((offset & 0xFF000) == CPT_AF_LFX_CTL(0) ||
@@ -606,6 +667,7 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
} else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
/* Registers that can be accessed from PF */
switch (offset) {
+ case CPT_AF_DIAG:
case CPT_AF_CTL:
case CPT_AF_PF_FUNC:
case CPT_AF_BLK_RST:
@@ -762,10 +824,21 @@ int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
#define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48)
static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
- int blkaddr)
+ int blkaddr, struct cpt_rxc_time_cfg_req *save)
{
u64 dfrg_reg;
+ if (save) {
+ /* Save older config */
+ dfrg_reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
+ save->zombie_thres = FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg);
+ save->zombie_limit = FIELD_GET(RXC_ZOMBIE_LIMIT, dfrg_reg);
+ save->active_thres = FIELD_GET(RXC_ACTIVE_THRES, dfrg_reg);
+ save->active_limit = FIELD_GET(RXC_ACTIVE_LIMIT, dfrg_reg);
+
+ save->step = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
+ }
+
dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
@@ -790,7 +863,7 @@ int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
!is_cpt_vf(rvu, req->hdr.pcifunc))
return CPT_AF_ERR_ACCESS_DENIED;
- cpt_rxc_time_cfg(rvu, req, blkaddr);
+ cpt_rxc_time_cfg(rvu, req, blkaddr, NULL);
return 0;
}
@@ -801,9 +874,67 @@ int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
}
+int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr, ret;
+ u16 actual_slot;
+ u64 ctl, ctl2;
+
+ blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
+ req->slot, &actual_slot);
+ if (blkaddr < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+
+ cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+ ctl = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ ctl2 = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+
+ ret = rvu_lf_reset(rvu, block, cptlf);
+ if (ret)
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, cptlf);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), ctl);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), ctl2);
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_req *req,
+ struct cpt_flt_eng_info_rsp *rsp)
+{
+ struct rvu_block *block;
+ unsigned long flags;
+ int blkaddr, vec;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ block = &rvu->hw->block[blkaddr];
+ for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) {
+ spin_lock_irqsave(&rvu->cpt_intr_lock, flags);
+ rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec];
+ rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec];
+ if (req->reset) {
+ block->cpt_flt_eng_map[vec] = 0x0;
+ block->cpt_rcvrd_eng_map[vec] = 0x0;
+ }
+ spin_unlock_irqrestore(&rvu->cpt_intr_lock, flags);
+ }
+ return 0;
+}
+
static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
{
- struct cpt_rxc_time_cfg_req req;
+ struct cpt_rxc_time_cfg_req req, prev;
int timeout = 2000;
u64 reg;
@@ -819,7 +950,7 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
req.active_thres = 1;
req.active_limit = 1;
- cpt_rxc_time_cfg(rvu, &req, blkaddr);
+ cpt_rxc_time_cfg(rvu, &req, blkaddr, &prev);
do {
reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
@@ -845,70 +976,68 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
if (timeout == 0)
dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
+
+ /* Restore config */
+ cpt_rxc_time_cfg(rvu, &prev, blkaddr, NULL);
}
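Editor's aside: cpt_rxc_time_cfg() can now snapshot the running CPT_AF_RXC_DFRG configuration so cpt_rxc_teardown() restores it after forcing aggressive expiry. A sketch of the save/modify/restore pattern with local FIELD_GET/FIELD_PREP stand-ins; the field bit positions below are placeholders, not the hardware layout.

/* Editor's sketch, not part of this patch; masks and values are invented. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_GET(m, v)   (((v) & (m)) >> __builtin_ctzll(m))
#define FIELD_PREP(m, v)  (((uint64_t)(v) << __builtin_ctzll(m)) & (m))

#define RXC_ZOMBIE_THRES GENMASK_ULL(59, 48)   /* placeholder position */
#define RXC_ACTIVE_THRES GENMASK_ULL(27, 16)   /* placeholder position */

static uint64_t rxc_dfrg;                      /* fake CPT_AF_RXC_DFRG */

int main(void)
{
	uint64_t saved_zombie, saved_active;

	rxc_dfrg = FIELD_PREP(RXC_ZOMBIE_THRES, 100) |
		   FIELD_PREP(RXC_ACTIVE_THRES, 50);

	/* Save the running thresholds before forcing aggressive expiry. */
	saved_zombie = FIELD_GET(RXC_ZOMBIE_THRES, rxc_dfrg);
	saved_active = FIELD_GET(RXC_ACTIVE_THRES, rxc_dfrg);

	/* Teardown values: expire contexts as fast as possible. */
	rxc_dfrg = FIELD_PREP(RXC_ZOMBIE_THRES, 1) |
		   FIELD_PREP(RXC_ACTIVE_THRES, 1);

	/* ... poll ACTIVE/ZOMBIE counters here, then restore ... */
	rxc_dfrg = FIELD_PREP(RXC_ZOMBIE_THRES, saved_zombie) |
		   FIELD_PREP(RXC_ACTIVE_THRES, saved_active);

	printf("restored DFRG = 0x%llx\n", (unsigned long long)rxc_dfrg);
	return 0;
}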
-#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
-#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
-#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
-#define INPROG_GWB(reg) (((reg) >> 40) & 0xFF)
+#define INFLIGHT GENMASK_ULL(8, 0)
+#define GRB_CNT GENMASK_ULL(39, 32)
+#define GWB_CNT GENMASK_ULL(47, 40)
+#define XQ_XOR GENMASK_ULL(63, 63)
+#define DQPTR GENMASK_ULL(19, 0)
+#define NQPTR GENMASK_ULL(51, 32)
static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
{
- int i = 0, hard_lp_ctr = 100000;
- u64 inprog, grp_ptr;
- u16 nq_ptr, dq_ptr;
+ int timeout = 1000000;
+ u64 inprog, inst_ptr;
+ u64 qsize, pending;
+ int i = 0;
/* Disable instructions enqueuing */
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);
- /* Disable executions in the LF's queue */
inprog = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
- inprog &= ~BIT_ULL(16);
+ inprog |= BIT_ULL(16);
rvu_write64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);
- /* Wait for CPT queue to become execution-quiescent */
+ qsize = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_SIZE)) & 0x7FFF;
do {
- inprog = rvu_read64(rvu, blkaddr,
- CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
- if (INPROG_GRB_PARTIAL(inprog)) {
- i = 0;
- hard_lp_ctr--;
- } else {
- i++;
- }
-
- grp_ptr = rvu_read64(rvu, blkaddr,
- CPT_AF_BAR2_ALIASX(slot,
- CPT_LF_Q_GRP_PTR));
- nq_ptr = (grp_ptr >> 32) & 0x7FFF;
- dq_ptr = grp_ptr & 0x7FFF;
-
- } while (hard_lp_ctr && (i < 10) && (nq_ptr != dq_ptr));
+ inst_ptr = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_INST_PTR));
+ pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) +
+ FIELD_GET(NQPTR, inst_ptr) -
+ FIELD_GET(DQPTR, inst_ptr);
+ udelay(1);
+ timeout--;
+ } while ((pending != 0) && (timeout != 0));
- if (hard_lp_ctr == 0)
- dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+ if (timeout == 0)
+ dev_warn(rvu->dev, "TIMEOUT: CPT poll on pending instructions\n");
- i = 0;
- hard_lp_ctr = 100000;
+ timeout = 1000000;
+ /* Wait for CPT queue to become execution-quiescent */
do {
inprog = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
- if ((INPROG_INFLIGHT(inprog) == 0) &&
- (INPROG_GWB(inprog) < 40) &&
- ((INPROG_GRB(inprog) == 0) ||
- (INPROG_GRB((inprog)) == 40))) {
+ if ((FIELD_GET(INFLIGHT, inprog) == 0) &&
+ (FIELD_GET(GRB_CNT, inprog) == 0)) {
i++;
} else {
i = 0;
- hard_lp_ctr--;
+ timeout--;
}
- } while (hard_lp_ctr && (i < 10));
+ } while ((timeout != 0) && (i < 10));
- if (hard_lp_ctr == 0)
- dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+ if (timeout == 0)
+ dev_warn(rvu->dev, "TIMEOUT: CPT poll on inflight count\n");
+ /* Wait for 2 us to flush all queue writes to memory */
+ udelay(2);
}
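Editor's note: with enqueue disabled, the drain loop above estimates outstanding instructions from CPT_LF_Q_INST_PTR as wrap-bit * qsize * 40 + NQPTR - DQPTR and polls until that reaches zero. A sketch of the computation on an invented register value, reusing the field masks defined in this hunk.

/* Editor's sketch, not part of this patch; inst_ptr and qsize are fake. */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_GET(m, v)   (((v) & (m)) >> __builtin_ctzll(m))

#define XQ_XOR GENMASK_ULL(63, 63)
#define DQPTR  GENMASK_ULL(19, 0)
#define NQPTR  GENMASK_ULL(51, 32)

int main(void)
{
	uint64_t qsize = 64;                 /* CPT_LF_Q_SIZE & 0x7FFF */
	/* Fake INST_PTR: wrap bit set, NQ just past DQ on the next lap. */
	uint64_t inst_ptr = (1ULL << 63) | (5ULL << 32) | 2500ULL;
	uint64_t pending;

	pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) +
		  FIELD_GET(NQPTR, inst_ptr) -
		  FIELD_GET(DQPTR, inst_ptr);

	printf("pending instructions = %llu\n", (unsigned long long)pending);
	return 0;
}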
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
@@ -918,18 +1047,15 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int s
if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
cpt_rxc_teardown(rvu, blkaddr);
+ mutex_lock(&rvu->alias_lock);
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
- rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
cpt_lf_disable_iqueue(rvu, blkaddr, slot);
- /* Set group drop to help clear out hardware */
- reg = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
- reg |= BIT_ULL(17);
- rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), reg);
-
- rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+ mutex_unlock(&rvu->alias_lock);
return 0;
}
@@ -940,7 +1066,7 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int s
static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
int nix_blkaddr)
{
- int cpt_pf_num = get_cpt_pf_num(rvu);
+ int cpt_pf_num = rvu->cpt_pf_num;
struct cpt_inst_lmtst_req *req;
dma_addr_t res_daddr;
int timeout = 3000;
@@ -1064,7 +1190,7 @@ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
- rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
for (i = 0; i < max_ctx_entries; i++) {
cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));
@@ -1077,10 +1203,19 @@ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
reg);
}
}
- rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
unlock:
mutex_unlock(&rvu->rsrc_lock);
return 0;
}
+
+int rvu_cpt_init(struct rvu *rvu)
+{
+ /* Retrieve CPT PF number */
+ rvu->cpt_pf_num = get_cpt_pf_num(rvu);
+ spin_lock_init(&rvu->cpt_intr_lock);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index a09a507369ac..9533b1d92960 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -18,6 +18,8 @@
#include "cgx.h"
#include "lmac_common.h"
#include "npc.h"
+#include "rvu_npc_hash.h"
+#include "mcs.h"
#define DEBUGFS_DIR_NAME "octeontx2"
@@ -196,9 +198,6 @@ enum cpt_eng_type {
CPT_IE_TYPE = 3,
};
-#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \
- blk_addr, NDC_AF_CONST) & 0xFF)
-
#define rvu_dbg_NULL NULL
#define rvu_dbg_open_NULL NULL
@@ -226,6 +225,351 @@ static const struct file_operations rvu_dbg_##name##_fops = { \
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_port_stats stats;
+ int lmac;
+
+ seq_puts(filp, "\n port stats\n");
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
+ mcs_get_port_stats(mcs, &stats, lmac, dir);
+ seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
+ seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
+
+ if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
+ seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
+ stats.preempt_err_cnt);
+ if (dir == MCS_TX)
+ seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
+ stats.sectag_insert_err_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
+
+static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sa_stats stats;
+ struct rsrc_bmap *map;
+ int sa_id;
+
+ if (dir == MCS_TX) {
+ map = &mcs->tx.sa;
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
+ seq_puts(filp, "\n TX SA stats\n");
+ mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
+ seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
+ stats.pkt_encrypt_cnt);
+
+ seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
+ stats.pkt_protected_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+ }
+
+ /* RX stats */
+ map = &mcs->rx.sa;
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
+ seq_puts(filp, "\n RX SA stats\n");
+ mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
+ seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
+ seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
+ seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
+ seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
+ seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sc_stats stats;
+ struct rsrc_bmap *map;
+ int sc_id;
+
+ map = &mcs->tx.sc;
+ seq_puts(filp, "\n SC stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
+ mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
+ seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
+ seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
+ seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
+
+ if (mcs->hw->mcs_blks == 1) {
+ seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
+ stats.octet_encrypt_cnt);
+ seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
+ stats.octet_protected_cnt);
+ }
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sc_stats stats;
+ struct rsrc_bmap *map;
+ int sc_id;
+
+ map = &mcs->rx.sc;
+ seq_puts(filp, "\n SC stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
+ mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
+ seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
+ seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
+ seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
+ seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
+ seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
+ seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);
+
+ if (mcs->hw->mcs_blks > 1) {
+ seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
+ seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
+ }
+ if (mcs->hw->mcs_blks == 1) {
+ seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
+ stats.octet_decrypt_cnt);
+ seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
+ stats.octet_validate_cnt);
+ }
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
+
+static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_flowid_stats stats;
+ struct rsrc_bmap *map;
+ int flow_id;
+
+ seq_puts(filp, "\n Flowid stats\n");
+
+ if (dir == MCS_RX)
+ map = &mcs->rx.flow_ids;
+ else
+ map = &mcs->tx.flow_ids;
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
+ mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
+ seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_secy_stats stats;
+ struct rsrc_bmap *map;
+ int secy_id;
+
+ map = &mcs->tx.secy;
+ seq_puts(filp, "\n MCS TX secy stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
+ mcs_get_tx_secy_stats(mcs, &stats, secy_id);
+ seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
+ seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
+ seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
+ stats.unctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
+ seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
+ stats.octet_encrypted_cnt);
+ seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
+ stats.octet_protected_cnt);
+ seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
+ stats.pkt_noactivesa_cnt);
+ seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
+ seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_secy_stats stats;
+ struct rsrc_bmap *map;
+ int secy_id;
+
+ map = &mcs->rx.secy;
+ seq_puts(filp, "\n MCS secy stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
+ mcs_get_rx_secy_stats(mcs, &stats, secy_id);
+ seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
+ seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
+ seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
+ stats.unctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
+ seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
+ stats.octet_decrypted_cnt);
+ seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
+ stats.octet_validated_cnt);
+ seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
+ stats.pkt_port_disabled_cnt);
+ seq_printf(filp, "secy%d: Pkts with badtag: %lld\n", secy_id, stats.pkt_badtag_cnt);
+ seq_printf(filp, "secy%d: Pkts with no SA(sectag.tci.c=0): %lld\n", secy_id,
+ stats.pkt_nosa_cnt);
+ seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
+ stats.pkt_nosaerror_cnt);
+ seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
+ stats.pkt_tagged_ctl_cnt);
+ seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
+ seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
+ if (mcs->hw->mcs_blks > 1)
+ seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
+ stats.pkt_notag_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
+
+static void rvu_dbg_mcs_init(struct rvu *rvu)
+{
+ struct mcs *mcs;
+ char dname[10];
+ int i;
+
+ if (!rvu->mcs_blk_cnt)
+ return;
+
+ rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
+
+ for (i = 0; i < rvu->mcs_blk_cnt; i++) {
+ mcs = mcs_get_pdata(i);
+
+ sprintf(dname, "mcs%d", i);
+ rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
+ rvu->rvu_dbg.mcs_root);
+
+ rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
+
+ debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_flowid_stats_fops);
+
+ debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_secy_stats_fops);
+
+ debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_sc_stats_fops);
+
+ debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_sa_stats_fops);
+
+ debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_port_stats_fops);
+
+ rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
+
+ debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_flowid_stats_fops);
+
+ debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_secy_stats_fops);
+
+ debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_sc_stats_fops);
+
+ debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_sa_stats_fops);
+
+ debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_port_stats_fops);
+ }
+}
+
#define LMT_MAPTBL_ENTRY_SIZE 16
/* Dump LMTST map table */
static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
@@ -248,7 +592,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
- return -ENOSPC;
+ return -ENOMEM;
tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
@@ -407,7 +751,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
- return -ENOSPC;
+ return -ENOMEM;
/* Get the maximum width of a column */
lf_str_size = get_max_column_width(rvu);
@@ -534,6 +878,8 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
sprintf(lmac, "LMAC%d", lmac_id);
seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
+
+ pci_dev_put(pdev);
}
return 0;
}
@@ -1100,6 +1446,7 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
struct nix_hw *nix_hw;
struct rvu *rvu;
int bank, max_bank;
+ u64 ndc_af_const;
if (blk_addr == BLKADDR_NDC_NPA0) {
rvu = s->private;
@@ -1108,7 +1455,8 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr)
rvu = nix_hw->rvu;
}
- max_bank = NDC_MAX_BANK(rvu, blk_addr);
+ ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST);
+ max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
for (bank = 0; bank < max_bank; bank++) {
seq_printf(s, "BANK:%d\n", bank);
seq_printf(s, "\tHits:\t%lld\n",
@@ -1224,6 +1572,8 @@ static void print_nix_cn10k_sq_ctx(struct seq_file *m,
seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
@@ -2218,6 +2568,7 @@ static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
}
}
+ pci_dev_put(pdev);
return 0;
}
@@ -2262,7 +2613,7 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
rvu->rvu_dbg.cgx_root);
- for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
+ for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
/* lmac debugfs dir */
sprintf(dname, "lmac%d", lmac_id);
rvu->rvu_dbg.lmac =
@@ -2408,6 +2759,12 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
for_each_set_bit(bit, (unsigned long *)&rule->features, 64) {
seq_printf(s, "\t%s ", npc_get_field_name(bit));
switch (bit) {
+ case NPC_LXMB:
+ if (rule->lxmb == 1)
+ seq_puts(s, "\tL2M nibble is set\n");
+ else
+ seq_puts(s, "\tL2B nibble is set\n");
+ break;
case NPC_DMAC:
seq_printf(s, "%pM ", rule->packet.dmac);
seq_printf(s, "mask %pM\n", rule->mask.dmac);
@@ -2445,6 +2802,14 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "%pI6 ", rule->packet.ip6dst);
seq_printf(s, "mask %pI6\n", rule->mask.ip6dst);
break;
+ case NPC_IPFRAG_IPV6:
+ seq_printf(s, "0x%x ", rule->packet.next_header);
+ seq_printf(s, "mask 0x%x\n", rule->mask.next_header);
+ break;
+ case NPC_IPFRAG_IPV4:
+ seq_printf(s, "0x%x ", rule->packet.ip_flag);
+ seq_printf(s, "mask 0x%x\n", rule->mask.ip_flag);
+ break;
case NPC_SPORT_TCP:
case NPC_SPORT_UDP:
case NPC_SPORT_SCTP:
@@ -2598,6 +2963,170 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
RVU_DEBUG_SEQ_FOPS(npc_mcam_rules, npc_mcam_show_rules, NULL);
+static int rvu_dbg_npc_exact_show_entries(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table_entry *mem_entry[NPC_EXACT_TBL_MAX_WAYS] = { 0 };
+ struct npc_exact_table_entry *cam_entry;
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ int i, j;
+
+ u8 bitmap = 0;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Check if there is at least one entry in mem table */
+ if (!table->mem_tbl_entry_cnt)
+ goto dump_cam_table;
+
+ /* Print table headers */
+ seq_puts(s, "\n\tExact Match MEM Table\n");
+ seq_puts(s, "Index\t");
+
+ for (i = 0; i < table->mem_table.ways; i++) {
+ mem_entry[i] = list_first_entry_or_null(&table->lhead_mem_tbl_entry[i],
+ struct npc_exact_table_entry, list);
+
+ seq_printf(s, "Way-%d\t\t\t\t\t", i);
+ }
+
+ seq_puts(s, "\n");
+ for (i = 0; i < table->mem_table.ways; i++)
+ seq_puts(s, "\tChan MAC \t");
+
+ seq_puts(s, "\n\n");
+
+ /* Print mem table entries */
+ for (i = 0; i < table->mem_table.depth; i++) {
+ bitmap = 0;
+ for (j = 0; j < table->mem_table.ways; j++) {
+ if (!mem_entry[j])
+ continue;
+
+ if (mem_entry[j]->index != i)
+ continue;
+
+ bitmap |= BIT(j);
+ }
+
+ /* No valid entries */
+ if (!bitmap)
+ continue;
+
+ seq_printf(s, "%d\t", i);
+ for (j = 0; j < table->mem_table.ways; j++) {
+ if (!(bitmap & BIT(j))) {
+ seq_puts(s, "nil\t\t\t\t\t");
+ continue;
+ }
+
+ seq_printf(s, "0x%x %pM\t\t\t", mem_entry[j]->chan,
+ mem_entry[j]->mac);
+ mem_entry[j] = list_next_entry(mem_entry[j], list);
+ }
+ seq_puts(s, "\n");
+ }
+
+dump_cam_table:
+
+ if (!table->cam_tbl_entry_cnt)
+ goto done;
+
+ seq_puts(s, "\n\tExact Match CAM Table\n");
+ seq_puts(s, "index\tchan\tMAC\n");
+
+ /* Traverse cam table entries */
+ list_for_each_entry(cam_entry, &table->lhead_cam_tbl_entry, list) {
+ seq_printf(s, "%d\t0x%x\t%pM\n", cam_entry->index, cam_entry->chan,
+ cam_entry->mac);
+ }
+
+done:
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_entries, npc_exact_show_entries, NULL);
+
+static int rvu_dbg_npc_exact_show_info(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ int i;
+
+ table = rvu->hw->table;
+
+ seq_puts(s, "\n\tExact Table Info\n");
+ seq_printf(s, "Exact Match Feature : %s\n",
+ rvu->hw->cap.npc_exact_match_enabled ? "enabled" : "disabled");
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return 0;
+
+ seq_puts(s, "\nMCAM Index\tMAC Filter Rules Count\n");
+ for (i = 0; i < table->num_drop_rules; i++)
+ seq_printf(s, "%d\t\t%d\n", i, table->cnt_cmd_rules[i]);
+
+ seq_puts(s, "\nMCAM Index\tPromisc Mode Status\n");
+ for (i = 0; i < table->num_drop_rules; i++)
+ seq_printf(s, "%d\t\t%s\n", i, table->promisc_mode[i] ? "on" : "off");
+
+ seq_puts(s, "\n\tMEM Table Info\n");
+ seq_printf(s, "Ways : %d\n", table->mem_table.ways);
+ seq_printf(s, "Depth : %d\n", table->mem_table.depth);
+ seq_printf(s, "Mask : 0x%llx\n", table->mem_table.mask);
+ seq_printf(s, "Hash Mask : 0x%x\n", table->mem_table.hash_mask);
+ seq_printf(s, "Hash Offset : 0x%x\n", table->mem_table.hash_offset);
+
+ seq_puts(s, "\n\tCAM Table Info\n");
+ seq_printf(s, "Depth : %d\n", table->cam_table.depth);
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_info, npc_exact_show_info, NULL);
+
+static int rvu_dbg_npc_exact_drop_cnt(struct seq_file *s, void *unused)
+{
+ struct npc_exact_table *table;
+ struct rvu *rvu = s->private;
+ struct npc_key_field *field;
+ u16 chan, pcifunc;
+ int blkaddr, i;
+ u64 cfg, cam1;
+ char *str;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ table = rvu->hw->table;
+
+ field = &rvu->hw->mcam.rx_key_fields[NPC_CHAN];
+
+ seq_puts(s, "\n\t Exact Hit on drop status\n");
+ seq_puts(s, "\npcifunc\tmcam_idx\tHits\tchan\tstatus\n");
+
+ for (i = 0; i < table->num_drop_rules; i++) {
+ pcifunc = rvu_npc_exact_drop_rule_to_pcifunc(rvu, i);
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CFG(i, 0));
+
+ /* channel will be always in keyword 0 */
+ cam1 = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CAMX_W0(i, 0, 1));
+ chan = field->kw_mask[0] & cam1;
+
+ str = (cfg & 1) ? "enabled" : "disabled";
+
+ seq_printf(s, "0x%x\t%d\t\t%llu\t0x%x\t%s\n", pcifunc, i,
+ rvu_read64(rvu, blkaddr,
+ NPC_AF_MATCH_STATX(table->counter_idx[i])),
+ chan, str);
+ }
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(npc_exact_drop_cnt, npc_exact_drop_cnt, NULL);
+
static void rvu_dbg_npc_init(struct rvu *rvu)
{
rvu->rvu_dbg.npc = debugfs_create_dir("npc", rvu->rvu_dbg.root);
@@ -2606,8 +3135,22 @@ static void rvu_dbg_npc_init(struct rvu *rvu)
&rvu_dbg_npc_mcam_info_fops);
debugfs_create_file("mcam_rules", 0444, rvu->rvu_dbg.npc, rvu,
&rvu_dbg_npc_mcam_rules_fops);
+
debugfs_create_file("rx_miss_act_stats", 0444, rvu->rvu_dbg.npc, rvu,
&rvu_dbg_npc_rx_miss_act_fops);
+
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return;
+
+ debugfs_create_file("exact_entries", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_entries_fops);
+
+ debugfs_create_file("exact_info", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_info_fops);
+
+ debugfs_create_file("exact_drop_cnt", 0444, rvu->rvu_dbg.npc, rvu,
+ &rvu_dbg_npc_exact_drop_cnt_fops);
+
}
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
@@ -2872,6 +3415,7 @@ create:
rvu_dbg_npc_init(rvu);
rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
+ rvu_dbg_mcs_init(rvu);
}
void rvu_dbg_exit(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 70bacd38a6d9..e4407f09c9d3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -10,6 +10,7 @@
#include "rvu.h"
#include "rvu_reg.h"
#include "rvu_struct.h"
+#include "rvu_npc_hash.h"
#define DRV_NAME "octeontx2-af"
@@ -41,7 +42,7 @@ static bool rvu_common_request_irq(struct rvu *rvu, int offset,
struct rvu_devlink *rvu_dl = rvu->rvu_dl;
int rc;
- sprintf(&rvu->irq_name[offset * NAME_SIZE], name);
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
&rvu->irq_name[offset * NAME_SIZE], rvu_dl);
if (rc)
@@ -1436,8 +1437,63 @@ static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
enum rvu_af_dl_param_id {
RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
+ RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
};
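+/* Devlink runtime callbacks for the "npc_exact_feature_disable" param:
+ * get reports whether the exact match table is in use, set accepts only
+ * the value 1 and disables the feature, and validate rejects the request
+ * once any configuration has already been done.
+ */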
+static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ bool enabled;
+
+ enabled = rvu_npc_exact_has_match_table(rvu);
+
+ snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s",
+ enabled ? "enabled" : "disabled");
+
+ return 0;
+}
+
+static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_npc_exact_disable_feature(rvu);
+
+ return 0;
+}
+
+static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct rvu_devlink *rvu_dl = devlink_priv(devlink);
+ struct rvu *rvu = rvu_dl->rvu;
+ u64 enable;
+
+ if (kstrtoull(val.vstr, 10, &enable)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only 1 value is supported");
+ return -EINVAL;
+ }
+
+ if (enable != 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only disabling exact match feature is supported");
+ return -EINVAL;
+ }
+
+ if (rvu_npc_exact_can_disable_feature(rvu))
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't disable exact match feature; Please try before any configuration");
+ return -EFAULT;
+}
+
static const struct devlink_param rvu_af_dl_params[] = {
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
"dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
@@ -1446,6 +1502,15 @@ static const struct devlink_param rvu_af_dl_params[] = {
rvu_af_dl_dwrr_mtu_validate),
};
+static const struct devlink_param rvu_af_dl_param_exact_match[] = {
+ DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
+ "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ rvu_af_npc_exact_feature_get,
+ rvu_af_npc_exact_feature_disable,
+ rvu_af_npc_exact_feature_validate),
+};
+
/* Devlink switch mode */
static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
@@ -1485,14 +1550,7 @@ static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
return 0;
}
-static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
- struct netlink_ext_ack *extack)
-{
- return devlink_info_driver_name_put(req, DRV_NAME);
-}
-
static const struct devlink_ops rvu_devlink_ops = {
- .info_get = rvu_devlink_info_get,
.eswitch_mode_get = rvu_devlink_eswitch_mode_get,
.eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};
@@ -1522,17 +1580,32 @@ int rvu_register_dl(struct rvu *rvu)
goto err_dl_health;
}
- err = devlink_params_register(dl, rvu_af_dl_params,
- ARRAY_SIZE(rvu_af_dl_params));
+ err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
if (err) {
dev_err(rvu->dev,
"devlink params register failed with error %d", err);
goto err_dl_health;
}
+ /* Register exact match devlink only for CN10K-B */
+ if (!rvu_npc_exact_has_match_table(rvu))
+ goto done;
+
+ err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
+ ARRAY_SIZE(rvu_af_dl_param_exact_match));
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink exact match params register failed with error %d", err);
+ goto err_dl_exact_match;
+ }
+
+done:
devlink_register(dl);
return 0;
+err_dl_exact_match:
+ devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
err_dl_health:
rvu_health_reporters_destroy(rvu);
devlink_free(dl);
@@ -1545,8 +1618,14 @@ void rvu_unregister_dl(struct rvu *rvu)
struct devlink *dl = rvu_dl->dl;
devlink_unregister(dl);
- devlink_params_unregister(dl, rvu_af_dl_params,
- ARRAY_SIZE(rvu_af_dl_params));
+
+ devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
+ /* Unregister exact match devlink only for CN10K-B */
+ if (rvu_npc_exact_has_match_table(rvu))
+ devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
+ ARRAY_SIZE(rvu_af_dl_param_exact_match));
+
rvu_health_reporters_destroy(rvu);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d8b1948aaa0a..4ad707e758b9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -14,6 +14,7 @@
#include "npc.h"
#include "cgx.h"
#include "lmac_common.h"
+#include "rvu_npc_hash.h"
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
@@ -296,7 +297,6 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
struct rvu_hwinfo *hw = rvu->hw;
struct sdp_node_info *sdp_info;
int pkind, pf, vf, lbkid, vfid;
- struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
bool from_vf;
int err;
@@ -326,13 +326,6 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
- mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
-
- /* By default we enable pause frames */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
- mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
- rvu),
- lmac_id, true, true);
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
@@ -512,11 +505,11 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
lmac_chan_cnt = cfg & 0xFF;
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
- sdp_chan_cnt = cfg & 0xFFF;
-
cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ sdp_chan_cnt = cfg & 0xFFF;
sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
@@ -533,7 +526,7 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
*/
switch (type) {
case NIX_INTF_TYPE_CGX:
- if ((req->chan_base + req->chan_cnt) > 15)
+ if ((req->chan_base + req->chan_cnt) > 16)
return -EINVAL;
rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
/* Assign bpid based on cgx, lmac and chan id */
@@ -797,6 +790,7 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
struct nix_aq_res_s *result;
int timeout = 1000;
u64 reg, head;
+ int ret;
result = (struct nix_aq_res_s *)aq->res->base;
@@ -820,9 +814,22 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return -EBUSY;
}
- if (result->compcode != NIX_AQ_COMP_GOOD)
+ if (result->compcode != NIX_AQ_COMP_GOOD) {
/* TODO: Replace this with some error code */
+ if (result->compcode == NIX_AQ_COMP_CTX_FAULT ||
+ result->compcode == NIX_AQ_COMP_LOCKERR ||
+ result->compcode == NIX_AQ_COMP_CTX_POISON) {
+ ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX);
+ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX);
+ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX);
+ ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX);
+ if (ret)
+ dev_err(rvu->dev,
+ "%s: Not able to unlock cachelines\n", __func__);
+ }
+
return -EBUSY;
+ }
return 0;
}
@@ -2065,11 +2072,18 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
int err, restore_tx_en = 0;
u64 cfg;
+ if (!is_rvu_otx2(rvu)) {
+ /* Skip SMQ flush if pkt count is zero */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
+ if (!cfg)
+ return 0;
+ }
+
/* enable cgx tx if disabled */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
- lmac_id, true);
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true);
}
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
@@ -2092,7 +2106,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
rvu_cgx_enadis_rx_bp(rvu, pf, true);
/* restore cgx tx state */
if (restore_tx_en)
- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
return err;
}
@@ -3204,8 +3218,12 @@ static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
{
- /* RPM supports FIFO len 128 KB */
- if (rvu_cgx_get_fifolen(rvu) == 0x20000)
+ int fifo_size = rvu_cgx_get_fifolen(rvu);
+
+ /* RPM supports FIFO len 128 KB and RPM2 supports double the
+ * FIFO len to accommodate 8 LMACS
+ */
+ if (fifo_size == 0x20000 || fifo_size == 0x40000)
*max_mtu = CN10K_LMAC_LINK_MAX_FRS;
else
*max_mtu = NIC_HW_MAX_FRS;
@@ -3800,9 +3818,15 @@ int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
pfvf->rx_chan_cnt);
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ rvu_npc_exact_promisc_enable(rvu, pcifunc);
} else {
if (!nix_rx_multicast)
rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
+
+ if (rvu_npc_exact_has_match_table(rvu))
+ rvu_npc_exact_promisc_disable(rvu, pcifunc);
}
return 0;
@@ -3878,7 +3902,7 @@ nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
/* Enable cgx tx if disabled for credits to be back */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, true);
}
@@ -3891,8 +3915,8 @@ nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
}
- rc = -EBUSY;
- poll_tmo = jiffies + usecs_to_jiffies(10000);
+ rc = NIX_AF_ERR_LINK_CREDITS;
+ poll_tmo = jiffies + usecs_to_jiffies(200000);
/* Wait for credits to return */
do {
if (time_after(jiffies, poll_tmo))
@@ -3918,7 +3942,7 @@ exit:
/* Restore state of cgx tx */
if (restore_tx_en)
- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
mutex_unlock(&rvu->rsrc_lock);
return rc;
@@ -4011,9 +4035,13 @@ linkcfg:
return 0;
/* Update transmit credits for CGX links */
- lmac_fifo_len =
- rvu_cgx_get_fifolen(rvu) /
- cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac);
+ if (!lmac_fifo_len) {
+ dev_err(rvu->dev,
+ "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
+ __func__, cgx, lmac);
+ return 0;
+ }
return nix_config_link_credits(rvu, blkaddr, link, pcifunc,
(lmac_fifo_len - req->maxlen) / 16);
}
@@ -4065,7 +4093,10 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
struct rvu_hwinfo *hw = rvu->hw;
int cgx, lmac_cnt, slink, link;
u16 lbk_max_frs, lmac_max_frs;
+ unsigned long lmac_bmap;
u64 tx_credits, cfg;
+ u64 lmac_fifo_len;
+ int iter;
rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
@@ -4099,12 +4130,23 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
/* Skip when cgx is not available or lmac cnt is zero */
if (lmac_cnt <= 0)
continue;
- tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
- lmac_max_frs) / 16;
- /* Enable credits and set credit pkt count to max allowed */
- cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
slink = cgx * hw->lmac_per_cgx;
- for (link = slink; link < (slink + lmac_cnt); link++) {
+
+ /* Get LMAC id's from bitmap */
+ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+ for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
+ lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
+ if (!lmac_fifo_len) {
+ dev_err(rvu->dev,
+ "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n",
+ __func__, cgx, iter);
+ continue;
+ }
+ tx_credits = (lmac_fifo_len - lmac_max_frs) / 16;
+ /* Enable credits and set credit pkt count to max allowed */
+ cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+
+ link = iter + slink;
nix_hw->tx_credits[link] = tx_credits;
rvu_write64(rvu, blkaddr,
NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
@@ -4279,8 +4321,17 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
/* Restore CINT timer delay to HW reset values */
rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
+
/* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
- rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, 0x1ULL);
+ cfg |= 1ULL;
+ if (!is_rvu_otx2(rvu))
+ cfg |= NIX_PTP_1STEP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
+
+ if (!is_rvu_otx2(rvu))
+ rvu_nix_block_cn10k_init(rvu, nix_hw);
if (is_block_implemented(hw, blkaddr)) {
err = nix_setup_txschq(rvu, nix_hw, blkaddr);
@@ -4578,6 +4629,12 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
pfvf->hw_rx_tstamp_en = false;
}
+ /* reset priority flow control config */
+ rvu_cgx_prio_flow_ctrl_cfg(rvu, pcifunc, 0, 0, 0);
+
+ /* reset 802.3x flow control config */
+ rvu_cgx_cfg_pause_frm(rvu, pcifunc, 0, 0);
+
nix_ctx_free(rvu, pfvf);
nix_free_all_bandprof(rvu, pcifunc);
@@ -4698,6 +4755,10 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
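+/* Field layout of NIX_AF_RX_CPTX_CREDIT as used by the FIELD_PREP/FIELD_GET
+ * calls below: instruction credit count, backpressure ID and credit threshold.
+ */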
+#define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32)
+#define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22)
+#define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0)
+
static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
int blkaddr)
{
@@ -4734,14 +4795,23 @@ static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *r
val);
/* Set CPT credit */
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
- req->cpt_credit);
+ val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
+ if ((val & 0x3FFFFF) != 0x3FFFFF)
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ 0x3FFFFF - val);
+
+ val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
+ val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
+ val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
} else {
rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
0x0);
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
- 0x3FFFFF);
+ val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
+ if ((val & 0x3FFFFF) != 0x3FFFFF)
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ 0x3FFFFF - val);
}
}
@@ -4759,6 +4829,30 @@ int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
return 0;
}
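+/* Mbox handler that reads back the inline IPsec generic config and the
+ * CPT credit, threshold and BPID currently programmed on NIX0.
+ */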
+int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
+ struct msg_req *req,
+ struct nix_inline_ipsec_cfg *rsp)
+
+{
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
+ rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
+ rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
+ rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
+ rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);
+
+ val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
+ rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
+ rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
+ rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);
+
+ return 0;
+}
+
int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
struct nix_inline_ipsec_lf_cfg *req,
struct msg_rsp *rsp)
@@ -4802,6 +4896,7 @@ int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
return 0;
}
+
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
{
bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
@@ -4956,6 +5051,8 @@ static int nix_setup_ipolicers(struct rvu *rvu,
ipolicer->ref_count = devm_kcalloc(rvu->dev,
ipolicer->band_prof.max,
sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->ref_count)
+ return -ENOMEM;
}
/* Set policer timeunit to 2us ie (19 + 1) * 100 nsec = 2us */
@@ -5314,6 +5411,7 @@ int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
aq_req.op = NIX_AQ_INSTOP_WRITE;
memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
+ memset((char *)&aq_req.prof_mask, 0xff, sizeof(struct nix_bandprof_s));
/* Clear higher layer enable bit in the mid profile, just in case */
aq_req.prof.hl_en = 0;
aq_req.prof_mask.hl_en = 1;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 70bd036ed76e..4f5ca5ab13a4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -4,7 +4,7 @@
* Copyright (C) 2018 Marvell.
*
*/
-
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -42,9 +42,18 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return -EBUSY;
}
- if (result->compcode != NPA_AQ_COMP_GOOD)
+ if (result->compcode != NPA_AQ_COMP_GOOD) {
/* TODO: Replace this with some error code */
+ if (result->compcode == NPA_AQ_COMP_CTX_FAULT ||
+ result->compcode == NPA_AQ_COMP_LOCKERR ||
+ result->compcode == NPA_AQ_COMP_CTX_POISON) {
+ if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0))
+ dev_err(rvu->dev,
+ "%s: Not able to unlock cachelines\n", __func__);
+ }
+
return -EBUSY;
+ }
return 0;
}
@@ -545,3 +554,48 @@ void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
npa_ctx_free(rvu, pfvf);
}
+
+/* Due to a hardware erratum, in some corner cases, AQ context lock
+ * operations can result in an NDC way getting into an illegal state
+ * of not valid but locked.
+ *
+ * This API solves the problem by clearing the lock bit of the NDC block.
+ * The operation needs to be done for each line of all the NDC banks.
+ */
+int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr)
+{
+ int bank, max_bank, line, max_line, err;
+ u64 reg, ndc_af_const;
+
+ /* Set the ENABLE bit(63) to '0' */
+ reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL);
+ rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0));
+
+ /* Poll until the BUSY bits(47:32) are set to '0' */
+ err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, GENMASK_ULL(47, 32), true);
+ if (err) {
+ dev_err(rvu->dev, "Timed out while polling for NDC CAM busy bits.\n");
+ return err;
+ }
+
+ ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST);
+ max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const);
+ max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const);
+ for (bank = 0; bank < max_bank; bank++) {
+ for (line = 0; line < max_line; line++) {
+ /* Check if 'cache line valid bit(63)' is not set
+ * but 'cache line lock bit(60)' is set; if so,
+ * clear the lock bit(60).
+ */
+ reg = rvu_read64(rvu, blkaddr,
+ NDC_AF_BANKX_LINEX_METADATA(bank, line));
+ if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) {
+ rvu_write64(rvu, blkaddr,
+ NDC_AF_BANKX_LINEX_METADATA(bank, line),
+ reg & ~BIT_ULL(60));
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index c0005a1feee6..16cfc802e348 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -15,6 +15,7 @@
#include "npc.h"
#include "cgx.h"
#include "npc_profile.h"
+#include "rvu_npc_hash.h"
#define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */
#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
@@ -402,6 +403,7 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, struct mcam_entry *entry,
bool *enable)
{
+ struct rvu_npc_mcam_rule *rule;
u16 owner, target_func;
struct rvu_pfvf *pfvf;
u64 rx_action;
@@ -423,6 +425,12 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
*enable = false;
+ /* fix up not needed for the rules added by user (ntuple filters) */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ return;
+ }
+
/* copy VF default entry action to the VF mcam entry */
rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
target_func);
@@ -489,8 +497,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
}
/* PF installing VF rule */
- if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries)
- npc_fixup_vf_rule(rvu, mcam, blkaddr, index, entry, &enable);
+ if (is_npc_intf_rx(intf) && actindex < mcam->bmap_entries)
+ npc_fixup_vf_rule(rvu, mcam, blkaddr, actindex, entry, &enable);
/* Set 'action' */
rvu_write64(rvu, blkaddr,
@@ -598,7 +606,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
struct npc_install_flow_req req = { 0 };
struct npc_install_flow_rsp rsp = { 0 };
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct nix_rx_action action;
+ struct nix_rx_action action = { 0 };
int blkaddr, index;
/* AF's and SDP VFs work in promiscuous mode */
@@ -609,6 +617,12 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
if (blkaddr < 0)
return;
+ /* Ucast rule should not be installed if DMAC
+ * extraction is not supported by the profile.
+ */
+ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf))
+ return;
+
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
@@ -619,7 +633,6 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
blkaddr, index);
} else {
- *(u64 *)&action = 0x00;
action.op = NIX_RX_ACTIONOP_UCAST;
action.pf_func = pcifunc;
}
@@ -650,7 +663,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
int blkaddr, ucast_idx, index;
- struct nix_rx_action action;
+ struct nix_rx_action action = { 0 };
u64 relaxed_mask;
if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
@@ -678,14 +691,14 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
blkaddr, ucast_idx);
if (action.op != NIX_RX_ACTIONOP_RSS) {
- *(u64 *)&action = 0x00;
+ *(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_UCAST;
}
/* RX_ACTION set to MCAST for CGX PF's */
if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
- *(u64 *)&action = 0x00;
+ *(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_MCAST;
pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
action.index = pfvf->promisc_mce_idx;
@@ -771,6 +784,14 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
/* Get 'pcifunc' of PF device */
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* Bcast rule should not be installed if neither DMAC
+ * nor LXMB extraction is supported by the profile.
+ */
+ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
+ !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
+ return;
+
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
@@ -825,7 +846,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
struct rvu_hwinfo *hw = rvu->hw;
int blkaddr, ucast_idx, index;
u8 mac_addr[ETH_ALEN] = { 0 };
- struct nix_rx_action action;
+ struct nix_rx_action action = { 0 };
struct rvu_pfvf *pfvf;
u16 vf_func;
@@ -841,6 +862,14 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
vf_func = pcifunc & RVU_PFVF_FUNC_MASK;
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* Mcast rule should not be installed if neither DMAC
+ * nor LXMB extraction is supported by the profile.
+ */
+ if (!npc_is_feature_supported(rvu, BIT_ULL(NPC_DMAC), pfvf->nix_rx_intf) &&
+ !npc_is_feature_supported(rvu, BIT_ULL(NPC_LXMB), pfvf->nix_rx_intf))
+ return;
+
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_ALLMULTI_ENTRY);
@@ -854,14 +883,14 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
blkaddr, ucast_idx);
if (action.op != NIX_RX_ACTIONOP_RSS) {
- *(u64 *)&action = 0x00;
+ *(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_UCAST;
action.pf_func = pcifunc;
}
/* RX_ACTION set to MCAST for CGX PF's */
if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) {
- *(u64 *)&action = 0x00;
+ *(u64 *)&action = 0;
action.op = NIX_RX_ACTIONOP_MCAST;
action.index = pfvf->mcast_mce_idx;
}
@@ -916,7 +945,8 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 pcifunc, u64 rx_action)
{
int actindex, index, bank, entry;
- bool enable;
+ struct rvu_npc_mcam_rule *rule;
+ bool enable, update;
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
return;
@@ -924,6 +954,14 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
mutex_lock(&mcam->lock);
for (index = 0; index < mcam->bmap_entries; index++) {
if (mcam->entry2target_pffunc[index] == pcifunc) {
+ update = true;
+ /* update not needed for the rules added via ntuple filters */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ update = false;
+ }
+ if (!update)
+ continue;
bank = npc_get_bank(mcam, index);
actindex = index;
entry = index & (mcam->banksize - 1);
@@ -1081,6 +1119,9 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
/* Delete multicast and promisc MCAM entries */
@@ -1090,8 +1131,39 @@ void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
NIXLF_PROMISC_ENTRY, false);
}
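+/* Enable or disable the MCAM entry backing the rule installed at 'entry'
+ * on interface 'intf'; returns false if no such rule is found.
+ */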
+bool rvu_npc_enable_mcam_by_entry_index(struct rvu *rvu, int entry, int intf, bool enable)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule, *tmp;
+
+ mutex_lock(&mcam->lock);
+
+ list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
+ if (rule->intf != intf)
+ continue;
+
+ if (rule->entry != entry)
+ continue;
+
+ rule->enable = enable;
+ mutex_unlock(&mcam->lock);
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr,
+ entry, enable);
+
+ return true;
+ }
+
+ mutex_unlock(&mcam->lock);
+ return false;
+}
+
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ if (nixlf < 0)
+ return;
+
/* Enables only broadcast match entry. Promisc/Allmulti are enabled
* in set_rx_mode mbox handler.
*/
@@ -1166,14 +1238,6 @@ void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}
-#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \
- rvu_write64(rvu, blkaddr, \
- NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
-
-#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \
- rvu_write64(rvu, blkaddr, \
- NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
-
static void npc_program_mkex_rx(struct rvu *rvu, int blkaddr,
struct npc_mcam_kex *mkex, u8 intf)
{
@@ -1247,6 +1311,9 @@ static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
npc_program_mkex_rx(rvu, blkaddr, mkex, intf);
npc_program_mkex_tx(rvu, blkaddr, mkex, intf);
}
+
+ /* Program mkex hash profile */
+ npc_program_mkex_hash(rvu, blkaddr);
}
static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr,
@@ -1448,6 +1515,7 @@ static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
profile->kpus = ARRAY_SIZE(npc_kpu_profiles);
profile->lt_def = &npc_lt_defaults;
profile->mkex = &npc_mkex_default;
+ profile->mkex_hash = &npc_mkex_hash_default;
return 0;
}
@@ -1635,7 +1703,7 @@ static void npc_load_kpu_profile(struct rvu *rvu)
* Firmware database method.
* Default KPU profile.
*/
- if (!request_firmware(&fw, kpu_profile, rvu->dev)) {
+ if (!request_firmware_direct(&fw, kpu_profile, rvu->dev)) {
dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
kpu_profile);
rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
@@ -1804,7 +1872,6 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
mcam->hprio_count = mcam->lprio_count;
mcam->hprio_end = mcam->hprio_count;
-
/* Allocate bitmap for managing MCAM counters and memory
* for saving counter to RVU PFFUNC allocation mapping.
*/
@@ -1900,6 +1967,7 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr)
static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
{
+ struct npc_mcam_kex *mkex = rvu->kpu.mkex;
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
u64 nibble_ena, rx_kex, tx_kex;
@@ -1912,15 +1980,15 @@ static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr)
mcam->counters.max--;
mcam->rx_miss_act_cntr = mcam->counters.max;
- rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
- tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
+ rx_kex = mkex->keyx_cfg[NIX_INTF_RX];
+ tx_kex = mkex->keyx_cfg[NIX_INTF_TX];
nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
nibble_ena = rvu_npc_get_tx_nibble_cfg(rvu, nibble_ena);
if (nibble_ena) {
tx_kex &= ~NPC_PARSE_NIBBLE;
tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
- npc_mkex_default.keyx_cfg[NIX_INTF_TX] = tx_kex;
+ mkex->keyx_cfg[NIX_INTF_TX] = tx_kex;
}
/* Configure RX interfaces */
@@ -2032,6 +2100,7 @@ int rvu_npc_init(struct rvu *rvu)
rvu_npc_setup_interfaces(rvu, blkaddr);
+ npc_config_secret_key(rvu, blkaddr);
/* Configure MKEX profile */
npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
@@ -2519,7 +2588,7 @@ alloc:
/* Copy MCAM entry indices into mbox response entry_list.
* Requester always expects indices in ascending order, so
- * so reverse the list if reverse bitmap is used for allocation.
+ * reverse the list if reverse bitmap is used for allocation.
*/
if (!req->contig && rsp->count) {
index = 0;
@@ -2547,6 +2616,14 @@ alloc:
return 0;
}
+/* Marks bitmaps to reserve the mcam slot */
+void npc_mcam_rsrcs_reserve(struct rvu *rvu, int blkaddr, int entry_idx)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+
+ npc_mcam_set_bit(mcam, entry_idx);
+}
+
int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
struct npc_mcam_alloc_entry_req *req,
struct npc_mcam_alloc_entry_rsp *rsp)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index ff2b21999f36..952319453701 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -10,11 +10,8 @@
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
-
-#define NPC_BYTESM GENMASK_ULL(19, 16)
-#define NPC_HDR_OFFSET GENMASK_ULL(15, 8)
-#define NPC_KEY_OFFSET GENMASK_ULL(5, 0)
-#define NPC_LDATA_EN BIT_ULL(7)
+#include "rvu_npc_fs.h"
+#include "rvu_npc_hash.h"
static const char * const npc_flow_names[] = {
[NPC_DMAC] = "dmac",
@@ -24,8 +21,10 @@ static const char * const npc_flow_names[] = {
[NPC_VLAN_ETYPE_STAG] = "vlan ether type stag",
[NPC_OUTER_VID] = "outer vlan id",
[NPC_TOS] = "tos",
+ [NPC_IPFRAG_IPV4] = "fragmented IPv4 header ",
[NPC_SIP_IPV4] = "ipv4 source ip",
[NPC_DIP_IPV4] = "ipv4 destination ip",
+ [NPC_IPFRAG_IPV6] = "fragmented IPv6 header ",
[NPC_SIP_IPV6] = "ipv6 source ip",
[NPC_DIP_IPV6] = "ipv6 destination ip",
[NPC_IPPROTO_TCP] = "ip proto tcp",
@@ -41,9 +40,23 @@ static const char * const npc_flow_names[] = {
[NPC_DPORT_UDP] = "udp destination port",
[NPC_SPORT_SCTP] = "sctp source port",
[NPC_DPORT_SCTP] = "sctp destination port",
+ [NPC_LXMB] = "Mcast/Bcast header ",
[NPC_UNKNOWN] = "unknown",
};
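+/* Return true only if every flow field requested in 'features' is
+ * extracted into the MCAM key by the profile for this interface.
+ */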
+bool npc_is_feature_supported(struct rvu *rvu, u64 features, u8 intf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u64 mcam_features;
+ u64 unsupported;
+
+ mcam_features = is_npc_intf_tx(intf) ? mcam->tx_features : mcam->rx_features;
+ unsupported = (mcam_features ^ features) & ~mcam_features;
+
+ /* Return false if at least one of the input flows is not extracted */
+ return !unsupported;
+}
+
const char *npc_get_field_name(u8 hdr)
{
if (hdr >= ARRAY_SIZE(npc_flow_names))
@@ -227,6 +240,25 @@ static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
return true;
}
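+/* Bits 40-43 of the KEX config select the exact match result nibbles;
+ * record the key word and bit offset they occupy so that rules such as
+ * the drop-on-non-hit rule can match on the exact match result.
+ */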
+static void npc_scan_exact_result(struct npc_mcam *mcam, u8 bit_number,
+ u8 key_nibble, u8 intf)
+{
+ u8 offset = (key_nibble * 4) % 64; /* offset within key word */
+ u8 kwi = (key_nibble * 4) / 64; /* which word in key */
+ u8 nr_bits = 4; /* bits in a nibble */
+ u8 type;
+
+ switch (bit_number) {
+ case 40 ... 43:
+ type = NPC_EXACT_RESULT;
+ break;
+
+ default:
+ return;
+ }
+ npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
+}
+
static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
u8 key_nibble, u8 intf)
{
@@ -276,6 +308,7 @@ static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
default:
return;
}
+
npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}
@@ -318,8 +351,10 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
vlan_tag2 = &key_fields[NPC_VLAN_TAG2];
/* if key profile programmed does not extract Ethertype at all */
- if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
+ if (!etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws) {
+ dev_err(rvu->dev, "mkex: Ethertype is not extracted.\n");
goto vlan_tci;
+ }
/* if key profile programmed extracts Ethertype from one layer */
if (etype_ether->nr_kws && !etype_tag1->nr_kws && !etype_tag2->nr_kws)
@@ -332,35 +367,45 @@ static void npc_handle_multi_layer_fields(struct rvu *rvu, int blkaddr, u8 intf)
/* if key profile programmed extracts Ethertype from multiple layers */
if (etype_ether->nr_kws && etype_tag1->nr_kws) {
for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
- if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i])
+ if (etype_ether->kw_mask[i] != etype_tag1->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Etype pos is different for untagged and tagged pkts.\n");
goto vlan_tci;
+ }
}
key_fields[NPC_ETYPE] = *etype_tag1;
}
if (etype_ether->nr_kws && etype_tag2->nr_kws) {
for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
- if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i])
+ if (etype_ether->kw_mask[i] != etype_tag2->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Etype pos is different for untagged and double tagged pkts.\n");
goto vlan_tci;
+ }
}
key_fields[NPC_ETYPE] = *etype_tag2;
}
if (etype_tag1->nr_kws && etype_tag2->nr_kws) {
for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
- if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i])
+ if (etype_tag1->kw_mask[i] != etype_tag2->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Etype pos is different for tagged and double tagged pkts.\n");
goto vlan_tci;
+ }
}
key_fields[NPC_ETYPE] = *etype_tag2;
}
/* check none of higher layers overwrite Ethertype */
start_lid = key_fields[NPC_ETYPE].layer_mdata.lid + 1;
- if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf))
+ if (npc_check_overlap(rvu, blkaddr, NPC_ETYPE, start_lid, intf)) {
+ dev_err(rvu->dev, "mkex: Ethertype is overwritten by higher layers.\n");
goto vlan_tci;
+ }
*features |= BIT_ULL(NPC_ETYPE);
vlan_tci:
/* if key profile does not extract outer vlan tci at all */
- if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
+ if (!vlan_tag1->nr_kws && !vlan_tag2->nr_kws) {
+ dev_err(rvu->dev, "mkex: Outer vlan tci is not extracted.\n");
goto done;
+ }
/* if key profile extracts outer vlan tci from one layer */
if (vlan_tag1->nr_kws && !vlan_tag2->nr_kws)
@@ -371,15 +416,19 @@ vlan_tci:
/* if key profile extracts outer vlan tci from multiple layers */
if (vlan_tag1->nr_kws && vlan_tag2->nr_kws) {
for (i = 0; i < NPC_MAX_KWS_IN_KEY; i++) {
- if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i])
+ if (vlan_tag1->kw_mask[i] != vlan_tag2->kw_mask[i]) {
+ dev_err(rvu->dev, "mkex: Out vlan tci pos is different for tagged and double tagged pkts.\n");
goto done;
+ }
}
key_fields[NPC_OUTER_VID] = *vlan_tag2;
}
/* check none of higher layers overwrite outer vlan tci */
start_lid = key_fields[NPC_OUTER_VID].layer_mdata.lid + 1;
- if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf))
+ if (npc_check_overlap(rvu, blkaddr, NPC_OUTER_VID, start_lid, intf)) {
+ dev_err(rvu->dev, "mkex: Outer vlan tci is overwritten by higher layers.\n");
goto done;
+ }
*features |= BIT_ULL(NPC_OUTER_VID);
done:
return;
@@ -388,6 +437,7 @@ done:
static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
u8 lt, u64 cfg, u8 intf)
{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
struct npc_mcam *mcam = &rvu->hw->mcam;
u8 hdr, key, nr_bytes, bit_offset;
u8 la_ltype, la_start;
@@ -397,8 +447,6 @@ static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
nr_bytes = FIELD_GET(NPC_BYTESM, cfg) + 1;
hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
key = FIELD_GET(NPC_KEY_OFFSET, cfg);
- start_kwi = key / 8;
- offset = (key * 8) % 64;
/* For Tx, Layer A has NIX_INST_HDR_S(64 bytes) preceding
* ethernet header.
@@ -413,13 +461,18 @@ static void npc_scan_ldata(struct rvu *rvu, int blkaddr, u8 lid,
#define NPC_SCAN_HDR(name, hlid, hlt, hstart, hlen) \
do { \
+ start_kwi = key / 8; \
+ offset = (key * 8) % 64; \
if (lid == (hlid) && lt == (hlt)) { \
if ((hstart) >= hdr && \
((hstart) + (hlen)) <= (hdr + nr_bytes)) { \
bit_offset = (hdr + nr_bytes - (hstart) - (hlen)) * 8; \
npc_set_layer_mdata(mcam, (name), cfg, lid, lt, intf); \
+ offset += bit_offset; \
+ start_kwi += offset / 64; \
+ offset %= 64; \
npc_set_kw_masks(mcam, (name), (hlen) * 8, \
- start_kwi, offset + bit_offset, intf);\
+ start_kwi, offset, intf); \
} \
} \
} while (0)
@@ -429,10 +482,25 @@ do { \
* Example: Source IP is 4 bytes and starts at 12th byte of IP header
*/
NPC_SCAN_HDR(NPC_TOS, NPC_LID_LC, NPC_LT_LC_IP, 1, 1);
+ NPC_SCAN_HDR(NPC_IPFRAG_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 6, 1);
NPC_SCAN_HDR(NPC_SIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 12, 4);
NPC_SCAN_HDR(NPC_DIP_IPV4, NPC_LID_LC, NPC_LT_LC_IP, 16, 4);
- NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
- NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+ NPC_SCAN_HDR(NPC_IPFRAG_IPV6, NPC_LID_LC, NPC_LT_LC_IP6_EXT, 6, 1);
+ if (rvu->hw->cap.npc_hash_extract) {
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][0])
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 4);
+ else
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][1])
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 4);
+ else
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+ } else {
+ NPC_SCAN_HDR(NPC_SIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 8, 16);
+ NPC_SCAN_HDR(NPC_DIP_IPV6, NPC_LID_LC, NPC_LT_LC_IP6, 24, 16);
+ }
+
NPC_SCAN_HDR(NPC_SPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 0, 2);
NPC_SCAN_HDR(NPC_DPORT_UDP, NPC_LID_LD, NPC_LT_LD_UDP, 2, 2);
NPC_SCAN_HDR(NPC_SPORT_TCP, NPC_LID_LD, NPC_LT_LD_TCP, 0, 2);
@@ -445,7 +513,8 @@ do { \
NPC_SCAN_HDR(NPC_VLAN_TAG1, NPC_LID_LB, NPC_LT_LB_CTAG, 2, 2);
NPC_SCAN_HDR(NPC_VLAN_TAG2, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 2, 2);
NPC_SCAN_HDR(NPC_DMAC, NPC_LID_LA, la_ltype, la_start, 6);
- NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start, 6);
+ /* SMAC follows the DMAC (which is 6 bytes) */
+ NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
/* PF_FUNC is 2 bytes at 0th byte of NPC_LT_LA_IH_NIX_ETHER */
NPC_SCAN_HDR(NPC_PF_FUNC, NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER, 0, 2);
}
@@ -499,6 +568,10 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) |
BIT_ULL(NPC_VLAN_ETYPE_STAG);
+
+ /* for L2M/L2B/L3M/L3B, check if the type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf))
+ *features |= BIT_ULL(NPC_LXMB);
}
/* Scan key extraction profile and record how fields of our interest
@@ -509,8 +582,8 @@ static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
u8 lid, lt, ld, bitnr;
+ u64 cfg, masked_cfg;
u8 key_nibble = 0;
- u64 cfg;
/* Scan and note how parse result is going to be in key.
* A bit set in PARSE_NIBBLE_ENA corresponds to a nibble from
@@ -518,12 +591,23 @@ static int npc_scan_kex(struct rvu *rvu, int blkaddr, u8 intf)
* will be concatenated in key.
*/
cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(intf));
- cfg &= NPC_PARSE_NIBBLE;
- for_each_set_bit(bitnr, (unsigned long *)&cfg, 31) {
+ masked_cfg = cfg & NPC_PARSE_NIBBLE;
+ for_each_set_bit(bitnr, (unsigned long *)&masked_cfg, 31) {
npc_scan_parse_result(mcam, bitnr, key_nibble, intf);
key_nibble++;
}
+ /* Ignore exact match bits for mcam entries except the first rule,
+ * which is the drop-on-non-hit rule. This first rule is configured
+ * explicitly by the exact match code.
+ */
+ masked_cfg = cfg & NPC_EXACT_NIBBLE;
+ bitnr = NPC_EXACT_NIBBLE_START;
+ for_each_set_bit_from(bitnr, (unsigned long *)&masked_cfg, NPC_EXACT_NIBBLE_END + 1) {
+ npc_scan_exact_result(mcam, bitnr, key_nibble, intf);
+ key_nibble++;
+ }
+
/* Scan and note how layer data is going to be in key */
for (lid = 0; lid < NPC_MAX_LID; lid++) {
for (lt = 0; lt < NPC_MAX_LT; lt++) {
@@ -564,16 +648,6 @@ static int npc_scan_verify_kex(struct rvu *rvu, int blkaddr)
dev_err(rvu->dev, "Channel cannot be overwritten\n");
return -EINVAL;
}
- /* DMAC should be present in key for unicast filter to work */
- if (!npc_is_field_present(rvu, NPC_DMAC, NIX_INTF_RX)) {
- dev_err(rvu->dev, "DMAC not present in Key\n");
- return -EINVAL;
- }
- /* check that none of the fields overwrite DMAC */
- if (npc_check_overlap(rvu, blkaddr, NPC_DMAC, 0, NIX_INTF_RX)) {
- dev_err(rvu->dev, "DMAC cannot be overwritten\n");
- return -EINVAL;
- }
npc_set_features(rvu, blkaddr, NIX_INTF_TX);
npc_set_features(rvu, blkaddr, NIX_INTF_RX);
@@ -604,9 +678,9 @@ static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
unsupported = (*mcam_features ^ features) & ~(*mcam_features);
if (unsupported) {
- dev_info(rvu->dev, "Unsupported flow(s):\n");
+ dev_warn(rvu->dev, "Unsupported flow(s):\n");
for_each_set_bit(bit, (unsigned long *)&unsupported, 64)
- dev_info(rvu->dev, "%s ", npc_get_field_name(bit));
+ dev_warn(rvu->dev, "%s ", npc_get_field_name(bit));
return -EOPNOTSUPP;
}
@@ -624,9 +698,9 @@ static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf)
* If any bits in mask are 0 then corresponding bits in value are
* dont care.
*/
-static void npc_update_entry(struct rvu *rvu, enum key_fields type,
- struct mcam_entry *entry, u64 val_lo,
- u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
+void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct mcam_entry dummy = { {0} };
@@ -705,8 +779,6 @@ static void npc_update_entry(struct rvu *rvu, enum key_fields type,
}
}
-#define IPV6_WORDS 4
-
static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry,
u64 features, struct flow_msg *pkt,
struct flow_msg *mask,
@@ -779,7 +851,8 @@ static void npc_update_vlan_features(struct rvu *rvu, struct mcam_entry *entry,
static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
u64 features, struct flow_msg *pkt,
struct flow_msg *mask,
- struct rvu_npc_mcam_rule *output, u8 intf)
+ struct rvu_npc_mcam_rule *output, u8 intf,
+ int blkaddr)
{
u64 dmac_mask = ether_addr_to_u64(mask->dmac);
u64 smac_mask = ether_addr_to_u64(mask->smac);
@@ -817,6 +890,11 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
0, ~0ULL, 0, intf);
+ if (features & BIT_ULL(NPC_LXMB)) {
+ output->lxmb = is_broadcast_ether_addr(pkt->dmac) ? 2 : 1;
+ npc_update_entry(rvu, NPC_LXMB, entry, output->lxmb, 0,
+ output->lxmb, 0, intf);
+ }
#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \
do { \
if (features & BIT_ULL((field))) { \
@@ -828,10 +906,13 @@ do { \
} while (0)
NPC_WRITE_FLOW(NPC_DMAC, dmac, dmac_val, 0, dmac_mask, 0);
+
NPC_WRITE_FLOW(NPC_SMAC, smac, smac_val, 0, smac_mask, 0);
NPC_WRITE_FLOW(NPC_ETYPE, etype, ntohs(pkt->etype), 0,
ntohs(mask->etype), 0);
NPC_WRITE_FLOW(NPC_TOS, tos, pkt->tos, 0, mask->tos, 0);
+ NPC_WRITE_FLOW(NPC_IPFRAG_IPV4, ip_flag, pkt->ip_flag, 0,
+ mask->ip_flag, 0);
NPC_WRITE_FLOW(NPC_SIP_IPV4, ip4src, ntohl(pkt->ip4src), 0,
ntohl(mask->ip4src), 0);
NPC_WRITE_FLOW(NPC_DIP_IPV4, ip4dst, ntohl(pkt->ip4dst), 0,
@@ -852,12 +933,16 @@ do { \
NPC_WRITE_FLOW(NPC_OUTER_VID, vlan_tci, ntohs(pkt->vlan_tci), 0,
ntohs(mask->vlan_tci), 0);
+ NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0,
+ mask->next_header, 0);
npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
npc_update_vlan_features(rvu, entry, features, intf);
+
+ npc_update_field_hash(rvu, intf, entry, blkaddr, features,
+ pkt, mask, opkt, omask);
}
-static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam,
- u16 entry)
+static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, u16 entry)
{
struct rvu_npc_mcam_rule *iter;
@@ -954,8 +1039,20 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
action.match_id = req->match_id;
action.flow_key_alg = req->flow_key_alg;
- if (req->op == NIX_RX_ACTION_DEFAULT && pfvf->def_ucast_rule)
- action = pfvf->def_ucast_rule->rx_action;
+ if (req->op == NIX_RX_ACTION_DEFAULT) {
+ if (pfvf->def_ucast_rule) {
+ action = pfvf->def_ucast_rule->rx_action;
+ } else {
+ /* For profiles which do not extract DMAC, the default
+ * unicast entry is unused. Hence modify the action for
+ * requests that use the same action as the default
+ * unicast entry.
+ */
+ *(u64 *)&action = 0;
+ action.pf_func = target;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ }
+ }
entry->action = *(u64 *)&action;
@@ -1023,8 +1120,9 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
u16 owner = req->hdr.pcifunc;
struct msg_rsp write_rsp;
struct mcam_entry *entry;
- int entry_index, err;
bool new = false;
+ u16 entry_index;
+ int err;
installed_features = req->features;
features = req->features;
@@ -1032,7 +1130,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
entry_index = req->entry;
npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
- req->intf);
+ req->intf, blkaddr);
if (is_npc_intf_rx(req->intf))
npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
@@ -1057,7 +1155,8 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
npc_update_flow(rvu, entry, missing_features,
&def_ucast_rule->packet,
&def_ucast_rule->mask,
- &dummy, req->intf);
+ &dummy, req->intf,
+ blkaddr);
installed_features = req->features | missing_features;
}
@@ -1098,14 +1197,6 @@ find_rule:
write_req.cntr = rule->cntr;
}
- err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
- &write_rsp);
- if (err) {
- rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
- if (new)
- kfree(rule);
- return err;
- }
/* update rule */
memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
@@ -1122,6 +1213,7 @@ find_rule:
rule->chan_mask = write_req.entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
rule->chan = write_req.entry_data.kw[0] & NPC_KEX_CHAN_MASK;
rule->chan &= rule->chan_mask;
+ rule->lxmb = dummy.lxmb;
if (is_npc_intf_tx(req->intf))
rule->intf = pfvf->nix_tx_intf;
else
@@ -1132,6 +1224,18 @@ find_rule:
if (req->default_rule)
pfvf->def_ucast_rule = rule;
+ /* write to mcam entry registers */
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
+ &write_rsp);
+ if (err) {
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+ if (new) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ return err;
+ }
+
/* VF's MAC address is being changed via PF */
if (pf_set_vfs_mac) {
ether_addr_copy(pfvf->default_mac, req->packet.dmac);
@@ -1172,6 +1276,35 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
if (!is_npc_interface_valid(rvu, req->intf))
return NPC_FLOW_INTF_INVALID;
+ /* If DMAC is not extracted in MKEX, rules installed by AF
+ * can rely on L2MB bit set by hardware protocol checker for
+ * broadcast and multicast addresses.
+ */
+ if (npc_check_field(rvu, blkaddr, NPC_DMAC, req->intf))
+ goto process_flow;
+
+ if (is_pffunc_af(req->hdr.pcifunc) &&
+ req->features & BIT_ULL(NPC_DMAC)) {
+ if (is_unicast_ether_addr(req->packet.dmac)) {
+ dev_warn(rvu->dev,
+ "%s: mkex profile does not support ucast flow\n",
+ __func__);
+ return NPC_FLOW_NOT_SUPPORTED;
+ }
+
+ if (!npc_is_field_present(rvu, NPC_LXMB, req->intf)) {
+ dev_warn(rvu->dev,
+ "%s: mkex profile does not support bcast/mcast flow",
+ __func__);
+ return NPC_FLOW_NOT_SUPPORTED;
+ }
+
+ /* Modify feature to use LXMB instead of DMAC */
+ req->features &= ~BIT_ULL(NPC_DMAC);
+ req->features |= BIT_ULL(NPC_LXMB);
+ }
+
+process_flow:
if (from_vf && req->default_rule)
return NPC_FLOW_VF_PERM_DENIED;
@@ -1420,3 +1553,117 @@ void npc_mcam_disable_flows(struct rvu *rvu, u16 target)
}
mutex_unlock(&mcam->lock);
}
+
+/* Install a single drop on non hit rule starting from the 0th index. This is
+ * an extension to the RPM MAC filter to support more rules.
+ */
+int npc_install_mcam_drop_rule(struct rvu *rvu, int mcam_idx, u16 *counter_idx,
+ u64 chan_val, u64 chan_mask, u64 exact_val, u64 exact_mask,
+ u64 bcast_mcast_val, u64 bcast_mcast_mask)
+{
+ struct npc_mcam_alloc_counter_req cntr_req = { 0 };
+ struct npc_mcam_alloc_counter_rsp cntr_rsp = { 0 };
+ struct npc_mcam_write_entry_req req = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_npc_mcam_rule *rule;
+ struct msg_rsp rsp;
+ bool enabled;
+ int blkaddr;
+ int err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -ENODEV;
+ }
+
+ /* Bail out if no exact match support */
+ if (!rvu_npc_exact_has_match_table(rvu)) {
+ dev_info(rvu->dev, "%s: No support for exact match feature\n", __func__);
+ return -EINVAL;
+ }
+
+ /* If 0th entry is already used, return err */
+ enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_idx);
+ if (enabled) {
+ dev_err(rvu->dev, "%s: failed to add single drop on non hit rule at %d th index\n",
+ __func__, mcam_idx);
+ return -EINVAL;
+ }
+
+ /* Add this entry to mcam rules list */
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ /* Disable rule by default. Enable rule when first dmac filter is
+ * installed
+ */
+ rule->enable = false;
+ rule->chan = chan_val;
+ rule->chan_mask = chan_mask;
+ rule->entry = mcam_idx;
+ rvu_mcam_add_rule(mcam, rule);
+
+ /* Reserve slot 0 */
+ npc_mcam_rsrcs_reserve(rvu, blkaddr, mcam_idx);
+
+ /* Allocate counter for this single drop on non hit rule */
+ cntr_req.hdr.pcifunc = 0; /* AF request */
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+ err = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to allocate counter for drop rule (err=%d)\n",
+ __func__, err);
+ return -EFAULT;
+ }
+ *counter_idx = cntr_rsp.cntr;
+
+ /* Fill in fields for this mcam entry */
+ npc_update_entry(rvu, NPC_EXACT_RESULT, &req.entry_data, exact_val, 0,
+ exact_mask, 0, NIX_INTF_RX);
+ npc_update_entry(rvu, NPC_CHAN, &req.entry_data, chan_val, 0,
+ chan_mask, 0, NIX_INTF_RX);
+ npc_update_entry(rvu, NPC_LXMB, &req.entry_data, bcast_mcast_val, 0,
+ bcast_mcast_mask, 0, NIX_INTF_RX);
+
+ req.intf = NIX_INTF_RX;
+ req.set_cntr = true;
+ req.cntr = cntr_rsp.cntr;
+ req.entry = mcam_idx;
+
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &req, &rsp);
+ if (err) {
+ dev_err(rvu->dev, "%s: Installation of single drop on non hit rule at %d failed\n",
+ __func__, mcam_idx);
+ return err;
+ }
+
+ dev_err(rvu->dev, "%s: Installed single drop on non hit rule at %d, cntr=%d\n",
+ __func__, mcam_idx, req.cntr);
+
+ /* disable entry at Bank 0, index 0 */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, mcam_idx, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_get_field_status(struct rvu *rvu,
+ struct npc_get_field_status_req *req,
+ struct npc_get_field_status_rsp *rsp)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (!is_npc_interface_valid(rvu, req->intf))
+ return NPC_FLOW_INTF_INVALID;
+
+ if (npc_check_field(rvu, blkaddr, req->field, req->intf))
+ rsp->enable = 1;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
new file mode 100644
index 000000000000..3f5c9042d10e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#ifndef __RVU_NPC_FS_H
+#define __RVU_NPC_FS_H
+
+#define IPV6_WORDS 4
+#define NPC_BYTESM GENMASK_ULL(19, 16)
+#define NPC_HDR_OFFSET GENMASK_ULL(15, 8)
+#define NPC_KEY_OFFSET GENMASK_ULL(5, 0)
+#define NPC_LDATA_EN BIT_ULL(7)
+
+void npc_update_entry(struct rvu *rvu, enum key_fields type,
+ struct mcam_entry *entry, u64 val_lo,
+ u64 val_hi, u64 mask_lo, u64 mask_hi, u8 intf);
+
+#endif /* RVU_NPC_FS_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
new file mode 100644
index 000000000000..51209119f0f2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -0,0 +1,2017 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/stddef.h>
+#include <linux/debugfs.h>
+
+#include "rvu_struct.h"
+#include "rvu_reg.h"
+#include "rvu.h"
+#include "npc.h"
+#include "cgx.h"
+#include "rvu_npc_fs.h"
+#include "rvu_npc_hash.h"
+
+static u64 rvu_npc_wide_extract(const u64 input[], size_t start_bit,
+ size_t width_bits)
+{
+ const u64 mask = ~(u64)((~(__uint128_t)0) << width_bits);
+ const size_t msb = start_bit + width_bits - 1;
+ const size_t lword = start_bit >> 6;
+ const size_t uword = msb >> 6;
+ size_t lbits;
+ u64 hi, lo;
+
+ if (lword == uword)
+ return (input[lword] >> (start_bit & 63)) & mask;
+
+ lbits = 64 - (start_bit & 63);
+ hi = input[uword];
+ lo = (input[lword] >> (start_bit & 63));
+ return ((hi << lbits) | lo) & mask;
+}
+
+static void rvu_npc_lshift_key(u64 *key, size_t key_bit_len)
+{
+ u64 prev_orig_word = 0;
+ u64 cur_orig_word = 0;
+ size_t extra = key_bit_len % 64;
+ size_t max_idx = key_bit_len / 64;
+ size_t i;
+
+ if (extra)
+ max_idx++;
+
+ for (i = 0; i < max_idx; i++) {
+ cur_orig_word = key[i];
+ key[i] = key[i] << 1;
+ key[i] |= ((prev_orig_word >> 63) & 0x1);
+ prev_orig_word = cur_orig_word;
+ }
+}
+
+static u32 rvu_npc_toeplitz_hash(const u64 *data, u64 *key, size_t data_bit_len,
+ size_t key_bit_len)
+{
+ u32 hash_out = 0;
+ u64 temp_data = 0;
+ int i;
+
+ for (i = data_bit_len - 1; i >= 0; i--) {
+ temp_data = (data[i / 64]);
+ temp_data = temp_data >> (i % 64);
+ temp_data &= 0x1;
+ if (temp_data)
+ hash_out ^= (u32)(rvu_npc_wide_extract(key, key_bit_len - 32, 32));
+
+ rvu_npc_lshift_key(key, key_bit_len);
+ }
+
+ return hash_out;
+}
+
+u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
+ u8 intf, u8 hash_idx)
+{
+ u64 hash_key[3];
+ u64 data_padded[2];
+ u32 field_hash;
+
+ hash_key[0] = rsp.secret_key[1] << 31;
+ hash_key[0] |= rsp.secret_key[2];
+ hash_key[1] = rsp.secret_key[1] >> 33;
+ hash_key[1] |= rsp.secret_key[0] << 31;
+ hash_key[2] = rsp.secret_key[0] >> 33;
+
+ data_padded[0] = rsp.hash_mask[intf][hash_idx][0] & ldata[0];
+ data_padded[1] = rsp.hash_mask[intf][hash_idx][1] & ldata[1];
+ field_hash = rvu_npc_toeplitz_hash(data_padded, hash_key, 128, 159);
+
+ field_hash &= FIELD_GET(GENMASK(63, 32), rsp.hash_ctrl[intf][hash_idx]);
+ field_hash += FIELD_GET(GENMASK(31, 0), rsp.hash_ctrl[intf][hash_idx]);
+ return field_hash;
+}
+
+static u64 npc_update_use_hash(struct rvu *rvu, int blkaddr,
+ u8 intf, int lid, int lt, int ld)
+{
+ u8 hdr, key;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld));
+ hdr = FIELD_GET(NPC_HDR_OFFSET, cfg);
+ key = FIELD_GET(NPC_KEY_OFFSET, cfg);
+
+ /* Update use_hash(bit-20) to 'true' and
+ * bytesm1(bit-16:19) to '0x3' in KEX_LD_CFG
+ */
+ cfg = KEX_LD_CFG_USE_HASH(0x1, 0x03,
+ hdr, 0x1, 0x0, key);
+
+ return cfg;
+}
+
+static void npc_program_mkex_hash_rx(struct rvu *rvu, int blkaddr,
+ u8 intf)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ int lid, lt, ld, hash_cnt = 0;
+
+ if (is_npc_intf_tx(intf))
+ return;
+
+ /* Program HASH_CFG */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+ u64 cfg;
+
+ if (hash_cnt == NPC_MAX_HASH)
+ return;
+
+ cfg = npc_update_use_hash(rvu, blkaddr,
+ intf, lid, lt, ld);
+ /* Set updated KEX configuration */
+ SET_KEX_LD(intf, lid, lt, ld, cfg);
+ /* Set HASH configuration */
+ SET_KEX_LD_HASH(intf, ld,
+ mkex_hash->hash[intf][ld]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 0,
+ mkex_hash->hash_mask[intf][ld][0]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 1,
+ mkex_hash->hash_mask[intf][ld][1]);
+ SET_KEX_LD_HASH_CTRL(intf, ld,
+ mkex_hash->hash_ctrl[intf][ld]);
+
+ hash_cnt++;
+ }
+ }
+ }
+ }
+}
+
+static void npc_program_mkex_hash_tx(struct rvu *rvu, int blkaddr,
+ u8 intf)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ int lid, lt, ld, hash_cnt = 0;
+
+ if (is_npc_intf_rx(intf))
+ return;
+
+ /* Program HASH_CFG */
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][lt][ld]) {
+ u64 cfg;
+
+ if (hash_cnt == NPC_MAX_HASH)
+ return;
+
+ cfg = npc_update_use_hash(rvu, blkaddr,
+ intf, lid, lt, ld);
+ /* Set updated KEX configuration */
+ SET_KEX_LD(intf, lid, lt, ld, cfg);
+ /* Set HASH configuration */
+ SET_KEX_LD_HASH(intf, ld,
+ mkex_hash->hash[intf][ld]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 0,
+ mkex_hash->hash_mask[intf][ld][0]);
+ SET_KEX_LD_HASH_MASK(intf, ld, 1,
+ mkex_hash->hash_mask[intf][ld][1]);
+ SET_KEX_LD_HASH_CTRL(intf, ld,
+ mkex_hash->hash_ctrl[intf][ld]);
+ hash_cnt++;
+ }
+ }
+ }
+}
+
+void npc_config_secret_key(struct rvu *rvu, int blkaddr)
+{
+ struct hw_cap *hwcap = &rvu->hw->cap;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+
+ if (!hwcap->npc_hash_extract)
+ return;
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
+ RVU_NPC_HASH_SECRET_KEY0);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf),
+ RVU_NPC_HASH_SECRET_KEY1);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf),
+ RVU_NPC_HASH_SECRET_KEY2);
+ }
+}
+
+void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
+{
+ struct hw_cap *hwcap = &rvu->hw->cap;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u8 intf;
+
+ if (!hwcap->npc_hash_extract)
+ return;
+
+ for (intf = 0; intf < hw->npc_intfs; intf++) {
+ npc_program_mkex_hash_rx(rvu, blkaddr, intf);
+ npc_program_mkex_hash_tx(rvu, blkaddr, intf);
+ }
+}
+
+void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ struct mcam_entry *entry,
+ int blkaddr,
+ u64 features,
+ struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct flow_msg *opkt,
+ struct flow_msg *omask)
+{
+ struct npc_mcam_kex_hash *mkex_hash = rvu->kpu.mkex_hash;
+ struct npc_get_field_hash_info_req req;
+ struct npc_get_field_hash_info_rsp rsp;
+ u64 ldata[2], cfg;
+ u32 field_hash;
+ u8 hash_idx;
+
+ if (!rvu->hw->cap.npc_hash_extract) {
+ dev_dbg(rvu->dev, "%s: Field hash extract feature is not supported\n", __func__);
+ return;
+ }
+
+ req.intf = intf;
+ rvu_mbox_handler_npc_get_field_hash_info(rvu, &req, &rsp);
+
+ for (hash_idx = 0; hash_idx < NPC_MAX_HASH; hash_idx++) {
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_CFG(intf, hash_idx));
+ if ((cfg & BIT_ULL(11)) && (cfg & BIT_ULL(12))) {
+ u8 lid = (cfg & GENMASK_ULL(10, 8)) >> 8;
+ u8 ltype = (cfg & GENMASK_ULL(7, 4)) >> 4;
+ u8 ltype_mask = cfg & GENMASK_ULL(3, 0);
+
+ if (mkex_hash->lid_lt_ld_hash_en[intf][lid][ltype][hash_idx]) {
+ switch (ltype & ltype_mask) {
+ /* If hash extract enabled is supported for IPv6 then
+ * 128 bit IPv6 source and destination addressed
+ * is hashed to 32 bit value.
+ */
+ case NPC_LT_LC_IP6:
+ /* ld[0] == hash_idx[0] == Source IPv6
+ * ld[1] == hash_idx[1] == Destination IPv6
+ */
+ if ((features & BIT_ULL(NPC_SIP_IPV6)) && !hash_idx) {
+ u32 src_ip[IPV6_WORDS];
+
+ be32_to_cpu_array(src_ip, pkt->ip6src, IPV6_WORDS);
+ ldata[1] = (u64)src_ip[0] << 32 | src_ip[1];
+ ldata[0] = (u64)src_ip[2] << 32 | src_ip[3];
+ field_hash = npc_field_hash_calc(ldata,
+ rsp,
+ intf,
+ hash_idx);
+ npc_update_entry(rvu, NPC_SIP_IPV6, entry,
+ field_hash, 0,
+ GENMASK(31, 0), 0, intf);
+ memcpy(&opkt->ip6src, &pkt->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&omask->ip6src, &mask->ip6src,
+ sizeof(mask->ip6src));
+ } else if ((features & BIT_ULL(NPC_DIP_IPV6)) && hash_idx) {
+ u32 dst_ip[IPV6_WORDS];
+
+ be32_to_cpu_array(dst_ip, pkt->ip6dst, IPV6_WORDS);
+ ldata[1] = (u64)dst_ip[0] << 32 | dst_ip[1];
+ ldata[0] = (u64)dst_ip[2] << 32 | dst_ip[3];
+ field_hash = npc_field_hash_calc(ldata,
+ rsp,
+ intf,
+ hash_idx);
+ npc_update_entry(rvu, NPC_DIP_IPV6, entry,
+ field_hash, 0,
+ GENMASK(31, 0), 0, intf);
+ memcpy(&opkt->ip6dst, &pkt->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&omask->ip6dst, &mask->ip6dst,
+ sizeof(mask->ip6dst));
+ }
+
+ break;
+ }
+ }
+ }
+ }
+}
+
+int rvu_mbox_handler_npc_get_field_hash_info(struct rvu *rvu,
+ struct npc_get_field_hash_info_req *req,
+ struct npc_get_field_hash_info_rsp *rsp)
+{
+ u64 *secret_key = rsp->secret_key;
+ u8 intf = req->intf;
+ int i, j, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ secret_key[0] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf));
+ secret_key[1] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY1(intf));
+ secret_key[2] = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY2(intf));
+
+ for (i = 0; i < NPC_MAX_HASH; i++) {
+ for (j = 0; j < NPC_MAX_HASH_MASK; j++) {
+ rsp->hash_mask[NIX_INTF_RX][i][j] =
+ GET_KEX_LD_HASH_MASK(NIX_INTF_RX, i, j);
+ rsp->hash_mask[NIX_INTF_TX][i][j] =
+ GET_KEX_LD_HASH_MASK(NIX_INTF_TX, i, j);
+ }
+ }
+
+ for (i = 0; i < NPC_MAX_INTF; i++)
+ for (j = 0; j < NPC_MAX_HASH; j++)
+ rsp->hash_ctrl[i][j] = GET_KEX_LD_HASH_CTRL(i, j);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac2u64 - utility function to convert mac address to u64.
+ * @mac_addr: MAC address.
+ * Return: mdata for exact match table.
+ */
+static u64 rvu_npc_exact_mac2u64(u8 *mac_addr)
+{
+ u64 mac = 0;
+ int index;
+
+ for (index = ETH_ALEN - 1; index >= 0; index--)
+ mac |= ((u64)*mac_addr++) << (8 * index);
+
+ return mac;
+}
+
+/**
+ * rvu_exact_prepare_mdata - Make mdata for mcam entry
+ * @mac: MAC address
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @mask: LDATA mask.
+ * Return: Meta data
+ */
+static u64 rvu_exact_prepare_mdata(u8 *mac, u16 chan, u16 ctype, u64 mask)
+{
+ u64 ldata = rvu_npc_exact_mac2u64(mac);
+
+ /* Please note that mask is 48bit which excludes chan and ctype.
+ * Increase mask bits if we need to include them as well.
+ */
+ ldata |= ((u64)chan << 48);
+ ldata |= ((u64)ctype << 60);
+ ldata &= mask;
+ ldata = ldata << 2;
+
+ return ldata;
+}
+
+/**
+ * rvu_exact_calculate_hash - calculate hash index to mem table.
+ * @rvu: resource virtualization unit.
+ * @chan: Channel number
+ * @ctype: Channel type.
+ * @mac: MAC address
+ * @mask: HASH mask.
+ * @table_depth: Depth of table.
+ * Return: Hash value
+ */
+static u32 rvu_exact_calculate_hash(struct rvu *rvu, u16 chan, u16 ctype, u8 *mac,
+ u64 mask, u32 table_depth)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ u64 hash_key[2];
+ u64 key_in[2];
+ u64 ldata;
+ u32 hash;
+
+ key_in[0] = RVU_NPC_HASH_SECRET_KEY0;
+ key_in[1] = RVU_NPC_HASH_SECRET_KEY2;
+
+ hash_key[0] = key_in[0] << 31;
+ hash_key[0] |= key_in[1];
+ hash_key[1] = key_in[0] >> 33;
+
+ ldata = rvu_exact_prepare_mdata(mac, chan, ctype, mask);
+
+ dev_dbg(rvu->dev, "%s: ldata=0x%llx hash_key0=0x%llx hash_key2=0x%llx\n", __func__,
+ ldata, hash_key[1], hash_key[0]);
+ hash = rvu_npc_toeplitz_hash(&ldata, (u64 *)hash_key, 64, 95);
+
+ hash &= table->mem_table.hash_mask;
+ hash += table->mem_table.hash_offset;
+ dev_dbg(rvu->dev, "%s: hash=%x\n", __func__, hash);
+
+ return hash;
+}
+
+/**
+ * rvu_npc_exact_alloc_mem_table_entry - find free entry in 4 way table.
+ * @rvu: resource virtualization unit.
+ * @way: Indicate way to table.
+ * @index: Hash index to 4 way table.
+ * @hash: Hash value.
+ *
+ * Searches 4 way table using hash index. Returns 0 on success.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_alloc_mem_table_entry(struct rvu *rvu, u8 *way,
+ u32 *index, unsigned int hash)
+{
+ struct npc_exact_table *table;
+ int depth, i;
+
+ table = rvu->hw->table;
+ depth = table->mem_table.depth;
+
+ /* Check all the 4 ways for a free slot. */
+ mutex_lock(&table->lock);
+ for (i = 0; i < table->mem_table.ways; i++) {
+ if (test_bit(hash + i * depth, table->mem_table.bmap))
+ continue;
+
+ set_bit(hash + i * depth, table->mem_table.bmap);
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: mem table entry alloc success (way=%d index=%d)\n",
+ __func__, i, hash);
+
+ *way = i;
+ *index = hash;
+ return 0;
+ }
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: No space in 4 way exact way, weight=%u\n", __func__,
+ bitmap_weight(table->mem_table.bmap, table->mem_table.depth));
+ return -ENOSPC;
+}
+
+/**
+ * rvu_npc_exact_free_id - Free seq id from bitmat.
+ * @rvu: Resource virtualization unit.
+ * @seq_id: Sequence identifier to be freed.
+ */
+static void rvu_npc_exact_free_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table *table;
+
+ table = rvu->hw->table;
+ mutex_lock(&table->lock);
+ clear_bit(seq_id, table->id_bmap);
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: freed id %d\n", __func__, seq_id);
+}
+
+/**
+ * rvu_npc_exact_alloc_id - Alloc seq id from bitmap.
+ * @rvu: Resource virtualization unit.
+ * @seq_id: Sequence identifier.
+ * Return: True or false.
+ */
+static bool rvu_npc_exact_alloc_id(struct rvu *rvu, u32 *seq_id)
+{
+ struct npc_exact_table *table;
+ u32 idx;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+ idx = find_first_zero_bit(table->id_bmap, table->tot_ids);
+ if (idx == table->tot_ids) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: No space in id bitmap (%d)\n",
+ __func__, table->tot_ids);
+
+ return false;
+ }
+
+ /* Mark bit map to indicate that slot is used.*/
+ set_bit(idx, table->id_bmap);
+ mutex_unlock(&table->lock);
+
+ *seq_id = idx;
+ dev_dbg(rvu->dev, "%s: Allocated id (%d)\n", __func__, *seq_id);
+
+ return true;
+}
+
+/**
+ * rvu_npc_exact_alloc_cam_table_entry - find free slot in fully associative table.
+ * @rvu: resource virtualization unit.
+ * @index: Index to exact CAM table.
+ * Return: 0 upon success; else error number.
+ */
+static int rvu_npc_exact_alloc_cam_table_entry(struct rvu *rvu, int *index)
+{
+ struct npc_exact_table *table;
+ u32 idx;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+ idx = find_first_zero_bit(table->cam_table.bmap, table->cam_table.depth);
+ if (idx == table->cam_table.depth) {
+ mutex_unlock(&table->lock);
+ dev_info(rvu->dev, "%s: No space in exact cam table, weight=%u\n", __func__,
+ bitmap_weight(table->cam_table.bmap, table->cam_table.depth));
+ return -ENOSPC;
+ }
+
+ /* Mark bit map to indicate that slot is used.*/
+ set_bit(idx, table->cam_table.bmap);
+ mutex_unlock(&table->lock);
+
+ *index = idx;
+ dev_dbg(rvu->dev, "%s: cam table entry alloc success (index=%d)\n",
+ __func__, idx);
+ return 0;
+}
+
+/**
+ * rvu_exact_prepare_table_entry - Data for exact match table entry.
+ * @rvu: Resource virtualization unit.
+ * @enable: Enable/Disable entry
+ * @ctype: Software defined channel type. Currently set as 0.
+ * @chan: Channel number.
+ * @mac_addr: Destination mac address.
+ * Return: mdata for exact match table.
+ */
+static u64 rvu_exact_prepare_table_entry(struct rvu *rvu, bool enable,
+ u8 ctype, u16 chan, u8 *mac_addr)
+
+{
+ u64 ldata = rvu_npc_exact_mac2u64(mac_addr);
+
+ /* Enable or disable */
+ u64 mdata = FIELD_PREP(GENMASK_ULL(63, 63), enable ? 1 : 0);
+
+ /* Set Ctype */
+ mdata |= FIELD_PREP(GENMASK_ULL(61, 60), ctype);
+
+ /* Set chan */
+ mdata |= FIELD_PREP(GENMASK_ULL(59, 48), chan);
+
+ /* MAC address */
+ mdata |= FIELD_PREP(GENMASK_ULL(47, 0), ldata);
+
+ return mdata;
+}
+
+/**
+ * rvu_exact_config_secret_key - Configure secret key.
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_secret_key(struct rvu *rvu)
+{
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET0(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY0);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET1(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY1);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_SECRET2(NIX_INTF_RX),
+ RVU_NPC_HASH_SECRET_KEY2);
+}
+
+/**
+ * rvu_exact_config_search_key - Configure search key
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_search_key(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 reg_val;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* HDR offset */
+ reg_val = FIELD_PREP(GENMASK_ULL(39, 32), 0);
+
+ /* BYTESM1, number of bytes - 1 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(18, 16), ETH_ALEN - 1);
+
+ /* Enable LID and set LID to NPC_LID_LA */
+ reg_val |= FIELD_PREP(GENMASK_ULL(11, 11), 1);
+ reg_val |= FIELD_PREP(GENMASK_ULL(10, 8), NPC_LID_LA);
+
+ /* Clear layer type based extraction */
+
+ /* Disable LT_EN */
+ reg_val |= FIELD_PREP(GENMASK_ULL(12, 12), 0);
+
+ /* Set LTYPE_MATCH to 0 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(7, 4), 0);
+
+ /* Set LTYPE_MASK to 0 */
+ reg_val |= FIELD_PREP(GENMASK_ULL(3, 0), 0);
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_CFG(NIX_INTF_RX), reg_val);
+}
+
+/**
+ * rvu_exact_config_result_ctrl - Set exact table hash control
+ * @rvu: Resource virtualization unit.
+ * @depth: Depth of Exact match table.
+ *
+ * Sets mask and offset for hash for mem table.
+ */
+static void rvu_exact_config_result_ctrl(struct rvu *rvu, uint32_t depth)
+{
+ int blkaddr;
+ u64 reg = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* Set mask. Note that depth is a power of 2 */
+ rvu->hw->table->mem_table.hash_mask = (depth - 1);
+ reg |= FIELD_PREP(GENMASK_ULL(42, 32), (depth - 1));
+
+ /* Set offset as 0 */
+ rvu->hw->table->mem_table.hash_offset = 0;
+ reg |= FIELD_PREP(GENMASK_ULL(10, 0), 0);
+
+ /* Set reg for RX */
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_RESULT_CTL(NIX_INTF_RX), reg);
+ /* Store hash mask and offset for s/w algorithm */
+}
+
+/**
+ * rvu_exact_config_table_mask - Set exact table mask.
+ * @rvu: Resource virtualization unit.
+ */
+static void rvu_exact_config_table_mask(struct rvu *rvu)
+{
+ int blkaddr;
+ u64 mask = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+
+ /* Don't use Ctype */
+ mask |= FIELD_PREP(GENMASK_ULL(61, 60), 0);
+
+ /* Set chan */
+ mask |= GENMASK_ULL(59, 48);
+
+ /* Full ldata */
+ mask |= GENMASK_ULL(47, 0);
+
+ /* Store mask for s/w hash calcualtion */
+ rvu->hw->table->mem_table.mask = mask;
+
+ /* Set mask for RX.*/
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_EXACT_MASK(NIX_INTF_RX), mask);
+}
+
+/**
+ * rvu_npc_exact_get_max_entries - Get total number of entries in table.
+ * @rvu: resource virtualization unit.
+ * Return: Maximum table entries possible.
+ */
+u32 rvu_npc_exact_get_max_entries(struct rvu *rvu)
+{
+ struct npc_exact_table *table;
+
+ table = rvu->hw->table;
+ return table->tot_ids;
+}
+
+/**
+ * rvu_npc_exact_has_match_table - Checks support for exact match.
+ * @rvu: resource virtualization unit.
+ * Return: True if exact match table is supported/enabled.
+ */
+bool rvu_npc_exact_has_match_table(struct rvu *rvu)
+{
+ return rvu->hw->cap.npc_exact_match_enabled;
+}
+
+/**
+ * __rvu_npc_exact_find_entry_by_seq_id - find entry by id
+ * @rvu: resource virtualization unit.
+ * @seq_id: Sequence identifier.
+ *
+ * Caller should acquire the lock.
+ * Return: Pointer to table entry.
+ */
+static struct npc_exact_table_entry *
+__rvu_npc_exact_find_entry_by_seq_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ struct npc_exact_table_entry *entry = NULL;
+ struct list_head *lhead;
+
+ lhead = &table->lhead_gbl;
+
+ /* traverse to find the matching entry */
+ list_for_each_entry(entry, lhead, glist) {
+ if (entry->seq_id != seq_id)
+ continue;
+
+ return entry;
+ }
+
+ return NULL;
+}
+
+/**
+ * rvu_npc_exact_add_to_list - Add entry to list
+ * @rvu: resource virtualization unit.
+ * @opc_type: OPCODE to select MEM/CAM table.
+ * @ways: MEM table ways.
+ * @index: Index in MEM/CAM table.
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @mac_addr: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @seq_id: Sequence identifier
+ * @cmd: True if function is called by ethtool cmd
+ * @mcam_idx: NPC mcam index of DMAC entry in NPC mcam.
+ * @pcifunc: pci function
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_add_to_list(struct rvu *rvu, enum npc_exact_opc_type opc_type, u8 ways,
+ u32 index, u8 cgx_id, u8 lmac_id, u8 *mac_addr, u16 chan,
+ u8 ctype, u32 *seq_id, bool cmd, u32 mcam_idx, u16 pcifunc)
+{
+ struct npc_exact_table_entry *entry, *tmp, *iter;
+ struct npc_exact_table *table = rvu->hw->table;
+ struct list_head *lhead, *pprev;
+
+ WARN_ON(ways >= NPC_EXACT_TBL_MAX_WAYS);
+
+ if (!rvu_npc_exact_alloc_id(rvu, seq_id)) {
+ dev_err(rvu->dev, "%s: Generate seq id failed\n", __func__);
+ return -EFAULT;
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ rvu_npc_exact_free_id(rvu, *seq_id);
+ dev_err(rvu->dev, "%s: Memory allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&table->lock);
+ switch (opc_type) {
+ case NPC_EXACT_OPC_CAM:
+ lhead = &table->lhead_cam_tbl_entry;
+ table->cam_tbl_entry_cnt++;
+ break;
+
+ case NPC_EXACT_OPC_MEM:
+ lhead = &table->lhead_mem_tbl_entry[ways];
+ table->mem_tbl_entry_cnt++;
+ break;
+
+ default:
+ mutex_unlock(&table->lock);
+ kfree(entry);
+ rvu_npc_exact_free_id(rvu, *seq_id);
+
+ dev_err(rvu->dev, "%s: Unknown opc type%d\n", __func__, opc_type);
+ return -EINVAL;
+ }
+
+ /* Add to global list */
+ INIT_LIST_HEAD(&entry->glist);
+ list_add_tail(&entry->glist, &table->lhead_gbl);
+ INIT_LIST_HEAD(&entry->list);
+ entry->index = index;
+ entry->ways = ways;
+ entry->opc_type = opc_type;
+
+ entry->pcifunc = pcifunc;
+
+ ether_addr_copy(entry->mac, mac_addr);
+ entry->chan = chan;
+ entry->ctype = ctype;
+ entry->cgx_id = cgx_id;
+ entry->lmac_id = lmac_id;
+
+ entry->seq_id = *seq_id;
+
+ entry->mcam_idx = mcam_idx;
+ entry->cmd = cmd;
+
+ pprev = lhead;
+
+ /* Insert entry in ascending order of index */
+ list_for_each_entry_safe(iter, tmp, lhead, list) {
+ if (index < iter->index)
+ break;
+
+ pprev = &iter->list;
+ }
+
+ /* Add to each table list */
+ list_add(&entry->list, pprev);
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mem_table_write - Wrapper for register write
+ * @rvu: resource virtualization unit.
+ * @blkaddr: Block address
+ * @ways: ways for MEM table.
+ * @index: Index in MEM
+ * @mdata: Meta data to be written to register.
+ */
+static void rvu_npc_exact_mem_table_write(struct rvu *rvu, int blkaddr, u8 ways,
+ u32 index, u64 mdata)
+{
+ rvu_write64(rvu, blkaddr, NPC_AF_EXACT_MEM_ENTRY(ways, index), mdata);
+}
+
+/**
+ * rvu_npc_exact_cam_table_write - Wrapper for register write
+ * @rvu: resource virtualization unit.
+ * @blkaddr: Block address
+ * @index: Index in MEM
+ * @mdata: Meta data to be written to register.
+ */
+static void rvu_npc_exact_cam_table_write(struct rvu *rvu, int blkaddr,
+ u32 index, u64 mdata)
+{
+ rvu_write64(rvu, blkaddr, NPC_AF_EXACT_CAM_ENTRY(index), mdata);
+}
+
+/**
+ * rvu_npc_exact_dealloc_table_entry - dealloc table entry
+ * @rvu: resource virtualization unit.
+ * @opc_type: OPCODE for selection of table(MEM or CAM)
+ * @ways: ways if opc_type is MEM table.
+ * @index: Index of MEM or CAM table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_dealloc_table_entry(struct rvu *rvu, enum npc_exact_opc_type opc_type,
+ u8 ways, u32 index)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_exact_table *table;
+ u8 null_dmac[6] = { 0 };
+ int depth;
+
+ /* Prepare entry with all fields set to zero */
+ u64 null_mdata = rvu_exact_prepare_table_entry(rvu, false, 0, 0, null_dmac);
+
+ table = rvu->hw->table;
+ depth = table->mem_table.depth;
+
+ mutex_lock(&table->lock);
+
+ switch (opc_type) {
+ case NPC_EXACT_OPC_CAM:
+
+ /* Check whether entry is used already */
+ if (!test_bit(index, table->cam_table.bmap)) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: Trying to free an unused entry ways=%d index=%d\n",
+ __func__, ways, index);
+ return -EINVAL;
+ }
+
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, index, null_mdata);
+ clear_bit(index, table->cam_table.bmap);
+ break;
+
+ case NPC_EXACT_OPC_MEM:
+
+ /* Check whether entry is used already */
+ if (!test_bit(index + ways * depth, table->mem_table.bmap)) {
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: Trying to free an unused entry index=%d\n",
+ __func__, index);
+ return -EINVAL;
+ }
+
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, null_mdata);
+ clear_bit(index + ways * depth, table->mem_table.bmap);
+ break;
+
+ default:
+ mutex_unlock(&table->lock);
+ dev_err(rvu->dev, "%s: invalid opc type %d", __func__, opc_type);
+ return -ENOSPC;
+ }
+
+ mutex_unlock(&table->lock);
+
+ dev_dbg(rvu->dev, "%s: Successfully deleted entry (index=%d, ways=%d opc_type=%d\n",
+ __func__, index, ways, opc_type);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_alloc_table_entry - Allociate an entry
+ * @rvu: resource virtualization unit.
+ * @mac: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @index: Index of MEM table or CAM table.
+ * @ways: Ways. Only valid for MEM table.
+ * @opc_type: OPCODE to select table (MEM or CAM)
+ *
+ * Try allocating a slot from MEM table. If all 4 ways
+ * slot are full for a hash index, check availability in
+ * 32-entry CAM table for allocation.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_alloc_table_entry(struct rvu *rvu, char *mac, u16 chan, u8 ctype,
+ u32 *index, u8 *ways, enum npc_exact_opc_type *opc_type)
+{
+ struct npc_exact_table *table;
+ unsigned int hash;
+ int err;
+
+ table = rvu->hw->table;
+
+ /* Check in 4-ways mem entry for free slote */
+ hash = rvu_exact_calculate_hash(rvu, chan, ctype, mac, table->mem_table.mask,
+ table->mem_table.depth);
+ err = rvu_npc_exact_alloc_mem_table_entry(rvu, ways, index, hash);
+ if (!err) {
+ *opc_type = NPC_EXACT_OPC_MEM;
+ dev_dbg(rvu->dev, "%s: inserted in 4 ways hash table ways=%d, index=%d\n",
+ __func__, *ways, *index);
+ return 0;
+ }
+
+ dev_dbg(rvu->dev, "%s: failed to insert in 4 ways hash table\n", __func__);
+
+ /* wayss is 0 for cam table */
+ *ways = 0;
+ err = rvu_npc_exact_alloc_cam_table_entry(rvu, index);
+ if (!err) {
+ *opc_type = NPC_EXACT_OPC_CAM;
+ dev_dbg(rvu->dev, "%s: inserted in fully associative hash table index=%u\n",
+ __func__, *index);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s: failed to insert in fully associative hash table\n", __func__);
+ return -ENOSPC;
+}
+
+/**
+ * rvu_npc_exact_save_drop_rule_chan_and_mask - Save drop rules info in data base.
+ * @rvu: resource virtualization unit.
+ * @drop_mcam_idx: Drop rule index in NPC mcam.
+ * @chan_val: Channel value.
+ * @chan_mask: Channel Mask.
+ * @pcifunc: pcifunc of interface.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_save_drop_rule_chan_and_mask(struct rvu *rvu, int drop_mcam_idx,
+ u64 chan_val, u64 chan_mask, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int i;
+
+ table = rvu->hw->table;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].chan_val != (u16)chan_val)
+ continue;
+
+ if (table->drop_rule_map[i].chan_mask != (u16)chan_mask)
+ continue;
+
+ return false;
+ }
+
+ if (i == NPC_MCAM_DROP_RULE_MAX)
+ return false;
+
+ table->drop_rule_map[i].drop_rule_idx = drop_mcam_idx;
+ table->drop_rule_map[i].chan_val = (u16)chan_val;
+ table->drop_rule_map[i].chan_mask = (u16)chan_mask;
+ table->drop_rule_map[i].pcifunc = pcifunc;
+ table->drop_rule_map[i].valid = true;
+ return true;
+}
+
+/**
+ * rvu_npc_exact_calc_drop_rule_chan_and_mask - Calculate Channel number and mask.
+ * @rvu: resource virtualization unit.
+ * @intf_type: Interface type (SDK, LBK or CGX)
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LAMC identifier.
+ * @val: Channel number.
+ * @mask: Channel mask.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_calc_drop_rule_chan_and_mask(struct rvu *rvu, u8 intf_type,
+ u8 cgx_id, u8 lmac_id,
+ u64 *val, u64 *mask)
+{
+ u16 chan_val, chan_mask;
+
+ /* No support for SDP and LBK */
+ if (intf_type != NIX_INTF_TYPE_CGX)
+ return false;
+
+ chan_val = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
+ chan_mask = 0xfff;
+
+ if (val)
+ *val = chan_val;
+
+ if (mask)
+ *mask = chan_mask;
+
+ return true;
+}
+
+/**
+ * rvu_npc_exact_drop_rule_to_pcifunc - Retrieve pcifunc
+ * @rvu: resource virtualization unit.
+ * @drop_rule_idx: Drop rule index in NPC mcam.
+ *
+ * Debugfs (exact_drop_cnt) entry displays pcifunc for interface
+ * by retrieving the pcifunc value from data base.
+ * Return: Drop rule index.
+ */
+u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx)
+{
+ struct npc_exact_table *table;
+ int i;
+
+ table = rvu->hw->table;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].drop_rule_idx != drop_rule_idx)
+ continue;
+
+ return table->drop_rule_map[i].pcifunc;
+ }
+
+ dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
+ __func__, drop_rule_idx);
+ return -1;
+}
+
+/**
+ * rvu_npc_exact_get_drop_rule_info - Get drop rule information.
+ * @rvu: resource virtualization unit.
+ * @intf_type: Interface type (CGX, SDP or LBK)
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @drop_mcam_idx: NPC mcam drop rule index.
+ * @val: Channel value.
+ * @mask: Channel mask.
+ * @pcifunc: pcifunc of interface corresponding to the drop rule.
+ * Return: True upon success.
+ */
+static bool rvu_npc_exact_get_drop_rule_info(struct rvu *rvu, u8 intf_type, u8 cgx_id,
+ u8 lmac_id, u32 *drop_mcam_idx, u64 *val,
+ u64 *mask, u16 *pcifunc)
+{
+ struct npc_exact_table *table;
+ u64 chan_val, chan_mask;
+ bool rc;
+ int i;
+
+ table = rvu->hw->table;
+
+ if (intf_type != NIX_INTF_TYPE_CGX) {
+ dev_err(rvu->dev, "%s: No drop rule for LBK/SDP mode\n", __func__);
+ return false;
+ }
+
+ rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, intf_type, cgx_id,
+ lmac_id, &chan_val, &chan_mask);
+ if (!rc)
+ return false;
+
+ for (i = 0; i < NPC_MCAM_DROP_RULE_MAX; i++) {
+ if (!table->drop_rule_map[i].valid)
+ break;
+
+ if (table->drop_rule_map[i].chan_val != (u16)chan_val)
+ continue;
+
+ if (val)
+ *val = table->drop_rule_map[i].chan_val;
+ if (mask)
+ *mask = table->drop_rule_map[i].chan_mask;
+ if (pcifunc)
+ *pcifunc = table->drop_rule_map[i].pcifunc;
+
+ *drop_mcam_idx = i;
+ return true;
+ }
+
+ if (i == NPC_MCAM_DROP_RULE_MAX) {
+ dev_err(rvu->dev, "%s: drop mcam rule index (%d) >= NPC_MCAM_DROP_RULE_MAX\n",
+ __func__, *drop_mcam_idx);
+ return false;
+ }
+
+ dev_err(rvu->dev, "%s: Could not retrieve for cgx=%d, lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return false;
+}
+
+/**
+ * __rvu_npc_exact_cmd_rules_cnt_update - Update number dmac rules against a drop rule.
+ * @rvu: resource virtualization unit.
+ * @drop_mcam_idx: NPC mcam drop rule index.
+ * @val: +1 or -1.
+ * @enable_or_disable_cam: If no exact match rules against a drop rule, disable it.
+ *
+ * when first exact match entry against a drop rule is added, enable_or_disable_cam
+ * is set to true. When last exact match entry against a drop rule is deleted,
+ * enable_or_disable_cam is set to true.
+ * Return: Number of rules
+ */
+static u16 __rvu_npc_exact_cmd_rules_cnt_update(struct rvu *rvu, int drop_mcam_idx,
+ int val, bool *enable_or_disable_cam)
+{
+ struct npc_exact_table *table;
+ u16 *cnt, old_cnt;
+ bool promisc;
+
+ table = rvu->hw->table;
+ promisc = table->promisc_mode[drop_mcam_idx];
+
+ cnt = &table->cnt_cmd_rules[drop_mcam_idx];
+ old_cnt = *cnt;
+
+ *cnt += val;
+
+ if (!enable_or_disable_cam)
+ goto done;
+
+ *enable_or_disable_cam = false;
+
+ if (promisc)
+ goto done;
+
+ /* If all rules are deleted and not already in promisc mode; disable cam */
+ if (!*cnt && val < 0) {
+ *enable_or_disable_cam = true;
+ goto done;
+ }
+
+ /* If rule got added and not already in promisc mode; enable cam */
+ if (!old_cnt && val > 0) {
+ *enable_or_disable_cam = true;
+ goto done;
+ }
+
+done:
+ return *cnt;
+}
+
+/**
+ * rvu_npc_exact_del_table_entry_by_id - Delete and free table entry.
+ * @rvu: resource virtualization unit.
+ * @seq_id: Sequence identifier of the entry.
+ *
+ * Deletes entry from linked lists and free up slot in HW MEM or CAM
+ * table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_del_table_entry_by_id(struct rvu *rvu, u32 seq_id)
+{
+ struct npc_exact_table_entry *entry = NULL;
+ struct npc_exact_table *table;
+ bool disable_cam = false;
+ u32 drop_mcam_idx = -1;
+ int *cnt;
+ bool rc;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, seq_id);
+ if (!entry) {
+ dev_dbg(rvu->dev, "%s: failed to find entry for id=%d\n", __func__, seq_id);
+ mutex_unlock(&table->lock);
+ return -ENODATA;
+ }
+
+ cnt = (entry->opc_type == NPC_EXACT_OPC_CAM) ? &table->cam_tbl_entry_cnt :
+ &table->mem_tbl_entry_cnt;
+
+ /* delete from lists */
+ list_del_init(&entry->list);
+ list_del_init(&entry->glist);
+
+ (*cnt)--;
+
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, entry->cgx_id,
+ entry->lmac_id, &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to retrieve drop info for id=0x%x\n",
+ __func__, seq_id);
+ mutex_unlock(&table->lock);
+ return -ENODATA;
+ }
+
+ if (entry->cmd)
+ __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, -1, &disable_cam);
+
+ /* No dmac filter rules; disable drop on hit rule */
+ if (disable_cam) {
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+ dev_dbg(rvu->dev, "%s: Disabling mcam idx %d\n",
+ __func__, drop_mcam_idx);
+ }
+
+ mutex_unlock(&table->lock);
+
+ rvu_npc_exact_dealloc_table_entry(rvu, entry->opc_type, entry->ways, entry->index);
+
+ rvu_npc_exact_free_id(rvu, seq_id);
+
+ dev_dbg(rvu->dev, "%s: delete entry success for id=0x%x, mca=%pM\n",
+ __func__, seq_id, entry->mac);
+ kfree(entry);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_add_table_entry - Adds a table entry
+ * @rvu: resource virtualization unit.
+ * @cgx_id: cgx identifier.
+ * @lmac_id: lmac identifier.
+ * @mac: MAC address.
+ * @chan: Channel number.
+ * @ctype: Channel Type.
+ * @seq_id: Sequence number.
+ * @cmd: Whether it is invoked by ethtool cmd.
+ * @mcam_idx: NPC mcam index corresponding to MAC
+ * @pcifunc: PCI func.
+ *
+ * Creates a new exact match table entry in either CAM or
+ * MEM table.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_add_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id, u8 *mac,
+ u16 chan, u8 ctype, u32 *seq_id, bool cmd,
+ u32 mcam_idx, u16 pcifunc)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ enum npc_exact_opc_type opc_type;
+ bool enable_cam = false;
+ u32 drop_mcam_idx;
+ u32 index;
+ u64 mdata;
+ bool rc;
+ int err;
+ u8 ways;
+
+ ctype = 0;
+
+ err = rvu_npc_exact_alloc_table_entry(rvu, mac, chan, ctype, &index, &ways, &opc_type);
+ if (err) {
+ dev_err(rvu->dev, "%s: Could not alloc in exact match table\n", __func__);
+ return err;
+ }
+
+ /* Write mdata to table */
+ mdata = rvu_exact_prepare_table_entry(rvu, true, ctype, chan, mac);
+
+ if (opc_type == NPC_EXACT_OPC_CAM)
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, index, mdata);
+ else
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, ways, index, mdata);
+
+ /* Insert entry to linked list */
+ err = rvu_npc_exact_add_to_list(rvu, opc_type, ways, index, cgx_id, lmac_id,
+ mac, chan, ctype, seq_id, cmd, mcam_idx, pcifunc);
+ if (err) {
+ rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
+ dev_err(rvu->dev, "%s: could not add to exact match table\n", __func__);
+ return err;
+ }
+
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ rvu_npc_exact_dealloc_table_entry(rvu, opc_type, ways, index);
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ if (cmd)
+ __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 1, &enable_cam);
+
+ /* First command rule; enable drop on hit rule */
+ if (enable_cam) {
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, true);
+ dev_dbg(rvu->dev, "%s: Enabling mcam idx %d\n",
+ __func__, drop_mcam_idx);
+ }
+
+ dev_dbg(rvu->dev,
+ "%s: Successfully added entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
+ __func__, index, mac, ways, opc_type);
+
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_update_table_entry - Update exact match table.
+ * @rvu: resource virtualization unit.
+ * @cgx_id: CGX identifier.
+ * @lmac_id: LMAC identifier.
+ * @old_mac: Existing MAC address entry.
+ * @new_mac: New MAC address entry.
+ * @seq_id: Sequence identifier of the entry.
+ *
+ * Updates MAC address of an entry. If entry is in MEM table, new
+ * hash value may not match with old one.
+ * Return: 0 upon success.
+ */
+static int rvu_npc_exact_update_table_entry(struct rvu *rvu, u8 cgx_id, u8 lmac_id,
+ u8 *old_mac, u8 *new_mac, u32 *seq_id)
+{
+ int blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ struct npc_exact_table_entry *entry;
+ struct npc_exact_table *table;
+ u32 hash_index;
+ u64 mdata;
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, *seq_id);
+ if (!entry) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev,
+ "%s: failed to find entry for cgx_id=%d lmac_id=%d old_mac=%pM\n",
+ __func__, cgx_id, lmac_id, old_mac);
+ return -ENODATA;
+ }
+
+ /* If entry is in mem table and new hash index is different than old
+ * hash index, we cannot update the entry. Fail in these scenarios.
+ */
+ if (entry->opc_type == NPC_EXACT_OPC_MEM) {
+ hash_index = rvu_exact_calculate_hash(rvu, entry->chan, entry->ctype,
+ new_mac, table->mem_table.mask,
+ table->mem_table.depth);
+ if (hash_index != entry->index) {
+ dev_dbg(rvu->dev,
+ "%s: Update failed due to index mismatch(new=0x%x, old=%x)\n",
+ __func__, hash_index, entry->index);
+ mutex_unlock(&table->lock);
+ return -EINVAL;
+ }
+ }
+
+ mdata = rvu_exact_prepare_table_entry(rvu, true, entry->ctype, entry->chan, new_mac);
+
+ if (entry->opc_type == NPC_EXACT_OPC_MEM)
+ rvu_npc_exact_mem_table_write(rvu, blkaddr, entry->ways, entry->index, mdata);
+ else
+ rvu_npc_exact_cam_table_write(rvu, blkaddr, entry->index, mdata);
+
+ /* Update entry fields */
+ ether_addr_copy(entry->mac, new_mac);
+ *seq_id = entry->seq_id;
+
+ dev_dbg(rvu->dev,
+ "%s: Successfully updated entry (index=%d, dmac=%pM, ways=%d opc_type=%d\n",
+ __func__, entry->index, entry->mac, entry->ways, entry->opc_type);
+
+ dev_dbg(rvu->dev, "%s: Successfully updated entry (old mac=%pM new_mac=%pM\n",
+ __func__, old_mac, new_mac);
+
+ mutex_unlock(&table->lock);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_promisc_disable - Disable promiscuous mode.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: pcifunc
+ *
+ * Drop rule is against each PF. We dont support DMAC filter for
+ * VF.
+ * Return: 0 upon success
+ */
+
+int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ u32 drop_mcam_idx;
+ bool *promisc;
+ bool rc;
+ u32 cnt;
+
+ table = rvu->hw->table;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&table->lock);
+ promisc = &table->promisc_mode[drop_mcam_idx];
+
+ if (!*promisc) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: Err Already promisc mode disabled (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return LMAC_AF_ERR_INVALID_PARAM;
+ }
+ *promisc = false;
+ cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
+ mutex_unlock(&table->lock);
+
+ /* If no dmac filter entries configured, disable drop rule */
+ if (!cnt)
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+ else
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
+
+ dev_dbg(rvu->dev, "%s: disabled promisc mode (cgx=%d lmac=%d, cnt=%d)\n",
+ __func__, cgx_id, lmac_id, cnt);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_promisc_enable - Enable promiscuous mode.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: pcifunc.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ u32 drop_mcam_idx;
+ bool *promisc;
+ bool rc;
+ u32 cnt;
+
+ table = rvu->hw->table;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = rvu_npc_exact_get_drop_rule_info(rvu, NIX_INTF_TYPE_CGX, cgx_id, lmac_id,
+ &drop_mcam_idx, NULL, NULL, NULL);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s: failed to get drop rule info cgx=%d lmac=%d\n",
+ __func__, cgx_id, lmac_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&table->lock);
+ promisc = &table->promisc_mode[drop_mcam_idx];
+
+ if (*promisc) {
+ mutex_unlock(&table->lock);
+ dev_dbg(rvu->dev, "%s: Already in promisc mode (cgx=%d lmac=%d)\n",
+ __func__, cgx_id, lmac_id);
+ return LMAC_AF_ERR_INVALID_PARAM;
+ }
+ *promisc = true;
+ cnt = __rvu_npc_exact_cmd_rules_cnt_update(rvu, drop_mcam_idx, 0, NULL);
+ mutex_unlock(&table->lock);
+
+ /* If no dmac filter entries configured, disable drop rule */
+ if (!cnt)
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, false);
+ else
+ rvu_npc_enable_mcam_by_entry_index(rvu, drop_mcam_idx, NIX_INTF_RX, !*promisc);
+
+ dev_dbg(rvu->dev, "%s: Enabled promisc mode (cgx=%d lmac=%d cnt=%d)\n",
+ __func__, cgx_id, lmac_id, cnt);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_reset - Delete PF mac address.
+ * @rvu: resource virtualization unit.
+ * @req: Reset request
+ * @rsp: Reset response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u32 seq_id = req->index;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ int rc;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
+ if (rc) {
+ /* TODO: how to handle this error case ? */
+ dev_err(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__, pfvf->mac_addr, pf);
+ return 0;
+ }
+
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d success (seq_id=%u)\n",
+ __func__, pfvf->mac_addr, pf, seq_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_update - Update mac address field with new value.
+ * @rvu: resource virtualization unit.
+ * @req: Update request.
+ * @rsp: Update response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct cgx_mac_addr_update_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct npc_exact_table_entry *entry;
+ struct npc_exact_table *table;
+ struct rvu_pfvf *pfvf;
+ u32 seq_id, mcam_idx;
+ u8 old_mac[ETH_ALEN];
+ u8 cgx_id, lmac_id;
+ int rc;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ dev_dbg(rvu->dev, "%s: Update request for seq_id=%d, mac=%pM\n",
+ __func__, req->index, req->mac_addr);
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ table = rvu->hw->table;
+
+ mutex_lock(&table->lock);
+
+ /* Lookup for entry which needs to be updated */
+ entry = __rvu_npc_exact_find_entry_by_seq_id(rvu, req->index);
+ if (!entry) {
+ dev_err(rvu->dev, "%s: failed to find entry for id=0x%x\n", __func__, req->index);
+ mutex_unlock(&table->lock);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED;
+ }
+ ether_addr_copy(old_mac, entry->mac);
+ seq_id = entry->seq_id;
+ mcam_idx = entry->mcam_idx;
+ mutex_unlock(&table->lock);
+
+ rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, old_mac,
+ req->mac_addr, &seq_id);
+ if (!rc) {
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev, "%s mac:%pM (pfvf:%pM default:%pM) update to PF=%d success\n",
+ __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf);
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ return 0;
+ }
+
+ /* Try deleting and adding it again */
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (rc) {
+ /* This could be a new entry */
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n", __func__,
+ pfvf->mac_addr, pf);
+ }
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id, true,
+ mcam_idx, req->hdr.pcifunc);
+ if (rc) {
+ dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n", __func__,
+ req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+ }
+
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev,
+ "%s MAC (new:%pM, old=%pM default:%pM) del and add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pfvf->mac_addr, pfvf->default_mac, pf, seq_id);
+
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_add - Adds MAC address to exact match table.
+ * @rvu: resource virtualization unit.
+ * @req: Add request.
+ * @rsp: Add response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ int rc = 0;
+ u32 seq_id;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id,
+ true, -1, req->hdr.pcifunc);
+
+ if (!rc) {
+ rsp->index = seq_id;
+ dev_dbg(rvu->dev, "%s MAC (%pM) add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pf, seq_id);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s MAC (%pM) add to PF=%d failed\n", __func__,
+ req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_del - Delete DMAC filter
+ * @rvu: resource virtualization unit.
+ * @req: Delete request.
+ * @rsp: Delete response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ int rc;
+
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (!rc) {
+ dev_dbg(rvu->dev, "%s del to PF=%d success (seq_id=%u)\n",
+ __func__, pf, req->index);
+ return 0;
+ }
+
+ dev_err(rvu->dev, "%s del to PF=%d failed (seq_id=%u)\n",
+ __func__, pf, req->index);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_DEL_FAILED;
+}
+
+/**
+ * rvu_npc_exact_mac_addr_set - Add PF mac address to dmac filter.
+ * @rvu: resource virtualization unit.
+ * @req: Set request.
+ * @rsp: Set response.
+ * Return: 0 upon success
+ */
+int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u32 seq_id = req->index;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ u32 mcam_idx = -1;
+ int rc, nixlf;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ pfvf = &rvu->pf[pf];
+
+ /* If table does not have an entry; both update entry and del table entry API
+ * below fails. Those are not failure conditions.
+ */
+ rc = rvu_npc_exact_update_table_entry(rvu, cgx_id, lmac_id, pfvf->mac_addr,
+ req->mac_addr, &seq_id);
+ if (!rc) {
+ rsp->index = seq_id;
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ ether_addr_copy(rsp->mac_addr, req->mac_addr);
+ dev_dbg(rvu->dev, "%s MAC (%pM) update to PF=%d success\n",
+ __func__, req->mac_addr, pf);
+ return 0;
+ }
+
+ /* Try deleting and adding it again */
+ rc = rvu_npc_exact_del_table_entry_by_id(rvu, req->index);
+ if (rc) {
+ dev_dbg(rvu->dev, "%s MAC (%pM) del PF=%d failed\n",
+ __func__, pfvf->mac_addr, pf);
+ }
+
+ /* find mcam entry if exist */
+ rc = nix_get_nixlf(rvu, req->hdr.pcifunc, &nixlf, NULL);
+ if (!rc) {
+ mcam_idx = npc_get_nixlf_mcam_index(&rvu->hw->mcam, req->hdr.pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ }
+
+ rc = rvu_npc_exact_add_table_entry(rvu, cgx_id, lmac_id, req->mac_addr,
+ pfvf->rx_chan_base, 0, &seq_id,
+ true, mcam_idx, req->hdr.pcifunc);
+ if (rc) {
+ dev_err(rvu->dev, "%s MAC (%pM) add PF=%d failed\n",
+ __func__, req->mac_addr, pf);
+ return LMAC_AF_ERR_EXACT_MATCH_TBL_ADD_FAILED;
+ }
+
+ rsp->index = seq_id;
+ ether_addr_copy(rsp->mac_addr, req->mac_addr);
+ ether_addr_copy(pfvf->mac_addr, req->mac_addr);
+ dev_dbg(rvu->dev,
+ "%s MAC (%pM) del and add to PF=%d success (seq_id=%u)\n",
+ __func__, req->mac_addr, pf, seq_id);
+ return 0;
+}
+
+/**
+ * rvu_npc_exact_can_disable_feature - Check if feature can be disabled.
+ * @rvu: resource virtualization unit.
+ * Return: True if exact match feature is supported.
+ */
+bool rvu_npc_exact_can_disable_feature(struct rvu *rvu)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ bool empty;
+
+ if (!rvu->hw->cap.npc_exact_match_enabled)
+ return false;
+
+ mutex_lock(&table->lock);
+ empty = list_empty(&table->lhead_gbl);
+ mutex_unlock(&table->lock);
+
+ return empty;
+}
+
+/**
+ * rvu_npc_exact_disable_feature - Disable feature.
+ * @rvu: resource virtualization unit.
+ */
+void rvu_npc_exact_disable_feature(struct rvu *rvu)
+{
+ rvu->hw->cap.npc_exact_match_enabled = false;
+}
+
+/**
+ * rvu_npc_exact_reset - Delete and free all entry which match pcifunc.
+ * @rvu: resource virtualization unit.
+ * @pcifunc: PCI func to match.
+ */
+void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc)
+{
+ struct npc_exact_table *table = rvu->hw->table;
+ struct npc_exact_table_entry *tmp, *iter;
+ u32 seq_id;
+
+ mutex_lock(&table->lock);
+ list_for_each_entry_safe(iter, tmp, &table->lhead_gbl, glist) {
+ if (pcifunc != iter->pcifunc)
+ continue;
+
+ seq_id = iter->seq_id;
+ dev_dbg(rvu->dev, "%s: resetting pcifun=%d seq_id=%u\n", __func__,
+ pcifunc, seq_id);
+
+ mutex_unlock(&table->lock);
+ rvu_npc_exact_del_table_entry_by_id(rvu, seq_id);
+ mutex_lock(&table->lock);
+ }
+ mutex_unlock(&table->lock);
+}
+
+/**
+ * rvu_npc_exact_init - initialize exact match table
+ * @rvu: resource virtualization unit.
+ *
+ * Initialize HW and SW resources to manage 4way-2K table and fully
+ * associative 32-entry mcam table.
+ * Return: 0 upon success.
+ */
+int rvu_npc_exact_init(struct rvu *rvu)
+{
+ u64 bcast_mcast_val, bcast_mcast_mask;
+ struct npc_exact_table *table;
+ u64 exact_val, exact_mask;
+ u64 chan_val, chan_mask;
+ u8 cgx_id, lmac_id;
+ u32 *drop_mcam_idx;
+ u16 max_lmac_cnt;
+ u64 npc_const3;
+ int table_size;
+ int blkaddr;
+ u16 pcifunc;
+ int err, i;
+ u64 cfg;
+ bool rc;
+
+ /* Read NPC_AF_CONST3 and check for have exact
+ * match functionality is present
+ */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Check exact match feature is supported */
+ npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
+ if (!(npc_const3 & BIT_ULL(62)))
+ return 0;
+
+ /* Check if kex profile has enabled EXACT match nibble */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
+ if (!(cfg & NPC_EXACT_NIBBLE_HIT))
+ return 0;
+
+ /* Set capability to true */
+ rvu->hw->cap.npc_exact_match_enabled = true;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Memory allocation for table success\n", __func__);
+ rvu->hw->table = table;
+
+ /* Read table size, ways and depth */
+ table->mem_table.ways = FIELD_GET(GENMASK_ULL(19, 16), npc_const3);
+ table->mem_table.depth = FIELD_GET(GENMASK_ULL(15, 0), npc_const3);
+ table->cam_table.depth = FIELD_GET(GENMASK_ULL(31, 24), npc_const3);
+
+ dev_dbg(rvu->dev, "%s: NPC exact match 4way_2k table(ways=%d, depth=%d)\n",
+ __func__, table->mem_table.ways, table->cam_table.depth);
+
+ /* Check if depth of table is not a sequre of 2
+ * TODO: why _builtin_popcount() is not working ?
+ */
+ if ((table->mem_table.depth & (table->mem_table.depth - 1)) != 0) {
+ dev_err(rvu->dev,
+ "%s: NPC exact match 4way_2k table depth(%d) is not square of 2\n",
+ __func__, table->mem_table.depth);
+ return -EINVAL;
+ }
+
+ table_size = table->mem_table.depth * table->mem_table.ways;
+
+ /* Allocate bitmap for 4way 2K table */
+ table->mem_table.bmap = devm_bitmap_zalloc(rvu->dev, table_size,
+ GFP_KERNEL);
+ if (!table->mem_table.bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for 4way 2K entry table\n", __func__);
+
+ /* Allocate bitmap for 32 entry mcam */
+ table->cam_table.bmap = devm_bitmap_zalloc(rvu->dev, 32, GFP_KERNEL);
+
+ if (!table->cam_table.bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for 32 entry cam\n", __func__);
+
+ table->tot_ids = table_size + table->cam_table.depth;
+ table->id_bmap = devm_bitmap_zalloc(rvu->dev, table->tot_ids,
+ GFP_KERNEL);
+
+ if (!table->id_bmap)
+ return -ENOMEM;
+
+ dev_dbg(rvu->dev, "%s: Allocated bitmap for id map (total=%d)\n",
+ __func__, table->tot_ids);
+
+ /* Initialize list heads for npc_exact_table entries.
+ * This entry is used by debugfs to show entries in
+ * exact match table.
+ */
+ for (i = 0; i < NPC_EXACT_TBL_MAX_WAYS; i++)
+ INIT_LIST_HEAD(&table->lhead_mem_tbl_entry[i]);
+
+ INIT_LIST_HEAD(&table->lhead_cam_tbl_entry);
+ INIT_LIST_HEAD(&table->lhead_gbl);
+
+ mutex_init(&table->lock);
+
+ rvu_exact_config_secret_key(rvu);
+ rvu_exact_config_search_key(rvu);
+
+ rvu_exact_config_table_mask(rvu);
+ rvu_exact_config_result_ctrl(rvu, table->mem_table.depth);
+
+ /* - No drop rule for LBK
+ * - Drop rules for SDP and each LMAC.
+ */
+ exact_val = !NPC_EXACT_RESULT_HIT;
+ exact_mask = NPC_EXACT_RESULT_HIT;
+
+ /* nibble - 3 2 1 0
+ * L3B L3M L2B L2M
+ */
+ bcast_mcast_val = 0b0000;
+ bcast_mcast_mask = 0b0011;
+
+ /* Install SDP drop rule */
+ drop_mcam_idx = &table->num_drop_rules;
+
+ max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx +
+ PF_CGXMAP_BASE;
+
+ for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
+ if (rvu->pf2cgxlmac_map[i] == 0xFF)
+ continue;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[i], &cgx_id, &lmac_id);
+
+ rc = rvu_npc_exact_calc_drop_rule_chan_and_mask(rvu, NIX_INTF_TYPE_CGX, cgx_id,
+ lmac_id, &chan_val, &chan_mask);
+ if (!rc) {
+ dev_err(rvu->dev,
+ "%s: failed, info chan_val=0x%llx chan_mask=0x%llx rule_id=%d\n",
+ __func__, chan_val, chan_mask, *drop_mcam_idx);
+ return -EINVAL;
+ }
+
+ /* Filter rules are only for PF */
+ pcifunc = RVU_PFFUNC(i, 0);
+
+ dev_dbg(rvu->dev,
+ "%s:Drop rule cgx=%d lmac=%d chan(val=0x%llx, mask=0x%llx\n",
+ __func__, cgx_id, lmac_id, chan_val, chan_mask);
+
+ rc = rvu_npc_exact_save_drop_rule_chan_and_mask(rvu, table->num_drop_rules,
+ chan_val, chan_mask, pcifunc);
+ if (!rc) {
+ dev_err(rvu->dev,
+ "%s: failed to set drop info for cgx=%d, lmac=%d, chan=%llx\n",
+ __func__, cgx_id, lmac_id, chan_val);
+ return -EINVAL;
+ }
+
+ err = npc_install_mcam_drop_rule(rvu, *drop_mcam_idx,
+ &table->counter_idx[*drop_mcam_idx],
+ chan_val, chan_mask,
+ exact_val, exact_mask,
+ bcast_mcast_val, bcast_mcast_mask);
+ if (err) {
+ dev_err(rvu->dev,
+ "failed to configure drop rule (cgx=%d lmac=%d)\n",
+ cgx_id, lmac_id);
+ return err;
+ }
+
+ (*drop_mcam_idx)++;
+ }
+
+ dev_info(rvu->dev, "initialized exact match table successfully\n");
+ return 0;
+}
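
As a worked example of the sizing above, taking the values implied by the comments (a 4-way memory table of depth 2048 plus a 32-entry CAM; the real numbers are read from NPC_AF_CONST3 at runtime):

    depth      = 2048;            /* FIELD_GET(GENMASK_ULL(15, 0), npc_const3) */
    ways       = 4;               /* FIELD_GET(GENMASK_ULL(19, 16), npc_const3) */
    table_size = depth * ways;    /* 8192 memory-table slots */
    tot_ids    = table_size + 32; /* 8224 ids spanning both tables */

The (depth & (depth - 1)) test is the usual power-of-two check; the kernel's is_power_of_2() helper from <linux/log2.h> expresses the same condition and would also answer the TODO about __builtin_popcount(). The drop rules installed at the end pair an exact-match-miss key (exact_val/exact_mask) with bcast_mcast_val = 0b0000 under mask 0b0011, i.e. they only match unicast frames (L2B = L2M = 0) whose DMAC is absent from the exact match table.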
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
new file mode 100644
index 000000000000..a1c3d987b804
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.h
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell RVU Admin Function driver
+ *
+ * Copyright (C) 2022 Marvell.
+ *
+ */
+
+#ifndef __RVU_NPC_HASH_H
+#define __RVU_NPC_HASH_H
+
+#define RVU_NPC_HASH_SECRET_KEY0 0xa9d5af4c9fbc76b1
+#define RVU_NPC_HASH_SECRET_KEY1 0xa9d5af4c9fbc87b4
+#define RVU_NPC_HASH_SECRET_KEY2 0x5954c9e7
+
+#define NPC_MAX_HASH 2
+#define NPC_MAX_HASH_MASK 2
+
+#define KEX_LD_CFG_USE_HASH(use_hash, bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
+ ((use_hash) << 20 | ((bytesm1) << 16) | ((hdr_ofs) << 8) | \
+ ((ena) << 7) | ((flags_ena) << 6) | ((key_ofs) & 0x3F))
+#define KEX_LD_CFG_HASH(hdr_ofs, bytesm1, lt_en, lid_en, lid, ltype_match, ltype_mask) \
+ (((hdr_ofs) << 32) | ((bytesm1) << 16) | \
+ ((lt_en) << 12) | ((lid_en) << 11) | ((lid) << 8) | \
+ ((ltype_match) << 4) | ((ltype_mask) & 0xF))
+
+#define SET_KEX_LD_HASH(intf, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_CFG(intf, ld), cfg)
+
+#define SET_KEX_LD_HASH_MASK(intf, ld, mask_idx, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx), cfg)
+
+#define GET_KEX_LD_HASH_CTRL(intf, ld) \
+ rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld))
+
+#define GET_KEX_LD_HASH_MASK(intf, ld, mask_idx) \
+ rvu_read64(rvu, blkaddr, NPC_AF_INTFX_HASHX_MASKX(intf, ld, mask_idx))
+
+#define SET_KEX_LD_HASH_CTRL(intf, ld, cfg) \
+ rvu_write64(rvu, blkaddr, \
+ NPC_AF_INTFX_HASHX_RESULT_CTRL(intf, ld), cfg)
+
+struct npc_mcam_kex_hash {
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ bool lid_lt_ld_hash_en[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_CFG */
+ u64 hash[NPC_MAX_INTF][NPC_MAX_HASH];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_MASK(0..1) */
+ u64 hash_mask[NPC_MAX_INTF][NPC_MAX_HASH][NPC_MAX_HASH_MASK];
+ /* NPC_AF_INTF(0..1)_HASH(0..1)_RESULT_CTRL */
+ u64 hash_ctrl[NPC_MAX_INTF][NPC_MAX_HASH];
+} __packed;
+
+void npc_update_field_hash(struct rvu *rvu, u8 intf,
+ struct mcam_entry *entry,
+ int blkaddr,
+ u64 features,
+ struct flow_msg *pkt,
+ struct flow_msg *mask,
+ struct flow_msg *opkt,
+ struct flow_msg *omask);
+void npc_config_secret_key(struct rvu *rvu, int blkaddr);
+void npc_program_mkex_hash(struct rvu *rvu, int blkaddr);
+u32 npc_field_hash_calc(u64 *ldata, struct npc_get_field_hash_info_rsp rsp,
+ u8 intf, u8 hash_idx);
+
+static struct npc_mcam_kex_hash npc_mkex_hash_default __maybe_unused = {
+ .lid_lt_ld_hash_en = {
+ [NIX_INTF_RX] = {
+ [NPC_LID_LC] = {
+ [NPC_LT_LC_IP6] = {
+ true,
+ true,
+ },
+ },
+ },
+
+ [NIX_INTF_TX] = {
+ [NPC_LID_LC] = {
+ [NPC_LT_LC_IP6] = {
+ true,
+ true,
+ },
+ },
+ },
+ },
+
+ .hash = {
+ [NIX_INTF_RX] = {
+ KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ },
+
+ [NIX_INTF_TX] = {
+ KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ KEX_LD_CFG_HASH(0x18ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf),
+ },
+ },
+
+ .hash_mask = {
+ [NIX_INTF_RX] = {
+ [0] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ [1] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ },
+
+ [NIX_INTF_TX] = {
+ [0] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ [1] = {
+ GENMASK_ULL(63, 0),
+ GENMASK_ULL(63, 0),
+ },
+ },
+ },
+
+ .hash_ctrl = {
+ [NIX_INTF_RX] = {
+ [0] = GENMASK_ULL(63, 32), /* Upper 32 bits are the mask, lower 32 bits are the offset. */
+ [1] = GENMASK_ULL(63, 32), /* Upper 32 bits are the mask, lower 32 bits are the offset. */
+ },
+
+ [NIX_INTF_TX] = {
+ [0] = GENMASK_ULL(63, 32), /* Upper 32 bits are the mask, lower 32 bits are the offset. */
+ [1] = GENMASK_ULL(63, 32), /* Upper 32 bits are the mask, lower 32 bits are the offset. */
+ },
+ },
+};
+
+/* If exact match table support is enabled, enable drop rules */
+#define NPC_MCAM_DROP_RULE_MAX 30
+#define NPC_MCAM_SDP_DROP_RULE_IDX 0
+
+#define RVU_PFFUNC(pf, func) \
+ ((((pf) & RVU_PFVF_PF_MASK) << RVU_PFVF_PF_SHIFT) | \
+ (((func) & RVU_PFVF_FUNC_MASK) << RVU_PFVF_FUNC_SHIFT))
+
+enum npc_exact_opc_type {
+ NPC_EXACT_OPC_MEM,
+ NPC_EXACT_OPC_CAM,
+};
+
+struct npc_exact_table_entry {
+ struct list_head list;
+ struct list_head glist;
+ u32 seq_id; /* Sequence number of entry */
+ u32 index; /* Mem table or cam table index */
+ u32 mcam_idx;
+ /* Mcam index. This is valid only if "cmd" field is false */
+ enum npc_exact_opc_type opc_type;
+ u16 chan;
+ u16 pcifunc;
+ u8 ways;
+ u8 mac[ETH_ALEN];
+ u8 ctype;
+ u8 cgx_id;
+ u8 lmac_id;
+ bool cmd; /* Is added by ethtool command ? */
+};
+
+struct npc_exact_table {
+ struct mutex lock; /* entries update lock */
+ unsigned long *id_bmap;
+ int num_drop_rules;
+ u32 tot_ids;
+ u16 cnt_cmd_rules[NPC_MCAM_DROP_RULE_MAX];
+ u16 counter_idx[NPC_MCAM_DROP_RULE_MAX];
+ bool promisc_mode[NPC_MCAM_DROP_RULE_MAX];
+ struct {
+ int ways;
+ int depth;
+ unsigned long *bmap;
+ u64 mask; // Masks before hash calculation.
+ u16 hash_mask; // 11 bits for hash mask
+ u16 hash_offset; // 11 bits offset
+ } mem_table;
+
+ struct {
+ int depth;
+ unsigned long *bmap;
+ } cam_table;
+
+ struct {
+ bool valid;
+ u16 chan_val;
+ u16 chan_mask;
+ u16 pcifunc;
+ u8 drop_rule_idx;
+ } drop_rule_map[NPC_MCAM_DROP_RULE_MAX];
+
+#define NPC_EXACT_TBL_MAX_WAYS 4
+
+ struct list_head lhead_mem_tbl_entry[NPC_EXACT_TBL_MAX_WAYS];
+ int mem_tbl_entry_cnt;
+
+ struct list_head lhead_cam_tbl_entry;
+ int cam_tbl_entry_cnt;
+
+ struct list_head lhead_gbl;
+};
+
+bool rvu_npc_exact_has_match_table(struct rvu *rvu);
+u32 rvu_npc_exact_get_max_entries(struct rvu *rvu);
+int rvu_npc_exact_init(struct rvu *rvu);
+int rvu_npc_exact_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
+ struct msg_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct cgx_mac_addr_update_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp);
+
+int rvu_npc_exact_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req,
+ struct cgx_mac_addr_set_or_get *rsp);
+
+void rvu_npc_exact_reset(struct rvu *rvu, u16 pcifunc);
+
+bool rvu_npc_exact_can_disable_feature(struct rvu *rvu);
+void rvu_npc_exact_disable_feature(struct rvu *rvu);
+u16 rvu_npc_exact_drop_rule_to_pcifunc(struct rvu *rvu, u32 drop_rule_idx);
+int rvu_npc_exact_promisc_disable(struct rvu *rvu, u16 pcifunc);
+int rvu_npc_exact_promisc_enable(struct rvu *rvu, u16 pcifunc);
+#endif /* RVU_NPC_HASH_H */
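
For reference, the default mkex hash profile above hashes the IPv6 source and destination addresses: in the IPv6 header the source address sits at byte offset 8 and the destination at byte offset 24 (0x18), and bytesm1 = 0xf selects 16 bytes each. Expanding the first RX entry with the KEX_LD_CFG_HASH() macro defined above gives, as a sketch:

    KEX_LD_CFG_HASH(0x8ULL, 0xf, 0x1, 0x1, NPC_LID_LC, NPC_LT_LC_IP6, 0xf)
        == (0x8ULL << 32)            /* header offset 8: IPv6 source address */
         | (0xfULL << 16)            /* bytesm1 = 15, i.e. hash 16 bytes */
         | (1 << 12) | (1 << 11)     /* enable LTYPE and LID matching */
         | (NPC_LID_LC << 8)         /* layer C */
         | (NPC_LT_LC_IP6 << 4)      /* match only the IPv6 ltype */
         | 0xf;                      /* ltype mask */

The all-ones hash_mask entries leave the extracted bytes unmasked, and each hash_ctrl value is a mask of all ones in the upper 32 bits with offset 0 in the lower 32 bits, presumably keeping the full hash result.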
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 22cd751613cd..7007f0b8e659 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -189,6 +189,7 @@
#define NIX_AF_RX_CFG (0x00D0)
#define NIX_AF_AVG_DELAY (0x00E0)
#define NIX_AF_CINT_DELAY (0x00F0)
+#define NIX_AF_VWQE_TIMER (0x00F8)
#define NIX_AF_RX_MCAST_BASE (0x0100)
#define NIX_AF_RX_MCAST_CFG (0x0110)
#define NIX_AF_RX_MCAST_BUF_BASE (0x0120)
@@ -266,6 +267,7 @@
#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660)
#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670)
#define NIX_AF_SEB_CFG (0x05F0)
+#define NIX_PTP_1STEP_EN BIT_ULL(2)
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
@@ -425,6 +427,7 @@
#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730)
#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
#define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17)
+#define NIX_AF_MDQX_IN_MD_COUNT(a) (0x14e0 | (a) << 16)
#define NIX_PRIV_AF_INT_CFG (0x8000000)
#define NIX_PRIV_LFX_CFG (0x8000010)
@@ -544,6 +547,8 @@
#define CPT_LF_CTL 0x10
#define CPT_LF_INPROG 0x40
+#define CPT_LF_Q_SIZE 0x100
+#define CPT_LF_Q_INST_PTR 0x110
#define CPT_LF_Q_GRP_PTR 0x120
#define CPT_LF_CTX_FLUSH 0x510
@@ -565,7 +570,13 @@
#define NPC_AF_PCK_DEF_OIP4 (0x00620)
#define NPC_AF_PCK_DEF_OIP6 (0x00630)
#define NPC_AF_PCK_DEF_IIP4 (0x00640)
+#define NPC_AF_INTFX_HASHX_RESULT_CTRL(a, b) (0x006c0 | (a) << 4 | (b) << 3)
+#define NPC_AF_INTFX_HASHX_MASKX(a, b, c) (0x00700 | (a) << 5 | (b) << 4 | (c) << 3)
#define NPC_AF_KEX_LDATAX_FLAGS_CFG(a) (0x00800 | (a) << 3)
+#define NPC_AF_INTFX_HASHX_CFG(a, b) (0x00b00 | (a) << 6 | (b) << 4)
+#define NPC_AF_INTFX_SECRET_KEY0(a) (0x00e00 | (a) << 3)
+#define NPC_AF_INTFX_SECRET_KEY1(a) (0x00e20 | (a) << 3)
+#define NPC_AF_INTFX_SECRET_KEY2(a) (0x00e40 | (a) << 3)
#define NPC_AF_INTFX_KEX_CFG(a) (0x01010 | (a) << 8)
#define NPC_AF_PKINDX_ACTION0(a) (0x80000ull | (a) << 6)
#define NPC_AF_PKINDX_ACTION1(a) (0x80008ull | (a) << 6)
@@ -599,6 +610,15 @@
#define NPC_AF_DBG_DATAX(a) (0x3001400 | (a) << 4)
#define NPC_AF_DBG_RESULTX(a) (0x3001800 | (a) << 4)
+#define NPC_AF_EXACT_MEM_ENTRY(a, b) (0x300000 | (a) << 15 | (b) << 3)
+#define NPC_AF_EXACT_CAM_ENTRY(a) (0xC00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_MASK(a) (0x660 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_RESULT_CTL(a) (0x680 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_CFG(a) (0xA00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET0(a) (0xE00 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET1(a) (0xE20 | (a) << 3)
+#define NPC_AF_INTFX_EXACT_SECRET2(a) (0xE40 | (a) << 3)
+
#define NPC_AF_MCAMEX_BANKX_CAMX_INTF(a, b, c) ({ \
u64 offset; \
\
@@ -674,6 +694,7 @@
#define NDC_AF_INTR_ENA_W1S (0x00068)
#define NDC_AF_INTR_ENA_W1C (0x00070)
#define NDC_AF_ACTIVE_PC (0x00078)
+#define NDC_AF_CAMS_RD_INTERVAL (0x00080)
#define NDC_AF_BP_TEST_ENABLE (0x001F8)
#define NDC_AF_BP_TEST(a) (0x00200 | (a) << 3)
#define NDC_AF_BLK_RST (0x002F0)
@@ -689,6 +710,8 @@
(0x00F00 | (a) << 5 | (b) << 4)
#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3)
#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)
+#define NDC_AF_BANKX_LINEX_METADATA(a, b) \
+ (0x10000 | (a) << 12 | (b) << 3)
/* LBK */
#define LBK_CONST (0x10ull)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
index b04fb226f708..ae50d56258ec 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c
@@ -62,15 +62,18 @@ int rvu_sdp_init(struct rvu *rvu)
pfvf->sdp_info = devm_kzalloc(rvu->dev,
sizeof(struct sdp_node_info),
GFP_KERNEL);
- if (!pfvf->sdp_info)
+ if (!pfvf->sdp_info) {
+ pci_dev_put(pdev);
return -ENOMEM;
+ }
dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]);
- put_device(&pdev->dev);
i++;
}
+ pci_dev_put(pdev);
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 0048b5946712..73fdb8798614 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -11,4 +11,8 @@ rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_devlink.o
rvu_nicvf-y := otx2_vf.o otx2_devlink.o
+rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
+
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index fd4f083c699e..826f691de259 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -86,8 +86,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
aq->sq.cq_ena = 1;
aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
new file mode 100644
index 000000000000..a487a98eac88
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -0,0 +1,1693 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MACSEC hardware offload driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/rtnetlink.h>
+#include <linux/bitfield.h>
+#include <net/macsec.h>
+#include "otx2_common.h"
+
+#define MCS_TCAM0_MAC_DA_MASK GENMASK_ULL(47, 0)
+#define MCS_TCAM0_MAC_SA_MASK GENMASK_ULL(63, 48)
+#define MCS_TCAM1_MAC_SA_MASK GENMASK_ULL(31, 0)
+#define MCS_TCAM1_ETYPE_MASK GENMASK_ULL(47, 32)
+
+#define MCS_SA_MAP_MEM_SA_USE BIT_ULL(9)
+
+#define MCS_RX_SECY_PLCY_RW_MASK GENMASK_ULL(49, 18)
+#define MCS_RX_SECY_PLCY_RP BIT_ULL(17)
+#define MCS_RX_SECY_PLCY_AUTH_ENA BIT_ULL(16)
+#define MCS_RX_SECY_PLCY_CIP GENMASK_ULL(8, 5)
+#define MCS_RX_SECY_PLCY_VAL GENMASK_ULL(2, 1)
+#define MCS_RX_SECY_PLCY_ENA BIT_ULL(0)
+
+#define MCS_TX_SECY_PLCY_MTU GENMASK_ULL(43, 28)
+#define MCS_TX_SECY_PLCY_ST_TCI GENMASK_ULL(27, 22)
+#define MCS_TX_SECY_PLCY_ST_OFFSET GENMASK_ULL(21, 15)
+#define MCS_TX_SECY_PLCY_INS_MODE BIT_ULL(14)
+#define MCS_TX_SECY_PLCY_AUTH_ENA BIT_ULL(13)
+#define MCS_TX_SECY_PLCY_CIP GENMASK_ULL(5, 2)
+#define MCS_TX_SECY_PLCY_PROTECT BIT_ULL(1)
+#define MCS_TX_SECY_PLCY_ENA BIT_ULL(0)
+
+#define MCS_GCM_AES_128 0
+#define MCS_GCM_AES_256 1
+#define MCS_GCM_AES_XPN_128 2
+#define MCS_GCM_AES_XPN_256 3
+
+#define MCS_TCI_ES 0x40 /* end station */
+#define MCS_TCI_SC 0x20 /* SCI present */
+#define MCS_TCI_SCB 0x10 /* epon */
+#define MCS_TCI_E 0x08 /* encryption */
+#define MCS_TCI_C 0x04 /* changed text */
+
+static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
+ struct macsec_secy *secy)
+{
+ struct cn10k_mcs_txsc *txsc;
+
+ list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+ if (txsc->sw_secy == secy)
+ return txsc;
+ }
+
+ return NULL;
+}
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
+ struct macsec_secy *secy,
+ struct macsec_rx_sc *rx_sc)
+{
+ struct cn10k_mcs_rxsc *rxsc;
+
+ list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+ if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
+ return rxsc;
+ }
+
+ return NULL;
+}
+
+static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
+{
+ switch (rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ return "FLOW";
+ case MCS_RSRC_TYPE_SC:
+ return "SC";
+ case MCS_RSRC_TYPE_SECY:
+ return "SECY";
+ case MCS_RSRC_TYPE_SA:
+ return "SA";
+ default:
+ return "Unknown";
+ }
+
+ return "Unknown";
+}
+
+static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ enum mcs_rsrc_type type, u16 *rsrc_id)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_alloc_rsrc_req *req;
+ struct mcs_alloc_rsrc_rsp *rsp;
+ int ret = -ENOMEM;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
+ if (!req)
+ goto fail;
+
+ req->rsrc_type = type;
+ req->rsrc_cnt = 1;
+ req->dir = dir;
+
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
+ req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ switch (rsp->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ *rsrc_id = rsp->flow_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SC:
+ *rsrc_id = rsp->sc_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ *rsrc_id = rsp->secy_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SA:
+ *rsrc_id = rsp->sa_ids[0];
+ break;
+ default:
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
+ dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ enum mcs_rsrc_type type, u16 hw_rsrc_id,
+ bool all)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_free_rsrc_req *req;
+
+ mutex_lock(&mbox->lock);
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req)
+ goto fail;
+
+ clear_req->id = hw_rsrc_id;
+ clear_req->type = type;
+ clear_req->dir = dir;
+
+ req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
+ if (!req)
+ goto fail;
+
+ req->rsrc_id = hw_rsrc_id;
+ req->rsrc_type = type;
+ req->dir = dir;
+ if (all)
+ req->all = 1;
+
+ if (otx2_sync_mbox_msg(&pfvf->mbox))
+ goto fail;
+
+ mutex_unlock(&mbox->lock);
+
+ return;
+fail:
+ dev_err(pfvf->dev, "Failed to free %s %s resource\n",
+ dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+ mutex_unlock(&mbox->lock);
+}
+
+static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+ return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+ return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy, u8 hw_secy_id)
+{
+ struct mcs_secy_plcy_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 policy;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
+ if (secy->replay_protect)
+ policy |= MCS_RX_SECY_PLCY_RP;
+
+ policy |= MCS_RX_SECY_PLCY_AUTH_ENA;
+ policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, MCS_GCM_AES_128);
+ policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);
+
+ policy |= MCS_RX_SECY_PLCY_ENA;
+
+ req->plcy = policy;
+ req->secy_id = hw_secy_id;
+ req->dir = MCS_RX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+ struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+ struct macsec_secy *secy = rxsc->sw_secy;
+ struct mcs_flowid_entry_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 mac_da;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mac_da = ether_addr_to_u64(secy->netdev->dev_addr);
+
+ req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_DA_MASK, mac_da);
+ req->mask[0] = ~0ULL;
+ req->mask[0] &= ~MCS_TCAM0_MAC_DA_MASK;
+
+ req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
+ req->mask[1] = ~0ULL;
+ req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;
+
+ req->mask[2] = ~0ULL;
+ req->mask[3] = ~0ULL;
+
+ req->flow_id = rxsc->hw_flow_id;
+ req->secy_id = hw_secy_id;
+ req->sc_id = rxsc->hw_sc_id;
+ req->dir = MCS_RX;
+
+ if (sw_rx_sc->active)
+ req->ena = 1;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+ struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+ struct mcs_rx_sc_cam_write_req *sc_req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
+ if (!sc_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
+ sc_req->sc_id = rxsc->hw_sc_id;
+ sc_req->secy_id = hw_secy_id;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_rxsc *rxsc,
+ u8 assoc_num, bool sa_in_use)
+{
+ unsigned char *src = rxsc->sa_key[assoc_num];
+ struct mcs_sa_plcy_write_req *plcy_req;
+ struct mcs_rx_sc_sa_map *map_req;
+ struct mbox *mbox = &pfvf->mbox;
+ u8 reg, key_len;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+ if (!plcy_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
+ if (!map_req) {
+ otx2_mbox_reset(&mbox->mbox, 0);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
+ memcpy((u8 *)&plcy_req->plcy[0][reg],
+ (src + reg * 8), 8);
+ reg++;
+ }
+
+ plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
+ plcy_req->sa_cnt = 1;
+ plcy_req->dir = MCS_RX;
+
+ map_req->sa_index = rxsc->hw_sa_id[assoc_num];
+ map_req->sa_in_use = sa_in_use;
+ map_req->sc_id = rxsc->hw_sc_id;
+ map_req->an = assoc_num;
+
+ /* Send two messages together */
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc,
+ u8 assoc_num, u64 next_pn)
+{
+ struct mcs_pn_table_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->pn_id = rxsc->hw_sa_id[assoc_num];
+ req->next_pn = next_pn;
+ req->dir = MCS_RX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct mcs_secy_plcy_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct macsec_tx_sc *sw_tx_sc;
+ /* Insert SecTag after 12 bytes (DA+SA) */
+ u8 tag_offset = 12;
+ u8 sectag_tci = 0;
+ u64 policy;
+ int ret;
+
+ sw_tx_sc = &secy->tx_sc;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (sw_tx_sc->send_sci) {
+ sectag_tci |= MCS_TCI_SC;
+ } else {
+ if (sw_tx_sc->end_station)
+ sectag_tci |= MCS_TCI_ES;
+ if (sw_tx_sc->scb)
+ sectag_tci |= MCS_TCI_SCB;
+ }
+
+ if (sw_tx_sc->encrypt)
+ sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
+
+ policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
+ /* Write SecTag excluding AN bits(1..0) */
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
+ policy |= MCS_TX_SECY_PLCY_INS_MODE;
+ policy |= MCS_TX_SECY_PLCY_AUTH_ENA;
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, MCS_GCM_AES_128);
+
+ if (secy->protect_frames)
+ policy |= MCS_TX_SECY_PLCY_PROTECT;
+
+ /* If the encoding SA does not exist or is not active, and protect
+ * is not set, then frames can be sent out as is. Hence enable the
+ * policy irrespective of the secy operational state when !protect.
+ */
+ if (!secy->protect_frames || secy->operational)
+ policy |= MCS_TX_SECY_PLCY_ENA;
+
+ req->plcy = policy;
+ req->secy_id = txsc->hw_secy_id_tx;
+ req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
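
To make the TCI packing above concrete: the MCS_TCI_* values follow the 802.1AE SecTAG TCI bit layout (ES bit 6, SC bit 5, SCB bit 4, E bit 3, C bit 2, with the two AN bits in bits 1:0 of the same octet), and the 6-bit ST_TCI field excludes the AN bits, hence the >> 2 above. A worked example for an encrypting SecY that carries the SCI:

    sectag_tci = MCS_TCI_SC | MCS_TCI_E | MCS_TCI_C;                /* 0x20 | 0x08 | 0x04 = 0x2c */
    policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2); /* writes 0x0b */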
+
+static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct mcs_flowid_entry_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 mac_sa;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);
+
+ req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
+ req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);
+
+ req->mask[0] = ~0ULL;
+ req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;
+
+ req->mask[1] = ~0ULL;
+ req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;
+
+ req->mask[2] = ~0ULL;
+ req->mask[3] = ~0ULL;
+
+ req->flow_id = txsc->hw_flow_id;
+ req->secy_id = txsc->hw_secy_id_tx;
+ req->sc_id = txsc->hw_sc_id;
+ req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
+ req->dir = MCS_TX;
+ /* This can be enabled since stack xmits packets only when interface is up */
+ req->ena = 1;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ u8 sa_num, bool sa_active)
+{
+ struct mcs_tx_sc_sa_map *map_req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ /* Link the encoding_sa only to SC out of all SAs */
+ if (txsc->encoding_sa != sa_num)
+ return 0;
+
+ mutex_lock(&mbox->lock);
+
+ map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
+ if (!map_req) {
+ otx2_mbox_reset(&mbox->mbox, 0);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ map_req->sa_index0 = txsc->hw_sa_id[sa_num];
+ map_req->sa_index0_vld = sa_active;
+ map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
+ map_req->sc_id = txsc->hw_sc_id;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ u8 assoc_num)
+{
+ unsigned char *src = txsc->sa_key[assoc_num];
+ struct mcs_sa_plcy_write_req *plcy_req;
+ struct mbox *mbox = &pfvf->mbox;
+ u8 reg, key_len;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+ if (!plcy_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
+ memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8);
+ reg++;
+ }
+
+ plcy_req->plcy[0][8] = assoc_num;
+ plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
+ plcy_req->sa_cnt = 1;
+ plcy_req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
+ struct cn10k_mcs_txsc *txsc,
+ u8 assoc_num, u64 next_pn)
+{
+ struct mcs_pn_table_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->pn_id = txsc->hw_sa_id[assoc_num];
+ req->next_pn = next_pn;
+ req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
+ bool enable, enum mcs_direction dir)
+{
+ struct mcs_flowid_ena_dis_entry *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->flow_id = hw_flow_id;
+ req->ena = enable;
+ req->dir = dir;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
+ struct mcs_sa_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_stats_req *req;
+ struct mcs_sa_stats *rsp;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_sa_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_sa_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SA;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
+ struct mcs_sc_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_stats_req *req;
+ struct mcs_sc_stats *rsp;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_sc_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_sc_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SC;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
+ struct mcs_secy_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_secy_stats *rsp;
+ struct mcs_stats_req *req;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_secy_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_secy_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SECY;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
+{
+ struct cn10k_mcs_txsc *txsc;
+ int ret;
+
+ txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
+ if (!txsc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ &txsc->hw_flow_id);
+ if (ret)
+ goto fail;
+
+ /* For a SecY, one TX secy and one RX secy HW resources are needed */
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ &txsc->hw_secy_id_tx);
+ if (ret)
+ goto free_flowid;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ &txsc->hw_secy_id_rx);
+ if (ret)
+ goto free_tx_secy;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+ &txsc->hw_sc_id);
+ if (ret)
+ goto free_rx_secy;
+
+ return txsc;
+free_rx_secy:
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_rx, false);
+free_tx_secy:
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_tx, false);
+free_flowid:
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ txsc->hw_flow_id, false);
+fail:
+ kfree(txsc);
+ return ERR_PTR(ret);
+}
+
+/* Free the Tx SC and any of its SAs' resources back to the AF
+ */
+static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
+ struct cn10k_mcs_txsc *txsc)
+{
+ u8 sa_bmap = txsc->sa_bmap;
+ u8 sa_num = 0;
+
+ while (sa_bmap) {
+ if (sa_bmap & 1) {
+ cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
+ txsc, sa_num);
+ cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+ }
+ sa_num++;
+ sa_bmap >>= 1;
+ }
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+ txsc->hw_sc_id, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_rx, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_tx, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ txsc->hw_flow_id, false);
+}
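
The bit-walk over sa_bmap in cn10k_mcs_delete_txsc() (and in its Rx counterpart below) could equally be written with the kernel's for_each_set_bit() iterator; a minimal sketch, assuming sa_bmap never has bits set at or beyond CN10K_MCS_SA_PER_SC:

    unsigned long bmap = txsc->sa_bmap; /* the iterator expects an unsigned long */
    unsigned int sa;

    for_each_set_bit(sa, &bmap, CN10K_MCS_SA_PER_SC) {
            cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy, txsc, sa);
            cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa]);
    }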
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
+{
+ struct cn10k_mcs_rxsc *rxsc;
+ int ret;
+
+ rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
+ if (!rxsc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ &rxsc->hw_flow_id);
+ if (ret)
+ goto fail;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+ &rxsc->hw_sc_id);
+ if (ret)
+ goto free_flowid;
+
+ return rxsc;
+free_flowid:
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ rxsc->hw_flow_id, false);
+fail:
+ kfree(rxsc);
+ return ERR_PTR(ret);
+}
+
+/* Free the Rx SC and any of its SAs' resources back to the AF
+ */
+static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc)
+{
+ u8 sa_bmap = rxsc->sa_bmap;
+ u8 sa_num = 0;
+
+ while (sa_bmap) {
+ if (sa_bmap & 1) {
+ cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
+ sa_num, false);
+ cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+ }
+ sa_num++;
+ sa_bmap >>= 1;
+ }
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+ rxsc->hw_sc_id, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ rxsc->hw_flow_id, false);
+}
+
+static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
+{
+ if (sw_tx_sa) {
+ cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+ cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
+ sw_tx_sa->active);
+ }
+
+ cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
+ cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
+ /* When updating secy, change RX secy also */
+ cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);
+
+ return 0;
+}
+
+static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
+ struct macsec_secy *secy, u8 hw_secy_id)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *mcs_rx_sc;
+ struct macsec_rx_sc *sw_rx_sc;
+ struct macsec_rx_sa *sw_rx_sa;
+ u8 sa_num;
+
+ for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+ sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+ mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (unlikely(!mcs_rx_sc))
+ continue;
+
+ for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
+ sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
+ if (!sw_rx_sa)
+ continue;
+
+ cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
+ sa_num, sw_rx_sa->active);
+ cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
+ sw_rx_sa->next_pn_halves.lower);
+ }
+
+ cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
+ cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
+ }
+
+ return 0;
+}
+
+static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ bool delete)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *mcs_rx_sc;
+ struct macsec_rx_sc *sw_rx_sc;
+ int ret;
+
+ for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+ sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+ mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (unlikely(!mcs_rx_sc))
+ continue;
+
+ ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
+ false, MCS_RX);
+ if (ret)
+ dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
+ mcs_rx_sc->hw_sc_id);
+ if (delete) {
+ cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
+ list_del(&mcs_rx_sc->entry);
+ kfree(mcs_rx_sc);
+ }
+ }
+
+ return 0;
+}
+
+static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_secy_stats rx_rsp = { 0 };
+ struct mcs_sc_stats sc_rsp = { 0 };
+ struct cn10k_mcs_rxsc *rxsc;
+
+ /* Because some stats share hardware counters, take a snapshot of the
+ * current stats and reset them when the secy policy is updated.
+ * The stats affected by the shared counters are listed below.
+ */
+
+ /* Check if sync is really needed */
+ if (secy->validate_frames == txsc->last_validate_frames &&
+ secy->replay_protect == txsc->last_replay_protect)
+ return;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+
+ txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+ txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+ txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+ if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
+ txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+ else
+ txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+
+ list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+ cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);
+
+ rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
+ rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;
+
+ rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
+ rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;
+
+ if (txsc->last_replay_protect)
+ rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
+ else
+ rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;
+
+ if (txsc->last_validate_frames == MACSEC_VALIDATE_DISABLED)
+ rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
+ else
+ rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
+ }
+
+ txsc->last_validate_frames = secy->validate_frames;
+ txsc->last_replay_protect = secy->replay_protect;
+}
+
+static int cn10k_mdo_open(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct macsec_tx_sa *sw_tx_sa;
+ struct cn10k_mcs_txsc *txsc;
+ u8 sa_num;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ sa_num = txsc->encoding_sa;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+
+ err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+ if (err)
+ return err;
+
+ return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
+}
+
+static int cn10k_mdo_stop(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+ if (err)
+ return err;
+
+ return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
+}
+
+static int cn10k_mdo_add_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_txsc *txsc;
+
+ if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
+ return -EOPNOTSUPP;
+
+ /* Stick to 16 bytes key len until XPN support is added */
+ if (secy->key_len != 16)
+ return -EOPNOTSUPP;
+
+ if (secy->xpn)
+ return -EOPNOTSUPP;
+
+ txsc = cn10k_mcs_create_txsc(pfvf);
+ if (IS_ERR(txsc))
+ return -ENOSPC;
+
+ txsc->sw_secy = secy;
+ txsc->encoding_sa = secy->tx_sc.encoding_sa;
+ txsc->last_validate_frames = secy->validate_frames;
+ txsc->last_replay_protect = secy->replay_protect;
+
+ list_add(&txsc->entry, &cfg->txsc_list);
+
+ if (netif_running(secy->netdev))
+ return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct macsec_tx_sa *sw_tx_sa;
+ struct cn10k_mcs_txsc *txsc;
+ bool active;
+ u8 sa_num;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ /* Encoding SA got changed */
+ if (txsc->encoding_sa != secy->tx_sc.encoding_sa) {
+ txsc->encoding_sa = secy->tx_sc.encoding_sa;
+ sa_num = txsc->encoding_sa;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+ active = sw_tx_sa ? sw_tx_sa->active : false;
+ cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num, active);
+ }
+
+ if (netif_running(secy->netdev)) {
+ cn10k_mcs_sync_stats(pfvf, secy, txsc);
+
+ err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+ cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
+ cn10k_mcs_delete_txsc(pfvf, txsc);
+ list_del(&txsc->entry);
+ kfree(txsc);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
+ return -ENOSPC;
+
+ memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
+ txsc->sa_bmap |= 1 << sa_num;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+ if (err)
+ return err;
+
+ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+ sa_num, sw_tx_sa->active);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (netif_running(secy->netdev)) {
+ /* Keys cannot be changed after creation */
+ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+ sa_num, sw_tx_sa->active);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+ txsc->sa_bmap &= ~(1 << sa_num);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_rxsc *rxsc;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ rxsc = cn10k_mcs_create_rxsc(pfvf);
+ if (IS_ERR(rxsc))
+ return -ENOSPC;
+
+ rxsc->sw_secy = ctx->secy;
+ rxsc->sw_rxsc = ctx->rx_sc;
+ list_add(&rxsc->entry, &cfg->rxsc_list);
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ bool enable = ctx->rx_sc->active;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (netif_running(secy->netdev))
+ return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
+ enable, MCS_RX);
+
+ return 0;
+}
+
+static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
+ cn10k_mcs_delete_rxsc(pfvf, rxsc);
+ list_del(&rxsc->entry);
+ kfree(rxsc);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ u64 next_pn = rx_sa->next_pn_halves.lower;
+ struct macsec_secy *secy = ctx->secy;
+ bool sa_in_use = rx_sa->active;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+ int err;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
+ return -ENOSPC;
+
+ memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
+ rxsc->sa_bmap |= 1 << sa_num;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
+ sa_num, sa_in_use);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ u64 next_pn = rx_sa->next_pn_halves.lower;
+ struct macsec_secy *secy = ctx->secy;
+ bool sa_in_use = rx_sa->active;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+ int err;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
+ cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+
+ rxsc->sa_bmap &= ~(1 << sa_num);
+
+ return 0;
+}
+
+static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
+{
+ struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
+ ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
+ ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+ txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+ txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+ txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+ if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
+ txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+ else
+ txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+ txsc->stats.InPktsOverrun = 0;
+
+ ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
+ ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
+ ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
+ ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
+ ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
+ ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sc_stats rsp = { 0 };
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);
+
+ ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+ ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+ ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
+ ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sa_stats rsp = { 0 };
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);
+
+ ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+ ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct mcs_sc_stats rsp = { 0 };
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);
+
+ rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
+ rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;
+
+ rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
+ rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;
+
+ if (secy->replay_protect)
+ rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
+ else
+ rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;
+
+ if (secy->validate_frames == MACSEC_VALIDATE_DISABLED)
+ rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
+ else
+ rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;
+
+ ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
+ ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
+ ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
+ ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
+ ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
+ ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
+ ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
+ ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sa_stats rsp = { 0 };
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);
+
+ ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
+ ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
+ ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
+ ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
+ ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;
+
+ return 0;
+}
+
+static const struct macsec_ops cn10k_mcs_ops = {
+ .mdo_dev_open = cn10k_mdo_open,
+ .mdo_dev_stop = cn10k_mdo_stop,
+ .mdo_add_secy = cn10k_mdo_add_secy,
+ .mdo_upd_secy = cn10k_mdo_upd_secy,
+ .mdo_del_secy = cn10k_mdo_del_secy,
+ .mdo_add_rxsc = cn10k_mdo_add_rxsc,
+ .mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
+ .mdo_del_rxsc = cn10k_mdo_del_rxsc,
+ .mdo_add_rxsa = cn10k_mdo_add_rxsa,
+ .mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
+ .mdo_del_rxsa = cn10k_mdo_del_rxsa,
+ .mdo_add_txsa = cn10k_mdo_add_txsa,
+ .mdo_upd_txsa = cn10k_mdo_upd_txsa,
+ .mdo_del_txsa = cn10k_mdo_del_txsa,
+ .mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
+ .mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
+ .mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
+ .mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
+ .mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
+};
+
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_tx_sa *sw_tx_sa = NULL;
+ struct macsec_secy *secy = NULL;
+ struct cn10k_mcs_txsc *txsc;
+ u8 an;
+
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return;
+
+ if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
+ return;
+
+ /* Find the SecY to which the expired hardware SA is mapped */
+ list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+ for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
+ if (txsc->hw_sa_id[an] == event->sa_id) {
+ secy = txsc->sw_secy;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
+ }
+ }
+
+ if (secy && sw_tx_sa)
+ macsec_pn_wrapped(secy, sw_tx_sa);
+}
+
+int cn10k_mcs_init(struct otx2_nic *pfvf)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct cn10k_mcs_cfg *cfg;
+ struct mcs_intr_cfg *req;
+
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return 0;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cfg->txsc_list);
+ INIT_LIST_HEAD(&cfg->rxsc_list);
+ pfvf->macsec_cfg = cfg;
+
+ pfvf->netdev->features |= NETIF_F_HW_MACSEC;
+ pfvf->netdev->macsec_ops = &cn10k_mcs_ops;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
+ if (!req)
+ goto fail;
+
+ req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ if (otx2_sync_mbox_msg(mbox))
+ goto fail;
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
+ mutex_unlock(&mbox->lock);
+ return 0;
+}
+
+void cn10k_mcs_free(struct otx2_nic *pfvf)
+{
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return;
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
+ kfree(pfvf->macsec_cfg);
+ pfvf->macsec_cfg = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 66da31f30d3e..8a41ad8ca04f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -97,11 +97,6 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf)
{
struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats;
-#define OTX2_GET_RX_STATS(reg) \
- otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
-#define OTX2_GET_TX_STATS(reg) \
- otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
-
dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP);
dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST);
@@ -222,8 +217,11 @@ EXPORT_SYMBOL(otx2_set_mac_address);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
struct nix_frs_cfg *req;
+ u16 maxlen;
int err;
+ maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
if (!req) {
@@ -233,6 +231,10 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ /* Use max receive length supported by hardware for loopback devices */
+ if (is_otx2_lbkvf(pfvf->pdev))
+ req->maxlen = maxlen;
+
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
return err;
@@ -262,6 +264,7 @@ unlock:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_config_pause_frm);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
@@ -583,8 +586,9 @@ void otx2_get_mac_from_af(struct net_device *netdev)
}
EXPORT_SYMBOL(otx2_get_mac_from_af);
-int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for_pfc)
{
+ u16 (*schq_list)[MAX_TXSCHQ_PER_FUNC];
struct otx2_hw *hw = &pfvf->hw;
struct nix_txschq_config *req;
u64 schq, parent;
@@ -599,7 +603,13 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->lvl = lvl;
req->num_regs = 1;
- schq = hw->txschq_list[lvl][0];
+ schq_list = hw->txschq_list;
+#ifdef CONFIG_DCB
+ if (txschq_for_pfc)
+ schq_list = pfvf->pfc_schq_list;
+#endif
+
+ schq = schq_list[lvl][prio];
/* Set topology etc. configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
@@ -608,7 +618,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
(0x2ULL << 36);
req->num_regs++;
/* MDQ config */
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL4][prio];
req->reg[1] = NIX_AF_MDQX_PARENT(schq);
req->regval[1] = parent << 16;
req->num_regs++;
@@ -616,21 +626,29 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
req->regval[2] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL4) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL3) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
+ if (lvl == hw->txschq_link_cfg_lvl) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure,
+ * and set the relative channel
+ */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
+ }
} else if (lvl == NIX_TXSCH_LVL_TL2) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
req->regval[0] = parent << 16;
@@ -638,11 +656,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
- req->num_regs++;
- req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
-
+ if (lvl == hw->txschq_link_cfg_lvl) {
+ req->num_regs++;
+ req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
+ /* Enable this queue and backpressure,
+ * and set the relative channel
+ */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
+ }
} else if (lvl == NIX_TXSCH_LVL_TL1) {
/* Default config for TL1.
* For VF this is always ignored.
@@ -666,6 +687,31 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+EXPORT_SYMBOL(otx2_txschq_config);
+
+int otx2_smq_flush(struct otx2_nic *pfvf, int smq)
+{
+ struct nix_txschq_config *req;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->lvl = NIX_TXSCH_LVL_SMQ;
+ req->reg[0] = NIX_AF_SMQX_CFG(smq);
+ req->regval[0] |= BIT_ULL(49);
+ req->num_regs++;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+EXPORT_SYMBOL(otx2_smq_flush);
int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
@@ -796,8 +842,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
aq->sq.cq_ena = 1;
aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
@@ -853,6 +898,7 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
}
sq->head = 0;
+ sq->cons_head = 0;
sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
/* Set SQE threshold to 10% of total SQEs */
@@ -931,7 +977,11 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
if (!is_otx2_lbkvf(pfvf->pdev)) {
/* Enable receive CQ backpressure */
aq->cq.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]];
+#else
aq->cq.bpid = pfvf->bpid[0];
+#endif
/* Set backpressure level is same as cq pass level */
aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
@@ -1036,7 +1086,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
struct nix_lf_alloc_rsp *rsp;
int err;
- pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512;
+ pfvf->qset.xqe_size = pfvf->hw.xqe_size;
/* Get memory to put this msg */
nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
@@ -1049,7 +1099,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
nixlf->rss_grps = MAX_RSS_GROUPS;
- nixlf->xqe_sz = NIX_XQESZ_W16;
+ nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64;
/* We don't know absolute NPA LF idx attached.
* AF will replace 'RVU_DEFAULT_PF_FUNC' with
* NPA LF attached to this RVU PF/VF.
@@ -1211,7 +1261,11 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
*/
if (pfvf->nix_blkaddr == BLKADDR_NIX1)
aq->aura.bp_ena = 1;
+#ifdef CONFIG_DCB
+ aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]];
+#else
aq->aura.nix0_bpid = pfvf->bpid[0];
+#endif
/* Set backpressure level for RQ's Aura */
aq->aura.bp = RQ_BP_LVL_AURA;
@@ -1322,18 +1376,23 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
sq = &qset->sq[qidx];
sq->sqb_count = 0;
sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
- if (!sq->sqb_ptrs)
- return -ENOMEM;
+ if (!sq->sqb_ptrs) {
+ err = -ENOMEM;
+ goto err_mem;
+ }
for (ptr = 0; ptr < num_sqbs; ptr++) {
- if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
- return -ENOMEM;
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ if (err)
+ goto err_mem;
pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
}
- return 0;
+err_mem:
+ return err ? -ENOMEM : 0;
+
fail:
otx2_mbox_reset(&pfvf->mbox.mbox, 0);
otx2_aura_pool_free(pfvf);
@@ -1376,13 +1435,13 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
for (ptr = 0; ptr < num_ptrs; ptr++) {
- if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
+ err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+ if (err)
return -ENOMEM;
pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
bufptr + OTX2_HEAD_ROOM);
}
}
-
return 0;
fail:
otx2_mbox_reset(&pfvf->mbox.mbox, 0);
@@ -1538,11 +1597,18 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
return -ENOMEM;
req->chan_base = 0;
- req->chan_cnt = 1;
+#ifdef CONFIG_DCB
+ req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
+ req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
+#else
+ req->chan_cnt = 1;
req->bpid_per_chan = 0;
+#endif
+
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+EXPORT_SYMBOL(otx2_nix_config_bp);
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
@@ -1573,6 +1639,8 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
for (schq = 0; schq < rsp->schq[lvl]; schq++)
pf->hw.txschq_list[lvl][schq] =
rsp->schq_list[lvl][schq];
+
+ pf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;
}
EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);
@@ -1704,6 +1772,56 @@ out:
}
EXPORT_SYMBOL(otx2_get_max_mtu);
+int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features)
+{
+ netdev_features_t changed = features ^ netdev->features;
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ bool ntuple = !!(features & NETIF_F_NTUPLE);
+ bool tc = !!(features & NETIF_F_HW_TC);
+
+ if ((changed & NETIF_F_NTUPLE) && !ntuple)
+ otx2_destroy_ntuple_flows(pfvf);
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple) {
+ if (!pfvf->flow_cfg->max_flows) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_TC) && tc) {
+ if (!pfvf->flow_cfg->max_flows) {
+ netdev_err(netdev,
+ "Can't enable TC, MCAM entries not allocated\n");
+ return -EINVAL;
+ }
+ }
+
+ if ((changed & NETIF_F_HW_TC) && !tc &&
+ pfvf->flow_cfg && pfvf->flow_cfg->nr_flows) {
+ netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
+ return -EBUSY;
+ }
+
+ if ((changed & NETIF_F_NTUPLE) && ntuple &&
+ (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
+ netdev_err(netdev,
+ "Can't enable NTUPLE when TC is active, disable TC and retry\n");
+ return -EINVAL;
+ }
+
+ if ((changed & NETIF_F_HW_TC) && tc &&
+ (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
+ netdev_err(netdev,
+ "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_handle_ntuple_tc_features);
+
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
int __weak \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
@@ -1715,4 +1833,5 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
} \
EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
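otx2_handle_ntuple_tc_features() above works by XOR-ing the currently enabled and requested feature masks to find which bits are changing, and it rejects transitions that would leave NTUPLE and TC offload enabled at the same time. A minimal userspace sketch of that pattern (flag values are illustrative stand-ins, not the kernel's NETIF_F_* bits):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define F_NTUPLE (1ULL << 0)	/* stand-in for NETIF_F_NTUPLE */
#define F_HW_TC  (1ULL << 1)	/* stand-in for NETIF_F_HW_TC */

/* old_f: currently enabled features, new_f: requested feature set */
static bool features_allowed(uint64_t old_f, uint64_t new_f)
{
	uint64_t changed = old_f ^ new_f;

	/* Enabling NTUPLE while TC stays enabled is rejected */
	if ((changed & F_NTUPLE) && (new_f & F_NTUPLE) &&
	    (old_f & F_HW_TC) && !(changed & F_HW_TC))
		return false;

	/* Enabling TC while NTUPLE stays enabled is rejected */
	if ((changed & F_HW_TC) && (new_f & F_HW_TC) &&
	    (old_f & F_NTUPLE) && !(changed & F_NTUPLE))
		return false;

	return true;
}

int main(void)
{
	printf("%d\n", features_allowed(F_HW_TC, F_HW_TC | F_NTUPLE));	/* 0 */
	printf("%d\n", features_allowed(0, F_NTUPLE));			/* 1 */
	return 0;
}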
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 61e52812983f..0c8fc66ade82 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -17,6 +17,9 @@
#include <linux/soc/marvell/octeontx2/asm.h>
#include <net/pkt_cls.h>
#include <net/devlink.h>
+#include <linux/time64.h>
+#include <linux/dim.h>
+#include <uapi/linux/if_macsec.h>
#include <mbox.h>
#include <npc.h>
@@ -25,12 +28,16 @@
#include "otx2_devlink.h"
#include <rvu_trace.h>
+/* IPv4 flag more fragment bit */
+#define IPV4_FLAG_MORE 0x20
+
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
#define PCI_DEVID_OCTEONTX2_RVU_VF 0xA064
#define PCI_DEVID_OCTEONTX2_RVU_AFVF 0xA0F8
#define PCI_SUBSYS_DEVID_96XX_RVU_PFVF 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF 0xBD00
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 2
@@ -38,6 +45,11 @@
#define NAME_SIZE 32
+#ifdef CONFIG_DCB
+/* Max priority supported for PFC */
+#define NIX_PF_PFC_PRIO_MAX 8
+#endif
+
enum arua_mapped_qtypes {
AURA_NIX_RQ,
AURA_NIX_SQ,
@@ -53,6 +65,11 @@ enum arua_mapped_qtypes {
/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID 2000
+#define OTX2_GET_RX_STATS(reg) \
+ otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
+#define OTX2_GET_TX_STATS(reg) \
+ otx2_read64(pfvf, NIX_LF_TX_STATX(reg))
+
struct otx2_lmt_info {
u64 lmt_addr;
u16 lmt_id;
@@ -178,13 +195,18 @@ struct otx2_hw {
u16 rqpool_cnt;
u16 sqpool_cnt;
+#define OTX2_DEFAULT_RBUF_LEN 2048
+ u16 rbuf_len;
+ u32 xqe_size;
+
/* NPA */
u32 stack_pg_ptrs; /* No of ptrs per stack page */
u32 stack_pg_bytes; /* Size of stack page */
u16 sqb_size;
/* NIX */
- u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u8 txschq_link_cfg_lvl;
+ u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
u32 dwrr_mtu;
@@ -226,6 +248,8 @@ struct otx2_hw {
#define CN10K_MBOX 1
#define CN10K_LMTST 2
#define CN10K_RPM 3
+#define CN10K_PTP_ONESTEP 4
+#define CN10K_HW_MACSEC 5
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
@@ -259,6 +283,13 @@ struct refill_work {
struct otx2_nic *pf;
};
+/* PTPv2 originTimestamp structure */
+struct ptpv2_tstamp {
+ __be16 seconds_msb; /* 16 bits + */
+ __be32 seconds_lsb; /* 32 bits = 48 bits*/
+ __be32 nanoseconds;
+} __packed;
+
struct otx2_ptp {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
@@ -272,6 +303,11 @@ struct otx2_ptp {
u64 thresh;
struct ptp_pin_desc extts_config;
+ u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
+ u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
+ struct delayed_work synctstamp_work;
+ u64 tstamp;
+ u32 base_ns;
};
#define OTX2_HW_TIMESTAMP_LEN 8
@@ -299,11 +335,11 @@ struct otx2_flow_config {
#define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX 0
#define OTX2_VF_VLAN_TX_INDEX 1
- u16 max_flows;
- u8 dmacflt_max_flows;
- u8 *bmap_to_dmacindex;
- unsigned long dmacflt_bmap;
+ u32 *bmap_to_dmacindex;
+ unsigned long *dmacflt_bmap;
struct list_head flow_list;
+ u32 dmacflt_max_flows;
+ u16 max_flows;
};
struct otx2_tc_info {
@@ -321,6 +357,66 @@ struct dev_hw_ops {
void (*aura_freeptr)(void *dev, int aura, u64 buf);
};
+#define CN10K_MCS_SA_PER_SC 4
+
+/* Stats which need to be accumulated in software because
+ * of shared counters in hardware.
+ */
+struct cn10k_txsc_stats {
+ u64 InPktsUntagged;
+ u64 InPktsNoTag;
+ u64 InPktsBadTag;
+ u64 InPktsUnknownSCI;
+ u64 InPktsNoSCI;
+ u64 InPktsOverrun;
+};
+
+struct cn10k_rxsc_stats {
+ u64 InOctetsValidated;
+ u64 InOctetsDecrypted;
+ u64 InPktsUnchecked;
+ u64 InPktsDelayed;
+ u64 InPktsOK;
+ u64 InPktsInvalid;
+ u64 InPktsLate;
+ u64 InPktsNotValid;
+ u64 InPktsNotUsingSA;
+ u64 InPktsUnusedSA;
+};
+
+struct cn10k_mcs_txsc {
+ struct macsec_secy *sw_secy;
+ struct cn10k_txsc_stats stats;
+ struct list_head entry;
+ enum macsec_validation_type last_validate_frames;
+ bool last_replay_protect;
+ u16 hw_secy_id_tx;
+ u16 hw_secy_id_rx;
+ u16 hw_flow_id;
+ u16 hw_sc_id;
+ u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+ u8 sa_bmap;
+ u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+ u8 encoding_sa;
+};
+
+struct cn10k_mcs_rxsc {
+ struct macsec_secy *sw_secy;
+ struct macsec_rx_sc *sw_rxsc;
+ struct cn10k_rxsc_stats stats;
+ struct list_head entry;
+ u16 hw_flow_id;
+ u16 hw_sc_id;
+ u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+ u8 sa_bmap;
+ u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+};
+
+struct cn10k_mcs_cfg {
+ struct list_head txsc_list;
+ struct list_head rxsc_list;
+};
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@@ -344,6 +440,8 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
+#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
+#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
u64 flags;
u64 *cq_op_addr;
@@ -396,6 +494,20 @@ struct otx2_nic {
/* Devlink */
struct otx2_devlink *dl;
+#ifdef CONFIG_DCB
+ /* PFC */
+ u8 pfc_en;
+ u8 *queue_to_pfc_map;
+ u16 pfc_schq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ bool pfc_alloc_status[NIX_PF_PFC_PRIO_MAX];
+#endif
+
+ /* napi event count. It is needed for adaptive irq coalescing. */
+ u32 napi_events;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ struct cn10k_mcs_cfg *macsec_cfg;
+#endif
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -435,6 +547,11 @@ static inline bool is_dev_otx2(struct pci_dev *pdev)
midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
}
+static inline bool is_dev_cn10kb(struct pci_dev *pdev)
+{
+ return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
+}
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -464,7 +581,11 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
__set_bit(CN10K_MBOX, &hw->cap_flag);
__set_bit(CN10K_LMTST, &hw->cap_flag);
__set_bit(CN10K_RPM, &hw->cap_flag);
+ __set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
}
+
+ if (is_dev_cn10kb(pfvf->pdev))
+ __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
}
/* Register read/write APIs */
@@ -603,6 +724,7 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
size++;
tar_addr |= ((size - 1) & 0x7) << 4;
}
+ dma_wmb();
memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
/* Perform LMTST flush */
cn10k_lmt_flush(val, tar_addr);
@@ -614,8 +736,10 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
u64 ptrs[2];
ptrs[1] = buf;
+ get_cpu();
/* Free only one buffer at time during init and teardown */
__cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
+ put_cpu();
}
/* Alloc pointer from pool/aura */
@@ -719,6 +843,7 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
struct _rsp_type *rsp); \
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
/* Time to wait before watchdog kicks off */
@@ -761,6 +886,16 @@ static inline void otx2_dma_unmap_page(struct otx2_nic *pfvf,
dir, DMA_ATTR_SKIP_CPU_SYNC);
}
+static inline u16 otx2_get_smq_idx(struct otx2_nic *pfvf, u16 qidx)
+{
+#ifdef CONFIG_DCB
+ if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
+ return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];
+#endif
+
+ return pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+}
+
/* MSI-X APIs */
void otx2_free_cints(struct otx2_nic *pfvf, int n);
void otx2_set_cints_affinity(struct otx2_nic *pfvf);
@@ -783,7 +918,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type);
void otx2_sq_free_sqbs(struct otx2_nic *pfvf);
int otx2_config_nix(struct otx2_nic *pfvf);
int otx2_config_nix_queues(struct otx2_nic *pfvf);
-int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
@@ -862,6 +997,10 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
+int otx2_handle_ntuple_tc_features(struct net_device *netdev,
+ netdev_features_t features);
+int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
+
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
@@ -870,9 +1009,35 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic);
/* CGX/RPM DMAC filters support */
int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
-int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
-int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
-int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u32 bit_pos);
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos);
void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
+
+#ifdef CONFIG_DCB
+/* DCB support*/
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx, bool pfc_enable);
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf);
+int otx2_dcbnl_set_ops(struct net_device *dev);
+/* PFC support */
+int otx2_pfc_txschq_config(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
+int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
+#endif
+
+#if IS_ENABLED(CONFIG_MACSEC)
+/* MACSEC offload support */
+int cn10k_mcs_init(struct otx2_nic *pfvf);
+void cn10k_mcs_free(struct otx2_nic *pfvf);
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
+#else
+static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
+static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
+static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
+ struct mcs_intr_info *event)
+{}
+#endif /* CONFIG_MACSEC */
+
#endif /* OTX2_COMMON_H */
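struct ptpv2_tstamp added above splits the PTPv2 originTimestamp's 48-bit seconds field into a 16-bit MSB and a 32-bit LSB, both big-endian on the wire. A small userspace sketch of the packing arithmetic (illustration only, not driver code):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct ptpv2_ts {
	uint16_t seconds_msb;	/* big-endian on the wire */
	uint32_t seconds_lsb;	/* big-endian on the wire */
	uint32_t nanoseconds;	/* big-endian on the wire */
} __attribute__((packed));

static void pack_ts(struct ptpv2_ts *ts, uint64_t sec, uint32_t nsec)
{
	ts->seconds_msb = htons((uint16_t)(sec >> 32));
	ts->seconds_lsb = htonl((uint32_t)(sec & 0xffffffffu));
	ts->nanoseconds = htonl(nsec);
}

static uint64_t unpack_sec(const struct ptpv2_ts *ts)
{
	return ((uint64_t)ntohs(ts->seconds_msb) << 32) | ntohl(ts->seconds_lsb);
}

int main(void)
{
	struct ptpv2_ts ts;

	pack_ts(&ts, 0x123456789aULL, 999999999u);
	printf("seconds round-trip: %#llx\n", (unsigned long long)unpack_sec(&ts));
	return 0;
}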
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
new file mode 100644
index 000000000000..ccaf97bb1ce0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RVU Ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ *
+ */
+
+#include "otx2_common.h"
+
+static int otx2_check_pfc_config(struct otx2_nic *pfvf)
+{
+ u8 tx_queues = pfvf->hw.tx_queues, prio;
+ u8 pfc_en = pfvf->pfc_en;
+
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ if ((pfc_en & (1 << prio)) &&
+ prio > tx_queues - 1) {
+ dev_warn(pfvf->dev,
+ "Increase number of tx queues from %d to %d to support PFC.\n",
+ tx_queues, prio + 1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
+{
+ u8 pfc_en, pfc_bit_set;
+ int prio, lvl, err;
+
+ pfc_en = pfvf->pfc_en;
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ /* Either PFC bit is not set
+ * or tx scheduler is not allocated for the priority
+ */
+ if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Configure the scheduler for the TLs */
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ err = otx2_txschq_config(pfvf, lvl, prio, true);
+ if (err) {
+ dev_err(pfvf->dev,
+ "%s configure PFC tx schq for lvl:%d, prio:%d failed!\n",
+ __func__, lvl, prio);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
+{
+ struct nix_txsch_alloc_req *req;
+ struct nix_txsch_alloc_rsp *rsp;
+ int lvl, rc;
+
+ /* Get memory to put this msg */
+ req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ /* Request one schq per level up to the configured link config
+ * level. The rest of the schedulers can be the same as
+ * hw.txschq_list.
+ */
+ for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++)
+ req->schq[lvl] = 1;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ return rc;
+
+ rsp = (struct nix_txsch_alloc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ /* Setup transmit scheduler list */
+ for (lvl = 0; lvl < pfvf->hw.txschq_link_cfg_lvl; lvl++) {
+ if (!rsp->schq[lvl])
+ return -ENOSPC;
+
+ pfvf->pfc_schq_list[lvl][prio] = rsp->schq_list[lvl][0];
+ }
+
+ /* Set the Tx schedulers for the rest of the levels to the same
+ * entries as hw.txschq_list, as those are common for all.
+ */
+ for (; lvl < NIX_TXSCH_LVL_CNT; lvl++)
+ pfvf->pfc_schq_list[lvl][prio] = pfvf->hw.txschq_list[lvl][0];
+
+ pfvf->pfc_alloc_status[prio] = true;
+ return 0;
+}
+
+int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
+{
+ u8 pfc_en = pfvf->pfc_en;
+ u8 pfc_bit_set;
+ int err, prio;
+
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Add new scheduler to the priority */
+ err = otx2_pfc_txschq_alloc_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to allocate PFC TX schedulers\n", __func__);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
+{
+ struct nix_txsch_free_req *free_req;
+
+ mutex_lock(&pfvf->mbox.lock);
+ /* free PFC TLx nodes */
+ free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
+ if (!free_req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ free_req->flags = TXSCHQ_FREE_ALL;
+ otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ pfvf->pfc_alloc_status[prio] = false;
+ return 0;
+}
+
+static int otx2_pfc_update_sq_smq_mapping(struct otx2_nic *pfvf, int prio)
+{
+ struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
+ struct net_device *dev = pfvf->netdev;
+ bool if_up = netif_running(dev);
+ struct nix_aq_enq_req *sq_aq;
+
+ if (if_up) {
+ if (pfvf->pfc_alloc_status[prio])
+ netif_tx_stop_all_queues(pfvf->netdev);
+ else
+ netif_tx_stop_queue(netdev_get_tx_queue(dev, prio));
+ }
+
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!cn10k_sq_aq)
+ return -ENOMEM;
+
+ /* Fill AQ info */
+ cn10k_sq_aq->qidx = prio;
+ cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ /* Fill fields to update */
+ cn10k_sq_aq->sq.ena = 1;
+ cn10k_sq_aq->sq_mask.ena = 1;
+ cn10k_sq_aq->sq_mask.smq = GENMASK(9, 0);
+ cn10k_sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
+ } else {
+ sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!sq_aq)
+ return -ENOMEM;
+
+ /* Fill AQ info */
+ sq_aq->qidx = prio;
+ sq_aq->ctype = NIX_AQ_CTYPE_SQ;
+ sq_aq->op = NIX_AQ_INSTOP_WRITE;
+
+ /* Fill fields to update */
+ sq_aq->sq.ena = 1;
+ sq_aq->sq_mask.ena = 1;
+ sq_aq->sq_mask.smq = GENMASK(8, 0);
+ sq_aq->sq.smq = otx2_get_smq_idx(pfvf, prio);
+ }
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+ if (if_up) {
+ if (pfvf->pfc_alloc_status[prio])
+ netif_tx_start_all_queues(pfvf->netdev);
+ else
+ netif_tx_start_queue(netdev_get_tx_queue(dev, prio));
+ }
+
+ return 0;
+}
+
+int otx2_pfc_txschq_update(struct otx2_nic *pfvf)
+{
+ bool if_up = netif_running(pfvf->netdev);
+ u8 pfc_en = pfvf->pfc_en, pfc_bit_set;
+ struct mbox *mbox = &pfvf->mbox;
+ int err, prio;
+
+ mutex_lock(&mbox->lock);
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+
+ /* tx scheduler was created but the user wants to disable it now */
+ if (!pfc_bit_set && pfvf->pfc_alloc_status[prio]) {
+ mutex_unlock(&mbox->lock);
+ if (if_up)
+ netif_tx_stop_all_queues(pfvf->netdev);
+
+ otx2_smq_flush(pfvf, pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][prio]);
+ if (if_up)
+ netif_tx_start_all_queues(pfvf->netdev);
+
+ /* delete the schq */
+ err = otx2_pfc_txschq_stop_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev,
+ "%s failed to stop PFC tx schedulers for priority: %d\n",
+ __func__, prio);
+ return err;
+ }
+
+ mutex_lock(&mbox->lock);
+ goto update_sq_smq_map;
+ }
+
+ /* Either PFC bit is not set
+ * or Tx scheduler is already mapped for the priority
+ */
+ if (!pfc_bit_set || pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Add new scheduler to the priority */
+ err = otx2_pfc_txschq_alloc_one(pfvf, prio);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ dev_err(pfvf->dev,
+ "%s failed to allocate PFC tx schedulers for priority: %d\n",
+ __func__, prio);
+ return err;
+ }
+
+update_sq_smq_map:
+ err = otx2_pfc_update_sq_smq_mapping(pfvf, prio);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ dev_err(pfvf->dev, "%s failed PFC Tx schq sq:%d mapping", __func__, prio);
+ return err;
+ }
+ }
+
+ err = otx2_pfc_txschq_config(pfvf);
+ mutex_unlock(&mbox->lock);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
+{
+ u8 pfc_en, pfc_bit_set;
+ int prio, err;
+
+ pfc_en = pfvf->pfc_en;
+ for (prio = 0; prio < NIX_PF_PFC_PRIO_MAX; prio++) {
+ pfc_bit_set = pfc_en & (1 << prio);
+ if (!pfc_bit_set || !pfvf->pfc_alloc_status[prio])
+ continue;
+
+ /* Delete the existing scheduler */
+ err = otx2_pfc_txschq_stop_one(pfvf, prio);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to stop PFC TX schedulers\n", __func__);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
+{
+ struct cgx_pfc_cfg *req;
+ struct cgx_pfc_rsp *rsp;
+ int err = 0;
+
+ if (is_otx2_lbkvf(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ if (pfvf->pfc_en) {
+ req->rx_pause = true;
+ req->tx_pause = true;
+ } else {
+ req->rx_pause = false;
+ req->tx_pause = false;
+ }
+ req->pfc_en = pfvf->pfc_en;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
+ rsp = (struct cgx_pfc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
+ dev_warn(pfvf->dev,
+ "Failed to config PFC\n");
+ err = -EPERM;
+ }
+ }
+unlock:
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
+void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
+ bool pfc_enable)
+{
+ bool if_up = netif_running(pfvf->netdev);
+ struct npa_aq_enq_req *npa_aq;
+ struct nix_aq_enq_req *aq;
+ int err = 0;
+
+ if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) {
+ dev_warn(pfvf->dev,
+ "PFC enable not permitted as Priority %d already mapped to Queue %d\n",
+ pfvf->queue_to_pfc_map[qidx], qidx);
+ return;
+ }
+
+ if (if_up) {
+ netif_tx_stop_all_queues(pfvf->netdev);
+ netif_carrier_off(pfvf->netdev);
+ }
+
+ pfvf->queue_to_pfc_map[qidx] = vlan_prio;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ aq->cq.bpid = pfvf->bpid[vlan_prio];
+ aq->cq_mask.bpid = GENMASK(8, 0);
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+ npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
+ if (!npa_aq) {
+ err = -ENOMEM;
+ goto out;
+ }
+ npa_aq->aura.nix0_bpid = pfvf->bpid[vlan_prio];
+ npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0);
+
+ /* Fill NPA AQ info */
+ npa_aq->aura_id = qidx;
+ npa_aq->ctype = NPA_AQ_CTYPE_AURA;
+ npa_aq->op = NPA_AQ_INSTOP_WRITE;
+ otx2_sync_mbox_msg(&pfvf->mbox);
+
+out:
+ if (if_up) {
+ netif_carrier_on(pfvf->netdev);
+ netif_tx_start_all_queues(pfvf->netdev);
+ }
+
+ if (err)
+ dev_warn(pfvf->dev,
+ "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
+ qidx, err);
+}
+
+static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+ pfc->pfc_en = pfvf->pfc_en;
+
+ return 0;
+}
+
+static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ int err;
+
+ /* Save PFC configuration to interface */
+ pfvf->pfc_en = pfc->pfc_en;
+
+ if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX)
+ goto process_pfc;
+
+ /* Check if the PFC configuration can be
+ * supported by the tx queue configuration
+ */
+ err = otx2_check_pfc_config(pfvf);
+ if (err)
+ return err;
+
+process_pfc:
+ err = otx2_config_priority_flow_ctrl(pfvf);
+ if (err)
+ return err;
+
+ /* Request per-channel BPIDs */
+ if (pfc->pfc_en)
+ otx2_nix_config_bp(pfvf, true);
+
+ err = otx2_pfc_txschq_update(pfvf);
+ if (err) {
+ dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__);
+ return err;
+ }
+
+ return 0;
+}
+
+static u8 otx2_dcbnl_getdcbx(struct net_device __always_unused *dev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 otx2_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode)
+{
+ return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
+}
+
+static const struct dcbnl_rtnl_ops otx2_dcbnl_ops = {
+ .ieee_getpfc = otx2_dcbnl_ieee_getpfc,
+ .ieee_setpfc = otx2_dcbnl_ieee_setpfc,
+ .getdcbx = otx2_dcbnl_getdcbx,
+ .setdcbx = otx2_dcbnl_setdcbx,
+};
+
+int otx2_dcbnl_set_ops(struct net_device *dev)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+
+ pfvf->queue_to_pfc_map = devm_kzalloc(pfvf->dev, pfvf->hw.rx_queues,
+ GFP_KERNEL);
+ if (!pfvf->queue_to_pfc_map)
+ return -ENOMEM;
+ dev->dcbnl_ops = &otx2_dcbnl_ops;
+
+ return 0;
+}
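otx2_check_pfc_config() in this new file simply requires that every priority enabled in the PFC bitmap has a TX queue of the same index, i.e. prio < tx_queues for each set bit. A standalone sketch of that constraint (values are illustrative only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PFC_PRIO_MAX 8	/* mirrors NIX_PF_PFC_PRIO_MAX */

static bool pfc_config_ok(uint8_t pfc_en, uint8_t tx_queues)
{
	for (int prio = 0; prio < PFC_PRIO_MAX; prio++)
		if ((pfc_en & (1 << prio)) && prio > tx_queues - 1)
			return false;
	return true;
}

int main(void)
{
	printf("%d\n", pfc_config_ok(0x09, 4));	/* prios 0 and 3, 4 queues: ok */
	printf("%d\n", pfc_config_ok(0x20, 4));	/* prio 5, only 4 queues: rejected */
	return 0;
}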
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 777a27047c8e..63ef7c41d18d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -77,22 +77,7 @@ static const struct devlink_param otx2_dl_params[] = {
otx2_dl_mcam_count_validate),
};
-/* Devlink OPs */
-static int otx2_devlink_info_get(struct devlink *devlink,
- struct devlink_info_req *req,
- struct netlink_ext_ack *extack)
-{
- struct otx2_devlink *otx2_dl = devlink_priv(devlink);
- struct otx2_nic *pfvf = otx2_dl->pfvf;
-
- if (is_otx2_vf(pfvf->pcifunc))
- return devlink_info_driver_name_put(req, "rvu_nicvf");
-
- return devlink_info_driver_name_put(req, "rvu_nicpf");
-}
-
static const struct devlink_ops otx2_devlink_ops = {
- .info_get = otx2_devlink_info_get,
};
int otx2_register_dl(struct otx2_nic *pfvf)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
index 2ec800f741d8..80d853b343f9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
@@ -8,7 +8,7 @@
#include "otx2_common.h"
static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
- u8 *dmac_index)
+ u32 *dmac_index)
{
struct cgx_mac_addr_add_req *req;
struct cgx_mac_addr_add_rsp *rsp;
@@ -35,9 +35,10 @@ static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
return err;
}
-static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
+static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf, u32 *dmac_index)
{
struct cgx_mac_addr_set_or_get *req;
+ struct cgx_mac_addr_set_or_get *rsp;
int err;
mutex_lock(&pf->mbox.lock);
@@ -48,16 +49,31 @@ static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
return -ENOMEM;
}
+ req->index = *dmac_index;
+
ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ rsp = (struct cgx_mac_addr_set_or_get *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+
+ if (IS_ERR_OR_NULL(rsp)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ *dmac_index = rsp->index;
+out:
mutex_unlock(&pf->mbox.lock);
return err;
}
-int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u32 bit_pos)
{
- u8 *dmacindex;
+ u32 *dmacindex;
/* Store dmacindex returned by CGX/RPM driver which will
* be used for macaddr update/remove
@@ -65,13 +81,13 @@ int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];
if (ether_addr_equal(mac, pf->netdev->dev_addr))
- return otx2_dmacflt_add_pfmac(pf);
+ return otx2_dmacflt_add_pfmac(pf, dmacindex);
else
return otx2_dmacflt_do_add(pf, mac, dmacindex);
}
static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
- u8 dmac_index)
+ u32 dmac_index)
{
struct cgx_mac_addr_del_req *req;
int err;
@@ -91,9 +107,9 @@ static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
return err;
}
-static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
+static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf, u32 dmac_index)
{
- struct msg_req *req;
+ struct cgx_mac_addr_reset_req *req;
int err;
mutex_lock(&pf->mbox.lock);
@@ -102,6 +118,7 @@ static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
mutex_unlock(&pf->mbox.lock);
return -ENOMEM;
}
+ req->index = dmac_index;
err = otx2_sync_mbox_msg(&pf->mbox);
@@ -110,12 +127,12 @@ static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
}
int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
- u8 bit_pos)
+ u32 bit_pos)
{
- u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+ u32 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
if (ether_addr_equal(mac, pf->netdev->dev_addr))
- return otx2_dmacflt_remove_pfmac(pf);
+ return otx2_dmacflt_remove_pfmac(pf, dmacindex);
else
return otx2_dmacflt_do_remove(pf, mac, dmacindex);
}
@@ -144,6 +161,12 @@ int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
rsp = (struct cgx_max_dmac_entries_get_rsp *)
otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
+
+ if (IS_ERR_OR_NULL(rsp)) {
+ err = -EINVAL;
+ goto out;
+ }
+
pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
out:
@@ -151,9 +174,10 @@ out:
return err;
}
-int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos)
{
struct cgx_mac_addr_update_req *req;
+ struct cgx_mac_addr_update_rsp *rsp;
int rc;
mutex_lock(&pf->mbox.lock);
@@ -167,8 +191,19 @@ int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
ether_addr_copy(req->mac_addr, mac);
req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ /* Check the response and update the index */
+
rc = otx2_sync_mbox_msg(&pf->mbox);
+ if (rc)
+ goto out;
+
+ rsp = (struct cgx_mac_addr_update_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index;
+
+out:
mutex_unlock(&pf->mbox.lock);
return rc;
}
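The DMAC filter index here widens from u8 to u32, and the matching dmacflt_bmap (see the otx2_common.h and otx2_flows.c changes in this series) becomes a dynamically sized multi-word bitmap instead of a single unsigned long, which only tracks BITS_PER_LONG filters. A userspace illustration of that switch; bmap_set()/bmap_first_zero() are simplified stand-ins for the kernel's set_bit()/find_first_zero_bit(), not driver code:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void bmap_set(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static unsigned int bmap_first_zero(const unsigned long *map, unsigned int size)
{
	for (unsigned int bit = 0; bit < size; bit++)
		if (!(map[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG))))
			return bit;
	return size;
}

int main(void)
{
	unsigned int size = 4 * 2048 + 32;	/* OTX2_DMAC_FLTR_BITMAP_SZ */
	unsigned long *map = calloc(BITS_TO_LONGS(size), sizeof(*map));

	if (!map)
		return 1;
	bmap_set(map, 0);
	bmap_set(map, 1);
	printf("first free filter index: %u\n", bmap_first_zero(map, size));	/* 2 */
	free(map);
	return 0;
}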
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 80d4ce61f442..0f8d1a69139f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -76,8 +76,8 @@ static void otx2_get_drvinfo(struct net_device *netdev,
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(pfvf->pdev), sizeof(info->bus_info));
}
static void otx2_get_qset_strings(struct otx2_nic *pfvf, u8 **data, int qset)
@@ -360,7 +360,9 @@ static int otx2_set_pauseparam(struct net_device *netdev,
}
static void otx2_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct otx2_qset *qs = &pfvf->qset;
@@ -369,12 +371,19 @@ static void otx2_get_ringparam(struct net_device *netdev,
ring->rx_pending = qs->rqe_cnt ? qs->rqe_cnt : Q_COUNT(Q_SIZE_256);
ring->tx_max_pending = Q_COUNT(Q_SIZE_MAX);
ring->tx_pending = qs->sqe_cnt ? qs->sqe_cnt : Q_COUNT(Q_SIZE_4K);
+ kernel_ring->rx_buf_len = pfvf->hw.rbuf_len;
+ kernel_ring->cqe_size = pfvf->hw.xqe_size;
}
static int otx2_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ u32 rx_buf_len = kernel_ring->rx_buf_len;
+ u32 old_rx_buf_len = pfvf->hw.rbuf_len;
+ u32 xqe_size = kernel_ring->cqe_size;
bool if_up = netif_running(netdev);
struct otx2_qset *qs = &pfvf->qset;
u32 rx_count, tx_count;
@@ -382,6 +391,21 @@ static int otx2_set_ringparam(struct net_device *netdev,
if (ring->rx_mini_pending || ring->rx_jumbo_pending)
return -EINVAL;
+ /* Hardware supports a max receive buffer size of 32k, and
+ * 1536 is the typical Ethernet frame size.
+ */
+ if (rx_buf_len && (rx_buf_len < 1536 || rx_buf_len > 32768)) {
+ netdev_err(netdev,
+ "Receive buffer range is 1536 - 32768");
+ return -EINVAL;
+ }
+
+ if (xqe_size != 128 && xqe_size != 512) {
+ netdev_err(netdev,
+ "Completion event size must be 128 or 512");
+ return -EINVAL;
+ }
+
/* Permitted lengths are 16 64 256 1K 4K 16K 64K 256K 1M */
rx_count = ring->rx_pending;
/* On some silicon variants a skid or reserved CQEs are
@@ -399,7 +423,8 @@ static int otx2_set_ringparam(struct net_device *netdev,
Q_COUNT(Q_SIZE_4K), Q_COUNT(Q_SIZE_MAX));
tx_count = Q_COUNT(Q_SIZE(tx_count, 3));
- if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt)
+ if (tx_count == qs->sqe_cnt && rx_count == qs->rqe_cnt &&
+ rx_buf_len == old_rx_buf_len && xqe_size == pfvf->hw.xqe_size)
return 0;
if (if_up)
@@ -409,6 +434,9 @@ static int otx2_set_ringparam(struct net_device *netdev,
qs->sqe_cnt = tx_count;
qs->rqe_cnt = rx_count;
+ pfvf->hw.rbuf_len = rx_buf_len;
+ pfvf->hw.xqe_size = xqe_size;
+
if (if_up)
return netdev->netdev_ops->ndo_open(netdev);
@@ -427,6 +455,14 @@ static int otx2_get_coalesce(struct net_device *netdev,
cmd->rx_max_coalesced_frames = hw->cq_ecount_wait;
cmd->tx_coalesce_usecs = hw->cq_time_wait;
cmd->tx_max_coalesced_frames = hw->cq_ecount_wait;
+ if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
+ OTX2_FLAG_ADPTV_INT_COAL_ENABLED) {
+ cmd->use_adaptive_rx_coalesce = 1;
+ cmd->use_adaptive_tx_coalesce = 1;
+ } else {
+ cmd->use_adaptive_rx_coalesce = 0;
+ cmd->use_adaptive_tx_coalesce = 0;
+ }
return 0;
}
@@ -438,11 +474,30 @@ static int otx2_set_coalesce(struct net_device *netdev,
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct otx2_hw *hw = &pfvf->hw;
+ u8 priv_coalesce_status;
int qidx;
if (!ec->rx_max_coalesced_frames || !ec->tx_max_coalesced_frames)
return 0;
+ if (ec->use_adaptive_rx_coalesce != ec->use_adaptive_tx_coalesce) {
+ netdev_err(netdev,
+ "adaptive-rx should be same as adaptive-tx");
+ return -EINVAL;
+ }
+
+ /* Check and update coalesce status */
+ if ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
+ OTX2_FLAG_ADPTV_INT_COAL_ENABLED) {
+ priv_coalesce_status = 1;
+ if (!ec->use_adaptive_rx_coalesce)
+ pfvf->flags &= ~OTX2_FLAG_ADPTV_INT_COAL_ENABLED;
+ } else {
+ priv_coalesce_status = 0;
+ if (ec->use_adaptive_rx_coalesce)
+ pfvf->flags |= OTX2_FLAG_ADPTV_INT_COAL_ENABLED;
+ }
+
/* 'cq_time_wait' is 8bit and is in multiple of 100ns,
* so clamp the user given value to the range of 1 to 25usec.
*/
@@ -466,9 +521,9 @@ static int otx2_set_coalesce(struct net_device *netdev,
* so clamp the user given value to the range of 1 to 64k.
*/
ec->rx_max_coalesced_frames = clamp_t(u32, ec->rx_max_coalesced_frames,
- 1, U16_MAX);
+ 1, NAPI_POLL_WEIGHT);
ec->tx_max_coalesced_frames = clamp_t(u32, ec->tx_max_coalesced_frames,
- 1, U16_MAX);
+ 1, NAPI_POLL_WEIGHT);
/* Rx and Tx are mapped to same CQ, check which one
* is changed, if both then choose the min.
@@ -481,6 +536,17 @@ static int otx2_set_coalesce(struct net_device *netdev,
hw->cq_ecount_wait = min_t(u16, ec->rx_max_coalesced_frames,
ec->tx_max_coalesced_frames);
+ /* Reset 'cq_time_wait' and 'cq_ecount_wait' to
+ * default values if coalesce status changed from
+ * 'on' to 'off'.
+ */
+ if (priv_coalesce_status &&
+ ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) !=
+ OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
+ hw->cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
+ hw->cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
+ }
+
if (netif_running(netdev)) {
for (qidx = 0; qidx < pfvf->hw.cint_cnt; qidx++)
otx2_config_irq_coalescing(pfvf, qidx);
@@ -897,10 +963,12 @@ static int otx2_get_ts_info(struct net_device *netdev,
info->phc_index = otx2_ptp_clock_index(pfvf);
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ if (test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC);
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
return 0;
}
@@ -1200,9 +1268,45 @@ end:
return err;
}
+static void otx2_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+
+ otx2_update_lmac_fec_stats(pfvf);
+
+ /* Report MAC FEC stats */
+ fec_stats->corrected_blocks.total = pfvf->hw.cgx_fec_corr_blks;
+ fec_stats->uncorrectable_blocks.total = pfvf->hw.cgx_fec_uncorr_blks;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
+ !otx2_get_phy_fec_stats(pfvf)) {
+ /* Fetch fwdata again because it's been recently populated with
+ * the latest PHY FEC stats.
+ */
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp)) {
+ struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
+
+ if (pfvf->linfo.fec == OTX2_FEC_BASER) {
+ fec_stats->corrected_blocks.total = p->brfec_corr_blks;
+ fec_stats->uncorrectable_blocks.total = p->brfec_uncorr_blks;
+ } else {
+ fec_stats->corrected_blocks.total = p->rsfec_corr_cws;
+ fec_stats->uncorrectable_blocks.total = p->rsfec_uncorr_cws;
+ }
+ }
+ }
+}
+
static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES,
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
+ ETHTOOL_RING_USE_CQE_SIZE,
.get_link = otx2_get_link,
.get_drvinfo = otx2_get_drvinfo,
.get_strings = otx2_get_strings,
@@ -1227,6 +1331,7 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
.get_ts_info = otx2_get_ts_info,
+ .get_fec_stats = otx2_get_fec_stats,
.get_fecparam = otx2_get_fecparam,
.set_fecparam = otx2_set_fecparam,
.get_link_ksettings = otx2_get_link_ksettings,
@@ -1244,8 +1349,8 @@ static void otx2vf_get_drvinfo(struct net_device *netdev,
{
struct otx2_nic *vf = netdev_priv(netdev);
- strlcpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_VF_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(vf->pdev), sizeof(info->bus_info));
}
static void otx2vf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
@@ -1321,7 +1426,10 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev,
static const struct ethtool_ops otx2vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
- ETHTOOL_COALESCE_MAX_FRAMES,
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN |
+ ETHTOOL_RING_USE_CQE_SIZE,
.get_link = otx2_get_link,
.get_drvinfo = otx2vf_get_drvinfo,
.get_strings = otx2vf_get_strings,
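otx2_set_coalesce() above clamps the user-supplied interrupt moderation time to 1–25 us because, per the in-code comment, the CQ time-wait register is 8 bits wide and counts 100 ns ticks. A standalone sketch of that clamp and conversion; the x10 factor is an assumption derived from the "multiple of 100ns" comment rather than a value visible in this hunk:

#include <stdint.h>
#include <stdio.h>

/* Clamp a coalesce time (in us) to 1..25 us and convert to 100 ns ticks */
static uint8_t cq_time_wait_from_usecs(unsigned int usecs)
{
	if (usecs < 1)
		usecs = 1;
	else if (usecs > 25)
		usecs = 25;
	return (uint8_t)(usecs * 10);	/* 25 us -> 250 ticks, fits in 8 bits */
}

int main(void)
{
	printf("%u\n", cq_time_wait_from_usecs(3));	/* 30 */
	printf("%u\n", cq_time_wait_from_usecs(100));	/* 250 */
	return 0;
}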
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 77a13fb555fb..10e11262d48a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -18,11 +18,13 @@ struct otx2_flow {
struct ethtool_rx_flow_spec flow_spec;
struct list_head list;
u32 location;
- u16 entry;
+ u32 entry;
bool is_vf;
u8 rss_ctx_id;
+#define DMAC_FILTER_RULE BIT(0)
+#define PFC_FLOWCTRL_RULE BIT(1)
+ u16 rule_type;
int vf;
- bool dmac_filter;
};
enum dmac_req {
@@ -162,6 +164,8 @@ EXPORT_SYMBOL(otx2_alloc_mcam_entries);
static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_get_field_status_req *freq;
+ struct npc_get_field_status_rsp *frsp;
struct npc_mcam_alloc_entry_req *req;
struct npc_mcam_alloc_entry_rsp *rsp;
int vf_vlan_max_flows;
@@ -212,8 +216,29 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
OTX2_MAX_UNICAST_FLOWS;
pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
- pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
- pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
+
+ /* Check if the NPC_DMAC field is supported
+ * by the mkex profile before setting the VLAN support flags.
+ */
+ freq = otx2_mbox_alloc_msg_npc_get_field_status(&pfvf->mbox);
+ if (!freq) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ freq->field = NPC_DMAC;
+ if (otx2_sync_mbox_msg(&pfvf->mbox)) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -EINVAL;
+ }
+
+ frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp
+ (&pfvf->mbox.mbox, 0, &freq->hdr);
+
+ if (frsp->enable) {
+ pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
+ }
pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
mutex_unlock(&pfvf->mbox.lock);
@@ -230,6 +255,9 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
return 0;
}
+/* TODO: revisit the size */
+#define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32)
+
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg;
@@ -240,6 +268,12 @@ int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
if (!pfvf->flow_cfg)
return -ENOMEM;
+ pfvf->flow_cfg->dmacflt_bmap = devm_kcalloc(pfvf->dev,
+ BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
+ sizeof(long), GFP_KERNEL);
+ if (!pfvf->flow_cfg->dmacflt_bmap)
+ return -ENOMEM;
+
flow_cfg = pfvf->flow_cfg;
INIT_LIST_HEAD(&flow_cfg->flow_list);
flow_cfg->max_flows = 0;
@@ -257,6 +291,12 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
if (!pf->flow_cfg)
return -ENOMEM;
+ pf->flow_cfg->dmacflt_bmap = devm_kcalloc(pf->dev,
+ BITS_TO_LONGS(OTX2_DMAC_FLTR_BITMAP_SZ),
+ sizeof(long), GFP_KERNEL);
+ if (!pf->flow_cfg->dmacflt_bmap)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
/* Allocate bare minimum number of MCAM entries needed for
@@ -282,7 +322,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
return 0;
pf->flow_cfg->bmap_to_dmacindex =
- devm_kzalloc(pf->dev, sizeof(u8) *
+ devm_kzalloc(pf->dev, sizeof(u32) *
pf->flow_cfg->dmacflt_max_flows,
GFP_KERNEL);
@@ -353,7 +393,7 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
struct otx2_nic *pf = netdev_priv(netdev);
- if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ if (!bitmap_empty(pf->flow_cfg->dmacflt_bmap,
pf->flow_cfg->dmacflt_max_flows))
netdev_warn(netdev,
"Add %pM to CGX/RPM DMAC filters list as well\n",
@@ -436,7 +476,7 @@ int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
return 0;
if (flow_cfg->nr_flows == flow_cfg->max_flows ||
- bitmap_weight(&flow_cfg->dmacflt_bmap,
+ !bitmap_empty(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows))
return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
else
@@ -671,6 +711,11 @@ static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip6dst));
req->features |= BIT_ULL(NPC_DIP_IPV6);
}
+ if (ipv6_usr_hdr->l4_proto == IPPROTO_FRAGMENT) {
+ pkt->next_header = ipv6_usr_hdr->l4_proto;
+ pmask->next_header = ipv6_usr_mask->l4_proto;
+ req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
+ }
pkt->etype = cpu_to_be16(ETH_P_IPV6);
pmask->etype = cpu_to_be16(0xFFFF);
req->features |= BIT_ULL(NPC_ETYPE);
@@ -748,7 +793,7 @@ static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
/* NPC profile doesn't extract AH/ESP header fields */
if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
- (ah_esp_mask->tclass & ah_esp_mask->tclass))
+ (ah_esp_mask->tclass & ah_esp_hdr->tclass))
return -EOPNOTSUPP;
if (flow_type == AH_V6_FLOW)
@@ -851,10 +896,22 @@ static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
req->features |= BIT_ULL(NPC_OUTER_VID);
}
- /* Not Drop/Direct to queue but use action in default entry */
- if (fsp->m_ext.data[1] &&
- fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
- req->op = NIX_RX_ACTION_DEFAULT;
+ if (fsp->m_ext.data[1]) {
+ if (flow_type == IP_USER_FLOW) {
+ if (be32_to_cpu(fsp->h_ext.data[1]) != IPV4_FLAG_MORE)
+ return -EINVAL;
+
+ pkt->ip_flag = be32_to_cpu(fsp->h_ext.data[1]);
+ pmask->ip_flag = be32_to_cpu(fsp->m_ext.data[1]);
+ req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
+ } else if (fsp->h_ext.data[1] ==
+ cpu_to_be32(OTX2_DEFAULT_ACTION)) {
+ /* Not Drop/Direct to queue but use action
+ * in default entry
+ */
+ req->op = NIX_RX_ACTION_DEFAULT;
+ }
+ }
}
if (fsp->flow_type & FLOW_MAC_EXT &&
@@ -899,6 +956,9 @@ static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
u64 ring_cookie = flow->flow_spec.ring_cookie;
+#ifdef CONFIG_DCB
+ int vlan_prio, qidx, pfc_rule = 0;
+#endif
struct npc_install_flow_req *req;
int err, vf = 0;
@@ -940,6 +1000,24 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
mutex_unlock(&pfvf->mbox.lock);
return -EINVAL;
}
+
+#ifdef CONFIG_DCB
+ /* Identify a PFC rule if PFC is enabled and the ntuple rule matches a VLAN */
+ if (!vf && (req->features & BIT_ULL(NPC_OUTER_VID)) &&
+ pfvf->pfc_en && req->op != NIX_RX_ACTIONOP_RSS) {
+ vlan_prio = ntohs(req->packet.vlan_tci) &
+ ntohs(req->mask.vlan_tci);
+
+ /* Get the priority */
+ vlan_prio >>= 13;
+ flow->rule_type |= PFC_FLOWCTRL_RULE;
+ /* Check if PFC enabled for this priority */
+ if (pfvf->pfc_en & BIT(vlan_prio)) {
+ pfc_rule = true;
+ qidx = req->index;
+ }
+ }
+#endif
}
/* ethtool ring_cookie has (VF + 1) for VF */
@@ -951,6 +1029,12 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
/* Send message to AF */
err = otx2_sync_mbox_msg(&pfvf->mbox);
+
+#ifdef CONFIG_DCB
+ if (!err && pfc_rule)
+ otx2_update_bpid_in_rqctx(pfvf, vlan_prio, qidx, true);
+#endif
+
mutex_unlock(&pfvf->mbox.lock);
return err;
}
@@ -966,7 +1050,7 @@ static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
return -ENOMEM;
pf_mac->entry = 0;
- pf_mac->dmac_filter = true;
+ pf_mac->rule_type |= DMAC_FILTER_RULE;
pf_mac->location = pfvf->flow_cfg->max_flows;
memcpy(&pf_mac->flow_spec, &flow->flow_spec,
sizeof(struct ethtool_rx_flow_spec));
@@ -981,7 +1065,7 @@ static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
otx2_add_flow_to_list(pfvf, pf_mac);
pfvf->flow_cfg->nr_flows++;
- set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ set_bit(0, pfvf->flow_cfg->dmacflt_bmap);
return 0;
}
@@ -1031,11 +1115,11 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
eth_hdr = &flow->flow_spec.h_u.ether_spec;
/* Sync dmac filter table with updated fields */
- if (flow->dmac_filter)
+ if (flow->rule_type & DMAC_FILTER_RULE)
return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
flow->entry);
- if (bitmap_full(&flow_cfg->dmacflt_bmap,
+ if (bitmap_full(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows)) {
netdev_warn(pfvf->netdev,
"Can't insert the rule %d as max allowed dmac filters are %d\n",
@@ -1049,17 +1133,17 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
}
/* Install PF mac address to DMAC filter list */
- if (!test_bit(0, &flow_cfg->dmacflt_bmap))
+ if (!test_bit(0, flow_cfg->dmacflt_bmap))
otx2_add_flow_with_pfmac(pfvf, flow);
- flow->dmac_filter = true;
- flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
+ flow->rule_type |= DMAC_FILTER_RULE;
+ flow->entry = find_first_zero_bit(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows);
fsp->location = flow_cfg->max_flows + flow->entry;
flow->flow_spec.location = fsp->location;
flow->location = fsp->location;
- set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ set_bit(flow->entry, flow_cfg->dmacflt_bmap);
otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
} else {
@@ -1120,16 +1204,17 @@ static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
bool found = false;
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
- if (iter->dmac_filter && iter->entry == 0) {
+ if ((iter->rule_type & DMAC_FILTER_RULE) && iter->entry == 0) {
eth_hdr = &iter->flow_spec.h_u.ether_spec;
if (req == DMAC_ADDR_DEL) {
otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
0);
- clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ clear_bit(0, pfvf->flow_cfg->dmacflt_bmap);
found = true;
} else {
ether_addr_copy(eth_hdr->h_dest,
pfvf->netdev->dev_addr);
+
otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
}
break;
@@ -1156,7 +1241,7 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
if (!flow)
return -ENOENT;
- if (flow->dmac_filter) {
+ if (flow->rule_type & DMAC_FILTER_RULE) {
struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
/* user not allowed to remove dmac filter with interface mac */
@@ -1165,15 +1250,22 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
flow->entry);
- clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ clear_bit(flow->entry, flow_cfg->dmacflt_bmap);
/* If all dmac filters are removed delete macfilter with
* interface mac address and configure CGX/RPM block in
* promiscuous mode
*/
- if (bitmap_weight(&flow_cfg->dmacflt_bmap,
+ if (bitmap_weight(flow_cfg->dmacflt_bmap,
flow_cfg->dmacflt_max_flows) == 1)
otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
} else {
+#ifdef CONFIG_DCB
+ if (flow->rule_type & PFC_FLOWCTRL_RULE)
+ otx2_update_bpid_in_rqctx(pfvf, 0,
+ flow->flow_spec.ring_cookie,
+ false);
+#endif
+
err = otx2_remove_flow_msg(pfvf, flow->entry, false);
}
@@ -1383,7 +1475,7 @@ void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
struct ethhdr *eth_hdr;
list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
- if (iter->dmac_filter) {
+ if (iter->rule_type & DMAC_FILTER_RULE) {
eth_hdr = &iter->flow_spec.h_u.ether_spec;
otx2_dmacflt_add(pf, eth_hdr->h_dest,
iter->entry);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 1e0d0c9c1dac..18284ad75157 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -15,6 +15,7 @@
#include <net/ip.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
+#include <linux/bitfield.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -394,7 +395,12 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
dst_mdev->msg_size = mbox_hdr->msg_size;
dst_mdev->num_msgs = num_msgs;
err = otx2_sync_mbox_msg(dst_mbox);
- if (err) {
+ /* Error code -EIO indicates there is a communication failure
* to the AF. The rest of the error codes indicate that the AF processed
* the VF messages and set the error codes in the response messages
* (if any), so simply forward the responses to the VF.
+ */
+ if (err == -EIO) {
dev_warn(pf->dev,
"AF not responding to VF%d messages\n", vf);
/* restore PF mbase and exit */
@@ -853,6 +859,15 @@ static void otx2_handle_link_event(struct otx2_nic *pf)
}
}
+int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
+ struct mcs_intr_info *event,
+ struct msg_rsp *rsp)
+{
+ cn10k_handle_mcs_event(pf, event);
+
+ return 0;
+}
+
int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
struct cgx_link_info_msg *msg,
struct msg_rsp *rsp)
@@ -912,6 +927,7 @@ static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
return err; \
}
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
break;
default:
@@ -1115,7 +1131,7 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
- if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap,
pf->flow_cfg->dmacflt_max_flows))
netdev_warn(pf->netdev,
"CGX/RPM internal loopback might not work as DMAC filters are active\n");
@@ -1156,6 +1172,59 @@ int otx2_set_real_num_queues(struct net_device *netdev,
}
EXPORT_SYMBOL(otx2_set_real_num_queues);
+static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = {
+ "NIX_SQOPERR_OOR",
+ "NIX_SQOPERR_CTX_FAULT",
+ "NIX_SQOPERR_CTX_POISON",
+ "NIX_SQOPERR_DISABLED",
+ "NIX_SQOPERR_SIZE_ERR",
+ "NIX_SQOPERR_OFLOW",
+ "NIX_SQOPERR_SQB_NULL",
+ "NIX_SQOPERR_SQB_FAULT",
+ "NIX_SQOPERR_SQE_SZ_ZERO",
+};
+
+static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
+ "NIX_MNQERR_SQ_CTX_FAULT",
+ "NIX_MNQERR_SQ_CTX_POISON",
+ "NIX_MNQERR_SQB_FAULT",
+ "NIX_MNQERR_SQB_POISON",
+ "NIX_MNQERR_TOTAL_ERR",
+ "NIX_MNQERR_LSO_ERR",
+ "NIX_MNQERR_CQ_QUERY_ERR",
+ "NIX_MNQERR_MAX_SQE_SIZE_ERR",
+ "NIX_MNQERR_MAXLEN_ERR",
+ "NIX_MNQERR_SQE_SIZEM1_ZERO",
+};
+
+static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = {
+ "NIX_SND_STATUS_GOOD",
+ "NIX_SND_STATUS_SQ_CTX_FAULT",
+ "NIX_SND_STATUS_SQ_CTX_POISON",
+ "NIX_SND_STATUS_SQB_FAULT",
+ "NIX_SND_STATUS_SQB_POISON",
+ "NIX_SND_STATUS_HDR_ERR",
+ "NIX_SND_STATUS_EXT_ERR",
+ "NIX_SND_STATUS_JUMP_FAULT",
+ "NIX_SND_STATUS_JUMP_POISON",
+ "NIX_SND_STATUS_CRC_ERR",
+ "NIX_SND_STATUS_IMM_ERR",
+ "NIX_SND_STATUS_SG_ERR",
+ "NIX_SND_STATUS_MEM_ERR",
+ "NIX_SND_STATUS_INVALID_SUBDC",
+ "NIX_SND_STATUS_SUBDC_ORDER_ERR",
+ "NIX_SND_STATUS_DATA_FAULT",
+ "NIX_SND_STATUS_DATA_POISON",
+ "NIX_SND_STATUS_NPC_DROP_ACTION",
+ "NIX_SND_STATUS_LOCK_VIOL",
+ "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR",
+ "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR",
+ "NIX_SND_STATUS_NPC_MCAST_ABORT",
+ "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
+ "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
+ "NIX_SND_STATUS_SEND_STATS_ERR",
+};
+
static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{
struct otx2_nic *pf = data;
@@ -1189,46 +1258,67 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
/* SQ */
for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
+ u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
+ u8 sq_op_err_code, mnq_err_code, snd_err_code;
+
+ /* The debug registers below capture the first error corresponding to
+ * each register. We don't have to check against the SQ qid as
+ * these are fatal errors.
+ */
+
ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
val = otx2_atomic64_add((qidx << 44), ptr);
otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
(val & NIX_SQINT_BITS));
- if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
- continue;
-
if (val & BIT_ULL(42)) {
netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
qidx, otx2_read64(pf, NIX_LF_ERR_INT));
- } else {
- if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
- netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
- qidx,
- otx2_read64(pf,
- NIX_LF_SQ_OP_ERR_DBG));
- otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
- BIT_ULL(44));
- }
- if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
- netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n",
- qidx,
- otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
- otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
- BIT_ULL(44));
- }
- if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
- netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
- qidx,
- otx2_read64(pf,
- NIX_LF_SEND_ERR_DBG));
- otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
- BIT_ULL(44));
- }
- if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
- netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
- qidx);
+ goto done;
}
+ sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG);
+ if (!(sq_op_err_dbg & BIT(44)))
+ goto chk_mnq_err_dbg;
+
+ sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
+ netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n",
+ qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]);
+
+ otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
+
+ if (sq_op_err_code == NIX_SQOPERR_SQB_NULL)
+ goto chk_mnq_err_dbg;
+
+ /* Err is not NIX_SQOPERR_SQB_NULL; call the AQ function to read the SQ structure.
+ * TODO: but we are in IRQ context. How to call mbox functions, which can sleep?
+ */
+
+chk_mnq_err_dbg:
+ mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG);
+ if (!(mnq_err_dbg & BIT(44)))
+ goto chk_snd_err_dbg;
+
+ mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
+ netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n",
+ qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]);
+ otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
+
+chk_snd_err_dbg:
+ snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
+ if (snd_err_dbg & BIT(44)) {
+ snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
+ netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n",
+ qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]);
+ otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
+ }
+
+done:
+ /* Print values and reset */
+ if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
+ netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
+ qidx);
+
schedule_work(&pf->reset_task);
}
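For reference, the three NIX_LF_*_ERR_DBG registers polled in the hunk above share one layout: bit 44 reports that an error was captured and bits 7:0 carry the error code that indexes the string tables added earlier in this file. A standalone sketch of that decode, in plain user-space C with illustrative names (not driver code):

#include <stdint.h>
#include <stdio.h>

#define DBG_VALID_BIT	(1ULL << 44)	/* an error was captured */
#define DBG_CODE_MASK	0xffULL		/* error code lives in bits 7:0 */

/* Decode one NIX_LF_*_ERR_DBG value against a name table of 'n' entries.
 * Returns the code, or -1 when the valid bit is clear.
 */
static int decode_err_dbg(uint64_t reg, const char * const *names, int n)
{
	int code;

	if (!(reg & DBG_VALID_BIT))
		return -1;

	code = (int)(reg & DBG_CODE_MASK);
	printf("err=%s (%d)\n", code < n ? names[code] : "UNKNOWN", code);
	return code;
}

int main(void)
{
	static const char * const sqop[] = { "OOR", "CTX_FAULT", "CTX_POISON" };

	/* Valid bit set, code 1 -> "CTX_FAULT" */
	decode_err_dbg(DBG_VALID_BIT | 0x1, sqop, 3);
	return 0;
}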
@@ -1249,6 +1339,7 @@ static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
/* Schedule NAPI */
+ pf->napi_events++;
napi_schedule_irqoff(&cq_poll->napi);
return IRQ_HANDLED;
@@ -1262,6 +1353,7 @@ static void otx2_disable_napi(struct otx2_nic *pf)
for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
cq_poll = &qset->napi[qidx];
+ cancel_work_sync(&cq_poll->dim.work);
napi_disable(&cq_poll->napi);
netif_napi_del(&cq_poll->napi);
}
@@ -1306,6 +1398,9 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
int total_size;
int rbuf_size;
+ if (pf->hw.rbuf_len)
+ return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
+
/* The data transferred by NIX to memory consists of actual packet
* plus additional data which has timestamp and/or EDSA/HIGIG2
* headers if interface is configured in corresponding modes.
@@ -1379,18 +1474,40 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
goto err_free_sq_ptrs;
}
+#ifdef CONFIG_DCB
+ if (pf->pfc_en) {
+ err = otx2_pfc_txschq_alloc(pf);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_sq_ptrs;
+ }
+ }
+#endif
+
err = otx2_config_nix_queues(pf);
if (err) {
mutex_unlock(&mbox->lock);
goto err_free_txsch;
}
+
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
- err = otx2_txschq_config(pf, lvl);
+ err = otx2_txschq_config(pf, lvl, 0, false);
+ if (err) {
+ mutex_unlock(&mbox->lock);
+ goto err_free_nix_queues;
+ }
+ }
+
+#ifdef CONFIG_DCB
+ if (pf->pfc_en) {
+ err = otx2_pfc_txschq_config(pf);
if (err) {
mutex_unlock(&mbox->lock);
goto err_free_nix_queues;
}
}
+#endif
+
mutex_unlock(&mbox->lock);
return err;
@@ -1445,6 +1562,11 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
if (err)
dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
+#ifdef CONFIG_DCB
+ if (pf->pfc_en)
+ otx2_pfc_txschq_stop(pf);
+#endif
+
mutex_lock(&mbox->lock);
/* Disable backpressure */
if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
@@ -1538,6 +1660,24 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
mutex_unlock(&pf->mbox.lock);
}
+static void otx2_dim_work(struct work_struct *w)
+{
+ struct dim_cq_moder cur_moder;
+ struct otx2_cq_poll *cq_poll;
+ struct otx2_nic *pfvf;
+ struct dim *dim;
+
+ dim = container_of(w, struct dim, work);
+ cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ cq_poll = container_of(dim, struct otx2_cq_poll, dim);
+ pfvf = (struct otx2_nic *)cq_poll->dev;
+ pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ?
+ CQ_TIMER_THRESH_MAX : cur_moder.usec;
+ pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
+ NAPI_POLL_WEIGHT : cur_moder.pkts;
+ dim->state = DIM_START_MEASURE;
+}
+
int otx2_open(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
@@ -1604,8 +1744,9 @@ int otx2_open(struct net_device *netdev)
cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
cq_poll->dev = (void *)pf;
- netif_napi_add(netdev, &cq_poll->napi,
- otx2_napi_handler, NAPI_POLL_WEIGHT);
+ cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
+ netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler);
napi_enable(&cq_poll->napi);
}
@@ -1689,21 +1830,27 @@ int otx2_open(struct net_device *netdev)
if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
otx2_handle_link_event(pf);
- /* Restore pause frame settings */
- otx2_config_pause_frm(pf);
-
/* Install DMAC Filters */
if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
otx2_dmacflt_reinstall_flows(pf);
err = otx2_rxtx_enable(pf, true);
- if (err)
+ /* If an mbox communication error happens at this point, the interface
+ * ends up in the down state while the hardware MCAM entries are still
+ * enabled to receive packets. Hence disable packet I/O.
+ */
+ if (err == -EIO)
+ goto err_disable_rxtx;
+ else if (err)
goto err_tx_stop_queues;
otx2_do_set_rx_mode(pf);
return 0;
+err_disable_rxtx:
+ otx2_rxtx_enable(pf, false);
err_tx_stop_queues:
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
@@ -1713,7 +1860,6 @@ err_free_cints:
vec = pci_irq_vector(pf->pdev,
pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
- synchronize_irq(vec);
free_irq(vec, pf);
err_disable_napi:
otx2_disable_napi(pf);
@@ -1757,7 +1903,6 @@ int otx2_stop(struct net_device *netdev)
vec = pci_irq_vector(pf->pdev,
pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
- synchronize_irq(vec);
free_irq(vec, pf);
/* Cleanup CQ NAPI and IRQ */
@@ -1791,8 +1936,7 @@ int otx2_stop(struct net_device *netdev)
kfree(qset->rq);
kfree(qset->napi);
/* Do not clear RQ/SQ ringsize settings */
- memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0,
- sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
+ memset_startat(qset, 0, sqe_cnt);
return 0;
}
EXPORT_SYMBOL(otx2_stop);
@@ -1829,6 +1973,30 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+#ifdef CONFIG_DCB
+ struct otx2_nic *pf = netdev_priv(netdev);
+ u8 vlan_prio;
+#endif
+
+#ifdef CONFIG_DCB
+ if (!skb_vlan_tag_present(skb))
+ goto pick_tx;
+
+ vlan_prio = skb->vlan_tci >> 13;
+ if ((vlan_prio > pf->hw.tx_queues - 1) ||
+ !pf->pfc_alloc_status[vlan_prio])
+ goto pick_tx;
+
+ return vlan_prio;
+
+pick_tx:
+#endif
+ return netdev_pick_tx(netdev, skb, NULL);
+}
+
static netdev_features_t otx2_fix_features(struct net_device *dev,
netdev_features_t features)
{
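For reference, the ndo_select_queue hook added just above maps the 802.1Q PCP bits of an offloaded VLAN tag straight to a TX queue whenever PFC has a queue allocated for that priority, and otherwise falls back to netdev_pick_tx(). A standalone sketch of the TCI arithmetic it relies on (illustrative user-space C, not driver code):

#include <stdint.h>
#include <stdio.h>

/* 802.1Q TCI layout: [15:13] PCP, [12] DEI, [11:0] VLAN ID */
static unsigned int tci_to_prio(uint16_t tci)
{
	return tci >> 13;	/* same shift as otx2_select_queue() */
}

int main(void)
{
	uint16_t tci = (5u << 13) | 100;	/* priority 5, VLAN 100 */

	printf("vlan=%u prio=%u\n", tci & 0xfff, tci_to_prio(tci));
	return 0;
}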
@@ -1858,9 +2026,7 @@ static int otx2_set_features(struct net_device *netdev,
netdev_features_t features)
{
netdev_features_t changed = features ^ netdev->features;
- bool ntuple = !!(features & NETIF_F_NTUPLE);
struct otx2_nic *pf = netdev_priv(netdev);
- bool tc = !!(features & NETIF_F_HW_TC);
if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
return otx2_cgx_config_loopback(pf,
@@ -1870,46 +2036,7 @@ static int otx2_set_features(struct net_device *netdev,
return otx2_enable_rxvlan(pf,
features & NETIF_F_HW_VLAN_CTAG_RX);
- if ((changed & NETIF_F_NTUPLE) && !ntuple)
- otx2_destroy_ntuple_flows(pf);
-
- if ((changed & NETIF_F_NTUPLE) && ntuple) {
- if (!pf->flow_cfg->max_flows) {
- netdev_err(netdev,
- "Can't enable NTUPLE, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
-
- if ((changed & NETIF_F_HW_TC) && tc) {
- if (!pf->flow_cfg->max_flows) {
- netdev_err(netdev,
- "Can't enable TC, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
-
- if ((changed & NETIF_F_HW_TC) && !tc &&
- pf->flow_cfg && pf->flow_cfg->nr_flows) {
- netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
- return -EBUSY;
- }
-
- if ((changed & NETIF_F_NTUPLE) && ntuple &&
- (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
- netdev_err(netdev,
- "Can't enable NTUPLE when TC is active, disable TC and retry\n");
- return -EINVAL;
- }
-
- if ((changed & NETIF_F_HW_TC) && tc &&
- (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
- netdev_err(netdev,
- "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
- return -EINVAL;
- }
-
- return 0;
+ return otx2_handle_ntuple_tc_features(netdev, features);
}
static void otx2_reset_task(struct work_struct *work)
@@ -2002,14 +2129,21 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
+ if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
+ pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;
+
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
otx2_config_hw_tx_tstamp(pfvf, false);
break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
+ return -ERANGE;
+ pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
+ schedule_delayed_work(&pfvf->ptp->synctstamp_work,
+ msecs_to_jiffies(500));
+ fallthrough;
case HWTSTAMP_TX_ON:
otx2_config_hw_tx_tstamp(pfvf, true);
break;
@@ -2387,10 +2521,13 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
/* Network stack and XDP shared same rx queues.
* Use separate tx queues for XDP and network stack.
*/
- if (pf->xdp_prog)
+ if (pf->xdp_prog) {
pf->hw.xdp_queues = pf->hw.rx_queues;
- else
+ xdp_features_set_redirect_target(dev, false);
+ } else {
pf->hw.xdp_queues = 0;
+ xdp_features_clear_redirect_target(dev);
+ }
pf->hw.tot_tx_queues += pf->hw.xdp_queues;
@@ -2468,6 +2605,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
.ndo_start_xmit = otx2_xmit,
+ .ndo_select_queue = otx2_select_queue,
.ndo_fix_features = otx2_fix_features,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2_change_mtu,
@@ -2624,6 +2762,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->tx_queues = qcount;
hw->tot_tx_queues = qcount;
hw->max_queues = qcount;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ /* Use CQE of 128 byte descriptor size by default */
+ hw->xqe_size = 128;
num_vec = pci_msix_vec_count(pdev);
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
@@ -2720,6 +2861,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_ptp_destroy;
+ err = cn10k_mcs_init(pf);
+ if (err)
+ goto err_del_mcam_entries;
+
if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
netdev->hw_features |= NETIF_F_NTUPLE;
@@ -2741,10 +2886,11 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
- netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+ netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(pf);
@@ -2752,7 +2898,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_del_mcam_entries;
+ goto err_mcs_free;
}
err = otx2_wq_init(pf);
@@ -2777,9 +2923,11 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
- /* Enable pause frames by default */
- pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
- pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_pf_sriov_init;
+#endif
return 0;
@@ -2789,6 +2937,8 @@ err_mcam_flow_del:
otx2_mcam_flow_del(pf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_mcs_free:
+ cn10k_mcs_free(pf);
err_del_mcam_entries:
otx2_mcam_flow_del(pf);
err_ptp_destroy:
@@ -2924,12 +3074,28 @@ static void otx2_remove(struct pci_dev *pdev)
if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
otx2_config_hw_rx_tstamp(pf, false);
+ /* Disable 802.3x pause frames */
+ if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
+ (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(pf);
+ }
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (pf->pfc_en) {
+ pf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(pf);
+ }
+#endif
cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
otx2_unregister_dl(pf);
unregister_netdev(netdev);
+ cn10k_mcs_free(pf);
otx2_sriov_disable(pf->pdev);
otx2_sriov_vfcfg_cleanup(pf);
if (pf->otx2_wq)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 61c20907315f..896b2f9bac34 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -10,6 +10,33 @@
#include "otx2_common.h"
#include "otx2_ptp.h"
+static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp)
+{
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -46,32 +73,28 @@ static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
-static u64 ptp_cc_read(const struct cyclecounter *cc)
+static int ptp_extts_on(struct otx2_ptp *ptp, int on)
{
- struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
struct ptp_req *req;
- struct ptp_rsp *rsp;
- int err;
if (!ptp->nic)
- return 0;
+ return -ENODEV;
req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
if (!req)
- return 0;
+ return -ENOMEM;
- req->op = PTP_OP_GET_CLOCK;
+ req->op = PTP_OP_EXTTS_ON;
+ req->extts_on = on;
- err = otx2_sync_mbox_msg(&ptp->nic->mbox);
- if (err)
- return 0;
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
+}
- rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
- &req->hdr);
- if (IS_ERR(rsp))
- return 0;
+static u64 ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
- return rsp->clk;
+ return otx2_ptp_get_clock(ptp);
}
static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
@@ -101,6 +124,15 @@ static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
return rsp->clk;
}
+static void otx2_get_ptpclock(struct otx2_ptp *ptp, u64 *tstamp)
+{
+ struct otx2_nic *pfvf = ptp->nic;
+
+ mutex_lock(&pfvf->mbox.lock);
+ *tstamp = timecounter_read(&ptp->time_counter);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -119,14 +151,10 @@ static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
- struct otx2_nic *pfvf = ptp->nic;
- u64 nsec;
+ u64 tstamp;
- mutex_lock(&pfvf->mbox.lock);
- nsec = timecounter_read(&ptp->time_counter);
- mutex_unlock(&pfvf->mbox.lock);
-
- *ts = ns_to_timespec64(nsec);
+ otx2_get_ptpclock(ptp, &tstamp);
+ *ts = ns_to_timespec64(tstamp);
return 0;
}
@@ -178,8 +206,6 @@ static void otx2_ptp_extts_check(struct work_struct *work)
event.index = 0;
event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp);
ptp_clock_event(ptp->ptp_clock, &event);
- ptp->last_extts = tstmp;
-
new_thresh = tstmp % 500000000;
if (ptp->thresh != new_thresh) {
mutex_lock(&ptp->nic->mbox.lock);
@@ -187,10 +213,28 @@ static void otx2_ptp_extts_check(struct work_struct *work)
mutex_unlock(&ptp->nic->mbox.lock);
ptp->thresh = new_thresh;
}
+ ptp->last_extts = tstmp;
}
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
}
+static void otx2_sync_tstamp(struct work_struct *work)
+{
+ struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
+ synctstamp_work.work);
+ struct otx2_nic *pfvf = ptp->nic;
+ u64 tstamp;
+
+ mutex_lock(&pfvf->mbox.lock);
+ tstamp = otx2_ptp_get_clock(ptp);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ ptp->tstamp = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
+ ptp->base_ns = tstamp % NSEC_PER_SEC;
+
+ schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(250));
+}
+
static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
struct ptp_clock_request *rq, int on)
{
@@ -207,10 +251,13 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
rq->extts.index);
if (pin < 0)
return -EBUSY;
- if (on)
+ if (on) {
+ ptp_extts_on(ptp, on);
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
- else
+ } else {
+ ptp_extts_on(ptp, on);
cancel_delayed_work_sync(&ptp->extts_work);
+ }
return 0;
default:
break;
@@ -294,6 +341,16 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
goto error;
}
+ if (is_dev_otx2(pfvf->pdev)) {
+ ptp_ptr->convert_rx_ptp_tstmp = &otx2_ptp_convert_rx_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &otx2_ptp_convert_tx_timestamp;
+ } else {
+ ptp_ptr->convert_rx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ ptp_ptr->convert_tx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
+ }
+
+ INIT_DELAYED_WORK(&ptp_ptr->synctstamp_work, otx2_sync_tstamp);
+
pfvf->ptp = ptp_ptr;
error:
@@ -308,6 +365,8 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
if (!ptp)
return;
+ cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+
ptp_clock_unregister(ptp->ptp_clock);
kfree(ptp);
pfvf->ptp = NULL;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
index 6ff284211d7b..7ff41927ceaf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
@@ -8,6 +8,21 @@
#ifndef OTX2_PTP_H
#define OTX2_PTP_H
+static inline u64 otx2_ptp_convert_rx_timestamp(u64 timestamp)
+{
+ return be64_to_cpu(*(__be64 *)&timestamp);
+}
+
+static inline u64 otx2_ptp_convert_tx_timestamp(u64 timestamp)
+{
+ return timestamp;
+}
+
+static inline u64 cn10k_ptp_convert_timestamp(u64 timestamp)
+{
+ return ((timestamp >> 32) * NSEC_PER_SEC) + (timestamp & 0xFFFFFFFFUL);
+}
+
int otx2_ptp_init(struct otx2_nic *pfvf);
void otx2_ptp_destroy(struct otx2_nic *pfvf);
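cn10k_ptp_convert_timestamp() above assumes the CN10K hardware packs whole seconds into the upper 32 bits of the raw timestamp and nanoseconds into the lower 32 bits, and flattens that into a plain nanosecond count. A standalone sketch of the same arithmetic with a sample value (illustrative, outside the driver):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Same conversion as cn10k_ptp_convert_timestamp(): (sec << 32 | nsec) -> nsec */
static uint64_t cn10k_ts_to_ns(uint64_t ts)
{
	return ((ts >> 32) * NSEC_PER_SEC) + (ts & 0xFFFFFFFFULL);
}

int main(void)
{
	uint64_t packed = (2ULL << 32) | 500;	/* 2 s + 500 ns as packed by hardware */

	printf("%llu ns\n", (unsigned long long)cn10k_ts_to_ns(packed));	/* 2000000500 */
	return 0;
}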
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
index 4bbd12ff26e6..fa37b9f312ca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
@@ -236,8 +236,15 @@ struct nix_sqe_sg_s {
/* NIX send memory subdescriptor structure */
struct nix_sqe_mem_s {
- u64 offset : 16; /* W0 */
- u64 rsvd_51_16 : 36;
+ u64 start_offset : 8;
+ u64 rsvd_11_8 : 4;
+ u64 rsvd_12 : 1;
+ u64 udp_csum_crt : 1;
+ u64 update64 : 1;
+ u64 rsvd_15_16 : 1;
+ u64 base_ns : 32;
+ u64 step_type : 1;
+ u64 rsvd_51_49 : 3;
u64 per_lso_seg : 1;
u64 wmem : 1;
u64 dsz : 2;
@@ -274,4 +281,61 @@ enum nix_sqint_e {
BIT_ULL(NIX_SQINT_SEND_ERR) | \
BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
+enum nix_sqoperr_e {
+ NIX_SQOPERR_OOR = 0,
+ NIX_SQOPERR_CTX_FAULT = 1,
+ NIX_SQOPERR_CTX_POISON = 2,
+ NIX_SQOPERR_DISABLED = 3,
+ NIX_SQOPERR_SIZE_ERR = 4,
+ NIX_SQOPERR_OFLOW = 5,
+ NIX_SQOPERR_SQB_NULL = 6,
+ NIX_SQOPERR_SQB_FAULT = 7,
+ NIX_SQOPERR_SQE_SZ_ZERO = 8,
+ NIX_SQOPERR_MAX,
+};
+
+enum nix_mnqerr_e {
+ NIX_MNQERR_SQ_CTX_FAULT = 0,
+ NIX_MNQERR_SQ_CTX_POISON = 1,
+ NIX_MNQERR_SQB_FAULT = 2,
+ NIX_MNQERR_SQB_POISON = 3,
+ NIX_MNQERR_TOTAL_ERR = 4,
+ NIX_MNQERR_LSO_ERR = 5,
+ NIX_MNQERR_CQ_QUERY_ERR = 6,
+ NIX_MNQERR_MAX_SQE_SIZE_ERR = 7,
+ NIX_MNQERR_MAXLEN_ERR = 8,
+ NIX_MNQERR_SQE_SIZEM1_ZERO = 9,
+ NIX_MNQERR_MAX,
+};
+
+enum nix_snd_status_e {
+ NIX_SND_STATUS_GOOD = 0x0,
+ NIX_SND_STATUS_SQ_CTX_FAULT = 0x1,
+ NIX_SND_STATUS_SQ_CTX_POISON = 0x2,
+ NIX_SND_STATUS_SQB_FAULT = 0x3,
+ NIX_SND_STATUS_SQB_POISON = 0x4,
+ NIX_SND_STATUS_HDR_ERR = 0x5,
+ NIX_SND_STATUS_EXT_ERR = 0x6,
+ NIX_SND_STATUS_JUMP_FAULT = 0x7,
+ NIX_SND_STATUS_JUMP_POISON = 0x8,
+ NIX_SND_STATUS_CRC_ERR = 0x9,
+ NIX_SND_STATUS_IMM_ERR = 0x10,
+ NIX_SND_STATUS_SG_ERR = 0x11,
+ NIX_SND_STATUS_MEM_ERR = 0x12,
+ NIX_SND_STATUS_INVALID_SUBDC = 0x13,
+ NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14,
+ NIX_SND_STATUS_DATA_FAULT = 0x15,
+ NIX_SND_STATUS_DATA_POISON = 0x16,
+ NIX_SND_STATUS_NPC_DROP_ACTION = 0x17,
+ NIX_SND_STATUS_LOCK_VIOL = 0x18,
+ NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19,
+ NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20,
+ NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21,
+ NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22,
+ NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23,
+ NIX_SND_STATUS_SEND_MEM_FAULT = 0x24,
+ NIX_SND_STATUS_SEND_STATS_ERR = 0x25,
+ NIX_SND_STATUS_MAX,
+};
+
#endif /* OTX2_STRUCT_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 626961a41089..8392f63e433f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -28,6 +28,9 @@
#define MAX_RATE_EXPONENT 0x0FULL
#define MAX_RATE_MANTISSA 0xFFULL
+#define CN10K_MAX_BURST_MANTISSA 0x7FFFULL
+#define CN10K_MAX_BURST_SIZE 8453888ULL
+
/* Bitfields in NIX_TLX_PIR register */
#define TLX_RATE_MANTISSA GENMASK_ULL(8, 1)
#define TLX_RATE_EXPONENT GENMASK_ULL(12, 9)
@@ -35,6 +38,9 @@
#define TLX_BURST_MANTISSA GENMASK_ULL(36, 29)
#define TLX_BURST_EXPONENT GENMASK_ULL(40, 37)
+#define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29)
+#define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44)
+
struct otx2_tc_flow_stats {
u64 bytes;
u64 pkts;
@@ -58,7 +64,7 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
- if (!nic->flow_cfg->max_flows || is_otx2_vf(nic->pcifunc))
+ if (!nic->flow_cfg->max_flows)
return 0;
/* Max flows changed, free the existing bitmap */
@@ -77,33 +83,42 @@ int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic)
}
EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap);
-static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
- u32 *burst_mantissa)
+static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
+ u32 *burst_exp, u32 *burst_mantissa)
{
+ int max_burst, max_mantissa;
unsigned int tmp;
+ if (is_dev_otx2(nic->pdev)) {
+ max_burst = MAX_BURST_SIZE;
+ max_mantissa = MAX_BURST_MANTISSA;
+ } else {
+ max_burst = CN10K_MAX_BURST_SIZE;
+ max_mantissa = CN10K_MAX_BURST_MANTISSA;
+ }
+
/* Burst is calculated as
* ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
* Max supported burst size is 130,816 bytes.
*/
- burst = min_t(u32, burst, MAX_BURST_SIZE);
+ burst = min_t(u32, burst, max_burst);
if (burst) {
*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
tmp = burst - rounddown_pow_of_two(burst);
- if (burst < MAX_BURST_MANTISSA)
+ if (burst < max_mantissa)
*burst_mantissa = tmp * 2;
else
*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
} else {
*burst_exp = MAX_BURST_EXPONENT;
- *burst_mantissa = MAX_BURST_MANTISSA;
+ *burst_mantissa = max_mantissa;
}
}
-static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
+static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
u32 *mantissa, u32 *div_exp)
{
- unsigned int tmp;
+ u64 tmp;
/* Rate calculation by hardware
*
@@ -132,21 +147,44 @@ static void otx2_get_egress_rate_cfg(u32 maxrate, u32 *exp,
}
}
-static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 maxrate)
+static u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
+ u64 maxrate, u32 burst)
{
- struct otx2_hw *hw = &nic->hw;
- struct nix_txschq_config *req;
u32 burst_exp, burst_mantissa;
u32 exp, mantissa, div_exp;
+ u64 regval = 0;
+
+ /* Get exponent and mantissa values from the desired rate */
+ otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
+ otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
+
+ if (is_dev_otx2(nic->pdev)) {
+ regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
+ FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+ FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+ FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+ FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+ } else {
+ regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
+ FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
+ FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
+ FIELD_PREP(TLX_RATE_EXPONENT, exp) |
+ FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+ }
+
+ return regval;
+}
+
+static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
+ u32 burst, u64 maxrate)
+{
+ struct otx2_hw *hw = &nic->hw;
+ struct nix_txschq_config *req;
int txschq, err;
/* All SQs share the same TL4, so pick the first scheduler */
txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
- /* Get exponent and mantissa values from the desired rate */
- otx2_get_egress_burst_cfg(burst, &burst_exp, &burst_mantissa);
- otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
-
mutex_lock(&nic->mbox.lock);
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
if (!req) {
@@ -157,11 +195,7 @@ static int otx2_set_matchall_egress_rate(struct otx2_nic *nic, u32 burst, u32 ma
req->lvl = NIX_TXSCH_LVL_TL4;
req->num_regs = 1;
req->reg[0] = NIX_AF_TL4X_PIR(txschq);
- req->regval[0] = FIELD_PREP(TLX_BURST_EXPONENT, burst_exp) |
- FIELD_PREP(TLX_BURST_MANTISSA, burst_mantissa) |
- FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
- FIELD_PREP(TLX_RATE_EXPONENT, exp) |
- FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
+ req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);
err = otx2_sync_mbox_msg(&nic->mbox);
mutex_unlock(&nic->mbox.lock);
@@ -190,13 +224,47 @@ static int otx2_tc_validate_flow(struct otx2_nic *nic,
return 0;
}
+static int otx2_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
struct tc_cls_matchall_offload *cls)
{
struct netlink_ext_ack *extack = cls->common.extack;
struct flow_action *actions = &cls->rule->action;
struct flow_action_entry *entry;
- u32 rate;
+ u64 rate;
int err;
err = otx2_tc_validate_flow(nic, actions, extack);
@@ -212,13 +280,17 @@ static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
entry = &cls->rule->action.entries[0];
switch (entry->id) {
case FLOW_ACTION_POLICE:
+ err = otx2_policer_validate(&cls->rule->action, entry, extack);
+ if (err)
+ return err;
+
if (entry->police.rate_pkt_ps) {
NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
return -EOPNOTSUPP;
}
/* Convert bytes per second to Mbps */
rate = entry->police.rate_bytes_ps * 8;
- rate = max_t(u32, rate / 1000000, 1);
+ rate = max_t(u64, rate / 1000000, 1);
err = otx2_set_matchall_egress_rate(nic, entry->police.burst, rate);
if (err)
return err;
@@ -315,6 +387,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
u8 nr_police = 0;
bool pps = false;
u64 rate;
+ int err;
int i;
if (!flow_action_has_entries(flow_action)) {
@@ -355,6 +428,10 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
return -EOPNOTSUPP;
}
+ err = otx2_policer_validate(flow_action, act, extack);
+ if (err)
+ return err;
+
if (act->police.rate_bytes_ps > 0) {
rate = act->police.rate_bytes_ps * 8;
burst = act->police.burst;
@@ -455,6 +532,31 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later");
+ return -EOPNOTSUPP;
+ }
+
+ if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ if (ntohs(flow_spec->etype) == ETH_P_IP) {
+ flow_spec->ip_flag = IPV4_FLAG_MORE;
+ flow_mask->ip_flag = IPV4_FLAG_MORE;
+ req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
+ } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
+ flow_spec->next_header = IPPROTO_FRAGMENT;
+ flow_mask->next_header = 0xff;
+ req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 and IPv6");
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
@@ -571,21 +673,27 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
flow_spec->dport = match.key->dst;
flow_mask->dport = match.mask->dst;
- if (ip_proto == IPPROTO_UDP)
- req->features |= BIT_ULL(NPC_DPORT_UDP);
- else if (ip_proto == IPPROTO_TCP)
- req->features |= BIT_ULL(NPC_DPORT_TCP);
- else if (ip_proto == IPPROTO_SCTP)
- req->features |= BIT_ULL(NPC_DPORT_SCTP);
+
+ if (flow_mask->dport) {
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_DPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_DPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_DPORT_SCTP);
+ }
flow_spec->sport = match.key->src;
flow_mask->sport = match.mask->src;
- if (ip_proto == IPPROTO_UDP)
- req->features |= BIT_ULL(NPC_SPORT_UDP);
- else if (ip_proto == IPPROTO_TCP)
- req->features |= BIT_ULL(NPC_SPORT_TCP);
- else if (ip_proto == IPPROTO_SCTP)
- req->features |= BIT_ULL(NPC_SPORT_SCTP);
+
+ if (flow_mask->sport) {
+ if (ip_proto == IPPROTO_UDP)
+ req->features |= BIT_ULL(NPC_SPORT_UDP);
+ else if (ip_proto == IPPROTO_TCP)
+ req->features |= BIT_ULL(NPC_SPORT_TCP);
+ else if (ip_proto == IPPROTO_SCTP)
+ req->features |= BIT_ULL(NPC_SPORT_SCTP);
+ }
}
return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
@@ -1023,6 +1131,7 @@ int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL(otx2_setup_tc);
static const struct rhashtable_params tc_flow_ht_params = {
.head_offset = offsetof(struct otx2_tc_flow, node),
@@ -1050,8 +1159,14 @@ int otx2_init_tc(struct otx2_nic *nic)
return err;
tc->flow_ht_params = tc_flow_ht_params;
- return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
+ err = rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
+ if (err) {
+ kfree(tc->tc_entries_bitmap);
+ tc->tc_entries_bitmap = NULL;
+ }
+ return err;
}
+EXPORT_SYMBOL(otx2_init_tc);
void otx2_shutdown_tc(struct otx2_nic *nic)
{
@@ -1060,3 +1175,4 @@ void otx2_shutdown_tc(struct otx2_nic *nic)
kfree(tc->tc_entries_bitmap);
rhashtable_destroy(&tc->flow_table);
}
+EXPORT_SYMBOL(otx2_shutdown_tc);
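The burst handling reworked in otx2_tc.c above keeps the decode formula quoted in the driver comment, burst = ((256 + mantissa) << (1 + exponent)) / 256, with CN10K only widening the mantissa field. A standalone sketch that mirrors the non-zero branch of otx2_get_egress_burst_cfg() and checks the round trip, using the OcteonTx2 limits and illustrative helper names:

#include <stdint.h>
#include <stdio.h>

#define MAX_BURST_MANTISSA	0xFFu	/* OcteonTx2 8-bit mantissa field */

static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Mirrors the non-zero burst case of otx2_get_egress_burst_cfg() */
static void burst_encode(uint32_t burst, uint32_t *exp, uint32_t *mantissa)
{
	uint32_t tmp;

	*exp = ilog2_u32(burst) ? ilog2_u32(burst) - 1 : 0;
	tmp = burst - (1u << ilog2_u32(burst));
	if (burst < MAX_BURST_MANTISSA)
		*mantissa = tmp * 2;
	else
		*mantissa = tmp / (1u << (*exp - 7));
}

/* Decode per the driver comment: ((256 + mantissa) << (1 + exp)) / 256 */
static uint64_t burst_decode(uint32_t exp, uint32_t mantissa)
{
	return ((256ULL + mantissa) << (1 + exp)) / 256;
}

int main(void)
{
	uint32_t exp, mantissa, burst = 6144;

	burst_encode(burst, &exp, &mantissa);
	printf("burst=%u -> exp=%u mantissa=%u -> decoded=%llu\n",
	       burst, exp, mantissa,
	       (unsigned long long)burst_decode(exp, mantissa));	/* decodes back to 6144 */
	return 0;
}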
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index 0cc6353254bf..7045fedfd73a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -10,6 +10,7 @@
#include <net/tso.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
+#include <net/ip6_checksum.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -19,6 +20,12 @@
#include "cn10k.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
+#define PTP_PORT 0x13F
+/* The PTPv2 header Original Timestamp starts at byte offset 34 and
+ * contains a 6-byte seconds field and a 4-byte nanoseconds field.
+ */
+#define PTP_SYNC_SEC_OFFSET 34
+
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
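The offsets defined above place the PTPv2 Original Timestamp 34 bytes into the PTP message, as a 6-byte seconds field followed by a 4-byte nanoseconds field; the one-step transmit path later fills it as a 16-bit seconds_msb, a 32-bit seconds_lsb and a 32-bit nanoseconds, all big-endian. A standalone sketch of that layout (struct and helper names are illustrative, not the driver's definitions):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Illustrative PTPv2 originTimestamp layout: 48-bit seconds + 32-bit nanoseconds */
struct ptp_origin_tstamp {
	uint16_t seconds_msb;	/* upper 16 bits of the 48-bit seconds */
	uint32_t seconds_lsb;	/* lower 32 bits of the 48-bit seconds */
	uint32_t nanoseconds;
} __attribute__((packed));

int main(void)
{
	struct ptp_origin_tstamp ts;
	uint64_t sec = 0x123456789AULL;	/* any 48-bit second count */

	ts.seconds_msb = htons((sec >> 32) & 0xffff);
	ts.seconds_lsb = htonl(sec & 0xffffffff);
	ts.nanoseconds = htonl(500);

	printf("wire size: %zu bytes (6 + 4)\n", sizeof(ts));
	return 0;
}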
@@ -148,6 +155,7 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
if (timestamp != 1) {
+ timestamp = pfvf->ptp->convert_tx_ptp_tstmp(timestamp);
err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
if (!err) {
memset(&ts, 0, sizeof(ts));
@@ -167,14 +175,15 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
struct sk_buff *skb, void *data)
{
- u64 tsns;
+ u64 timestamp, tsns;
int err;
if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
return;
+ timestamp = pfvf->ptp->convert_rx_ptp_tstmp(*(u64 *)data);
/* The first 8 bytes is the timestamp */
- err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
+ err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
if (err)
return;
@@ -433,6 +442,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq, int budget)
{
int tx_pkts = 0, tx_bytes = 0, qidx;
+ struct otx2_snd_queue *sq;
struct nix_cqe_tx_s *cqe;
int processed_cqe = 0;
@@ -443,6 +453,9 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
return 0;
process_cqe:
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ sq = &pfvf->qset.sq[qidx];
+
while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
if (unlikely(!cqe)) {
@@ -450,18 +463,20 @@ process_cqe:
return 0;
break;
}
+
if (cq->cq_type == CQ_XDP) {
- qidx = cq->cq_idx - pfvf->hw.rx_queues;
- otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx],
- cqe);
+ otx2_xdp_snd_pkt_handler(pfvf, sq, cqe);
} else {
- otx2_snd_pkt_handler(pfvf, cq,
- &pfvf->qset.sq[cq->cint_idx],
- cqe, budget, &tx_pkts, &tx_bytes);
+ otx2_snd_pkt_handler(pfvf, cq, sq, cqe, budget,
+ &tx_pkts, &tx_bytes);
}
+
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
processed_cqe++;
cq->pend_cqe--;
+
+ sq->cons_head++;
+ sq->cons_head &= (sq->sqe_cnt - 1);
}
/* Free CQEs to HW */
@@ -482,6 +497,18 @@ process_cqe:
return 0;
}
+static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
+{
+ struct dim_sample dim_sample;
+ u64 rx_frames, rx_bytes;
+
+ rx_frames = OTX2_GET_RX_STATS(RX_BCAST) + OTX2_GET_RX_STATS(RX_MCAST) +
+ OTX2_GET_RX_STATS(RX_UCAST);
+ rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
+ dim_update_sample(pfvf->napi_events, rx_frames, rx_bytes, &dim_sample);
+ net_dim(&cq_poll->dim, dim_sample);
+}
+
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
struct otx2_cq_queue *rx_cq = NULL;
@@ -519,6 +546,17 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
return workdone;
+ /* Check for adaptive interrupt coalescing */
+ if (workdone != 0 &&
+ ((pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) ==
+ OTX2_FLAG_ADPTV_INT_COAL_ENABLED)) {
+ /* Adjust IRQ coalescing using net_dim */
+ otx2_adjust_adaptive_coalese(pfvf, cq_poll);
+ /* Update irq coalescing */
+ for (i = 0; i < pfvf->hw.cint_cnt; i++)
+ otx2_config_irq_coalescing(pfvf, i);
+ }
+
/* Re-enable interrupts */
otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
BIT_ULL(0));
@@ -599,7 +637,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
ext->subdc = NIX_SUBDC_EXT;
if (skb_shinfo(skb)->gso_size) {
ext->lso = 1;
- ext->lso_sb = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ ext->lso_sb = skb_tcp_all_headers(skb);
ext->lso_mps = skb_shinfo(skb)->gso_size;
/* Only TSOv4 and TSOv6 GSO offloads are supported */
@@ -661,7 +699,8 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
}
static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
- int alg, u64 iova)
+ int alg, u64 iova, int ptp_offset,
+ u64 base_ns, bool udp_csum_crt)
{
struct nix_sqe_mem_s *mem;
@@ -671,6 +710,13 @@ static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
mem->wmem = 1; /* wait for the memory operation */
mem->addr = iova;
+ if (ptp_offset) {
+ mem->start_offset = ptp_offset;
+ mem->udp_csum_crt = !!udp_csum_crt;
+ mem->base_ns = base_ns;
+ mem->step_type = 1;
+ }
+
*offset += sizeof(*mem);
}
@@ -906,7 +952,7 @@ static bool is_hw_tso_supported(struct otx2_nic *pfvf,
* be correctly modified, hence don't offload such TSO segments.
*/
- payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
+ payload_len = skb->len - skb_tcp_all_headers(skb);
last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
if (last_seg_size && last_seg_size < 16)
return false;
@@ -927,16 +973,139 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs;
}
+static bool otx2_validate_network_transport(struct sk_buff *skb)
+{
+ if ((ip_hdr(skb)->protocol == IPPROTO_UDP) ||
+ (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) {
+ struct udphdr *udph = udp_hdr(skb);
+
+ if (udph->source == htons(PTP_PORT) &&
+ udph->dest == htons(PTP_PORT))
+ return true;
+ }
+
+ return false;
+}
+
+static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, bool *udp_csum_crt)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ u16 nix_offload_hlen = 0, inner_vhlen = 0;
+ bool udp_hdr_present = false, is_sync;
+ u8 *data = skb->data, *msgtype;
+ __be16 proto = eth->h_proto;
+ int network_depth = 0;
+
+ /* NIX is programmed to offload the outer VLAN header.
+ * In case of a single VLAN the protocol field holds the network header type (ETH_P_IP/IPV6);
+ * in case of stacked VLANs it holds the inner VLAN type (0x8100).
+ */
+ if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
+ skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) {
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ /* Get vlan protocol */
+ proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
+ /* SKB APIs like skb_transport_offset() do not include the
+ * offloaded VLAN header length, so add it
+ * explicitly.
+ */
+ nix_offload_hlen = VLAN_HLEN;
+ inner_vhlen = VLAN_HLEN;
+ } else if (skb->vlan_proto == htons(ETH_P_8021Q)) {
+ nix_offload_hlen = VLAN_HLEN;
+ }
+ } else if (eth_type_vlan(eth->h_proto)) {
+ proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+ }
+
+ switch (ntohs(proto)) {
+ case ETH_P_1588:
+ if (network_depth)
+ *offset = network_depth;
+ else
+ *offset = ETH_HLEN + nix_offload_hlen +
+ inner_vhlen;
+ break;
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ if (!otx2_validate_network_transport(skb))
+ return false;
+
+ *offset = nix_offload_hlen + skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+ udp_hdr_present = true;
+
+ }
+
+ msgtype = data + *offset;
+ /* Check whether the PTP messageId is SYNC */
+ is_sync = !(*msgtype & 0xf);
+ if (is_sync)
+ *udp_csum_crt = udp_hdr_present;
+ else
+ *offset = 0;
+
+ return is_sync;
+}
+
static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
struct otx2_snd_queue *sq, int *offset)
{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct ptpv2_tstamp *origin_tstamp;
+ bool udp_csum_crt = false;
+ unsigned int udphoff;
+ struct timespec64 ts;
+ int ptp_offset = 0;
+ __wsum skb_csum;
u64 iova;
- if (!skb_shinfo(skb)->gso_size &&
- skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ if (unlikely(!skb_shinfo(skb)->gso_size &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
+ if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC &&
+ otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum_crt))) {
+ origin_tstamp = (struct ptpv2_tstamp *)
+ ((u8 *)skb->data + ptp_offset +
+ PTP_SYNC_SEC_OFFSET);
+ ts = ns_to_timespec64(pfvf->ptp->tstamp);
+ origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
+ origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
+ origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
+ /* Point to correction field in PTP packet */
+ ptp_offset += 8;
+
+ /* When the user disables HW checksum, the stack calculates the csum,
+ * but it does not cover the PTP timestamp, which is added later.
+ * Recalculate the checksum manually to account for the timestamp.
+ */
+ if (udp_csum_crt) {
+ struct udphdr *uh = udp_hdr(skb);
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL && uh->check != 0) {
+ udphoff = skb_transport_offset(skb);
+ uh->check = 0;
+ skb_csum = skb_checksum(skb, udphoff, skb->len - udphoff,
+ 0);
+ if (ntohs(eth->h_proto) == ETH_P_IPV6)
+ uh->check = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ skb->len - udphoff,
+ ipv6_hdr(skb)->nexthdr,
+ skb_csum);
+ else
+ uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ skb->len - udphoff,
+ IPPROTO_UDP,
+ skb_csum);
+ }
+ }
+ } else {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
iova = sq->timestamps->iova + (sq->head * sizeof(u64));
- otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
+ otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
+ ptp_offset, pfvf->ptp->base_ns, udp_csum_crt);
} else {
skb_tx_timestamp(skb);
}
@@ -947,17 +1116,17 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
{
struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx);
struct otx2_nic *pfvf = netdev_priv(netdev);
- int offset, num_segs, free_sqe;
+ int offset, num_segs, free_desc;
struct nix_sqe_hdr_s *sqe_hdr;
- /* Check if there is room for new SQE.
- * 'Num of SQBs freed to SQ's pool - SQ's Aura count'
- * will give free SQE count.
+ /* Check if there is enough room between producer
+ * and consumer index.
*/
- free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1);
+ if (free_desc < sq->sqe_thresh)
+ return false;
- if (free_sqe < sq->sqe_thresh ||
- free_sqe < otx2_get_sqe_count(pfvf, skb))
+ if (free_desc < otx2_get_sqe_count(pfvf, skb))
return false;
num_segs = skb_shinfo(skb)->nr_frags + 1;
@@ -1198,7 +1367,7 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
put_page(page);
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act);
break;
case XDP_ABORTED:
trace_xdp_exception(pfvf->netdev, prog, act);
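The producer/consumer accounting added to otx2_sq_append_skb() above uses the standard power-of-two ring trick: with sqe_cnt a power of two, the free-slot count is (cons_head - head - 1) reduced modulo sqe_cnt by the mask, which always keeps one slot unused. A standalone sketch with a few sample values (illustrative, outside the driver):

#include <stdint.h>
#include <stdio.h>

/* Free slots in a power-of-two ring; same expression as otx2_sq_append_skb() */
static unsigned int ring_free(unsigned int head, unsigned int cons_head,
			      unsigned int cnt)
{
	return (cons_head - head - 1 + cnt) & (cnt - 1);
}

int main(void)
{
	unsigned int cnt = 1024;

	printf("%u\n", ring_free(0, 0, cnt));		/* empty ring: 1023 free */
	printf("%u\n", ring_free(10, 0, cnt));		/* 10 entries in flight: 1013 free */
	printf("%u\n", ring_free(511, 512, cnt));	/* full ring: 0 free */
	return 0;
}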
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index f1a04cf9210c..93cac2c2664c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -21,7 +21,7 @@
#define OTX2_HEAD_ROOM OTX2_ALIGN
#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
-#define OTX2_MIN_MTU 64
+#define OTX2_MIN_MTU 60
#define OTX2_MAX_GSO_SEGS 255
#define OTX2_MAX_FRAGS_IN_SQE 9
@@ -79,6 +79,7 @@ struct sg_list {
struct otx2_snd_queue {
u8 aura_id;
u16 head;
+ u16 cons_head;
u16 sqe_size;
u32 sqe_cnt;
u16 num_sqbs;
@@ -109,6 +110,7 @@ struct otx2_cq_poll {
#define CINT_INVALID_CQ 255
u8 cint_idx;
u8 cq_ids[CQS_PER_CINT];
+ struct dim dim;
struct napi_struct napi;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 78944ad3492f..53366dbfbf27 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -472,23 +472,7 @@ static void otx2vf_reset_task(struct work_struct *work)
static int otx2vf_set_features(struct net_device *netdev,
netdev_features_t features)
{
- netdev_features_t changed = features ^ netdev->features;
- bool ntuple_enabled = !!(features & NETIF_F_NTUPLE);
- struct otx2_nic *vf = netdev_priv(netdev);
-
- if (changed & NETIF_F_NTUPLE) {
- if (!ntuple_enabled) {
- otx2_mcam_flow_del(vf);
- return 0;
- }
-
- if (!otx2_get_maxflows(vf->flow_cfg)) {
- netdev_err(netdev,
- "Can't enable NTUPLE, MCAM entries not allocated\n");
- return -EINVAL;
- }
- }
- return 0;
+ return otx2_handle_ntuple_tc_features(netdev, features);
}
static const struct net_device_ops otx2vf_netdev_ops = {
@@ -502,6 +486,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_eth_ioctl = otx2_ioctl,
+ .ndo_setup_tc = otx2_setup_tc,
};
static int otx2_wq_init(struct otx2_nic *vf)
@@ -586,6 +571,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->tx_queues = qcount;
hw->max_queues = qcount;
hw->tot_tx_queues = qcount;
+ hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
+ /* Use CQE of 128 byte descriptor size by default */
+ hw->xqe_size = 128;
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
@@ -633,7 +621,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = otx2vf_realloc_msix_vectors(vf);
if (err)
- goto err_mbox_destroy;
+ goto err_detach_rsrc;
err = otx2_set_real_num_queues(netdev, qcount, qcount);
if (err)
@@ -662,8 +650,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features |= NETIF_F_NTUPLE;
netdev->hw_features |= NETIF_F_RXALL;
+ netdev->hw_features |= NETIF_F_HW_TC;
- netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
+ netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2vf_netdev_ops;
@@ -684,7 +673,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_detach_rsrc;
+ goto err_ptp_destroy;
}
err = otx2_wq_init(vf);
@@ -697,19 +686,30 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_unreg_netdev;
- err = otx2_register_dl(vf);
+ err = otx2_init_tc(vf);
if (err)
goto err_unreg_netdev;
- /* Enable pause frames by default */
- vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
- vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
+ err = otx2_register_dl(vf);
+ if (err)
+ goto err_shutdown_tc;
+
+#ifdef CONFIG_DCB
+ err = otx2_dcbnl_set_ops(netdev);
+ if (err)
+ goto err_shutdown_tc;
+#endif
return 0;
+err_shutdown_tc:
+ otx2_shutdown_tc(vf);
err_unreg_netdev:
unregister_netdev(netdev);
+err_ptp_destroy:
+ otx2_ptp_destroy(vf);
err_detach_rsrc:
+ free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
otx2_detach_resources(&vf->mbox);
@@ -737,13 +737,33 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
+ /* Disable 802.3x pause frames */
+ if (vf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
+ (vf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
+ vf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
+ vf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
+ otx2_config_pause_frm(vf);
+ }
+
+#ifdef CONFIG_DCB
+ /* Disable PFC config */
+ if (vf->pfc_en) {
+ vf->pfc_en = 0;
+ otx2_config_priority_flow_ctrl(vf);
+ }
+#endif
+
cancel_work_sync(&vf->reset_task);
otx2_unregister_dl(vf);
unregister_netdev(netdev);
if (vf->otx2_wq)
destroy_workqueue(vf->otx2_wq);
+ otx2_ptp_destroy(vf);
+ otx2_mcam_flow_del(vf);
+ otx2_shutdown_tc(vf);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
+ free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
diff --git a/drivers/net/ethernet/marvell/prestera/Kconfig b/drivers/net/ethernet/marvell/prestera/Kconfig
index b6f20e2034c6..f2f7663c3d10 100644
--- a/drivers/net/ethernet/marvell/prestera/Kconfig
+++ b/drivers/net/ethernet/marvell/prestera/Kconfig
@@ -8,6 +8,7 @@ config PRESTERA
depends on NET_SWITCHDEV && VLAN_8021Q
depends on BRIDGE || BRIDGE=n
select NET_DEVLINK
+ select PHYLINK
help
This driver supports Marvell Prestera Switch ASICs family.
diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile
index 0609df8b913d..df14cee80153 100644
--- a/drivers/net/ethernet/marvell/prestera/Makefile
+++ b/drivers/net/ethernet/marvell/prestera/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PRESTERA) += prestera.o
prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \
prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \
prestera_switchdev.o prestera_acl.o prestera_flow.o \
- prestera_flower.o prestera_span.o
+ prestera_flower.o prestera_span.o prestera_counter.o \
+ prestera_router.o prestera_router_hw.o prestera_matchall.o
obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index 2a4c14c704c0..35554ee805cd 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -7,6 +7,7 @@
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
+#include <linux/phylink.h>
#include <net/devlink.h>
#include <uapi/linux/if_ether.h>
@@ -20,6 +21,26 @@ struct prestera_fw_rev {
u16 sub;
};
+struct prestera_flood_domain {
+ struct prestera_switch *sw;
+ struct list_head flood_domain_port_list;
+ u32 idx;
+};
+
+struct prestera_mdb_entry {
+ struct prestera_switch *sw;
+ struct prestera_flood_domain *flood_domain;
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+};
+
+struct prestera_flood_domain_port {
+ struct prestera_flood_domain *flood_domain;
+ struct net_device *dev;
+ struct list_head flood_domain_port_node;
+ u16 vid;
+};
+
struct prestera_port_stats {
u64 good_octets_received;
u64 bad_octets_received;
@@ -72,6 +93,7 @@ struct prestera_lag {
struct prestera_flow_block;
struct prestera_port_mac_state {
+ bool valid;
u32 mode;
u32 speed;
bool oper;
@@ -107,7 +129,8 @@ struct prestera_port_phy_config {
struct prestera_port {
struct net_device *dev;
struct prestera_switch *sw;
- struct prestera_flow_block *flow_block;
+ struct prestera_flow_block *ingress_flow_block;
+ struct prestera_flow_block *egress_flow_block;
struct devlink_port dl_port;
struct list_head lag_member;
struct prestera_lag *lag;
@@ -130,6 +153,13 @@ struct prestera_port {
struct prestera_port_phy_config cfg_phy;
struct prestera_port_mac_state state_mac;
struct prestera_port_phy_state state_phy;
+
+ struct phylink_config phy_config;
+ struct phylink *phy_link;
+ struct phylink_pcs phylink_pcs;
+
+ /* protects state_mac */
+ spinlock_t state_mac_lock;
};
struct prestera_device {
@@ -225,6 +255,29 @@ struct prestera_event {
};
};
+enum prestera_if_type {
+ /* the interface is of port type (dev,port) */
+ PRESTERA_IF_PORT_E = 0,
+
+ /* the interface is of lag type (lag-id) */
+ PRESTERA_IF_LAG_E = 1,
+
+ /* the interface is of Vid type (vlan-id) */
+ PRESTERA_IF_VID_E = 3,
+};
+
+struct prestera_iface {
+ enum prestera_if_type type;
+ struct {
+ u32 hw_dev_num;
+ u32 port_num;
+ } dev_port;
+ u32 hw_dev_num;
+ u16 vr_id;
+ u16 lag_id;
+ u16 vlan_id;
+};
+
struct prestera_switchdev;
struct prestera_span;
struct prestera_rxtx;
@@ -247,9 +300,33 @@ struct prestera_switch {
u32 mtu_min;
u32 mtu_max;
u8 id;
+ struct device_node *np;
+ struct prestera_router *router;
struct prestera_lag *lags;
+ struct prestera_counter *counter;
u8 lag_member_max;
u8 lag_max;
+ u32 size_tbl_router_nexthop;
+};
+
+struct prestera_router {
+ struct prestera_switch *sw;
+ struct list_head vr_list;
+ struct list_head rif_entry_list;
+ struct rhashtable nh_neigh_ht;
+ struct rhashtable nexthop_group_ht;
+ struct rhashtable fib_ht;
+ struct rhashtable kern_neigh_cache_ht;
+ struct rhashtable kern_fib_cache_ht;
+ struct notifier_block inetaddr_nb;
+ struct notifier_block inetaddr_valid_nb;
+ struct notifier_block fib_nb;
+ struct notifier_block netevent_nb;
+ u8 *nhgrp_hw_state_cache; /* Bitmap cache of nexthop hw state */
+ unsigned long nhgrp_hw_cache_kick; /* jiffies */
+ struct {
+ struct delayed_work dw;
+ } neighs_update;
};
struct prestera_rxtx_params {
@@ -279,8 +356,13 @@ struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes);
+int prestera_router_init(struct prestera_switch *sw);
+void prestera_router_fini(struct prestera_switch *sw);
+
struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id);
+struct prestera_switch *prestera_switch_get(struct net_device *dev);
+
int prestera_port_cfg_mac_read(struct prestera_port *port,
struct prestera_port_mac_config *cfg);
@@ -289,14 +371,47 @@ int prestera_port_cfg_mac_write(struct prestera_port *port,
struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
+void prestera_queue_work(struct work_struct *work);
+void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay);
+void prestera_queue_drain(void);
+
+int prestera_port_learning_set(struct prestera_port *port, bool learn_enable);
+int prestera_port_uc_flood_set(struct prestera_port *port, bool flood);
+int prestera_port_mc_flood_set(struct prestera_port *port, bool flood);
+
+int prestera_port_br_locked_set(struct prestera_port *port, bool br_locked);
+
int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
bool prestera_netdev_check(const struct net_device *dev);
+int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr);
+
bool prestera_port_is_lag_member(const struct prestera_port *port);
+int prestera_lag_id(struct prestera_switch *sw,
+ struct net_device *lag_dev, u16 *lag_id);
struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id);
u16 prestera_port_lag_id(const struct prestera_port *port);
+struct prestera_mdb_entry *
+prestera_mdb_entry_create(struct prestera_switch *sw,
+ const unsigned char *addr, u16 vid);
+void prestera_mdb_entry_destroy(struct prestera_mdb_entry *mdb_entry);
+
+struct prestera_flood_domain *
+prestera_flood_domain_create(struct prestera_switch *sw);
+void prestera_flood_domain_destroy(struct prestera_flood_domain *flood_domain);
+
+int
+prestera_flood_domain_port_create(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev,
+ u16 vid);
+void
+prestera_flood_domain_port_destroy(struct prestera_flood_domain_port *port);
+struct prestera_flood_domain_port *
+prestera_flood_domain_port_find(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev, u16 vid);
+
#endif /* _PRESTERA_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
index 83c75ffb1a1c..cba89fda504b 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -1,35 +1,88 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved */
#include <linux/rhashtable.h>
-#include "prestera.h"
-#include "prestera_hw.h"
#include "prestera_acl.h"
-#include "prestera_span.h"
+#include "prestera_flow.h"
+#include "prestera_hw.h"
+#include "prestera.h"
+
+#define ACL_KEYMASK_SIZE \
+ (sizeof(__be32) * __PRESTERA_ACL_RULE_MATCH_TYPE_MAX)
struct prestera_acl {
struct prestera_switch *sw;
+ struct list_head vtcam_list;
struct list_head rules;
+ struct rhashtable ruleset_ht;
+ struct rhashtable acl_rule_entry_ht;
+ struct idr uid;
+};
+
+struct prestera_acl_ruleset_ht_key {
+ struct prestera_flow_block *block;
+ u32 chain_index;
+};
+
+struct prestera_acl_rule_entry {
+ struct rhash_head ht_node;
+ struct prestera_acl_rule_entry_key key;
+ u32 hw_id;
+ u32 vtcam_id;
+ struct {
+ struct {
+ u8 valid:1;
+ } accept, drop, trap;
+ struct {
+ u8 valid:1;
+ struct prestera_acl_action_police i;
+ } police;
+ struct {
+ struct prestera_acl_action_jump i;
+ u8 valid:1;
+ } jump;
+ struct {
+ u32 id;
+ struct prestera_counter_block *block;
+ } counter;
+ };
};
struct prestera_acl_ruleset {
+ struct rhash_head ht_node; /* Member of acl HT */
+ struct prestera_acl_ruleset_ht_key ht_key;
struct rhashtable rule_ht;
- struct prestera_switch *sw;
- u16 id;
+ struct prestera_acl *acl;
+ struct {
+ u32 min;
+ u32 max;
+ } prio;
+ unsigned long rule_count;
+ refcount_t refcount;
+ void *keymask;
+ u32 vtcam_id;
+ u32 index;
+ u16 pcl_id;
+ bool offload;
+ bool ingress;
};
-struct prestera_acl_rule {
- struct rhash_head ht_node;
+struct prestera_acl_vtcam {
struct list_head list;
- struct list_head match_list;
- struct list_head action_list;
- struct prestera_flow_block *block;
- unsigned long cookie;
- u32 priority;
- u8 n_actions;
- u8 n_matches;
+ __be32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ refcount_t refcount;
u32 id;
+ bool is_keymask_set;
+ u8 lookup;
+ u8 direction;
+};
+
+static const struct rhashtable_params prestera_acl_ruleset_ht_params = {
+ .key_len = sizeof(struct prestera_acl_ruleset_ht_key),
+ .key_offset = offsetof(struct prestera_acl_ruleset, ht_key),
+ .head_offset = offsetof(struct prestera_acl_ruleset, ht_node),
+ .automatic_shrinking = true,
};
static const struct rhashtable_params prestera_acl_rule_ht_params = {
@@ -39,28 +92,92 @@ static const struct rhashtable_params prestera_acl_rule_ht_params = {
.automatic_shrinking = true,
};
+static const struct rhashtable_params __prestera_acl_rule_entry_ht_params = {
+ .key_offset = offsetof(struct prestera_acl_rule_entry, key),
+ .head_offset = offsetof(struct prestera_acl_rule_entry, ht_node),
+ .key_len = sizeof(struct prestera_acl_rule_entry_key),
+ .automatic_shrinking = true,
+};
+
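+/* Map a flower chain index to the hardware counter client id for the given
+ * direction. Egress supports only chain 0.
+ */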
+int prestera_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client)
+{
+ static const u32 ingress_client_map[] = {
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2
+ };
+
+ if (!ingress) {
+ /* prestera supports only one chain on egress */
+ if (chain_index > 0)
+ return -EINVAL;
+
+ *client = PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP;
+ return 0;
+ }
+
+ if (chain_index >= ARRAY_SIZE(ingress_client_map))
+ return -EINVAL;
+
+ *client = ingress_client_map[chain_index];
+ return 0;
+}
+
+static bool prestera_acl_chain_is_supported(u32 chain_index, bool ingress)
+{
+ if (!ingress)
+ /* prestera supports only one chain on egress */
+ return chain_index == 0;
+
+ return (chain_index & ~PRESTERA_ACL_CHAIN_MASK) == 0;
+}
+
static struct prestera_acl_ruleset *
-prestera_acl_ruleset_create(struct prestera_switch *sw)
+prestera_acl_ruleset_create(struct prestera_acl *acl,
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset *ruleset;
+ u32 uid = 0;
int err;
+ if (!prestera_acl_chain_is_supported(chain_index, block->ingress))
+ return ERR_PTR(-EINVAL);
+
ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL);
if (!ruleset)
return ERR_PTR(-ENOMEM);
+ ruleset->acl = acl;
+ ruleset->ingress = block->ingress;
+ ruleset->ht_key.block = block;
+ ruleset->ht_key.chain_index = chain_index;
+ refcount_set(&ruleset->refcount, 1);
+
err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params);
if (err)
goto err_rhashtable_init;
- err = prestera_hw_acl_ruleset_create(sw, &ruleset->id);
+ err = idr_alloc_u32(&acl->uid, NULL, &uid, U8_MAX, GFP_KERNEL);
if (err)
goto err_ruleset_create;
- ruleset->sw = sw;
+ /* make pcl-id based on uid */
+ ruleset->pcl_id = PRESTERA_ACL_PCL_ID_MAKE((u8)uid, chain_index);
+ ruleset->index = uid;
+
+ ruleset->prio.min = UINT_MAX;
+ ruleset->prio.max = 0;
+
+ err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
+ prestera_acl_ruleset_ht_params);
+ if (err)
+ goto err_ruleset_ht_insert;
return ruleset;
+err_ruleset_ht_insert:
+ idr_remove(&acl->uid, uid);
err_ruleset_create:
rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
@@ -68,271 +185,367 @@ err_rhashtable_init:
return ERR_PTR(err);
}
-static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
-{
- prestera_hw_acl_ruleset_del(ruleset->sw, ruleset->id);
- rhashtable_destroy(&ruleset->rule_ht);
- kfree(ruleset);
-}
-
-struct prestera_flow_block *
-prestera_acl_block_create(struct prestera_switch *sw, struct net *net)
-{
- struct prestera_flow_block *block;
-
- block = kzalloc(sizeof(*block), GFP_KERNEL);
- if (!block)
- return NULL;
- INIT_LIST_HEAD(&block->binding_list);
- block->net = net;
- block->sw = sw;
-
- block->ruleset = prestera_acl_ruleset_create(sw);
- if (IS_ERR(block->ruleset)) {
- kfree(block);
- return NULL;
- }
-
- return block;
-}
-
-void prestera_acl_block_destroy(struct prestera_flow_block *block)
+int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+ void *keymask)
{
- prestera_acl_ruleset_destroy(block->ruleset);
- WARN_ON(!list_empty(&block->binding_list));
- kfree(block);
-}
-
-static struct prestera_flow_block_binding *
-prestera_acl_block_lookup(struct prestera_flow_block *block,
- struct prestera_port *port)
-{
- struct prestera_flow_block_binding *binding;
-
- list_for_each_entry(binding, &block->binding_list, list)
- if (binding->port == port)
- return binding;
+ ruleset->keymask = kmemdup(keymask, ACL_KEYMASK_SIZE, GFP_KERNEL);
+ if (!ruleset->keymask)
+ return -ENOMEM;
- return NULL;
+ return 0;
}
-int prestera_acl_block_bind(struct prestera_flow_block *block,
- struct prestera_port *port)
+int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset)
{
- struct prestera_flow_block_binding *binding;
+ struct prestera_acl_iface iface;
+ u32 vtcam_id;
+ int dir;
int err;
- if (WARN_ON(prestera_acl_block_lookup(block, port)))
- return -EEXIST;
+ dir = ruleset->ingress ?
+ PRESTERA_HW_VTCAM_DIR_INGRESS : PRESTERA_HW_VTCAM_DIR_EGRESS;
- binding = kzalloc(sizeof(*binding), GFP_KERNEL);
- if (!binding)
- return -ENOMEM;
- binding->span_id = PRESTERA_SPAN_INVALID_ID;
- binding->port = port;
+ if (ruleset->offload)
+ return -EEXIST;
- err = prestera_hw_acl_port_bind(port, block->ruleset->id);
+ err = prestera_acl_vtcam_id_get(ruleset->acl,
+ ruleset->ht_key.chain_index,
+ dir,
+ ruleset->keymask, &vtcam_id);
if (err)
- goto err_rules_bind;
+ goto err_vtcam_create;
+
+ if (ruleset->ht_key.chain_index) {
+ /* for chain > 0, bind iface index to pcl-id to be able
+ * to jump from any other ruleset to this one using the index.
+ */
+ iface.index = ruleset->index;
+ iface.type = PRESTERA_ACL_IFACE_TYPE_INDEX;
+ err = prestera_hw_vtcam_iface_bind(ruleset->acl->sw, &iface,
+ vtcam_id, ruleset->pcl_id);
+ if (err)
+ goto err_ruleset_bind;
+ }
- list_add(&binding->list, &block->binding_list);
+ ruleset->vtcam_id = vtcam_id;
+ ruleset->offload = true;
return 0;
-err_rules_bind:
- kfree(binding);
+err_ruleset_bind:
+ prestera_acl_vtcam_id_put(ruleset->acl, ruleset->vtcam_id);
+err_vtcam_create:
return err;
}
-int prestera_acl_block_unbind(struct prestera_flow_block *block,
- struct prestera_port *port)
+static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
{
- struct prestera_flow_block_binding *binding;
+ struct prestera_acl *acl = ruleset->acl;
+ u8 uid = ruleset->pcl_id & PRESTERA_ACL_KEYMASK_PCL_ID_USER;
+ int err;
- binding = prestera_acl_block_lookup(block, port);
- if (!binding)
- return -ENOENT;
+ rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
+ prestera_acl_ruleset_ht_params);
+
+ if (ruleset->offload) {
+ if (ruleset->ht_key.chain_index) {
+ struct prestera_acl_iface iface = {
+ .type = PRESTERA_ACL_IFACE_TYPE_INDEX,
+ .index = ruleset->index
+ };
+ err = prestera_hw_vtcam_iface_unbind(acl->sw, &iface,
+ ruleset->vtcam_id);
+ WARN_ON(err);
+ }
+ WARN_ON(prestera_acl_vtcam_id_put(acl, ruleset->vtcam_id));
+ }
- list_del(&binding->list);
+ idr_remove(&acl->uid, uid);
+ rhashtable_destroy(&ruleset->rule_ht);
+ kfree(ruleset->keymask);
+ kfree(ruleset);
+}
- prestera_hw_acl_port_unbind(port, block->ruleset->id);
+static struct prestera_acl_ruleset *
+__prestera_acl_ruleset_lookup(struct prestera_acl *acl,
+ struct prestera_flow_block *block,
+ u32 chain_index)
+{
+ struct prestera_acl_ruleset_ht_key ht_key;
- kfree(binding);
- return 0;
+ memset(&ht_key, 0, sizeof(ht_key));
+ ht_key.block = block;
+ ht_key.chain_index = chain_index;
+ return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
+ prestera_acl_ruleset_ht_params);
}
struct prestera_acl_ruleset *
-prestera_acl_block_ruleset_get(struct prestera_flow_block *block)
+prestera_acl_ruleset_lookup(struct prestera_acl *acl,
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
- return block->ruleset;
+ struct prestera_acl_ruleset *ruleset;
+
+ ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index);
+ if (!ruleset)
+ return ERR_PTR(-ENOENT);
+
+ refcount_inc(&ruleset->refcount);
+ return ruleset;
}
-u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule)
+struct prestera_acl_ruleset *
+prestera_acl_ruleset_get(struct prestera_acl *acl,
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
- return rule->block->ruleset->id;
+ struct prestera_acl_ruleset *ruleset;
+
+ ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index);
+ if (ruleset) {
+ refcount_inc(&ruleset->refcount);
+ return ruleset;
+ }
+
+ return prestera_acl_ruleset_create(acl, block, chain_index);
}
-struct net *prestera_acl_block_net(struct prestera_flow_block *block)
+void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset)
{
- return block->net;
+ if (!refcount_dec_and_test(&ruleset->refcount))
+ return;
+
+ prestera_acl_ruleset_destroy(ruleset);
}
-struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block)
+int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port)
{
- return block->sw;
+ struct prestera_acl_iface iface = {
+ .type = PRESTERA_ACL_IFACE_TYPE_PORT,
+ .port = port
+ };
+
+ return prestera_hw_vtcam_iface_bind(port->sw, &iface, ruleset->vtcam_id,
+ ruleset->pcl_id);
}
-struct prestera_acl_rule *
-prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
- unsigned long cookie)
+int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port)
{
- return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
- prestera_acl_rule_ht_params);
+ struct prestera_acl_iface iface = {
+ .type = PRESTERA_ACL_IFACE_TYPE_PORT,
+ .port = port
+ };
+
+ return prestera_hw_vtcam_iface_unbind(port->sw, &iface,
+ ruleset->vtcam_id);
}
-struct prestera_acl_rule *
-prestera_acl_rule_create(struct prestera_flow_block *block,
- unsigned long cookie)
+static int prestera_acl_ruleset_block_bind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_flow_block *block)
{
- struct prestera_acl_rule *rule;
+ struct prestera_flow_block_binding *binding;
+ int err;
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
- if (!rule)
- return ERR_PTR(-ENOMEM);
+ block->ruleset_zero = ruleset;
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = prestera_acl_ruleset_bind(ruleset, binding->port);
+ if (err)
+ goto rollback;
+ }
+ return 0;
- INIT_LIST_HEAD(&rule->match_list);
- INIT_LIST_HEAD(&rule->action_list);
- rule->cookie = cookie;
- rule->block = block;
+rollback:
+ list_for_each_entry_continue_reverse(binding, &block->binding_list,
+ list)
+ err = prestera_acl_ruleset_unbind(ruleset, binding->port);
+ block->ruleset_zero = NULL;
- return rule;
+ return err;
}
-struct list_head *
-prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule)
+static void
+prestera_acl_ruleset_block_unbind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_flow_block *block)
{
- return &rule->match_list;
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ prestera_acl_ruleset_unbind(ruleset, binding->port);
+ block->ruleset_zero = NULL;
}
-struct list_head *
-prestera_acl_rule_action_list_get(struct prestera_acl_rule *rule)
+static void
+prestera_acl_ruleset_prio_refresh(struct prestera_acl *acl,
+ struct prestera_acl_ruleset *ruleset)
{
- return &rule->action_list;
+ struct prestera_acl_rule *rule;
+
+ ruleset->prio.min = UINT_MAX;
+ ruleset->prio.max = 0;
+
+ list_for_each_entry(rule, &acl->rules, list) {
+ if (ruleset->ingress != rule->ruleset->ingress)
+ continue;
+ if (ruleset->ht_key.chain_index != rule->chain_index)
+ continue;
+
+ ruleset->prio.min = min(ruleset->prio.min, rule->priority);
+ ruleset->prio.max = max(ruleset->prio.max, rule->priority);
+ }
}
-int prestera_acl_rule_action_add(struct prestera_acl_rule *rule,
- struct prestera_acl_rule_action_entry *entry)
+void
+prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule, u16 pcl_id)
{
- struct prestera_acl_rule_action_entry *a_entry;
+ struct prestera_acl_match *r_match = &rule->re_key.match;
+ __be16 pcl_id_mask = htons(PRESTERA_ACL_KEYMASK_PCL_ID);
+ __be16 pcl_id_key = htons(pcl_id);
- a_entry = kmalloc(sizeof(*a_entry), GFP_KERNEL);
- if (!a_entry)
- return -ENOMEM;
-
- memcpy(a_entry, entry, sizeof(*entry));
- list_add(&a_entry->list, &rule->action_list);
+ rule_match_set(r_match->key, PCL_ID, pcl_id_key);
+ rule_match_set(r_match->mask, PCL_ID, pcl_id_mask);
+}
- rule->n_actions++;
- return 0;
+struct prestera_acl_rule *
+prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
+ unsigned long cookie)
+{
+ return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
+ prestera_acl_rule_ht_params);
}
-u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule)
+u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset)
{
- return rule->n_actions;
+ return ruleset->index;
}
-u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule)
+void prestera_acl_ruleset_prio_get(struct prestera_acl_ruleset *ruleset,
+ u32 *prio_min, u32 *prio_max)
{
- return rule->priority;
+ *prio_min = ruleset->prio.min;
+ *prio_max = ruleset->prio.max;
}
-void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
- u32 priority)
+bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset)
{
- rule->priority = priority;
+ return ruleset->offload;
}
-int prestera_acl_rule_match_add(struct prestera_acl_rule *rule,
- struct prestera_acl_rule_match_entry *entry)
+struct prestera_acl_rule *
+prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
+ unsigned long cookie, u32 chain_index)
{
- struct prestera_acl_rule_match_entry *m_entry;
+ struct prestera_acl_rule *rule;
- m_entry = kmalloc(sizeof(*m_entry), GFP_KERNEL);
- if (!m_entry)
- return -ENOMEM;
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return ERR_PTR(-ENOMEM);
- memcpy(m_entry, entry, sizeof(*entry));
- list_add(&m_entry->list, &rule->match_list);
+ rule->ruleset = ruleset;
+ rule->cookie = cookie;
+ rule->chain_index = chain_index;
- rule->n_matches++;
- return 0;
+ refcount_inc(&ruleset->refcount);
+
+ return rule;
}
-u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule)
+void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
+ u32 priority)
{
- return rule->n_matches;
+ rule->priority = priority;
}
void prestera_acl_rule_destroy(struct prestera_acl_rule *rule)
{
- struct prestera_acl_rule_action_entry *a_entry;
- struct prestera_acl_rule_match_entry *m_entry;
- struct list_head *pos, *n;
-
- list_for_each_safe(pos, n, &rule->match_list) {
- m_entry = list_entry(pos, typeof(*m_entry), list);
- list_del(pos);
- kfree(m_entry);
- }
-
- list_for_each_safe(pos, n, &rule->action_list) {
- a_entry = list_entry(pos, typeof(*a_entry), list);
- list_del(pos);
- kfree(a_entry);
- }
+ if (rule->jump_ruleset)
+ /* release ruleset kept by jump action */
+ prestera_acl_ruleset_put(rule->jump_ruleset);
+ prestera_acl_ruleset_put(rule->ruleset);
kfree(rule);
}
+static void prestera_acl_ruleset_prio_update(struct prestera_acl_ruleset *ruleset,
+ u32 prio)
+{
+ ruleset->prio.min = min(ruleset->prio.min, prio);
+ ruleset->prio.max = max(ruleset->prio.max, prio);
+}
+
int prestera_acl_rule_add(struct prestera_switch *sw,
struct prestera_acl_rule *rule)
{
- u32 rule_id;
int err;
+ struct prestera_acl_ruleset *ruleset = rule->ruleset;
+ struct prestera_flow_block *block = ruleset->ht_key.block;
/* try to add rule to hash table first */
- err = rhashtable_insert_fast(&rule->block->ruleset->rule_ht,
- &rule->ht_node,
+ err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
prestera_acl_rule_ht_params);
if (err)
- return err;
+ goto err_ht_insert;
- /* add rule to hw */
- err = prestera_hw_acl_rule_add(sw, rule, &rule_id);
+ prestera_acl_rule_keymask_pcl_id_set(rule, ruleset->pcl_id);
+ rule->re_arg.vtcam_id = ruleset->vtcam_id;
+ rule->re_key.prio = rule->priority;
+
+ rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key);
+ err = WARN_ON(rule->re) ? -EEXIST : 0;
if (err)
goto err_rule_add;
- rule->id = rule_id;
+ rule->re = prestera_acl_rule_entry_create(sw->acl, &rule->re_key,
+ &rule->re_arg);
+ err = !rule->re ? -EINVAL : 0;
+ if (err)
+ goto err_rule_add;
- list_add_tail(&rule->list, &sw->acl->rules);
+ /* bind the block (all ports) to chain index 0; the rest of
+ * the chains are bound via the goto action
+ */
+ if (!ruleset->ht_key.chain_index && !ruleset->rule_count) {
+ err = prestera_acl_ruleset_block_bind(ruleset, block);
+ if (err)
+ goto err_acl_block_bind;
+ }
+ list_add_tail(&rule->list, &sw->acl->rules);
+ ruleset->rule_count++;
+ prestera_acl_ruleset_prio_update(ruleset, rule->priority);
return 0;
+err_acl_block_bind:
+ prestera_acl_rule_entry_destroy(sw->acl, rule->re);
err_rule_add:
- rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node,
+ rule->re = NULL;
+ rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
prestera_acl_rule_ht_params);
+err_ht_insert:
return err;
}
void prestera_acl_rule_del(struct prestera_switch *sw,
struct prestera_acl_rule *rule)
{
- rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node,
+ struct prestera_acl_ruleset *ruleset = rule->ruleset;
+ struct prestera_flow_block *block = ruleset->ht_key.block;
+
+ rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
prestera_acl_rule_ht_params);
+ ruleset->rule_count--;
list_del(&rule->list);
- prestera_hw_acl_rule_del(sw, rule->id);
+
+ prestera_acl_rule_entry_destroy(sw->acl, rule->re);
+ prestera_acl_ruleset_prio_refresh(sw->acl, ruleset);
+
+ /* unbind block (all ports) */
+ if (!ruleset->ht_key.chain_index && !ruleset->rule_count)
+ prestera_acl_ruleset_block_unbind(ruleset, block);
}
-int prestera_acl_rule_get_stats(struct prestera_switch *sw,
+int prestera_acl_rule_get_stats(struct prestera_acl *acl,
struct prestera_acl_rule *rule,
u64 *packets, u64 *bytes, u64 *last_use)
{
@@ -340,8 +553,10 @@ int prestera_acl_rule_get_stats(struct prestera_switch *sw,
u64 current_bytes;
int err;
- err = prestera_hw_acl_rule_stats_get(sw, rule->id, &current_packets,
- &current_bytes);
+ err = prestera_counter_stats_get(acl->sw->counter,
+ rule->re->counter.block,
+ rule->re->counter.id,
+ &current_packets, &current_bytes);
if (err)
return err;
@@ -352,25 +567,361 @@ int prestera_acl_rule_get_stats(struct prestera_switch *sw,
return 0;
}
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_find(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key)
+{
+ return rhashtable_lookup_fast(&acl->acl_rule_entry_ht, key,
+ __prestera_acl_rule_entry_ht_params);
+}
+
+static int __prestera_acl_rule_entry2hw_del(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e)
+{
+ return prestera_hw_vtcam_rule_del(sw, e->vtcam_id, e->hw_id);
+}
+
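+/* Translate the rule entry actions (accept/drop/trap/police/jump/count) into
+ * hardware action descriptors and program the rule into its vTCAM.
+ */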
+static int __prestera_acl_rule_entry2hw_add(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e)
+{
+ struct prestera_acl_hw_action_info act_hw[PRESTERA_ACL_RULE_ACTION_MAX];
+ int act_num;
+
+ memset(&act_hw, 0, sizeof(act_hw));
+ act_num = 0;
+
+ /* accept */
+ if (e->accept.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_ACCEPT;
+ act_num++;
+ }
+ /* drop */
+ if (e->drop.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_DROP;
+ act_num++;
+ }
+ /* trap */
+ if (e->trap.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP;
+ act_num++;
+ }
+ /* police */
+ if (e->police.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_POLICE;
+ act_hw[act_num].police = e->police.i;
+ act_num++;
+ }
+ /* jump */
+ if (e->jump.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_JUMP;
+ act_hw[act_num].jump = e->jump.i;
+ act_num++;
+ }
+ /* counter */
+ if (e->counter.block) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_COUNT;
+ act_hw[act_num].count.id = e->counter.id;
+ act_num++;
+ }
+
+ return prestera_hw_vtcam_rule_add(sw, e->vtcam_id, e->key.prio,
+ e->key.match.key, e->key.match.mask,
+ act_hw, act_num, &e->hw_id);
+}
+
+static void
+__prestera_acl_rule_entry_act_destruct(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e)
+{
+ /* counter */
+ prestera_counter_put(sw->counter, e->counter.block, e->counter.id);
+ /* police */
+ if (e->police.valid)
+ prestera_hw_policer_release(sw, e->police.i.id);
+}
+
+void prestera_acl_rule_entry_destroy(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry *e)
+{
+ int ret;
+
+ rhashtable_remove_fast(&acl->acl_rule_entry_ht, &e->ht_node,
+ __prestera_acl_rule_entry_ht_params);
+
+ ret = __prestera_acl_rule_entry2hw_del(acl->sw, e);
+ WARN_ON(ret && ret != -ENODEV);
+
+ __prestera_acl_rule_entry_act_destruct(acl->sw, e);
+ kfree(e);
+}
+
+static int
+__prestera_acl_rule_entry_act_construct(struct prestera_switch *sw,
+ struct prestera_acl_rule_entry *e,
+ struct prestera_acl_rule_entry_arg *arg)
+{
+ int err;
+
+ /* accept */
+ e->accept.valid = arg->accept.valid;
+ /* drop */
+ e->drop.valid = arg->drop.valid;
+ /* trap */
+ e->trap.valid = arg->trap.valid;
+ /* jump */
+ e->jump.valid = arg->jump.valid;
+ e->jump.i = arg->jump.i;
+ /* police */
+ if (arg->police.valid) {
+ u8 type = arg->police.ingress ? PRESTERA_POLICER_TYPE_INGRESS :
+ PRESTERA_POLICER_TYPE_EGRESS;
+
+ err = prestera_hw_policer_create(sw, type, &e->police.i.id);
+ if (err)
+ goto err_out;
+
+ err = prestera_hw_policer_sr_tcm_set(sw, e->police.i.id,
+ arg->police.rate,
+ arg->police.burst);
+ if (err) {
+ prestera_hw_policer_release(sw, e->police.i.id);
+ goto err_out;
+ }
+ e->police.valid = arg->police.valid;
+ }
+ /* counter */
+ if (arg->count.valid) {
+ err = prestera_counter_get(sw->counter, arg->count.client,
+ &e->counter.block,
+ &e->counter.id);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ __prestera_acl_rule_entry_act_destruct(sw, e);
+ return -EINVAL;
+}
+
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_create(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key,
+ struct prestera_acl_rule_entry_arg *arg)
+{
+ struct prestera_acl_rule_entry *e;
+ int err;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ goto err_kzalloc;
+
+ memcpy(&e->key, key, sizeof(*key));
+ e->vtcam_id = arg->vtcam_id;
+ err = __prestera_acl_rule_entry_act_construct(acl->sw, e, arg);
+ if (err)
+ goto err_act_construct;
+
+ err = __prestera_acl_rule_entry2hw_add(acl->sw, e);
+ if (err)
+ goto err_hw_add;
+
+ err = rhashtable_insert_fast(&acl->acl_rule_entry_ht, &e->ht_node,
+ __prestera_acl_rule_entry_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return e;
+
+err_ht_insert:
+ WARN_ON(__prestera_acl_rule_entry2hw_del(acl->sw, e));
+err_hw_add:
+ __prestera_acl_rule_entry_act_destruct(acl->sw, e);
+err_act_construct:
+ kfree(e);
+err_kzalloc:
+ return NULL;
+}
+
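+/* Try to reuse an existing vTCAM of the same lookup whose keymask covers
+ * every field of the requested keymask; take a reference and return its id
+ * on success.
+ */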
+static int __prestera_acl_vtcam_id_try_fit(struct prestera_acl *acl, u8 lookup,
+ void *keymask, u32 *vtcam_id)
+{
+ struct prestera_acl_vtcam *vtcam;
+ int i;
+
+ list_for_each_entry(vtcam, &acl->vtcam_list, list) {
+ if (lookup != vtcam->lookup)
+ continue;
+
+ if (!keymask && !vtcam->is_keymask_set)
+ goto vtcam_found;
+
+ if (!(keymask && vtcam->is_keymask_set))
+ continue;
+
+ /* try to fit with vtcam keymask */
+ for (i = 0; i < __PRESTERA_ACL_RULE_MATCH_TYPE_MAX; i++) {
+ __be32 __keymask = ((__be32 *)keymask)[i];
+
+ if (!__keymask)
+ /* the requested keymask does not use this field */
+ continue;
+
+ if (__keymask & ~vtcam->keymask[i])
+ /* keymask does not fit the vtcam keymask */
+ break;
+ }
+
+ if (i == __PRESTERA_ACL_RULE_MATCH_TYPE_MAX)
+ /* keymask fits vtcam keymask, return it */
+ goto vtcam_found;
+ }
+
+ /* nothing is found */
+ return -ENOENT;
+
+vtcam_found:
+ refcount_inc(&vtcam->refcount);
+ *vtcam_id = vtcam->id;
+ return 0;
+}
+
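+/* Find a vTCAM with the same lookup, direction and keymask or create a new
+ * one. If hardware creation fails, fall back to fitting the keymask into an
+ * already existing vTCAM.
+ */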
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir,
+ void *keymask, u32 *vtcam_id)
+{
+ struct prestera_acl_vtcam *vtcam;
+ u32 new_vtcam_id;
+ int err;
+
+ /* find the vtcam that suits the keymask. We do not expect to have
+ * a big number of vtcams, so a simple list is fine for now
+ */
+ list_for_each_entry(vtcam, &acl->vtcam_list, list) {
+ if (lookup != vtcam->lookup ||
+ dir != vtcam->direction)
+ continue;
+
+ if (!keymask && !vtcam->is_keymask_set) {
+ refcount_inc(&vtcam->refcount);
+ goto vtcam_found;
+ }
+
+ if (keymask && vtcam->is_keymask_set &&
+ !memcmp(keymask, vtcam->keymask, sizeof(vtcam->keymask))) {
+ refcount_inc(&vtcam->refcount);
+ goto vtcam_found;
+ }
+ }
+
+ /* vtcam not found, try to create new one */
+ vtcam = kzalloc(sizeof(*vtcam), GFP_KERNEL);
+ if (!vtcam)
+ return -ENOMEM;
+
+ err = prestera_hw_vtcam_create(acl->sw, lookup, keymask, &new_vtcam_id,
+ dir);
+ if (err) {
+ kfree(vtcam);
+
+ /* cannot create new, try to fit into existing vtcam */
+ if (__prestera_acl_vtcam_id_try_fit(acl, lookup,
+ keymask, &new_vtcam_id))
+ return err;
+
+ *vtcam_id = new_vtcam_id;
+ return 0;
+ }
+
+ vtcam->direction = dir;
+ vtcam->id = new_vtcam_id;
+ vtcam->lookup = lookup;
+ if (keymask) {
+ memcpy(vtcam->keymask, keymask, sizeof(vtcam->keymask));
+ vtcam->is_keymask_set = true;
+ }
+ refcount_set(&vtcam->refcount, 1);
+ list_add_rcu(&vtcam->list, &acl->vtcam_list);
+
+vtcam_found:
+ *vtcam_id = vtcam->id;
+ return 0;
+}
+
+int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id)
+{
+ struct prestera_acl_vtcam *vtcam;
+ int err;
+
+ list_for_each_entry(vtcam, &acl->vtcam_list, list) {
+ if (vtcam_id != vtcam->id)
+ continue;
+
+ if (!refcount_dec_and_test(&vtcam->refcount))
+ return 0;
+
+ err = prestera_hw_vtcam_destroy(acl->sw, vtcam->id);
+ if (err && err != -ENODEV) {
+ refcount_set(&vtcam->refcount, 1);
+ return err;
+ }
+
+ list_del(&vtcam->list);
+ kfree(vtcam);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
int prestera_acl_init(struct prestera_switch *sw)
{
struct prestera_acl *acl;
+ int err;
acl = kzalloc(sizeof(*acl), GFP_KERNEL);
if (!acl)
return -ENOMEM;
+ acl->sw = sw;
INIT_LIST_HEAD(&acl->rules);
+ INIT_LIST_HEAD(&acl->vtcam_list);
+ idr_init(&acl->uid);
+
+ err = rhashtable_init(&acl->acl_rule_entry_ht,
+ &__prestera_acl_rule_entry_ht_params);
+ if (err)
+ goto err_acl_rule_entry_ht_init;
+
+ err = rhashtable_init(&acl->ruleset_ht,
+ &prestera_acl_ruleset_ht_params);
+ if (err)
+ goto err_ruleset_ht_init;
+
sw->acl = acl;
- acl->sw = sw;
return 0;
+
+err_ruleset_ht_init:
+ rhashtable_destroy(&acl->acl_rule_entry_ht);
+err_acl_rule_entry_ht_init:
+ kfree(acl);
+ return err;
}
void prestera_acl_fini(struct prestera_switch *sw)
{
struct prestera_acl *acl = sw->acl;
+ WARN_ON(!idr_is_empty(&acl->uid));
+ idr_destroy(&acl->uid);
+
+ WARN_ON(!list_empty(&acl->vtcam_list));
WARN_ON(!list_empty(&acl->rules));
+
+ rhashtable_destroy(&acl->ruleset_ht);
+ rhashtable_destroy(&acl->acl_rule_entry_ht);
+
kfree(acl);
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
index 39b7869be659..a35cc0609a1d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_acl.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
@@ -1,114 +1,162 @@
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
-/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved. */
#ifndef _PRESTERA_ACL_H_
#define _PRESTERA_ACL_H_
-enum prestera_acl_rule_match_entry_type {
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE = 1,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE,
- PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE
+#include <linux/types.h>
+#include "prestera_counter.h"
+
+#define PRESTERA_ACL_KEYMASK_PCL_ID 0x3FF
+#define PRESTERA_ACL_KEYMASK_PCL_ID_USER \
+ (PRESTERA_ACL_KEYMASK_PCL_ID & 0x00FF)
+#define PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN \
+ (PRESTERA_ACL_KEYMASK_PCL_ID & 0xFF00)
+#define PRESTERA_ACL_CHAIN_MASK \
+ (PRESTERA_ACL_KEYMASK_PCL_ID >> 8)
+
+#define PRESTERA_ACL_PCL_ID_MAKE(uid, chain_id) \
+ (((uid) & PRESTERA_ACL_KEYMASK_PCL_ID_USER) | \
+ (((chain_id) << 8) & PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN))
+
+#define rule_match_set_n(match_p, type, val_p, size) \
+ memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \
+ val_p, size)
+#define rule_match_set(match_p, type, val) \
+ memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \
+ &(val), sizeof(val))
+
+enum prestera_acl_match_type {
+ PRESTERA_ACL_RULE_MATCH_TYPE_PCL_ID,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_TYPE,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_DMAC_0,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_DMAC_1,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_SMAC_0,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ETH_SMAC_1,
+ PRESTERA_ACL_RULE_MATCH_TYPE_IP_PROTO,
+ PRESTERA_ACL_RULE_MATCH_TYPE_SYS_PORT,
+ PRESTERA_ACL_RULE_MATCH_TYPE_SYS_DEV,
+ PRESTERA_ACL_RULE_MATCH_TYPE_IP_SRC,
+ PRESTERA_ACL_RULE_MATCH_TYPE_IP_DST,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_SRC,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_DST,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_RANGE_SRC,
+ PRESTERA_ACL_RULE_MATCH_TYPE_L4_PORT_RANGE_DST,
+ PRESTERA_ACL_RULE_MATCH_TYPE_VLAN_ID,
+ PRESTERA_ACL_RULE_MATCH_TYPE_VLAN_TPID,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ICMP_TYPE,
+ PRESTERA_ACL_RULE_MATCH_TYPE_ICMP_CODE,
+
+ __PRESTERA_ACL_RULE_MATCH_TYPE_MAX
};
enum prestera_acl_rule_action {
- PRESTERA_ACL_RULE_ACTION_ACCEPT,
- PRESTERA_ACL_RULE_ACTION_DROP,
- PRESTERA_ACL_RULE_ACTION_TRAP
+ PRESTERA_ACL_RULE_ACTION_ACCEPT = 0,
+ PRESTERA_ACL_RULE_ACTION_DROP = 1,
+ PRESTERA_ACL_RULE_ACTION_TRAP = 2,
+ PRESTERA_ACL_RULE_ACTION_JUMP = 5,
+ PRESTERA_ACL_RULE_ACTION_COUNT = 7,
+ PRESTERA_ACL_RULE_ACTION_POLICE = 8,
+
+ PRESTERA_ACL_RULE_ACTION_MAX
};
-struct prestera_switch;
-struct prestera_port;
-struct prestera_acl_rule;
-struct prestera_acl_ruleset;
+enum {
+ PRESTERA_ACL_IFACE_TYPE_PORT,
+ PRESTERA_ACL_IFACE_TYPE_INDEX
+};
-struct prestera_flow_block_binding {
- struct list_head list;
- struct prestera_port *port;
- int span_id;
+struct prestera_acl_match {
+ __be32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __be32 mask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
};
-struct prestera_flow_block {
- struct list_head binding_list;
- struct prestera_switch *sw;
- struct net *net;
- struct prestera_acl_ruleset *ruleset;
- struct flow_block_cb *block_cb;
+struct prestera_acl_action_jump {
+ u32 index;
};
-struct prestera_acl_rule_action_entry {
- struct list_head list;
- enum prestera_acl_rule_action id;
+struct prestera_acl_action_police {
+ u32 id;
};
-struct prestera_acl_rule_match_entry {
- struct list_head list;
- enum prestera_acl_rule_match_entry_type type;
+struct prestera_acl_action_count {
+ u32 id;
+};
+
+struct prestera_acl_rule_entry_key {
+ u32 prio;
+ struct prestera_acl_match match;
+};
+
+struct prestera_acl_hw_action_info {
+ enum prestera_acl_rule_action id;
union {
+ struct prestera_acl_action_police police;
+ struct prestera_acl_action_count count;
+ struct prestera_acl_action_jump jump;
+ };
+};
+
+/* This struct (arg) is used only as a parameter to acl_rule_entry_create().
+ * It must be flat. Object keys it contains will be resolved to object links
+ * before being saved in the acl_rule_entry struct.
+ */
+struct prestera_acl_rule_entry_arg {
+ u32 vtcam_id;
+ struct {
struct {
- u8 key;
- u8 mask;
- } u8;
- struct {
- u16 key;
- u16 mask;
- } u16;
+ u8 valid:1;
+ } accept, drop, trap;
struct {
- u32 key;
- u32 mask;
- } u32;
+ struct prestera_acl_action_jump i;
+ u8 valid:1;
+ } jump;
struct {
- u64 key;
- u64 mask;
- } u64;
+ u8 valid:1;
+ u64 rate;
+ u64 burst;
+ bool ingress;
+ } police;
struct {
- u8 key[ETH_ALEN];
- u8 mask[ETH_ALEN];
- } mac;
- } keymask;
+ u8 valid:1;
+ u32 client;
+ } count;
+ };
+};
+
+struct prestera_acl_rule {
+ struct rhash_head ht_node; /* Member of acl HT */
+ struct list_head list;
+ struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl_ruleset *jump_ruleset;
+ unsigned long cookie;
+ u32 chain_index;
+ u32 priority;
+ struct prestera_acl_rule_entry_key re_key;
+ struct prestera_acl_rule_entry_arg re_arg;
+ struct prestera_acl_rule_entry *re;
};
+struct prestera_acl_iface {
+ union {
+ struct prestera_port *port;
+ u32 index;
+ };
+ u8 type;
+};
+
+struct prestera_acl;
+struct prestera_switch;
+struct prestera_flow_block;
+
int prestera_acl_init(struct prestera_switch *sw);
void prestera_acl_fini(struct prestera_switch *sw);
-struct prestera_flow_block *
-prestera_acl_block_create(struct prestera_switch *sw, struct net *net);
-void prestera_acl_block_destroy(struct prestera_flow_block *block);
-struct net *prestera_acl_block_net(struct prestera_flow_block *block);
-struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block);
-int prestera_acl_block_bind(struct prestera_flow_block *block,
- struct prestera_port *port);
-int prestera_acl_block_unbind(struct prestera_flow_block *block,
- struct prestera_port *port);
-struct prestera_acl_ruleset *
-prestera_acl_block_ruleset_get(struct prestera_flow_block *block);
+
struct prestera_acl_rule *
-prestera_acl_rule_create(struct prestera_flow_block *block,
- unsigned long cookie);
-u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule);
+prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
+ unsigned long cookie, u32 chain_index);
void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
u32 priority);
-u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule);
-struct list_head *
-prestera_acl_rule_action_list_get(struct prestera_acl_rule *rule);
-u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule);
-u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule);
-int prestera_acl_rule_action_add(struct prestera_acl_rule *rule,
- struct prestera_acl_rule_action_entry *entry);
-struct list_head *
-prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule);
-int prestera_acl_rule_match_add(struct prestera_acl_rule *rule,
- struct prestera_acl_rule_match_entry *entry);
void prestera_acl_rule_destroy(struct prestera_acl_rule *rule);
struct prestera_acl_rule *
prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
@@ -117,8 +165,45 @@ int prestera_acl_rule_add(struct prestera_switch *sw,
struct prestera_acl_rule *rule);
void prestera_acl_rule_del(struct prestera_switch *sw,
struct prestera_acl_rule *rule);
-int prestera_acl_rule_get_stats(struct prestera_switch *sw,
+int prestera_acl_rule_get_stats(struct prestera_acl *acl,
struct prestera_acl_rule *rule,
u64 *packets, u64 *bytes, u64 *last_use);
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_find(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key);
+void prestera_acl_rule_entry_destroy(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry *e);
+struct prestera_acl_rule_entry *
+prestera_acl_rule_entry_create(struct prestera_acl *acl,
+ struct prestera_acl_rule_entry_key *key,
+ struct prestera_acl_rule_entry_arg *arg);
+struct prestera_acl_ruleset *
+prestera_acl_ruleset_get(struct prestera_acl *acl,
+ struct prestera_flow_block *block,
+ u32 chain_index);
+struct prestera_acl_ruleset *
+prestera_acl_ruleset_lookup(struct prestera_acl *acl,
+ struct prestera_flow_block *block,
+ u32 chain_index);
+int prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
+ void *keymask);
+bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset);
+int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset);
+void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset);
+int prestera_acl_ruleset_bind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port);
+int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset,
+ struct prestera_port *port);
+u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset);
+void prestera_acl_ruleset_prio_get(struct prestera_acl_ruleset *ruleset,
+ u32 *prio_min, u32 *prio_max);
+void
+prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule,
+ u16 pcl_id);
+
+int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup, u8 dir,
+ void *keymask, u32 *vtcam_id);
+int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id);
+int prestera_acl_chain_to_client(u32 chain_index, bool ingress, u32 *client);
#endif /* _PRESTERA_ACL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.c b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
new file mode 100644
index 000000000000..4cd53a2dae46
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2021 Marvell International Ltd. All rights reserved */
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_acl.h"
+#include "prestera_counter.h"
+
+#define COUNTER_POLL_TIME (msecs_to_jiffies(1000))
+#define COUNTER_RESCHED_TIME (msecs_to_jiffies(50))
+#define COUNTER_BULK_SIZE (256)
+
+struct prestera_counter {
+ struct prestera_switch *sw;
+ struct delayed_work stats_dw;
+ struct mutex mtx; /* protect block_list */
+ struct prestera_counter_block **block_list;
+ u32 total_read;
+ u32 block_list_len;
+ u32 curr_idx;
+ bool is_fetching;
+};
+
+struct prestera_counter_block {
+ struct list_head list;
+ u32 id;
+ u32 offset;
+ u32 num_counters;
+ u32 client;
+ struct idr counter_idr;
+ refcount_t refcnt;
+ struct mutex mtx; /* protect stats and counter_idr */
+ struct prestera_counter_stats *stats;
+ u8 *counter_flag;
+ bool is_updating;
+ bool full;
+};
+
+enum {
+ COUNTER_FLAG_READY = 0,
+ COUNTER_FLAG_INVALID = 1
+};
+
+static bool
+prestera_counter_is_ready(struct prestera_counter_block *block, u32 id)
+{
+ return block->counter_flag[id - block->offset] == COUNTER_FLAG_READY;
+}
+
+static void prestera_counter_lock(struct prestera_counter *counter)
+{
+ mutex_lock(&counter->mtx);
+}
+
+static void prestera_counter_unlock(struct prestera_counter *counter)
+{
+ mutex_unlock(&counter->mtx);
+}
+
+static void prestera_counter_block_lock(struct prestera_counter_block *block)
+{
+ mutex_lock(&block->mtx);
+}
+
+static void prestera_counter_block_unlock(struct prestera_counter_block *block)
+{
+ mutex_unlock(&block->mtx);
+}
+
+static bool prestera_counter_block_incref(struct prestera_counter_block *block)
+{
+ return refcount_inc_not_zero(&block->refcnt);
+}
+
+static bool prestera_counter_block_decref(struct prestera_counter_block *block)
+{
+ return refcount_dec_and_test(&block->refcnt);
+}
+
+/* must be called with prestera_counter_block_lock() held */
+static void prestera_counter_stats_clear(struct prestera_counter_block *block,
+ u32 counter_id)
+{
+ memset(&block->stats[counter_id - block->offset], 0,
+ sizeof(*block->stats));
+}
+
+static struct prestera_counter_block *
+prestera_counter_block_lookup_not_full(struct prestera_counter *counter,
+ u32 client)
+{
+ u32 i;
+
+ prestera_counter_lock(counter);
+ for (i = 0; i < counter->block_list_len; i++) {
+ if (counter->block_list[i] &&
+ counter->block_list[i]->client == client &&
+ !counter->block_list[i]->full &&
+ prestera_counter_block_incref(counter->block_list[i])) {
+ prestera_counter_unlock(counter);
+ return counter->block_list[i];
+ }
+ }
+ prestera_counter_unlock(counter);
+
+ return NULL;
+}
+
+static int prestera_counter_block_list_add(struct prestera_counter *counter,
+ struct prestera_counter_block *block)
+{
+ struct prestera_counter_block **arr;
+ u32 i;
+
+ prestera_counter_lock(counter);
+
+ for (i = 0; i < counter->block_list_len; i++) {
+ if (counter->block_list[i])
+ continue;
+
+ counter->block_list[i] = block;
+ prestera_counter_unlock(counter);
+ return 0;
+ }
+
+ arr = krealloc(counter->block_list, (counter->block_list_len + 1) *
+ sizeof(*counter->block_list), GFP_KERNEL);
+ if (!arr) {
+ prestera_counter_unlock(counter);
+ return -ENOMEM;
+ }
+
+ counter->block_list = arr;
+ counter->block_list[counter->block_list_len] = block;
+ counter->block_list_len++;
+ prestera_counter_unlock(counter);
+ return 0;
+}
+
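+/* Return a block with free counters for the given client, allocating a new
+ * hardware counter block if all existing ones are full.
+ */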
+static struct prestera_counter_block *
+prestera_counter_block_get(struct prestera_counter *counter, u32 client)
+{
+ struct prestera_counter_block *block;
+ int err;
+
+ block = prestera_counter_block_lookup_not_full(counter, client);
+ if (block)
+ return block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return ERR_PTR(-ENOMEM);
+
+ err = prestera_hw_counter_block_get(counter->sw, client,
+ &block->id, &block->offset,
+ &block->num_counters);
+ if (err)
+ goto err_block;
+
+ block->stats = kcalloc(block->num_counters,
+ sizeof(*block->stats), GFP_KERNEL);
+ if (!block->stats) {
+ err = -ENOMEM;
+ goto err_stats;
+ }
+
+ block->counter_flag = kcalloc(block->num_counters,
+ sizeof(*block->counter_flag),
+ GFP_KERNEL);
+ if (!block->counter_flag) {
+ err = -ENOMEM;
+ goto err_flag;
+ }
+
+ block->client = client;
+ mutex_init(&block->mtx);
+ refcount_set(&block->refcnt, 1);
+ idr_init_base(&block->counter_idr, block->offset);
+
+ err = prestera_counter_block_list_add(counter, block);
+ if (err)
+ goto err_list_add;
+
+ return block;
+
+err_list_add:
+ idr_destroy(&block->counter_idr);
+ mutex_destroy(&block->mtx);
+ kfree(block->counter_flag);
+err_flag:
+ kfree(block->stats);
+err_stats:
+ prestera_hw_counter_block_release(counter->sw, block->id);
+err_block:
+ kfree(block);
+ return ERR_PTR(err);
+}
+
+static void prestera_counter_block_put(struct prestera_counter *counter,
+ struct prestera_counter_block *block)
+{
+ u32 i;
+
+ if (!prestera_counter_block_decref(block))
+ return;
+
+ prestera_counter_lock(counter);
+ for (i = 0; i < counter->block_list_len; i++) {
+ if (counter->block_list[i] &&
+ counter->block_list[i]->id == block->id) {
+ counter->block_list[i] = NULL;
+ break;
+ }
+ }
+ prestera_counter_unlock(counter);
+
+ WARN_ON(!idr_is_empty(&block->counter_idr));
+
+ prestera_hw_counter_block_release(counter->sw, block->id);
+ idr_destroy(&block->counter_idr);
+ mutex_destroy(&block->mtx);
+ kfree(block->stats);
+ kfree(block);
+}
+
+static int prestera_counter_get_vacant(struct prestera_counter_block *block,
+ u32 *id)
+{
+ int free_id;
+
+ if (block->full)
+ return -ENOSPC;
+
+ prestera_counter_block_lock(block);
+ free_id = idr_alloc_cyclic(&block->counter_idr, NULL, block->offset,
+ block->offset + block->num_counters,
+ GFP_KERNEL);
+ if (free_id < 0) {
+ if (free_id == -ENOSPC)
+ block->full = true;
+
+ prestera_counter_block_unlock(block);
+ return free_id;
+ }
+ *id = free_id;
+ prestera_counter_block_unlock(block);
+
+ return 0;
+}
+
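+/* Allocate a counter for the given client. Retries with another block when
+ * the chosen block turns out to be full; counters allocated while the block
+ * statistics are being updated are marked invalid until the next poll.
+ */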
+int prestera_counter_get(struct prestera_counter *counter, u32 client,
+ struct prestera_counter_block **bl, u32 *counter_id)
+{
+ struct prestera_counter_block *block;
+ int err;
+ u32 id;
+
+get_next_block:
+ block = prestera_counter_block_get(counter, client);
+ if (IS_ERR(block))
+ return PTR_ERR(block);
+
+ err = prestera_counter_get_vacant(block, &id);
+ if (err) {
+ prestera_counter_block_put(counter, block);
+
+ if (err == -ENOSPC)
+ goto get_next_block;
+
+ return err;
+ }
+
+ prestera_counter_block_lock(block);
+ if (block->is_updating)
+ block->counter_flag[id - block->offset] = COUNTER_FLAG_INVALID;
+ prestera_counter_block_unlock(block);
+
+ *counter_id = id;
+ *bl = block;
+
+ return 0;
+}
+
+void prestera_counter_put(struct prestera_counter *counter,
+ struct prestera_counter_block *block, u32 counter_id)
+{
+ if (!block)
+ return;
+
+ prestera_counter_block_lock(block);
+ idr_remove(&block->counter_idr, counter_id);
+ block->full = false;
+ prestera_counter_stats_clear(block, counter_id);
+ prestera_counter_block_unlock(block);
+
+ prestera_hw_counter_clear(counter->sw, block->id, counter_id);
+ prestera_counter_block_put(counter, block);
+}
+
+static u32 prestera_counter_block_idx_next(struct prestera_counter *counter,
+ u32 curr_idx)
+{
+ u32 idx, i, start = curr_idx + 1;
+
+ prestera_counter_lock(counter);
+ for (i = 0; i < counter->block_list_len; i++) {
+ idx = (start + i) % counter->block_list_len;
+ if (!counter->block_list[idx])
+ continue;
+
+ prestera_counter_unlock(counter);
+ return idx;
+ }
+ prestera_counter_unlock(counter);
+
+ return 0;
+}
+
+static struct prestera_counter_block *
+prestera_counter_block_get_by_idx(struct prestera_counter *counter, u32 idx)
+{
+ if (idx >= counter->block_list_len)
+ return NULL;
+
+ prestera_counter_lock(counter);
+
+ if (!counter->block_list[idx] ||
+ !prestera_counter_block_incref(counter->block_list[idx])) {
+ prestera_counter_unlock(counter);
+ return NULL;
+ }
+
+ prestera_counter_unlock(counter);
+ return counter->block_list[idx];
+}
+
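+/* Counter polling state machine: trigger a hardware dump of the current
+ * block, read the statistics back in COUNTER_BULK_SIZE chunks and, once the
+ * whole block is read, revalidate freshly allocated counters and move on to
+ * the next block.
+ */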
+static void prestera_counter_stats_work(struct work_struct *work)
+{
+ struct delayed_work *dl_work =
+ container_of(work, struct delayed_work, work);
+ struct prestera_counter *counter =
+ container_of(dl_work, struct prestera_counter, stats_dw);
+ struct prestera_counter_block *block;
+ u32 resched_time = COUNTER_POLL_TIME;
+ u32 count = COUNTER_BULK_SIZE;
+ bool done = false;
+ int err;
+ u32 i;
+
+ block = prestera_counter_block_get_by_idx(counter, counter->curr_idx);
+ if (!block) {
+ if (counter->is_fetching)
+ goto abort;
+
+ goto next;
+ }
+
+ if (!counter->is_fetching) {
+ err = prestera_hw_counter_trigger(counter->sw, block->id);
+ if (err)
+ goto abort;
+
+ prestera_counter_block_lock(block);
+ block->is_updating = true;
+ prestera_counter_block_unlock(block);
+
+ counter->is_fetching = true;
+ counter->total_read = 0;
+ resched_time = COUNTER_RESCHED_TIME;
+ goto resched;
+ }
+
+ prestera_counter_block_lock(block);
+ err = prestera_hw_counters_get(counter->sw, counter->total_read,
+ &count, &done,
+ &block->stats[counter->total_read]);
+ prestera_counter_block_unlock(block);
+ if (err)
+ goto abort;
+
+ counter->total_read += count;
+ if (!done || counter->total_read < block->num_counters) {
+ resched_time = COUNTER_RESCHED_TIME;
+ goto resched;
+ }
+
+ for (i = 0; i < block->num_counters; i++) {
+ if (block->counter_flag[i] == COUNTER_FLAG_INVALID) {
+ prestera_counter_block_lock(block);
+ block->counter_flag[i] = COUNTER_FLAG_READY;
+ memset(&block->stats[i], 0, sizeof(*block->stats));
+ prestera_counter_block_unlock(block);
+ }
+ }
+
+ prestera_counter_block_lock(block);
+ block->is_updating = false;
+ prestera_counter_block_unlock(block);
+
+ goto next;
+abort:
+ prestera_hw_counter_abort(counter->sw);
+next:
+ counter->is_fetching = false;
+ counter->curr_idx =
+ prestera_counter_block_idx_next(counter, counter->curr_idx);
+resched:
+ if (block)
+ prestera_counter_block_put(counter, block);
+
+ schedule_delayed_work(&counter->stats_dw, resched_time);
+}
+
+/* Can be executed without rtnl_lock().
+ * So pay attention when changing anything here.
+ */
+int prestera_counter_stats_get(struct prestera_counter *counter,
+ struct prestera_counter_block *block,
+ u32 counter_id, u64 *packets, u64 *bytes)
+{
+ if (!block || !prestera_counter_is_ready(block, counter_id)) {
+ *packets = 0;
+ *bytes = 0;
+ return 0;
+ }
+
+ prestera_counter_block_lock(block);
+ *packets = block->stats[counter_id - block->offset].packets;
+ *bytes = block->stats[counter_id - block->offset].bytes;
+
+ prestera_counter_stats_clear(block, counter_id);
+ prestera_counter_block_unlock(block);
+
+ return 0;
+}
+
+int prestera_counter_init(struct prestera_switch *sw)
+{
+ struct prestera_counter *counter;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return -ENOMEM;
+
+ counter->block_list = kzalloc(sizeof(*counter->block_list), GFP_KERNEL);
+ if (!counter->block_list) {
+ kfree(counter);
+ return -ENOMEM;
+ }
+
+ mutex_init(&counter->mtx);
+ counter->block_list_len = 1;
+ counter->sw = sw;
+ sw->counter = counter;
+
+ INIT_DELAYED_WORK(&counter->stats_dw, prestera_counter_stats_work);
+ schedule_delayed_work(&counter->stats_dw, COUNTER_POLL_TIME);
+
+ return 0;
+}
+
+void prestera_counter_fini(struct prestera_switch *sw)
+{
+ struct prestera_counter *counter = sw->counter;
+ u32 i;
+
+ cancel_delayed_work_sync(&counter->stats_dw);
+
+ for (i = 0; i < counter->block_list_len; i++)
+ WARN_ON(counter->block_list[i]);
+
+ mutex_destroy(&counter->mtx);
+ kfree(counter->block_list);
+ kfree(counter);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_counter.h b/drivers/net/ethernet/marvell/prestera/prestera_counter.h
new file mode 100644
index 000000000000..ad6b73907799
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_counter.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2021 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_COUNTER_H_
+#define _PRESTERA_COUNTER_H_
+
+#include <linux/types.h>
+
+struct prestera_counter_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+struct prestera_switch;
+struct prestera_counter;
+struct prestera_counter_block;
+
+int prestera_counter_init(struct prestera_switch *sw);
+void prestera_counter_fini(struct prestera_switch *sw);
+
+int prestera_counter_get(struct prestera_counter *counter, u32 client,
+ struct prestera_counter_block **block,
+ u32 *counter_id);
+void prestera_counter_put(struct prestera_counter *counter,
+ struct prestera_counter_block *block, u32 counter_id);
+int prestera_counter_stats_get(struct prestera_counter *counter,
+ struct prestera_counter_block *block,
+ u32 counter_id, u64 *packets, u64 *bytes);
+
+#endif /* _PRESTERA_COUNTER_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
index 06279cd6da67..2a4c9df4eb79 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -355,11 +355,6 @@ static int prestera_dl_info_get(struct devlink *dl,
{
struct prestera_switch *sw = devlink_priv(dl);
char buf[16];
- int err;
-
- err = devlink_info_driver_name_put(req, PRESTERA_DRV_NAME);
- if (err)
- return err;
snprintf(buf, sizeof(buf), "%d.%d.%d",
sw->dev->fw_rev.maj,
@@ -445,23 +440,6 @@ void prestera_devlink_port_unregister(struct prestera_port *port)
devlink_port_unregister(&port->dl_port);
}
-void prestera_devlink_port_set(struct prestera_port *port)
-{
- devlink_port_type_eth_set(&port->dl_port, port->dev);
-}
-
-void prestera_devlink_port_clear(struct prestera_port *port)
-{
- devlink_port_type_clear(&port->dl_port);
-}
-
-struct devlink_port *prestera_devlink_get_port(struct net_device *dev)
-{
- struct prestera_port *port = netdev_priv(dev);
-
- return &port->dl_port;
-}
-
int prestera_devlink_traps_register(struct prestera_switch *sw)
{
const u32 groups_count = ARRAY_SIZE(prestera_trap_groups_arr);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
index b322295bad3a..bf84ad6fd87e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
@@ -15,11 +15,6 @@ void prestera_devlink_unregister(struct prestera_switch *sw);
int prestera_devlink_port_register(struct prestera_port *port);
void prestera_devlink_port_unregister(struct prestera_port *port);
-void prestera_devlink_port_set(struct prestera_port *port);
-void prestera_devlink_port_clear(struct prestera_port *port);
-
-struct devlink_port *prestera_devlink_get_port(struct net_device *dev);
-
void prestera_devlink_trap_report(struct prestera_port *port,
struct sk_buff *skb, u8 cpu_code);
int prestera_devlink_traps_register(struct prestera_switch *sw);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
index 40d5b89573bb..2f52daba58e6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -300,8 +300,8 @@ static void prestera_ethtool_get_drvinfo(struct net_device *dev,
struct prestera_port *port = netdev_priv(dev);
struct prestera_switch *sw = port->sw;
- strlcpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(prestera_dev(sw)),
+ strscpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(prestera_dev(sw)),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
@@ -521,6 +521,9 @@ prestera_ethtool_get_link_ksettings(struct net_device *dev,
ecmd->base.speed = SPEED_UNKNOWN;
ecmd->base.duplex = DUPLEX_UNKNOWN;
+ if (port->phy_link)
+ return phylink_ethtool_ksettings_get(port->phy_link, ecmd);
+
ecmd->base.autoneg = port->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
if (port->caps.type == PRESTERA_PORT_TYPE_TP) {
@@ -648,6 +651,9 @@ prestera_ethtool_set_link_ksettings(struct net_device *dev,
u8 adver_fec;
int err;
+ if (port->phy_link)
+ return phylink_ethtool_ksettings_set(port->phy_link, ecmd);
+
err = prestera_port_type_set(ecmd, port);
if (err)
return err;
@@ -782,28 +788,6 @@ static int prestera_ethtool_nway_reset(struct net_device *dev)
return -EINVAL;
}
-void prestera_ethtool_port_state_changed(struct prestera_port *port,
- struct prestera_port_event *evt)
-{
- struct prestera_port_mac_state *smac = &port->state_mac;
-
- smac->oper = evt->data.mac.oper;
-
- if (smac->oper) {
- smac->mode = evt->data.mac.mode;
- smac->speed = evt->data.mac.speed;
- smac->duplex = evt->data.mac.duplex;
- smac->fc = evt->data.mac.fc;
- smac->fec = evt->data.mac.fec;
- } else {
- smac->mode = PRESTERA_MAC_MODE_MAX;
- smac->speed = SPEED_UNKNOWN;
- smac->duplex = DUPLEX_UNKNOWN;
- smac->fc = 0;
- smac->fec = 0;
- }
-}
-
const struct ethtool_ops prestera_ethtool_ops = {
.get_drvinfo = prestera_ethtool_get_drvinfo,
.get_link_ksettings = prestera_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
index 9eb18e99dea6..bd5600886bc6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
@@ -11,7 +11,4 @@ struct prestera_port;
extern const struct ethtool_ops prestera_ethtool_ops;
-void prestera_ethtool_port_state_changed(struct prestera_port *port,
- struct prestera_port_event *evt);
-
#endif /* _PRESTERA_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
index c9891e968259..9f4267f326b0 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
@@ -7,8 +7,9 @@
#include "prestera.h"
#include "prestera_acl.h"
#include "prestera_flow.h"
-#include "prestera_span.h"
#include "prestera_flower.h"
+#include "prestera_matchall.h"
+#include "prestera_span.h"
static LIST_HEAD(prestera_block_cb_list);
@@ -17,9 +18,9 @@ static int prestera_flow_block_mall_cb(struct prestera_flow_block *block,
{
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
- return prestera_span_replace(block, f);
+ return prestera_mall_replace(block, f);
case TC_CLSMATCHALL_DESTROY:
- prestera_span_destroy(block);
+ prestera_mall_destroy(block);
return 0;
default:
return -EOPNOTSUPP;
@@ -29,9 +30,6 @@ static int prestera_flow_block_mall_cb(struct prestera_flow_block *block,
static int prestera_flow_block_flower_cb(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
- if (f->common.chain_index != 0)
- return -EOPNOTSUPP;
-
switch (f->command) {
case FLOW_CLS_REPLACE:
return prestera_flower_replace(block, f);
@@ -40,6 +38,11 @@ static int prestera_flow_block_flower_cb(struct prestera_flow_block *block,
return 0;
case FLOW_CLS_STATS:
return prestera_flower_stats(block, f);
+ case FLOW_CLS_TMPLT_CREATE:
+ return prestera_flower_tmplt_create(block, f);
+ case FLOW_CLS_TMPLT_DESTROY:
+ prestera_flower_tmplt_destroy(block, f);
+ return 0;
default:
return -EOPNOTSUPP;
}
@@ -60,17 +63,117 @@ static int prestera_flow_block_cb(enum tc_setup_type type,
}
}
+static void prestera_flow_block_destroy(void *cb_priv)
+{
+ struct prestera_flow_block *block = cb_priv;
+
+ prestera_flower_template_cleanup(block);
+
+ WARN_ON(!list_empty(&block->template_list));
+ WARN_ON(!list_empty(&block->binding_list));
+
+ kfree(block);
+}
+
+static struct prestera_flow_block *
+prestera_flow_block_create(struct prestera_switch *sw,
+ struct net *net,
+ bool ingress)
+{
+ struct prestera_flow_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+
+ INIT_LIST_HEAD(&block->binding_list);
+ INIT_LIST_HEAD(&block->template_list);
+ block->net = net;
+ block->sw = sw;
+ block->mall.prio_min = UINT_MAX;
+ block->mall.prio_max = 0;
+ block->mall.bound = false;
+ block->ingress = ingress;
+
+ return block;
+}
+
static void prestera_flow_block_release(void *cb_priv)
{
struct prestera_flow_block *block = cb_priv;
- prestera_acl_block_destroy(block);
+ prestera_flow_block_destroy(block);
+}
+
+static bool
+prestera_flow_block_is_bound(const struct prestera_flow_block *block)
+{
+ return block->ruleset_zero;
+}
+
+static struct prestera_flow_block_binding *
+prestera_flow_block_lookup(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ if (binding->port == port)
+ return binding;
+
+ return NULL;
+}
+
+static int prestera_flow_block_bind(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+ int err;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding)
+ return -ENOMEM;
+
+ binding->span_id = PRESTERA_SPAN_INVALID_ID;
+ binding->port = port;
+
+ if (prestera_flow_block_is_bound(block)) {
+ err = prestera_acl_ruleset_bind(block->ruleset_zero, port);
+ if (err)
+ goto err_ruleset_bind;
+ }
+
+ list_add(&binding->list, &block->binding_list);
+ return 0;
+
+err_ruleset_bind:
+ kfree(binding);
+ return err;
+}
+
+static int prestera_flow_block_unbind(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+
+ binding = prestera_flow_block_lookup(block, port);
+ if (!binding)
+ return -ENOENT;
+
+ list_del(&binding->list);
+
+ if (prestera_flow_block_is_bound(block))
+ prestera_acl_ruleset_unbind(block->ruleset_zero, port);
+
+ kfree(binding);
+ return 0;
}
static struct prestera_flow_block *
prestera_flow_block_get(struct prestera_switch *sw,
struct flow_block_offload *f,
- bool *register_block)
+ bool *register_block,
+ bool ingress)
{
struct prestera_flow_block *block;
struct flow_block_cb *block_cb;
@@ -78,7 +181,7 @@ prestera_flow_block_get(struct prestera_switch *sw,
block_cb = flow_block_cb_lookup(f->block,
prestera_flow_block_cb, sw);
if (!block_cb) {
- block = prestera_acl_block_create(sw, f->net);
+ block = prestera_flow_block_create(sw, f->net, ingress);
if (!block)
return ERR_PTR(-ENOMEM);
@@ -86,7 +189,7 @@ prestera_flow_block_get(struct prestera_switch *sw,
sw, block,
prestera_flow_block_release);
if (IS_ERR(block_cb)) {
- prestera_acl_block_destroy(block);
+ prestera_flow_block_destroy(block);
return ERR_CAST(block_cb);
}
@@ -110,11 +213,11 @@ static void prestera_flow_block_put(struct prestera_flow_block *block)
return;
flow_block_cb_free(block_cb);
- prestera_acl_block_destroy(block);
+ prestera_flow_block_destroy(block);
}
static int prestera_setup_flow_block_bind(struct prestera_port *port,
- struct flow_block_offload *f)
+ struct flow_block_offload *f, bool ingress)
{
struct prestera_switch *sw = port->sw;
struct prestera_flow_block *block;
@@ -122,13 +225,13 @@ static int prestera_setup_flow_block_bind(struct prestera_port *port,
bool register_block;
int err;
- block = prestera_flow_block_get(sw, f, &register_block);
+ block = prestera_flow_block_get(sw, f, &register_block, ingress);
if (IS_ERR(block))
return PTR_ERR(block);
block_cb = block->block_cb;
- err = prestera_acl_block_bind(block, port);
+ err = prestera_flow_block_bind(block, port);
if (err)
goto err_block_bind;
@@ -137,7 +240,11 @@ static int prestera_setup_flow_block_bind(struct prestera_port *port,
list_add_tail(&block_cb->driver_list, &prestera_block_cb_list);
}
- port->flow_block = block;
+ if (ingress)
+ port->ingress_flow_block = block;
+ else
+ port->egress_flow_block = block;
+
return 0;
err_block_bind:
@@ -147,7 +254,7 @@ err_block_bind:
}
static void prestera_setup_flow_block_unbind(struct prestera_port *port,
- struct flow_block_offload *f)
+ struct flow_block_offload *f, bool ingress)
{
struct prestera_switch *sw = port->sw;
struct prestera_flow_block *block;
@@ -160,9 +267,9 @@ static void prestera_setup_flow_block_unbind(struct prestera_port *port,
block = flow_block_cb_priv(block_cb);
- prestera_span_destroy(block);
+ prestera_mall_destroy(block);
- err = prestera_acl_block_unbind(block, port);
+ err = prestera_flow_block_unbind(block, port);
if (err)
goto error;
@@ -171,24 +278,38 @@ static void prestera_setup_flow_block_unbind(struct prestera_port *port,
list_del(&block_cb->driver_list);
}
error:
- port->flow_block = NULL;
+ if (ingress)
+ port->ingress_flow_block = NULL;
+ else
+ port->egress_flow_block = NULL;
}
-int prestera_flow_block_setup(struct prestera_port *port,
- struct flow_block_offload *f)
+static int prestera_setup_flow_block_clsact(struct prestera_port *port,
+ struct flow_block_offload *f,
+ bool ingress)
{
- if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
f->driver_block_list = &prestera_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
- return prestera_setup_flow_block_bind(port, f);
+ return prestera_setup_flow_block_bind(port, f, ingress);
case FLOW_BLOCK_UNBIND:
- prestera_setup_flow_block_unbind(port, f);
+ prestera_setup_flow_block_unbind(port, f, ingress);
return 0;
default:
return -EOPNOTSUPP;
}
}
+
+int prestera_flow_block_setup(struct prestera_port *port,
+ struct flow_block_offload *f)
+{
+ switch (f->binder_type) {
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
+ return prestera_setup_flow_block_clsact(port, f, true);
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
+ return prestera_setup_flow_block_clsact(port, f, false);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
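
With this change prestera_flow_block_setup() dispatches on the binder type, so the same entry point serves both ingress and egress clsact blocks. A hedged sketch of how a port's ndo_setup_tc callback could hand TC_SETUP_BLOCK requests to it; the callback name is hypothetical and the actual wiring lives outside this diff:

/* Illustrative sketch only, not part of the patch. */
static int example_port_setup_tc(struct net_device *dev,
                                 enum tc_setup_type type, void *type_data)
{
        struct prestera_port *port = netdev_priv(dev);

        switch (type) {
        case TC_SETUP_BLOCK:
                /* ingress vs. egress clsact is resolved from f->binder_type */
                return prestera_flow_block_setup(port, type_data);
        default:
                return -EOPNOTSUPP;
        }
}
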
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
index 467c7038cace..a85a3eb40279 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flow.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
@@ -7,6 +7,29 @@
#include <net/flow_offload.h>
struct prestera_port;
+struct prestera_switch;
+
+struct prestera_flow_block_binding {
+ struct list_head list;
+ struct prestera_port *port;
+ int span_id;
+};
+
+struct prestera_flow_block {
+ struct list_head binding_list;
+ struct prestera_switch *sw;
+ struct net *net;
+ struct prestera_acl_ruleset *ruleset_zero;
+ struct flow_block_cb *block_cb;
+ struct list_head template_list;
+ struct {
+ u32 prio_min;
+ u32 prio_max;
+ bool bound;
+ } mall;
+ unsigned int rule_count;
+ bool ingress;
+};
int prestera_flow_block_setup(struct prestera_port *port,
struct flow_block_offload *f);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index e571ba09ec08..91a478b75cbf 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -3,42 +3,134 @@
#include "prestera.h"
#include "prestera_acl.h"
+#include "prestera_flow.h"
#include "prestera_flower.h"
+#include "prestera_matchall.h"
+
+struct prestera_flower_template {
+ struct prestera_acl_ruleset *ruleset;
+ struct list_head list;
+ u32 chain_index;
+};
+
+static void
+prestera_flower_template_free(struct prestera_flower_template *template)
+{
+ prestera_acl_ruleset_put(template->ruleset);
+ list_del(&template->list);
+ kfree(template);
+}
+
+void prestera_flower_template_cleanup(struct prestera_flow_block *block)
+{
+ struct prestera_flower_template *template, *tmp;
+
+ /* put the reference to all rulesets kept in tmpl create */
+ list_for_each_entry_safe(template, tmp, &block->template_list, list)
+ prestera_flower_template_free(template);
+}
+
+static int
+prestera_flower_parse_goto_action(struct prestera_flow_block *block,
+ struct prestera_acl_rule *rule,
+ u32 chain_index,
+ const struct flow_action_entry *act)
+{
+ struct prestera_acl_ruleset *ruleset;
+
+ if (act->chain_index <= chain_index)
+ /* we can jump only forward */
+ return -EINVAL;
+
+ if (rule->re_arg.jump.valid)
+ return -EEXIST;
+
+ ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
+ act->chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ rule->re_arg.jump.valid = 1;
+ rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset);
+
+ rule->jump_ruleset = ruleset;
+
+ return 0;
+}
static int prestera_flower_parse_actions(struct prestera_flow_block *block,
struct prestera_acl_rule *rule,
struct flow_action *flow_action,
+ u32 chain_index,
struct netlink_ext_ack *extack)
{
- struct prestera_acl_rule_action_entry a_entry;
const struct flow_action_entry *act;
int err, i;
+ /* the whole struct (rule->re_arg) must be zero-initialized */
if (!flow_action_has_entries(flow_action))
return 0;
- flow_action_for_each(i, act, flow_action) {
- memset(&a_entry, 0, sizeof(a_entry));
+ if (!flow_action_mixed_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
+ act = flow_action_first_entry_get(flow_action);
+ if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
+ /* Nothing to do */
+ } else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) {
+ /* setup counter first */
+ rule->re_arg.count.valid = true;
+ err = prestera_acl_chain_to_client(chain_index, block->ingress,
+ &rule->re_arg.count.client);
+ if (err)
+ return err;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
+ return -EOPNOTSUPP;
+ }
+ flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
- a_entry.id = PRESTERA_ACL_RULE_ACTION_ACCEPT;
+ if (rule->re_arg.accept.valid)
+ return -EEXIST;
+
+ rule->re_arg.accept.valid = 1;
break;
case FLOW_ACTION_DROP:
- a_entry.id = PRESTERA_ACL_RULE_ACTION_DROP;
+ if (rule->re_arg.drop.valid)
+ return -EEXIST;
+
+ rule->re_arg.drop.valid = 1;
break;
case FLOW_ACTION_TRAP:
- a_entry.id = PRESTERA_ACL_RULE_ACTION_TRAP;
+ if (rule->re_arg.trap.valid)
+ return -EEXIST;
+
+ rule->re_arg.trap.valid = 1;
+ break;
+ case FLOW_ACTION_POLICE:
+ if (rule->re_arg.police.valid)
+ return -EEXIST;
+
+ rule->re_arg.police.valid = 1;
+ rule->re_arg.police.rate =
+ act->police.rate_bytes_ps;
+ rule->re_arg.police.burst = act->police.burst;
+ rule->re_arg.police.ingress = block->ingress;
+ break;
+ case FLOW_ACTION_GOTO:
+ err = prestera_flower_parse_goto_action(block, rule,
+ chain_index,
+ act);
+ if (err)
+ return err;
break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
pr_err("Unsupported action\n");
return -EOPNOTSUPP;
}
-
- err = prestera_acl_rule_action_add(rule, &a_entry);
- if (err)
- return err;
}
return 0;
@@ -49,10 +141,11 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
struct prestera_flow_block *block)
{
struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
- struct prestera_acl_rule_match_entry m_entry = {0};
+ struct prestera_acl_match *r_match = &rule->re_key.match;
+ struct prestera_port *port;
struct net_device *ingress_dev;
struct flow_match_meta match;
- struct prestera_port *port;
+ __be16 key, mask;
flow_rule_match_meta(f_rule, &match);
if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
@@ -61,7 +154,7 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
return -EINVAL;
}
- ingress_dev = __dev_get_by_index(prestera_acl_block_net(block),
+ ingress_dev = __dev_get_by_index(block->net,
match.key->ingress_ifindex);
if (!ingress_dev) {
NL_SET_ERR_MSG_MOD(f->common.extack,
@@ -76,11 +169,17 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
}
port = netdev_priv(ingress_dev);
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT;
- m_entry.keymask.u64.key = port->hw_id | ((u64)port->dev_id << 32);
- m_entry.keymask.u64.mask = ~(u64)0;
+ mask = htons(0x1FFF << 3);
+ key = htons(port->hw_id << 3);
+ rule_match_set(r_match->key, SYS_PORT, key);
+ rule_match_set(r_match->mask, SYS_PORT, mask);
+
+ mask = htons(0x3FF);
+ key = htons(port->dev_id);
+ rule_match_set(r_match->key, SYS_DEV, key);
+ rule_match_set(r_match->mask, SYS_DEV, mask);
- return prestera_acl_rule_match_add(rule, &m_entry);
+ return 0;
}
static int prestera_flower_parse(struct prestera_flow_block *block,
@@ -89,9 +188,9 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
{
struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = f_rule->match.dissector;
- struct prestera_acl_rule_match_entry m_entry;
- u16 n_proto_mask = 0;
- u16 n_proto_key = 0;
+ struct prestera_acl_match *r_match = &rule->re_key.match;
+ __be16 n_proto_mask = 0;
+ __be16 n_proto_key = 0;
u16 addr_type = 0;
u8 ip_proto = 0;
int err;
@@ -105,6 +204,7 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ICMP) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS_RANGE) |
BIT(FLOW_DISSECTOR_KEY_VLAN))) {
NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
return -EOPNOTSUPP;
@@ -129,32 +229,19 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
struct flow_match_basic match;
flow_rule_match_basic(f_rule, &match);
- n_proto_key = ntohs(match.key->n_proto);
- n_proto_mask = ntohs(match.mask->n_proto);
+ n_proto_key = match.key->n_proto;
+ n_proto_mask = match.mask->n_proto;
- if (n_proto_key == ETH_P_ALL) {
+ if (ntohs(match.key->n_proto) == ETH_P_ALL) {
n_proto_key = 0;
n_proto_mask = 0;
}
- /* add eth type key,mask */
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE;
- m_entry.keymask.u16.key = n_proto_key;
- m_entry.keymask.u16.mask = n_proto_mask;
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
-
- /* add ip proto key,mask */
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO;
- m_entry.keymask.u8.key = match.key->ip_proto;
- m_entry.keymask.u8.mask = match.mask->ip_proto;
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, ETH_TYPE, n_proto_key);
+ rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask);
+ rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto);
+ rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto);
ip_proto = match.key->ip_proto;
}
@@ -163,27 +250,27 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_eth_addrs(f_rule, &match);
- /* add ethernet dst key,mask */
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC;
- memcpy(&m_entry.keymask.mac.key,
- &match.key->dst, sizeof(match.key->dst));
- memcpy(&m_entry.keymask.mac.mask,
- &match.mask->dst, sizeof(match.mask->dst));
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
-
- /* add ethernet src key,mask */
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC;
- memcpy(&m_entry.keymask.mac.key,
- &match.key->src, sizeof(match.key->src));
- memcpy(&m_entry.keymask.mac.mask,
- &match.mask->src, sizeof(match.mask->src));
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ /* DA key, mask */
+ rule_match_set_n(r_match->key,
+ ETH_DMAC_0, &match.key->dst[0], 4);
+ rule_match_set_n(r_match->key,
+ ETH_DMAC_1, &match.key->dst[4], 2);
+
+ rule_match_set_n(r_match->mask,
+ ETH_DMAC_0, &match.mask->dst[0], 4);
+ rule_match_set_n(r_match->mask,
+ ETH_DMAC_1, &match.mask->dst[4], 2);
+
+ /* SA key, mask */
+ rule_match_set_n(r_match->key,
+ ETH_SMAC_0, &match.key->src[0], 4);
+ rule_match_set_n(r_match->key,
+ ETH_SMAC_1, &match.key->src[4], 2);
+
+ rule_match_set_n(r_match->mask,
+ ETH_SMAC_0, &match.mask->src[0], 4);
+ rule_match_set_n(r_match->mask,
+ ETH_SMAC_1, &match.mask->src[4], 2);
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -191,25 +278,11 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_ipv4_addrs(f_rule, &match);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC;
- memcpy(&m_entry.keymask.u32.key,
- &match.key->src, sizeof(match.key->src));
- memcpy(&m_entry.keymask.u32.mask,
- &match.mask->src, sizeof(match.mask->src));
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, IP_SRC, match.key->src);
+ rule_match_set(r_match->mask, IP_SRC, match.mask->src);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST;
- memcpy(&m_entry.keymask.u32.key,
- &match.key->dst, sizeof(match.key->dst));
- memcpy(&m_entry.keymask.u32.mask,
- &match.mask->dst, sizeof(match.mask->dst));
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, IP_DST, match.key->dst);
+ rule_match_set(r_match->mask, IP_DST, match.mask->dst);
}
if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
@@ -224,21 +297,34 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_ports(f_rule, &match);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC;
- m_entry.keymask.u16.key = ntohs(match.key->src);
- m_entry.keymask.u16.mask = ntohs(match.mask->src);
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, L4_PORT_SRC, match.key->src);
+ rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST;
- m_entry.keymask.u16.key = ntohs(match.key->dst);
- m_entry.keymask.u16.mask = ntohs(match.mask->dst);
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, L4_PORT_DST, match.key->dst);
+ rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS_RANGE)) {
+ struct flow_match_ports_range match;
+ __be32 tp_key, tp_mask;
+
+ flow_rule_match_ports_range(f_rule, &match);
+
+ /* src port range (min, max) */
+ tp_key = htonl(ntohs(match.key->tp_min.src) |
+ (ntohs(match.key->tp_max.src) << 16));
+ tp_mask = htonl(ntohs(match.mask->tp_min.src) |
+ (ntohs(match.mask->tp_max.src) << 16));
+ rule_match_set(r_match->key, L4_PORT_RANGE_SRC, tp_key);
+ rule_match_set(r_match->mask, L4_PORT_RANGE_SRC, tp_mask);
+
+ /* dst port range (min, max) */
+ tp_key = htonl(ntohs(match.key->tp_min.dst) |
+ (ntohs(match.key->tp_max.dst) << 16));
+ tp_mask = htonl(ntohs(match.mask->tp_min.dst) |
+ (ntohs(match.mask->tp_max.dst) << 16));
+ rule_match_set(r_match->key, L4_PORT_RANGE_DST, tp_key);
+ rule_match_set(r_match->mask, L4_PORT_RANGE_DST, tp_mask);
}
if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
@@ -247,22 +333,15 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_vlan(f_rule, &match);
if (match.mask->vlan_id != 0) {
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID;
- m_entry.keymask.u16.key = match.key->vlan_id;
- m_entry.keymask.u16.mask = match.mask->vlan_id;
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ __be16 key = cpu_to_be16(match.key->vlan_id);
+ __be16 mask = cpu_to_be16(match.mask->vlan_id);
+
+ rule_match_set(r_match->key, VLAN_ID, key);
+ rule_match_set(r_match->mask, VLAN_ID, mask);
}
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID;
- m_entry.keymask.u16.key = ntohs(match.key->vlan_tpid);
- m_entry.keymask.u16.mask = ntohs(match.mask->vlan_tpid);
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid);
+ rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid);
}
if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
@@ -270,90 +349,227 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_icmp(f_rule, &match);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE;
- m_entry.keymask.u8.key = match.key->type;
- m_entry.keymask.u8.mask = match.mask->type;
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, ICMP_TYPE, match.key->type);
+ rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type);
- memset(&m_entry, 0, sizeof(m_entry));
- m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE;
- m_entry.keymask.u8.key = match.key->code;
- m_entry.keymask.u8.mask = match.mask->code;
- err = prestera_acl_rule_match_add(rule, &m_entry);
- if (err)
- return err;
+ rule_match_set(r_match->key, ICMP_CODE, match.key->code);
+ rule_match_set(r_match->mask, ICMP_CODE, match.mask->code);
}
- return prestera_flower_parse_actions(block, rule,
- &f->rule->action,
+ return prestera_flower_parse_actions(block, rule, &f->rule->action,
+ f->common.chain_index,
f->common.extack);
}
+static int prestera_flower_prio_check(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ u32 mall_prio_min;
+ u32 mall_prio_max;
+ int err;
+
+ err = prestera_mall_prio_get(block, &mall_prio_min, &mall_prio_max);
+ if (err == -ENOENT)
+ /* No matchall filters installed on this chain. */
+ return 0;
+
+ if (err) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
+ return err;
+ }
+
+ if (f->common.prio <= mall_prio_max && block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Failed to add in front of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+ if (f->common.prio >= mall_prio_min && !block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add behind of existing matchall rules");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index,
+ u32 *prio_min, u32 *prio_max)
+{
+ struct prestera_acl_ruleset *ruleset;
+
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ prestera_acl_ruleset_prio_get(ruleset, prio_min, prio_max);
+ return 0;
+}
+
int prestera_flower_replace(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
- struct prestera_switch *sw = prestera_acl_block_sw(block);
+ struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl *acl = block->sw->acl;
struct prestera_acl_rule *rule;
int err;
- rule = prestera_acl_rule_create(block, f->cookie);
- if (IS_ERR(rule))
- return PTR_ERR(rule);
+ err = prestera_flower_prio_check(block, f);
+ if (err)
+ return err;
+
+ ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ /* increments the ruleset reference */
+ rule = prestera_acl_rule_create(ruleset, f->cookie,
+ f->common.chain_index);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_rule_create;
+ }
err = prestera_flower_parse(block, rule, f);
if (err)
- goto err_flower_parse;
+ goto err_rule_add;
- err = prestera_acl_rule_add(sw, rule);
+ if (!prestera_acl_ruleset_is_offload(ruleset)) {
+ err = prestera_acl_ruleset_offload(ruleset);
+ if (err)
+ goto err_ruleset_offload;
+ }
+
+ err = prestera_acl_rule_add(block->sw, rule);
if (err)
goto err_rule_add;
+ prestera_acl_ruleset_put(ruleset);
return 0;
+err_ruleset_offload:
err_rule_add:
-err_flower_parse:
prestera_acl_rule_destroy(rule);
+err_rule_create:
+ prestera_acl_ruleset_put(ruleset);
return err;
}
void prestera_flower_destroy(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
+ struct prestera_acl_ruleset *ruleset;
struct prestera_acl_rule *rule;
- struct prestera_switch *sw;
- rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block),
- f->cookie);
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
+ f->common.chain_index);
+ if (IS_ERR(ruleset))
+ return;
+
+ rule = prestera_acl_rule_lookup(ruleset, f->cookie);
if (rule) {
- sw = prestera_acl_block_sw(block);
- prestera_acl_rule_del(sw, rule);
+ prestera_acl_rule_del(block->sw, rule);
prestera_acl_rule_destroy(rule);
}
+ prestera_acl_ruleset_put(ruleset);
+}
+
+int prestera_flower_tmplt_create(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_flower_template *template;
+ struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl_rule rule;
+ int err;
+
+ memset(&rule, 0, sizeof(rule));
+ err = prestera_flower_parse(block, &rule, f);
+ if (err)
+ return err;
+
+ template = kmalloc(sizeof(*template), GFP_KERNEL);
+ if (!template) {
+ err = -ENOMEM;
+ goto err_malloc;
+ }
+
+ prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
+ ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
+ f->common.chain_index);
+ if (IS_ERR_OR_NULL(ruleset)) {
+ err = -EINVAL;
+ goto err_ruleset_get;
+ }
+
+ /* preserve keymask/template to this ruleset */
+ err = prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
+ if (err)
+ goto err_ruleset_keymask_set;
+
+ /* Skip the error: a template operation cannot be rejected, so keep
+ * the reference to the ruleset so that rules can be added to it later.
+ * If the offload fails here, the ruleset is offloaded again when a new
+ * rule is added. It is also unlikely that the ruleset is already
+ * offloaded at this stage.
+ */
+ prestera_acl_ruleset_offload(ruleset);
+
+ /* keep the reference to the ruleset */
+ template->ruleset = ruleset;
+ template->chain_index = f->common.chain_index;
+ list_add_rcu(&template->list, &block->template_list);
+ return 0;
+
+err_ruleset_keymask_set:
+ prestera_acl_ruleset_put(ruleset);
+err_ruleset_get:
+ kfree(template);
+err_malloc:
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed");
+ return err;
+}
+
+void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_flower_template *template, *tmp;
+
+ list_for_each_entry_safe(template, tmp, &block->template_list, list)
+ if (template->chain_index == f->common.chain_index) {
+ /* put the reference to the ruleset kept in create */
+ prestera_flower_template_free(template);
+ return;
+ }
}
int prestera_flower_stats(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
- struct prestera_switch *sw = prestera_acl_block_sw(block);
+ struct prestera_acl_ruleset *ruleset;
struct prestera_acl_rule *rule;
u64 packets;
u64 lastuse;
u64 bytes;
int err;
- rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block),
- f->cookie);
- if (!rule)
- return -EINVAL;
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
+ f->common.chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ rule = prestera_acl_rule_lookup(ruleset, f->cookie);
+ if (!rule) {
+ err = -EINVAL;
+ goto err_rule_get_stats;
+ }
- err = prestera_acl_rule_get_stats(sw, rule, &packets, &bytes, &lastuse);
+ err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets,
+ &bytes, &lastuse);
if (err)
- return err;
+ goto err_rule_get_stats;
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
- FLOW_ACTION_HW_STATS_IMMEDIATE);
- return 0;
+ FLOW_ACTION_HW_STATS_DELAYED);
+
+err_rule_get_stats:
+ prestera_acl_ruleset_put(ruleset);
+ return err;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
index 91e045eec58b..1181115fe6fa 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
-/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+/* Copyright (c) 2020-2021 Marvell International Ltd. All rights reserved. */
#ifndef _PRESTERA_FLOWER_H_
#define _PRESTERA_FLOWER_H_
@@ -14,5 +14,12 @@ void prestera_flower_destroy(struct prestera_flow_block *block,
struct flow_cls_offload *f);
int prestera_flower_stats(struct prestera_flow_block *block,
struct flow_cls_offload *f);
+int prestera_flower_tmplt_create(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+void prestera_flower_template_cleanup(struct prestera_flow_block *block);
+int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index,
+ u32 *prio_min, u32 *prio_max);
#endif /* _PRESTERA_FLOWER_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index 9b8b1ed474fc..fc6f7d2746e8 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -9,11 +9,15 @@
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_acl.h"
+#include "prestera_counter.h"
+#include "prestera_router_hw.h"
#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000)
#define PRESTERA_MIN_MTU 64
+#define PRESTERA_MSG_CHUNK_SIZE 1024
+
enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_SWITCH_INIT = 0x1,
PRESTERA_CMD_TYPE_SWITCH_ATTR_SET = 0x2,
@@ -38,13 +42,38 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402,
PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403,
- PRESTERA_CMD_TYPE_ACL_RULE_ADD = 0x500,
- PRESTERA_CMD_TYPE_ACL_RULE_DELETE = 0x501,
- PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET = 0x510,
- PRESTERA_CMD_TYPE_ACL_RULESET_CREATE = 0x520,
- PRESTERA_CMD_TYPE_ACL_RULESET_DELETE = 0x521,
- PRESTERA_CMD_TYPE_ACL_PORT_BIND = 0x530,
- PRESTERA_CMD_TYPE_ACL_PORT_UNBIND = 0x531,
+ PRESTERA_CMD_TYPE_COUNTER_GET = 0x510,
+ PRESTERA_CMD_TYPE_COUNTER_ABORT = 0x511,
+ PRESTERA_CMD_TYPE_COUNTER_TRIGGER = 0x512,
+ PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET = 0x513,
+ PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE = 0x514,
+ PRESTERA_CMD_TYPE_COUNTER_CLEAR = 0x515,
+
+ PRESTERA_CMD_TYPE_VTCAM_CREATE = 0x540,
+ PRESTERA_CMD_TYPE_VTCAM_DESTROY = 0x541,
+ PRESTERA_CMD_TYPE_VTCAM_RULE_ADD = 0x550,
+ PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE = 0x551,
+ PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND = 0x560,
+ PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND = 0x561,
+
+ PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE = 0x600,
+ PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601,
+ PRESTERA_CMD_TYPE_ROUTER_LPM_ADD = 0x610,
+ PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE = 0x611,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET = 0x622,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET = 0x645,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD = 0x623,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE = 0x624,
+ PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630,
+ PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631,
+
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE = 0x700,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY = 0x701,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET = 0x702,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET = 0x703,
+
+ PRESTERA_CMD_TYPE_MDB_CREATE = 0x704,
+ PRESTERA_CMD_TYPE_MDB_DESTROY = 0x705,
PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
@@ -56,9 +85,15 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000,
PRESTERA_CMD_TYPE_SPAN_GET = 0x1100,
- PRESTERA_CMD_TYPE_SPAN_BIND = 0x1101,
- PRESTERA_CMD_TYPE_SPAN_UNBIND = 0x1102,
+ PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND = 0x1101,
+ PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND = 0x1102,
PRESTERA_CMD_TYPE_SPAN_RELEASE = 0x1103,
+ PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND = 0x1104,
+ PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND = 0x1105,
+
+ PRESTERA_CMD_TYPE_POLICER_CREATE = 0x1500,
+ PRESTERA_CMD_TYPE_POLICER_RELEASE = 0x1501,
+ PRESTERA_CMD_TYPE_POLICER_SET = 0x1502,
PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET = 0x2000,
@@ -75,6 +110,7 @@ enum {
PRESTERA_CMD_PORT_ATTR_LEARNING = 7,
PRESTERA_CMD_PORT_ATTR_FLOOD = 8,
PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9,
+ PRESTERA_CMD_PORT_ATTR_LOCKED = 10,
PRESTERA_CMD_PORT_ATTR_PHY_MODE = 12,
PRESTERA_CMD_PORT_ATTR_TYPE = 13,
PRESTERA_CMD_PORT_ATTR_STATS = 17,
@@ -150,6 +186,10 @@ enum {
};
enum {
+ PRESTERA_POLICER_MODE_SR_TCM
+};
+
+enum {
PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT = 0,
PRESTERA_HW_FDB_ENTRY_TYPE_LAG = 1,
PRESTERA_HW_FDB_ENTRY_TYPE_MAX = 2,
@@ -163,6 +203,12 @@ struct prestera_fw_event_handler {
void *arg;
};
+enum {
+ PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_REG_PORT = 0,
+ PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG = 1,
+ PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_MAX = 2,
+};
+
struct prestera_msg_cmd {
__le32 type;
};
@@ -249,6 +295,7 @@ union prestera_msg_port_param {
u8 duplex;
u8 fec;
u8 fc;
+ u8 br_locked;
union {
struct {
u8 admin;
@@ -359,76 +406,90 @@ struct prestera_msg_bridge_resp {
u8 pad[2];
};
-struct prestera_msg_acl_action {
- __le32 id;
- __le32 reserved[5];
+struct prestera_msg_vtcam_create_req {
+ struct prestera_msg_cmd cmd;
+ __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ u8 direction;
+ u8 lookup;
+ u8 pad[2];
};
-struct prestera_msg_acl_match {
- __le32 type;
- __le32 __reserved;
- union {
- struct {
- u8 key;
- u8 mask;
- } u8;
- struct {
- __le16 key;
- __le16 mask;
- } u16;
- struct {
- __le32 key;
- __le32 mask;
- } u32;
- struct {
- __le64 key;
- __le64 mask;
- } u64;
- struct {
- u8 key[ETH_ALEN];
- u8 mask[ETH_ALEN];
- } mac;
- } keymask;
+struct prestera_msg_vtcam_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 vtcam_id;
};
-struct prestera_msg_acl_rule_req {
+struct prestera_msg_vtcam_rule_add_req {
struct prestera_msg_cmd cmd;
- __le32 id;
- __le32 priority;
- __le16 ruleset_id;
- u8 n_actions;
- u8 n_matches;
+ __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __le32 vtcam_id;
+ __le32 prio;
+ __le32 n_act;
};
-struct prestera_msg_acl_rule_resp {
- struct prestera_msg_ret ret;
+struct prestera_msg_vtcam_rule_del_req {
+ struct prestera_msg_cmd cmd;
+ __le32 vtcam_id;
__le32 id;
};
-struct prestera_msg_acl_rule_stats_resp {
+struct prestera_msg_vtcam_bind_req {
+ struct prestera_msg_cmd cmd;
+ union {
+ struct {
+ __le32 hw_id;
+ __le32 dev_id;
+ } port;
+ __le32 index;
+ };
+ __le32 vtcam_id;
+ __le16 pcl_id;
+ __le16 type;
+};
+
+struct prestera_msg_vtcam_resp {
struct prestera_msg_ret ret;
- __le64 packets;
- __le64 bytes;
+ __le32 vtcam_id;
+ __le32 rule_id;
};
-struct prestera_msg_acl_ruleset_bind_req {
- struct prestera_msg_cmd cmd;
- __le32 port;
- __le32 dev;
- __le16 ruleset_id;
- u8 pad[2];
+struct prestera_msg_acl_action {
+ __le32 id;
+ __le32 __reserved;
+ union {
+ struct {
+ __le32 index;
+ } jump;
+ struct {
+ __le32 id;
+ } police;
+ struct {
+ __le32 id;
+ } count;
+ __le32 reserved[6];
+ };
};
-struct prestera_msg_acl_ruleset_req {
+struct prestera_msg_counter_req {
struct prestera_msg_cmd cmd;
- __le16 id;
- u8 pad[2];
+ __le32 client;
+ __le32 block_id;
+ __le32 num_counters;
+};
+
+struct prestera_msg_counter_stats {
+ __le64 packets;
+ __le64 bytes;
};
-struct prestera_msg_acl_ruleset_resp {
+struct prestera_msg_counter_resp {
struct prestera_msg_ret ret;
- __le16 id;
- u8 pad[2];
+ __le32 block_id;
+ __le32 offset;
+ __le32 num_counters;
+ __le32 done;
+ struct prestera_msg_counter_stats stats[];
};
struct prestera_msg_span_req {
@@ -465,6 +526,102 @@ struct prestera_msg_rxtx_resp {
__le32 map_addr;
};
+struct prestera_msg_iface {
+ union {
+ struct {
+ __le32 dev;
+ __le32 port;
+ };
+ __le16 lag_id;
+ };
+ __le16 vr_id;
+ __le16 vid;
+ u8 type;
+ u8 __pad[3];
+};
+
+struct prestera_msg_ip_addr {
+ union {
+ __be32 ipv4;
+ __be32 ipv6[4];
+ } u;
+ u8 v; /* e.g. PRESTERA_IPV4 */
+ u8 __pad[3];
+};
+
+struct prestera_msg_nh {
+ struct prestera_msg_iface oif;
+ __le32 hw_id;
+ u8 mac[ETH_ALEN];
+ u8 is_active;
+ u8 pad;
+};
+
+struct prestera_msg_rif_req {
+ struct prestera_msg_cmd cmd;
+ struct prestera_msg_iface iif;
+ __le32 mtu;
+ __le16 rif_id;
+ __le16 __reserved;
+ u8 mac[ETH_ALEN];
+ u8 __pad[2];
+};
+
+struct prestera_msg_rif_resp {
+ struct prestera_msg_ret ret;
+ __le16 rif_id;
+ u8 __pad[2];
+};
+
+struct prestera_msg_lpm_req {
+ struct prestera_msg_cmd cmd;
+ struct prestera_msg_ip_addr dst;
+ __le32 grp_id;
+ __le32 dst_len;
+ __le16 vr_id;
+ u8 __pad[2];
+};
+
+struct prestera_msg_nh_req {
+ struct prestera_msg_cmd cmd;
+ struct prestera_msg_nh nh[PRESTERA_NHGR_SIZE_MAX];
+ __le32 size;
+ __le32 grp_id;
+};
+
+struct prestera_msg_nh_chunk_req {
+ struct prestera_msg_cmd cmd;
+ __le32 offset;
+};
+
+struct prestera_msg_nh_chunk_resp {
+ struct prestera_msg_ret ret;
+ u8 hw_state[PRESTERA_MSG_CHUNK_SIZE];
+};
+
+struct prestera_msg_nh_grp_req {
+ struct prestera_msg_cmd cmd;
+ __le32 grp_id;
+ __le32 size;
+};
+
+struct prestera_msg_nh_grp_resp {
+ struct prestera_msg_ret ret;
+ __le32 grp_id;
+};
+
+struct prestera_msg_vr_req {
+ struct prestera_msg_cmd cmd;
+ __le16 vr_id;
+ u8 __pad[2];
+};
+
+struct prestera_msg_vr_resp {
+ struct prestera_msg_ret ret;
+ __le16 vr_id;
+ u8 __pad[2];
+};
+
struct prestera_msg_lag_req {
struct prestera_msg_cmd cmd;
__le32 port;
@@ -485,6 +642,26 @@ struct mvsw_msg_cpu_code_counter_ret {
__le64 packet_count;
};
+struct prestera_msg_policer_req {
+ struct prestera_msg_cmd cmd;
+ __le32 id;
+ union {
+ struct {
+ __le64 cir;
+ __le32 cbs;
+ } __packed sr_tcm; /* must always be 12 bytes */
+ __le32 reserved[6];
+ };
+ u8 mode;
+ u8 type;
+ u8 pad[2];
+};
+
+struct prestera_msg_policer_resp {
+ struct prestera_msg_ret ret;
+ __le32 id;
+};
+
struct prestera_msg_event {
__le16 type;
__le16 id;
@@ -511,6 +688,57 @@ struct prestera_msg_event_fdb {
u8 dest_type;
};
+struct prestera_msg_flood_domain_create_req {
+ struct prestera_msg_cmd cmd;
+};
+
+struct prestera_msg_flood_domain_create_resp {
+ struct prestera_msg_ret ret;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_ports_set_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le32 ports_num;
+};
+
+struct prestera_msg_flood_domain_ports_reset_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+};
+
+struct prestera_msg_flood_domain_port {
+ union {
+ struct {
+ __le32 port_num;
+ __le32 dev_num;
+ };
+ __le16 lag_id;
+ };
+ __le16 vid;
+ __le16 port_type;
+};
+
+struct prestera_msg_mdb_create_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le16 vid;
+ u8 mac[ETH_ALEN];
+};
+
+struct prestera_msg_mdb_destroy_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le16 vid;
+ u8 mac[ETH_ALEN];
+};
+
static void prestera_hw_build_tests(void)
{
/* check requests */
@@ -521,14 +749,38 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_vlan_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_fdb_req) != 28);
BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_req) != 16);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_req) != 16);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_bind_req) != 16);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_span_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_stp_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_req) != 8);
BUILD_BUG_ON(sizeof(struct prestera_msg_lag_req) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_cpu_code_counter_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_create_req) != 84);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_destroy_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_add_req) != 168);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_rule_del_req) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_bind_req) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_action) != 32);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_counter_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_counter_stats) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_lpm_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_policer_req) != 36);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_req) != 4);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_destroy_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_set_req) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_ports_reset_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_create_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_mdb_destroy_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_req) != 124);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_req) != 12);
+
+ /* structures that are part of req/resp fw messages */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_ip_addr) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_port) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh) != 28);
/* check responses */
BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
@@ -537,11 +789,16 @@ static void prestera_hw_build_tests(void)
BUILD_BUG_ON(sizeof(struct prestera_msg_port_stats_resp) != 248);
BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_resp) != 20);
BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_resp) != 12);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_resp) != 12);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_stats_resp) != 24);
- BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_span_resp) != 12);
BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vtcam_resp) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_counter_resp) != 24);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rif_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vr_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_policer_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_flood_domain_create_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_chunk_resp) != 1032);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_nh_grp_resp) != 12);
/* check events */
BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20);
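
The firmware messages are fixed-layout structs exchanged with the device in little-endian byte order, so the BUILD_BUG_ON() checks above pin their sizes at compile time: an accidental field or padding change then breaks the build instead of silently breaking the wire format. A standalone analogue of the pattern using C11 _Static_assert, not part of the patch; the struct and sizes are invented for illustration:

/* Standalone analogue of the BUILD_BUG_ON size checks. */
#include <stdint.h>
#include <stdio.h>

struct demo_msg_req {
        uint32_t type;          /* command type, little-endian on the wire */
        uint32_t id;
        uint16_t flags;
        uint8_t  pad[2];        /* explicit padding keeps the size stable */
};

_Static_assert(sizeof(struct demo_msg_req) == 12,
               "demo_msg_req must stay 12 bytes on the wire");

int main(void)
{
        printf("demo_msg_req wire size: %zu bytes\n",
               sizeof(struct demo_msg_req));
        return 0;
}
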
@@ -819,6 +1076,8 @@ int prestera_hw_switch_init(struct prestera_switch *sw)
sw->id = resp.switch_id;
sw->lag_member_max = resp.lag_member_max;
sw->lag_max = resp.lag_max;
+ sw->size_tbl_router_nexthop =
+ __le32_to_cpu(resp.size_tbl_router_nexthop);
return 0;
}
@@ -1044,225 +1303,168 @@ static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause)
}
}
-int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id)
+int prestera_hw_vtcam_create(struct prestera_switch *sw,
+ u8 lookup, const u32 *keymask, u32 *vtcam_id,
+ enum prestera_hw_vtcam_direction_t dir)
{
- struct prestera_msg_acl_ruleset_resp resp;
- struct prestera_msg_acl_ruleset_req req;
int err;
+ struct prestera_msg_vtcam_resp resp;
+ struct prestera_msg_vtcam_create_req req = {
+ .lookup = lookup,
+ .direction = dir,
+ };
+
+ if (keymask)
+ memcpy(req.keymask, keymask, sizeof(req.keymask));
+ else
+ memset(req.keymask, 0, sizeof(req.keymask));
- err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULESET_CREATE,
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_CREATE,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
if (err)
return err;
- *ruleset_id = __le16_to_cpu(resp.id);
-
+ *vtcam_id = __le32_to_cpu(resp.vtcam_id);
return 0;
}
-int prestera_hw_acl_ruleset_del(struct prestera_switch *sw, u16 ruleset_id)
+int prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id)
{
- struct prestera_msg_acl_ruleset_req req = {
- .id = __cpu_to_le16(ruleset_id),
+ struct prestera_msg_vtcam_destroy_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
};
- return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULESET_DELETE,
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_DESTROY,
&req.cmd, sizeof(req));
}
-static int prestera_hw_acl_actions_put(struct prestera_msg_acl_action *action,
- struct prestera_acl_rule *rule)
+static int
+prestera_acl_rule_add_put_action(struct prestera_msg_acl_action *action,
+ struct prestera_acl_hw_action_info *info)
{
- struct list_head *a_list = prestera_acl_rule_action_list_get(rule);
- struct prestera_acl_rule_action_entry *a_entry;
- int i = 0;
-
- list_for_each_entry(a_entry, a_list, list) {
- action[i].id = __cpu_to_le32(a_entry->id);
-
- switch (a_entry->id) {
- case PRESTERA_ACL_RULE_ACTION_ACCEPT:
- case PRESTERA_ACL_RULE_ACTION_DROP:
- case PRESTERA_ACL_RULE_ACTION_TRAP:
- /* just rule action id, no specific data */
- break;
- default:
- return -EINVAL;
- }
-
- i++;
- }
-
- return 0;
-}
-
-static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match,
- struct prestera_acl_rule *rule)
-{
- struct list_head *m_list = prestera_acl_rule_match_list_get(rule);
- struct prestera_acl_rule_match_entry *m_entry;
- int i = 0;
-
- list_for_each_entry(m_entry, m_list, list) {
- match[i].type = __cpu_to_le32(m_entry->type);
-
- switch (m_entry->type) {
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID:
- match[i].keymask.u16.key =
- __cpu_to_le16(m_entry->keymask.u16.key);
- match[i].keymask.u16.mask =
- __cpu_to_le16(m_entry->keymask.u16.mask);
- break;
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO:
- match[i].keymask.u8.key = m_entry->keymask.u8.key;
- match[i].keymask.u8.mask = m_entry->keymask.u8.mask;
- break;
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC:
- memcpy(match[i].keymask.mac.key,
- m_entry->keymask.mac.key,
- sizeof(match[i].keymask.mac.key));
- memcpy(match[i].keymask.mac.mask,
- m_entry->keymask.mac.mask,
- sizeof(match[i].keymask.mac.mask));
- break;
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC:
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST:
- match[i].keymask.u32.key =
- __cpu_to_le32(m_entry->keymask.u32.key);
- match[i].keymask.u32.mask =
- __cpu_to_le32(m_entry->keymask.u32.mask);
- break;
- case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT:
- match[i].keymask.u64.key =
- __cpu_to_le64(m_entry->keymask.u64.key);
- match[i].keymask.u64.mask =
- __cpu_to_le64(m_entry->keymask.u64.mask);
- break;
- default:
- return -EINVAL;
- }
+ action->id = __cpu_to_le32(info->id);
- i++;
+ switch (info->id) {
+ case PRESTERA_ACL_RULE_ACTION_ACCEPT:
+ case PRESTERA_ACL_RULE_ACTION_DROP:
+ case PRESTERA_ACL_RULE_ACTION_TRAP:
+ /* just rule action id, no specific data */
+ break;
+ case PRESTERA_ACL_RULE_ACTION_JUMP:
+ action->jump.index = __cpu_to_le32(info->jump.index);
+ break;
+ case PRESTERA_ACL_RULE_ACTION_POLICE:
+ action->police.id = __cpu_to_le32(info->police.id);
+ break;
+ case PRESTERA_ACL_RULE_ACTION_COUNT:
+ action->count.id = __cpu_to_le32(info->count.id);
+ break;
+ default:
+ return -EINVAL;
}
return 0;
}
-int prestera_hw_acl_rule_add(struct prestera_switch *sw,
- struct prestera_acl_rule *rule,
- u32 *rule_id)
+int prestera_hw_vtcam_rule_add(struct prestera_switch *sw,
+ u32 vtcam_id, u32 prio, void *key, void *keymask,
+ struct prestera_acl_hw_action_info *act,
+ u8 n_act, u32 *rule_id)
{
- struct prestera_msg_acl_action *actions;
- struct prestera_msg_acl_match *matches;
- struct prestera_msg_acl_rule_resp resp;
- struct prestera_msg_acl_rule_req *req;
- u8 n_actions;
- u8 n_matches;
+ struct prestera_msg_acl_action *actions_msg;
+ struct prestera_msg_vtcam_rule_add_req *req;
+ struct prestera_msg_vtcam_resp resp;
void *buff;
u32 size;
int err;
+ u8 i;
- n_actions = prestera_acl_rule_action_len(rule);
- n_matches = prestera_acl_rule_match_len(rule);
-
- size = sizeof(*req) + sizeof(*actions) * n_actions +
- sizeof(*matches) * n_matches;
+ size = sizeof(*req) + sizeof(*actions_msg) * n_act;
buff = kzalloc(size, GFP_KERNEL);
if (!buff)
return -ENOMEM;
req = buff;
- actions = buff + sizeof(*req);
- matches = buff + sizeof(*req) + sizeof(*actions) * n_actions;
-
- /* put acl actions into the message */
- err = prestera_hw_acl_actions_put(actions, rule);
- if (err)
- goto free_buff;
+ req->n_act = __cpu_to_le32(n_act);
+ actions_msg = buff + sizeof(*req);
/* put acl matches into the message */
- err = prestera_hw_acl_matches_put(matches, rule);
- if (err)
- goto free_buff;
+ memcpy(req->key, key, sizeof(req->key));
+ memcpy(req->keymask, keymask, sizeof(req->keymask));
- req->ruleset_id = __cpu_to_le16(prestera_acl_rule_ruleset_id_get(rule));
- req->priority = __cpu_to_le32(prestera_acl_rule_priority_get(rule));
- req->n_actions = prestera_acl_rule_action_len(rule);
- req->n_matches = prestera_acl_rule_match_len(rule);
+ /* put acl actions into the message */
+ for (i = 0; i < n_act; i++) {
+ err = prestera_acl_rule_add_put_action(&actions_msg[i],
+ &act[i]);
+ if (err)
+ goto free_buff;
+ }
- err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_ADD,
+ req->vtcam_id = __cpu_to_le32(vtcam_id);
+ req->prio = __cpu_to_le32(prio);
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_ADD,
&req->cmd, size, &resp.ret, sizeof(resp));
if (err)
goto free_buff;
- *rule_id = __le32_to_cpu(resp.id);
+ *rule_id = __le32_to_cpu(resp.rule_id);
free_buff:
kfree(buff);
return err;
}
-int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id)
+int prestera_hw_vtcam_rule_del(struct prestera_switch *sw,
+ u32 vtcam_id, u32 rule_id)
{
- struct prestera_msg_acl_rule_req req = {
+ struct prestera_msg_vtcam_rule_del_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
.id = __cpu_to_le32(rule_id)
};
- return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULE_DELETE,
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_RULE_DELETE,
&req.cmd, sizeof(req));
}
-int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id,
- u64 *packets, u64 *bytes)
+int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id, u16 pcl_id)
{
- struct prestera_msg_acl_rule_stats_resp resp;
- struct prestera_msg_acl_rule_req req = {
- .id = __cpu_to_le32(rule_id)
+ struct prestera_msg_vtcam_bind_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
+ .type = __cpu_to_le16(iface->type),
+ .pcl_id = __cpu_to_le16(pcl_id)
};
- int err;
-
- err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET,
- &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
- if (err)
- return err;
-
- *packets = __le64_to_cpu(resp.packets);
- *bytes = __le64_to_cpu(resp.bytes);
-
- return 0;
-}
-int prestera_hw_acl_port_bind(const struct prestera_port *port, u16 ruleset_id)
-{
- struct prestera_msg_acl_ruleset_bind_req req = {
- .port = __cpu_to_le32(port->hw_id),
- .dev = __cpu_to_le32(port->dev_id),
- .ruleset_id = __cpu_to_le16(ruleset_id),
- };
+ if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) {
+ req.port.dev_id = __cpu_to_le32(iface->port->dev_id);
+ req.port.hw_id = __cpu_to_le32(iface->port->hw_id);
+ } else {
+ req.index = __cpu_to_le32(iface->index);
+ }
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_BIND,
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_BIND,
&req.cmd, sizeof(req));
}
-int prestera_hw_acl_port_unbind(const struct prestera_port *port,
- u16 ruleset_id)
+int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id)
{
- struct prestera_msg_acl_ruleset_bind_req req = {
- .port = __cpu_to_le32(port->hw_id),
- .dev = __cpu_to_le32(port->dev_id),
- .ruleset_id = __cpu_to_le16(ruleset_id),
+ struct prestera_msg_vtcam_bind_req req = {
+ .vtcam_id = __cpu_to_le32(vtcam_id),
+ .type = __cpu_to_le16(iface->type)
};
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_UNBIND,
+ if (iface->type == PRESTERA_ACL_IFACE_TYPE_PORT) {
+ req.port.dev_id = __cpu_to_le32(iface->port->dev_id);
+ req.port.hw_id = __cpu_to_le32(iface->port->hw_id);
+ } else {
+ req.index = __cpu_to_le32(iface->index);
+ }
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VTCAM_IFACE_UNBIND,
&req.cmd, sizeof(req));
}
@@ -1285,27 +1487,39 @@ int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id)
return 0;
}
-int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id)
+int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id,
+ bool ingress)
{
struct prestera_msg_span_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.id = span_id,
};
+ enum prestera_cmd_type_t cmd_type;
+
+ if (ingress)
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_BIND;
+ else
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_EGRESS_BIND;
+
+ return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req));
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_BIND,
- &req.cmd, sizeof(req));
}
-int prestera_hw_span_unbind(const struct prestera_port *port)
+int prestera_hw_span_unbind(const struct prestera_port *port, bool ingress)
{
struct prestera_msg_span_req req = {
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
};
+ enum prestera_cmd_type_t cmd_type;
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_UNBIND,
- &req.cmd, sizeof(req));
+ if (ingress)
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_INGRESS_UNBIND;
+ else
+ cmd_type = PRESTERA_CMD_TYPE_SPAN_EGRESS_UNBIND;
+
+ return prestera_cmd(port->sw, cmd_type, &req.cmd, sizeof(req));
}
int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id)
@@ -1457,7 +1671,7 @@ int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
&req.cmd, sizeof(req));
}
-static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood)
+int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
@@ -1475,7 +1689,7 @@ static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood)
&req.cmd, sizeof(req));
}
-static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood)
+int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
.attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
@@ -1493,14 +1707,15 @@ static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood)
&req.cmd, sizeof(req));
}
-static int prestera_hw_port_flood_set_v2(struct prestera_port *port, bool flood)
+int prestera_hw_port_br_locked_set(const struct prestera_port *port,
+ bool br_locked)
{
struct prestera_msg_port_attr_req req = {
- .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_LOCKED),
.port = __cpu_to_le32(port->hw_id),
.dev = __cpu_to_le32(port->dev_id),
.param = {
- .flood = flood,
+ .br_locked = br_locked,
}
};
@@ -1508,41 +1723,6 @@ static int prestera_hw_port_flood_set_v2(struct prestera_port *port, bool flood)
&req.cmd, sizeof(req));
}
-int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask,
- unsigned long val)
-{
- int err;
-
- if (port->sw->dev->fw_rev.maj <= 2) {
- if (!(mask & BR_FLOOD))
- return 0;
-
- return prestera_hw_port_flood_set_v2(port, val & BR_FLOOD);
- }
-
- if (mask & BR_FLOOD) {
- err = prestera_hw_port_uc_flood_set(port, val & BR_FLOOD);
- if (err)
- goto err_uc_flood;
- }
-
- if (mask & BR_MCAST_FLOOD) {
- err = prestera_hw_port_mc_flood_set(port, val & BR_MCAST_FLOOD);
- if (err)
- goto err_mc_flood;
- }
-
- return 0;
-
-err_mc_flood:
- prestera_hw_port_mc_flood_set(port, 0);
-err_uc_flood:
- if (mask & BR_FLOOD)
- prestera_hw_port_uc_flood_set(port, 0);
-
- return err;
-}
-
int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
{
struct prestera_msg_vlan_req req = {
@@ -1796,6 +1976,197 @@ int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id)
&req.cmd, sizeof(req));
}
+static int prestera_iface_to_msg(struct prestera_iface *iface,
+ struct prestera_msg_iface *msg_if)
+{
+ switch (iface->type) {
+ case PRESTERA_IF_PORT_E:
+ case PRESTERA_IF_VID_E:
+ msg_if->port = __cpu_to_le32(iface->dev_port.port_num);
+ msg_if->dev = __cpu_to_le32(iface->dev_port.hw_dev_num);
+ break;
+ case PRESTERA_IF_LAG_E:
+ msg_if->lag_id = __cpu_to_le16(iface->lag_id);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ msg_if->vr_id = __cpu_to_le16(iface->vr_id);
+ msg_if->vid = __cpu_to_le16(iface->vlan_id);
+ msg_if->type = iface->type;
+ return 0;
+}
+
+int prestera_hw_rif_create(struct prestera_switch *sw,
+ struct prestera_iface *iif, u8 *mac, u16 *rif_id)
+{
+ struct prestera_msg_rif_resp resp;
+ struct prestera_msg_rif_req req;
+ int err;
+
+ memcpy(req.mac, mac, ETH_ALEN);
+
+ err = prestera_iface_to_msg(iif, &req.iif);
+ if (err)
+ return err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *rif_id = __le16_to_cpu(resp.rif_id);
+ return err;
+}
+
+int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id,
+ struct prestera_iface *iif)
+{
+ struct prestera_msg_rif_req req = {
+ .rif_id = __cpu_to_le16(rif_id),
+ };
+ int err;
+
+ err = prestera_iface_to_msg(iif, &req.iif);
+ if (err)
+ return err;
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id)
+{
+ struct prestera_msg_vr_resp resp;
+ struct prestera_msg_vr_req req;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_VR_CREATE,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *vr_id = __le16_to_cpu(resp.vr_id);
+ return err;
+}
+
+int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id)
+{
+ struct prestera_msg_vr_req req = {
+ .vr_id = __cpu_to_le16(vr_id),
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_VR_DELETE, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len, u32 grp_id)
+{
+ struct prestera_msg_lpm_req req = {
+ .dst_len = __cpu_to_le32(dst_len),
+ .vr_id = __cpu_to_le16(vr_id),
+ .grp_id = __cpu_to_le32(grp_id),
+ .dst.u.ipv4 = dst
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_ADD, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len)
+{
+ struct prestera_msg_lpm_req req = {
+ .dst_len = __cpu_to_le32(dst_len),
+ .vr_id = __cpu_to_le16(vr_id),
+ .dst.u.ipv4 = dst
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count,
+ struct prestera_neigh_info *nhs, u32 grp_id)
+{
+ struct prestera_msg_nh_req req = { .size = __cpu_to_le32((u32)count),
+ .grp_id = __cpu_to_le32(grp_id) };
+ int i, err;
+
+ for (i = 0; i < count; i++) {
+ req.nh[i].is_active = nhs[i].connected;
+ memcpy(&req.nh[i].mac, nhs[i].ha, ETH_ALEN);
+ err = prestera_iface_to_msg(&nhs[i].iface, &req.nh[i].oif);
+ if (err)
+ return err;
+ }
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_SET, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw,
+ u8 *hw_state, u32 buf_size /* Buffer in bytes */)
+{
+ static struct prestera_msg_nh_chunk_resp resp;
+ struct prestera_msg_nh_chunk_req req;
+ u32 buf_offset;
+ int err;
+
+ memset(&hw_state[0], 0, buf_size);
+ buf_offset = 0;
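+	/* Read hw_state from the FW in PRESTERA_MSG_CHUNK_SIZE byte chunks
+	 * until buf_size bytes have been filled.
+	 */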
+ while (1) {
+ if (buf_offset >= buf_size)
+ break;
+
+ memset(&req, 0, sizeof(req));
+ req.offset = __cpu_to_le32(buf_offset * 8); /* 8 bits in u8 */
+ err = prestera_cmd_ret(sw,
+ PRESTERA_CMD_TYPE_ROUTER_NH_GRP_BLK_GET,
+ &req.cmd, sizeof(req), &resp.ret,
+ sizeof(resp));
+ if (err)
+ return err;
+
+ memcpy(&hw_state[buf_offset], &resp.hw_state[0],
+ buf_offset + PRESTERA_MSG_CHUNK_SIZE > buf_size ?
+ buf_size - buf_offset : PRESTERA_MSG_CHUNK_SIZE);
+ buf_offset += PRESTERA_MSG_CHUNK_SIZE;
+ }
+
+ return 0;
+}
+
+int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count,
+ u32 *grp_id)
+{
+ struct prestera_msg_nh_grp_req req = { .size = __cpu_to_le32((u32)nh_count) };
+ struct prestera_msg_nh_grp_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_ADD,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *grp_id = __le32_to_cpu(resp.grp_id);
+ return err;
+}
+
+int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count,
+ u32 grp_id)
+{
+ struct prestera_msg_nh_grp_req req = {
+ .grp_id = __cpu_to_le32(grp_id),
+ .size = __cpu_to_le32(nh_count)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_NH_GRP_DELETE,
+ &req.cmd, sizeof(req));
+}
+
int prestera_hw_rxtx_init(struct prestera_switch *sw,
struct prestera_rxtx_params *params)
{
@@ -1916,3 +2287,275 @@ void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
list_del_rcu(&eh->list);
kfree_rcu(eh, rcu);
}
+
+int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id)
+{
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(block_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_TRIGGER,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_counter_abort(struct prestera_switch *sw)
+{
+ struct prestera_msg_counter_req req;
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_ABORT,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx,
+ u32 *len, bool *done,
+ struct prestera_counter_stats *stats)
+{
+ struct prestera_msg_counter_resp *resp;
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(idx),
+ .num_counters = __cpu_to_le32(*len),
+ };
+ size_t size = struct_size(resp, stats, *len);
+ int err, i;
+
+ resp = kmalloc(size, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_GET,
+ &req.cmd, sizeof(req), &resp->ret, size);
+ if (err)
+ goto free_buff;
+
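+	/* Accumulate the returned values into the caller's stats array. */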
+ for (i = 0; i < __le32_to_cpu(resp->num_counters); i++) {
+ stats[i].packets += __le64_to_cpu(resp->stats[i].packets);
+ stats[i].bytes += __le64_to_cpu(resp->stats[i].bytes);
+ }
+
+ *len = __le32_to_cpu(resp->num_counters);
+ *done = __le32_to_cpu(resp->done);
+
+free_buff:
+ kfree(resp);
+ return err;
+}
+
+int prestera_hw_counter_block_get(struct prestera_switch *sw,
+ u32 client, u32 *block_id, u32 *offset,
+ u32 *num_counters)
+{
+ struct prestera_msg_counter_resp resp;
+ struct prestera_msg_counter_req req = {
+ .client = __cpu_to_le32(client)
+ };
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *block_id = __le32_to_cpu(resp.block_id);
+ *offset = __le32_to_cpu(resp.offset);
+ *num_counters = __le32_to_cpu(resp.num_counters);
+
+ return 0;
+}
+
+int prestera_hw_counter_block_release(struct prestera_switch *sw,
+ u32 block_id)
+{
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(block_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_BLOCK_RELEASE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id,
+ u32 counter_id)
+{
+ struct prestera_msg_counter_req req = {
+ .block_id = __cpu_to_le32(block_id),
+ .num_counters = __cpu_to_le32(counter_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_COUNTER_CLEAR,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_policer_create(struct prestera_switch *sw, u8 type,
+ u32 *policer_id)
+{
+ struct prestera_msg_policer_resp resp;
+ struct prestera_msg_policer_req req = {
+ .type = type
+ };
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_POLICER_CREATE,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *policer_id = __le32_to_cpu(resp.id);
+ return 0;
+}
+
+int prestera_hw_policer_release(struct prestera_switch *sw,
+ u32 policer_id)
+{
+ struct prestera_msg_policer_req req = {
+ .id = __cpu_to_le32(policer_id)
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_RELEASE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw,
+ u32 policer_id, u64 cir, u32 cbs)
+{
+ struct prestera_msg_policer_req req = {
+ .mode = PRESTERA_POLICER_MODE_SR_TCM,
+ .id = __cpu_to_le32(policer_id),
+ .sr_tcm = {
+ .cir = __cpu_to_le64(cir),
+ .cbs = __cpu_to_le32(cbs)
+ }
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_POLICER_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_flood_domain_create(struct prestera_flood_domain *domain)
+{
+ struct prestera_msg_flood_domain_create_resp resp;
+ struct prestera_msg_flood_domain_create_req req;
+ int err;
+
+ err = prestera_cmd_ret(domain->sw,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_CREATE, &req.cmd,
+ sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ domain->idx = __le32_to_cpu(resp.flood_domain_idx);
+
+ return 0;
+}
+
+int prestera_hw_flood_domain_destroy(struct prestera_flood_domain *domain)
+{
+ struct prestera_msg_flood_domain_destroy_req req = {
+ .flood_domain_idx = __cpu_to_le32(domain->idx),
+ };
+
+ return prestera_cmd(domain->sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_DESTROY,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain)
+{
+ struct prestera_flood_domain_port *flood_domain_port;
+ struct prestera_msg_flood_domain_ports_set_req *req;
+ struct prestera_msg_flood_domain_port *ports;
+ struct prestera_switch *sw = domain->sw;
+ struct prestera_port *port;
+ u32 ports_num = 0;
+ int buf_size;
+ void *buff;
+ u16 lag_id;
+ int err;
+
+ list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list,
+ flood_domain_port_node)
+ ports_num++;
+
+ if (!ports_num)
+ return -EINVAL;
+
+ buf_size = sizeof(*req) + sizeof(*ports) * ports_num;
+
+ buff = kmalloc(buf_size, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ req = buff;
+ ports = buff + sizeof(*req);
+
+ req->flood_domain_idx = __cpu_to_le32(domain->idx);
+ req->ports_num = __cpu_to_le32(ports_num);
+
+ list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list,
+ flood_domain_port_node) {
+ if (netif_is_lag_master(flood_domain_port->dev)) {
+ if (prestera_lag_id(sw, flood_domain_port->dev,
+ &lag_id)) {
+ kfree(buff);
+ return -EINVAL;
+ }
+
+ ports->port_type =
+ __cpu_to_le16(PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG);
+ ports->lag_id = __cpu_to_le16(lag_id);
+ } else {
+ port = prestera_port_dev_lower_find(flood_domain_port->dev);
+
+ ports->port_type =
+ __cpu_to_le16(PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT);
+ ports->dev_num = __cpu_to_le32(port->dev_id);
+ ports->port_num = __cpu_to_le32(port->hw_id);
+ }
+
+ ports->vid = __cpu_to_le16(flood_domain_port->vid);
+
+ ports++;
+ }
+
+ err = prestera_cmd(sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET,
+ &req->cmd, buf_size);
+
+ kfree(buff);
+
+ return err;
+}
+
+int prestera_hw_flood_domain_ports_reset(struct prestera_flood_domain *domain)
+{
+ struct prestera_msg_flood_domain_ports_reset_req req = {
+ .flood_domain_idx = __cpu_to_le32(domain->idx),
+ };
+
+ return prestera_cmd(domain->sw,
+ PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_RESET, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_mdb_create(struct prestera_mdb_entry *mdb)
+{
+ struct prestera_msg_mdb_create_req req = {
+ .flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx),
+ .vid = __cpu_to_le16(mdb->vid),
+ };
+
+ memcpy(req.mac, mdb->addr, ETH_ALEN);
+
+ return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_CREATE, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_mdb_destroy(struct prestera_mdb_entry *mdb)
+{
+ struct prestera_msg_mdb_destroy_req req = {
+ .flood_domain_idx = __cpu_to_le32(mdb->flood_domain->idx),
+ .vid = __cpu_to_le16(mdb->vid),
+ };
+
+ memcpy(req.mac, mdb->addr, ETH_ALEN);
+
+ return prestera_cmd(mdb->sw, PRESTERA_CMD_TYPE_MDB_DESTROY, &req.cmd,
+ sizeof(req));
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index 57a3c2e5b112..0a929279e1ce 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -5,6 +5,7 @@
#define _PRESTERA_HW_H_
#include <linux/types.h>
+#include "prestera_acl.h"
enum prestera_accept_frm_type {
PRESTERA_ACCEPT_FRAME_TYPE_TAGGED,
@@ -106,23 +107,46 @@ enum {
PRESTERA_STP_FORWARD,
};
+enum {
+ PRESTERA_POLICER_TYPE_INGRESS,
+ PRESTERA_POLICER_TYPE_EGRESS
+};
+
enum prestera_hw_cpu_code_cnt_t {
PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP = 0,
PRESTERA_HW_CPU_CODE_CNT_TYPE_TRAP = 1,
};
+enum prestera_hw_vtcam_direction_t {
+ PRESTERA_HW_VTCAM_DIR_INGRESS = 0,
+ PRESTERA_HW_VTCAM_DIR_EGRESS = 1,
+};
+
+enum {
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_0 = 0,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_1 = 1,
+ PRESTERA_HW_COUNTER_CLIENT_INGRESS_LOOKUP_2 = 2,
+ PRESTERA_HW_COUNTER_CLIENT_EGRESS_LOOKUP = 3,
+};
+
struct prestera_switch;
struct prestera_port;
struct prestera_port_stats;
struct prestera_port_caps;
enum prestera_event_type;
struct prestera_event;
-struct prestera_acl_rule;
typedef void (*prestera_event_cb_t)
(struct prestera_switch *sw, struct prestera_event *evt, void *arg);
struct prestera_rxtx_params;
+struct prestera_acl_hw_action_info;
+struct prestera_acl_iface;
+struct prestera_counter_stats;
+struct prestera_iface;
+struct prestera_flood_domain;
+struct prestera_mdb_entry;
+struct prestera_neigh_info;
/* Switch API */
int prestera_hw_switch_init(struct prestera_switch *sw);
@@ -158,8 +182,10 @@ int prestera_hw_port_stats_get(const struct prestera_port *port,
struct prestera_port_stats *stats);
int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed);
int prestera_hw_port_learning_set(struct prestera_port *port, bool enable);
-int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask,
- unsigned long val);
+int prestera_hw_port_uc_flood_set(const struct prestera_port *port, bool flood);
+int prestera_hw_port_mc_flood_set(const struct prestera_port *port, bool flood);
+int prestera_hw_port_br_locked_set(const struct prestera_port *port,
+ bool br_locked);
int prestera_hw_port_accept_frm_type(struct prestera_port *port,
enum prestera_accept_frm_type type);
/* Vlan API */
@@ -186,28 +212,71 @@ int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id);
int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id);
int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id);
-/* ACL API */
-int prestera_hw_acl_ruleset_create(struct prestera_switch *sw,
- u16 *ruleset_id);
-int prestera_hw_acl_ruleset_del(struct prestera_switch *sw,
- u16 ruleset_id);
-int prestera_hw_acl_rule_add(struct prestera_switch *sw,
- struct prestera_acl_rule *rule,
- u32 *rule_id);
-int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id);
-int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw,
- u32 rule_id, u64 *packets, u64 *bytes);
-int prestera_hw_acl_port_bind(const struct prestera_port *port,
- u16 ruleset_id);
-int prestera_hw_acl_port_unbind(const struct prestera_port *port,
- u16 ruleset_id);
+/* vTCAM API */
+int prestera_hw_vtcam_create(struct prestera_switch *sw,
+ u8 lookup, const u32 *keymask, u32 *vtcam_id,
+ enum prestera_hw_vtcam_direction_t direction);
+int prestera_hw_vtcam_rule_add(struct prestera_switch *sw, u32 vtcam_id,
+ u32 prio, void *key, void *keymask,
+ struct prestera_acl_hw_action_info *act,
+ u8 n_act, u32 *rule_id);
+int prestera_hw_vtcam_rule_del(struct prestera_switch *sw,
+ u32 vtcam_id, u32 rule_id);
+int prestera_hw_vtcam_destroy(struct prestera_switch *sw, u32 vtcam_id);
+int prestera_hw_vtcam_iface_bind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id, u16 pcl_id);
+int prestera_hw_vtcam_iface_unbind(struct prestera_switch *sw,
+ struct prestera_acl_iface *iface,
+ u32 vtcam_id);
+
+/* Counter API */
+int prestera_hw_counter_trigger(struct prestera_switch *sw, u32 block_id);
+int prestera_hw_counter_abort(struct prestera_switch *sw);
+int prestera_hw_counters_get(struct prestera_switch *sw, u32 idx,
+ u32 *len, bool *done,
+ struct prestera_counter_stats *stats);
+int prestera_hw_counter_block_get(struct prestera_switch *sw,
+ u32 client, u32 *block_id, u32 *offset,
+ u32 *num_counters);
+int prestera_hw_counter_block_release(struct prestera_switch *sw,
+ u32 block_id);
+int prestera_hw_counter_clear(struct prestera_switch *sw, u32 block_id,
+ u32 counter_id);
/* SPAN API */
int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id);
-int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id);
-int prestera_hw_span_unbind(const struct prestera_port *port);
+int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id,
+ bool ingress);
+int prestera_hw_span_unbind(const struct prestera_port *port, bool ingress);
int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id);
+/* Router API */
+int prestera_hw_rif_create(struct prestera_switch *sw,
+ struct prestera_iface *iif, u8 *mac, u16 *rif_id);
+int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id,
+ struct prestera_iface *iif);
+
+/* Virtual Router API */
+int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id);
+int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id);
+
+/* LPM API */
+int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len, u32 grp_id);
+int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len);
+
+/* NH API */
+int prestera_hw_nh_entries_set(struct prestera_switch *sw, int count,
+ struct prestera_neigh_info *nhs, u32 grp_id);
+int prestera_hw_nhgrp_blk_get(struct prestera_switch *sw,
+ u8 *hw_state, u32 buf_size /* Buffer in bytes */);
+int prestera_hw_nh_group_create(struct prestera_switch *sw, u16 nh_count,
+ u32 *grp_id);
+int prestera_hw_nh_group_delete(struct prestera_switch *sw, u16 nh_count,
+ u32 grp_id);
+
/* Event handlers */
int prestera_hw_event_handler_register(struct prestera_switch *sw,
enum prestera_event_type type,
@@ -241,4 +310,21 @@ prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code,
enum prestera_hw_cpu_code_cnt_t counter_type,
u64 *packet_count);
+/* Policer API */
+int prestera_hw_policer_create(struct prestera_switch *sw, u8 type,
+ u32 *policer_id);
+int prestera_hw_policer_release(struct prestera_switch *sw,
+ u32 policer_id);
+int prestera_hw_policer_sr_tcm_set(struct prestera_switch *sw,
+ u32 policer_id, u64 cir, u32 cbs);
+
+/* Flood domain / MDB API */
+int prestera_hw_flood_domain_create(struct prestera_flood_domain *domain);
+int prestera_hw_flood_domain_destroy(struct prestera_flood_domain *domain);
+int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain);
+int prestera_hw_flood_domain_ports_reset(struct prestera_flood_domain *domain);
+
+int prestera_hw_mdb_create(struct prestera_mdb_entry *mdb);
+int prestera_hw_mdb_destroy(struct prestera_mdb_entry *mdb);
+
#endif /* _PRESTERA_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 4369a3ffad45..9d504142e51a 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -9,6 +9,7 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/if_vlan.h>
+#include <linux/phylink.h>
#include "prestera.h"
#include "prestera_hw.h"
@@ -18,6 +19,7 @@
#include "prestera_rxtx.h"
#include "prestera_devlink.h"
#include "prestera_ethtool.h"
+#include "prestera_counter.h"
#include "prestera_switchdev.h"
#define PRESTERA_MTU_DEFAULT 1536
@@ -27,6 +29,43 @@
#define PRESTERA_MAC_ADDR_NUM_MAX 255
static struct workqueue_struct *prestera_wq;
+static struct workqueue_struct *prestera_owq;
+
+void prestera_queue_work(struct work_struct *work)
+{
+ queue_work(prestera_owq, work);
+}
+
+void prestera_queue_delayed_work(struct delayed_work *work, unsigned long delay)
+{
+ queue_delayed_work(prestera_wq, work, delay);
+}
+
+void prestera_queue_drain(void)
+{
+ drain_workqueue(prestera_wq);
+ drain_workqueue(prestera_owq);
+}
+
+int prestera_port_learning_set(struct prestera_port *port, bool learn)
+{
+ return prestera_hw_port_learning_set(port, learn);
+}
+
+int prestera_port_uc_flood_set(struct prestera_port *port, bool flood)
+{
+ return prestera_hw_port_uc_flood_set(port, flood);
+}
+
+int prestera_port_mc_flood_set(struct prestera_port *port, bool flood)
+{
+ return prestera_hw_port_mc_flood_set(port, flood);
+}
+
+int prestera_port_br_locked_set(struct prestera_port *port, bool br_locked)
+{
+ return prestera_hw_port_br_locked_set(port, br_locked);
+}
int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
{
@@ -54,12 +93,14 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
u32 dev_id, u32 hw_id)
{
- struct prestera_port *port = NULL;
+ struct prestera_port *port = NULL, *tmp;
read_lock(&sw->port_list_lock);
- list_for_each_entry(port, &sw->port_list, list) {
- if (port->dev_id == dev_id && port->hw_id == hw_id)
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) {
+ port = tmp;
break;
+ }
}
read_unlock(&sw->port_list_lock);
@@ -68,18 +109,28 @@ struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
{
- struct prestera_port *port = NULL;
+ struct prestera_port *port = NULL, *tmp;
read_lock(&sw->port_list_lock);
- list_for_each_entry(port, &sw->port_list, list) {
- if (port->id == id)
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->id == id) {
+ port = tmp;
break;
+ }
}
read_unlock(&sw->port_list_lock);
return port;
}
+struct prestera_switch *prestera_switch_get(struct net_device *dev)
+{
+ struct prestera_port *port;
+
+ port = prestera_port_dev_lower_find(dev);
+ return port ? port->sw : NULL;
+}
+
int prestera_port_cfg_mac_read(struct prestera_port *port,
struct prestera_port_mac_config *cfg)
{
@@ -108,18 +159,24 @@ static int prestera_port_open(struct net_device *dev)
struct prestera_port_mac_config cfg_mac;
int err = 0;
- if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
- err = prestera_port_cfg_mac_read(port, &cfg_mac);
- if (!err) {
- cfg_mac.admin = true;
- err = prestera_port_cfg_mac_write(port, &cfg_mac);
- }
+ if (port->phy_link) {
+ phylink_start(port->phy_link);
} else {
- port->cfg_phy.admin = true;
- err = prestera_hw_port_phy_mode_set(port, true, port->autoneg,
- port->cfg_phy.mode,
- port->adver_link_modes,
- port->cfg_phy.mdix);
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (!err) {
+ cfg_mac.admin = true;
+ err = prestera_port_cfg_mac_write(port,
+ &cfg_mac);
+ }
+ } else {
+ port->cfg_phy.admin = true;
+ err = prestera_hw_port_phy_mode_set(port, true,
+ port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
}
netif_start_queue(dev);
@@ -135,30 +192,267 @@ static int prestera_port_close(struct net_device *dev)
netif_stop_queue(dev);
- if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ if (port->phy_link) {
+ phylink_stop(port->phy_link);
+ phylink_disconnect_phy(port->phy_link);
err = prestera_port_cfg_mac_read(port, &cfg_mac);
if (!err) {
cfg_mac.admin = false;
prestera_port_cfg_mac_write(port, &cfg_mac);
}
} else {
- port->cfg_phy.admin = false;
- err = prestera_hw_port_phy_mode_set(port, false, port->autoneg,
- port->cfg_phy.mode,
- port->adver_link_modes,
- port->cfg_phy.mdix);
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (!err) {
+ cfg_mac.admin = false;
+ prestera_port_cfg_mac_write(port, &cfg_mac);
+ }
+ } else {
+ port->cfg_phy.admin = false;
+ err = prestera_hw_port_phy_mode_set(port, false, port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
+ }
+
+ return err;
+}
+
+static void
+prestera_port_mac_state_cache_read(struct prestera_port *port,
+ struct prestera_port_mac_state *state)
+{
+ spin_lock(&port->state_mac_lock);
+ *state = port->state_mac;
+ spin_unlock(&port->state_mac_lock);
+}
+
+static void
+prestera_port_mac_state_cache_write(struct prestera_port *port,
+ struct prestera_port_mac_state *state)
+{
+ spin_lock(&port->state_mac_lock);
+ port->state_mac = *state;
+ spin_unlock(&port->state_mac_lock);
+}
+
+static struct prestera_port *prestera_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct prestera_port, phylink_pcs);
+}
+
+static void prestera_mac_config(struct phylink_config *config,
+ unsigned int an_mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void prestera_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct prestera_port *port = netdev_priv(ndev);
+ struct prestera_port_mac_state state_mac;
+
+	/* Invalidate; the parameters will be updated on the next link event. */
+ memset(&state_mac, 0, sizeof(state_mac));
+ state_mac.valid = false;
+ prestera_port_mac_state_cache_write(port, &state_mac);
+}
+
+static void prestera_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+}
+
+static struct phylink_pcs *
+prestera_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *dev = to_net_dev(config->dev);
+ struct prestera_port *port = netdev_priv(dev);
+
+ return &port->phylink_pcs;
+}
+
+static void prestera_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct prestera_port *port = container_of(pcs, struct prestera_port,
+ phylink_pcs);
+ struct prestera_port_mac_state smac;
+
+ prestera_port_mac_state_cache_read(port, &smac);
+
+ if (smac.valid) {
+ state->link = smac.oper ? 1 : 0;
+		/* AN is completed when the port is up */
+ state->an_complete = (smac.oper && port->autoneg) ? 1 : 0;
+ state->speed = smac.speed;
+ state->duplex = smac.duplex;
+ } else {
+ state->link = 0;
+ state->an_complete = 0;
+ }
+}
+
+static int prestera_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct prestera_port *port = prestera_pcs_to_port(pcs);
+ struct prestera_port_mac_config cfg_mac;
+ int err;
+
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (err)
+ return err;
+
+ cfg_mac.admin = true;
+ cfg_mac.fec = PRESTERA_PORT_FEC_OFF;
+
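+	/* Map the PHY interface mode to the corresponding MAC configuration. */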
+ switch (interface) {
+ case PHY_INTERFACE_MODE_10GBASER:
+ cfg_mac.speed = SPEED_10000;
+ cfg_mac.inband = 0;
+ cfg_mac.mode = PRESTERA_MAC_MODE_SR_LR;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ cfg_mac.speed = SPEED_2500;
+ cfg_mac.duplex = DUPLEX_FULL;
+ cfg_mac.inband = test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ advertising);
+ cfg_mac.mode = PRESTERA_MAC_MODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ cfg_mac.inband = 1;
+ cfg_mac.mode = PRESTERA_MAC_MODE_SGMII;
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ default:
+ cfg_mac.speed = SPEED_1000;
+ cfg_mac.duplex = DUPLEX_FULL;
+ cfg_mac.inband = test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ advertising);
+ cfg_mac.mode = PRESTERA_MAC_MODE_1000BASE_X;
+ break;
+ }
+
+ err = prestera_port_cfg_mac_write(port, &cfg_mac);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void prestera_pcs_an_restart(struct phylink_pcs *pcs)
+{
+	/* TODO: add 1000baseX AN restart support.
+	 * The FW does not yet support 1000baseX AN restart; this callback
+	 * stays empty until that support is added.
+	 */
+}
+
+static const struct phylink_mac_ops prestera_mac_ops = {
+ .mac_select_pcs = prestera_mac_select_pcs,
+ .mac_config = prestera_mac_config,
+ .mac_link_down = prestera_mac_link_down,
+ .mac_link_up = prestera_mac_link_up,
+};
+
+static const struct phylink_pcs_ops prestera_pcs_ops = {
+ .pcs_get_state = prestera_pcs_get_state,
+ .pcs_config = prestera_pcs_config,
+ .pcs_an_restart = prestera_pcs_an_restart,
+};
+
+static int prestera_port_sfp_bind(struct prestera_port *port)
+{
+ struct prestera_switch *sw = port->sw;
+ struct device_node *ports, *node;
+ struct fwnode_handle *fwnode;
+ struct phylink *phy_link;
+ int err;
+
+ if (!sw->np)
+ return 0;
+
+ of_node_get(sw->np);
+ ports = of_find_node_by_name(sw->np, "ports");
+
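+	/* Match this port to its DT node by "prestera,port-num" and create
+	 * a phylink instance for it.
+	 */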
+ for_each_child_of_node(ports, node) {
+ int num;
+
+ err = of_property_read_u32(node, "prestera,port-num", &num);
+ if (err) {
+ dev_err(sw->dev->dev,
+				"device node %pOF has no valid prestera,port-num property: %d\n",
+ node, err);
+ goto out;
+ }
+
+ if (port->fp_id != num)
+ continue;
+
+ port->phylink_pcs.ops = &prestera_pcs_ops;
+
+ port->phy_config.dev = &port->dev->dev;
+ port->phy_config.type = PHYLINK_NETDEV;
+
+ fwnode = of_fwnode_handle(node);
+
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phy_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phy_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phy_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phy_config.supported_interfaces);
+
+ port->phy_config.mac_capabilities =
+ MAC_1000 | MAC_2500FD | MAC_10000FD;
+
+ phy_link = phylink_create(&port->phy_config, fwnode,
+ PHY_INTERFACE_MODE_INTERNAL,
+ &prestera_mac_ops);
+ if (IS_ERR(phy_link)) {
+ netdev_err(port->dev, "failed to create phylink\n");
+ err = PTR_ERR(phy_link);
+ goto out;
+ }
+
+ port->phy_link = phy_link;
+ break;
}
+out:
+ of_node_put(node);
+ of_node_put(ports);
return err;
}
+static int prestera_port_sfp_unbind(struct prestera_port *port)
+{
+ if (port->phy_link)
+ phylink_destroy(port->phy_link);
+
+ return 0;
+}
+
static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
struct net_device *dev)
{
return prestera_rxtx_xmit(netdev_priv(dev), skb);
}
-static int prestera_is_valid_mac_addr(struct prestera_port *port, u8 *addr)
+int prestera_is_valid_mac_addr(struct prestera_port *port, const u8 *addr)
{
if (!is_valid_ether_addr(addr))
return -EADDRNOTAVAIL;
@@ -274,7 +568,6 @@ static const struct net_device_ops prestera_netdev_ops = {
.ndo_change_mtu = prestera_port_change_mtu,
.ndo_get_stats64 = prestera_port_get_stats64,
.ndo_set_mac_address = prestera_port_set_mac_address,
- .ndo_get_devlink_port = prestera_devlink_get_port,
};
int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes)
@@ -332,6 +625,8 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
port->id = id;
port->sw = sw;
+ spin_lock_init(&port->state_mac_lock);
+
err = prestera_hw_port_info_get(port, &port->dev_id, &port->hw_id,
&port->fp_id);
if (err) {
@@ -346,8 +641,11 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
dev->netdev_ops = &prestera_netdev_ops;
dev->ethtool_ops = &prestera_ethtool_ops;
+ SET_NETDEV_DEV(dev, sw->dev->dev);
+ SET_NETDEV_DEVLINK_PORT(dev, &port->dl_port);
- netif_carrier_off(dev);
+ if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP)
+ netif_carrier_off(dev);
dev->mtu = min_t(unsigned int, sw->mtu_max, PRESTERA_MTU_DEFAULT);
dev->min_mtu = sw->mtu_min;
@@ -398,7 +696,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
cfg_mac.admin = false;
cfg_mac.mode = PRESTERA_MAC_MODE_MAX;
}
- cfg_mac.inband = false;
+ cfg_mac.inband = 0;
cfg_mac.speed = 0;
cfg_mac.duplex = DUPLEX_UNKNOWN;
cfg_mac.fec = PRESTERA_PORT_FEC_OFF;
@@ -438,10 +736,14 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
if (err)
goto err_register_netdev;
- prestera_devlink_port_set(port);
+ err = prestera_port_sfp_bind(port);
+ if (err)
+ goto err_sfp_bind;
return 0;
+err_sfp_bind:
+ unregister_netdev(dev);
err_register_netdev:
prestera_port_list_del(port);
err_port_init:
@@ -457,7 +759,6 @@ static void prestera_port_destroy(struct prestera_port *port)
struct net_device *dev = port->dev;
cancel_delayed_work_sync(&port->cached_hw_stats.caching_dw);
- prestera_devlink_port_clear(port);
unregister_netdev(dev);
prestera_port_list_del(port);
prestera_devlink_port_unregister(port);
@@ -487,8 +788,10 @@ static int prestera_create_ports(struct prestera_switch *sw)
return 0;
err_port_create:
- list_for_each_entry_safe(port, tmp, &sw->port_list, list)
+ list_for_each_entry_safe(port, tmp, &sw->port_list, list) {
+ prestera_port_sfp_unbind(port);
prestera_port_destroy(port);
+ }
return err;
}
@@ -496,25 +799,45 @@ err_port_create:
static void prestera_port_handle_event(struct prestera_switch *sw,
struct prestera_event *evt, void *arg)
{
+ struct prestera_port_mac_state smac;
+ struct prestera_port_event *pevt;
struct delayed_work *caching_dw;
struct prestera_port *port;
- port = prestera_find_port(sw, evt->port_evt.port_id);
- if (!port || !port->dev)
- return;
-
- caching_dw = &port->cached_hw_stats.caching_dw;
-
- prestera_ethtool_port_state_changed(port, &evt->port_evt);
-
if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) {
+ pevt = &evt->port_evt;
+ port = prestera_find_port(sw, pevt->port_id);
+ if (!port || !port->dev)
+ return;
+
+ caching_dw = &port->cached_hw_stats.caching_dw;
+
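+		/* Cache the reported MAC state so the PCS callbacks can
+		 * report it to phylink.
+		 */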
+ memset(&smac, 0, sizeof(smac));
+ smac.valid = true;
+ smac.oper = pevt->data.mac.oper;
+ if (smac.oper) {
+ smac.mode = pevt->data.mac.mode;
+ smac.speed = pevt->data.mac.speed;
+ smac.duplex = pevt->data.mac.duplex;
+ smac.fc = pevt->data.mac.fc;
+ smac.fec = pevt->data.mac.fec;
+ }
+ prestera_port_mac_state_cache_write(port, &smac);
+
if (port->state_mac.oper) {
- netif_carrier_on(port->dev);
+ if (port->phy_link)
+ phylink_mac_change(port->phy_link, true);
+ else
+ netif_carrier_on(port->dev);
+
if (!delayed_work_pending(caching_dw))
queue_delayed_work(prestera_wq, caching_dw, 0);
- } else if (netif_running(port->dev) &&
- netif_carrier_ok(port->dev)) {
- netif_carrier_off(port->dev);
+ } else {
+ if (port->phy_link)
+ phylink_mac_change(port->phy_link, false);
+ else if (netif_running(port->dev) && netif_carrier_ok(port->dev))
+ netif_carrier_off(port->dev);
+
if (delayed_work_pending(caching_dw))
cancel_delayed_work(caching_dw);
}
@@ -536,19 +859,14 @@ static void prestera_event_handlers_unregister(struct prestera_switch *sw)
static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
{
- struct device_node *base_mac_np;
- struct device_node *np;
int ret;
- np = of_find_compatible_node(NULL, NULL, "marvell,prestera");
- base_mac_np = of_parse_phandle(np, "base-mac-provider", 0);
-
- ret = of_get_mac_address(base_mac_np, sw->base_mac);
- if (ret) {
+ if (sw->np)
+ ret = of_get_mac_address(sw->np, sw->base_mac);
+ if (!is_valid_ether_addr(sw->base_mac) || ret) {
eth_random_addr(sw->base_mac);
dev_info(prestera_dev(sw), "using random base mac address\n");
}
- of_node_put(base_mac_np);
return prestera_hw_switch_mac_set(sw, sw->base_mac);
}
@@ -573,6 +891,30 @@ static struct prestera_lag *prestera_lag_by_dev(struct prestera_switch *sw,
return NULL;
}
+int prestera_lag_id(struct prestera_switch *sw,
+ struct net_device *lag_dev, u16 *lag_id)
+{
+ struct prestera_lag *lag;
+ int free_id = -1;
+ int id;
+
+ for (id = 0; id < sw->lag_max; id++) {
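+	/* Reuse the id of the LAG already bound to lag_dev, otherwise
+	 * return the first unused id.
+	 */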
+ lag = prestera_lag_by_id(sw, id);
+ if (lag->member_count) {
+ if (lag->dev == lag_dev) {
+ *lag_id = id;
+ return 0;
+ }
+ } else if (free_id < 0) {
+ free_id = id;
+ }
+ }
+ if (free_id < 0)
+ return -ENOSPC;
+ *lag_id = free_id;
+ return 0;
+}
+
static struct prestera_lag *prestera_lag_create(struct prestera_switch *sw,
struct net_device *lag_dev)
{
@@ -764,23 +1106,27 @@ static int prestera_netdev_port_event(struct net_device *lower,
struct net_device *dev,
unsigned long event, void *ptr)
{
- struct netdev_notifier_changeupper_info *info = ptr;
+ struct netdev_notifier_info *info = ptr;
+ struct netdev_notifier_changeupper_info *cu_info;
struct prestera_port *port = netdev_priv(dev);
struct netlink_ext_ack *extack;
struct net_device *upper;
- extack = netdev_notifier_info_to_extack(&info->info);
- upper = info->upper_dev;
+ extack = netdev_notifier_info_to_extack(info);
+ cu_info = container_of(info,
+ struct netdev_notifier_changeupper_info,
+ info);
switch (event) {
case NETDEV_PRECHANGEUPPER:
+ upper = cu_info->upper_dev;
if (!netif_is_bridge_master(upper) &&
!netif_is_lag_master(upper)) {
NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
- if (!info->linking)
+ if (!cu_info->linking)
break;
if (netdev_has_any_upper_dev(upper)) {
@@ -789,7 +1135,7 @@ static int prestera_netdev_port_event(struct net_device *lower,
}
if (netif_is_lag_master(upper) &&
- !prestera_lag_master_check(upper, info->upper_info, extack))
+ !prestera_lag_master_check(upper, cu_info->upper_info, extack))
return -EOPNOTSUPP;
if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) {
NL_SET_ERR_MSG_MOD(extack,
@@ -805,14 +1151,15 @@ static int prestera_netdev_port_event(struct net_device *lower,
break;
case NETDEV_CHANGEUPPER:
+ upper = cu_info->upper_dev;
if (netif_is_bridge_master(upper)) {
- if (info->linking)
+ if (cu_info->linking)
return prestera_bridge_port_join(upper, port,
extack);
else
prestera_bridge_port_leave(upper, port);
} else if (netif_is_lag_master(upper)) {
- if (info->linking)
+ if (cu_info->linking)
return prestera_lag_port_add(port, upper);
else
prestera_lag_port_del(port);
@@ -859,6 +1206,150 @@ static int prestera_netdev_event_handler(struct notifier_block *nb,
return notifier_from_errno(err);
}
+struct prestera_mdb_entry *
+prestera_mdb_entry_create(struct prestera_switch *sw,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_flood_domain *flood_domain;
+ struct prestera_mdb_entry *mdb_entry;
+
+ mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
+ if (!mdb_entry)
+ goto err_mdb_alloc;
+
+ flood_domain = prestera_flood_domain_create(sw);
+ if (!flood_domain)
+ goto err_flood_domain_create;
+
+ mdb_entry->sw = sw;
+ mdb_entry->vid = vid;
+ mdb_entry->flood_domain = flood_domain;
+ ether_addr_copy(mdb_entry->addr, addr);
+
+ if (prestera_hw_mdb_create(mdb_entry))
+ goto err_mdb_hw_create;
+
+ return mdb_entry;
+
+err_mdb_hw_create:
+ prestera_flood_domain_destroy(flood_domain);
+err_flood_domain_create:
+ kfree(mdb_entry);
+err_mdb_alloc:
+ return NULL;
+}
+
+void prestera_mdb_entry_destroy(struct prestera_mdb_entry *mdb_entry)
+{
+ prestera_hw_mdb_destroy(mdb_entry);
+ prestera_flood_domain_destroy(mdb_entry->flood_domain);
+ kfree(mdb_entry);
+}
+
+struct prestera_flood_domain *
+prestera_flood_domain_create(struct prestera_switch *sw)
+{
+ struct prestera_flood_domain *domain;
+
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return NULL;
+
+ domain->sw = sw;
+
+ if (prestera_hw_flood_domain_create(domain)) {
+ kfree(domain);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&domain->flood_domain_port_list);
+
+ return domain;
+}
+
+void prestera_flood_domain_destroy(struct prestera_flood_domain *flood_domain)
+{
+ WARN_ON(!list_empty(&flood_domain->flood_domain_port_list));
+ WARN_ON_ONCE(prestera_hw_flood_domain_destroy(flood_domain));
+ kfree(flood_domain);
+}
+
+int
+prestera_flood_domain_port_create(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev,
+ u16 vid)
+{
+ struct prestera_flood_domain_port *flood_domain_port;
+ bool is_first_port_in_list = false;
+ int err;
+
+ flood_domain_port = kzalloc(sizeof(*flood_domain_port), GFP_KERNEL);
+ if (!flood_domain_port) {
+ err = -ENOMEM;
+ goto err_port_alloc;
+ }
+
+ flood_domain_port->vid = vid;
+
+ if (list_empty(&flood_domain->flood_domain_port_list))
+ is_first_port_in_list = true;
+
+ list_add(&flood_domain_port->flood_domain_port_node,
+ &flood_domain->flood_domain_port_list);
+
+ flood_domain_port->flood_domain = flood_domain;
+ flood_domain_port->dev = dev;
+
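+	/* ports_set programs the full port list, so reset the previously
+	 * programmed set first when other ports already exist.
+	 */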
+ if (!is_first_port_in_list) {
+ err = prestera_hw_flood_domain_ports_reset(flood_domain);
+ if (err)
+ goto err_prestera_mdb_port_create_hw;
+ }
+
+ err = prestera_hw_flood_domain_ports_set(flood_domain);
+ if (err)
+ goto err_prestera_mdb_port_create_hw;
+
+ return 0;
+
+err_prestera_mdb_port_create_hw:
+ list_del(&flood_domain_port->flood_domain_port_node);
+ kfree(flood_domain_port);
+err_port_alloc:
+ return err;
+}
+
+void
+prestera_flood_domain_port_destroy(struct prestera_flood_domain_port *port)
+{
+ struct prestera_flood_domain *flood_domain = port->flood_domain;
+
+ list_del(&port->flood_domain_port_node);
+
+ WARN_ON_ONCE(prestera_hw_flood_domain_ports_reset(flood_domain));
+
+ if (!list_empty(&flood_domain->flood_domain_port_list))
+ WARN_ON_ONCE(prestera_hw_flood_domain_ports_set(flood_domain));
+
+ kfree(port);
+}
+
+struct prestera_flood_domain_port *
+prestera_flood_domain_port_find(struct prestera_flood_domain *flood_domain,
+ struct net_device *dev, u16 vid)
+{
+ struct prestera_flood_domain_port *flood_domain_port;
+
+ list_for_each_entry(flood_domain_port,
+ &flood_domain->flood_domain_port_list,
+ flood_domain_port_node)
+ if (flood_domain_port->dev == dev &&
+ vid == flood_domain_port->vid)
+ return flood_domain_port;
+
+ return NULL;
+}
+
static int prestera_netdev_event_handler_register(struct prestera_switch *sw)
{
sw->netdev_nb.notifier_call = prestera_netdev_event_handler;
@@ -875,6 +1366,8 @@ static int prestera_switch_init(struct prestera_switch *sw)
{
int err;
+ sw->np = sw->dev->dev->of_node;
+
err = prestera_hw_switch_init(sw);
if (err) {
dev_err(prestera_dev(sw), "Failed to init Switch device\n");
@@ -892,6 +1385,10 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
return err;
+ err = prestera_router_init(sw);
+ if (err)
+ goto err_router_init;
+
err = prestera_switchdev_init(sw);
if (err)
goto err_swdev_register;
@@ -904,6 +1401,10 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
goto err_handlers_register;
+ err = prestera_counter_init(sw);
+ if (err)
+ goto err_counter_init;
+
err = prestera_acl_init(sw);
if (err)
goto err_acl_init;
@@ -936,12 +1437,16 @@ err_dl_register:
err_span_init:
prestera_acl_fini(sw);
err_acl_init:
+ prestera_counter_fini(sw);
+err_counter_init:
prestera_event_handlers_unregister(sw);
err_handlers_register:
prestera_rxtx_switch_fini(sw);
err_rxtx_register:
prestera_switchdev_fini(sw);
err_swdev_register:
+ prestera_router_fini(sw);
+err_router_init:
prestera_netdev_event_handler_unregister(sw);
prestera_hw_switch_fini(sw);
@@ -956,11 +1461,14 @@ static void prestera_switch_fini(struct prestera_switch *sw)
prestera_devlink_traps_unregister(sw);
prestera_span_fini(sw);
prestera_acl_fini(sw);
+ prestera_counter_fini(sw);
prestera_event_handlers_unregister(sw);
prestera_rxtx_switch_fini(sw);
prestera_switchdev_fini(sw);
+ prestera_router_fini(sw);
prestera_netdev_event_handler_unregister(sw);
prestera_hw_switch_fini(sw);
+ of_node_put(sw->np);
}
int prestera_device_register(struct prestera_device *dev)
@@ -1000,12 +1508,19 @@ static int __init prestera_module_init(void)
if (!prestera_wq)
return -ENOMEM;
+ prestera_owq = alloc_ordered_workqueue("prestera_ordered", 0);
+ if (!prestera_owq) {
+ destroy_workqueue(prestera_wq);
+ return -ENOMEM;
+ }
+
return 0;
}
static void __exit prestera_module_exit(void)
{
destroy_workqueue(prestera_wq);
+ destroy_workqueue(prestera_owq);
}
module_init(prestera_module_init);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_matchall.c b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
new file mode 100644
index 000000000000..1da9c1bc1ee9
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_matchall.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2022 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_flow.h"
+#include "prestera_flower.h"
+#include "prestera_matchall.h"
+#include "prestera_span.h"
+
+static int prestera_mall_prio_check(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ u32 flower_prio_min;
+ u32 flower_prio_max;
+ int err;
+
+ err = prestera_flower_prio_get(block, f->common.chain_index,
+ &flower_prio_min, &flower_prio_max);
+ if (err == -ENOENT)
+ /* No flower filters installed on this chain. */
+ return 0;
+
+ if (err) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
+ return err;
+ }
+
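+	/* A matchall mirror rule must precede all flower rules on ingress
+	 * and follow them on egress.
+	 */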
+ if (f->common.prio <= flower_prio_max && !block->ingress) {
+ NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
+ return -EOPNOTSUPP;
+ }
+ if (f->common.prio >= flower_prio_min && block->ingress) {
+		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int prestera_mall_prio_get(struct prestera_flow_block *block,
+ u32 *prio_min, u32 *prio_max)
+{
+ if (!block->mall.bound)
+ return -ENOENT;
+
+ *prio_min = block->mall.prio_min;
+ *prio_max = block->mall.prio_max;
+ return 0;
+}
+
+static void prestera_mall_prio_update(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ block->mall.prio_min = min(block->mall.prio_min, f->common.prio);
+ block->mall.prio_max = max(block->mall.prio_max, f->common.prio);
+}
+
+int prestera_mall_replace(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct prestera_flow_block_binding *binding;
+ __be16 protocol = f->common.protocol;
+ struct flow_action_entry *act;
+ struct prestera_port *port;
+ int err;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &f->rule->action.entries[0];
+
+ if (!prestera_netdev_check(act->dev)) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Only Marvell Prestera port is supported");
+ return -EINVAL;
+ }
+ if (!tc_cls_can_offload_and_chain0(act->dev, &f->common))
+ return -EOPNOTSUPP;
+ if (act->id != FLOW_ACTION_MIRRED)
+ return -EOPNOTSUPP;
+ if (protocol != htons(ETH_P_ALL))
+ return -EOPNOTSUPP;
+
+ err = prestera_mall_prio_check(block, f);
+ if (err)
+ return err;
+
+ port = netdev_priv(act->dev);
+
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = prestera_span_rule_add(binding, port, block->ingress);
+ if (err == -EEXIST)
+ return err;
+ if (err)
+ goto rollback;
+ }
+
+ prestera_mall_prio_update(block, f);
+
+ block->mall.bound = true;
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding,
+ &block->binding_list, list)
+ prestera_span_rule_del(binding, block->ingress);
+ return err;
+}
+
+void prestera_mall_destroy(struct prestera_flow_block *block)
+{
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ prestera_span_rule_del(binding, block->ingress);
+
+ block->mall.prio_min = UINT_MAX;
+ block->mall.prio_max = 0;
+ block->mall.bound = false;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_matchall.h b/drivers/net/ethernet/marvell/prestera/prestera_matchall.h
new file mode 100644
index 000000000000..fed08be80257
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_matchall.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2022 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_MATCHALL_H_
+#define _PRESTERA_MATCHALL_H_
+
+#include <net/pkt_cls.h>
+
+struct prestera_flow_block;
+
+int prestera_mall_replace(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+void prestera_mall_destroy(struct prestera_flow_block *block);
+int prestera_mall_prio_get(struct prestera_flow_block *block,
+ u32 *prio_min, u32 *prio_max);
+
+#endif /* _PRESTERA_MATCHALL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index f538a749ebd4..f328d957b2db 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -15,12 +15,13 @@
#define PRESTERA_MSG_MAX_SIZE 1500
#define PRESTERA_SUPP_FW_MAJ_VER 4
-#define PRESTERA_SUPP_FW_MIN_VER 0
+#define PRESTERA_SUPP_FW_MIN_VER 1
#define PRESTERA_PREV_FW_MAJ_VER 4
#define PRESTERA_PREV_FW_MIN_VER 0
#define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img"
+#define PRESTERA_FW_ARM64_PATH_FMT "mrvl/prestera/mvsw_prestera_fw_arm64-v%u.%u.img"
#define PRESTERA_FW_HDR_MAGIC 0x351D9D06
#define PRESTERA_FW_DL_TIMEOUT_MS 50000
@@ -184,6 +185,15 @@ struct prestera_fw_regs {
#define PRESTERA_FW_CMD_DEFAULT_WAIT_MS 30000
#define PRESTERA_FW_READY_WAIT_MS 20000
+#define PRESTERA_DEV_ID_AC3X_98DX_55 0xC804
+#define PRESTERA_DEV_ID_AC3X_98DX_65 0xC80C
+#define PRESTERA_DEV_ID_ALDRIN2 0xCC1E
+#define PRESTERA_DEV_ID_98DX7312M 0x981F
+#define PRESTERA_DEV_ID_98DX3500 0x9820
+#define PRESTERA_DEV_ID_98DX3501 0x9826
+#define PRESTERA_DEV_ID_98DX3510 0x9821
+#define PRESTERA_DEV_ID_98DX3520 0x9822
+
struct prestera_fw_evtq {
u8 __iomem *addr;
size_t len;
@@ -201,6 +211,7 @@ struct prestera_fw {
const struct firmware *bin;
struct workqueue_struct *wq;
struct prestera_device dev;
+ struct pci_dev *pci_dev;
u8 __iomem *ldr_regs;
u8 __iomem *ldr_ring_buf;
u32 ldr_buf_len;
@@ -689,6 +700,20 @@ static int prestera_fw_hdr_parse(struct prestera_fw *fw)
return prestera_fw_rev_check(fw);
}
+static const char *prestera_fw_path_fmt_get(struct prestera_fw *fw)
+{
+ switch (fw->pci_dev->device) {
+ case PRESTERA_DEV_ID_98DX3500:
+ case PRESTERA_DEV_ID_98DX3501:
+ case PRESTERA_DEV_ID_98DX3510:
+ case PRESTERA_DEV_ID_98DX3520:
+ return PRESTERA_FW_ARM64_PATH_FMT;
+
+ default:
+ return PRESTERA_FW_PATH_FMT;
+ }
+}
+
static int prestera_fw_get(struct prestera_fw *fw)
{
int ver_maj = PRESTERA_SUPP_FW_MAJ_VER;
@@ -697,7 +722,7 @@ static int prestera_fw_get(struct prestera_fw *fw)
int err;
pick_fw_ver:
- snprintf(fw_path, sizeof(fw_path), PRESTERA_FW_PATH_FMT,
+ snprintf(fw_path, sizeof(fw_path), prestera_fw_path_fmt_get(fw),
ver_maj, ver_min);
err = request_firmware_direct(&fw->bin, fw_path, fw->dev.dev);
@@ -774,22 +799,56 @@ out_release:
return err;
}
+static bool prestera_pci_pp_use_bar2(struct pci_dev *pdev)
+{
+ switch (pdev->device) {
+ case PRESTERA_DEV_ID_98DX7312M:
+ case PRESTERA_DEV_ID_98DX3500:
+ case PRESTERA_DEV_ID_98DX3501:
+ case PRESTERA_DEV_ID_98DX3510:
+ case PRESTERA_DEV_ID_98DX3520:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
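+/* A 16MB BAR2 holds the PP registers at offset 0 and the FW registers at
+ * 0x400000; for other BAR2 sizes the FW registers start at offset 0 and the
+ * PP registers occupy the upper half of the BAR.
+ */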
+static u32 prestera_pci_pp_bar2_offs(struct pci_dev *pdev)
+{
+ if (pci_resource_len(pdev, 2) == 0x1000000)
+ return 0x0;
+ else
+ return (pci_resource_len(pdev, 2) / 2);
+}
+
+static u32 prestera_pci_fw_bar2_offs(struct pci_dev *pdev)
+{
+ if (pci_resource_len(pdev, 2) == 0x1000000)
+ return 0x400000;
+ else
+ return 0x0;
+}
+
static int prestera_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
const char *driver_name = dev_driver_string(&pdev->dev);
+ u8 __iomem *mem_addr, *pp_addr = NULL;
struct prestera_fw *fw;
int err;
err = pcim_enable_device(pdev);
- if (err)
- return err;
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto err_pci_enable_device;
+ }
- err = pcim_iomap_regions(pdev, BIT(PRESTERA_PCI_BAR_FW) |
- BIT(PRESTERA_PCI_BAR_PP),
- pci_name(pdev));
- if (err)
- return err;
+ err = pci_request_regions(pdev, driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed\n");
+ goto err_pci_request_regions;
+ }
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30));
if (err) {
@@ -797,6 +856,26 @@ static int prestera_pci_probe(struct pci_dev *pdev,
goto err_dma_mask;
}
+ mem_addr = pcim_iomap(pdev, 2, 0);
+ if (!mem_addr) {
+ dev_err(&pdev->dev, "pci mem ioremap failed\n");
+ err = -EIO;
+ goto err_mem_ioremap;
+ }
+
+ /* AC5X devices use second half of BAR2 */
+ if (prestera_pci_pp_use_bar2(pdev)) {
+ pp_addr = mem_addr + prestera_pci_pp_bar2_offs(pdev);
+ mem_addr = mem_addr + prestera_pci_fw_bar2_offs(pdev);
+ } else {
+ pp_addr = pcim_iomap(pdev, 4, 0);
+ if (!pp_addr) {
+ dev_err(&pdev->dev, "pp regs ioremap failed\n");
+ err = -EIO;
+ goto err_pp_ioremap;
+ }
+ }
+
pci_set_master(pdev);
fw = devm_kzalloc(&pdev->dev, sizeof(*fw), GFP_KERNEL);
@@ -805,8 +884,9 @@ static int prestera_pci_probe(struct pci_dev *pdev,
goto err_pci_dev_alloc;
}
- fw->dev.ctl_regs = pcim_iomap_table(pdev)[PRESTERA_PCI_BAR_FW];
- fw->dev.pp_regs = pcim_iomap_table(pdev)[PRESTERA_PCI_BAR_PP];
+ fw->pci_dev = pdev;
+ fw->dev.ctl_regs = mem_addr;
+ fw->dev.pp_regs = pp_addr;
fw->dev.dev = &pdev->dev;
pci_set_drvdata(pdev, fw);
@@ -854,7 +934,12 @@ err_wq_alloc:
prestera_fw_uninit(fw);
err_prestera_fw_init:
err_pci_dev_alloc:
+err_pp_ioremap:
+err_mem_ioremap:
err_dma_mask:
+ pci_release_regions(pdev);
+err_pci_request_regions:
+err_pci_enable_device:
return err;
}
@@ -867,11 +952,18 @@ static void prestera_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
destroy_workqueue(fw->wq);
prestera_fw_uninit(fw);
+ pci_release_regions(pdev);
}
static const struct pci_device_id prestera_pci_devices[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) },
- { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC80C) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_AC3X_98DX_55) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_AC3X_98DX_65) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_ALDRIN2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX7312M) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3500) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3501) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3510) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PRESTERA_DEV_ID_98DX3520) },
{ }
};
MODULE_DEVICE_TABLE(pci, prestera_pci_devices);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
new file mode 100644
index 000000000000..a9a1028cb17b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -0,0 +1,1645 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/inetdevice.h>
+#include <net/inet_dscp.h>
+#include <net/switchdev.h>
+#include <linux/rhashtable.h>
+#include <net/nexthop.h>
+#include <net/arp.h>
+#include <linux/if_vlan.h>
+#include <linux/if_macvlan.h>
+#include <net/netevent.h>
+
+#include "prestera.h"
+#include "prestera_router_hw.h"
+
+#define PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH
+#define PRESTERA_NH_PROBE_INTERVAL 5000 /* ms */
+
+struct prestera_kern_neigh_cache_key {
+ struct prestera_ip_addr addr;
+ struct net_device *dev;
+};
+
+struct prestera_kern_neigh_cache {
+ struct prestera_kern_neigh_cache_key key;
+ struct rhash_head ht_node;
+ struct list_head kern_fib_cache_list;
+	/* Hold prepared nh_neigh info if it is in_kernel */
+ struct prestera_neigh_info nh_neigh_info;
+ /* Indicate if neighbour is reachable by direct route */
+ bool reachable;
+ /* Lock cache if neigh is present in kernel */
+ bool in_kernel;
+};
+
+struct prestera_kern_fib_cache_key {
+ struct prestera_ip_addr addr;
+ u32 prefix_len;
+ u32 kern_tb_id; /* tb_id from kernel (not fixed) */
+};
+
+/* Subscribing to neighbours in kernel */
+struct prestera_kern_fib_cache {
+ struct prestera_kern_fib_cache_key key;
+ struct {
+ struct prestera_fib_key fib_key;
+ enum prestera_fib_type fib_type;
+ struct prestera_nexthop_group_key nh_grp_key;
+ } lpm_info; /* hold prepared lpm info */
+	struct rhash_head ht_node; /* node of prestera_router */
+ struct prestera_kern_neigh_cache_head {
+ struct prestera_kern_fib_cache *this;
+ struct list_head head;
+ struct prestera_kern_neigh_cache *n_cache;
+ } kern_neigh_cache_head[PRESTERA_NHGR_SIZE_MAX];
+ union {
+		struct fib_notifier_info info; /* points to either the v4 or v6 info */
+ struct fib_entry_notifier_info fen4_info;
+ };
+	/* Indicate if route is not overlapped by another table */
+	bool reachable;
+};
+
+static const struct rhashtable_params __prestera_kern_neigh_cache_ht_params = {
+ .key_offset = offsetof(struct prestera_kern_neigh_cache, key),
+ .head_offset = offsetof(struct prestera_kern_neigh_cache, ht_node),
+ .key_len = sizeof(struct prestera_kern_neigh_cache_key),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = {
+ .key_offset = offsetof(struct prestera_kern_fib_cache, key),
+ .head_offset = offsetof(struct prestera_kern_fib_cache, ht_node),
+ .key_len = sizeof(struct prestera_kern_fib_cache_key),
+ .automatic_shrinking = true,
+};
+
+/* Utility to convert kernel table ids of the default vr into the hw_vr id */
+static u32 prestera_fix_tb_id(u32 tb_id)
+{
+ if (tb_id == RT_TABLE_UNSPEC ||
+ tb_id == RT_TABLE_LOCAL ||
+ tb_id == RT_TABLE_DEFAULT)
+ tb_id = RT_TABLE_MAIN;
+
+ return tb_id;
+}
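+
+/* Worked example (table ids from uapi/linux/rtnetlink.h): a route from
+ * RT_TABLE_LOCAL (255), RT_TABLE_DEFAULT (253) or RT_TABLE_UNSPEC (0) is
+ * programmed into the hw VR of RT_TABLE_MAIN (254); other ids pass through.
+ */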
+
+static void
+prestera_util_fen_info2fib_cache_key(struct fib_notifier_info *info,
+ struct prestera_kern_fib_cache_key *key)
+{
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+
+ memset(key, 0, sizeof(*key));
+ key->addr.v = PRESTERA_IPV4;
+ key->addr.u.ipv4 = cpu_to_be32(fen_info->dst);
+ key->prefix_len = fen_info->dst_len;
+ key->kern_tb_id = fen_info->tb_id;
+}
+
+static int prestera_util_nhc2nc_key(struct prestera_switch *sw,
+ struct fib_nh_common *nhc,
+ struct prestera_kern_neigh_cache_key *nk)
+{
+ memset(nk, 0, sizeof(*nk));
+ if (nhc->nhc_gw_family == AF_INET) {
+ nk->addr.v = PRESTERA_IPV4;
+ nk->addr.u.ipv4 = nhc->nhc_gw.ipv4;
+ } else {
+ nk->addr.v = PRESTERA_IPV6;
+ nk->addr.u.ipv6 = nhc->nhc_gw.ipv6;
+ }
+
+ nk->dev = nhc->nhc_dev;
+ return 0;
+}
+
+static void
+prestera_util_nc_key2nh_key(struct prestera_kern_neigh_cache_key *ck,
+ struct prestera_nh_neigh_key *nk)
+{
+ memset(nk, 0, sizeof(*nk));
+ nk->addr = ck->addr;
+ nk->rif = (void *)ck->dev;
+}
+
+static bool
+prestera_util_nhc_eq_n_cache_key(struct prestera_switch *sw,
+ struct fib_nh_common *nhc,
+ struct prestera_kern_neigh_cache_key *nk)
+{
+ struct prestera_kern_neigh_cache_key tk;
+ int err;
+
+ err = prestera_util_nhc2nc_key(sw, nhc, &tk);
+ if (err)
+ return false;
+
+ if (memcmp(&tk, nk, sizeof(tk)))
+ return false;
+
+ return true;
+}
+
+static int
+prestera_util_neigh2nc_key(struct prestera_switch *sw, struct neighbour *n,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ memset(key, 0, sizeof(*key));
+ if (n->tbl->family == AF_INET) {
+ key->addr.v = PRESTERA_IPV4;
+ key->addr.u.ipv4 = *(__be32 *)n->primary_key;
+ } else {
+ return -ENOENT;
+ }
+
+ key->dev = n->dev;
+
+ return 0;
+}
+
+static bool __prestera_fi_is_direct(struct fib_info *fi)
+{
+ struct fib_nh *fib_nh;
+
+ if (fib_info_num_path(fi) == 1) {
+ fib_nh = fib_info_nh(fi, 0);
+ if (fib_nh->fib_nh_gw_family == AF_UNSPEC)
+ return true;
+ }
+
+ return false;
+}
+
+static bool prestera_fi_is_direct(struct fib_info *fi)
+{
+ if (fi->fib_type != RTN_UNICAST)
+ return false;
+
+ return __prestera_fi_is_direct(fi);
+}
+
+static bool prestera_fi_is_nh(struct fib_info *fi)
+{
+ if (fi->fib_type != RTN_UNICAST)
+ return false;
+
+ return !__prestera_fi_is_direct(fi);
+}
+
+static bool __prestera_fi6_is_direct(struct fib6_info *fi)
+{
+ if (!fi->fib6_nh->nh_common.nhc_gw_family)
+ return true;
+
+ return false;
+}
+
+static bool prestera_fi6_is_direct(struct fib6_info *fi)
+{
+ if (fi->fib6_type != RTN_UNICAST)
+ return false;
+
+ return __prestera_fi6_is_direct(fi);
+}
+
+static bool prestera_fi6_is_nh(struct fib6_info *fi)
+{
+ if (fi->fib6_type != RTN_UNICAST)
+ return false;
+
+ return !__prestera_fi6_is_direct(fi);
+}
+
+static bool prestera_fib_info_is_direct(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info =
+ container_of(info, struct fib6_entry_notifier_info, info);
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+
+ if (info->family == AF_INET)
+ return prestera_fi_is_direct(fen_info->fi);
+ else
+ return prestera_fi6_is_direct(fen6_info->rt);
+}
+
+static bool prestera_fib_info_is_nh(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info =
+ container_of(info, struct fib6_entry_notifier_info, info);
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+
+ if (info->family == AF_INET)
+ return prestera_fi_is_nh(fen_info->fi);
+ else
+ return prestera_fi6_is_nh(fen6_info->rt);
+}
+
+/* must be called with rcu_read_lock() */
+static int prestera_util_kern_get_route(struct fib_result *res, u32 tb_id,
+ __be32 *addr)
+{
+ struct flowi4 fl4;
+
+ /* TODO: walkthrough appropriate tables in kernel
+ * to know if the same prefix exists in several tables
+ */
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.daddr = *addr;
+ return fib_lookup(&init_net, &fl4, res, 0 /* FIB_LOOKUP_NOREF */);
+}
+
+static bool
+__prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr,
+ struct net_device *dev)
+{
+ struct fib_nh *fib_nh;
+ struct fib_result res;
+ bool reachable;
+
+ reachable = false;
+
+ if (!prestera_util_kern_get_route(&res, tb_id, addr))
+ if (prestera_fi_is_direct(res.fi)) {
+ fib_nh = fib_info_nh(res.fi, 0);
+ if (dev == fib_nh->fib_nh_dev)
+ reachable = true;
+ }
+
+ return reachable;
+}
+
+/* Check if neigh route is reachable */
+static bool
+prestera_util_kern_n_is_reachable(u32 tb_id,
+ struct prestera_ip_addr *addr,
+ struct net_device *dev)
+{
+ if (addr->v == PRESTERA_IPV4)
+ return __prestera_util_kern_n_is_reachable_v4(tb_id,
+ &addr->u.ipv4,
+ dev);
+ else
+ return false;
+}
+
+static void prestera_util_kern_set_neigh_offload(struct neighbour *n,
+ bool offloaded)
+{
+ if (offloaded)
+ n->flags |= NTF_OFFLOADED;
+ else
+ n->flags &= ~NTF_OFFLOADED;
+}
+
+static void
+prestera_util_kern_set_nh_offload(struct fib_nh_common *nhc, bool offloaded, bool trap)
+{
+ if (offloaded)
+ nhc->nhc_flags |= RTNH_F_OFFLOAD;
+ else
+ nhc->nhc_flags &= ~RTNH_F_OFFLOAD;
+
+ if (trap)
+ nhc->nhc_flags |= RTNH_F_TRAP;
+ else
+ nhc->nhc_flags &= ~RTNH_F_TRAP;
+}
+
+static struct fib_nh_common *
+prestera_kern_fib_info_nhc(struct fib_notifier_info *info, int n)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ struct fib_entry_notifier_info *fen4_info;
+ struct fib6_info *iter;
+
+ if (info->family == AF_INET) {
+ fen4_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ return &fib_info_nh(fen4_info->fi, n)->nh_common;
+ } else if (info->family == AF_INET6) {
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ if (!n)
+ return &fen6_info->rt->fib6_nh->nh_common;
+
+ list_for_each_entry(iter, &fen6_info->rt->fib6_siblings,
+ fib6_siblings) {
+ if (!--n)
+ return &iter->fib6_nh->nh_common;
+ }
+ }
+
+	/* If the family is incorrect, the calling functions have a bug.
+	 * If the requested index is not found, that is also a bug, because a
+	 * valid index must be produced by nhs, which checks the list length.
+	 */
+ WARN(1, "Invalid parameters passed to %s n=%d i=%p",
+ __func__, n, info);
+ return NULL;
+}
+
+static int prestera_kern_fib_info_nhs(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ struct fib_entry_notifier_info *fen4_info;
+
+ if (info->family == AF_INET) {
+ fen4_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ return fib_info_num_path(fen4_info->fi);
+ } else if (info->family == AF_INET6) {
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+ return fen6_info->rt->fib6_nsiblings + 1;
+ }
+
+ return 0;
+}
+
+static unsigned char
+prestera_kern_fib_info_type(struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ struct fib_entry_notifier_info *fen4_info;
+
+ if (info->family == AF_INET) {
+ fen4_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ return fen4_info->fi->fib_type;
+ } else if (info->family == AF_INET6) {
+ fen6_info = container_of(info, struct fib6_entry_notifier_info,
+ info);
+		/* TODO: ECMP in ipv6 consists of several routes;
+		 * every route has a single nh.
+		 */
+ return fen6_info->rt->fib6_type;
+ }
+
+ return RTN_UNSPEC;
+}
+
+/* By design, a uc_nh route whose key equals its nexthop is a neighbour route */
+static bool
+prestera_fib_node_util_is_neighbour(struct prestera_fib_node *fib_node)
+{
+ if (fib_node->info.type != PRESTERA_FIB_TYPE_UC_NH)
+ return false;
+
+ if (fib_node->info.nh_grp->nh_neigh_head[1].neigh)
+ return false;
+
+ if (!fib_node->info.nh_grp->nh_neigh_head[0].neigh)
+ return false;
+
+ if (memcmp(&fib_node->info.nh_grp->nh_neigh_head[0].neigh->key.addr,
+ &fib_node->key.addr, sizeof(struct prestera_ip_addr)))
+ return false;
+
+ return true;
+}
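+
+/* Example (addresses illustrative): a PRESTERA_FIB_TYPE_UC_NH node keyed by
+ * 10.0.0.5 whose single nexthop neighbour is also 10.0.0.5 is treated as a
+ * neighbour route; any other combination is not.
+ */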
+
+static int prestera_dev_if_type(const struct net_device *dev)
+{
+ struct macvlan_dev *vlan;
+
+ if (is_vlan_dev(dev) &&
+ netif_is_bridge_master(vlan_dev_real_dev(dev))) {
+ return PRESTERA_IF_VID_E;
+ } else if (netif_is_bridge_master(dev)) {
+ return PRESTERA_IF_VID_E;
+ } else if (netif_is_lag_master(dev)) {
+ return PRESTERA_IF_LAG_E;
+ } else if (netif_is_macvlan(dev)) {
+ vlan = netdev_priv(dev);
+ return prestera_dev_if_type(vlan->lowerdev);
+ } else {
+ return PRESTERA_IF_PORT_E;
+ }
+}
+
+static int
+prestera_neigh_iface_init(struct prestera_switch *sw,
+ struct prestera_iface *iface,
+ struct neighbour *n)
+{
+ struct prestera_port *port;
+
+ iface->vlan_id = 0; /* TODO: vlan egress */
+ iface->type = prestera_dev_if_type(n->dev);
+ if (iface->type != PRESTERA_IF_PORT_E)
+ return -EINVAL;
+
+ if (!prestera_netdev_check(n->dev))
+ return -EINVAL;
+
+ port = netdev_priv(n->dev);
+ iface->dev_port.hw_dev_num = port->dev_id;
+ iface->dev_port.port_num = port->hw_id;
+
+ return 0;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_find(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+
+ n_cache =
+ rhashtable_lookup_fast(&sw->router->kern_neigh_cache_ht, key,
+ __prestera_kern_neigh_cache_ht_params);
+ return n_cache;
+}
+
+static void
+__prestera_kern_neigh_cache_destruct(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache)
+{
+ dev_put(n_cache->key.dev);
+}
+
+static void
+__prestera_kern_neigh_cache_destroy(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache)
+{
+ rhashtable_remove_fast(&sw->router->kern_neigh_cache_ht,
+ &n_cache->ht_node,
+ __prestera_kern_neigh_cache_ht_params);
+ __prestera_kern_neigh_cache_destruct(sw, n_cache);
+ kfree(n_cache);
+}
+
+static struct prestera_kern_neigh_cache *
+__prestera_kern_neigh_cache_create(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ int err;
+
+ n_cache = kzalloc(sizeof(*n_cache), GFP_KERNEL);
+ if (!n_cache)
+ goto err_kzalloc;
+
+ memcpy(&n_cache->key, key, sizeof(*key));
+ dev_hold(n_cache->key.dev);
+
+ INIT_LIST_HEAD(&n_cache->kern_fib_cache_list);
+ err = rhashtable_insert_fast(&sw->router->kern_neigh_cache_ht,
+ &n_cache->ht_node,
+ __prestera_kern_neigh_cache_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return n_cache;
+
+err_ht_insert:
+ dev_put(n_cache->key.dev);
+ kfree(n_cache);
+err_kzalloc:
+ return NULL;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_get(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache_key *key)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+
+ n_cache = prestera_kern_neigh_cache_find(sw, key);
+ if (!n_cache)
+ n_cache = __prestera_kern_neigh_cache_create(sw, key);
+
+ return n_cache;
+}
+
+static struct prestera_kern_neigh_cache *
+prestera_kern_neigh_cache_put(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache)
+{
+ if (!n_cache->in_kernel &&
+ list_empty(&n_cache->kern_fib_cache_list)) {
+ __prestera_kern_neigh_cache_destroy(sw, n_cache);
+ return NULL;
+ }
+
+ return n_cache;
+}
+
+static struct prestera_kern_fib_cache *
+prestera_kern_fib_cache_find(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache_key *key)
+{
+ struct prestera_kern_fib_cache *fib_cache;
+
+ fib_cache =
+ rhashtable_lookup_fast(&sw->router->kern_fib_cache_ht, key,
+ __prestera_kern_fib_cache_ht_params);
+ return fib_cache;
+}
+
+static void
+__prestera_kern_fib_cache_destruct(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fib_cache)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ int i;
+
+ for (i = 0; i < PRESTERA_NHGR_SIZE_MAX; i++) {
+ n_cache = fib_cache->kern_neigh_cache_head[i].n_cache;
+ if (n_cache) {
+ list_del(&fib_cache->kern_neigh_cache_head[i].head);
+ prestera_kern_neigh_cache_put(sw, n_cache);
+ }
+ }
+
+ fib_info_put(fib_cache->fen4_info.fi);
+}
+
+static void
+prestera_kern_fib_cache_destroy(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fib_cache)
+{
+ rhashtable_remove_fast(&sw->router->kern_fib_cache_ht,
+ &fib_cache->ht_node,
+ __prestera_kern_fib_cache_ht_params);
+ __prestera_kern_fib_cache_destruct(sw, fib_cache);
+ kfree(fib_cache);
+}
+
+static int
+__prestera_kern_fib_cache_create_nhs(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_neigh_cache_key nc_key;
+ struct prestera_kern_neigh_cache *n_cache;
+ struct fib_nh_common *nhc;
+ int i, nhs, err;
+
+ if (!prestera_fib_info_is_nh(&fc->info))
+ return 0;
+
+ nhs = prestera_kern_fib_info_nhs(&fc->info);
+ if (nhs > PRESTERA_NHGR_SIZE_MAX)
+ return 0;
+
+ for (i = 0; i < nhs; i++) {
+ nhc = prestera_kern_fib_info_nhc(&fc->fen4_info.info, i);
+ err = prestera_util_nhc2nc_key(sw, nhc, &nc_key);
+ if (err)
+ return 0;
+
+ n_cache = prestera_kern_neigh_cache_get(sw, &nc_key);
+ if (!n_cache)
+ return 0;
+
+ fc->kern_neigh_cache_head[i].this = fc;
+ fc->kern_neigh_cache_head[i].n_cache = n_cache;
+ list_add(&fc->kern_neigh_cache_head[i].head,
+ &n_cache->kern_fib_cache_list);
+ }
+
+ return 0;
+}
+
+/* Operations on fi (offload, etc) must be wrapped in utils.
+ * This function just creates storage.
+ */
+static struct prestera_kern_fib_cache *
+prestera_kern_fib_cache_create(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache_key *key,
+ struct fib_notifier_info *info)
+{
+ struct fib_entry_notifier_info *fen_info =
+ container_of(info, struct fib_entry_notifier_info, info);
+ struct prestera_kern_fib_cache *fib_cache;
+ int err;
+
+ fib_cache = kzalloc(sizeof(*fib_cache), GFP_KERNEL);
+ if (!fib_cache)
+ goto err_kzalloc;
+
+ memcpy(&fib_cache->key, key, sizeof(*key));
+ fib_info_hold(fen_info->fi);
+ memcpy(&fib_cache->fen4_info, fen_info, sizeof(*fen_info));
+
+ err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht,
+ &fib_cache->ht_node,
+ __prestera_kern_fib_cache_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ /* Handle nexthops */
+ err = __prestera_kern_fib_cache_create_nhs(sw, fib_cache);
+ if (err)
+ goto out; /* Not critical */
+
+out:
+ return fib_cache;
+
+err_ht_insert:
+ fib_info_put(fen_info->fi);
+ kfree(fib_cache);
+err_kzalloc:
+ return NULL;
+}
+
+static void
+__prestera_k_arb_fib_nh_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fibc,
+ struct prestera_kern_neigh_cache *nc,
+ bool offloaded, bool trap)
+{
+ struct fib_nh_common *nhc;
+ int i, nhs;
+
+ nhs = prestera_kern_fib_info_nhs(&fibc->info);
+ for (i = 0; i < nhs; i++) {
+ nhc = prestera_kern_fib_info_nhc(&fibc->info, i);
+ if (!nc) {
+ prestera_util_kern_set_nh_offload(nhc, offloaded, trap);
+ continue;
+ }
+
+ if (prestera_util_nhc_eq_n_cache_key(sw, nhc, &nc->key)) {
+ prestera_util_kern_set_nh_offload(nhc, offloaded, trap);
+ break;
+ }
+ }
+}
+
+static void
+__prestera_k_arb_n_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc,
+ bool offloaded)
+{
+ struct neighbour *n;
+
+ n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4,
+ nc->key.dev);
+ if (!n)
+ return;
+
+ prestera_util_kern_set_neigh_offload(n, offloaded);
+ neigh_release(n);
+}
+
+static void
+__prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc,
+ bool fail, bool offload, bool trap)
+{
+ struct fib_rt_info fri;
+
+ switch (fc->key.addr.v) {
+ case PRESTERA_IPV4:
+ fri.fi = fc->fen4_info.fi;
+ fri.tb_id = fc->key.kern_tb_id;
+ fri.dst = fc->key.addr.u.ipv4;
+ fri.dst_len = fc->key.prefix_len;
+ fri.dscp = fc->fen4_info.dscp;
+ fri.type = fc->fen4_info.type;
+ /* flags begin */
+ fri.offload = offload;
+ fri.trap = trap;
+ fri.offload_failed = fail;
+ /* flags end */
+ fib_alias_hw_flags_set(&init_net, &fri);
+ return;
+ case PRESTERA_IPV6:
+ /* TODO */
+ return;
+ }
+}
+
+static void
+__prestera_k_arb_n_lpm_set(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *n_cache,
+ bool enabled)
+{
+ struct prestera_nexthop_group_key nh_grp_key;
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *fib_cache;
+ struct prestera_fib_node *fib_node;
+ struct prestera_fib_key fib_key;
+
+ /* Exception for fc with prefix 32: LPM entry is already used by fib */
+ memset(&fc_key, 0, sizeof(fc_key));
+ fc_key.addr = n_cache->key.addr;
+ fc_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
+	/* It would be better to use the tb_id of the route that points to this
+	 * neighbour. We take it from the rif instead, although the rif is
+	 * inconsistent: in_rif and out_rif must be separated.
+	 * Also note: each fib that points to this neigh should have a separate
+	 * neigh lpm entry (one per ingress vr).
+	 */
+ fc_key.kern_tb_id = l3mdev_fib_table(n_cache->key.dev);
+ fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
+ memset(&fib_key, 0, sizeof(fib_key));
+ fib_key.addr = n_cache->key.addr;
+ fib_key.prefix_len = PRESTERA_IP_ADDR_PLEN(n_cache->key.addr.v);
+ fib_key.tb_id = prestera_fix_tb_id(fc_key.kern_tb_id);
+ fib_node = prestera_fib_node_find(sw, &fib_key);
+ if (!fib_cache || !fib_cache->reachable) {
+ if (!enabled && fib_node) {
+ if (prestera_fib_node_util_is_neighbour(fib_node))
+ prestera_fib_node_destroy(sw, fib_node);
+ return;
+ }
+ }
+
+ if (enabled && !fib_node) {
+ memset(&nh_grp_key, 0, sizeof(nh_grp_key));
+ prestera_util_nc_key2nh_key(&n_cache->key,
+ &nh_grp_key.neigh[0]);
+ fib_node = prestera_fib_node_create(sw, &fib_key,
+ PRESTERA_FIB_TYPE_UC_NH,
+ &nh_grp_key);
+ if (!fib_node)
+ pr_err("%s failed ip=%pI4n", "prestera_fib_node_create",
+ &fib_key.addr.u.ipv4);
+ return;
+ }
+}
+
+static void
+__prestera_k_arb_nc_kern_fib_fetch(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ if (prestera_util_kern_n_is_reachable(l3mdev_fib_table(nc->key.dev),
+ &nc->key.addr, nc->key.dev))
+ nc->reachable = true;
+ else
+ nc->reachable = false;
+}
+
+/* Kernel neighbour -> neigh_cache info */
+static void
+__prestera_k_arb_nc_kern_n_fetch(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ struct neighbour *n;
+ int err;
+
+ memset(&nc->nh_neigh_info, 0, sizeof(nc->nh_neigh_info));
+ n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4, nc->key.dev);
+ if (!n)
+ goto out;
+
+ read_lock_bh(&n->lock);
+ if (n->nud_state & NUD_VALID && !n->dead) {
+ err = prestera_neigh_iface_init(sw, &nc->nh_neigh_info.iface,
+ n);
+ if (err)
+ goto n_read_out;
+
+ memcpy(&nc->nh_neigh_info.ha[0], &n->ha[0], ETH_ALEN);
+ nc->nh_neigh_info.connected = true;
+ }
+n_read_out:
+ read_unlock_bh(&n->lock);
+out:
+ nc->in_kernel = nc->nh_neigh_info.connected;
+ if (n)
+ neigh_release(n);
+}
+
+/* neigh_cache info -> lpm update */
+static void
+__prestera_k_arb_nc_apply(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ struct prestera_kern_neigh_cache_head *nhead;
+ struct prestera_nh_neigh_key nh_key;
+ struct prestera_nh_neigh *nh_neigh;
+ int err;
+
+ __prestera_k_arb_n_lpm_set(sw, nc, nc->reachable && nc->in_kernel);
+ __prestera_k_arb_n_offload_set(sw, nc, nc->reachable && nc->in_kernel);
+
+ prestera_util_nc_key2nh_key(&nc->key, &nh_key);
+ nh_neigh = prestera_nh_neigh_find(sw, &nh_key);
+ if (!nh_neigh)
+ goto out;
+
+ /* Do hw update only if something changed to prevent nh flap */
+ if (memcmp(&nc->nh_neigh_info, &nh_neigh->info,
+ sizeof(nh_neigh->info))) {
+ memcpy(&nh_neigh->info, &nc->nh_neigh_info,
+ sizeof(nh_neigh->info));
+ err = prestera_nh_neigh_set(sw, nh_neigh);
+ if (err) {
+ pr_err("%s failed with err=%d ip=%pI4n mac=%pM",
+ "prestera_nh_neigh_set", err,
+ &nh_neigh->key.addr.u.ipv4,
+ &nh_neigh->info.ha[0]);
+ goto out;
+ }
+ }
+
+out:
+ list_for_each_entry(nhead, &nc->kern_fib_cache_list, head) {
+ __prestera_k_arb_fib_nh_offload_set(sw, nhead->this, nc,
+ nc->in_kernel,
+ !nc->in_kernel);
+ }
+}
+
+static int
+__prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct fib_nh_common *nhc;
+ int nh_cnt;
+
+ memset(&fc->lpm_info, 0, sizeof(fc->lpm_info));
+
+ switch (prestera_kern_fib_info_type(&fc->info)) {
+ case RTN_UNICAST:
+ if (prestera_fib_info_is_direct(&fc->info) &&
+ fc->key.prefix_len ==
+ PRESTERA_IP_ADDR_PLEN(fc->key.addr.v)) {
+			/* This is a special case for prefix 32: a direct
+			 * route would conflict in the lpm - once a TRAP is
+			 * added, there is no room left for the neighbour
+			 * entry. So represent the direct route with prefix
+			 * 32 as NH; the neighbour will then be resolved as
+			 * the nexthop of this route.
+			 */
+ nhc = prestera_kern_fib_info_nhc(&fc->info, 0);
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_UC_NH;
+ fc->lpm_info.nh_grp_key.neigh[0].addr =
+ fc->key.addr;
+ fc->lpm_info.nh_grp_key.neigh[0].rif =
+ nhc->nhc_dev;
+
+ break;
+ }
+
+		/* We could also take nh_grp_key from fi, which would be more
+		 * correct because the cache does not always represent what is
+		 * actually written to the lpm. But for now we use the nh cache
+		 * for this case as well.
+		 */
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ if (!fc->kern_neigh_cache_head[nh_cnt].n_cache)
+ break;
+
+ fc->lpm_info.nh_grp_key.neigh[nh_cnt].addr =
+ fc->kern_neigh_cache_head[nh_cnt].n_cache->key.addr;
+ fc->lpm_info.nh_grp_key.neigh[nh_cnt].rif =
+ fc->kern_neigh_cache_head[nh_cnt].n_cache->key.dev;
+ }
+
+ fc->lpm_info.fib_type = nh_cnt ?
+ PRESTERA_FIB_TYPE_UC_NH :
+ PRESTERA_FIB_TYPE_TRAP;
+ break;
+ /* Unsupported. Leave it for kernel: */
+ case RTN_BROADCAST:
+ case RTN_MULTICAST:
+ /* Routes we must trap by design: */
+ case RTN_LOCAL:
+ case RTN_UNREACHABLE:
+ case RTN_PROHIBIT:
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
+ break;
+ case RTN_BLACKHOLE:
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_DROP;
+ break;
+ default:
+ dev_err(sw->dev->dev, "Unsupported fib_type");
+ return -EOPNOTSUPP;
+ }
+
+ fc->lpm_info.fib_key.addr = fc->key.addr;
+ fc->lpm_info.fib_key.prefix_len = fc->key.prefix_len;
+ fc->lpm_info.fib_key.tb_id = prestera_fix_tb_id(fc->key.kern_tb_id);
+
+ return 0;
+}
+
+static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc,
+ bool enabled)
+{
+ struct prestera_fib_node *fib_node;
+
+ fib_node = prestera_fib_node_find(sw, &fc->lpm_info.fib_key);
+ if (fib_node)
+ prestera_fib_node_destroy(sw, fib_node);
+
+ if (!enabled)
+ return 0;
+
+ fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key,
+ fc->lpm_info.fib_type,
+ &fc->lpm_info.nh_grp_key);
+
+ if (!fib_node) {
+ dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d",
+ &fc->key.addr.u.ipv4, fc->key.prefix_len,
+ fc->key.kern_tb_id);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int __prestera_k_arb_fc_apply(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ int err;
+
+ err = __prestera_pr_k_arb_fc_lpm_info_calc(sw, fc);
+ if (err)
+ return err;
+
+ err = __prestera_k_arb_f_lpm_set(sw, fc, fc->reachable);
+ if (err) {
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc,
+ true, false, false);
+ return err;
+ }
+
+ switch (fc->lpm_info.fib_type) {
+ case PRESTERA_FIB_TYPE_UC_NH:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
+ fc->reachable, false);
+ break;
+ case PRESTERA_FIB_TYPE_TRAP:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
+ false, fc->reachable);
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, true,
+ fc->reachable);
+ break;
+ case PRESTERA_FIB_TYPE_INVALID:
+ break;
+ }
+
+ return 0;
+}
+
+static struct prestera_kern_fib_cache *
+__prestera_k_arb_util_fib_overlaps(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *rfc;
+
+ /* TODO: parse kernel rules */
+ rfc = NULL;
+ if (fc->key.kern_tb_id == RT_TABLE_LOCAL) {
+ memcpy(&fc_key, &fc->key, sizeof(fc_key));
+ fc_key.kern_tb_id = RT_TABLE_MAIN;
+ rfc = prestera_kern_fib_cache_find(sw, &fc_key);
+ }
+
+ return rfc;
+}
+
+static struct prestera_kern_fib_cache *
+__prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *rfc;
+
+ /* TODO: parse kernel rules */
+ rfc = NULL;
+ if (fc->key.kern_tb_id == RT_TABLE_MAIN) {
+ memcpy(&fc_key, &fc->key, sizeof(fc_key));
+ fc_key.kern_tb_id = RT_TABLE_LOCAL;
+ rfc = prestera_kern_fib_cache_find(sw, &fc_key);
+ }
+
+ return rfc;
+}
+
+static void __prestera_k_arb_hw_state_upd(struct prestera_switch *sw,
+ struct prestera_kern_neigh_cache *nc)
+{
+ struct prestera_nh_neigh_key nh_key;
+ struct prestera_nh_neigh *nh_neigh;
+ struct neighbour *n;
+ bool hw_active;
+
+ prestera_util_nc_key2nh_key(&nc->key, &nh_key);
+ nh_neigh = prestera_nh_neigh_find(sw, &nh_key);
+ if (!nh_neigh) {
+ pr_err("Cannot find nh_neigh for cached %pI4n",
+ &nc->key.addr.u.ipv4);
+ return;
+ }
+
+ hw_active = prestera_nh_neigh_util_hw_state(sw, nh_neigh);
+
+#ifdef PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH
+ if (!hw_active && nc->in_kernel)
+ goto out;
+#else /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */
+ if (!hw_active)
+ goto out;
+#endif /* PRESTERA_IMPLICITY_RESOLVE_DEAD_NEIGH */
+
+ if (nc->key.addr.v == PRESTERA_IPV4) {
+ n = neigh_lookup(&arp_tbl, &nc->key.addr.u.ipv4,
+ nc->key.dev);
+ if (!n)
+ n = neigh_create(&arp_tbl, &nc->key.addr.u.ipv4,
+ nc->key.dev);
+ } else {
+ n = NULL;
+ }
+
+ if (!IS_ERR(n) && n) {
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ } else {
+ pr_err("Cannot create neighbour %pI4n", &nc->key.addr.u.ipv4);
+ }
+
+out:
+ return;
+}
+
+/* Propagate hw state to kernel */
+static void prestera_k_arb_hw_evt(struct prestera_switch *sw)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while (1) {
+ n_cache = rhashtable_walk_next(&iter);
+
+ if (!n_cache)
+ break;
+
+ if (IS_ERR(n_cache))
+ continue;
+
+ rhashtable_walk_stop(&iter);
+ __prestera_k_arb_hw_state_upd(sw, n_cache);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
+
+/* Propagate kernel event to hw */
+static void prestera_k_arb_n_evt(struct prestera_switch *sw,
+ struct neighbour *n)
+{
+ struct prestera_kern_neigh_cache_key n_key;
+ struct prestera_kern_neigh_cache *n_cache;
+ int err;
+
+ err = prestera_util_neigh2nc_key(sw, n, &n_key);
+ if (err)
+ return;
+
+ n_cache = prestera_kern_neigh_cache_find(sw, &n_key);
+ if (!n_cache) {
+ n_cache = prestera_kern_neigh_cache_get(sw, &n_key);
+ if (!n_cache)
+ return;
+ __prestera_k_arb_nc_kern_fib_fetch(sw, n_cache);
+ }
+
+ __prestera_k_arb_nc_kern_n_fetch(sw, n_cache);
+ __prestera_k_arb_nc_apply(sw, n_cache);
+
+ prestera_kern_neigh_cache_put(sw, n_cache);
+}
+
+static void __prestera_k_arb_fib_evt2nc(struct prestera_switch *sw)
+{
+ struct prestera_kern_neigh_cache *n_cache;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&sw->router->kern_neigh_cache_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while (1) {
+ n_cache = rhashtable_walk_next(&iter);
+
+ if (!n_cache)
+ break;
+
+ if (IS_ERR(n_cache))
+ continue;
+
+ rhashtable_walk_stop(&iter);
+ __prestera_k_arb_nc_kern_fib_fetch(sw, n_cache);
+ __prestera_k_arb_nc_apply(sw, n_cache);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
+
+static int
+prestera_k_arb_fib_evt(struct prestera_switch *sw,
+ bool replace, /* replace or del */
+ struct fib_notifier_info *info)
+{
+ struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *fib_cache;
+ int err;
+
+ prestera_util_fen_info2fib_cache_key(info, &fc_key);
+ fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
+ if (fib_cache) {
+ fib_cache->reachable = false;
+ err = __prestera_k_arb_fc_apply(sw, fib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying destroyed fib_cache failed");
+
+ bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
+ tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
+ if (!tfib_cache && bfib_cache) {
+ bfib_cache->reachable = true;
+ err = __prestera_k_arb_fc_apply(sw, bfib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying fib_cache btm failed");
+ }
+
+ prestera_kern_fib_cache_destroy(sw, fib_cache);
+ }
+
+ if (replace) {
+ fib_cache = prestera_kern_fib_cache_create(sw, &fc_key, info);
+ if (!fib_cache) {
+ dev_err(sw->dev->dev, "fib_cache == NULL");
+ return -ENOENT;
+ }
+
+ bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
+ tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
+ if (!tfib_cache)
+ fib_cache->reachable = true;
+
+ if (bfib_cache) {
+ bfib_cache->reachable = false;
+ err = __prestera_k_arb_fc_apply(sw, bfib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying fib_cache btm failed");
+ }
+
+ err = __prestera_k_arb_fc_apply(sw, fib_cache);
+ if (err)
+ dev_err(sw->dev->dev, "Applying fib_cache failed");
+ }
+
+	/* Update all neighs to resolve overlaps and apply the related entries */
+ __prestera_k_arb_fib_evt2nc(sw);
+
+ return 0;
+}
+
+static void __prestera_k_arb_abort_neigh_ht_cb(void *ptr, void *arg)
+{
+ struct prestera_kern_neigh_cache *n_cache = ptr;
+ struct prestera_switch *sw = arg;
+
+ if (!list_empty(&n_cache->kern_fib_cache_list)) {
+ WARN_ON(1); /* BUG */
+ return;
+ }
+ __prestera_k_arb_n_offload_set(sw, n_cache, false);
+ n_cache->in_kernel = false;
+ /* No need to destroy lpm.
+ * It will be aborted by destroy_ht
+ */
+ __prestera_kern_neigh_cache_destruct(sw, n_cache);
+ kfree(n_cache);
+}
+
+static void __prestera_k_arb_abort_fib_ht_cb(void *ptr, void *arg)
+{
+ struct prestera_kern_fib_cache *fib_cache = ptr;
+ struct prestera_switch *sw = arg;
+
+ __prestera_k_arb_fib_lpm_offload_set(sw, fib_cache,
+ false, false,
+ false);
+ __prestera_k_arb_fib_nh_offload_set(sw, fib_cache, NULL,
+ false, false);
+ /* No need to destroy lpm.
+ * It will be aborted by destroy_ht
+ */
+ __prestera_kern_fib_cache_destruct(sw, fib_cache);
+ kfree(fib_cache);
+}
+
+static void prestera_k_arb_abort(struct prestera_switch *sw)
+{
+ /* Function to remove all arbiter entries and related hw objects. */
+ /* Sequence:
+ * 1) Clear arbiter tables, but don't touch hw
+ * 2) Clear hw
+	 * We use this approach because an arbiter object is not directly
+	 * mapped to hw, so deleting one arbiter object may even lead to the
+	 * creation of a hw object (e.g. in case of overlapped routes).
+ */
+ rhashtable_free_and_destroy(&sw->router->kern_fib_cache_ht,
+ __prestera_k_arb_abort_fib_ht_cb,
+ sw);
+ rhashtable_free_and_destroy(&sw->router->kern_neigh_cache_ht,
+ __prestera_k_arb_abort_neigh_ht_cb,
+ sw);
+}
+
+static int __prestera_inetaddr_port_event(struct net_device *port_dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_port *port = netdev_priv(port_dev);
+ struct prestera_rif_entry_key re_key = {};
+ struct prestera_rif_entry *re;
+ u32 kern_tb_id;
+ int err;
+
+ err = prestera_is_valid_mac_addr(port, port_dev->dev_addr);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "RIF MAC must have the same prefix");
+ return err;
+ }
+
+ kern_tb_id = l3mdev_fib_table(port_dev);
+ re_key.iface.type = PRESTERA_IF_PORT_E;
+ re_key.iface.dev_port.hw_dev_num = port->dev_id;
+ re_key.iface.dev_port.port_num = port->hw_id;
+ re = prestera_rif_entry_find(port->sw, &re_key);
+
+ switch (event) {
+ case NETDEV_UP:
+ if (re) {
+			NL_SET_ERR_MSG_MOD(extack, "RIF already exists");
+ return -EEXIST;
+ }
+ re = prestera_rif_entry_create(port->sw, &re_key,
+ prestera_fix_tb_id(kern_tb_id),
+ port_dev->dev_addr);
+ if (!re) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't create RIF");
+ return -EINVAL;
+ }
+ dev_hold(port_dev);
+ break;
+ case NETDEV_DOWN:
+ if (!re) {
+ NL_SET_ERR_MSG_MOD(extack, "Can't find RIF");
+ return -EEXIST;
+ }
+ prestera_rif_entry_destroy(port->sw, re);
+ dev_put(port_dev);
+ break;
+ }
+
+ return 0;
+}
+
+static int __prestera_inetaddr_event(struct prestera_switch *sw,
+ struct net_device *dev,
+ unsigned long event,
+ struct netlink_ext_ack *extack)
+{
+ if (!prestera_netdev_check(dev) || netif_is_any_bridge_port(dev) ||
+ netif_is_lag_port(dev))
+ return 0;
+
+ return __prestera_inetaddr_port_event(dev, event, extack);
+}
+
+static int __prestera_inetaddr_cb(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct prestera_router *router = container_of(nb,
+ struct prestera_router,
+ inetaddr_nb);
+ struct in_device *idev;
+ int err = 0;
+
+ if (event != NETDEV_DOWN)
+ goto out;
+
+	/* Ignore if this is not the latest address */
+ idev = __in_dev_get_rtnl(dev);
+ if (idev && idev->ifa_list)
+ goto out;
+
+ err = __prestera_inetaddr_event(router->sw, dev, event, NULL);
+out:
+ return notifier_from_errno(err);
+}
+
+static int __prestera_inetaddr_valid_cb(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct in_validator_info *ivi = (struct in_validator_info *)ptr;
+ struct net_device *dev = ivi->ivi_dev->dev;
+ struct prestera_router *router = container_of(nb,
+ struct prestera_router,
+ inetaddr_valid_nb);
+ struct in_device *idev;
+ int err = 0;
+
+ if (event != NETDEV_UP)
+ goto out;
+
+	/* Ignore if this is not the first address */
+ idev = __in_dev_get_rtnl(dev);
+ if (idev && idev->ifa_list)
+ goto out;
+
+ if (ipv4_is_multicast(ivi->ivi_addr)) {
+ NL_SET_ERR_MSG_MOD(ivi->extack,
+ "Multicast addr on RIF is not supported");
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = __prestera_inetaddr_event(router->sw, dev, event, ivi->extack);
+out:
+ return notifier_from_errno(err);
+}
+
+struct prestera_fib_event_work {
+ struct work_struct work;
+ struct prestera_switch *sw;
+ struct fib_entry_notifier_info fen_info;
+ unsigned long event;
+};
+
+static void __prestera_router_fib_event_work(struct work_struct *work)
+{
+ struct prestera_fib_event_work *fib_work =
+ container_of(work, struct prestera_fib_event_work, work);
+ struct prestera_switch *sw = fib_work->sw;
+ int err;
+
+ rtnl_lock();
+
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = prestera_k_arb_fib_evt(sw, true,
+ &fib_work->fen_info.info);
+ if (err)
+ goto err_out;
+
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ err = prestera_k_arb_fib_evt(sw, false,
+ &fib_work->fen_info.info);
+ if (err)
+ goto err_out;
+
+ break;
+ }
+
+ goto out;
+
+err_out:
+ dev_err(sw->dev->dev, "Error when processing %pI4h/%d",
+ &fib_work->fen_info.dst,
+ fib_work->fen_info.dst_len);
+out:
+ fib_info_put(fib_work->fen_info.fi);
+ rtnl_unlock();
+ kfree(fib_work);
+}
+
+/* Called with rcu_read_lock() */
+static int __prestera_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct prestera_fib_event_work *fib_work;
+ struct fib_entry_notifier_info *fen_info;
+ struct fib_notifier_info *info = ptr;
+ struct prestera_router *router;
+
+ if (info->family != AF_INET)
+ return NOTIFY_DONE;
+
+ router = container_of(nb, struct prestera_router, fib_nb);
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_DEL:
+ fen_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ if (!fen_info->fi)
+ return NOTIFY_DONE;
+
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (WARN_ON(!fib_work))
+ return NOTIFY_BAD;
+
+ fib_info_hold(fen_info->fi);
+ fib_work->fen_info = *fen_info;
+ fib_work->event = event;
+ fib_work->sw = router->sw;
+ INIT_WORK(&fib_work->work, __prestera_router_fib_event_work);
+ prestera_queue_work(&fib_work->work);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct prestera_netevent_work {
+ struct work_struct work;
+ struct prestera_switch *sw;
+ struct neighbour *n;
+};
+
+static void prestera_router_neigh_event_work(struct work_struct *work)
+{
+ struct prestera_netevent_work *net_work =
+ container_of(work, struct prestera_netevent_work, work);
+ struct prestera_switch *sw = net_work->sw;
+ struct neighbour *n = net_work->n;
+
+	/* neigh is not a hw-related object; it is stored only in the kernel,
+	 * hence the rtnl lock below.
+	 */
+ rtnl_lock();
+
+ prestera_k_arb_n_evt(sw, n);
+
+ neigh_release(n);
+ rtnl_unlock();
+ kfree(net_work);
+}
+
+static int prestera_router_netevent_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct prestera_netevent_work *net_work;
+ struct prestera_router *router;
+ struct neighbour *n = ptr;
+
+ router = container_of(nb, struct prestera_router, netevent_nb);
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ if (n->tbl->family != AF_INET)
+ return NOTIFY_DONE;
+
+ net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
+ if (WARN_ON(!net_work))
+ return NOTIFY_BAD;
+
+ neigh_clone(n);
+ net_work->n = n;
+ net_work->sw = router->sw;
+ INIT_WORK(&net_work->work, prestera_router_neigh_event_work);
+ prestera_queue_work(&net_work->work);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void prestera_router_update_neighs_work(struct work_struct *work)
+{
+ struct prestera_router *router;
+
+ router = container_of(work, struct prestera_router,
+ neighs_update.dw.work);
+ rtnl_lock();
+
+ prestera_k_arb_hw_evt(router->sw);
+
+ rtnl_unlock();
+ prestera_queue_delayed_work(&router->neighs_update.dw,
+ msecs_to_jiffies(PRESTERA_NH_PROBE_INTERVAL));
+}
+
+static int prestera_neigh_work_init(struct prestera_switch *sw)
+{
+ INIT_DELAYED_WORK(&sw->router->neighs_update.dw,
+ prestera_router_update_neighs_work);
+ prestera_queue_delayed_work(&sw->router->neighs_update.dw, 0);
+ return 0;
+}
+
+static void prestera_neigh_work_fini(struct prestera_switch *sw)
+{
+ cancel_delayed_work_sync(&sw->router->neighs_update.dw);
+}
+
+int prestera_router_init(struct prestera_switch *sw)
+{
+ struct prestera_router *router;
+ int err, nhgrp_cache_bytes;
+
+ router = kzalloc(sizeof(*sw->router), GFP_KERNEL);
+ if (!router)
+ return -ENOMEM;
+
+ sw->router = router;
+ router->sw = sw;
+
+ err = prestera_router_hw_init(sw);
+ if (err)
+ goto err_router_lib_init;
+
+ err = rhashtable_init(&router->kern_fib_cache_ht,
+ &__prestera_kern_fib_cache_ht_params);
+ if (err)
+ goto err_kern_fib_cache_ht_init;
+
+ err = rhashtable_init(&router->kern_neigh_cache_ht,
+ &__prestera_kern_neigh_cache_ht_params);
+ if (err)
+ goto err_kern_neigh_cache_ht_init;
+
+ nhgrp_cache_bytes = sw->size_tbl_router_nexthop / 8 + 1;
+ router->nhgrp_hw_state_cache = kzalloc(nhgrp_cache_bytes, GFP_KERNEL);
+ if (!router->nhgrp_hw_state_cache) {
+ err = -ENOMEM;
+ goto err_nh_state_cache_alloc;
+ }
+
+ err = prestera_neigh_work_init(sw);
+ if (err)
+ goto err_neigh_work_init;
+
+ router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb;
+ err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
+ if (err)
+ goto err_register_inetaddr_validator_notifier;
+
+ router->inetaddr_nb.notifier_call = __prestera_inetaddr_cb;
+ err = register_inetaddr_notifier(&router->inetaddr_nb);
+ if (err)
+ goto err_register_inetaddr_notifier;
+
+ router->netevent_nb.notifier_call = prestera_router_netevent_event;
+ err = register_netevent_notifier(&router->netevent_nb);
+ if (err)
+ goto err_register_netevent_notifier;
+
+ router->fib_nb.notifier_call = __prestera_router_fib_event;
+ err = register_fib_notifier(&init_net, &router->fib_nb,
+ /* TODO: flush fib entries */ NULL, NULL);
+ if (err)
+ goto err_register_fib_notifier;
+
+ return 0;
+
+err_register_fib_notifier:
+ unregister_netevent_notifier(&router->netevent_nb);
+err_register_netevent_notifier:
+ unregister_inetaddr_notifier(&router->inetaddr_nb);
+err_register_inetaddr_notifier:
+ unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
+err_register_inetaddr_validator_notifier:
+ prestera_neigh_work_fini(sw);
+err_neigh_work_init:
+ kfree(router->nhgrp_hw_state_cache);
+err_nh_state_cache_alloc:
+ rhashtable_destroy(&router->kern_neigh_cache_ht);
+err_kern_neigh_cache_ht_init:
+ rhashtable_destroy(&router->kern_fib_cache_ht);
+err_kern_fib_cache_ht_init:
+ prestera_router_hw_fini(sw);
+err_router_lib_init:
+ kfree(sw->router);
+ return err;
+}
+
+void prestera_router_fini(struct prestera_switch *sw)
+{
+ unregister_fib_notifier(&init_net, &sw->router->fib_nb);
+ unregister_netevent_notifier(&sw->router->netevent_nb);
+ unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
+ unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
+ prestera_neigh_work_fini(sw);
+ prestera_queue_drain();
+
+ prestera_k_arb_abort(sw);
+
+ kfree(sw->router->nhgrp_hw_state_cache);
+ rhashtable_destroy(&sw->router->kern_fib_cache_ht);
+ prestera_router_hw_fini(sw);
+ kfree(sw->router);
+ sw->router = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
new file mode 100644
index 000000000000..02faaea2aefa
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
@@ -0,0 +1,688 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved */
+
+#include <linux/rhashtable.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_router_hw.h"
+#include "prestera_acl.h"
+
+/* Nexthop is pointed
+ * to port (not rif)
+ * +-------+
+ * +>|nexthop|
+ * | +-------+
+ * |
+ * +--+ +-----++
+ * +------->|vr|<-+ +>|nh_grp|
+ * | +--+ | | +------+
+ * | | |
+ * +-+-------+ +--+---+-+
+ * |rif_entry| |fib_node|
+ * +---------+ +--------+
+ * Rif is Fib - is exit point
+ * used as
+ * entry point
+ * for vr in hw
+ */
+
+#define PRESTERA_NHGR_UNUSED (0)
+#define PRESTERA_NHGR_DROP (0xFFFFFFFF)
+/* Need to merge it with router_manager */
+#define PRESTERA_NH_ACTIVE_JIFFER_FILTER 3000 /* ms */
+
+static const struct rhashtable_params __prestera_fib_ht_params = {
+ .key_offset = offsetof(struct prestera_fib_node, key),
+ .head_offset = offsetof(struct prestera_fib_node, ht_node),
+ .key_len = sizeof(struct prestera_fib_key),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params __prestera_nh_neigh_ht_params = {
+ .key_offset = offsetof(struct prestera_nh_neigh, key),
+ .key_len = sizeof(struct prestera_nh_neigh_key),
+ .head_offset = offsetof(struct prestera_nh_neigh, ht_node),
+};
+
+static const struct rhashtable_params __prestera_nexthop_group_ht_params = {
+ .key_offset = offsetof(struct prestera_nexthop_group, key),
+ .key_len = sizeof(struct prestera_nexthop_group_key),
+ .head_offset = offsetof(struct prestera_nexthop_group, ht_node),
+};
+
+static int prestera_nexthop_group_set(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp);
+static bool
+prestera_nexthop_group_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp);
+static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg);
+
+/* TODO: move to router.h as macros */
+static bool prestera_nh_neigh_key_is_valid(struct prestera_nh_neigh_key *key)
+{
+ return memchr_inv(key, 0, sizeof(*key)) ? true : false;
+}
+
+int prestera_router_hw_init(struct prestera_switch *sw)
+{
+ int err;
+
+ err = rhashtable_init(&sw->router->nh_neigh_ht,
+ &__prestera_nh_neigh_ht_params);
+ if (err)
+ goto err_nh_neigh_ht_init;
+
+ err = rhashtable_init(&sw->router->nexthop_group_ht,
+ &__prestera_nexthop_group_ht_params);
+ if (err)
+ goto err_nexthop_grp_ht_init;
+
+ err = rhashtable_init(&sw->router->fib_ht,
+ &__prestera_fib_ht_params);
+ if (err)
+ goto err_fib_ht_init;
+
+ INIT_LIST_HEAD(&sw->router->vr_list);
+ INIT_LIST_HEAD(&sw->router->rif_entry_list);
+
+ return 0;
+
+err_fib_ht_init:
+ rhashtable_destroy(&sw->router->nexthop_group_ht);
+err_nexthop_grp_ht_init:
+ rhashtable_destroy(&sw->router->nh_neigh_ht);
+err_nh_neigh_ht_init:
+	return err;
+}
+
+void prestera_router_hw_fini(struct prestera_switch *sw)
+{
+ rhashtable_free_and_destroy(&sw->router->fib_ht,
+ prestera_fib_node_destroy_ht_cb, sw);
+ WARN_ON(!list_empty(&sw->router->vr_list));
+ WARN_ON(!list_empty(&sw->router->rif_entry_list));
+ rhashtable_destroy(&sw->router->fib_ht);
+ rhashtable_destroy(&sw->router->nexthop_group_ht);
+ rhashtable_destroy(&sw->router->nh_neigh_ht);
+}
+
+static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw,
+ u32 tb_id)
+{
+ struct prestera_vr *vr;
+
+ list_for_each_entry(vr, &sw->router->vr_list, router_node) {
+ if (vr->tb_id == tb_id)
+ return vr;
+ }
+
+ return NULL;
+}
+
+static struct prestera_vr *__prestera_vr_create(struct prestera_switch *sw,
+ u32 tb_id,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_vr *vr;
+ int err;
+
+ vr = kzalloc(sizeof(*vr), GFP_KERNEL);
+ if (!vr) {
+ err = -ENOMEM;
+ goto err_alloc_vr;
+ }
+
+ vr->tb_id = tb_id;
+
+ err = prestera_hw_vr_create(sw, &vr->hw_vr_id);
+ if (err)
+ goto err_hw_create;
+
+ list_add(&vr->router_node, &sw->router->vr_list);
+
+ return vr;
+
+err_hw_create:
+ kfree(vr);
+err_alloc_vr:
+ return ERR_PTR(err);
+}
+
+static void __prestera_vr_destroy(struct prestera_switch *sw,
+ struct prestera_vr *vr)
+{
+ list_del(&vr->router_node);
+ prestera_hw_vr_delete(sw, vr->hw_vr_id);
+ kfree(vr);
+}
+
+static struct prestera_vr *prestera_vr_get(struct prestera_switch *sw, u32 tb_id,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_vr *vr;
+
+ vr = __prestera_vr_find(sw, tb_id);
+ if (vr) {
+ refcount_inc(&vr->refcount);
+ } else {
+ vr = __prestera_vr_create(sw, tb_id, extack);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+
+ refcount_set(&vr->refcount, 1);
+ }
+
+ return vr;
+}
+
+static void prestera_vr_put(struct prestera_switch *sw, struct prestera_vr *vr)
+{
+ if (refcount_dec_and_test(&vr->refcount))
+ __prestera_vr_destroy(sw, vr);
+}
+
+/* iface is an overhead struct; vr_id could also be removed. */
+static int
+__prestera_rif_entry_key_copy(const struct prestera_rif_entry_key *in,
+ struct prestera_rif_entry_key *out)
+{
+ memset(out, 0, sizeof(*out));
+
+ switch (in->iface.type) {
+ case PRESTERA_IF_PORT_E:
+ out->iface.dev_port.hw_dev_num = in->iface.dev_port.hw_dev_num;
+ out->iface.dev_port.port_num = in->iface.dev_port.port_num;
+ break;
+ case PRESTERA_IF_LAG_E:
+ out->iface.lag_id = in->iface.lag_id;
+ break;
+ case PRESTERA_IF_VID_E:
+ out->iface.vlan_id = in->iface.vlan_id;
+ break;
+ default:
+ WARN(1, "Unsupported iface type");
+ return -EINVAL;
+ }
+
+ out->iface.type = in->iface.type;
+ return 0;
+}
+
+struct prestera_rif_entry *
+prestera_rif_entry_find(const struct prestera_switch *sw,
+ const struct prestera_rif_entry_key *k)
+{
+ struct prestera_rif_entry *rif_entry;
+ struct prestera_rif_entry_key lk; /* lookup key */
+
+ if (__prestera_rif_entry_key_copy(k, &lk))
+ return NULL;
+
+ list_for_each_entry(rif_entry, &sw->router->rif_entry_list,
+ router_node) {
+ if (!memcmp(k, &rif_entry->key, sizeof(*k)))
+ return rif_entry;
+ }
+
+ return NULL;
+}
+
+void prestera_rif_entry_destroy(struct prestera_switch *sw,
+ struct prestera_rif_entry *e)
+{
+ struct prestera_iface iface;
+
+ list_del(&e->router_node);
+
+ memcpy(&iface, &e->key.iface, sizeof(iface));
+ iface.vr_id = e->vr->hw_vr_id;
+ prestera_hw_rif_delete(sw, e->hw_id, &iface);
+
+ prestera_vr_put(sw, e->vr);
+ kfree(e);
+}
+
+struct prestera_rif_entry *
+prestera_rif_entry_create(struct prestera_switch *sw,
+ struct prestera_rif_entry_key *k,
+ u32 tb_id, const unsigned char *addr)
+{
+ int err;
+ struct prestera_rif_entry *e;
+ struct prestera_iface iface;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ goto err_kzalloc;
+
+ if (__prestera_rif_entry_key_copy(k, &e->key))
+ goto err_key_copy;
+
+ e->vr = prestera_vr_get(sw, tb_id, NULL);
+ if (IS_ERR(e->vr))
+ goto err_vr_get;
+
+ memcpy(&e->addr, addr, sizeof(e->addr));
+
+ /* HW */
+ memcpy(&iface, &e->key.iface, sizeof(iface));
+ iface.vr_id = e->vr->hw_vr_id;
+ err = prestera_hw_rif_create(sw, &iface, e->addr, &e->hw_id);
+ if (err)
+ goto err_hw_create;
+
+ list_add(&e->router_node, &sw->router->rif_entry_list);
+
+ return e;
+
+err_hw_create:
+ prestera_vr_put(sw, e->vr);
+err_vr_get:
+err_key_copy:
+ kfree(e);
+err_kzalloc:
+ return NULL;
+}
+
+static void __prestera_nh_neigh_destroy(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh)
+{
+ rhashtable_remove_fast(&sw->router->nh_neigh_ht,
+ &neigh->ht_node,
+ __prestera_nh_neigh_ht_params);
+ kfree(neigh);
+}
+
+static struct prestera_nh_neigh *
+__prestera_nh_neigh_create(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key)
+{
+ struct prestera_nh_neigh *neigh;
+ int err;
+
+ neigh = kzalloc(sizeof(*neigh), GFP_KERNEL);
+ if (!neigh)
+ goto err_kzalloc;
+
+ memcpy(&neigh->key, key, sizeof(*key));
+ neigh->info.connected = false;
+ INIT_LIST_HEAD(&neigh->nexthop_group_list);
+ err = rhashtable_insert_fast(&sw->router->nh_neigh_ht,
+ &neigh->ht_node,
+ __prestera_nh_neigh_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
+ return neigh;
+
+err_rhashtable_insert:
+ kfree(neigh);
+err_kzalloc:
+ return NULL;
+}
+
+struct prestera_nh_neigh *
+prestera_nh_neigh_find(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key)
+{
+ struct prestera_nh_neigh *nh_neigh;
+
+ nh_neigh = rhashtable_lookup_fast(&sw->router->nh_neigh_ht,
+ key, __prestera_nh_neigh_ht_params);
+ return nh_neigh;
+}
+
+struct prestera_nh_neigh *
+prestera_nh_neigh_get(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key)
+{
+ struct prestera_nh_neigh *neigh;
+
+ neigh = prestera_nh_neigh_find(sw, key);
+ if (!neigh)
+ return __prestera_nh_neigh_create(sw, key);
+
+ return neigh;
+}
+
+void prestera_nh_neigh_put(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh)
+{
+ if (list_empty(&neigh->nexthop_group_list))
+ __prestera_nh_neigh_destroy(sw, neigh);
+}
+
+/* Propagate updated prestera_neigh_info to the nexthop groups using this neigh */
+int prestera_nh_neigh_set(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh)
+{
+ struct prestera_nh_neigh_head *nh_head;
+ struct prestera_nexthop_group *nh_grp;
+ int err;
+
+ list_for_each_entry(nh_head, &neigh->nexthop_group_list, head) {
+ nh_grp = nh_head->this;
+ err = prestera_nexthop_group_set(sw, nh_grp);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nh_neigh *nh_neigh)
+{
+ bool state;
+ struct prestera_nh_neigh_head *nh_head, *tmp;
+
+ state = false;
+ list_for_each_entry_safe(nh_head, tmp,
+ &nh_neigh->nexthop_group_list, head) {
+ state = prestera_nexthop_group_util_hw_state(sw, nh_head->this);
+ if (state)
+ goto out;
+ }
+
+out:
+ return state;
+}
+
+static struct prestera_nexthop_group *
+__prestera_nexthop_group_create(struct prestera_switch *sw,
+ struct prestera_nexthop_group_key *key)
+{
+ struct prestera_nexthop_group *nh_grp;
+ struct prestera_nh_neigh *nh_neigh;
+ int nh_cnt, err, gid;
+
+ nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
+ if (!nh_grp)
+ goto err_kzalloc;
+
+ memcpy(&nh_grp->key, key, sizeof(*key));
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ if (!prestera_nh_neigh_key_is_valid(&nh_grp->key.neigh[nh_cnt]))
+ break;
+
+ nh_neigh = prestera_nh_neigh_get(sw,
+ &nh_grp->key.neigh[nh_cnt]);
+ if (!nh_neigh)
+ goto err_nh_neigh_get;
+
+ nh_grp->nh_neigh_head[nh_cnt].neigh = nh_neigh;
+ nh_grp->nh_neigh_head[nh_cnt].this = nh_grp;
+ list_add(&nh_grp->nh_neigh_head[nh_cnt].head,
+ &nh_neigh->nexthop_group_list);
+ }
+
+ err = prestera_hw_nh_group_create(sw, nh_cnt, &nh_grp->grp_id);
+ if (err)
+ goto err_nh_group_create;
+
+ err = prestera_nexthop_group_set(sw, nh_grp);
+ if (err)
+ goto err_nexthop_group_set;
+
+ err = rhashtable_insert_fast(&sw->router->nexthop_group_ht,
+ &nh_grp->ht_node,
+ __prestera_nexthop_group_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ /* reset cache for created group */
+ gid = nh_grp->grp_id;
+ sw->router->nhgrp_hw_state_cache[gid / 8] &= ~BIT(gid % 8);
+
+ return nh_grp;
+
+err_ht_insert:
+err_nexthop_group_set:
+ prestera_hw_nh_group_delete(sw, nh_cnt, nh_grp->grp_id);
+err_nh_group_create:
+err_nh_neigh_get:
+ for (nh_cnt--; nh_cnt >= 0; nh_cnt--) {
+ list_del(&nh_grp->nh_neigh_head[nh_cnt].head);
+ prestera_nh_neigh_put(sw, nh_grp->nh_neigh_head[nh_cnt].neigh);
+ }
+
+ kfree(nh_grp);
+err_kzalloc:
+ return NULL;
+}
+
+static void
+__prestera_nexthop_group_destroy(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ struct prestera_nh_neigh *nh_neigh;
+ int nh_cnt;
+
+ rhashtable_remove_fast(&sw->router->nexthop_group_ht,
+ &nh_grp->ht_node,
+ __prestera_nexthop_group_ht_params);
+
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ nh_neigh = nh_grp->nh_neigh_head[nh_cnt].neigh;
+ if (!nh_neigh)
+ break;
+
+ list_del(&nh_grp->nh_neigh_head[nh_cnt].head);
+ prestera_nh_neigh_put(sw, nh_neigh);
+ }
+
+ prestera_hw_nh_group_delete(sw, nh_cnt, nh_grp->grp_id);
+ kfree(nh_grp);
+}
+
+static struct prestera_nexthop_group *
+__prestera_nexthop_group_find(struct prestera_switch *sw,
+ struct prestera_nexthop_group_key *key)
+{
+ struct prestera_nexthop_group *nh_grp;
+
+ nh_grp = rhashtable_lookup_fast(&sw->router->nexthop_group_ht,
+ key, __prestera_nexthop_group_ht_params);
+ return nh_grp;
+}
+
+static struct prestera_nexthop_group *
+prestera_nexthop_group_get(struct prestera_switch *sw,
+ struct prestera_nexthop_group_key *key)
+{
+ struct prestera_nexthop_group *nh_grp;
+
+ nh_grp = __prestera_nexthop_group_find(sw, key);
+ if (nh_grp) {
+ refcount_inc(&nh_grp->refcount);
+ } else {
+ nh_grp = __prestera_nexthop_group_create(sw, key);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&nh_grp->refcount, 1);
+ }
+
+ return nh_grp;
+}
+
+static void prestera_nexthop_group_put(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ if (refcount_dec_and_test(&nh_grp->refcount))
+ __prestera_nexthop_group_destroy(sw, nh_grp);
+}
+
+/* Update the group's HW nexthop entries with the current nh_neigh info. */
+static int prestera_nexthop_group_set(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ struct prestera_neigh_info info[PRESTERA_NHGR_SIZE_MAX];
+ struct prestera_nh_neigh *neigh;
+ int nh_cnt;
+
+ memset(&info[0], 0, sizeof(info));
+ for (nh_cnt = 0; nh_cnt < PRESTERA_NHGR_SIZE_MAX; nh_cnt++) {
+ neigh = nh_grp->nh_neigh_head[nh_cnt].neigh;
+ if (!neigh)
+ break;
+
+ memcpy(&info[nh_cnt], &neigh->info, sizeof(neigh->info));
+ }
+
+ return prestera_hw_nh_entries_set(sw, nh_cnt, &info[0], nh_grp->grp_id);
+}
+
+static bool
+prestera_nexthop_group_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nexthop_group *nh_grp)
+{
+ int err;
+ u32 buf_size = sw->size_tbl_router_nexthop / 8 + 1;
+ u32 gid = nh_grp->grp_id;
+ u8 *cache = sw->router->nhgrp_hw_state_cache;
+
+ /* Antijitter
+ * Prevent the situation where the state of an nh_grp is read twice in a
+ * short time and the state bit is still cleared on the second call. So
+ * keep the cached active state for PRESTERA_NH_ACTIVE_JIFFER_FILTER ms
+ * after the last refresh.
+ */
+ if (!time_before(jiffies, sw->router->nhgrp_hw_cache_kick +
+ msecs_to_jiffies(PRESTERA_NH_ACTIVE_JIFFER_FILTER))) {
+ err = prestera_hw_nhgrp_blk_get(sw, cache, buf_size);
+ if (err) {
+ pr_err("Failed to get hw state nh_grp's");
+ return false;
+ }
+
+ sw->router->nhgrp_hw_cache_kick = jiffies;
+ }
+
+ if (cache[gid / 8] & BIT(gid % 8))
+ return true;
+
+ return false;
+}
+
+struct prestera_fib_node *
+prestera_fib_node_find(struct prestera_switch *sw, struct prestera_fib_key *key)
+{
+ struct prestera_fib_node *fib_node;
+
+ fib_node = rhashtable_lookup_fast(&sw->router->fib_ht, key,
+ __prestera_fib_ht_params);
+ return fib_node;
+}
+
+static void __prestera_fib_node_destruct(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node)
+{
+ struct prestera_vr *vr;
+
+ vr = fib_node->info.vr;
+ prestera_hw_lpm_del(sw, vr->hw_vr_id, fib_node->key.addr.u.ipv4,
+ fib_node->key.prefix_len);
+ switch (fib_node->info.type) {
+ case PRESTERA_FIB_TYPE_UC_NH:
+ prestera_nexthop_group_put(sw, fib_node->info.nh_grp);
+ break;
+ case PRESTERA_FIB_TYPE_TRAP:
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ break;
+ default:
+ pr_err("Unknown fib_node->info.type = %d",
+ fib_node->info.type);
+ }
+
+ prestera_vr_put(sw, vr);
+}
+
+void prestera_fib_node_destroy(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node)
+{
+ __prestera_fib_node_destruct(sw, fib_node);
+ rhashtable_remove_fast(&sw->router->fib_ht, &fib_node->ht_node,
+ __prestera_fib_ht_params);
+ kfree(fib_node);
+}
+
+static void prestera_fib_node_destroy_ht_cb(void *ptr, void *arg)
+{
+ struct prestera_fib_node *node = ptr;
+ struct prestera_switch *sw = arg;
+
+ __prestera_fib_node_destruct(sw, node);
+ kfree(node);
+}
+
+struct prestera_fib_node *
+prestera_fib_node_create(struct prestera_switch *sw,
+ struct prestera_fib_key *key,
+ enum prestera_fib_type fib_type,
+ struct prestera_nexthop_group_key *nh_grp_key)
+{
+ struct prestera_fib_node *fib_node;
+ u32 grp_id;
+ struct prestera_vr *vr;
+ int err;
+
+ fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
+ if (!fib_node)
+ goto err_kzalloc;
+
+ memcpy(&fib_node->key, key, sizeof(*key));
+ fib_node->info.type = fib_type;
+
+ vr = prestera_vr_get(sw, key->tb_id, NULL);
+ if (IS_ERR(vr))
+ goto err_vr_get;
+
+ fib_node->info.vr = vr;
+
+ switch (fib_type) {
+ case PRESTERA_FIB_TYPE_TRAP:
+ grp_id = PRESTERA_NHGR_UNUSED;
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ grp_id = PRESTERA_NHGR_DROP;
+ break;
+ case PRESTERA_FIB_TYPE_UC_NH:
+ fib_node->info.nh_grp = prestera_nexthop_group_get(sw,
+ nh_grp_key);
+ if (IS_ERR(fib_node->info.nh_grp))
+ goto err_nh_grp_get;
+
+ grp_id = fib_node->info.nh_grp->grp_id;
+ break;
+ default:
+ pr_err("Unsupported fib_type %d", fib_type);
+ goto err_nh_grp_get;
+ }
+
+ err = prestera_hw_lpm_add(sw, vr->hw_vr_id, key->addr.u.ipv4,
+ key->prefix_len, grp_id);
+ if (err)
+ goto err_lpm_add;
+
+ err = rhashtable_insert_fast(&sw->router->fib_ht, &fib_node->ht_node,
+ __prestera_fib_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return fib_node;
+
+err_ht_insert:
+ prestera_hw_lpm_del(sw, vr->hw_vr_id, key->addr.u.ipv4,
+ key->prefix_len);
+err_lpm_add:
+ if (fib_type == PRESTERA_FIB_TYPE_UC_NH)
+ prestera_nexthop_group_put(sw, fib_node->info.nh_grp);
+err_nh_grp_get:
+ prestera_vr_put(sw, vr);
+err_vr_get:
+ kfree(fib_node);
+err_kzalloc:
+ return NULL;
+}
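
The antijitter logic in prestera_nexthop_group_util_hw_state() above is a small time-based debounce: the per-group HW activity bitmap is re-read from hardware at most once per filter interval, and calls in between are answered from the cached copy. A minimal standalone sketch of that pattern follows; the names and the 1000 ms interval are illustrative, and hw_read_state_bitmap() is a hypothetical stand-in for prestera_hw_nhgrp_blk_get().

#include <linux/jiffies.h>
#include <linux/bits.h>
#include <linux/types.h>

#define HW_STATE_FILTER_MS	1000	/* illustrative debounce interval */

struct hw_state_cache {
	unsigned long last_kick;	/* jiffies of the last hardware read */
	u8 *bitmap;			/* one bit per group id */
	u32 bitmap_len;			/* bitmap length in bytes */
};

/* Hypothetical accessor standing in for prestera_hw_nhgrp_blk_get(). */
int hw_read_state_bitmap(u8 *buf, u32 len);

static bool hw_state_cached_test(struct hw_state_cache *c, u32 gid)
{
	/* Refresh from hardware only once the debounce interval has elapsed. */
	if (time_after_eq(jiffies, c->last_kick +
			  msecs_to_jiffies(HW_STATE_FILTER_MS))) {
		if (!hw_read_state_bitmap(c->bitmap, c->bitmap_len))
			c->last_kick = jiffies;
	}

	return c->bitmap[gid / 8] & BIT(gid % 8);
}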
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
new file mode 100644
index 000000000000..9ca97919c863
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2021 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_ROUTER_HW_H_
+#define _PRESTERA_ROUTER_HW_H_
+
+struct prestera_vr {
+ struct list_head router_node;
+ refcount_t refcount;
+ u32 tb_id; /* key (kernel fib table id) */
+ u16 hw_vr_id; /* virtual router ID */
+ u8 __pad[2];
+};
+
+struct prestera_rif_entry {
+ struct prestera_rif_entry_key {
+ struct prestera_iface iface;
+ } key;
+ struct prestera_vr *vr;
+ unsigned char addr[ETH_ALEN];
+ u16 hw_id; /* rif_id */
+ struct list_head router_node; /* ht */
+};
+
+struct prestera_ip_addr {
+ union {
+ __be32 ipv4;
+ struct in6_addr ipv6;
+ } u;
+ enum {
+ PRESTERA_IPV4 = 0,
+ PRESTERA_IPV6
+ } v;
+#define PRESTERA_IP_ADDR_PLEN(V) ((V) == PRESTERA_IPV4 ? 32 : \
+ /* (V) == PRESTERA_IPV6 ? */ 128 /* : 0 */)
+};
+
+struct prestera_nh_neigh_key {
+ struct prestera_ip_addr addr;
+ /* rif may look obsolete here, since info already carries an iface.
+ * A key can contain functional fields, or fields used only to filter
+ * duplicate objects at the logical level (before passing them to HW)...
+ * a key can also be used to cover hardware restrictions.
+ * In our case rif is a logical interface (it can even be a VLAN), which
+ * is used in combination with the IP address (also unrelated to the
+ * hardware nexthop) to provide logical compression of created nexthops.
+ * You can even think of rif+IPaddr as just a cookie.
+ */
+ /* struct prestera_rif *rif; */
+ /* Used just as a cookie, together with addr, to separate ARP domains */
+ void *rif;
+};
+
+/* Used for hw call */
+struct prestera_neigh_info {
+ struct prestera_iface iface;
+ unsigned char ha[ETH_ALEN];
+ u8 connected; /* bool: indicates whether mac/oif are valid */
+ u8 __pad[1];
+};
+
+/* Used to notify nh about neigh change */
+struct prestera_nh_neigh {
+ struct prestera_nh_neigh_key key;
+ struct prestera_neigh_info info;
+ struct rhash_head ht_node; /* node of prestera_vr */
+ struct list_head nexthop_group_list;
+};
+
+#define PRESTERA_NHGR_SIZE_MAX 4
+
+struct prestera_nexthop_group {
+ struct prestera_nexthop_group_key {
+ struct prestera_nh_neigh_key neigh[PRESTERA_NHGR_SIZE_MAX];
+ } key;
+ /* Store the intermediate object here.
+ * This avoids an extra kzalloc call.
+ */
+ /* nh_neigh is used only to notify nexthop_group */
+ struct prestera_nh_neigh_head {
+ struct prestera_nexthop_group *this;
+ struct list_head head;
+ /* The pointer to neigh is not strictly necessary.
+ * It is kept to avoid looking up nh_neigh by key (n) on destroy.
+ */
+ struct prestera_nh_neigh *neigh;
+ } nh_neigh_head[PRESTERA_NHGR_SIZE_MAX];
+ struct rhash_head ht_node; /* node of prestera_vr */
+ refcount_t refcount;
+ u32 grp_id; /* hw */
+};
+
+struct prestera_fib_key {
+ struct prestera_ip_addr addr;
+ u32 prefix_len;
+ u32 tb_id;
+};
+
+struct prestera_fib_info {
+ struct prestera_vr *vr;
+ struct list_head vr_node;
+ enum prestera_fib_type {
+ PRESTERA_FIB_TYPE_INVALID = 0,
+ /* requires a pointer to an nh_grp (its grp_id is used) */
+ PRESTERA_FIB_TYPE_UC_NH,
+ /* Can be a connected route,
+ * which will be overlapped by neighbour entries
+ */
+ PRESTERA_FIB_TYPE_TRAP,
+ PRESTERA_FIB_TYPE_DROP
+ } type;
+ /* Valid only if type == PRESTERA_FIB_TYPE_UC_NH */
+ struct prestera_nexthop_group *nh_grp;
+};
+
+struct prestera_fib_node {
+ struct rhash_head ht_node; /* node of prestera_vr */
+ struct prestera_fib_key key;
+ struct prestera_fib_info info; /* action related info */
+};
+
+struct prestera_rif_entry *
+prestera_rif_entry_find(const struct prestera_switch *sw,
+ const struct prestera_rif_entry_key *k);
+void prestera_rif_entry_destroy(struct prestera_switch *sw,
+ struct prestera_rif_entry *e);
+struct prestera_rif_entry *
+prestera_rif_entry_create(struct prestera_switch *sw,
+ struct prestera_rif_entry_key *k,
+ u32 tb_id, const unsigned char *addr);
+struct prestera_nh_neigh *
+prestera_nh_neigh_find(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key);
+struct prestera_nh_neigh *
+prestera_nh_neigh_get(struct prestera_switch *sw,
+ struct prestera_nh_neigh_key *key);
+void prestera_nh_neigh_put(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh);
+int prestera_nh_neigh_set(struct prestera_switch *sw,
+ struct prestera_nh_neigh *neigh);
+bool prestera_nh_neigh_util_hw_state(struct prestera_switch *sw,
+ struct prestera_nh_neigh *nh_neigh);
+struct prestera_fib_node *prestera_fib_node_find(struct prestera_switch *sw,
+ struct prestera_fib_key *key);
+void prestera_fib_node_destroy(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node);
+struct prestera_fib_node *
+prestera_fib_node_create(struct prestera_switch *sw,
+ struct prestera_fib_key *key,
+ enum prestera_fib_type fib_type,
+ struct prestera_nexthop_group_key *nh_grp_key);
+int prestera_router_hw_init(struct prestera_switch *sw);
+void prestera_router_hw_fini(struct prestera_switch *sw);
+
+#endif /* _PRESTERA_ROUTER_HW_H_ */
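
For orientation, a hedged sketch of how a route-offload caller might drive the FIB interface declared above; the helper name and the replace-then-create flow are invented for illustration and are not part of this patch.

/* Hypothetical: (re)offload one unicast route through a nexthop group. */
static int example_uc_route_offload(struct prestera_switch *sw,
				    struct prestera_fib_key *key,
				    struct prestera_nexthop_group_key *grp_key)
{
	struct prestera_fib_node *fib_node;

	/* Replace an existing entry for the same prefix, if any. */
	fib_node = prestera_fib_node_find(sw, key);
	if (fib_node)
		prestera_fib_node_destroy(sw, fib_node);

	fib_node = prestera_fib_node_create(sw, key, PRESTERA_FIB_TYPE_UC_NH,
					    grp_key);
	return fib_node ? 0 : -ENOMEM;
}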
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index e452cdeaf703..9277a8fd1339 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -102,7 +102,7 @@ struct prestera_sdma {
struct net_device napi_dev;
u32 map_addr;
u64 dma_mask;
- /* protect SDMA with concurrrent access from multiple CPUs */
+ /* protect SDMA with concurrent access from multiple CPUs */
spinlock_t tx_lock;
};
@@ -659,7 +659,7 @@ static int prestera_sdma_switch_init(struct prestera_switch *sw)
init_dummy_netdev(&sdma->napi_dev);
- netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll, 64);
+ netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll);
napi_enable(&sdma->rx_napi);
return 0;
@@ -776,6 +776,7 @@ tx_done:
int prestera_rxtx_switch_init(struct prestera_switch *sw)
{
struct prestera_rxtx *rxtx;
+ int err;
rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
if (!rxtx)
@@ -783,7 +784,11 @@ int prestera_rxtx_switch_init(struct prestera_switch *sw)
sw->rxtx = rxtx;
- return prestera_sdma_switch_init(sw);
+ err = prestera_sdma_switch_init(sw);
+ if (err)
+ kfree(rxtx);
+
+ return err;
}
void prestera_rxtx_switch_fini(struct prestera_switch *sw)
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c
index 3cafca827bb7..1005182ce3bc 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_span.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c
@@ -7,6 +7,7 @@
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_acl.h"
+#include "prestera_flow.h"
#include "prestera_span.h"
struct prestera_span_entry {
@@ -106,7 +107,7 @@ static int prestera_span_put(struct prestera_switch *sw, u8 span_id)
entry = prestera_span_entry_find_by_id(sw->span, span_id);
if (!entry)
- return false;
+ return -ENOENT;
if (!refcount_dec_and_test(&entry->ref_count))
return 0;
@@ -119,8 +120,9 @@ static int prestera_span_put(struct prestera_switch *sw, u8 span_id)
return 0;
}
-static int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
- struct prestera_port *to_port)
+int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
+ struct prestera_port *to_port,
+ bool ingress)
{
struct prestera_switch *sw = binding->port->sw;
u8 span_id;
@@ -134,7 +136,7 @@ static int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
if (err)
return err;
- err = prestera_hw_span_bind(binding->port, span_id);
+ err = prestera_hw_span_bind(binding->port, span_id, ingress);
if (err) {
prestera_span_put(sw, span_id);
return err;
@@ -144,11 +146,15 @@ static int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
return 0;
}
-static int prestera_span_rule_del(struct prestera_flow_block_binding *binding)
+int prestera_span_rule_del(struct prestera_flow_block_binding *binding,
+ bool ingress)
{
int err;
- err = prestera_hw_span_unbind(binding->port);
+ if (binding->span_id == PRESTERA_SPAN_INVALID_ID)
+ return -ENOENT;
+
+ err = prestera_hw_span_unbind(binding->port, ingress);
if (err)
return err;
@@ -160,60 +166,6 @@ static int prestera_span_rule_del(struct prestera_flow_block_binding *binding)
return 0;
}
-int prestera_span_replace(struct prestera_flow_block *block,
- struct tc_cls_matchall_offload *f)
-{
- struct prestera_flow_block_binding *binding;
- __be16 protocol = f->common.protocol;
- struct flow_action_entry *act;
- struct prestera_port *port;
- int err;
-
- if (!flow_offload_has_one_action(&f->rule->action)) {
- NL_SET_ERR_MSG(f->common.extack,
- "Only singular actions are supported");
- return -EOPNOTSUPP;
- }
-
- act = &f->rule->action.entries[0];
-
- if (!prestera_netdev_check(act->dev)) {
- NL_SET_ERR_MSG(f->common.extack,
- "Only Marvell Prestera port is supported");
- return -EINVAL;
- }
- if (!tc_cls_can_offload_and_chain0(act->dev, &f->common))
- return -EOPNOTSUPP;
- if (act->id != FLOW_ACTION_MIRRED)
- return -EOPNOTSUPP;
- if (protocol != htons(ETH_P_ALL))
- return -EOPNOTSUPP;
-
- port = netdev_priv(act->dev);
-
- list_for_each_entry(binding, &block->binding_list, list) {
- err = prestera_span_rule_add(binding, port);
- if (err)
- goto rollback;
- }
-
- return 0;
-
-rollback:
- list_for_each_entry_continue_reverse(binding,
- &block->binding_list, list)
- prestera_span_rule_del(binding);
- return err;
-}
-
-void prestera_span_destroy(struct prestera_flow_block *block)
-{
- struct prestera_flow_block_binding *binding;
-
- list_for_each_entry(binding, &block->binding_list, list)
- prestera_span_rule_del(binding);
-}
-
int prestera_span_init(struct prestera_switch *sw)
{
struct prestera_span *span;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.h b/drivers/net/ethernet/marvell/prestera/prestera_span.h
index f0644521f78a..493b68524bcb 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_span.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.h
@@ -8,13 +8,17 @@
#define PRESTERA_SPAN_INVALID_ID -1
+struct prestera_port;
struct prestera_switch;
-struct prestera_flow_block;
+struct prestera_flow_block_binding;
int prestera_span_init(struct prestera_switch *sw);
void prestera_span_fini(struct prestera_switch *sw);
-int prestera_span_replace(struct prestera_flow_block *block,
- struct tc_cls_matchall_offload *f);
-void prestera_span_destroy(struct prestera_flow_block *block);
+
+int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
+ struct prestera_port *to_port,
+ bool ingress);
+int prestera_span_rule_del(struct prestera_flow_block_binding *binding,
+ bool ingress);
#endif /* _PRESTERA_SPAN_H_ */
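
The block-level helpers prestera_span_replace()/prestera_span_destroy() removed above are superseded by the per-binding prestera_span_rule_add()/prestera_span_rule_del() calls, which now take an ingress flag. A hedged sketch of how a caller (presumably the matchall offload path) can reproduce the old per-block behaviour, including the rollback, with the new API; the helper name is invented, the body mirrors the removed prestera_span_replace() loop.

/* Hypothetical: mirror every binding of a flow block to one port. */
static int example_span_block_mirror(struct prestera_flow_block *block,
				     struct prestera_port *to_port,
				     bool ingress)
{
	struct prestera_flow_block_binding *binding;
	int err;

	list_for_each_entry(binding, &block->binding_list, list) {
		err = prestera_span_rule_add(binding, to_port, ingress);
		if (err)
			goto rollback;
	}

	return 0;

rollback:
	list_for_each_entry_continue_reverse(binding, &block->binding_list,
					     list)
		prestera_span_rule_del(binding, ingress);
	return err;
}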
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index b4599fe4ca8d..e548cd32582e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -39,7 +39,10 @@ struct prestera_bridge {
struct net_device *dev;
struct prestera_switchdev *swdev;
struct list_head port_list;
+ struct list_head br_mdb_entry_list;
+ bool mrouter_exist;
bool vlan_enabled;
+ bool multicast_enabled;
u16 bridge_id;
};
@@ -48,8 +51,10 @@ struct prestera_bridge_port {
struct net_device *dev;
struct prestera_bridge *bridge;
struct list_head vlan_list;
+ struct list_head br_mdb_port_list;
refcount_t ref_count;
unsigned long flags;
+ bool mrouter;
u8 stp_state;
};
@@ -67,6 +72,20 @@ struct prestera_port_vlan {
u16 vid;
};
+struct prestera_br_mdb_port {
+ struct prestera_bridge_port *br_port;
+ struct list_head br_mdb_port_node;
+};
+
+/* Software representation of MDB table. */
+struct prestera_br_mdb_entry {
+ struct prestera_bridge *bridge;
+ struct prestera_mdb_entry *mdb;
+ struct list_head br_mdb_port_list;
+ struct list_head br_mdb_entry_node;
+ bool enabled;
+};
+
static struct workqueue_struct *swdev_wq;
static void prestera_bridge_port_put(struct prestera_bridge_port *br_port);
@@ -74,6 +93,88 @@ static void prestera_bridge_port_put(struct prestera_bridge_port *br_port);
static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
u8 state);
+static struct prestera_bridge *
+prestera_bridge_find(const struct prestera_switch *sw,
+ const struct net_device *br_dev)
+{
+ struct prestera_bridge *bridge;
+
+ list_for_each_entry(bridge, &sw->swdev->bridge_list, head)
+ if (bridge->dev == br_dev)
+ return bridge;
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+__prestera_bridge_port_find(const struct prestera_bridge *bridge,
+ const struct net_device *brport_dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ list_for_each_entry(br_port, &bridge->port_list, head)
+ if (br_port->dev == brport_dev)
+ return br_port;
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_find(struct prestera_switch *sw,
+ struct net_device *brport_dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
+ struct prestera_bridge *bridge;
+
+ if (!br_dev)
+ return NULL;
+
+ bridge = prestera_bridge_find(sw, br_dev);
+ if (!bridge)
+ return NULL;
+
+ return __prestera_bridge_port_find(bridge, brport_dev);
+}
+
+static void
+prestera_br_port_flags_reset(struct prestera_bridge_port *br_port,
+ struct prestera_port *port)
+{
+ prestera_port_uc_flood_set(port, false);
+ prestera_port_mc_flood_set(port, false);
+ prestera_port_learning_set(port, false);
+ prestera_port_br_locked_set(port, false);
+}
+
+static int prestera_br_port_flags_set(struct prestera_bridge_port *br_port,
+ struct prestera_port *port)
+{
+ int err;
+
+ err = prestera_port_uc_flood_set(port, br_port->flags & BR_FLOOD);
+ if (err)
+ goto err_out;
+
+ err = prestera_port_mc_flood_set(port, br_port->flags & BR_MCAST_FLOOD);
+ if (err)
+ goto err_out;
+
+ err = prestera_port_learning_set(port, br_port->flags & BR_LEARNING);
+ if (err)
+ goto err_out;
+
+ err = prestera_port_br_locked_set(port,
+ br_port->flags & BR_PORT_LOCKED);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ prestera_br_port_flags_reset(br_port, port);
+ return err;
+}
+
static struct prestera_bridge_vlan *
prestera_bridge_vlan_create(struct prestera_bridge_port *br_port, u16 vid)
{
@@ -220,6 +321,70 @@ static int prestera_fdb_flush_port(struct prestera_port *port, u32 mode)
}
static void
+prestera_mdb_port_del(struct prestera_mdb_entry *mdb,
+ struct net_device *orig_dev)
+{
+ struct prestera_flood_domain *fl_domain = mdb->flood_domain;
+ struct prestera_flood_domain_port *flood_domain_port;
+
+ flood_domain_port = prestera_flood_domain_port_find(fl_domain,
+ orig_dev,
+ mdb->vid);
+ if (flood_domain_port)
+ prestera_flood_domain_port_destroy(flood_domain_port);
+}
+
+static void
+prestera_br_mdb_entry_put(struct prestera_br_mdb_entry *br_mdb)
+{
+ struct prestera_bridge_port *br_port;
+
+ if (list_empty(&br_mdb->br_mdb_port_list)) {
+ list_for_each_entry(br_port, &br_mdb->bridge->port_list, head)
+ prestera_mdb_port_del(br_mdb->mdb, br_port->dev);
+
+ prestera_mdb_entry_destroy(br_mdb->mdb);
+ list_del(&br_mdb->br_mdb_entry_node);
+ kfree(br_mdb);
+ }
+}
+
+static void
+prestera_br_mdb_port_del(struct prestera_br_mdb_entry *br_mdb,
+ struct prestera_bridge_port *br_port)
+{
+ struct prestera_br_mdb_port *br_mdb_port, *tmp;
+
+ list_for_each_entry_safe(br_mdb_port, tmp, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node) {
+ if (br_mdb_port->br_port == br_port) {
+ list_del(&br_mdb_port->br_mdb_port_node);
+ kfree(br_mdb_port);
+ }
+ }
+}
+
+static void
+prestera_mdb_flush_bridge_port(struct prestera_bridge_port *br_port)
+{
+ struct prestera_br_mdb_port *br_mdb_port, *tmp_port;
+ struct prestera_br_mdb_entry *br_mdb, *br_mdb_tmp;
+ struct prestera_bridge *br_dev = br_port->bridge;
+
+ list_for_each_entry_safe(br_mdb, br_mdb_tmp, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node) {
+ list_for_each_entry_safe(br_mdb_port, tmp_port,
+ &br_mdb->br_mdb_port_list,
+ br_mdb_port_node) {
+ prestera_mdb_port_del(br_mdb->mdb,
+ br_mdb_port->br_port->dev);
+ prestera_br_mdb_port_del(br_mdb, br_mdb_port->br_port);
+ }
+ prestera_br_mdb_entry_put(br_mdb);
+ }
+}
+
+static void
prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
{
u32 fdb_flush_mode = PRESTERA_FDB_FLUSH_MODE_DYNAMIC;
@@ -244,6 +409,8 @@ prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
else
prestera_fdb_flush_port_vlan(port, vid, fdb_flush_mode);
+ prestera_mdb_flush_bridge_port(br_port);
+
list_del(&port_vlan->br_vlan_head);
prestera_bridge_vlan_put(br_vlan);
prestera_bridge_port_put(br_port);
@@ -295,8 +462,10 @@ prestera_bridge_create(struct prestera_switchdev *swdev, struct net_device *dev)
bridge->vlan_enabled = vlan_enabled;
bridge->swdev = swdev;
bridge->dev = dev;
+ bridge->multicast_enabled = br_multicast_enabled(dev);
INIT_LIST_HEAD(&bridge->port_list);
+ INIT_LIST_HEAD(&bridge->br_mdb_entry_list);
list_add(&bridge->head, &swdev->bridge_list);
@@ -314,6 +483,7 @@ static void prestera_bridge_destroy(struct prestera_bridge *bridge)
else
prestera_hw_bridge_delete(swdev->sw, bridge->bridge_id);
+ WARN_ON(!list_empty(&bridge->br_mdb_entry_list));
WARN_ON(!list_empty(&bridge->port_list));
kfree(bridge);
}
@@ -405,6 +575,7 @@ prestera_bridge_port_create(struct prestera_bridge *bridge,
INIT_LIST_HEAD(&br_port->vlan_list);
list_add(&br_port->head, &bridge->port_list);
+ INIT_LIST_HEAD(&br_port->br_mdb_port_list);
return br_port;
}
@@ -414,6 +585,7 @@ prestera_bridge_port_destroy(struct prestera_bridge_port *br_port)
{
list_del(&br_port->head);
WARN_ON(!list_empty(&br_port->vlan_list));
+ WARN_ON(!list_empty(&br_port->br_mdb_port_list));
kfree(br_port);
}
@@ -461,19 +633,13 @@ prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port)
if (err)
return err;
- err = prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD,
- br_port->flags);
- if (err)
- goto err_port_flood_set;
-
- err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
+ err = prestera_br_port_flags_set(br_port, port);
if (err)
- goto err_port_learning_set;
+ goto err_flags2port_set;
return 0;
-err_port_learning_set:
-err_port_flood_set:
+err_flags2port_set:
prestera_hw_bridge_port_delete(port, bridge->bridge_id);
return err;
@@ -592,8 +758,9 @@ void prestera_bridge_port_leave(struct net_device *br_dev,
switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);
- prestera_hw_port_learning_set(port, false);
- prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD, 0);
+ prestera_mdb_flush_bridge_port(br_port);
+
+ prestera_br_port_flags_reset(br_port, port);
prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING);
prestera_bridge_port_put(br_port);
}
@@ -603,26 +770,14 @@ static int prestera_port_attr_br_flags_set(struct prestera_port *port,
struct switchdev_brport_flags flags)
{
struct prestera_bridge_port *br_port;
- int err;
br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
if (!br_port)
return 0;
- err = prestera_hw_port_flood_set(port, flags.mask, flags.val);
- if (err)
- return err;
-
- if (flags.mask & BR_LEARNING) {
- err = prestera_hw_port_learning_set(port,
- flags.val & BR_LEARNING);
- if (err)
- return err;
- }
-
- memcpy(&br_port->flags, &flags.val, sizeof(flags.val));
-
- return 0;
+ br_port->flags &= ~flags.mask;
+ br_port->flags |= flags.val & flags.mask;
+ return prestera_br_port_flags_set(br_port, port);
}
static int prestera_port_attr_br_ageing_set(struct prestera_port *port,
@@ -716,6 +871,290 @@ err_port_stp_set:
return err;
}
+static int
+prestera_br_port_lag_mdb_mc_enable_sync(struct prestera_bridge_port *br_port,
+ bool enabled)
+{
+ struct prestera_port *pr_port;
+ struct prestera_switch *sw;
+ u16 lag_id;
+ int err;
+
+ pr_port = prestera_port_dev_lower_find(br_port->dev);
+ if (!pr_port)
+ return 0;
+
+ sw = pr_port->sw;
+ err = prestera_lag_id(sw, br_port->dev, &lag_id);
+ if (err)
+ return err;
+
+ list_for_each_entry(pr_port, &sw->port_list, list) {
+ if (pr_port->lag->lag_id == lag_id) {
+ err = prestera_port_mc_flood_set(pr_port, enabled);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int prestera_br_mdb_mc_enable_sync(struct prestera_bridge *br_dev)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_port *port;
+ bool enabled;
+ int err;
+
+ /* if mrouter exists:
+ * - make sure every mrouter receives unreg mcast traffic;
+ * if mrouter doesn't exist:
+ * - make sure every port receives unreg mcast traffic;
+ */
+ list_for_each_entry(br_port, &br_dev->port_list, head) {
+ if (br_dev->multicast_enabled && br_dev->mrouter_exist)
+ enabled = br_port->mrouter;
+ else
+ enabled = br_port->flags & BR_MCAST_FLOOD;
+
+ if (netif_is_lag_master(br_port->dev)) {
+ err = prestera_br_port_lag_mdb_mc_enable_sync(br_port,
+ enabled);
+ if (err)
+ return err;
+ continue;
+ }
+
+ port = prestera_port_dev_lower_find(br_port->dev);
+ if (!port)
+ continue;
+
+ err = prestera_port_mc_flood_set(port, enabled);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static bool
+prestera_br_mdb_port_is_member(struct prestera_br_mdb_entry *br_mdb,
+ struct net_device *orig_dev)
+{
+ struct prestera_br_mdb_port *tmp_port;
+
+ list_for_each_entry(tmp_port, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node)
+ if (tmp_port->br_port->dev == orig_dev)
+ return true;
+
+ return false;
+}
+
+static int
+prestera_mdb_port_add(struct prestera_mdb_entry *mdb,
+ struct net_device *orig_dev,
+ const unsigned char addr[ETH_ALEN], u16 vid)
+{
+ struct prestera_flood_domain *flood_domain = mdb->flood_domain;
+ int err;
+
+ if (!prestera_flood_domain_port_find(flood_domain,
+ orig_dev, vid)) {
+ err = prestera_flood_domain_port_create(flood_domain, orig_dev,
+ vid);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Sync bridge mdb (software table) with HW table (if MC is enabled). */
+static int prestera_br_mdb_sync(struct prestera_bridge *br_dev)
+{
+ struct prestera_br_mdb_port *br_mdb_port;
+ struct prestera_bridge_port *br_port;
+ struct prestera_br_mdb_entry *br_mdb;
+ struct prestera_mdb_entry *mdb;
+ struct prestera_port *pr_port;
+ int err = 0;
+
+ if (!br_dev->multicast_enabled)
+ return 0;
+
+ list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node) {
+ mdb = br_mdb->mdb;
+ /* Make sure every port that has explicitly been added to the mdb
+ * joins the specified group.
+ */
+ list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node) {
+ br_port = br_mdb_port->br_port;
+ pr_port = prestera_port_dev_lower_find(br_port->dev);
+
+ /* Match only mdb and br_mdb ports that belong to the
+ * same broadcast domain.
+ */
+ if (br_dev->vlan_enabled &&
+ !prestera_port_vlan_by_vid(pr_port,
+ mdb->vid))
+ continue;
+
+ /* If the port is not in the MDB or there is no mrouter,
+ * clear the HW mdb.
+ */
+ if (prestera_br_mdb_port_is_member(br_mdb,
+ br_mdb_port->br_port->dev) &&
+ br_dev->mrouter_exist)
+ err = prestera_mdb_port_add(mdb, br_port->dev,
+ mdb->addr,
+ mdb->vid);
+ else
+ prestera_mdb_port_del(mdb, br_port->dev);
+
+ if (err)
+ return err;
+ }
+
+ /* Make sure that every mrouter port joins every MC group in the
+ * broadcast domain. If it's not an mrouter, it should leave.
+ */
+ list_for_each_entry(br_port, &br_dev->port_list, head) {
+ pr_port = prestera_port_dev_lower_find(br_port->dev);
+
+ /* Make sure the mrouter doesn't receive traffic from
+ * another broadcast domain (e.g. from a vlan that the
+ * mrouter port is not a member of).
+ */
+ if (br_dev->vlan_enabled &&
+ !prestera_port_vlan_by_vid(pr_port,
+ mdb->vid))
+ continue;
+
+ if (br_port->mrouter) {
+ err = prestera_mdb_port_add(mdb, br_port->dev,
+ mdb->addr,
+ mdb->vid);
+ if (err)
+ return err;
+ } else if (!br_port->mrouter &&
+ !prestera_br_mdb_port_is_member
+ (br_mdb, br_port->dev)) {
+ prestera_mdb_port_del(mdb, br_port->dev);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+prestera_mdb_enable_set(struct prestera_br_mdb_entry *br_mdb, bool enable)
+{
+ int err;
+
+ if (enable != br_mdb->enabled) {
+ if (enable)
+ err = prestera_hw_mdb_create(br_mdb->mdb);
+ else
+ err = prestera_hw_mdb_destroy(br_mdb->mdb);
+
+ if (err)
+ return err;
+
+ br_mdb->enabled = enable;
+ }
+
+ return 0;
+}
+
+static int
+prestera_br_mdb_enable_set(struct prestera_bridge *br_dev, bool enable)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+ int err;
+
+ list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node) {
+ err = prestera_mdb_enable_set(br_mdb, enable);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int prestera_port_attr_br_mc_disabled_set(struct prestera_port *port,
+ struct net_device *orig_dev,
+ bool mc_disabled)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge *br_dev;
+
+ br_dev = prestera_bridge_find(sw, orig_dev);
+ if (!br_dev)
+ return 0;
+
+ br_dev->multicast_enabled = !mc_disabled;
+
+ /* There's no point in enabling the mdb back if the mrouter is missing. */
+ WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled &&
+ br_dev->mrouter_exist));
+
+ WARN_ON(prestera_br_mdb_sync(br_dev));
+
+ WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev));
+
+ return 0;
+}
+
+static bool
+prestera_bridge_mdb_mc_mrouter_exists(struct prestera_bridge *br_dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ list_for_each_entry(br_port, &br_dev->port_list, head)
+ if (br_port->mrouter)
+ return true;
+
+ return false;
+}
+
+static int
+prestera_port_attr_mrouter_set(struct prestera_port *port,
+ struct net_device *orig_dev,
+ bool is_port_mrouter)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *br_dev;
+
+ br_port = prestera_bridge_port_find(port->sw, orig_dev);
+ if (!br_port)
+ return 0;
+
+ br_dev = br_port->bridge;
+ br_port->mrouter = is_port_mrouter;
+
+ br_dev->mrouter_exist = prestera_bridge_mdb_mc_mrouter_exists(br_dev);
+
+ /* Enable MDB processing only if an mrouter exists and MC is enabled.
+ * If MC is enabled but there is no mrouter, the device floods all
+ * multicast traffic (even if the MDB table is not empty) using the
+ * bridge's flood capabilities (without the use of flood_domain).
+ */
+ WARN_ON(prestera_br_mdb_enable_set(br_dev, br_dev->multicast_enabled &&
+ br_dev->mrouter_exist));
+
+ WARN_ON(prestera_br_mdb_sync(br_dev));
+
+ WARN_ON(prestera_br_mdb_mc_enable_sync(br_dev));
+
+ return 0;
+}
+
static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
const struct switchdev_attr *attr,
struct netlink_ext_ack *extack)
@@ -730,7 +1169,7 @@ static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
if (attr->u.brport_flags.mask &
- ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
+ ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_PORT_LOCKED))
err = -EINVAL;
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
@@ -745,6 +1184,14 @@ static int prestera_port_obj_attr_set(struct net_device *dev, const void *ctx,
err = prestera_port_attr_br_vlan_set(port, attr->orig_dev,
attr->u.vlan_filtering);
break;
+ case SWITCHDEV_ATTR_ID_PORT_MROUTER:
+ err = prestera_port_attr_mrouter_set(port, attr->orig_dev,
+ attr->u.mrouter);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+ err = prestera_port_attr_br_mc_disabled_set(port, attr->orig_dev,
+ attr->u.mc_disabled);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -918,14 +1365,9 @@ prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
if (port_vlan->br_port)
return 0;
- err = prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD,
- br_port->flags);
+ err = prestera_br_port_flags_set(br_port, port);
if (err)
- return err;
-
- err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
- if (err)
- goto err_port_learning_set;
+ goto err_flags2port_set;
err = prestera_port_vid_stp_set(port, vid, br_port->stp_state);
if (err)
@@ -950,8 +1392,8 @@ prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
err_bridge_vlan_get:
prestera_port_vid_stp_set(port, vid, BR_STATE_FORWARDING);
err_port_vid_stp_set:
- prestera_hw_port_learning_set(port, false);
-err_port_learning_set:
+ prestera_br_port_flags_reset(br_port, port);
+err_flags2port_set:
return err;
}
@@ -1048,20 +1490,162 @@ static int prestera_port_vlans_add(struct prestera_port *port,
flag_pvid, extack);
}
+static struct prestera_br_mdb_entry *
+prestera_br_mdb_entry_create(struct prestera_switch *sw,
+ struct prestera_bridge *br_dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_br_mdb_entry *br_mdb_entry;
+ struct prestera_mdb_entry *mdb_entry;
+
+ br_mdb_entry = kzalloc(sizeof(*br_mdb_entry), GFP_KERNEL);
+ if (!br_mdb_entry)
+ return NULL;
+
+ mdb_entry = prestera_mdb_entry_create(sw, addr, vid);
+ if (!mdb_entry)
+ goto err_mdb_alloc;
+
+ br_mdb_entry->mdb = mdb_entry;
+ br_mdb_entry->bridge = br_dev;
+ br_mdb_entry->enabled = true;
+ INIT_LIST_HEAD(&br_mdb_entry->br_mdb_port_list);
+
+ list_add(&br_mdb_entry->br_mdb_entry_node, &br_dev->br_mdb_entry_list);
+
+ return br_mdb_entry;
+
+err_mdb_alloc:
+ kfree(br_mdb_entry);
+ return NULL;
+}
+
+static int prestera_br_mdb_port_add(struct prestera_br_mdb_entry *br_mdb,
+ struct prestera_bridge_port *br_port)
+{
+ struct prestera_br_mdb_port *br_mdb_port;
+
+ list_for_each_entry(br_mdb_port, &br_mdb->br_mdb_port_list,
+ br_mdb_port_node)
+ if (br_mdb_port->br_port == br_port)
+ return 0;
+
+ br_mdb_port = kzalloc(sizeof(*br_mdb_port), GFP_KERNEL);
+ if (!br_mdb_port)
+ return -ENOMEM;
+
+ br_mdb_port->br_port = br_port;
+ list_add(&br_mdb_port->br_mdb_port_node,
+ &br_mdb->br_mdb_port_list);
+
+ return 0;
+}
+
+static struct prestera_br_mdb_entry *
+prestera_br_mdb_entry_find(struct prestera_bridge *br_dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+
+ list_for_each_entry(br_mdb, &br_dev->br_mdb_entry_list,
+ br_mdb_entry_node)
+ if (ether_addr_equal(&br_mdb->mdb->addr[0], addr) &&
+ vid == br_mdb->mdb->vid)
+ return br_mdb;
+
+ return NULL;
+}
+
+static struct prestera_br_mdb_entry *
+prestera_br_mdb_entry_get(struct prestera_switch *sw,
+ struct prestera_bridge *br_dev,
+ const unsigned char *addr, u16 vid)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+
+ br_mdb = prestera_br_mdb_entry_find(br_dev, addr, vid);
+ if (br_mdb)
+ return br_mdb;
+
+ return prestera_br_mdb_entry_create(sw, br_dev, addr, vid);
+}
+
+static int
+prestera_mdb_port_addr_obj_add(const struct switchdev_obj_port_mdb *mdb)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *br_dev;
+ struct prestera_switch *sw;
+ struct prestera_port *port;
+ int err;
+
+ sw = prestera_switch_get(mdb->obj.orig_dev);
+ port = prestera_port_dev_lower_find(mdb->obj.orig_dev);
+
+ br_port = prestera_bridge_port_find(sw, mdb->obj.orig_dev);
+ if (!br_port)
+ return 0;
+
+ br_dev = br_port->bridge;
+
+ if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid))
+ return 0;
+
+ if (mdb->vid)
+ br_mdb = prestera_br_mdb_entry_get(sw, br_dev, &mdb->addr[0],
+ mdb->vid);
+ else
+ br_mdb = prestera_br_mdb_entry_get(sw, br_dev, &mdb->addr[0],
+ br_dev->bridge_id);
+
+ if (!br_mdb)
+ return -ENOMEM;
+
+ /* Make sure newly allocated MDB entry gets disabled if either MC is
+ * disabled, or the mrouter does not exist.
+ */
+ WARN_ON(prestera_mdb_enable_set(br_mdb, br_dev->multicast_enabled &&
+ br_dev->mrouter_exist));
+
+ err = prestera_br_mdb_port_add(br_mdb, br_port);
+ if (err) {
+ prestera_br_mdb_entry_put(br_mdb);
+ return err;
+ }
+
+ err = prestera_br_mdb_sync(br_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int prestera_port_obj_add(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
struct prestera_port *port = netdev_priv(dev);
const struct switchdev_obj_port_vlan *vlan;
+ const struct switchdev_obj_port_mdb *mdb;
+ int err = 0;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
return prestera_port_vlans_add(port, vlan, extack);
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ err = prestera_mdb_port_addr_obj_add(mdb);
+ break;
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ fallthrough;
default:
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ break;
}
+
+ return err;
}
static int prestera_port_vlans_del(struct prestera_port *port,
@@ -1086,17 +1670,71 @@ static int prestera_port_vlans_del(struct prestera_port *port,
return 0;
}
+static int
+prestera_mdb_port_addr_obj_del(struct prestera_port *port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct prestera_br_mdb_entry *br_mdb;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *br_dev;
+ int err;
+
+ /* Bridge port no longer exists - and neither does this MDB entry */
+ br_port = prestera_bridge_port_find(port->sw, mdb->obj.orig_dev);
+ if (!br_port)
+ return 0;
+
+ /* Removing an MDB entry with a non-existing VLAN is not supported */
+ if (mdb->vid && !prestera_port_vlan_by_vid(port, mdb->vid))
+ return 0;
+
+ br_dev = br_port->bridge;
+
+ if (br_port->bridge->vlan_enabled)
+ br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0],
+ mdb->vid);
+ else
+ br_mdb = prestera_br_mdb_entry_find(br_dev, &mdb->addr[0],
+ br_port->bridge->bridge_id);
+
+ if (!br_mdb)
+ return 0;
+
+ /* Since this port might have been the last one in the MDB group, we
+ * have to remove it from both the software and HW MDB, sync the MDB
+ * table, and then destroy the software MDB entry (if needed).
+ */
+ prestera_br_mdb_port_del(br_mdb, br_port);
+
+ prestera_br_mdb_entry_put(br_mdb);
+
+ err = prestera_br_mdb_sync(br_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int prestera_port_obj_del(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj)
{
struct prestera_port *port = netdev_priv(dev);
+ const struct switchdev_obj_port_mdb *mdb;
+ int err = 0;
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
return prestera_port_vlans_del(port, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ err = prestera_mdb_port_addr_obj_del(port, mdb);
+ break;
default:
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ break;
}
+
+ return err;
}
static int prestera_switchdev_blk_event(struct notifier_block *unused,
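
Both switchdev attribute handlers above (mrouter and mc_disabled) finish with the same three-step resync, keyed on the invariant that HW MDB entries are only active while multicast snooping is enabled and at least one mrouter port exists. A hedged sketch of that sequence factored into one helper; the helper itself is illustrative and does not exist in the patch, which open-codes the calls in each handler.

/* Illustrative only: the patch open-codes this sequence in both handlers. */
static int example_br_mdb_resync(struct prestera_bridge *br_dev)
{
	bool enable = br_dev->multicast_enabled && br_dev->mrouter_exist;
	int err;

	err = prestera_br_mdb_enable_set(br_dev, enable);
	if (err)
		return err;

	err = prestera_br_mdb_sync(br_dev);
	if (err)
		return err;

	return prestera_br_mdb_mc_enable_sync(br_dev);
}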
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 1d607bc6b59e..d5691b6a2bc5 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -965,7 +965,7 @@ static int pxa168_init_phy(struct net_device *dev)
if (dev->phydev)
return 0;
- phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
+ phy = mdiobus_scan_c22(pep->smi_bus, pep->phy_addr);
if (IS_ERR(phy))
return PTR_ERR(phy);
@@ -1354,10 +1354,10 @@ static void pxa168_eth_netpoll(struct net_device *dev)
static void pxa168_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
- strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strscpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
static const struct ethtool_ops pxa168_ethtool_ops = {
@@ -1388,7 +1388,6 @@ static int pxa168_eth_probe(struct platform_device *pdev)
{
struct pxa168_eth_private *pep = NULL;
struct net_device *dev = NULL;
- struct resource *res;
struct clk *clk;
struct device_node *np;
int err;
@@ -1419,9 +1418,11 @@ static int pxa168_eth_probe(struct platform_device *pdev)
goto err_netdev;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- BUG_ON(!res);
- dev->irq = res->start;
+ err = platform_get_irq(pdev, 0);
+ if (err == -EPROBE_DEFER)
+ goto err_netdev;
+ BUG_ON(dev->irq < 0);
+ dev->irq = err;
dev->netdev_ops = &pxa168_eth_netdev_ops;
dev->watchdog_timeo = 2 * HZ;
dev->base_addr = 0;
@@ -1485,7 +1486,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
/* Hardware supports only 3 ports */
BUG_ON(pep->port_num > 2);
- netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
+ netif_napi_add_weight(dev, &pep->napi, pxa168_rx_poll,
+ pep->rx_ring_size);
memset(&pep->timeout, 0, sizeof(struct timer_list));
timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0);
@@ -1584,7 +1586,7 @@ static struct platform_driver pxa168_eth_driver = {
.suspend = pxa168_eth_suspend,
.driver = {
.name = DRIVER_NAME,
- .of_match_table = of_match_ptr(pxa168_eth_of_match),
+ .of_match_table = pxa168_eth_of_match,
},
};
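
The pxa168 probe change above drops the platform_get_resource(IORESOURCE_IRQ) lookup in favour of platform_get_irq(). For reference, a minimal sketch of the usual shape of that call in a probe path, checking the return value before storing it (which also covers -EPROBE_DEFER); the helper name is illustrative and not taken from the driver.

static int example_get_irq(struct platform_device *pdev,
			   struct net_device *dev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER as well */

	dev->irq = irq;
	return 0;
}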
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 0c864e5bf0a6..1b43704baceb 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -50,7 +50,6 @@
#define PHY_RETRIES 1000
#define ETH_JUMBO_MTU 9000
#define TX_WATCHDOG (5 * HZ)
-#define NAPI_WEIGHT 64
#define BLINK_MS 250
#define LINK_HZ HZ
@@ -395,9 +394,9 @@ static void skge_get_drvinfo(struct net_device *dev,
{
struct skge_port *skge = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(skge->hw->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(skge->hw->pdev),
sizeof(info->bus_info));
}
@@ -492,7 +491,9 @@ static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
static void skge_get_ring_param(struct net_device *dev,
- struct ethtool_ringparam *p)
+ struct ethtool_ringparam *p,
+ struct kernel_ethtool_ringparam *kernel_p,
+ struct netlink_ext_ack *extack)
{
struct skge_port *skge = netdev_priv(dev);
@@ -504,7 +505,9 @@ static void skge_get_ring_param(struct net_device *dev,
}
static int skge_set_ring_param(struct net_device *dev,
- struct ethtool_ringparam *p)
+ struct ethtool_ringparam *p,
+ struct kernel_ethtool_ringparam *kernel_p,
+ struct netlink_ext_ack *extack)
{
struct skge_port *skge = netdev_priv(dev);
int err = 0;
@@ -3829,7 +3832,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
dev->features |= NETIF_F_HIGHDMA;
skge = netdev_priv(dev);
- netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT);
+ netif_napi_add(dev, &skge->napi, skge_poll);
skge->netdev = dev;
skge->hw = hw;
skge->msg_enable = netif_msg_init(debug, default_msg);
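
Several hunks in this series (skge, sky2, prestera_rxtx, mtk) migrate to the weightless netif_napi_add(): the weight argument is gone and the core default NAPI_POLL_WEIGHT (64) is used, which is why the local NAPI_WEIGHT defines are deleted. Drivers that genuinely need a non-default weight switch to netif_napi_add_weight() instead, as the pxa168 hunk below does. A brief annotated before/after, reusing the skge identifiers from the hunk above:

/* Before: explicit weight argument.
 * netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT);
 */

/* After: weight defaults to NAPI_POLL_WEIGHT (64). */
netif_napi_add(dev, &skge->napi, skge_poll);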
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 28b5b9341145..7c487f9b36ec 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -63,7 +63,6 @@
#define TX_DEF_PENDING 63
#define TX_WATCHDOG (5 * HZ)
-#define NAPI_WEIGHT 64
#define PHY_RETRIES 1000
#define SKY2_EEPROM_MAGIC 0x9955aabb
@@ -1864,7 +1863,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
if (mss != 0) {
if (!(hw->flags & SKY2_HW_NEW_LE))
- mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
+ mss += skb_tcp_all_headers(skb);
if (mss != sky2->tx_last_mss) {
le = get_tx_le(sky2, &slot);
@@ -3688,9 +3687,9 @@ static void sky2_get_drvinfo(struct net_device *dev,
{
struct sky2_port *sky2 = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(sky2->hw->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(sky2->hw->pdev),
sizeof(info->bus_info));
}
@@ -3895,19 +3894,19 @@ static void sky2_get_stats(struct net_device *dev,
u64 _bytes, _packets;
do {
- start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp);
+ start = u64_stats_fetch_begin(&sky2->rx_stats.syncp);
_bytes = sky2->rx_stats.bytes;
_packets = sky2->rx_stats.packets;
- } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start));
+ } while (u64_stats_fetch_retry(&sky2->rx_stats.syncp, start));
stats->rx_packets = _packets;
stats->rx_bytes = _bytes;
do {
- start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp);
+ start = u64_stats_fetch_begin(&sky2->tx_stats.syncp);
_bytes = sky2->tx_stats.bytes;
_packets = sky2->tx_stats.packets;
- } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start));
+ } while (u64_stats_fetch_retry(&sky2->tx_stats.syncp, start));
stats->tx_packets = _packets;
stats->tx_bytes = _bytes;
@@ -4149,7 +4148,9 @@ static unsigned long roundup_ring_size(unsigned long pending)
}
static void sky2_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct sky2_port *sky2 = netdev_priv(dev);
@@ -4161,7 +4162,9 @@ static void sky2_get_ringparam(struct net_device *dev,
}
static int sky2_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ering)
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct sky2_port *sky2 = netdev_priv(dev);
@@ -4266,96 +4269,36 @@ static int sky2_get_eeprom_len(struct net_device *dev)
return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
}
-static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
-{
- unsigned long start = jiffies;
-
- while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
- /* Can take up to 10.6 ms for write */
- if (time_after(jiffies, start + HZ/4)) {
- dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
- return -ETIMEDOUT;
- }
- msleep(1);
- }
-
- return 0;
-}
-
-static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data,
- u16 offset, size_t length)
-{
- int rc = 0;
-
- while (length > 0) {
- u32 val;
-
- sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset);
- rc = sky2_vpd_wait(hw, cap, 0);
- if (rc)
- break;
-
- val = sky2_pci_read32(hw, cap + PCI_VPD_DATA);
-
- memcpy(data, &val, min(sizeof(val), length));
- offset += sizeof(u32);
- data += sizeof(u32);
- length -= sizeof(u32);
- }
-
- return rc;
-}
-
-static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data,
- u16 offset, unsigned int length)
-{
- unsigned int i;
- int rc = 0;
-
- for (i = 0; i < length; i += sizeof(u32)) {
- u32 val = *(u32 *)(data + i);
-
- sky2_pci_write32(hw, cap + PCI_VPD_DATA, val);
- sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F);
-
- rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F);
- if (rc)
- break;
- }
- return rc;
-}
-
static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 *data)
{
struct sky2_port *sky2 = netdev_priv(dev);
- int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
-
- if (!cap)
- return -EINVAL;
+ int rc;
eeprom->magic = SKY2_EEPROM_MAGIC;
+ rc = pci_read_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len,
+ data);
+ if (rc < 0)
+ return rc;
- return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len);
+ eeprom->len = rc;
+
+ return 0;
}
static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 *data)
{
struct sky2_port *sky2 = netdev_priv(dev);
- int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
-
- if (!cap)
- return -EINVAL;
+ int rc;
if (eeprom->magic != SKY2_EEPROM_MAGIC)
return -EINVAL;
- /* Partial writes not supported */
- if ((eeprom->offset & 3) || (eeprom->len & 3))
- return -EINVAL;
+ rc = pci_write_vpd_any(sky2->hw->pdev, eeprom->offset, eeprom->len,
+ data);
- return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
+ return rc < 0 ? rc : 0;
}
static netdev_features_t sky2_fix_features(struct net_device *dev,
@@ -4768,7 +4711,7 @@ static irqreturn_t sky2_test_intr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Test interrupt path by forcing a a software IRQ */
+/* Test interrupt path by forcing a software IRQ */
static int sky2_test_msi(struct sky2_hw *hw)
{
struct pci_dev *pdev = hw->pdev;
@@ -4994,7 +4937,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
+ netif_napi_add(dev, &hw->napi, sky2_poll);
err = register_netdev(dev);
if (err) {
@@ -5070,7 +5013,7 @@ static void sky2_remove(struct pci_dev *pdev)
if (!hw)
return;
- del_timer_sync(&hw->watchdog_timer);
+ timer_shutdown_sync(&hw->watchdog_timer);
cancel_work_sync(&hw->restart_work);
for (i = hw->ports-1; i >= 0; --i)
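
The sky2_get_stats() hunk above switches from u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() to the plain u64_stats_fetch_begin()/u64_stats_fetch_retry() helpers, following the tree-wide retirement of the _irq variants. The reader-side pattern is unchanged: snapshot the counters and retry until the sequence count is stable. A minimal sketch, assuming a hypothetical stats structure with a struct u64_stats_sync member named syncp:

	unsigned int start;
	u64 bytes, packets;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		bytes = stats->bytes;
		packets = stats->packets;
	} while (u64_stats_fetch_retry(&stats->syncp, start));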
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index c357c193378e..da0db417ab69 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -1,17 +1,26 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_VENDOR_MEDIATEK
bool "MediaTek devices"
- depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620
+ depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST
help
If you have a Mediatek SoC with ethernet, say Y.
if NET_VENDOR_MEDIATEK
+config NET_MEDIATEK_SOC_WED
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ def_bool NET_MEDIATEK_SOC != n
+
config NET_MEDIATEK_SOC
tristate "MediaTek SoC Gigabit Ethernet support"
depends on NET_DSA || !NET_DSA
+ select PINCTRL
select PHYLINK
select DIMLIB
+ select PAGE_POOL
+ select PAGE_POOL_STATS
+ select PCS_MTK_LYNXI
+ select REGMAP_MMIO
help
This driver supports the gigabit ethernet MACs in the
MediaTek SoC family.
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index 79d4cdbbcbf5..03e008fbc859 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -4,5 +4,10 @@
#
obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
-mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+mtk_eth-y := mtk_eth_soc.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o
+ifdef CONFIG_DEBUG_FS
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
+endif
+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
index 72648535a14d..317e447f4991 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_path.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -96,12 +96,20 @@ static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path)
static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
{
- unsigned int val = 0;
+ unsigned int val = 0, mask = 0, reg = 0;
bool updated = true;
switch (path) {
case MTK_ETH_PATH_GMAC2_SGMII:
- val = CO_QPHY_SEL;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_U3_COPHY_V2)) {
+ reg = USB_PHY_SWITCH_REG;
+ val = SGMII_QPHY_SEL;
+ mask = QPHY_SEL_MASK;
+ } else {
+ reg = INFRA_MISC2;
+ val = CO_QPHY_SEL;
+ mask = val;
+ }
break;
default:
updated = false;
@@ -109,7 +117,7 @@ static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path)
}
if (updated)
- regmap_update_bits(eth->infra, INFRA_MISC2, CO_QPHY_SEL, val);
+ regmap_update_bits(eth->infra, reg, mask, val);
dev_dbg(eth->dev, "path %s in %s updated = %d\n",
mtk_eth_path_name(path), __func__, updated);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 75d67d1b5f6b..a75fd072082c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -9,6 +9,7 @@
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
@@ -19,10 +20,14 @@
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
+#include <linux/pcs/pcs-mtk-lynxi.h>
#include <linux/jhash.h>
+#include <linux/bitfield.h>
#include <net/dsa.h>
+#include <net/dst_metadata.h>
#include "mtk_eth_soc.h"
+#include "mtk_wed.h"
static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
@@ -31,6 +36,122 @@ MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
#define MTK_ETHTOOL_STAT(x) { #x, \
offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
+#define MTK_ETHTOOL_XDP_STAT(x) { #x, \
+ offsetof(struct mtk_hw_stats, xdp_stats.x) / \
+ sizeof(u64) }
+
+static const struct mtk_reg_map mtk_reg_map = {
+ .tx_irq_mask = 0x1a1c,
+ .tx_irq_status = 0x1a18,
+ .pdma = {
+ .rx_ptr = 0x0900,
+ .rx_cnt_cfg = 0x0904,
+ .pcrx_ptr = 0x0908,
+ .glo_cfg = 0x0a04,
+ .rst_idx = 0x0a08,
+ .delay_irq = 0x0a0c,
+ .irq_status = 0x0a20,
+ .irq_mask = 0x0a28,
+ .adma_rx_dbg0 = 0x0a38,
+ .int_grp = 0x0a50,
+ },
+ .qdma = {
+ .qtx_cfg = 0x1800,
+ .qtx_sch = 0x1804,
+ .rx_ptr = 0x1900,
+ .rx_cnt_cfg = 0x1904,
+ .qcrx_ptr = 0x1908,
+ .glo_cfg = 0x1a04,
+ .rst_idx = 0x1a08,
+ .delay_irq = 0x1a0c,
+ .fc_th = 0x1a10,
+ .tx_sch_rate = 0x1a14,
+ .int_grp = 0x1a20,
+ .hred = 0x1a44,
+ .ctx_ptr = 0x1b00,
+ .dtx_ptr = 0x1b04,
+ .crx_ptr = 0x1b10,
+ .drx_ptr = 0x1b14,
+ .fq_head = 0x1b20,
+ .fq_tail = 0x1b24,
+ .fq_count = 0x1b28,
+ .fq_blen = 0x1b2c,
+ },
+ .gdm1_cnt = 0x2400,
+ .gdma_to_ppe = 0x4444,
+ .ppe_base = 0x0c00,
+ .wdma_base = {
+ [0] = 0x2800,
+ [1] = 0x2c00,
+ },
+ .pse_iq_sta = 0x0110,
+ .pse_oq_sta = 0x0118,
+};
+
+static const struct mtk_reg_map mt7628_reg_map = {
+ .tx_irq_mask = 0x0a28,
+ .tx_irq_status = 0x0a20,
+ .pdma = {
+ .rx_ptr = 0x0900,
+ .rx_cnt_cfg = 0x0904,
+ .pcrx_ptr = 0x0908,
+ .glo_cfg = 0x0a04,
+ .rst_idx = 0x0a08,
+ .delay_irq = 0x0a0c,
+ .irq_status = 0x0a20,
+ .irq_mask = 0x0a28,
+ .int_grp = 0x0a50,
+ },
+};
+
+static const struct mtk_reg_map mt7986_reg_map = {
+ .tx_irq_mask = 0x461c,
+ .tx_irq_status = 0x4618,
+ .pdma = {
+ .rx_ptr = 0x6100,
+ .rx_cnt_cfg = 0x6104,
+ .pcrx_ptr = 0x6108,
+ .glo_cfg = 0x6204,
+ .rst_idx = 0x6208,
+ .delay_irq = 0x620c,
+ .irq_status = 0x6220,
+ .irq_mask = 0x6228,
+ .adma_rx_dbg0 = 0x6238,
+ .int_grp = 0x6250,
+ },
+ .qdma = {
+ .qtx_cfg = 0x4400,
+ .qtx_sch = 0x4404,
+ .rx_ptr = 0x4500,
+ .rx_cnt_cfg = 0x4504,
+ .qcrx_ptr = 0x4508,
+ .glo_cfg = 0x4604,
+ .rst_idx = 0x4608,
+ .delay_irq = 0x460c,
+ .fc_th = 0x4610,
+ .int_grp = 0x4620,
+ .hred = 0x4644,
+ .ctx_ptr = 0x4700,
+ .dtx_ptr = 0x4704,
+ .crx_ptr = 0x4710,
+ .drx_ptr = 0x4714,
+ .fq_head = 0x4720,
+ .fq_tail = 0x4724,
+ .fq_count = 0x4728,
+ .fq_blen = 0x472c,
+ .tx_sch_rate = 0x4798,
+ },
+ .gdm1_cnt = 0x1c00,
+ .gdma_to_ppe = 0x3333,
+ .ppe_base = 0x2000,
+ .wdma_base = {
+ [0] = 0x4800,
+ [1] = 0x4c00,
+ },
+ .pse_iq_sta = 0x0180,
+ .pse_oq_sta = 0x01a0,
+};
+
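The three mtk_reg_map tables above replace the driver's old compile-time register macros: each SoC generation now carries its own PDMA/QDMA/GDMA offsets, and the code paths further down dereference eth->soc->reg_map instead of fixed constants. A minimal sketch of the resulting access pattern (illustrative only; the exact mtk_soc_data wiring is assumed from how reg_map is used elsewhere in this patch):

static const struct mtk_soc_data mt7986_data_sketch = {
	.reg_map = &mt7986_reg_map,
	/* ... caps, clocks, txrx descriptor sizes, ... */
};

static void sketch_ack_tx_irq(struct mtk_eth *eth, u32 mask)
{
	/* the offset comes from the per-SoC table, not a #define */
	mtk_w32(eth, mask, eth->soc->reg_map->tx_irq_status);
}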
/* strings used by ethtool */
static const struct mtk_ethtool_stats {
char str[ETH_GSTRING_LEN];
@@ -48,13 +169,20 @@ static const struct mtk_ethtool_stats {
MTK_ETHTOOL_STAT(rx_long_errors),
MTK_ETHTOOL_STAT(rx_checksum_errors),
MTK_ETHTOOL_STAT(rx_flow_control_packets),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
};
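The MTK_ETHTOOL_XDP_STAT entries added above expose the new per-MAC XDP counters in struct mtk_hw_stats through ethtool -S. They are updated under the same u64_stats_sync as the existing MIB counters (see the xdp_stats updates further down in this patch), so a consumer reads them with the usual fetch/retry loop; a small sketch with placeholder variable names:

	unsigned int start;
	u64 redirects;

	do {
		start = u64_stats_fetch_begin(&hw_stats->syncp);
		redirects = hw_stats->xdp_stats.rx_xdp_redirect;
	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));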
static const char * const mtk_clks_source_name[] = {
"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
- "sgmii_ck", "eth2pll",
+ "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
@@ -91,61 +219,155 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth)
}
dev_err(eth->dev, "mdio: MDIO timeout\n");
- return -1;
+ return -ETIMEDOUT;
}
-static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
- u32 phy_register, u32 write_data)
+static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
+ u32 write_data)
{
- if (mtk_mdio_busy_wait(eth))
- return -1;
+ int ret;
- write_data &= 0xffff;
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
- mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
- (phy_register << PHY_IAC_REG_SHIFT) |
- (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C22 |
+ PHY_IAC_CMD_WRITE |
+ PHY_IAC_REG(phy_reg) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(write_data),
MTK_PHY_IAC);
- if (mtk_mdio_busy_wait(eth))
- return -1;
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
return 0;
}
-static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
+static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
+ u32 devad, u32 phy_reg, u32 write_data)
{
- u32 d;
+ int ret;
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
- if (mtk_mdio_busy_wait(eth))
- return 0xffff;
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_ADDR |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(phy_reg),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
- mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
- (phy_reg << PHY_IAC_REG_SHIFT) |
- (phy_addr << PHY_IAC_ADDR_SHIFT),
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_WRITE |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(write_data),
MTK_PHY_IAC);
- if (mtk_mdio_busy_wait(eth))
- return 0xffff;
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
- d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
+ return 0;
+}
+
+static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
+{
+ int ret;
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C22 |
+ PHY_IAC_CMD_C22_READ |
+ PHY_IAC_REG(phy_reg) |
+ PHY_IAC_ADDR(phy_addr),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
+}
+
+static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
+ u32 devad, u32 phy_reg)
+{
+ int ret;
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_ADDR |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(phy_reg),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_READ |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
- return d;
+ return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
}
-static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
- int phy_reg, u16 val)
+static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
+ int phy_reg, u16 val)
{
struct mtk_eth *eth = bus->priv;
- return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
+ return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
}
-static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
+static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
+ int devad, int phy_reg, u16 val)
{
struct mtk_eth *eth = bus->priv;
- return _mtk_mdio_read(eth, phy_addr, phy_reg);
+ return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
+}
+
+static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
+{
+ struct mtk_eth *eth = bus->priv;
+
+ return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
+}
+
+static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
+ int phy_reg)
+{
+ struct mtk_eth *eth = bus->priv;
+
+ return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
}
static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
@@ -153,17 +375,6 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
{
u32 val;
- /* Check DDR memory type.
- * Currently TRGMII mode with DDR2 memory is not supported.
- */
- regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
- if (interface == PHY_INTERFACE_MODE_TRGMII &&
- val & SYSCFG_DRAM_TYPE_DDR2) {
- dev_err(eth->dev,
- "TRGMII mode with DDR2 memory is not supported!\n");
- return -EOPNOTSUPP;
- }
-
val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
@@ -176,38 +387,61 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
phy_interface_t interface, int speed)
{
- u32 val;
+ unsigned long rate;
+ u32 tck, rck, intf;
int ret;
if (interface == PHY_INTERFACE_MODE_TRGMII) {
mtk_w32(eth, TRGMII_MODE, INTF_MODE);
- val = 500000000;
- ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
if (ret)
dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
return;
}
- val = (speed == SPEED_1000) ?
- INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
- mtk_w32(eth, val, INTF_MODE);
+ if (speed == SPEED_1000) {
+ intf = INTF_MODE_RGMII_1000;
+ rate = 250000000;
+ rck = RCK_CTRL_RGMII_1000;
+ tck = TCK_CTRL_RGMII_1000;
+ } else {
+ intf = INTF_MODE_RGMII_10_100;
+ rate = 500000000;
+ rck = RCK_CTRL_RGMII_10_100;
+ tck = TCK_CTRL_RGMII_10_100;
+ }
+
+ mtk_w32(eth, intf, INTF_MODE);
regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
ETHSYS_TRGMII_CLK_SEL362_5,
ETHSYS_TRGMII_CLK_SEL362_5);
- val = (speed == SPEED_1000) ? 250000000 : 500000000;
- ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], rate);
if (ret)
dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
- val = (speed == SPEED_1000) ?
- RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
- mtk_w32(eth, val, TRGMII_RCK_CTRL);
+ mtk_w32(eth, rck, TRGMII_RCK_CTRL);
+ mtk_w32(eth, tck, TRGMII_TCK_CTRL);
+}
+
+static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ unsigned int sid;
+
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(interface)) {
+ sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
+ 0 : mac->id;
+
+ return eth->sgmii_pcs[sid];
+ }
- val = (speed == SPEED_1000) ?
- TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
- mtk_w32(eth, val, TRGMII_TCK_CTRL);
+ return NULL;
}
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
@@ -216,8 +450,8 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
struct mtk_eth *eth = mac->hw;
- u32 mcr_cur, mcr_new, sid, i;
- int val, ge_mode, err;
+ int val, ge_mode, err = 0;
+ u32 i;
/* MT76x8 has no hardware settings between for the MAC */
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
@@ -225,19 +459,11 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
/* Setup soc pin functions */
switch (state->interface) {
case PHY_INTERFACE_MODE_TRGMII:
- if (mac->id)
- goto err_phy;
- if (!MTK_HAS_CAPS(mac->hw->soc->caps,
- MTK_GMAC1_TRGMII))
- goto err_phy;
- fallthrough;
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_REVMII:
- case PHY_INTERFACE_MODE_RMII:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
err = mtk_gmac_rgmii_path_setup(eth, mac->id);
if (err)
@@ -247,11 +473,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
case PHY_INTERFACE_MODE_SGMII:
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
- err = mtk_gmac_sgmii_path_setup(eth, mac->id);
- if (err)
- goto init_err;
- }
+ err = mtk_gmac_sgmii_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
break;
case PHY_INTERFACE_MODE_GMII:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
@@ -274,6 +498,14 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
state->interface))
goto err_phy;
} else {
+ /* FIXME: this is incorrect. Not only does it
+ * use state->speed (which is not guaranteed
+ * to be correct) but it also makes use of it
+ * in a code path that will only be reachable
+ * when the PHY interface mode changes, not
+ * when the speed changes. Consequently, RGMII
+ * is probably broken.
+ */
mtk_gmac0_rgmii_adjust(mac->hw,
state->interface,
state->speed);
@@ -291,21 +523,13 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
}
}
- ge_mode = 0;
switch (state->interface) {
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
ge_mode = 1;
break;
- case PHY_INTERFACE_MODE_REVMII:
- ge_mode = 2;
- break;
- case PHY_INTERFACE_MODE_RMII:
- if (mac->id)
- goto err_phy;
- ge_mode = 3;
- break;
default:
+ ge_mode = 0;
break;
}
@@ -330,38 +554,14 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
SYSCFG0_SGMII_MASK,
~(u32)SYSCFG0_SGMII_MASK);
- /* Decide how GMAC and SGMIISYS be mapped */
- sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
- 0 : mac->id;
-
- /* Setup SGMIISYS with the determined property */
- if (state->interface != PHY_INTERFACE_MODE_SGMII)
- err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
- state);
- else if (phylink_autoneg_inband(mode))
- err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
-
- if (err)
- goto init_err;
-
- regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
- SYSCFG0_SGMII_MASK, val);
+ /* Save the syscfg0 value for mac_finish */
+ mac->syscfg0 = val;
} else if (phylink_autoneg_inband(mode)) {
dev_err(eth->dev,
"In-band mode not supported in non SGMII mode!\n");
return;
}
- /* Setup gmac */
- mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- mcr_new = mcr_cur;
- mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
- MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
-
- /* Only update control register when needed! */
- if (mcr_new != mcr_cur)
- mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
-
return;
err_phy:
@@ -374,6 +574,34 @@ init_err:
mac->id, phy_modes(state->interface), err);
}
+static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ u32 mcr_cur, mcr_new;
+
+ /* Enable SGMII */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(interface))
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, mac->syscfg0);
+
+ /* Setup gmac */
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur;
+ mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
+ MAC_MCR_RX_FIFO_CLR_DIS;
+
+ /* Only update control register when needed! */
+ if (mcr_new != mcr_cur)
+ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+
+ return 0;
+}
+
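Moving the MCR and SYSCFG0 writes out of mtk_mac_config() into the new mtk_mac_finish() callback follows phylink's call order, roughly:

	/* phylink major reconfiguration (simplified):
	 *   mac_select_pcs() -> mac_config() -> pcs_config() -> mac_finish()
	 */

so the SGMII enable bits saved in mac->syscfg0 are only committed once the SGMII PCS returned by mtk_mac_select_pcs() has been programmed, and the MAC control register is touched last.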
static void mtk_mac_pcs_get_state(struct phylink_config *config,
struct phylink_link_state *state)
{
@@ -406,14 +634,6 @@ static void mtk_mac_pcs_get_state(struct phylink_config *config,
state->pause |= MLO_PAUSE_TX;
}
-static void mtk_mac_an_restart(struct phylink_config *config)
-{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
-
- mtk_sgmii_restart_an(mac->hw, mac->id);
-}
-
static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
@@ -425,6 +645,75 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
+static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
+ int speed)
+{
+ const struct mtk_soc_data *soc = eth->soc;
+ u32 ofs, val;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ return;
+
+ val = MTK_QTX_SCH_MIN_RATE_EN |
+ /* minimum: 10 Mbps */
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
+ MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
+
+ if (IS_ENABLED(CONFIG_SOC_MT7621)) {
+ switch (speed) {
+ case SPEED_10:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_100:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
+			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
+			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_1000:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (speed) {
+ case SPEED_10:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_100:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
+			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_1000:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
+ break;
+ default:
+ break;
+ }
+ }
+
+ ofs = MTK_QTX_OFFSET * idx;
+ mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
+}
+
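The MAX/MIN rate fields programmed above appear to encode the shaper rate as a mantissa times a power of ten in kbit/s; this is inferred from the values chosen together with the "minimum: 10 Mbps" comment, it is not spelled out in the patch. On the non-MT7621 path, MAN=1/EXP=4 gives 10 Mbit/s, MAN=1/EXP=5 gives 100 Mbit/s and MAN=10/EXP=5 gives 1 Gbit/s (the MT7621 constants sit slightly above line rate). A throwaway helper makes the arithmetic explicit:

/* illustrative only: decode MTK_QTX_SCH_MAX_RATE_{MAN,EXP} into kbit/s */
static u32 sketch_rate_kbps(u32 man, u32 exp)
{
	u32 rate = man;

	while (exp--)
		rate *= 10;

	return rate;	/* sketch_rate_kbps(10, 5) == 1000000, i.e. 1 Gbit/s */
}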
static void mtk_mac_link_up(struct phylink_config *config,
struct phy_device *phy,
unsigned int mode, phy_interface_t interface,
@@ -432,13 +721,15 @@ static void mtk_mac_link_up(struct phylink_config *config,
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
- u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ u32 mcr;
+ mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
MAC_MCR_FORCE_RX_FC);
/* Configure speed */
+ mac->speed = speed;
switch (speed) {
case SPEED_2500:
case SPEED_1000:
@@ -463,105 +754,21 @@ static void mtk_mac_link_up(struct phylink_config *config,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
-static void mtk_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
- phy_interface_mode_is_rgmii(state->interface)) &&
- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
- !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
- (state->interface == PHY_INTERFACE_MODE_SGMII ||
- phy_interface_mode_is_8023z(state->interface)))) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_TRGMII:
- phylink_set(mask, 1000baseT_Full);
- break;
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseX_Full);
- break;
- case PHY_INTERFACE_MODE_GMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 1000baseT_Half);
- fallthrough;
- case PHY_INTERFACE_MODE_SGMII:
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- fallthrough;
- case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_RMII:
- case PHY_INTERFACE_MODE_REVMII:
- case PHY_INTERFACE_MODE_NA:
- default:
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- break;
- }
-
- if (state->interface == PHY_INTERFACE_MODE_NA) {
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseX_Full);
- }
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseX_Full);
- }
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- }
- }
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
-}
-
static const struct phylink_mac_ops mtk_phylink_ops = {
- .validate = mtk_validate,
+ .mac_select_pcs = mtk_mac_select_pcs,
.mac_pcs_get_state = mtk_mac_pcs_get_state,
- .mac_an_restart = mtk_mac_an_restart,
.mac_config = mtk_mac_config,
+ .mac_finish = mtk_mac_finish,
.mac_link_down = mtk_mac_link_down,
.mac_link_up = mtk_mac_link_up,
};
static int mtk_mdio_init(struct mtk_eth *eth)
{
+ unsigned int max_clk = 2500000, divider;
struct device_node *mii_np;
int ret;
+ u32 val;
mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
if (!mii_np) {
@@ -581,12 +788,33 @@ static int mtk_mdio_init(struct mtk_eth *eth)
}
eth->mii_bus->name = "mdio";
- eth->mii_bus->read = mtk_mdio_read;
- eth->mii_bus->write = mtk_mdio_write;
+ eth->mii_bus->read = mtk_mdio_read_c22;
+ eth->mii_bus->write = mtk_mdio_write_c22;
+ eth->mii_bus->read_c45 = mtk_mdio_read_c45;
+ eth->mii_bus->write_c45 = mtk_mdio_write_c45;
eth->mii_bus->priv = eth;
eth->mii_bus->parent = eth->dev;
snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
+
+ if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
+ if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
+ dev_err(eth->dev, "MDIO clock frequency out of range");
+ ret = -EINVAL;
+ goto err_put_node;
+ }
+ max_clk = val;
+ }
+ divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
+
+ /* Configure MDC Divider */
+ val = mtk_r32(eth, MTK_PPSC);
+ val &= ~PPSC_MDC_CFG;
+ val |= FIELD_PREP(PPSC_MDC_CFG, divider) | PPSC_MDC_TURBO;
+ mtk_w32(eth, val, MTK_PPSC);
+
+ dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
+
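The optional "clock-frequency" property now controls the MDC divider: divider = DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), clamped to 63. Assuming MDC_MAX_FREQ is the 25 MHz reference defined in mtk_eth_soc.h (the constant is not shown in this hunk), a requested 1 MHz yields a divider of 25 and an effective MDC of 1 MHz, while leaving the property out keeps the 2.5 MHz default (divider 10).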
ret = of_mdiobus_register(eth->mii_bus, mii_np);
err_put_node:
@@ -608,8 +836,8 @@ static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, eth->tx_int_mask_reg);
- mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -619,8 +847,8 @@ static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, eth->tx_int_mask_reg);
- mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
+ mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -630,8 +858,8 @@ static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->rx_irq_lock, flags);
- val = mtk_r32(eth, MTK_PDMA_INT_MASK);
- mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
@@ -641,8 +869,8 @@ static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->rx_irq_lock, flags);
- val = mtk_r32(eth, MTK_PDMA_INT_MASK);
- mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
+ mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
@@ -693,39 +921,39 @@ void mtk_stats_update_mac(struct mtk_mac *mac)
hw_stats->rx_checksum_errors +=
mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
} else {
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
unsigned int offs = hw_stats->reg_offset;
u64 stats;
- hw_stats->rx_bytes += mtk_r32(mac->hw,
- MTK_GDM1_RX_GBCNT_L + offs);
- stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
+ hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
if (stats)
hw_stats->rx_bytes += (stats << 32);
hw_stats->rx_packets +=
- mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
hw_stats->rx_overflow +=
- mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
hw_stats->rx_fcs_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
hw_stats->rx_short_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
hw_stats->rx_long_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
hw_stats->rx_checksum_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
hw_stats->tx_skip +=
- mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
hw_stats->tx_collisions +=
- mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
hw_stats->tx_bytes +=
- mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
- stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
if (stats)
hw_stats->tx_bytes += (stats << 32);
hw_stats->tx_packets +=
- mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
}
u64_stats_update_end(&hw_stats->syncp);
@@ -760,7 +988,7 @@ static void mtk_get_stats64(struct net_device *dev,
}
do {
- start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
+ start = u64_stats_fetch_begin(&hw_stats->syncp);
storage->rx_packets = hw_stats->rx_packets;
storage->tx_packets = hw_stats->tx_packets;
storage->rx_bytes = hw_stats->rx_bytes;
@@ -772,7 +1000,7 @@ static void mtk_get_stats64(struct net_device *dev,
storage->rx_crc_errors = hw_stats->rx_fcs_errors;
storage->rx_errors = hw_stats->rx_checksum_errors;
storage->tx_aborted_errors = hw_stats->tx_skip;
- } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
+ } while (u64_stats_fetch_retry(&hw_stats->syncp, start));
storage->tx_errors = dev->stats.tx_errors;
storage->rx_dropped = dev->stats.rx_dropped;
@@ -799,8 +1027,8 @@ static inline int mtk_max_buf_size(int frag_size)
return buf_size;
}
-static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
- struct mtk_rx_dma *dma_rxd)
+static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
+ struct mtk_rx_dma_v2 *dma_rxd)
{
rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
if (!(rxd->rxd2 & RX_DMA_DONE))
@@ -809,67 +1037,89 @@ static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+ rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+ }
return true;
}
+static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
+{
+ unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
+ unsigned long data;
+
+ data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
+ get_order(size));
+
+ return (void *)data;
+}
+
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
dma_addr_t phy_ring_tail;
- int cnt = MTK_DMA_SIZE;
+ int cnt = MTK_QDMA_RING_SIZE;
dma_addr_t dma_addr;
int i;
- eth->scratch_ring = dma_alloc_coherent(eth->dev,
- cnt * sizeof(struct mtk_tx_dma),
+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
+ cnt * soc->txrx.txd_size,
&eth->phy_scratch_ring,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
- eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
- GFP_KERNEL);
+ eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
if (unlikely(!eth->scratch_head))
return -ENOMEM;
- dma_addr = dma_map_single(eth->dev,
+ dma_addr = dma_map_single(eth->dma_dev,
eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
- phy_ring_tail = eth->phy_scratch_ring +
- (sizeof(struct mtk_tx_dma) * (cnt - 1));
+ phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
for (i = 0; i < cnt; i++) {
- eth->scratch_ring[i].txd1 =
- (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
+ struct mtk_tx_dma_v2 *txd;
+
+ txd = eth->scratch_ring + i * soc->txrx.txd_size;
+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
if (i < cnt - 1)
- eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
- ((i + 1) * sizeof(struct mtk_tx_dma)));
- eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
+ txd->txd2 = eth->phy_scratch_ring +
+ (i + 1) * soc->txrx.txd_size;
+
+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ txd->txd4 = 0;
+ if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
}
- mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
- mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
- mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
- mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
+ mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
+ mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
+ mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
+ mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
return 0;
}
-static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
+static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
- void *ret = ring->dma;
-
- return ret + (desc - ring->phys);
+ return ring->dma + (desc - ring->phys);
}
-static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
- struct mtk_tx_dma *txd)
+static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
+ void *txd, u32 txd_size)
{
- int idx = txd - ring->dma;
+ int idx = (txd - ring->dma) / txd_size;
return &ring->buf[idx];
}
@@ -877,54 +1127,66 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
struct mtk_tx_dma *dma)
{
- return ring->dma_pdma - ring->dma + dma;
+ return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
}
-static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
+static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
- return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
+ return (dma - ring->dma) / txd_size;
}
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
- bool napi)
+ struct xdp_frame_bulk *bq, bool napi)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(eth->dev,
+ dma_unmap_single(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
}
} else {
if (dma_unmap_len(tx_buf, dma_len0)) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
}
if (dma_unmap_len(tx_buf, dma_len1)) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr1),
dma_unmap_len(tx_buf, dma_len1),
DMA_TO_DEVICE);
}
}
- tx_buf->flags = 0;
- if (tx_buf->skb &&
- (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
- if (napi)
- napi_consume_skb(tx_buf->skb, napi);
- else
- dev_kfree_skb_any(tx_buf->skb);
+ if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ if (napi)
+ napi_consume_skb(skb, napi);
+ else
+ dev_kfree_skb_any(skb);
+ } else {
+ struct xdp_frame *xdpf = tx_buf->data;
+
+ if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
+ else if (bq)
+ xdp_return_frame_bulk(xdpf, bq);
+ else
+ xdp_return_frame(xdpf);
+ }
}
- tx_buf->skb = NULL;
+ tx_buf->flags = 0;
+ tx_buf->data = NULL;
}
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
@@ -941,7 +1203,7 @@ static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
dma_unmap_len_set(tx_buf, dma_len1, size);
} else {
- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
txd->txd1 = mapped_addr;
txd->txd2 = TX_DMA_PLEN0(size);
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -950,71 +1212,145 @@ static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
}
}
+static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct mtk_tx_dma *desc = txd;
+ u32 data;
+
+ WRITE_ONCE(desc->txd1, info->addr);
+
+ data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
+ FIELD_PREP(TX_DMA_PQID, info->qid);
+ if (info->last)
+ data |= TX_DMA_LS0;
+ WRITE_ONCE(desc->txd3, data);
+
+ data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
+ if (info->first) {
+ if (info->gso)
+ data |= TX_DMA_TSO;
+ /* tx checksum offload */
+ if (info->csum)
+ data |= TX_DMA_CHKSUM;
+ /* vlan header offload */
+ if (info->vlan)
+ data |= TX_DMA_INS_VLAN | info->vlan_tci;
+ }
+ WRITE_ONCE(desc->txd4, data);
+}
+
+static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma_v2 *desc = txd;
+ struct mtk_eth *eth = mac->hw;
+ u32 data;
+
+ WRITE_ONCE(desc->txd1, info->addr);
+
+ data = TX_DMA_PLEN0(info->size);
+ if (info->last)
+ data |= TX_DMA_LS0;
+ WRITE_ONCE(desc->txd3, data);
+
+ data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
+ data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
+ WRITE_ONCE(desc->txd4, data);
+
+ data = 0;
+ if (info->first) {
+ if (info->gso)
+ data |= TX_DMA_TSO_V2;
+ /* tx checksum offload */
+ if (info->csum)
+ data |= TX_DMA_CHKSUM_V2;
+ }
+ WRITE_ONCE(desc->txd5, data);
+
+ data = 0;
+ if (info->first && info->vlan)
+ data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
+ WRITE_ONCE(desc->txd6, data);
+
+ WRITE_ONCE(desc->txd7, 0);
+ WRITE_ONCE(desc->txd8, 0);
+}
+
+static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ mtk_tx_set_dma_desc_v2(dev, txd, info);
+ else
+ mtk_tx_set_dma_desc_v1(dev, txd, info);
+}
+
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
int tx_num, struct mtk_tx_ring *ring, bool gso)
{
+ struct mtk_tx_dma_desc_info txd_info = {
+ .size = skb_headlen(skb),
+ .gso = gso,
+ .csum = skb->ip_summed == CHECKSUM_PARTIAL,
+ .vlan = skb_vlan_tag_present(skb),
+ .qid = skb_get_queue_mapping(skb),
+ .vlan_tci = skb_vlan_tag_get(skb),
+ .first = true,
+ .last = !skb_is_nonlinear(skb),
+ };
+ struct netdev_queue *txq;
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_dma *itxd, *txd;
struct mtk_tx_dma *itxd_pdma, *txd_pdma;
struct mtk_tx_buf *itx_buf, *tx_buf;
- dma_addr_t mapped_addr;
- unsigned int nr_frags;
int i, n_desc = 1;
- u32 txd4 = 0, fport;
+ int queue = skb_get_queue_mapping(skb);
int k = 0;
+ txq = netdev_get_tx_queue(dev, queue);
itxd = ring->next_free;
itxd_pdma = qdma_to_pdma(ring, itxd);
if (itxd == ring->last_free)
return -ENOMEM;
- /* set the forward port */
- fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
- txd4 |= fport;
-
- itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
memset(itx_buf, 0, sizeof(*itx_buf));
- if (gso)
- txd4 |= TX_DMA_TSO;
-
- /* TX Checksum offload */
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- txd4 |= TX_DMA_CHKSUM;
-
- /* VLAN header offload */
- if (skb_vlan_tag_present(skb))
- txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
-
- mapped_addr = dma_map_single(eth->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
return -ENOMEM;
- WRITE_ONCE(itxd->txd1, mapped_addr);
+ mtk_tx_set_dma_desc(dev, itxd, &txd_info);
+
itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
- setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
+ setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
k++);
/* TX SG offload */
txd = itxd;
txd_pdma = qdma_to_pdma(ring, txd);
- nr_frags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < nr_frags; i++) {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
unsigned int offset = 0;
int frag_size = skb_frag_size(frag);
while (frag_size) {
- bool last_frag = false;
- unsigned int frag_map_size;
bool new_desc = true;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
(i & 0x1)) {
txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
txd_pdma = qdma_to_pdma(ring, txd);
@@ -1026,54 +1362,49 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
new_desc = false;
}
-
- frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
- frag_map_size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = min_t(unsigned int, frag_size,
+ soc->txrx.dma_max_len);
+ txd_info.qid = queue;
+ txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
+ !(frag_size - txd_info.size);
+ txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
+ offset, txd_info.size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
goto err_dma;
- if (i == nr_frags - 1 &&
- (frag_size - frag_map_size) == 0)
- last_frag = true;
-
- WRITE_ONCE(txd->txd1, mapped_addr);
- WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
- TX_DMA_PLEN0(frag_map_size) |
- last_frag * TX_DMA_LS0));
- WRITE_ONCE(txd->txd4, fport);
+ mtk_tx_set_dma_desc(dev, txd, &txd_info);
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+ soc->txrx.txd_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
- setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
- frag_map_size, k++);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
+ txd_info.size, k++);
- frag_size -= frag_map_size;
- offset += frag_map_size;
+ frag_size -= txd_info.size;
+ offset += txd_info.size;
}
}
/* store skb to cleanup */
- itx_buf->skb = skb;
+ itx_buf->type = MTK_TYPE_SKB;
+ itx_buf->data = skb;
- WRITE_ONCE(itxd->txd4, txd4);
- WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
- (!nr_frags * TX_DMA_LS0)));
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (k & 0x1)
txd_pdma->txd2 |= TX_DMA_LS0;
else
txd_pdma->txd2 |= TX_DMA_LS1;
}
- netdev_sent_queue(dev, skb->len);
+ netdev_tx_sent_queue(txq, skb->len);
skb_tx_timestamp(skb);
ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
@@ -1084,13 +1415,14 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
*/
wmb();
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
- !netdev_xmit_more())
- mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ if (netif_xmit_stopped(txq) || !netdev_xmit_more())
+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
} else {
- int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
- ring->dma_size);
+ int next_idx;
+
+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
+ ring->dma_size);
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
}
@@ -1098,13 +1430,13 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, itxd);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
/* unmap dma */
- mtk_tx_unmap(eth, tx_buf, false);
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -1114,17 +1446,16 @@ err_dma:
return -ENOMEM;
}
-static inline int mtk_cal_txd_req(struct sk_buff *skb)
+static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
{
- int i, nfrags;
+ int i, nfrags = 1;
skb_frag_t *frag;
- nfrags = 1;
if (skb_is_gso(skb)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
- MTK_TX_DMA_BUF_LEN);
+ eth->soc->txrx.dma_max_len);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
@@ -1154,7 +1485,7 @@ static void mtk_wake_queue(struct mtk_eth *eth)
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!eth->netdev[i])
continue;
- netif_wake_queue(eth->netdev[i]);
+ netif_tx_wake_all_queues(eth->netdev[i]);
}
}
@@ -1176,9 +1507,9 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto drop;
- tx_num = mtk_cal_txd_req(skb);
+ tx_num = mtk_cal_txd_req(eth, skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
netif_err(eth, tx_queued, dev,
"Tx Ring full when queue awake!\n");
spin_unlock(&eth->page_lock);
@@ -1204,7 +1535,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
spin_unlock(&eth->page_lock);
@@ -1227,9 +1558,12 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
return &eth->rx_ring[0];
for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ struct mtk_rx_dma *rxd;
+
ring = &eth->rx_ring[i];
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
+ rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ if (rxd->rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
}
@@ -1257,42 +1591,355 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
}
}
+static bool mtk_page_pool_enabled(struct mtk_eth *eth)
+{
+ return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
+}
+
+static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
+ struct xdp_rxq_info *xdp_q,
+ int id, int size)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = NUMA_NO_NODE,
+ .dev = eth->dma_dev,
+ .offset = MTK_PP_HEADROOM,
+ .max_len = MTK_PP_MAX_BUF_SIZE,
+ };
+ struct page_pool *pp;
+ int err;
+
+ pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
+ : DMA_FROM_DEVICE;
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return pp;
+
+ err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
+ eth->rx_napi.napi_id, PAGE_SIZE);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
+ if (err)
+ goto err_unregister_rxq;
+
+ return pp;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(xdp_q);
+err_free_pp:
+ page_pool_destroy(pp);
+
+ return ERR_PTR(err);
+}
+
+static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+ gfp_t gfp_mask)
+{
+ struct page *page;
+
+ page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
+ if (!page)
+ return NULL;
+
+ *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
+ return page_address(page);
+}
+
+static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
+{
+ if (ring->page_pool)
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(data), napi);
+ else
+ skb_free_frag(data);
+}
+
+static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
+ struct mtk_tx_dma_desc_info *txd_info,
+ struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
+ void *data, u16 headroom, int index, bool dma_map)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma *txd_pdma;
+
+ if (dma_map) { /* ndo_xdp_xmit */
+ txd_info->addr = dma_map_single(eth->dma_dev, data,
+ txd_info->size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
+ return -ENOMEM;
+
+ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ } else {
+ struct page *page = virt_to_head_page(data);
+
+ txd_info->addr = page_pool_get_dma_addr(page) +
+ sizeof(struct xdp_frame) + headroom;
+ dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
+ txd_info->size, DMA_BIDIRECTIONAL);
+ }
+ mtk_tx_set_dma_desc(dev, txd, txd_info);
+
+ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+
+ txd_pdma = qdma_to_pdma(ring, txd);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
+ index);
+
+ return 0;
+}
+
+static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+ struct net_device *dev, bool dma_map)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma_desc_info txd_info = {
+ .size = xdpf->len,
+ .first = true,
+ .last = !xdp_frame_has_frags(xdpf),
+ .qid = mac->id,
+ };
+ int err, index = 0, n_desc = 1, nr_frags;
+ struct mtk_tx_buf *htx_buf, *tx_buf;
+ struct mtk_tx_dma *htxd, *txd;
+ void *data = xdpf->data;
+
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ return -EBUSY;
+
+ nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
+ return -EBUSY;
+
+ spin_lock(&eth->page_lock);
+
+ txd = ring->next_free;
+ if (txd == ring->last_free) {
+ spin_unlock(&eth->page_lock);
+ return -ENOMEM;
+ }
+ htxd = txd;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ htx_buf = tx_buf;
+
+ for (;;) {
+ err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
+ data, xdpf->headroom, index, dma_map);
+ if (err < 0)
+ goto unmap;
+
+ if (txd_info.last)
+ break;
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ if (txd == ring->last_free)
+ goto unmap;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+ soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ n_desc++;
+ }
+
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = skb_frag_size(&sinfo->frags[index]);
+ txd_info.last = index + 1 == nr_frags;
+ txd_info.qid = mac->id;
+ data = skb_frag_address(&sinfo->frags[index]);
+
+ index++;
+ }
+ /* store xdpf for cleanup */
+ htx_buf->data = xdpf;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
+
+ if (index & 1)
+ txd_pdma->txd2 |= TX_DMA_LS0;
+ else
+ txd_pdma->txd2 |= TX_DMA_LS1;
+ }
+
+ ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ atomic_sub(n_desc, &ring->free_count);
+
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
+ } else {
+ int idx;
+
+ idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ MT7628_TX_CTX_IDX0);
+ }
+
+ spin_unlock(&eth->page_lock);
+
+ return 0;
+
+unmap:
+ while (htxd != txd) {
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
+
+ htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
+
+ txd_pdma->txd2 = TX_DMA_DESP2_DEF;
+ }
+
+ htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
+ }
+
+ spin_unlock(&eth->page_lock);
+
+ return err;
+}
+
+static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ struct mtk_eth *eth = mac->hw;
+ int i, nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < num_frame; i++) {
+ if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
+ break;
+ nxmit++;
+ }
+
+ u64_stats_update_begin(&hw_stats->syncp);
+ hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
+ hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
+ u64_stats_update_end(&hw_stats->syncp);
+
+ return nxmit;
+}
+
+static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
+ struct xdp_buff *xdp, struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+
+ rcu_read_lock();
+
+ prog = rcu_dereference(eth->prog);
+ if (!prog)
+ goto out;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ count = &hw_stats->xdp_stats.rx_xdp_pass;
+ goto update_stats;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
+ act = XDP_DROP;
+ break;
+ }
+
+ count = &hw_stats->xdp_stats.rx_xdp_redirect;
+ goto update_stats;
+ case XDP_TX: {
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+
+ if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
+ count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
+ act = XDP_DROP;
+ break;
+ }
+
+ count = &hw_stats->xdp_stats.rx_xdp_tx;
+ goto update_stats;
+ }
+ default:
+ bpf_warn_invalid_xdp_action(dev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ break;
+ }
+
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(xdp->data), true);
+
+update_stats:
+ u64_stats_update_begin(&hw_stats->syncp);
+ *count = *count + 1;
+ u64_stats_update_end(&hw_stats->syncp);
+out:
+ rcu_read_unlock();
+
+ return act;
+}
+
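Note that mtk_xdp_submit_frame() serves two producers: frames generated by a local XDP_TX verdict keep their page-pool DMA mapping and are only synced for the device (tagged MTK_TYPE_XDP_TX), while frames handed in through mtk_xdp_xmit(), which has the ndo_xdp_xmit signature, are freshly dma_map_single()'d and tagged MTK_TYPE_XDP_NDO. mtk_tx_unmap() uses that tag on completion to pick xdp_return_frame_rx_napi() for XDP_TX frames freed in NAPI context, and the bulk or plain xdp_return_frame() variants otherwise.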
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
struct dim_sample dim_sample = {};
struct mtk_rx_ring *ring;
+ bool xdp_flush = false;
int idx;
struct sk_buff *skb;
u8 *data, *new_data;
- struct mtk_rx_dma *rxd, trxd;
+ struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
while (done < budget) {
+ unsigned int pktlen, *rxdcsum;
struct net_device *netdev;
- unsigned int pktlen;
dma_addr_t dma_addr;
- u32 hash;
- int mac;
+ u32 hash, reason;
+ int mac = 0;
ring = mtk_get_rx_ring(eth);
if (unlikely(!ring))
goto rx_done;
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = &ring->dma[idx];
+ rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
data = ring->data[idx];
- if (!mtk_rx_get_desc(&trxd, rxd))
+ if (!mtk_rx_get_desc(eth, &trxd, rxd))
break;
/* find out which mac the packet come from. values start at 1 */
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) ||
- (trxd.rxd4 & RX_DMA_SPECIAL_TAG))
- mac = 0;
- else
- mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
- RX_DMA_FPORT_MASK) - 1;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
+ else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+ !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
+ mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
!eth->netdev[mac]))
@@ -1303,70 +1950,137 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto release_desc;
+ pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+
/* alloc new buffer */
- new_data = napi_alloc_frag(ring->frag_size);
- if (unlikely(!new_data)) {
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
- dma_addr = dma_map_single(eth->dev,
- new_data + NET_SKB_PAD +
- eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
- skb_free_frag(new_data);
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
+ if (ring->page_pool) {
+ struct page *page = virt_to_head_page(data);
+ struct xdp_buff xdp;
+ u32 ret;
+
+ new_data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr,
+ GFP_ATOMIC);
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_sync_single_for_cpu(eth->dma_dev,
+ page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
+ pktlen, page_pool_get_dma_dir(ring->page_pool));
- dma_unmap_single(eth->dev, trxd.rxd1,
- ring->buf_size, DMA_FROM_DEVICE);
+ xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
+ xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
+ false);
+ xdp_buff_clear_frags_flag(&xdp);
+
+ ret = mtk_xdp_run(eth, ring, &xdp, netdev);
+ if (ret == XDP_REDIRECT)
+ xdp_flush = true;
+
+ if (ret != XDP_PASS)
+ goto skip_rx;
+
+ skb = build_skb(data, PAGE_SIZE);
+ if (unlikely(!skb)) {
+ page_pool_put_full_page(ring->page_pool,
+ page, true);
+ netdev->stats.rx_dropped++;
+ goto skip_rx;
+ }
+
+ skb_reserve(skb, xdp.data - xdp.data_hard_start);
+ skb_put(skb, xdp.data_end - xdp.data);
+ skb_mark_for_recycle(skb);
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ new_data = napi_alloc_frag(ring->frag_size);
+ else
+ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
+
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_addr = dma_map_single(eth->dma_dev,
+ new_data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr))) {
+ skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_unmap_single(eth->dma_dev, trxd.rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
+
+ skb = build_skb(data, ring->frag_size);
+ if (unlikely(!skb)) {
+ netdev->stats.rx_dropped++;
+ skb_free_frag(data);
+ goto skip_rx;
+ }
- /* receive data */
- skb = build_skb(data, ring->frag_size);
- if (unlikely(!skb)) {
- skb_free_frag(data);
- netdev->stats.rx_dropped++;
- goto skip_rx;
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ skb_put(skb, pktlen);
}
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
- skb_put(skb, pktlen);
- if (trxd.rxd4 & eth->rx_dma_l4_valid)
+ bytes += skb->len;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
+ hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+ if (hash != MTK_RXD5_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+ rxdcsum = &trxd.rxd3;
+ } else {
+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+ if (hash != MTK_RXD4_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+ rxdcsum = &trxd.rxd4;
+ }
+
+ if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- bytes += pktlen;
- hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
- if (hash != MTK_RXD4_FOE_ENTRY) {
- hash = jhash_1word(hash, 0);
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+ /* When using VLAN untagging in combination with DSA, the
+ * hardware treats the MTK special tag as a VLAN and untags it.
+ */
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) &&
+ (trxd.rxd2 & RX_DMA_VTAG) && netdev_uses_dsa(netdev)) {
+ unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
+
+ if (port < ARRAY_SIZE(eth->dsa_meta) &&
+ eth->dsa_meta[port])
+ skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
}
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
- (trxd.rxd2 & RX_DMA_VTAG))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- RX_DMA_VID(trxd.rxd3));
+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ mtk_ppe_check_skb(eth->ppe[0], skb, hash);
+
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
-
release_desc:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
- rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
ring->calc_idx = idx;
-
done++;
}
@@ -1385,22 +2099,65 @@ rx_done:
&dim_sample);
net_dim(&eth->rx_dim, dim_sample);
+ if (xdp_flush)
+ xdp_do_flush_map();
+
return done;
}
+struct mtk_poll_state {
+ struct netdev_queue *txq;
+ unsigned int total;
+ unsigned int done;
+ unsigned int bytes;
+};
+
+static void
+mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
+ struct sk_buff *skb)
+{
+ struct netdev_queue *txq;
+ struct net_device *dev;
+ unsigned int bytes = skb->len;
+
+ state->total++;
+ eth->tx_packets++;
+ eth->tx_bytes += bytes;
+
+ dev = eth->netdev[mac];
+ if (!dev)
+ return;
+
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+ if (state->txq == txq) {
+ state->done++;
+ state->bytes += bytes;
+ return;
+ }
+
+ if (state->txq)
+ netdev_tx_completed_queue(state->txq, state->done, state->bytes);
+
+ state->txq = txq;
+ state->done = 1;
+ state->bytes = bytes;
+}
+
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
- unsigned int *done, unsigned int *bytes)
+ struct mtk_poll_state *state)
{
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
+ struct xdp_frame_bulk bq;
+ struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->last_free_ptr;
- dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
+ dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
desc = mtk_qdma_phys_to_virt(ring, cpu);
+ xdp_frame_bulk_init(&bq);
while ((cpu != dma) && budget) {
u32 next_cpu = desc->txd2;
@@ -1410,65 +2167,67 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
break;
- tx_buf = mtk_desc_to_tx_buf(ring, desc);
+ tx_buf = mtk_desc_to_tx_buf(ring, desc,
+ eth->soc->txrx.txd_size);
if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
mac = 1;
- skb = tx_buf->skb;
- if (!skb)
+ if (!tx_buf->data)
break;
- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
- bytes[mac] += skb->len;
- done[mac]++;
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB)
+ mtk_poll_tx_done(eth, state, mac, tx_buf->data);
+
budget--;
}
- mtk_tx_unmap(eth, tx_buf, true);
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
ring->last_free = desc;
atomic_inc(&ring->free_count);
cpu = next_cpu;
}
+ xdp_flush_frame_bulk(&bq);
ring->last_free_ptr = cpu;
- mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
+ mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
return budget;
}
static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
- unsigned int *done, unsigned int *bytes)
+ struct mtk_poll_state *state)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
+ struct xdp_frame_bulk bq;
+ struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->cpu_idx;
dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
+ xdp_frame_bulk_init(&bq);
while ((cpu != dma) && budget) {
tx_buf = &ring->buf[cpu];
- skb = tx_buf->skb;
- if (!skb)
+ if (!tx_buf->data)
break;
- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
- bytes[0] += skb->len;
- done[0]++;
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB)
+ mtk_poll_tx_done(eth, state, 0, tx_buf->data);
budget--;
}
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
- mtk_tx_unmap(eth, tx_buf, true);
-
- desc = &ring->dma[cpu];
+ desc = ring->dma + cpu * eth->soc->txrx.txd_size;
ring->last_free = desc;
atomic_inc(&ring->free_count);
cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
}
+ xdp_flush_frame_bulk(&bq);
ring->cpu_idx = cpu;
@@ -1479,26 +2238,15 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
struct dim_sample dim_sample = {};
- unsigned int done[MTK_MAX_DEVS];
- unsigned int bytes[MTK_MAX_DEVS];
- int total = 0, i;
-
- memset(done, 0, sizeof(done));
- memset(bytes, 0, sizeof(bytes));
+ struct mtk_poll_state state = {};
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
+ budget = mtk_poll_tx_qdma(eth, budget, &state);
else
- budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
+ budget = mtk_poll_tx_pdma(eth, budget, &state);
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->netdev[i] || !done[i])
- continue;
- netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
- total += done[i];
- eth->tx_packets += done[i];
- eth->tx_bytes += bytes[i];
- }
+ if (state.txq)
+ netdev_tx_completed_queue(state.txq, state.done, state.bytes);
dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
&dim_sample);
@@ -1508,7 +2256,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
(atomic_read(&ring->free_count) > ring->thresh))
mtk_wake_queue(eth);
- return total;
+ return state.total;
}
static void mtk_handle_status_irq(struct mtk_eth *eth)
@@ -1525,24 +2273,25 @@ static void mtk_handle_status_irq(struct mtk_eth *eth)
static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int tx_done = 0;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
mtk_handle_status_irq(eth);
- mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
tx_done = mtk_poll_tx(eth, budget);
if (unlikely(netif_msg_intr(eth))) {
dev_info(eth->dev,
"done tx %d, intr 0x%08x/0x%x\n", tx_done,
- mtk_r32(eth, eth->tx_int_status_reg),
- mtk_r32(eth, eth->tx_int_mask_reg));
+ mtk_r32(eth, reg_map->tx_irq_status),
+ mtk_r32(eth, reg_map->tx_irq_mask));
}
if (tx_done == budget)
return budget;
- if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
return budget;
if (napi_complete_done(napi, tx_done))
@@ -1554,6 +2303,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int rx_done_total = 0;
mtk_handle_status_irq(eth);
@@ -1561,73 +2311,91 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
do {
int rx_done;
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+ reg_map->pdma.irq_status);
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
if (unlikely(netif_msg_intr(eth))) {
dev_info(eth->dev,
"done rx %d, intr 0x%08x/0x%x\n", rx_done,
- mtk_r32(eth, MTK_PDMA_INT_STATUS),
- mtk_r32(eth, MTK_PDMA_INT_MASK));
+ mtk_r32(eth, reg_map->pdma.irq_status),
+ mtk_r32(eth, reg_map->pdma.irq_mask));
}
if (rx_done_total == budget)
return budget;
- } while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT);
+ } while (mtk_r32(eth, reg_map->pdma.irq_status) &
+ eth->soc->txrx.rx_irq_done_mask);
if (napi_complete_done(napi, rx_done_total))
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
return rx_done_total;
}
static int mtk_tx_alloc(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
- int i, sz = sizeof(*ring->dma);
+ int i, sz = soc->txrx.txd_size;
+ struct mtk_tx_dma_v2 *txd;
+ int ring_size;
+ u32 ofs, val;
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ ring_size = MTK_QDMA_RING_SIZE;
+ else
+ ring_size = MTK_DMA_SIZE;
- ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
+ ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
GFP_KERNEL);
if (!ring->buf)
goto no_tx_mem;
- ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
- &ring->phys, GFP_ATOMIC);
+ ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
+ &ring->phys, GFP_KERNEL);
if (!ring->dma)
goto no_tx_mem;
- for (i = 0; i < MTK_DMA_SIZE; i++) {
- int next = (i + 1) % MTK_DMA_SIZE;
+ for (i = 0; i < ring_size; i++) {
+ int next = (i + 1) % ring_size;
u32 next_ptr = ring->phys + next * sz;
- ring->dma[i].txd2 = next_ptr;
- ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ txd = ring->dma + i * sz;
+ txd->txd2 = next_ptr;
+ txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ txd->txd4 = 0;
+ if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
}
/* On MT7688 (PDMA only) this driver uses the ring->dma structs
* only as the framework. The real HW descriptors are the PDMA
* descriptors in ring->dma_pdma.
*/
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
- &ring->phys_pdma,
- GFP_ATOMIC);
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
+ &ring->phys_pdma, GFP_KERNEL);
if (!ring->dma_pdma)
goto no_tx_mem;
- for (i = 0; i < MTK_DMA_SIZE; i++) {
+ for (i = 0; i < ring_size; i++) {
ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
ring->dma_pdma[i].txd4 = 0;
}
}
- ring->dma_size = MTK_DMA_SIZE;
- atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
- ring->next_free = &ring->dma[0];
- ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
- ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
+ ring->dma_size = ring_size;
+ atomic_set(&ring->free_count, ring_size - 2);
+ ring->next_free = ring->dma;
+ ring->last_free = (void *)txd;
+ ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
ring->thresh = MAX_SKB_FRAGS;
/* make sure that all changes to the dma ring are flushed before we
@@ -1635,20 +2403,37 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
*/
wmb();
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
- mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
mtk_w32(eth,
- ring->phys + ((MTK_DMA_SIZE - 1) * sz),
- MTK_QTX_CRX_PTR);
- mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
- mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
- MTK_QTX_CFG(0));
+ ring->phys + ((ring_size - 1) * sz),
+ soc->reg_map->qdma.crx_ptr);
+ mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
+
+ for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
+ val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
+ mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
+
+ val = MTK_QTX_SCH_MIN_RATE_EN |
+ /* minimum: 10 Mbps */
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
+ MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
+ mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
+ ofs += MTK_QTX_OFFSET;
+ }
+ val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
+ mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
} else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
- mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
+ mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
- mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
+ mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
}
return 0;
@@ -1659,45 +2444,43 @@ no_tx_mem:
static void mtk_tx_clean(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
int i;
if (ring->buf) {
- for (i = 0; i < MTK_DMA_SIZE; i++)
- mtk_tx_unmap(eth, &ring->buf[i], false);
+ for (i = 0; i < ring->dma_size; i++)
+ mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
kfree(ring->buf);
ring->buf = NULL;
}
if (ring->dma) {
- dma_free_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma),
- ring->dma,
- ring->phys);
+ dma_free_coherent(eth->dma_dev,
+ ring->dma_size * soc->txrx.txd_size,
+ ring->dma, ring->phys);
ring->dma = NULL;
}
if (ring->dma_pdma) {
- dma_free_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
- ring->dma_pdma,
- ring->phys_pdma);
+ dma_free_coherent(eth->dma_dev,
+ ring->dma_size * soc->txrx.txd_size,
+ ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
}
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_rx_ring *ring;
int rx_data_len, rx_dma_size;
int i;
- u32 offset = 0;
if (rx_flag == MTK_RX_FLAGS_QDMA) {
if (ring_no)
return -EINVAL;
ring = &eth->rx_ring_qdma;
- offset = 0x1000;
} else {
ring = &eth->rx_ring[ring_no];
}
@@ -1717,45 +2500,100 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
if (!ring->data)
return -ENOMEM;
- for (i = 0; i < rx_dma_size; i++) {
- ring->data[i] = netdev_alloc_frag(ring->frag_size);
- if (!ring->data[i])
- return -ENOMEM;
+ if (mtk_page_pool_enabled(eth)) {
+ struct page_pool *pp;
+
+ pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
+ rx_dma_size);
+ if (IS_ERR(pp))
+ return PTR_ERR(pp);
+
+ ring->page_pool = pp;
}
- ring->dma = dma_alloc_coherent(eth->dev,
- rx_dma_size * sizeof(*ring->dma),
- &ring->phys, GFP_ATOMIC);
+ ring->dma = dma_alloc_coherent(eth->dma_dev,
+ rx_dma_size * eth->soc->txrx.rxd_size,
+ &ring->phys, GFP_KERNEL);
if (!ring->dma)
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
- dma_addr_t dma_addr = dma_map_single(eth->dev,
- ring->data[i] + NET_SKB_PAD + eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
- return -ENOMEM;
- ring->dma[i].rxd1 = (unsigned int)dma_addr;
+ struct mtk_rx_dma_v2 *rxd;
+ dma_addr_t dma_addr;
+ void *data;
+
+ rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ if (ring->page_pool) {
+ data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ data = netdev_alloc_frag(ring->frag_size);
+ else
+ data = mtk_max_lro_buf_alloc(GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(eth->dma_dev,
+ data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr))) {
+ skb_free_frag(data);
+ return -ENOMEM;
+ }
+ }
+ rxd->rxd1 = (unsigned int)dma_addr;
+ ring->data[i] = data;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
- ring->dma[i].rxd2 = RX_DMA_LSO;
+ rxd->rxd2 = RX_DMA_LSO;
else
- ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
+
+ rxd->rxd3 = 0;
+ rxd->rxd4 = 0;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ rxd->rxd5 = 0;
+ rxd->rxd6 = 0;
+ rxd->rxd7 = 0;
+ rxd->rxd8 = 0;
+ }
}
+
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
ring->calc_idx = rx_dma_size - 1;
- ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
+ if (rx_flag == MTK_RX_FLAGS_QDMA)
+ ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
+ ring_no * MTK_QRX_OFFSET;
+ else
+ ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
+ ring_no * MTK_QRX_OFFSET;
/* make sure that all changes to the dma ring are flushed before we
* continue
*/
wmb();
- mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
- mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
- mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
- mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
+ if (rx_flag == MTK_RX_FLAGS_QDMA) {
+ mtk_w32(eth, ring->phys,
+ reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, rx_dma_size,
+ reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
+ reg_map->qdma.rst_idx);
+ } else {
+ mtk_w32(eth, ring->phys,
+ reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, rx_dma_size,
+ reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
+ reg_map->pdma.rst_idx);
+ }
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
return 0;
}
@@ -1766,27 +2604,36 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
if (ring->data && ring->dma) {
for (i = 0; i < ring->dma_size; i++) {
+ struct mtk_rx_dma *rxd;
+
if (!ring->data[i])
continue;
- if (!ring->dma[i].rxd1)
+
+ rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ if (!rxd->rxd1)
continue;
- dma_unmap_single(eth->dev,
- ring->dma[i].rxd1,
- ring->buf_size,
- DMA_FROM_DEVICE);
- skb_free_frag(ring->data[i]);
+
+ dma_unmap_single(eth->dma_dev, rxd->rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
+ mtk_rx_put_buff(ring, ring->data[i], false);
}
kfree(ring->data);
ring->data = NULL;
}
if (ring->dma) {
- dma_free_coherent(eth->dev,
- ring->dma_size * sizeof(*ring->dma),
- ring->dma,
- ring->phys);
+ dma_free_coherent(eth->dma_dev,
+ ring->dma_size * eth->soc->txrx.rxd_size,
+ ring->dma, ring->phys);
ring->dma = NULL;
}
+
+ if (ring->page_pool) {
+ if (xdp_rxq_info_is_reg(&ring->xdp_q))
+ xdp_rxq_info_unreg(&ring->xdp_q);
+ page_pool_destroy(ring->page_pool);
+ ring->page_pool = NULL;
+ }
}
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
@@ -1984,6 +2831,9 @@ static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
+ if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
+ return -EINVAL;
+
/* only tcp dst ipv4 is meaningful, others are meaningless */
fsp->flow_type = TCP_V4_FLOW;
fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
@@ -2040,15 +2890,12 @@ static netdev_features_t mtk_fix_features(struct net_device *dev,
static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
- int err = 0;
+ netdev_features_t diff = dev->features ^ features;
- if (!((dev->features ^ features) & NETIF_F_LRO))
- return 0;
-
- if (!(features & NETIF_F_LRO))
+ if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
mtk_hwlro_netdev_disable(dev);
- return err;
+ return 0;
}
/* wait for DMA to finish whatever it is doing before we start using it again */
@@ -2059,9 +2906,9 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth)
u32 val;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- reg = MTK_QDMA_GLO_CFG;
+ reg = eth->soc->reg_map->qdma.glo_cfg;
else
- reg = MTK_PDMA_GLO_CFG;
+ reg = eth->soc->reg_map->pdma.glo_cfg;
ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
@@ -2119,8 +2966,8 @@ static int mtk_dma_init(struct mtk_eth *eth)
* automatically
*/
mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
- FC_THRES_MIN, MTK_QDMA_FC_THRES);
- mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+ FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
+ mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
}
return 0;
@@ -2128,16 +2975,16 @@ static int mtk_dma_init(struct mtk_eth *eth)
static void mtk_dma_free(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
int i;
for (i = 0; i < MTK_MAC_COUNT; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
- dma_free_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
- eth->scratch_ring,
- eth->phy_scratch_ring);
+ dma_free_coherent(eth->dma_dev,
+ MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
+ eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
}
@@ -2154,14 +3001,29 @@ static void mtk_dma_free(struct mtk_eth *eth)
kfree(eth->scratch_head);
}
+static bool mtk_hw_reset_check(struct mtk_eth *eth)
+{
+ u32 val = mtk_r32(eth, MTK_INT_STATUS2);
+
+ return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
+ (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
+ (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
+}
+
static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ if (test_bit(MTK_RESETTING, &eth->state))
+ return;
+
+ if (!mtk_hw_reset_check(eth))
+ return;
+
eth->netdev[mac->id]->stats.tx_errors++;
- netif_err(eth, tx_err, dev,
- "transmit timed out\n");
+ netif_err(eth, tx_err, dev, "transmit timed out\n");
+
schedule_work(&eth->pending_work);
}
@@ -2172,7 +3034,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
__napi_schedule(&eth->rx_napi);
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
}
return IRQ_HANDLED;
@@ -2194,13 +3056,16 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
- if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
- if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
+ if (mtk_r32(eth, reg_map->pdma.irq_mask) &
+ eth->soc->txrx.rx_irq_done_mask) {
+ if (mtk_r32(eth, reg_map->pdma.irq_status) &
+ eth->soc->txrx.rx_irq_done_mask)
mtk_handle_irq_rx(irq, _eth);
}
- if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
- if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
+ if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
mtk_handle_irq_tx(irq, _eth);
}
@@ -2214,16 +3079,17 @@ static void mtk_poll_controller(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
mtk_handle_irq_rx(eth->irq[2], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
}
#endif
static int mtk_start_dma(struct mtk_eth *eth)
{
- u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
+ u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int err;
err = mtk_dma_init(eth);
@@ -2233,21 +3099,27 @@ static int mtk_start_dma(struct mtk_eth *eth)
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- mtk_w32(eth,
- MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
- MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
- MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
- MTK_RX_BT_32DWORDS,
- MTK_QDMA_GLO_CFG);
+ val = mtk_r32(eth, reg_map->qdma.glo_cfg);
+ val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
+ MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
+ MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
+ MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
+ MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
+ else
+ val |= MTK_RX_BT_32DWORDS;
+ mtk_w32(eth, val, reg_map->qdma.glo_cfg);
mtk_w32(eth,
MTK_RX_DMA_EN | rx_2b_offset |
MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
- MTK_PDMA_GLO_CFG);
+ reg_map->pdma.glo_cfg);
} else {
mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
- MTK_PDMA_GLO_CFG);
+ reg_map->pdma.glo_cfg);
}
return 0;
@@ -2271,7 +3143,7 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
val |= config;
- if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
+ if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
val |= MTK_GDMA_SPECIAL_TAG;
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
@@ -2281,11 +3153,64 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
mtk_w32(eth, 0, MTK_RST_GL);
}
+
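+/* Only consider DSA switches that use the MediaTek tag protocol; the
+ * special-tag and metadata_dst handling in mtk_open() depends on it.
+ */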
+static bool mtk_uses_dsa(struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ return netdev_uses_dsa(dev) &&
+ dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
+#else
+ return false;
+#endif
+}
+
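+/* Netdev notifier: on NETDEV_CHANGE of a DSA user port stacked on this MAC,
+ * update the per-queue TX rate of the hardware queue assigned to that
+ * switch port (queue = port index + 3) to match the reported link speed.
+ */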
+static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
+{
+ struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
+ struct mtk_eth *eth = mac->hw;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct ethtool_link_ksettings s;
+ struct net_device *ldev;
+ struct list_head *iter;
+ struct dsa_port *dp;
+
+ if (event != NETDEV_CHANGE)
+ return NOTIFY_DONE;
+
+ netdev_for_each_lower_dev(dev, ldev, iter) {
+ if (netdev_priv(ldev) == mac)
+ goto found;
+ }
+
+ return NOTIFY_DONE;
+
+found:
+ if (!dsa_slave_dev_check(dev))
+ return NOTIFY_DONE;
+
+ if (__ethtool_get_link_ksettings(dev, &s))
+ return NOTIFY_DONE;
+
+ if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
+ return NOTIFY_DONE;
+
+ dp = dsa_port_from_netdev(dev);
+ if (dp->index >= MTK_QDMA_NUM_QUEUES)
+ return NOTIFY_DONE;
+
+ if (mac->speed > 0 && mac->speed <= s.base.speed)
+ s.base.speed = 0;
+
+ mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
+
+ return NOTIFY_DONE;
+}
+
static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- int err;
+ int i, err;
err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
if (err) {
@@ -2296,29 +3221,69 @@ static int mtk_open(struct net_device *dev)
/* we run 2 netdevs on the same dma ring so we only bring it up once */
if (!refcount_read(&eth->dma_refcnt)) {
- u32 gdm_config = MTK_GDMA_TO_PDMA;
- int err;
+ const struct mtk_soc_data *soc = eth->soc;
+ u32 gdm_config;
+ int i;
err = mtk_start_dma(eth);
- if (err)
+ if (err) {
+ phylink_disconnect_phy(mac->phylink);
return err;
+ }
- if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
- gdm_config = MTK_GDMA_TO_PPE;
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_start(eth->ppe[i]);
+ gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
+ : MTK_GDMA_TO_PDMA;
mtk_gdm_config(eth, gdm_config);
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
refcount_set(&eth->dma_refcnt, 1);
}
else
refcount_inc(&eth->dma_refcnt);
phylink_start(mac->phylink);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return 0;
+
+ if (mtk_uses_dsa(dev) && !eth->prog) {
+ for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
+ struct metadata_dst *md_dst = eth->dsa_meta[i];
+
+ if (md_dst)
+ continue;
+
+ md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!md_dst)
+ return -ENOMEM;
+
+ md_dst->u.port_info.port_id = i;
+ eth->dsa_meta[i] = md_dst;
+ }
+ } else {
+ /* Hardware special tag parsing needs to be disabled if at least
+ * one MAC does not use DSA.
+ */
+ u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
+
+ val &= ~MTK_CDMP_STAG_EN;
+ mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
+
+ val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
+ val &= ~MTK_CDMQ_STAG_EN;
+ mtk_w32(eth, val, MTK_CDMQ_IG_CTRL);
+
+ mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
+ }
+
return 0;
}
@@ -2349,6 +3314,7 @@ static int mtk_stop(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ int i;
phylink_stop(mac->phylink);
@@ -2363,7 +3329,7 @@ static int mtk_stop(struct net_device *dev)
mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
@@ -2371,17 +3337,59 @@ static int mtk_stop(struct net_device *dev)
cancel_work_sync(&eth->tx_dim.work);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
- mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
+ mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
+ mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
mtk_dma_free(eth);
- if (eth->soc->offload_version)
- mtk_ppe_stop(&eth->ppe);
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_stop(eth->ppe[i]);
return 0;
}
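+/* Attaching or detaching an XDP program changes how the RX rings are built,
+ * so the device is restarted when the program presence changes. XDP is
+ * refused together with HWLRO or an MTU above MTK_PP_MAX_BUF_SIZE.
+ */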
+static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct bpf_prog *old_prog;
+ bool need_update;
+
+ if (eth->hwlro) {
+ NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+ return -EOPNOTSUPP;
+ }
+
+ need_update = !!eth->prog != !!prog;
+ if (netif_running(dev) && need_update)
+ mtk_stop(dev);
+
+ old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (netif_running(dev) && need_update)
+ return mtk_open(dev);
+
+ return 0;
+}
+
+static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
+
static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
@@ -2426,6 +3434,7 @@ static void mtk_dim_rx(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct dim_cq_moder cur_profile;
u32 val, cur;
@@ -2433,7 +3442,7 @@ static void mtk_dim_rx(struct work_struct *work)
dim->profile_ix);
spin_lock_bh(&eth->dim_lock);
- val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val = mtk_r32(eth, reg_map->pdma.delay_irq);
val &= MTK_PDMA_DELAY_TX_MASK;
val |= MTK_PDMA_DELAY_RX_EN;
@@ -2443,9 +3452,9 @@ static void mtk_dim_rx(struct work_struct *work)
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
- mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->pdma.delay_irq);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->qdma.delay_irq);
spin_unlock_bh(&eth->dim_lock);
@@ -2456,6 +3465,7 @@ static void mtk_dim_tx(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct dim_cq_moder cur_profile;
u32 val, cur;
@@ -2463,7 +3473,7 @@ static void mtk_dim_tx(struct work_struct *work)
dim->profile_ix);
spin_lock_bh(&eth->dim_lock);
- val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val = mtk_r32(eth, reg_map->pdma.delay_irq);
val &= MTK_PDMA_DELAY_RX_MASK;
val |= MTK_PDMA_DELAY_TX_EN;
@@ -2473,28 +3483,225 @@ static void mtk_dim_tx(struct work_struct *work)
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
- mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->pdma.delay_irq);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->qdma.delay_irq);
spin_unlock_bh(&eth->dim_lock);
dim->state = DIM_START_MEASURE;
}
-static int mtk_hw_init(struct mtk_eth *eth)
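+/* Program the per-MAC maximum RX frame length: pick the smallest hardware
+ * threshold (1518/1536/1552/2048 bytes) that fits @val. On MT7628 the GMAC
+ * MCR register is not used, so the function returns early.
+ */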
+static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
{
+ struct mtk_eth *eth = mac->hw;
+ u32 mcr_cur, mcr_new;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ return;
+
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
+
+ if (val <= 1518)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
+ else if (val <= 1536)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
+ else if (val <= 1552)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
+ else
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
+
+ if (mcr_new != mcr_cur)
+ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+}
+
+static void mtk_hw_reset(struct mtk_eth *eth)
+{
+ u32 val;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
+ val = RSTCTRL_PPE0_V2;
+ } else {
+ val = RSTCTRL_PPE0;
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= RSTCTRL_PPE1;
+
+ ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
+ 0x3ffffff);
+}
+
+static u32 mtk_hw_reset_read(struct mtk_eth *eth)
+{
+ u32 val;
+
+ regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
+ return val;
+}
+
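+/* Warm (frame engine only) reset: assert RSTCTRL_FE and wait for it to
+ * latch, then assert the ETH/PPE reset bits and finally release all of
+ * them. Falls back to a full mtk_hw_reset() if RSTCTRL_FE never latches.
+ */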
+static void mtk_hw_warm_reset(struct mtk_eth *eth)
+{
+ u32 rst_mask, val;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
+ RSTCTRL_FE);
+ if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
+ val & RSTCTRL_FE, 1, 1000)) {
+ dev_err(eth->dev, "warm reset failed\n");
+ mtk_hw_reset(eth);
+ return;
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
+ else
+ rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ rst_mask |= RSTCTRL_PPE1;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
+
+ udelay(1);
+ val = mtk_hw_reset_read(eth);
+ if (!(val & rst_mask))
+ dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
+ val, rst_mask);
+
+ rst_mask |= RSTCTRL_FE;
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
+
+ udelay(1);
+ val = mtk_hw_reset_read(eth);
+ if (val & rst_mask)
+ dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
+ val, rst_mask);
+}
+
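+/* Heuristic DMA hang detection: sample WDMA, QDMA and ADMA state and only
+ * report a hang after the same condition has persisted for more than two
+ * consecutive polls, so a momentarily busy engine does not trigger a reset.
+ */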
+static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
+{
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
+ bool oq_hang, cdm1_busy, adma_busy;
+ bool wtx_busy, cdm_full, oq_free;
+ u32 wdidx, val, gdm1_fc, gdm2_fc;
+ bool qfsm_hang, qfwd_hang;
+ bool ret = false;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ return false;
+
+ /* WDMA sanity checks */
+ wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
+
+ val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
+ wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
+
+ val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
+ cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
+
+ oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
+ !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
+ !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
+
+ if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
+ if (++eth->reset.wdma_hang_count > 2) {
+ eth->reset.wdma_hang_count = 0;
+ ret = true;
+ }
+ goto out;
+ }
+
+ /* QDMA sanity checks */
+ qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
+ qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
+
+ gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
+ gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
+ gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
+ gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
+ gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
+ gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
+
+ if (qfsm_hang && qfwd_hang &&
+ ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
+ (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
+ if (++eth->reset.qdma_hang_count > 2) {
+ eth->reset.qdma_hang_count = 0;
+ ret = true;
+ }
+ goto out;
+ }
+
+ /* ADMA sanity checks */
+ oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
+ cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
+ adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
+ !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
+
+ if (oq_hang && cdm1_busy && adma_busy) {
+ if (++eth->reset.adma_hang_count > 2) {
+ eth->reset.adma_hang_count = 0;
+ ret = true;
+ }
+ goto out;
+ }
+
+ eth->reset.wdma_hang_count = 0;
+ eth->reset.qdma_hang_count = 0;
+ eth->reset.adma_hang_count = 0;
+out:
+ eth->reset.wdidx = wdidx;
+
+ return ret;
+}
+
+static void mtk_hw_reset_monitor_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
+ reset.monitor_work);
+
+ if (test_bit(MTK_RESETTING, &eth->state))
+ goto out;
+
+ /* DMA stuck checks */
+ if (mtk_hw_check_dma_hang(eth))
+ schedule_work(&eth->pending_work);
+
+out:
+ schedule_delayed_work(&eth->reset.monitor_work,
+ MTK_DMA_MONITOR_TIMEOUT);
+}
+
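+/* When @reset is true this is re-entered from the reset path: runtime PM
+ * and clocks were already enabled at probe time, and a warm reset is done
+ * instead of a full one.
+ */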
+static int mtk_hw_init(struct mtk_eth *eth, bool reset)
+{
+ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+ ETHSYS_DMA_AG_MAP_PPE;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int i, val, ret;
- if (test_and_set_bit(MTK_HW_INIT, &eth->state))
+ if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
return 0;
- pm_runtime_enable(eth->dev);
- pm_runtime_get_sync(eth->dev);
+ if (!reset) {
+ pm_runtime_enable(eth->dev);
+ pm_runtime_get_sync(eth->dev);
- ret = mtk_clk_enable(eth);
- if (ret)
- goto err_disable_pm;
+ ret = mtk_clk_enable(eth);
+ if (ret)
+ goto err_disable_pm;
+ }
+
+ if (eth->ethsys)
+ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
+ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
ret = device_reset(eth->dev);
@@ -2514,9 +3721,18 @@ static int mtk_hw_init(struct mtk_eth *eth)
return 0;
}
- /* Non-MT7628 handling... */
- ethsys_reset(eth, RSTCTRL_FE);
- ethsys_reset(eth, RSTCTRL_PPE);
+ msleep(100);
+
+ if (reset)
+ mtk_hw_warm_reset(eth);
+ else
+ mtk_hw_reset(eth);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ /* Set FE to PDMAv2 if necessary */
+ val = mtk_r32(eth, MTK_FE_GLO_MISC);
+ mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
+ }
if (eth->pctl) {
/* Set GE2 driving and slew rate */
@@ -2533,17 +3749,28 @@ static int mtk_hw_init(struct mtk_eth *eth)
* up with the more appropriate value when mtk_mac_config call is being
* invoked.
*/
- for (i = 0; i < MTK_MAC_COUNT; i++)
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ struct net_device *dev = eth->netdev[i];
+
mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
+ if (dev) {
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN);
+ }
+ }
	/* Instruct the CDM to parse the MTK special tag from CPU-originated
	 * packets; this also works for untagged packets.
*/
val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
+ mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
- /* Enable RX VLan Offloading */
- mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+ }
/* set interrupt delays based on current Net DIM sample */
mtk_dim_rx(&eth->rx_dim.work);
@@ -2554,17 +3781,58 @@ static int mtk_hw_init(struct mtk_eth *eth)
mtk_rx_irq_disable(eth, ~0);
/* FE int grouping */
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ /* PSE should not drop port8 and port9 packets from WDMA Tx */
+ mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
+
+ /* PSE should drop packets to port 8/9 on WDMA Rx ring full */
+ mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
+
+ /* PSE Free Queue Flow Control */
+ mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
+
+ /* PSE config input queue threshold */
+ mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
+ mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
+ mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
+
+ /* PSE config output queue threshold */
+ mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
+ mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
+ mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
+ mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
+ mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
+ mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
+
+ /* GDM and CDM Threshold */
+ mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
+ }
+
return 0;
err_disable_pm:
- pm_runtime_put_sync(eth->dev);
- pm_runtime_disable(eth->dev);
+ if (!reset) {
+ pm_runtime_put_sync(eth->dev);
+ pm_runtime_disable(eth->dev);
+ }
return ret;
}
@@ -2614,25 +3882,14 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
int length = new_mtu + MTK_RX_ETH_HLEN;
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- u32 mcr_cur, mcr_new;
-
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
- mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
-
- if (length <= 1518)
- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
- else if (length <= 1536)
- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
- else if (length <= 1552)
- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
- else
- mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
- if (mcr_new != mcr_cur)
- mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+ if (rcu_access_pointer(eth->prog) &&
+ length > MTK_PP_MAX_BUF_SIZE) {
+ netdev_err(dev, "Invalid MTU for XDP mode\n");
+ return -EINVAL;
}
+ mtk_set_mcr_max_rx(mac, length);
dev->mtu = new_mtu;
return 0;
@@ -2654,54 +3911,85 @@ static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
+static void mtk_prepare_for_reset(struct mtk_eth *eth)
+{
+ u32 val;
+ int i;
+
+	/* disable FE P3 and P4 */
+ val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= MTK_FE_LINK_DOWN_P4;
+ mtk_w32(eth, val, MTK_FE_GLO_CFG);
+
+ /* adjust PPE configurations to prepare for reset */
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_prepare_reset(eth->ppe[i]);
+
+ /* disable NETSYS interrupts */
+ mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
+
+	/* force the GMAC links down */
+ for (i = 0; i < 2; i++) {
+ val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
+ mtk_w32(eth, val, MTK_MAC_MCR(i));
+ }
+}
+
static void mtk_pending_work(struct work_struct *work)
{
struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
- int err, i;
unsigned long restart = 0;
+ u32 val;
+ int i;
rtnl_lock();
+ set_bit(MTK_RESETTING, &eth->state);
- dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
-
- while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
- cpu_relax();
+ mtk_prepare_for_reset(eth);
+ mtk_wed_fe_reset();
+	/* Run the reset preliminary configuration again to avoid any possible
+	 * race during the FE reset, since the FE reset can run with the RTNL
+	 * lock released.
+ */
+ mtk_prepare_for_reset(eth);
- dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->netdev[i])
+ if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
continue;
+
mtk_stop(eth->netdev[i]);
__set_bit(i, &restart);
}
- dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
- /* restart underlying hardware such as power, clock, pin mux
- * and the connected phy
- */
- mtk_hw_deinit(eth);
+ usleep_range(15000, 16000);
if (eth->dev->pins)
pinctrl_select_state(eth->dev->pins->p,
eth->dev->pins->default_state);
- mtk_hw_init(eth);
+ mtk_hw_init(eth, true);
/* restart DMA and enable IRQs */
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!test_bit(i, &restart))
continue;
- err = mtk_open(eth->netdev[i]);
- if (err) {
+
+ if (mtk_open(eth->netdev[i])) {
netif_alert(eth, ifup, eth->netdev[i],
- "Driver up/down cycle failed, closing device.\n");
+ "Driver up/down cycle failed\n");
dev_close(eth->netdev[i]);
}
}
- dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
+	/* enable FE P3 and P4 */
+ val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val &= ~MTK_FE_LINK_DOWN_P4;
+ mtk_w32(eth, val, MTK_FE_GLO_CFG);
- clear_bit_unlock(MTK_RESETTING, &eth->state);
+ clear_bit(MTK_RESETTING, &eth->state);
+
+ mtk_wed_fe_reset_complete();
rtnl_unlock();
}
@@ -2716,6 +4004,12 @@ static int mtk_free_dev(struct mtk_eth *eth)
free_netdev(eth->netdev[i]);
}
+ for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
+ if (!eth->dsa_meta[i])
+ break;
+ metadata_dst_free(eth->dsa_meta[i]);
+ }
+
return 0;
}
@@ -2724,19 +4018,33 @@ static int mtk_unreg_dev(struct mtk_eth *eth)
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
+ struct mtk_mac *mac;
if (!eth->netdev[i])
continue;
+ mac = netdev_priv(eth->netdev[i]);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ unregister_netdevice_notifier(&mac->device_notifier);
unregister_netdev(eth->netdev[i]);
}
return 0;
}
+static void mtk_sgmii_destroy(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++)
+ mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
+}
+
static int mtk_cleanup(struct mtk_eth *eth)
{
+ mtk_sgmii_destroy(eth);
mtk_unreg_dev(eth);
mtk_free_dev(eth);
cancel_work_sync(&eth->pending_work);
+ cancel_delayed_work_sync(&eth->reset.monitor_work);
return 0;
}
@@ -2768,8 +4076,8 @@ static void mtk_get_drvinfo(struct net_device *dev,
{
struct mtk_mac *mac = netdev_priv(dev);
- strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
+ strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}
@@ -2805,11 +4113,18 @@ static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
int i;
switch (stringset) {
- case ETH_SS_STATS:
+ case ETH_SS_STATS: {
+ struct mtk_mac *mac = netdev_priv(dev);
+
for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
+ if (mtk_page_pool_enabled(mac->hw))
+ page_pool_ethtool_stats_get_strings(data);
+ break;
+ }
+ default:
break;
}
}
@@ -2817,13 +4132,35 @@ static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static int mtk_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(mtk_ethtool_stats);
+ case ETH_SS_STATS: {
+ int count = ARRAY_SIZE(mtk_ethtool_stats);
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ if (mtk_page_pool_enabled(mac->hw))
+ count += page_pool_ethtool_stats_get_count();
+ return count;
+ }
default:
return -EOPNOTSUPP;
}
}
+static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
+ struct mtk_rx_ring *ring = &eth->rx_ring[i];
+
+ if (!ring->page_pool)
+ continue;
+
+ page_pool_get_stats(ring->page_pool, &stats);
+ }
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
static void mtk_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -2847,11 +4184,13 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
do {
data_dst = data;
- start = u64_stats_fetch_begin_irq(&hwstats->syncp);
+ start = u64_stats_fetch_begin(&hwstats->syncp);
for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
- } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
+ if (mtk_page_pool_enabled(mac->hw))
+ mtk_ethtool_pp_stats(mac->hw, data_dst);
+ } while (u64_stats_fetch_retry(&hwstats->syncp, start));
}
static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
@@ -2910,6 +4249,23 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
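+/* On DSA conduits each switch port gets its own hardware TX queue (skb
+ * queue mapping + 3); other devices use the MAC id. Out-of-range values
+ * fall back to queue 0.
+ */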
+static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ unsigned int queue = 0;
+
+ if (netdev_uses_dsa(dev))
+ queue = skb_get_queue_mapping(skb) + 3;
+ else
+ queue = mac->id;
+
+ if (queue >= dev->num_tx_queues)
+ queue = 0;
+
+ return queue;
+}
+
static const struct ethtool_ops mtk_ethtool_ops = {
.get_link_ksettings = mtk_get_link_ksettings,
.set_link_ksettings = mtk_set_link_ksettings,
@@ -2943,6 +4299,9 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_poll_controller = mtk_poll_controller,
#endif
.ndo_setup_tc = mtk_eth_setup_tc,
+ .ndo_bpf = mtk_xdp,
+ .ndo_xdp_xmit = mtk_xdp_xmit,
+ .ndo_select_queue = mtk_select_queue,
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
@@ -2952,6 +4311,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
struct phylink *phylink;
struct mtk_mac *mac;
int id, err;
+ int txqs = 1;
+ u32 val;
if (!_id) {
dev_err(eth->dev, "missing mac id\n");
@@ -2969,7 +4330,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
return -EINVAL;
}
- eth->netdev[id] = alloc_etherdev(sizeof(*mac));
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ txqs = MTK_QDMA_NUM_QUEUES;
+
+ eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
if (!eth->netdev[id]) {
dev_err(eth->dev, "alloc_etherdev failed\n");
return -ENOMEM;
@@ -3004,11 +4368,44 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
/* mac config is not set */
mac->interface = PHY_INTERFACE_MODE_NA;
- mac->mode = MLO_AN_PHY;
mac->speed = SPEED_UNKNOWN;
mac->phylink_config.dev = &eth->netdev[id]->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
+ /* This driver makes use of state->speed in mac_config */
+ mac->phylink_config.legacy_pre_march2020 = true;
+ mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ mac->phylink_config.supported_interfaces);
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
+ phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
+ __set_bit(PHY_INTERFACE_MODE_TRGMII,
+ mac->phylink_config.supported_interfaces);
+
+ /* TRGMII is not permitted on MT7621 if using DDR2 */
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
+ if (val & SYSCFG_DRAM_TYPE_DDR2)
+ __clear_bit(PHY_INTERFACE_MODE_TRGMII,
+ mac->phylink_config.supported_interfaces);
+ }
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ mac->phylink_config.supported_interfaces);
+ }
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
@@ -3030,7 +4427,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->hw_features |= NETIF_F_LRO;
eth->netdev[id]->vlan_features = eth->soc->hw_features &
- ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
+ ~NETIF_F_HW_VLAN_CTAG_TX;
eth->netdev[id]->features |= eth->soc->hw_features;
eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
@@ -3042,6 +4439,17 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
else
eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ mac->device_notifier.notifier_call = mtk_device_event;
+ register_netdevice_notifier(&mac->device_notifier);
+ }
+
+ if (mtk_page_pool_enabled(eth))
+ eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
return 0;
free_netdev:
@@ -3049,8 +4457,68 @@ free_netdev:
return err;
}
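+/* Switch the device used for DMA mappings (e.g. when WED takes over the
+ * data path): close all running netdevs, swap eth->dma_dev and reopen them
+ * so every ring is reallocated against the new device.
+ */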
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
+{
+ struct net_device *dev, *tmp;
+ LIST_HEAD(dev_list);
+ int i;
+
+ rtnl_lock();
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ dev = eth->netdev[i];
+
+ if (!dev || !(dev->flags & IFF_UP))
+ continue;
+
+ list_add_tail(&dev->close_list, &dev_list);
+ }
+
+ dev_close_many(&dev_list, false);
+
+ eth->dma_dev = dma_dev;
+
+ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
+ list_del_init(&dev->close_list);
+ dev_open(dev, NULL);
+ }
+
+ rtnl_unlock();
+}
+
+static int mtk_sgmii_init(struct mtk_eth *eth)
+{
+ struct device_node *np;
+ struct regmap *regmap;
+ u32 flags;
+ int i;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
+ if (!np)
+ break;
+
+ regmap = syscon_node_to_regmap(np);
+ flags = 0;
+ if (of_property_read_bool(np, "mediatek,pnswap"))
+ flags |= MTK_SGMII_FLAG_PN_SWAP;
+
+ of_node_put(np);
+
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
+ eth->soc->ana_rgc3,
+ flags);
+ }
+
+ return 0;
+}
+
static int mtk_probe(struct platform_device *pdev)
{
+ struct resource *res = NULL;
struct device_node *mac_np;
struct mtk_eth *eth;
int err, i;
@@ -3062,24 +4530,13 @@ static int mtk_probe(struct platform_device *pdev)
eth->soc = of_device_get_match_data(&pdev->dev);
eth->dev = &pdev->dev;
+ eth->dma_dev = &pdev->dev;
eth->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
- eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
- } else {
- eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
- eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
- }
-
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
- eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
eth->ip_align = NET_IP_ALIGN;
- } else {
- eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
- }
spin_lock_init(&eth->page_lock);
spin_lock_init(&eth->tx_irq_lock);
@@ -3088,6 +4545,7 @@ static int mtk_probe(struct platform_device *pdev)
eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
+ INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
@@ -3110,14 +4568,18 @@ static int mtk_probe(struct platform_device *pdev)
}
}
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
- eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
- GFP_KERNEL);
- if (!eth->sgmii)
- return -ENOMEM;
+ if (of_dma_is_coherent(pdev->dev.of_node)) {
+ struct regmap *cci;
- err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
- eth->soc->ana_rgc3);
+ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "cci-control-port");
+ /* enable CPU/bus coherency */
+ if (!IS_ERR(cci))
+ regmap_write(cci, 0, 3);
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+ err = mtk_sgmii_init(eth);
if (err)
return err;
@@ -3128,7 +4590,37 @@ static int mtk_probe(struct platform_device *pdev)
"mediatek,pctl");
if (IS_ERR(eth->pctl)) {
dev_err(&pdev->dev, "no pctl regmap found\n");
- return PTR_ERR(eth->pctl);
+ err = PTR_ERR(eth->pctl);
+ goto err_destroy_sgmii;
+ }
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ goto err_destroy_sgmii;
+ }
+ }
+
+ if (eth->soc->offload_version) {
+ for (i = 0;; i++) {
+ struct device_node *np;
+ phys_addr_t wdma_phy;
+ u32 wdma_base;
+
+ if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
+ break;
+
+ np = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,wed", i);
+ if (!np)
+ break;
+
+ wdma_base = eth->soc->reg_map->wdma_base[i];
+ wdma_phy = res ? res->start + wdma_base : 0;
+ mtk_wed_add_hw(np, eth, eth->base + wdma_base,
+ wdma_phy, i);
}
}
@@ -3139,19 +4631,23 @@ static int mtk_probe(struct platform_device *pdev)
eth->irq[i] = platform_get_irq(pdev, i);
if (eth->irq[i] < 0) {
dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
- return -ENXIO;
+ err = -ENXIO;
+ goto err_wed_exit;
}
}
for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
eth->clks[i] = devm_clk_get(eth->dev,
mtk_clks_source_name[i]);
if (IS_ERR(eth->clks[i])) {
- if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto err_wed_exit;
+ }
if (eth->soc->required_clks & BIT(i)) {
dev_err(&pdev->dev, "clock %s not found\n",
mtk_clks_source_name[i]);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_wed_exit;
}
eth->clks[i] = NULL;
}
@@ -3160,9 +4656,9 @@ static int mtk_probe(struct platform_device *pdev)
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
INIT_WORK(&eth->pending_work, mtk_pending_work);
- err = mtk_hw_init(eth);
+ err = mtk_hw_init(eth, false);
if (err)
- return err;
+ goto err_wed_exit;
eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
@@ -3207,14 +4703,24 @@ static int mtk_probe(struct platform_device *pdev)
}
if (eth->soc->offload_version) {
- err = mtk_ppe_init(&eth->ppe, eth->dev,
- eth->base + MTK_ETH_PPE_BASE, 2);
- if (err)
- goto err_free_dev;
+ u32 num_ppe;
+
+ num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+ num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
+ for (i = 0; i < num_ppe; i++) {
+ u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+
+ eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
+
+ if (!eth->ppe[i]) {
+ err = -ENOMEM;
+ goto err_deinit_ppe;
+ }
+ }
err = mtk_eth_offload_init(eth);
if (err)
- goto err_free_dev;
+ goto err_deinit_ppe;
}
for (i = 0; i < MTK_MAX_DEVS; i++) {
@@ -3224,7 +4730,7 @@ static int mtk_probe(struct platform_device *pdev)
err = register_netdev(eth->netdev[i]);
if (err) {
dev_err(eth->dev, "error bringing up device\n");
- goto err_deinit_mdio;
+ goto err_deinit_ppe;
} else
netif_info(eth, probe, eth->netdev[i],
"mediatek frame engine at 0x%08lx, irq %d\n",
@@ -3235,21 +4741,26 @@ static int mtk_probe(struct platform_device *pdev)
* for NAPI to work
*/
init_dummy_netdev(&eth->dummy_dev);
- netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
- MTK_NAPI_WEIGHT);
- netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
- MTK_NAPI_WEIGHT);
+ netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
+ netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
platform_set_drvdata(pdev, eth);
+ schedule_delayed_work(&eth->reset.monitor_work,
+ MTK_DMA_MONITOR_TIMEOUT);
return 0;
-err_deinit_mdio:
+err_deinit_ppe:
+ mtk_ppe_deinit(eth);
mtk_mdio_cleanup(eth);
err_free_dev:
mtk_free_dev(eth);
err_deinit_hw:
mtk_hw_deinit(eth);
+err_wed_exit:
+ mtk_wed_exit();
+err_destroy_sgmii:
+ mtk_sgmii_destroy(eth);
return err;
}
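The reworked error path above unwinds in the reverse order of setup (PPE, MDIO, netdevs, hardware, WED, SGMII), so a failure at any stage only releases what has already been acquired. A minimal sketch of the same goto-unwind idiom, using hypothetical helper names rather than anything from this patch:

static int example_probe(struct platform_device *pdev)
{
	int err;

	err = acquire_a(pdev);		/* hypothetical helper */
	if (err)
		return err;

	err = acquire_b(pdev);		/* hypothetical helper */
	if (err)
		goto err_release_a;	/* only A exists at this point */

	return 0;

err_release_a:
	release_a(pdev);		/* undo in reverse acquisition order */
	return err;
}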
@@ -3269,6 +4780,7 @@ static int mtk_remove(struct platform_device *pdev)
phylink_disconnect_phy(mac->phylink);
}
+ mtk_wed_exit();
mtk_hw_deinit(eth);
netif_napi_del(&eth->tx_napi);
@@ -3280,50 +4792,154 @@ static int mtk_remove(struct platform_device *pdev)
}
static const struct mtk_soc_data mt2701_data = {
+ .reg_map = &mtk_reg_map,
.caps = MT7623_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7621_data = {
+ .reg_map = &mtk_reg_map,
.caps = MT7621_CAPS,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
- .offload_version = 2,
+ .offload_version = 1,
+ .hash_offset = 2,
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7622_data = {
+ .reg_map = &mtk_reg_map,
.ana_rgc3 = 0x2028,
.caps = MT7622_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
+ .hash_offset = 2,
+ .has_accounting = true,
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7623_data = {
+ .reg_map = &mtk_reg_map,
.caps = MT7623_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
- .offload_version = 2,
+ .offload_version = 1,
+ .hash_offset = 2,
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7629_data = {
+ .reg_map = &mtk_reg_map,
.ana_rgc3 = 0x128,
.caps = MT7629_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
+ .has_accounting = true,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+};
+
+static const struct mtk_soc_data mt7981_data = {
+ .reg_map = &mt7986_reg_map,
+ .ana_rgc3 = 0x128,
+ .caps = MT7981_CAPS,
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7981_CLKS_BITMAP,
+ .required_pctl = false,
+ .offload_version = 2,
+ .hash_offset = 4,
+ .foe_entry_size = sizeof(struct mtk_foe_entry),
+ .has_accounting = true,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma_v2),
+ .rxd_size = sizeof(struct mtk_rx_dma_v2),
+ .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+};
+
+static const struct mtk_soc_data mt7986_data = {
+ .reg_map = &mt7986_reg_map,
+ .ana_rgc3 = 0x128,
+ .caps = MT7986_CAPS,
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7986_CLKS_BITMAP,
+ .required_pctl = false,
+ .offload_version = 2,
+ .hash_offset = 4,
+ .foe_entry_size = sizeof(struct mtk_foe_entry),
+ .has_accounting = true,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma_v2),
+ .rxd_size = sizeof(struct mtk_rx_dma_v2),
+ .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
};
static const struct mtk_soc_data rt5350_data = {
+ .reg_map = &mt7628_reg_map,
.caps = MT7628_CAPS,
.hw_features = MTK_HW_FEATURES_MT7628,
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
const struct of_device_id of_mtk_match[] = {
@@ -3332,6 +4948,8 @@ const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
+ { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data},
+ { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
{},
};
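The two new compatible strings bind mt7981/mt7986 boards to the per-SoC parameter blocks defined above. As a hedged sketch (the driver's actual lookup is outside this hunk), probe code typically retrieves the attached data through the standard OF helper:

#include <linux/of_device.h>

/* illustrative only: fetch the mtk_soc_data attached via .data above */
static const struct mtk_soc_data *example_get_soc(struct platform_device *pdev)
{
	return of_device_get_match_data(&pdev->dev);
}

The returned pointer then drives everything that differs per generation, e.g. the reg_map offsets and the txrx descriptor sizes used for ring allocation.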
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 5ef70dd8b49c..707445f6bcb1 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -17,14 +17,22 @@
#include <linux/phylink.h>
#include <linux/rhashtable.h>
#include <linux/dim.h>
+#include <linux/bitfield.h>
+#include <net/page_pool.h>
+#include <linux/bpf_trace.h>
#include "mtk_ppe.h"
+#define MTK_MAX_DSA_PORTS 7
+#define MTK_DSA_PORT_MASK GENMASK(2, 0)
+
+#define MTK_QDMA_NUM_QUEUES 16
#define MTK_QDMA_PAGE_SIZE 2048
#define MTK_MAX_RX_LENGTH 1536
#define MTK_MAX_RX_LENGTH_2K 2048
#define MTK_TX_DMA_BUF_LEN 0x3fff
+#define MTK_TX_DMA_BUF_LEN_V2 0xffff
+#define MTK_QDMA_RING_SIZE 2048
#define MTK_DMA_SIZE 512
-#define MTK_NAPI_WEIGHT 64
#define MTK_MAC_COUNT 2
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
@@ -40,7 +48,6 @@
#define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \
NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_CTAG_TX | \
- NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_SG | NETIF_F_TSO | \
NETIF_F_TSO6 | \
NETIF_F_IPV6_CSUM |\
@@ -48,6 +55,13 @@
#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+#define MTK_PP_HEADROOM XDP_PACKET_HEADROOM
+#define MTK_PP_PAD (MTK_PP_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define MTK_PP_MAX_BUF_SIZE (PAGE_SIZE - MTK_PP_PAD)
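A rough worked example of the page-pool budget above, assuming PAGE_SIZE is 4096 and XDP_PACKET_HEADROOM is 256 (both vary with architecture and kernel config):

/* MTK_PP_PAD          = 256 + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 * MTK_PP_MAX_BUF_SIZE = 4096 - MTK_PP_PAD
 *
 * so each page-pool page holds exactly one frame plus its XDP headroom
 * and tail skb_shared_info, never two frames.
 */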
+
+#define MTK_QRX_OFFSET 0x10
+
#define MTK_MAX_RX_RING_NUM 4
#define MTK_HW_LRO_DMA_SIZE 8
@@ -62,12 +76,24 @@
#define MTK_HW_LRO_REPLACE_DELTA 1000
#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522
+/* Frame Engine Global Configuration */
+#define MTK_FE_GLO_CFG 0x00
+#define MTK_FE_LINK_DOWN_P3 BIT(11)
+#define MTK_FE_LINK_DOWN_P4 BIT(12)
+
/* Frame Engine Global Reset Register */
#define MTK_RST_GL 0x04
#define RST_GL_PSE BIT(0)
/* Frame Engine Interrupt Status Register */
#define MTK_INT_STATUS2 0x08
+#define MTK_FE_INT_ENABLE 0x0c
+#define MTK_FE_INT_FQ_EMPTY BIT(8)
+#define MTK_FE_INT_TSO_FAIL BIT(12)
+#define MTK_FE_INT_TSO_ILLEGAL BIT(13)
+#define MTK_FE_INT_TSO_ALIGN BIT(14)
+#define MTK_FE_INT_RFIFO_OV BIT(18)
+#define MTK_FE_INT_RFIFO_UF BIT(19)
#define MTK_GDM1_AF BIT(28)
#define MTK_GDM2_AF BIT(29)
@@ -81,6 +107,13 @@
#define MTK_CDMQ_IG_CTRL 0x1400
#define MTK_CDMQ_STAG_EN BIT(0)
+/* CDMQ Egress Control Register */
+#define MTK_CDMQ_EG_CTRL 0x1404
+
+/* CDMP Ingress Control Register */
+#define MTK_CDMP_IG_CTRL 0x400
+#define MTK_CDMP_STAG_EN BIT(0)
+
/* CDMP Egress Control Register */
#define MTK_CDMP_EG_CTRL 0x404
@@ -91,7 +124,6 @@
#define MTK_GDMA_TCS_EN BIT(21)
#define MTK_GDMA_UCS_EN BIT(20)
#define MTK_GDMA_TO_PDMA 0x0
-#define MTK_GDMA_TO_PPE 0x4444
#define MTK_GDMA_DROP_ALL 0x7777
/* Unicast Filter MAC Address Register - Low */
@@ -100,25 +132,39 @@
/* Unicast Filter MAC Address Register - High */
#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
-/* PDMA RX Base Pointer Register */
-#define MTK_PRX_BASE_PTR0 0x900
-#define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
+/* FE global misc reg */
+#define MTK_FE_GLO_MISC 0x124
-/* PDMA RX Maximum Count Register */
-#define MTK_PRX_MAX_CNT0 0x904
-#define MTK_PRX_MAX_CNT_CFG(x) (MTK_PRX_MAX_CNT0 + (x * 0x10))
+/* PSE Free Queue Flow Control */
+#define PSE_FQFC_CFG1 0x100
+#define PSE_FQFC_CFG2 0x104
+#define PSE_DROP_CFG 0x108
+#define PSE_PPE0_DROP 0x110
-/* PDMA RX CPU Pointer Register */
-#define MTK_PRX_CRX_IDX0 0x908
-#define MTK_PRX_CRX_IDX_CFG(x) (MTK_PRX_CRX_IDX0 + (x * 0x10))
+/* PSE Input Queue Reservation Register */
+#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
+
+/* PSE Output Queue Threshold Register */
+#define PSE_OQ_TH(x) (0x160 + (((x) - 1) << 2))
+
+/* GDM and CDM Threshold */
+#define MTK_GDM2_THRES 0x1530
+#define MTK_CDMW0_THRES 0x164c
+#define MTK_CDMW1_THRES 0x1650
+#define MTK_CDME0_THRES 0x1654
+#define MTK_CDME1_THRES 0x1658
+#define MTK_CDMM_THRES 0x165c
/* PDMA HW LRO Control Registers */
#define MTK_PDMA_LRO_CTRL_DW0 0x980
#define MTK_LRO_EN BIT(0)
#define MTK_L3_CKS_UPD_EN BIT(7)
+#define MTK_L3_CKS_UPD_EN_V2 BIT(19)
#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
#define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
+#define MTK_LRO_RING_RELINQUISH_REQ_V2 (0xf << 24)
#define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
+#define MTK_LRO_RING_RELINQUISH_DONE_V2 (0xf << 28)
#define MTK_PDMA_LRO_CTRL_DW1 0x984
#define MTK_PDMA_LRO_CTRL_DW2 0x988
@@ -126,18 +172,19 @@
#define MTK_ADMA_MODE BIT(15)
#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
-/* PDMA Global Configuration Register */
-#define MTK_PDMA_GLO_CFG 0xa04
+#define MTK_RX_DMA_LRO_EN BIT(8)
#define MTK_MULTI_EN BIT(10)
#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
+/* PDMA Global Configuration Register */
+#define MTK_PDMA_LRO_SDL 0x3000
+#define MTK_RX_CFG_SDL_OFFSET 16
+
/* PDMA Reset Index Register */
-#define MTK_PDMA_RST_IDX 0xa08
#define MTK_PST_DRX_IDX0 BIT(16)
#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
/* PDMA Delay Interrupt Register */
-#define MTK_PDMA_DELAY_INT 0xa0c
#define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0)
#define MTK_PDMA_DELAY_RX_EN BIT(15)
#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
@@ -151,19 +198,9 @@
#define MTK_PDMA_DELAY_PINT_MASK 0x7f
#define MTK_PDMA_DELAY_PTIME_MASK 0xff
-/* PDMA Interrupt Status Register */
-#define MTK_PDMA_INT_STATUS 0xa20
-
-/* PDMA Interrupt Mask Register */
-#define MTK_PDMA_INT_MASK 0xa28
-
/* PDMA HW LRO Alter Flow Delta Register */
#define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c
-/* PDMA Interrupt grouping registers */
-#define MTK_PDMA_INT_GRP1 0xa50
-#define MTK_PDMA_INT_GRP2 0xa54
-
/* PDMA HW LRO IP Setting Registers */
#define MTK_LRO_RX_RING0_DIP_DW0 0xb04
#define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
@@ -185,26 +222,27 @@
#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
/* QDMA TX Queue Configuration Registers */
-#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
+#define MTK_QTX_OFFSET 0x10
#define QDMA_RES_THRES 4
-/* QDMA TX Queue Scheduler Registers */
-#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
-
-/* QDMA RX Base Pointer Register */
-#define MTK_QRX_BASE_PTR0 0x1900
+/* QDMA Tx Queue Scheduler Configuration Registers */
+#define MTK_QTX_SCH_TX_SEL BIT(31)
+#define MTK_QTX_SCH_TX_SEL_V2 GENMASK(31, 30)
-/* QDMA RX Maximum Count Register */
-#define MTK_QRX_MAX_CNT0 0x1904
+#define MTK_QTX_SCH_LEAKY_BUCKET_EN BIT(30)
+#define MTK_QTX_SCH_LEAKY_BUCKET_SIZE GENMASK(29, 28)
+#define MTK_QTX_SCH_MIN_RATE_EN BIT(27)
+#define MTK_QTX_SCH_MIN_RATE_MAN GENMASK(26, 20)
+#define MTK_QTX_SCH_MIN_RATE_EXP GENMASK(19, 16)
+#define MTK_QTX_SCH_MAX_RATE_WEIGHT GENMASK(15, 12)
+#define MTK_QTX_SCH_MAX_RATE_EN BIT(11)
+#define MTK_QTX_SCH_MAX_RATE_MAN GENMASK(10, 4)
+#define MTK_QTX_SCH_MAX_RATE_EXP GENMASK(3, 0)
-/* QDMA RX CPU Pointer Register */
-#define MTK_QRX_CRX_IDX0 0x1908
-
-/* QDMA RX DMA Pointer Register */
-#define MTK_QRX_DRX_IDX0 0x190C
+/* QDMA TX Scheduler Rate Control Register */
+#define MTK_QDMA_TX_SCH_MAX_WFQ BIT(15)
/* QDMA Global Configuration Register */
-#define MTK_QDMA_GLO_CFG 0x1A04
#define MTK_RX_2B_OFFSET BIT(31)
#define MTK_RX_BT_32DWORDS (3 << 11)
#define MTK_NDP_CO_PRO BIT(10)
@@ -216,20 +254,20 @@
#define MTK_TX_DMA_EN BIT(0)
#define MTK_DMA_BUSY_TIMEOUT_US 1000000
-/* QDMA Reset Index Register */
-#define MTK_QDMA_RST_IDX 0x1A08
-
-/* QDMA Delay Interrupt Register */
-#define MTK_QDMA_DELAY_INT 0x1A0C
+/* QDMA V2 Global Configuration Register */
+#define MTK_CHK_DDONE_EN BIT(28)
+#define MTK_DMAD_WR_WDONE BIT(26)
+#define MTK_WCOMP_EN BIT(24)
+#define MTK_RESV_BUF (0x40 << 16)
+#define MTK_MUTLI_CNT (0x4 << 12)
+#define MTK_LEAKY_BUCKET_EN BIT(11)
/* QDMA Flow Control Register */
-#define MTK_QDMA_FC_THRES 0x1A10
#define FC_THRES_DROP_MODE BIT(20)
#define FC_THRES_DROP_EN (7 << 16)
#define FC_THRES_MIN 0x4444
/* QDMA Interrupt Status Register */
-#define MTK_QDMA_INT_STATUS 0x1A18
#define MTK_RX_DONE_DLY BIT(30)
#define MTK_TX_DONE_DLY BIT(28)
#define MTK_RX_DONE_INT3 BIT(19)
@@ -243,58 +281,32 @@
#define MTK_RX_DONE_INT MTK_RX_DONE_DLY
#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
+#define MTK_RX_DONE_INT_V2 BIT(14)
+
+#define MTK_CDM_TXFIFO_RDY BIT(7)
+
/* QDMA Interrupt grouping registers */
-#define MTK_QDMA_INT_GRP1 0x1a20
-#define MTK_QDMA_INT_GRP2 0x1a24
#define MTK_RLS_DONE_INT BIT(0)
-/* QDMA Interrupt Status Register */
-#define MTK_QDMA_INT_MASK 0x1A1C
-
-/* QDMA Interrupt Mask Register */
-#define MTK_QDMA_HRED2 0x1A44
-
-/* QDMA TX Forward CPU Pointer Register */
-#define MTK_QTX_CTX_PTR 0x1B00
-
-/* QDMA TX Forward DMA Pointer Register */
-#define MTK_QTX_DTX_PTR 0x1B04
-
-/* QDMA TX Release CPU Pointer Register */
-#define MTK_QTX_CRX_PTR 0x1B10
-
-/* QDMA TX Release DMA Pointer Register */
-#define MTK_QTX_DRX_PTR 0x1B14
-
-/* QDMA FQ Head Pointer Register */
-#define MTK_QDMA_FQ_HEAD 0x1B20
-
-/* QDMA FQ Head Pointer Register */
-#define MTK_QDMA_FQ_TAIL 0x1B24
-
-/* QDMA FQ Free Page Counter Register */
-#define MTK_QDMA_FQ_CNT 0x1B28
-
-/* QDMA FQ Free Page Buffer Length Register */
-#define MTK_QDMA_FQ_BLEN 0x1B2C
-
-/* GMA1 counter / statics register */
-#define MTK_GDM1_RX_GBCNT_L 0x2400
-#define MTK_GDM1_RX_GBCNT_H 0x2404
-#define MTK_GDM1_RX_GPCNT 0x2408
-#define MTK_GDM1_RX_OERCNT 0x2410
-#define MTK_GDM1_RX_FERCNT 0x2414
-#define MTK_GDM1_RX_SERCNT 0x2418
-#define MTK_GDM1_RX_LENCNT 0x241c
-#define MTK_GDM1_RX_CERCNT 0x2420
-#define MTK_GDM1_RX_FCCNT 0x2424
-#define MTK_GDM1_TX_SKIPCNT 0x2428
-#define MTK_GDM1_TX_COLCNT 0x242c
-#define MTK_GDM1_TX_GBCNT_L 0x2430
-#define MTK_GDM1_TX_GBCNT_H 0x2434
-#define MTK_GDM1_TX_GPCNT 0x2438
#define MTK_STAT_OFFSET 0x40
+/* QDMA TX NUM */
+#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
+#define MTK_QDMA_GMAC2_QID 8
+
+#define MTK_TX_DMA_BUF_SHIFT 8
+
+/* QDMA V2 descriptor txd6 */
+#define TX_DMA_INS_VLAN_V2 BIT(16)
+/* QDMA V2 descriptor txd5 */
+#define TX_DMA_CHKSUM_V2 (0x7 << 28)
+#define TX_DMA_TSO_V2 BIT(31)
+
+/* QDMA V2 descriptor txd4 */
+#define TX_DMA_FPORT_SHIFT_V2 8
+#define TX_DMA_FPORT_MASK_V2 0xf
+#define TX_DMA_SWC_V2 BIT(30)
+
/* QDMA descriptor txd4 */
#define TX_DMA_CHKSUM (0x7 << 29)
#define TX_DMA_TSO BIT(28)
@@ -305,10 +317,10 @@
/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
-#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16)
-#define TX_DMA_PLEN1(_x) ((_x) & MTK_TX_DMA_BUF_LEN)
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
#define TX_DMA_SWC BIT(14)
-#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16)
+#define TX_DMA_PQID GENMASK(3, 0)
/* PDMA on MT7628 */
#define TX_DMA_DONE BIT(31)
@@ -318,12 +330,14 @@
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
#define RX_DMA_LSO BIT(30)
-#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
-#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
#define RX_DMA_VTAG BIT(15)
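With the per-SoC values set elsewhere in this patch (dma_max_len 0x3fff / dma_len_offset 16 on the older SoCs, 0xffff / 8 on NETSYS v2), the parameterized length macros reduce to plain shift-and-mask. A worked example, not part of the patch:

/* v1 SoC, 1514-byte frame:
 *   RX_DMA_PREP_PLEN0(1514) == (1514 & 0x3fff) << 16 == 0x05ea0000
 *   RX_DMA_GET_PLEN0(0x05ea0000) == (0x05ea0000 >> 16) & 0x3fff == 1514
 *
 * NETSYS v2, same frame:
 *   RX_DMA_PREP_PLEN0(1514) == (1514 & 0xffff) << 8 == 0x0005ea00
 */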
/* QDMA descriptor rxd3 */
-#define RX_DMA_VID(_x) ((_x) & 0xfff)
+#define RX_DMA_VID(x) ((x) & VLAN_VID_MASK)
+#define RX_DMA_TCI(x) ((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
+#define RX_DMA_VPID(x) (((x) >> 16) & 0xffff)
/* QDMA descriptor rxd4 */
#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
@@ -334,18 +348,44 @@
/* QDMA descriptor rxd4 */
#define RX_DMA_L4_VALID BIT(24)
#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
-#define RX_DMA_FPORT_SHIFT 19
-#define RX_DMA_FPORT_MASK 0x7
#define RX_DMA_SPECIAL_TAG BIT(22)
+/* PDMA descriptor rxd5 */
+#define MTK_RXD5_FOE_ENTRY GENMASK(14, 0)
+#define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18)
+#define MTK_RXD5_SRC_PORT GENMASK(29, 26)
+
+#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0x7)
+#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0xf)
+
+/* PDMA V2 descriptor rxd3 */
+#define RX_DMA_VTAG_V2 BIT(0)
+#define RX_DMA_L4_VALID_V2 BIT(2)
+
+/* PHY Polling and SMI Master Control registers */
+#define MTK_PPSC 0x10000
+#define PPSC_MDC_CFG GENMASK(29, 24)
+#define PPSC_MDC_TURBO BIT(20)
+#define MDC_MAX_FREQ 25000000
+#define MDC_MAX_DIVIDER 63
+
/* PHY Indirect Access Control registers */
#define MTK_PHY_IAC 0x10004
#define PHY_IAC_ACCESS BIT(31)
-#define PHY_IAC_READ BIT(19)
-#define PHY_IAC_WRITE BIT(18)
-#define PHY_IAC_START BIT(16)
-#define PHY_IAC_ADDR_SHIFT 20
-#define PHY_IAC_REG_SHIFT 25
+#define PHY_IAC_REG_MASK GENMASK(29, 25)
+#define PHY_IAC_REG(x) FIELD_PREP(PHY_IAC_REG_MASK, (x))
+#define PHY_IAC_ADDR_MASK GENMASK(24, 20)
+#define PHY_IAC_ADDR(x) FIELD_PREP(PHY_IAC_ADDR_MASK, (x))
+#define PHY_IAC_CMD_MASK GENMASK(19, 18)
+#define PHY_IAC_CMD_C45_ADDR FIELD_PREP(PHY_IAC_CMD_MASK, 0)
+#define PHY_IAC_CMD_WRITE FIELD_PREP(PHY_IAC_CMD_MASK, 1)
+#define PHY_IAC_CMD_C22_READ FIELD_PREP(PHY_IAC_CMD_MASK, 2)
+#define PHY_IAC_CMD_C45_READ FIELD_PREP(PHY_IAC_CMD_MASK, 3)
+#define PHY_IAC_START_MASK GENMASK(17, 16)
+#define PHY_IAC_START_C45 FIELD_PREP(PHY_IAC_START_MASK, 0)
+#define PHY_IAC_START_C22 FIELD_PREP(PHY_IAC_START_MASK, 1)
+#define PHY_IAC_DATA_MASK GENMASK(15, 0)
+#define PHY_IAC_DATA(x) FIELD_PREP(PHY_IAC_DATA_MASK, (x))
#define PHY_IAC_TIMEOUT HZ
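The old shift constants become FIELD_PREP-based fields, so a complete MDIO command word is built by OR-ing the pieces together. A short illustrative composition (a clause-22 read of register 1 on PHY address 5; the driver's own MDIO helpers are outside this hunk):

	u32 cmd = PHY_IAC_ACCESS | PHY_IAC_START_C22 | PHY_IAC_CMD_C22_READ |
		  PHY_IAC_ADDR(5) | PHY_IAC_REG(1);

	/* write cmd to MTK_PHY_IAC, poll for PHY_IAC_ACCESS to clear, then
	 * extract the result with FIELD_GET(PHY_IAC_DATA_MASK, val)
	 */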
#define MTK_MAC_MISC 0x1000c
@@ -363,6 +403,7 @@
#define MAC_MCR_FORCE_MODE BIT(15)
#define MAC_MCR_TX_EN BIT(14)
#define MAC_MCR_RX_EN BIT(13)
+#define MAC_MCR_RX_FIFO_CLR_DIS BIT(12)
#define MAC_MCR_BACKOFF_EN BIT(9)
#define MAC_MCR_BACKPR_EN BIT(8)
#define MAC_MCR_FORCE_RX_FC BIT(5)
@@ -452,56 +493,32 @@
#define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5)
/* ethernet reset control register */
-#define ETHSYS_RSTCTRL 0x34
-#define RSTCTRL_FE BIT(6)
-#define RSTCTRL_PPE BIT(31)
-
-/* SGMII subsystem config registers */
-/* Register to auto-negotiation restart */
-#define SGMSYS_PCS_CONTROL_1 0x0
-#define SGMII_AN_RESTART BIT(9)
-#define SGMII_ISOLATE BIT(10)
-#define SGMII_AN_ENABLE BIT(12)
-#define SGMII_LINK_STATYS BIT(18)
-#define SGMII_AN_ABILITY BIT(19)
-#define SGMII_AN_COMPLETE BIT(21)
-#define SGMII_PCS_FAULT BIT(23)
-#define SGMII_AN_EXPANSION_CLR BIT(30)
-
-/* Register to programmable link timer, the unit in 2 * 8ns */
-#define SGMSYS_PCS_LINK_TIMER 0x18
-#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0))
-
-/* Register to control remote fault */
-#define SGMSYS_SGMII_MODE 0x20
-#define SGMII_IF_MODE_BIT0 BIT(0)
-#define SGMII_SPEED_DUPLEX_AN BIT(1)
-#define SGMII_SPEED_10 0x0
-#define SGMII_SPEED_100 BIT(2)
-#define SGMII_SPEED_1000 BIT(3)
-#define SGMII_DUPLEX_FULL BIT(4)
-#define SGMII_IF_MODE_BIT5 BIT(5)
-#define SGMII_REMOTE_FAULT_DIS BIT(8)
-#define SGMII_CODE_SYNC_SET_VAL BIT(9)
-#define SGMII_CODE_SYNC_SET_EN BIT(10)
-#define SGMII_SEND_AN_ERROR_EN BIT(11)
-#define SGMII_IF_MODE_MASK GENMASK(5, 1)
-
-/* Register to set SGMII speed, ANA RG_ Control Signals III*/
-#define SGMSYS_ANA_RG_CS3 0x2028
-#define RG_PHY_SPEED_MASK (BIT(2) | BIT(3))
-#define RG_PHY_SPEED_1_25G 0x0
-#define RG_PHY_SPEED_3_125G BIT(2)
-
-/* Register to power up QPHY */
-#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8
-#define SGMII_PHYA_PWD BIT(4)
+#define ETHSYS_RSTCTRL 0x34
+#define RSTCTRL_FE BIT(6)
+#define RSTCTRL_PPE0 BIT(31)
+#define RSTCTRL_PPE0_V2 BIT(30)
+#define RSTCTRL_PPE1 BIT(31)
+#define RSTCTRL_ETH BIT(23)
+
+/* ethernet reset check idle register */
+#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
+
+/* ethernet dma channel agent map */
+#define ETHSYS_DMA_AG_MAP 0x408
+#define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
+#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
+#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
/* Infrasys subsystem config registers */
#define INFRA_MISC2 0x70c
#define CO_QPHY_SEL BIT(0)
#define GEPHY_MAC_SEL BIT(1)
+/* Top misc registers */
+#define USB_PHY_SWITCH_REG 0x218
+#define QPHY_SEL_MASK GENMASK(1, 0)
+#define SGMII_QPHY_SEL 0x2
+
/* MT7628/88 specific stuff */
#define MT7628_PDMA_OFFSET 0x0800
#define MT7628_SDM_OFFSET 0x0c00
@@ -522,6 +539,17 @@
#define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c)
#define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110)
+#define MTK_FE_CDM1_FSM 0x220
+#define MTK_FE_CDM2_FSM 0x224
+#define MTK_FE_CDM3_FSM 0x238
+#define MTK_FE_CDM4_FSM 0x298
+#define MTK_FE_CDM5_FSM 0x318
+#define MTK_FE_CDM6_FSM 0x328
+#define MTK_FE_GDM1_FSM 0x228
+#define MTK_FE_GDM2_FSM 0x22C
+
+#define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100))
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -529,6 +557,17 @@ struct mtk_rx_dma {
unsigned int rxd4;
} __packed __aligned(4);
+struct mtk_rx_dma_v2 {
+ unsigned int rxd1;
+ unsigned int rxd2;
+ unsigned int rxd3;
+ unsigned int rxd4;
+ unsigned int rxd5;
+ unsigned int rxd6;
+ unsigned int rxd7;
+ unsigned int rxd8;
+} __packed __aligned(4);
+
struct mtk_tx_dma {
unsigned int txd1;
unsigned int txd2;
@@ -536,9 +575,30 @@ struct mtk_tx_dma {
unsigned int txd4;
} __packed __aligned(4);
+struct mtk_tx_dma_v2 {
+ unsigned int txd1;
+ unsigned int txd2;
+ unsigned int txd3;
+ unsigned int txd4;
+ unsigned int txd5;
+ unsigned int txd6;
+ unsigned int txd7;
+ unsigned int txd8;
+} __packed __aligned(4);
+
struct mtk_eth;
struct mtk_mac;
+struct mtk_xdp_stats {
+ u64 rx_xdp_redirect;
+ u64 rx_xdp_pass;
+ u64 rx_xdp_drop;
+ u64 rx_xdp_tx;
+ u64 rx_xdp_tx_errors;
+ u64 tx_xdp_xmit;
+ u64 tx_xdp_xmit_errors;
+};
+
/* struct mtk_hw_stats - the structure that holds the traffic statistics.
* @stats_lock: make sure that stats operations are atomic
* @reg_offset: the status register offset of the SoC
@@ -562,6 +622,8 @@ struct mtk_hw_stats {
u64 rx_checksum_errors;
u64 rx_flow_control_packets;
+ struct mtk_xdp_stats xdp_stats;
+
spinlock_t stats_lock;
u32 reg_offset;
struct u64_stats_sync syncp;
@@ -603,6 +665,10 @@ enum mtk_clks_map {
MTK_CLK_SGMII2_CDR_FB,
MTK_CLK_SGMII_CK,
MTK_CLK_ETH2PLL,
+ MTK_CLK_WOCPU0,
+ MTK_CLK_WOCPU1,
+ MTK_CLK_NETSYS0,
+ MTK_CLK_NETSYS1,
MTK_CLK_MAX
};
@@ -633,12 +699,39 @@ enum mtk_clks_map {
BIT(MTK_CLK_SGMII2_CDR_FB) | \
BIT(MTK_CLK_SGMII_CK) | \
BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP))
+#define MT7981_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
+ BIT(MTK_CLK_WOCPU0) | \
+ BIT(MTK_CLK_SGMII_TX_250M) | \
+ BIT(MTK_CLK_SGMII_RX_250M) | \
+ BIT(MTK_CLK_SGMII_CDR_REF) | \
+ BIT(MTK_CLK_SGMII_CDR_FB) | \
+ BIT(MTK_CLK_SGMII2_TX_250M) | \
+ BIT(MTK_CLK_SGMII2_RX_250M) | \
+ BIT(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT(MTK_CLK_SGMII2_CDR_FB) | \
+ BIT(MTK_CLK_SGMII_CK))
+#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \
+ BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \
+ BIT(MTK_CLK_SGMII_TX_250M) | \
+ BIT(MTK_CLK_SGMII_RX_250M) | \
+ BIT(MTK_CLK_SGMII_CDR_REF) | \
+ BIT(MTK_CLK_SGMII_CDR_FB) | \
+ BIT(MTK_CLK_SGMII2_TX_250M) | \
+ BIT(MTK_CLK_SGMII2_RX_250M) | \
+ BIT(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT(MTK_CLK_SGMII2_CDR_FB))
enum mtk_dev_state {
MTK_HW_INIT,
MTK_RESETTING
};
+enum mtk_tx_buf_type {
+ MTK_TYPE_SKB,
+ MTK_TYPE_XDP_TX,
+ MTK_TYPE_XDP_NDO,
+};
+
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
 * by the TX descriptors
* @skb: The SKB pointer of the packet being sent
@@ -648,7 +741,9 @@ enum mtk_dev_state {
* @dma_len1: The length of the second segment
*/
struct mtk_tx_buf {
- struct sk_buff *skb;
+ enum mtk_tx_buf_type type;
+ void *data;
+
u32 flags;
DEFINE_DMA_UNMAP_ADDR(dma_addr0);
DEFINE_DMA_UNMAP_LEN(dma_len0);
@@ -668,7 +763,7 @@ struct mtk_tx_buf {
* are present
*/
struct mtk_tx_ring {
- struct mtk_tx_dma *dma;
+ void *dma;
struct mtk_tx_buf *buf;
dma_addr_t phys;
struct mtk_tx_dma *next_free;
@@ -698,7 +793,7 @@ enum mtk_rx_flags {
* @calc_idx: The current head of ring
*/
struct mtk_rx_ring {
- struct mtk_rx_dma *dma;
+ void *dma;
u8 **data;
dma_addr_t phys;
u16 frag_size;
@@ -707,6 +802,9 @@ struct mtk_rx_ring {
bool calc_idx_update;
u16 calc_idx;
u32 crx_idx_reg;
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_q;
};
enum mkt_eth_capabilities {
@@ -722,7 +820,10 @@ enum mkt_eth_capabilities {
MTK_SHARED_INT_BIT,
MTK_TRGMII_MT7621_CLK_BIT,
MTK_QDMA_BIT,
+ MTK_NETSYS_V2_BIT,
MTK_SOC_MT7628_BIT,
+ MTK_RSTCTRL_PPE1_BIT,
+ MTK_U3_COPHY_V2_BIT,
/* MUX BITS*/
MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
@@ -754,7 +855,10 @@ enum mkt_eth_capabilities {
#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT)
#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT)
#define MTK_QDMA BIT(MTK_QDMA_BIT)
+#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT)
#define MTK_SOC_MT7628 BIT(MTK_SOC_MT7628_BIT)
+#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT)
+#define MTK_U3_COPHY_V2 BIT(MTK_U3_COPHY_V2_BIT)
#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
@@ -827,8 +931,75 @@ enum mkt_eth_capabilities {
MTK_MUX_U3_GMAC2_TO_QPHY | \
MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
+#define MT7981_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+ MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \
+ MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
+
+#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+ MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1)
+
+struct mtk_tx_dma_desc_info {
+ dma_addr_t addr;
+ u32 size;
+ u16 vlan_tci;
+ u16 qid;
+ u8 gso:1;
+ u8 csum:1;
+ u8 vlan:1;
+ u8 first:1;
+ u8 last:1;
+};
+
+struct mtk_reg_map {
+ u32 tx_irq_mask;
+ u32 tx_irq_status;
+ struct {
+ u32 rx_ptr; /* rx base pointer */
+ u32 rx_cnt_cfg; /* rx max count configuration */
+ u32 pcrx_ptr; /* rx cpu pointer */
+ u32 glo_cfg; /* global configuration */
+ u32 rst_idx; /* reset index */
+ u32 delay_irq; /* delay interrupt */
+ u32 irq_status; /* interrupt status */
+ u32 irq_mask; /* interrupt mask */
+ u32 adma_rx_dbg0;
+ u32 int_grp;
+ } pdma;
+ struct {
+ u32 qtx_cfg; /* tx queue configuration */
+ u32 qtx_sch; /* tx queue scheduler configuration */
+ u32 rx_ptr; /* rx base pointer */
+ u32 rx_cnt_cfg; /* rx max count configuration */
+ u32 qcrx_ptr; /* rx cpu pointer */
+ u32 glo_cfg; /* global configuration */
+ u32 rst_idx; /* reset index */
+ u32 delay_irq; /* delay interrupt */
+ u32 fc_th; /* flow control */
+ u32 int_grp;
+ u32 hred; /* interrupt mask */
+ u32 ctx_ptr; /* tx acquire cpu pointer */
+ u32 dtx_ptr; /* tx acquire dma pointer */
+ u32 crx_ptr; /* tx release cpu pointer */
+ u32 drx_ptr; /* tx release dma pointer */
+ u32 fq_head; /* fq head pointer */
+ u32 fq_tail; /* fq tail pointer */
+ u32 fq_count; /* fq free page count */
+ u32 fq_blen; /* fq free page buffer length */
+ u32 tx_sch_rate; /* tx scheduler rate control registers */
+ } qdma;
+ u32 gdm1_cnt;
+ u32 gdma_to_ppe;
+ u32 ppe_base;
+ u32 wdma_base[2];
+ u32 pse_iq_sta;
+ u32 pse_oq_sta;
+};
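struct mtk_reg_map turns the generation-specific PDMA/QDMA layouts into data rather than fixed #defines; callers go through eth->soc->reg_map instead of hard-coded offsets. A minimal sketch of how that indirection is consumed (illustrative, not a hunk from this patch):

static void example_kick_rx(struct mtk_eth *eth, u32 idx)
{
	const struct mtk_reg_map *reg_map = eth->soc->reg_map;

	/* the same code path now serves both the legacy and NETSYS v2 maps */
	mtk_w32(eth, idx, reg_map->pdma.rst_idx);
}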
+
/* struct mtk_eth_data - This is the structure holding all differences
 * among various platforms
+ * @reg_map SoC register map.
* @ana_rgc3: The offset for register ANA_RGC3 related to
* sgmiisys syscon
* @caps Flags shown the extra capability for the SoC
@@ -837,42 +1008,47 @@ enum mkt_eth_capabilities {
* the target SoC
* @required_pctl A bool value to show whether the SoC requires
* the extra setup for those pins used by GMAC.
+ * @hash_offset Flow table hash offset.
+ * @foe_entry_size Foe table entry size.
+ * @has_accounting Bool indicating support for accounting of
+ * offloaded flows.
+ * @txd_size Tx DMA descriptor size.
+ * @rxd_size Rx DMA descriptor size.
+ * @rx_irq_done_mask Rx irq done register mask.
+ * @rx_dma_l4_valid Rx DMA valid register mask.
+ * @dma_max_len Max DMA tx/rx buffer length.
+ * @dma_len_offset Tx/Rx DMA length field offset.
*/
struct mtk_soc_data {
+ const struct mtk_reg_map *reg_map;
u32 ana_rgc3;
u32 caps;
u32 required_clks;
bool required_pctl;
u8 offload_version;
+ u8 hash_offset;
+ u16 foe_entry_size;
netdev_features_t hw_features;
+ bool has_accounting;
+ struct {
+ u32 txd_size;
+ u32 rxd_size;
+ u32 rx_irq_done_mask;
+ u32 rx_dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+ } txrx;
};
+#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
+
/* currently no SoC has more than 2 macs */
#define MTK_MAX_DEVS 2
-#define MTK_SGMII_PHYSPEED_AN BIT(31)
-#define MTK_SGMII_PHYSPEED_MASK GENMASK(2, 0)
-#define MTK_SGMII_PHYSPEED_1000 BIT(0)
-#define MTK_SGMII_PHYSPEED_2500 BIT(1)
-#define MTK_HAS_FLAGS(flags, _x) (((flags) & (_x)) == (_x))
-
-/* struct mtk_sgmii - This is the structure holding sgmii regmap and its
- * characteristics
- * @regmap: The register map pointing at the range used to setup
- * SGMII modes
- * @flags: The enum refers to which mode the sgmii wants to run on
- * @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
- */
-
-struct mtk_sgmii {
- struct regmap *regmap[MTK_MAX_DEVS];
- u32 flags[MTK_MAX_DEVS];
- u32 ana_rgc3;
-};
-
/* struct mtk_eth - This is the main data structure for holding the state
* of the driver
* @dev: The device pointer
+ * @dma_dev: The device pointer used for dma mapping/alloc
* @base: The mapped register i/o base
* @page_lock: Make sure that register operations are atomic
 * @tx_irq_lock: Make sure that IRQ register operations are atomic
@@ -888,6 +1064,7 @@ struct mtk_sgmii {
* MII modes
* @infra: The register map pointing at the range used to setup
* SGMII and GePHY path
+ * @sgmii_pcs: Pointers to mtk-pcs-lynxi phylink_pcs instances
* @pctl: The register map pointing at the range used to setup
* GMAC port drive/slew values
* @dma_refcnt: track how many netdevs are using the DMA engine
@@ -916,6 +1093,7 @@ struct mtk_sgmii {
struct mtk_eth {
struct device *dev;
+ struct device *dma_dev;
void __iomem *base;
spinlock_t page_lock;
spinlock_t tx_irq_lock;
@@ -927,8 +1105,8 @@ struct mtk_eth {
u32 msg_enable;
unsigned long sysclk;
struct regmap *ethsys;
- struct regmap *infra;
- struct mtk_sgmii *sgmii;
+ struct regmap *infra;
+ struct phylink_pcs *sgmii_pcs[MTK_MAX_DEVS];
struct regmap *pctl;
bool hwlro;
refcount_t dma_refcnt;
@@ -937,7 +1115,7 @@ struct mtk_eth {
struct mtk_rx_ring rx_ring_qdma;
struct napi_struct tx_napi;
struct napi_struct rx_napi;
- struct mtk_tx_dma *scratch_ring;
+ void *scratch_ring;
dma_addr_t phy_scratch_ring;
void *scratch_head;
struct clk *clks[MTK_CLK_MAX];
@@ -960,13 +1138,22 @@ struct mtk_eth {
u32 tx_bytes;
struct dim tx_dim;
- u32 tx_int_mask_reg;
- u32 tx_int_status_reg;
- u32 rx_dma_l4_valid;
int ip_align;
- struct mtk_ppe ppe;
+ struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS];
+
+ struct mtk_ppe *ppe[2];
struct rhashtable flow_table;
+
+ struct bpf_prog __rcu *prog;
+
+ struct {
+ struct delayed_work monitor_work;
+ u32 wdidx;
+ u8 wdma_hang_count;
+ u8 qdma_hang_count;
+ u8 adma_hang_count;
+ } reset;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
@@ -980,7 +1167,6 @@ struct mtk_eth {
struct mtk_mac {
int id;
phy_interface_t interface;
- unsigned int mode;
int speed;
struct device_node *of_node;
struct phylink *phylink;
@@ -989,24 +1175,99 @@ struct mtk_mac {
struct mtk_hw_stats *hw_stats;
__be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
int hwlro_ip_cnt;
+ unsigned int syscfg0;
+ struct notifier_block device_notifier;
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
extern const struct of_device_id of_mtk_match[];
+static inline struct mtk_foe_entry *
+mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
+{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+
+ return ppe->foe_table + hash * soc->foe_entry_size;
+}
+
+static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+
+ return MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
+static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_PPPOE_V2;
+
+ return MTK_FOE_IB1_BIND_PPPOE;
+}
+
+static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_VLAN_TAG_V2;
+
+ return MTK_FOE_IB1_BIND_VLAN_TAG;
+}
+
+static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;
+
+ return MTK_FOE_IB1_BIND_VLAN_LAYER;
+}
+
+static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB1_PACKET_TYPE_V2;
+
+ return MTK_FOE_IB1_PACKET_TYPE;
+}
+
+static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);
+
+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
+}
+
+static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ return MTK_FOE_IB2_MULTICAST_V2;
+
+ return MTK_FOE_IB2_MULTICAST;
+}
+
/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
-int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np,
- u32 ana_rgc3);
-int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id);
-int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
- const struct phylink_link_state *state);
-void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id);
-
int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
@@ -1014,6 +1275,10 @@ int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_eth_offload_init(struct mtk_eth *eth);
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data);
+int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
+ int ppe_index);
+void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
#endif /* MTK_ETH_H */
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 3ad10c793308..9129821f3ab8 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -6,9 +6,23 @@
#include <linux/iopoll.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/dst_metadata.h>
+#include <net/dsa.h>
+#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
#include "mtk_ppe_regs.h"
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params mtk_flow_l2_ht_params = {
+ .head_offset = offsetof(struct mtk_flow_entry, l2_node),
+ .key_offset = offsetof(struct mtk_flow_entry, data.bridge),
+ .key_len = offsetof(struct mtk_foe_bridge, key_end),
+ .automatic_shrinking = true,
+};
+
static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
{
writel(val, ppe->base + reg);
@@ -41,6 +55,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
return ppe_m32(ppe, reg, val, 0);
}
+static u32 mtk_eth_timestamp(struct mtk_eth *eth)
+{
+ return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
+}
+
static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
{
int ret;
@@ -56,6 +75,48 @@ static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
return ret;
}
+static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe)
+{
+ int ret;
+ u32 val;
+
+ ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
+ !(val & MTK_PPE_MIB_SER_CR_ST),
+ 20, MTK_PPE_WAIT_TIMEOUT_US);
+
+ if (ret)
+ dev_err(ppe->dev, "MIB table busy");
+
+ return ret;
+}
+
+static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
+{
+ u32 byte_cnt_low, byte_cnt_high, pkt_cnt_low, pkt_cnt_high;
+ u32 val, cnt_r0, cnt_r1, cnt_r2;
+ int ret;
+
+ val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
+ ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);
+
+ ret = mtk_ppe_mib_wait_busy(ppe);
+ if (ret)
+ return ret;
+
+ cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
+ cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
+ cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2);
+
+ byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
+ byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
+ pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
+ pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
+ *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
+ *packets = (pkt_cnt_high << 16) | pkt_cnt_low;
+
+ return 0;
+}
+
static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
{
ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
@@ -70,19 +131,12 @@ static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
enable * MTK_PPE_CACHE_CTL_EN);
}
-static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
+static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
{
u32 hv1, hv2, hv3;
u32 hash;
- switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
- case MTK_PPE_PKT_TYPE_BRIDGE:
- hv1 = e->bridge.src_mac_lo;
- hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
- hv2 = e->bridge.src_mac_hi >> 16;
- hv2 ^= e->bridge.dest_mac_lo;
- hv3 = e->bridge.dest_mac_hi;
- break;
+ switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
hv1 = e->ipv4.orig.ports;
@@ -111,16 +165,19 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
hash = (hash >> 24) | ((hash & 0xffffff) << 8);
hash ^= hv1 ^ hv2 ^ hv3;
hash ^= hash >> 16;
- hash <<= 1;
+ hash <<= (ffs(eth->soc->hash_offset) - 1);
hash &= MTK_PPE_ENTRIES - 1;
return hash;
}
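The fixed "hash <<= 1" becomes a shift derived from the per-SoC hash_offset, so one hash function serves both entry groupings. With the values set earlier in this patch:

/* hash_offset == 2 (v1 SoCs)   -> ffs(2) - 1 == 1 -> hash <<= 1
 * hash_offset == 4 (NETSYS v2) -> ffs(4) - 1 == 2 -> hash <<= 2
 *
 * i.e. each software hash bucket spans hash_offset consecutive FOE slots.
 */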
static inline struct mtk_foe_mac_info *
-mtk_foe_entry_l2(struct mtk_foe_entry *entry)
+mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.l2;
if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
return &entry->ipv6.l2;
@@ -129,9 +186,12 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
}
static inline u32 *
-mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
+mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.ib2;
if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
return &entry->ipv6.ib2;
@@ -139,27 +199,40 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
return &entry->ipv4.ib2;
}
-int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
- u8 pse_port, u8 *src_mac, u8 *dest_mac)
+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int type, int l4proto, u8 pse_port, u8 *src_mac,
+ u8 *dest_mac)
{
struct mtk_foe_mac_info *l2;
u32 ports_pad, val;
memset(entry, 0, sizeof(*entry));
- val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
- FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
- FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
- MTK_FOE_IB1_BIND_TTL |
- MTK_FOE_IB1_BIND_CACHE;
- entry->ib1 = val;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+ MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
+ entry->ib1 = val;
- val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
- FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
- FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);
+ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
+ } else {
+ int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;
+
+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+ MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
+ entry->ib1 = val;
+
+ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
+ }
if (is_multicast_ether_addr(dest_mac))
- val |= MTK_FOE_IB2_MULTICAST;
+ val |= mtk_get_ib2_multicast_mask(eth);
ports_pad = 0xa5a5a500 | (l4proto & 0xff);
if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
@@ -167,7 +240,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
entry->ipv6.ports = ports_pad;
- if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
+ ether_addr_copy(entry->bridge.src_mac, src_mac);
+ ether_addr_copy(entry->bridge.dest_mac, dest_mac);
+ entry->bridge.ib2 = val;
+ l2 = &entry->bridge.l2;
+ } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
entry->ipv6.ib2 = val;
l2 = &entry->ipv6.l2;
} else {
@@ -188,24 +266,30 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
return 0;
}
-int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, u8 port)
{
- u32 *ib2 = mtk_foe_entry_ib2(entry);
- u32 val;
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+ u32 val = *ib2;
- val = *ib2;
- val &= ~MTK_FOE_IB2_DEST_PORT;
- val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ val &= ~MTK_FOE_IB2_DEST_PORT_V2;
+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
+ } else {
+ val &= ~MTK_FOE_IB2_DEST_PORT;
+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
+ }
*ib2 = val;
return 0;
}
-int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, bool egress,
__be32 src_addr, __be16 src_port,
__be32 dest_addr, __be16 dest_port)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
struct mtk_ipv4_tuple *t;
switch (type) {
@@ -240,11 +324,12 @@ int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
return 0;
}
-int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry,
__be32 *src_addr, __be16 src_port,
__be32 *dest_addr, __be16 dest_port)
{
- int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
u32 *src, *dest;
int i;
@@ -275,39 +360,41 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
return 0;
}
-int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int port)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
l2->etype = BIT(port);
- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
- entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
+ entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
else
l2->etype |= BIT(8);
- entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;
+ entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);
return 0;
}
-int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int vid)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
- switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
+ switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
case 0:
- entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
- FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
+ mtk_prep_ib1_vlan_layer(eth, 1);
l2->vlan1 = vid;
return 0;
case 1:
- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
+ if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
l2->vlan1 = vid;
l2->etype |= BIT(8);
} else {
l2->vlan2 = vid;
- entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
+ entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
}
return 0;
default:
@@ -315,79 +402,534 @@ int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
}
}
-int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int sid)
{
- struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
- if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
- (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
+ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
+ (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
l2->etype = ETH_P_PPP_SES;
- entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
+ entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
l2->pppoe_id = sid;
return 0;
}
-static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int wdma_idx, int txq, int bss, int wcid)
+{
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
+ MTK_FOE_IB2_WDMA_WINFO_V2;
+ l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
+ } else {
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG;
+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+ if (wdma_idx)
+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+ }
+
+ return 0;
+}
+
+int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ unsigned int queue)
{
- return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
- FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ *ib2 &= ~MTK_FOE_IB2_QID_V2;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
+ *ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
+ } else {
+ *ib2 &= ~MTK_FOE_IB2_QID;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
+ *ib2 |= MTK_FOE_IB2_PSE_QOS;
+ }
+
+ return 0;
}
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
- u16 timestamp)
+static bool
+mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
+ struct mtk_foe_entry *data)
{
+ int type, len;
+
+ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
+ return false;
+
+ type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+ else
+ len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+
+ return !memcmp(&entry->data.data, &data->data, len - 4);
+}
+
+static void
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+
+ head = &entry->l2_flows;
+ hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
+ __mtk_foe_entry_clear(ppe, entry);
+ return;
+ }
+
+ hlist_del_init(&entry->list);
+ if (entry->hash != 0xffff) {
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
+
+ hwe->ib1 &= ~MTK_FOE_IB1_STATE;
+ hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
+ dma_wmb();
+ mtk_ppe_cache_clear(ppe);
+
+ if (ppe->accounting) {
+ struct mtk_foe_accounting *acct;
+
+ acct = ppe->acct_table + entry->hash * sizeof(*acct);
+ acct->packets = 0;
+ acct->bytes = 0;
+ }
+ }
+ entry->hash = 0xffff;
+
+ if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
+ return;
+
+ hlist_del_init(&entry->l2_data.list);
+ kfree(entry);
+}
+
+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
+{
+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
+ u16 now = mtk_eth_timestamp(ppe->eth);
+ u16 timestamp = ib1 & ib1_ts_mask;
+
+ if (timestamp > now)
+ return ib1_ts_mask + 1 - timestamp + now;
+ else
+ return now - timestamp;
+}
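The helper above has to tolerate the hardware bind-timestamp counter wrapping between bind time and now. A small worked example of the modular arithmetic, with an 8-bit mask chosen purely for illustration (the real field width comes from mtk_get_ib1_ts_mask()):

/* mask = 0xff, timestamp = 0xf0 (bound just before wrap), now = 0x10
 *   timestamp > now, so
 *   idle = mask + 1 - timestamp + now = 0x100 - 0xf0 + 0x10 = 0x20 ticks
 * which matches the distance the counter actually travelled.
 */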
+
+static void
+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
+ struct mtk_flow_entry *cur;
struct mtk_foe_entry *hwe;
- u32 hash;
+ struct hlist_node *tmp;
+ int idle;
- timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
- entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
- entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+ int cur_idle;
+ u32 ib1;
- hash = mtk_ppe_hash_entry(entry);
- hwe = &ppe->foe_table[hash];
- if (!mtk_foe_entry_usable(hwe)) {
- hwe++;
- hash++;
+ hwe = mtk_foe_get_entry(ppe, cur->hash);
+ ib1 = READ_ONCE(hwe->ib1);
- if (!mtk_foe_entry_usable(hwe))
- return -ENOSPC;
+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
+ cur->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, cur);
+ continue;
+ }
+
+ cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+ if (cur_idle >= idle)
+ continue;
+
+ idle = cur_idle;
+ entry->data.ib1 &= ~ib1_ts_mask;
+ entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
+ }
+}
+
+static void
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct mtk_foe_entry foe = {};
+ struct mtk_foe_entry *hwe;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ mtk_flow_entry_update_l2(ppe, entry);
+ goto out;
+ }
+
+ if (entry->hash == 0xffff)
+ goto out;
+
+ hwe = mtk_foe_get_entry(ppe, entry->hash);
+ memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
+ if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
+ entry->hash = 0xffff;
+ goto out;
}
- memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
+ entry->data.ib1 = foe.ib1;
+
+out:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static void
+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+ u16 hash)
+{
+ struct mtk_eth *eth = ppe->eth;
+ u16 timestamp = mtk_eth_timestamp(eth);
+ struct mtk_foe_entry *hwe;
+ u32 val;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
+ timestamp);
+ } else {
+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
+ timestamp);
+ }
+
+ hwe = mtk_foe_get_entry(ppe, hash);
+ memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
wmb();
hwe->ib1 = entry->ib1;
+ if (ppe->accounting) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ val = MTK_FOE_IB2_MIB_CNT_V2;
+ else
+ val = MTK_FOE_IB2_MIB_CNT;
+ *mtk_foe_entry_ib2(eth, hwe) |= val;
+ }
+
dma_wmb();
mtk_ppe_cache_clear(ppe);
+}
- return hash;
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ spin_lock_bh(&ppe_lock);
+ __mtk_foe_entry_clear(ppe, entry);
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct mtk_flow_entry *prev;
+
+ entry->type = MTK_FLOW_TYPE_L2;
+
+ prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+ if (likely(!prev))
+ return 0;
+
+ if (IS_ERR(prev))
+ return PTR_ERR(prev);
+
+ return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
+ &entry->l2_node, mtk_flow_l2_ht_params);
+}
+
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
+ u32 hash;
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return mtk_foe_entry_commit_l2(ppe, entry);
+
+ hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
+ entry->hash = 0xffff;
+ spin_lock_bh(&ppe_lock);
+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+}
+
+static void
+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+ u16 hash)
+{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ struct mtk_flow_entry *flow_info;
+ struct mtk_foe_entry foe = {}, *hwe;
+ struct mtk_foe_mac_info *l2;
+ u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
+ int type;
+
+ flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
+ if (!flow_info)
+ return;
+
+ flow_info->l2_data.base_flow = entry;
+ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
+ flow_info->hash = hash;
+ hlist_add_head(&flow_info->list,
+ &ppe->foe_flow[hash / soc->hash_offset]);
+ hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+
+ hwe = mtk_foe_get_entry(ppe, hash);
+ memcpy(&foe, hwe, soc->foe_entry_size);
+ foe.ib1 &= ib1_mask;
+ foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+
+ l2 = mtk_foe_entry_l2(ppe->eth, &foe);
+ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
+
+ type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
+ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+ else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
+ l2->etype = ETH_P_IPV6;
+
+ *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
+
+ __mtk_foe_entry_commit(ppe, &foe, hash);
+}
+
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
+ struct mtk_flow_entry *entry;
+ struct mtk_foe_bridge key = {};
+ struct hlist_node *n;
+ struct ethhdr *eh;
+ bool found = false;
+ u8 *tag;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
+ goto out;
+
+ hlist_for_each_entry_safe(entry, n, head, list) {
+ if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
+ if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
+ MTK_FOE_STATE_BIND))
+ continue;
+
+ entry->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, entry);
+ continue;
+ }
+
+ if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
+ if (entry->hash != 0xffff)
+ entry->hash = 0xffff;
+ continue;
+ }
+
+ entry->hash = hash;
+ __mtk_foe_entry_commit(ppe, &entry->data, hash);
+ found = true;
+ }
+
+ if (found)
+ goto out;
+
+ eh = eth_hdr(skb);
+ ether_addr_copy(key.dest_mac, eh->h_dest);
+ ether_addr_copy(key.src_mac, eh->h_source);
+ tag = skb->data - 2;
+ key.vlan = 0;
+ switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_NET_DSA)
+ case htons(ETH_P_XDSA):
+ if (!netdev_uses_dsa(skb->dev) ||
+ skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ goto out;
+
+ if (!skb_metadata_dst(skb))
+ tag += 4;
+
+ if (get_unaligned_be16(tag) != ETH_P_8021Q)
+ break;
+
+ fallthrough;
+#endif
+ case htons(ETH_P_8021Q):
+ key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
+ break;
+ default:
+ break;
+ }
+
+ entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
+ if (!entry)
+ goto out;
+
+ mtk_foe_entry_commit_subflow(ppe, entry, hash);
+
+out:
+ spin_unlock_bh(&ppe_lock);
}
-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
- int version)
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
- struct mtk_foe_entry *foe;
+ mtk_flow_entry_update(ppe, entry);
+
+ return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+}
+
+int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
+{
+ if (!ppe)
+ return -EINVAL;
+
+ /* disable KA */
+ ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
+ ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
+ ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
+ usleep_range(10000, 11000);
+
+ /* set KA timer to maximum */
+ ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
+ ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);
+
+ /* set KA tick select */
+ ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
+ ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
+ usleep_range(10000, 11000);
+
+ /* disable scan mode */
+ ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
+ usleep_range(10000, 11000);
+
+ return mtk_ppe_wait_busy(ppe);
+}
+
+struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
+ struct mtk_foe_accounting *diff)
+{
+ struct mtk_foe_accounting *acct;
+ int size = sizeof(struct mtk_foe_accounting);
+ u64 bytes, packets;
+
+ if (!ppe->accounting)
+ return NULL;
+
+ if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
+ return NULL;
+
+ acct = ppe->acct_table + index * size;
+
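+ /* the hardware MIB counters are read-clear, so fold each readout into
+ * the software accumulator; @diff, if provided, returns just the delta.
+ */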
+ acct->bytes += bytes;
+ acct->packets += packets;
+
+ if (diff) {
+ diff->bytes = bytes;
+ diff->packets = packets;
+ }
+
+ return acct;
+}
+
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
+{
+ bool accounting = eth->soc->has_accounting;
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_foe_accounting *acct;
+ struct device *dev = eth->dev;
+ struct mtk_mib_entry *mib;
+ struct mtk_ppe *ppe;
+ u32 foe_flow_size;
+ void *foe;
+
+ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+ return NULL;
+
+ rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
/* need to allocate a separate device, since its PPE DMA access is
* not coherent.
*/
ppe->base = base;
+ ppe->eth = eth;
ppe->dev = dev;
- ppe->version = version;
+ ppe->version = eth->soc->offload_version;
+ ppe->accounting = accounting;
- foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
+ foe = dmam_alloc_coherent(ppe->dev,
+ MTK_PPE_ENTRIES * soc->foe_entry_size,
&ppe->foe_phys, GFP_KERNEL);
if (!foe)
- return -ENOMEM;
+ goto err_free_l2_flows;
ppe->foe_table = foe;
- mtk_ppe_debugfs_init(ppe);
+ foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
+ sizeof(*ppe->foe_flow);
+ ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
+ if (!ppe->foe_flow)
+ goto err_free_l2_flows;
- return 0;
+ if (accounting) {
+ mib = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*mib),
+ &ppe->mib_phys, GFP_KERNEL);
+ if (!mib)
+ return NULL;
+
+ ppe->mib_table = mib;
+
+ acct = devm_kzalloc(dev, MTK_PPE_ENTRIES * sizeof(*acct),
+ GFP_KERNEL);
+
+ if (!acct)
+ return NULL;
+
+ ppe->acct_table = acct;
+ }
+
+ mtk_ppe_debugfs_init(ppe, index);
+
+ return ppe;
+
+err_free_l2_flows:
+ rhashtable_destroy(&ppe->l2_flows);
+ return NULL;
+}
+
+void mtk_ppe_deinit(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
+ if (!eth->ppe[i])
+ return;
+ rhashtable_destroy(&eth->ppe[i]->l2_flows);
+ }
}
static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
@@ -395,21 +937,30 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
int i, k;
- memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(ppe->foe_table));
+ memset(ppe->foe_table, 0,
+ MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);
if (!IS_ENABLED(CONFIG_SOC_MT7621))
return;
/* skip all entries that cross the 1024 byte boundary */
- for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
- for (k = 0; k < ARRAY_SIZE(skip); k++)
- ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
+ for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
+ for (k = 0; k < ARRAY_SIZE(skip); k++) {
+ struct mtk_foe_entry *hwe;
+
+ hwe = mtk_foe_get_entry(ppe, i + skip[k]);
+ hwe->ib1 |= MTK_FOE_IB1_STATIC;
+ }
+ }
}
-int mtk_ppe_start(struct mtk_ppe *ppe)
+void mtk_ppe_start(struct mtk_ppe *ppe)
{
u32 val;
+ if (!ppe)
+ return;
+
mtk_ppe_init_foe_table(ppe);
ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
@@ -428,6 +979,8 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
MTK_PPE_ENTRIES_SHIFT);
+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_PPE_TB_CFG_INFO_SEL;
ppe_w32(ppe, MTK_PPE_TB_CFG, val);
ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
@@ -435,16 +988,21 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
mtk_ppe_cache_enable(ppe, true);
- val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
- MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
- MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
+ val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
MTK_PPE_FLOW_CFG_IP6_6RD |
MTK_PPE_FLOW_CFG_IP4_NAT |
MTK_PPE_FLOW_CFG_IP4_NAPT |
MTK_PPE_FLOW_CFG_IP4_DSLITE |
- MTK_PPE_FLOW_CFG_L2_BRIDGE |
MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
+ MTK_PPE_MD_TOAP_BYP_CRSN1 |
+ MTK_PPE_MD_TOAP_BYP_CRSN2 |
+ MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
+ else
+ val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
+ MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
@@ -479,7 +1037,20 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
- return 0;
+ if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) {
+ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
+ ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
+ }
+
+ if (ppe->accounting && ppe->mib_phys) {
+ ppe_w32(ppe, MTK_PPE_MIB_TB_BASE, ppe->mib_phys);
+ ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_EN,
+ MTK_PPE_MIB_CFG_EN);
+ ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_RD_CLR,
+ MTK_PPE_MIB_CFG_RD_CLR);
+ ppe_m32(ppe, MTK_PPE_MIB_CACHE_CTL, MTK_PPE_MIB_CACHE_CTL_EN,
+ MTK_PPE_MIB_CFG_RD_CLR);
+ }
}
int mtk_ppe_stop(struct mtk_ppe *ppe)
@@ -487,9 +1058,15 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
u32 val;
int i;
- for (i = 0; i < MTK_PPE_ENTRIES; i++)
- ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
- MTK_FOE_STATE_INVALID);
+ if (!ppe)
+ return 0;
+
+ for (i = 0; i < MTK_PPE_ENTRIES; i++) {
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);
+
+ hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
+ MTK_FOE_STATE_INVALID);
+ }
mtk_ppe_cache_enable(ppe, false);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 242fb8f2ae65..e51de31a52ec 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -6,8 +6,7 @@
#include <linux/kernel.h>
#include <linux/bitfield.h>
-
-#define MTK_ETH_PPE_BASE 0xc00
+#include <linux/rhashtable.h>
#define MTK_PPE_ENTRIES_SHIFT 3
#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
@@ -33,6 +32,15 @@
#define MTK_FOE_IB1_UDP BIT(30)
#define MTK_FOE_IB1_STATIC BIT(31)
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB1_BIND_TIMESTAMP_V2 GENMASK(7, 0)
+#define MTK_FOE_IB1_BIND_VLAN_LAYER_V2 GENMASK(16, 14)
+#define MTK_FOE_IB1_BIND_PPPOE_V2 BIT(17)
+#define MTK_FOE_IB1_BIND_VLAN_TAG_V2 BIT(18)
+#define MTK_FOE_IB1_BIND_CACHE_V2 BIT(20)
+#define MTK_FOE_IB1_BIND_TTL_V2 BIT(22)
+#define MTK_FOE_IB1_PACKET_TYPE_V2 GENMASK(27, 23)
+
enum {
MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0,
MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1,
@@ -47,20 +55,35 @@ enum {
#define MTK_FOE_IB2_PSE_QOS BIT(4)
#define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
#define MTK_FOE_IB2_MULTICAST BIT(8)
+#define MTK_FOE_IB2_MIB_CNT BIT(10)
-#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12)
-#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16)
-#define MTK_FOE_IB2_WHNAT_NAT BIT(17)
+#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12)
+#define MTK_FOE_IB2_MIB_CNT_V2 BIT(15)
+#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16)
+#define MTK_FOE_IB2_WDMA_WINFO BIT(17)
#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
+#define MTK_FOE_IB2_RX_IDX GENMASK(18, 17)
#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18)
#define MTK_FOE_IB2_DSCP GENMASK(31, 24)
-#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0)
-#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6)
-#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14)
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB2_QID_V2 GENMASK(6, 0)
+#define MTK_FOE_IB2_PORT_MG_V2 BIT(7)
+#define MTK_FOE_IB2_PSE_QOS_V2 BIT(8)
+#define MTK_FOE_IB2_DEST_PORT_V2 GENMASK(12, 9)
+#define MTK_FOE_IB2_MULTICAST_V2 BIT(13)
+#define MTK_FOE_IB2_WDMA_WINFO_V2 BIT(19)
+#define MTK_FOE_IB2_PORT_AG_V2 GENMASK(23, 20)
+
+#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0)
+#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6)
+#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14)
+
+#define MTK_FOE_WINFO_BSS GENMASK(5, 0)
+#define MTK_FOE_WINFO_WCID GENMASK(15, 6)
enum {
MTK_FOE_STATE_INVALID,
@@ -82,21 +105,21 @@ struct mtk_foe_mac_info {
u16 pppoe_id;
u16 src_mac_lo;
+
+ u16 minfo;
+ u16 winfo;
};
+/* software-only entry type */
struct mtk_foe_bridge {
- u32 dest_mac_hi;
+ u8 dest_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ u16 vlan;
- u16 src_mac_lo;
- u16 dest_mac_lo;
-
- u32 src_mac_hi;
+ struct {} key_end;
u32 ib2;
- u32 _rsv[5];
-
- u32 udf_tsid;
struct mtk_foe_mac_info l2;
};
@@ -202,7 +225,7 @@ struct mtk_foe_entry {
struct mtk_foe_ipv4_dslite dslite;
struct mtk_foe_ipv6 ipv6;
struct mtk_foe_ipv6_6rd ipv6_6rd;
- u32 data[19];
+ u32 data[23];
};
};
@@ -235,54 +258,128 @@ enum {
MTK_PPE_CPU_REASON_INVALID = 0x1f,
};
+enum {
+ MTK_FLOW_TYPE_L4,
+ MTK_FLOW_TYPE_L2,
+ MTK_FLOW_TYPE_L2_SUBFLOW,
+};
+
+struct mtk_flow_entry {
+ union {
+ struct hlist_node list;
+ struct {
+ struct rhash_head l2_node;
+ struct hlist_head l2_flows;
+ };
+ };
+ u8 type;
+ s8 wed_index;
+ u8 ppe_index;
+ u16 hash;
+ union {
+ struct mtk_foe_entry data;
+ struct {
+ struct mtk_flow_entry *base_flow;
+ struct hlist_node list;
+ } l2_data;
+ };
+ struct rhash_head node;
+ unsigned long cookie;
+};
+
+struct mtk_mib_entry {
+ u32 byt_cnt_l;
+ u16 byt_cnt_h;
+ u32 pkt_cnt_l;
+ u8 pkt_cnt_h;
+ u8 _rsv0;
+ u32 _rsv1;
+} __packed;
+
+struct mtk_foe_accounting {
+ u64 bytes;
+ u64 packets;
+};
+
struct mtk_ppe {
+ struct mtk_eth *eth;
struct device *dev;
void __iomem *base;
int version;
+ char dirname[5];
+ bool accounting;
- struct mtk_foe_entry *foe_table;
+ void *foe_table;
dma_addr_t foe_phys;
+ struct mtk_mib_entry *mib_table;
+ dma_addr_t mib_phys;
+
+ u16 foe_check_time[MTK_PPE_ENTRIES];
+ struct hlist_head *foe_flow;
+
+ struct rhashtable l2_flows;
+
void *acct_table;
};
-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
- int version);
-int mtk_ppe_start(struct mtk_ppe *ppe);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index);
+
+void mtk_ppe_deinit(struct mtk_eth *eth);
+void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
+int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);
+
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
static inline void
-mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
+mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
{
- ppe->foe_table[hash].ib1 = 0;
- dma_wmb();
-}
+ u16 now, diff;
-static inline int
-mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash)
-{
- u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1);
+ if (!ppe)
+ return;
+
+ if (hash > MTK_PPE_HASH_MASK)
+ return;
- if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND)
- return -1;
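+ /* rate-limit the check to once per HZ / 10 jiffies per hash bucket */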
+ now = (u16)jiffies;
+ diff = now - ppe->foe_check_time[hash];
+ if (diff < HZ / 10)
+ return;
- return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1);
+ ppe->foe_check_time[hash] = now;
+ __mtk_ppe_check_skb(ppe, skb, hash);
}
-int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
- u8 pse_port, u8 *src_mac, u8 *dest_mac);
-int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port);
-int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig,
+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int type, int l4proto, u8 pse_port, u8 *src_mac,
+ u8 *dest_mac);
+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, u8 port);
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, bool orig,
__be32 src_addr, __be16 src_port,
__be32 dest_addr, __be16 dest_port);
-int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry,
__be32 *src_addr, __be16 src_port,
__be32 *dest_addr, __be16 dest_port);
-int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
-int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
-int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
- u16 timestamp);
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int port);
+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int vid);
+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int sid);
+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int wdma_idx, int txq, int bss, int wcid);
+int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ unsigned int queue);
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
+struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
+ struct mtk_foe_accounting *diff);
#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index 98b1d3577bcd..316fe2e70fea 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str(int type)
static const char * const type_str[] = {
[MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
[MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
- [MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
[MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
[MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
[MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
@@ -48,7 +47,7 @@ static const char *mtk_foe_pkt_type_str(int type)
static void
mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6)
{
- u32 n_addr[4];
+ __be32 n_addr[4];
int i;
if (!ipv6) {
@@ -80,9 +79,10 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
int i;
for (i = 0; i < MTK_PPE_ENTRIES; i++) {
- struct mtk_foe_entry *entry = &ppe->foe_table[i];
+ struct mtk_foe_entry *entry = mtk_foe_get_entry(ppe, i);
struct mtk_foe_mac_info *l2;
struct mtk_flow_addr_info ai = {};
+ struct mtk_foe_accounting *acct;
unsigned char h_source[ETH_ALEN];
unsigned char h_dest[ETH_ALEN];
int type, state;
@@ -96,6 +96,8 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
if (bind && state != MTK_FOE_STATE_BIND)
continue;
+ acct = mtk_foe_entry_get_mib(ppe, i, NULL);
+
type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
seq_printf(m, "%05x %s %7s", i,
mtk_foe_entry_state_str(state),
@@ -154,64 +156,39 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
*((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo);
seq_printf(m, " eth=%pM->%pM etype=%04x"
- " vlan=%d,%d ib1=%08x ib2=%08x\n",
+ " vlan=%d,%d ib1=%08x ib2=%08x"
+ " packets=%llu bytes=%llu\n",
h_source, h_dest, ntohs(l2->etype),
- l2->vlan1, l2->vlan2, entry->ib1, ib2);
+ l2->vlan1, l2->vlan2, entry->ib1, ib2,
+ acct ? acct->packets : 0, acct ? acct->bytes : 0);
}
return 0;
}
static int
-mtk_ppe_debugfs_foe_show_all(struct seq_file *m, void *private)
+mtk_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
{
return mtk_ppe_debugfs_foe_show(m, private, false);
}
+DEFINE_SHOW_ATTRIBUTE(mtk_ppe_debugfs_foe_all);
static int
-mtk_ppe_debugfs_foe_show_bind(struct seq_file *m, void *private)
+mtk_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
{
return mtk_ppe_debugfs_foe_show(m, private, true);
}
+DEFINE_SHOW_ATTRIBUTE(mtk_ppe_debugfs_foe_bind);
-static int
-mtk_ppe_debugfs_foe_open_all(struct inode *inode, struct file *file)
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index)
{
- return single_open(file, mtk_ppe_debugfs_foe_show_all,
- inode->i_private);
-}
-
-static int
-mtk_ppe_debugfs_foe_open_bind(struct inode *inode, struct file *file)
-{
- return single_open(file, mtk_ppe_debugfs_foe_show_bind,
- inode->i_private);
-}
-
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
-{
- static const struct file_operations fops_all = {
- .open = mtk_ppe_debugfs_foe_open_all,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
-
- static const struct file_operations fops_bind = {
- .open = mtk_ppe_debugfs_foe_open_bind,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- };
-
struct dentry *root;
- root = debugfs_create_dir("mtk_ppe", NULL);
- if (!root)
- return -ENOMEM;
+ snprintf(ppe->dirname, sizeof(ppe->dirname), "ppe%d", index);
- debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
- debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
+ root = debugfs_create_dir(ppe->dirname, NULL);
+ debugfs_create_file("entries", S_IRUGO, root, ppe, &mtk_ppe_debugfs_foe_all_fops);
+ debugfs_create_file("bind", S_IRUGO, root, ppe, &mtk_ppe_debugfs_foe_bind_fops);
return 0;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 7bb1f20002b5..02eebff02d45 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -6,10 +6,12 @@
#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
+#include "mtk_wed.h"
struct mtk_flow_data {
struct ethhdr eth;
@@ -19,11 +21,18 @@ struct mtk_flow_data {
__be32 src_addr;
__be32 dst_addr;
} v4;
+
+ struct {
+ struct in6_addr src_addr;
+ struct in6_addr dst_addr;
+ } v6;
};
__be16 src_port;
__be16 dst_port;
+ u16 vlan_in;
+
struct {
u16 id;
__be16 proto;
@@ -35,12 +44,6 @@ struct mtk_flow_data {
} pppoe;
};
-struct mtk_flow_entry {
- struct rhash_head node;
- unsigned long cookie;
- u16 hash;
-};
-
static const struct rhashtable_params mtk_flow_ht_params = {
.head_offset = offsetof(struct mtk_flow_entry, node),
.key_offset = offsetof(struct mtk_flow_entry, cookie),
@@ -48,19 +51,22 @@ static const struct rhashtable_params mtk_flow_ht_params = {
.automatic_shrinking = true,
};
-static u32
-mtk_eth_timestamp(struct mtk_eth *eth)
+static int
+mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+ struct mtk_flow_data *data, bool egress)
{
- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+ return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress,
+ data->v4.src_addr, data->src_port,
+ data->v4.dst_addr, data->dst_port);
}
static int
-mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
- bool egress)
+mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+ struct mtk_flow_data *data)
{
- return mtk_foe_entry_set_ipv4_tuple(foe, egress,
- data->v4.src_addr, data->src_port,
- data->v4.dst_addr, data->dst_port);
+ return mtk_foe_entry_set_ipv6_tuple(eth, foe,
+ data->v6.src_addr.s6_addr32, data->src_port,
+ data->v6.dst_addr.s6_addr32, data->dst_port);
}
static void
@@ -80,6 +86,35 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
memcpy(dest, src, act->mangle.mask ? 2 : 4);
}
+static int
+mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
+{
+ struct net_device_path_stack stack;
+ struct net_device_path *path;
+ int err;
+
+ if (!dev)
+ return -ENODEV;
+
+ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
+ return -1;
+
+ err = dev_fill_forward_path(dev, addr, &stack);
+ if (err)
+ return err;
+
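+ /* WED offload applies only when the last hop of the resolved
+ * forwarding path is a MediaTek WDMA (wireless) device.
+ */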
+ path = &stack.path[stack.num_paths - 1];
+ if (path->type != DEV_PATH_MTK_WDMA)
+ return -1;
+
+ info->wdma_idx = path->mtk_wdma.wdma_idx;
+ info->queue = path->mtk_wdma.queue;
+ info->bss = path->mtk_wdma.bss;
+ info->wcid = path->mtk_wdma.wcid;
+
+ return 0;
+}
+
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
@@ -139,7 +174,7 @@ mtk_flow_get_dsa_port(struct net_device **dev)
if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
return -ENODEV;
- *dev = dp->cpu_dp->master;
+ *dev = dsa_port_to_master(dp);
return dp->index;
#else
@@ -149,13 +184,34 @@ mtk_flow_get_dsa_port(struct net_device **dev)
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
- struct net_device *dev)
+ struct net_device *dev, const u8 *dest_mac,
+ int *wed_index)
{
- int pse_port, dsa_port;
+ struct mtk_wdma_info info = {};
+ int pse_port, dsa_port, queue;
+
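+ /* destination reachable through WED: record the WDMA target in the
+ * entry and steer it to the matching WDMA PSE port (8/9 on NETSYS v2,
+ * port 3 otherwise).
+ */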
+ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
+ mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
+ info.bss, info.wcid);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ switch (info.wdma_idx) {
+ case 0:
+ pse_port = 8;
+ break;
+ case 1:
+ pse_port = 9;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ pse_port = 3;
+ }
+ *wed_index = info.wdma_idx;
+ goto out;
+ }
dsa_port = mtk_flow_get_dsa_port(&dev);
- if (dsa_port >= 0)
- mtk_foe_entry_set_dsa(foe, dsa_port);
if (dev == eth->netdev[0])
pse_port = 1;
@@ -164,13 +220,23 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
else
return -EOPNOTSUPP;
- mtk_foe_entry_set_pse_port(foe, pse_port);
+ if (dsa_port >= 0) {
+ mtk_foe_entry_set_dsa(eth, foe, dsa_port);
+ queue = 3 + dsa_port;
+ } else {
+ queue = pse_port - 1;
+ }
+ mtk_foe_entry_set_queue(eth, foe, queue);
+
+out:
+ mtk_foe_entry_set_pse_port(eth, foe, pse_port);
return 0;
}
static int
-mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
+mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
+ int ppe_index)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_action_entry *act;
@@ -179,11 +245,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
struct net_device *odev = NULL;
struct mtk_flow_entry *entry;
int offload_type = 0;
+ int wed_index = -1;
u16 addr_type = 0;
- u32 timestamp;
u8 l4proto = 0;
int err = 0;
- int hash;
int i;
if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
@@ -215,9 +280,45 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
return -EOPNOTSUPP;
}
+ switch (addr_type) {
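+ /* addr_type == 0 means the rule carries no L3 address match, so
+ * offload it as a bridged L2 flow keyed on MAC addresses and VLAN.
+ */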
+ case 0:
+ offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan_in = match.key->vlan_id;
+ }
+ break;
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
flow_action_for_each(i, act, &rule->action) {
switch (act->id) {
case FLOW_ACTION_MANGLE:
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
mtk_flow_offload_mangle_eth(act, &data.eth);
break;
@@ -249,31 +350,25 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
}
}
- switch (addr_type) {
- case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
- offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
if (!is_valid_ether_addr(data.eth.h_source) ||
!is_valid_ether_addr(data.eth.h_dest))
return -EINVAL;
- err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
- data.eth.h_source,
- data.eth.h_dest);
+ err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0,
+ data.eth.h_source, data.eth.h_dest);
if (err)
return err;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports ports;
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
flow_rule_match_ports(rule, &ports);
data.src_port = ports.key->src;
data.dst_port = ports.key->dst;
- } else {
+ } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
return -EOPNOTSUPP;
}
@@ -285,13 +380,27 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
data.v4.src_addr = addrs.key->src;
data.v4.dst_addr = addrs.key->dst;
- mtk_flow_set_ipv4_addr(&foe, &data, false);
+ mtk_flow_set_ipv4_addr(eth, &foe, &data, false);
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs addrs;
+
+ flow_rule_match_ipv6_addrs(rule, &addrs);
+
+ data.v6.src_addr = addrs.key->src;
+ data.v6.dst_addr = addrs.key->dst;
+
+ mtk_flow_set_ipv6_addr(eth, &foe, &data);
}
flow_action_for_each(i, act, &rule->action) {
if (act->id != FLOW_ACTION_MANGLE)
continue;
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
switch (act->mangle.htype) {
case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
@@ -312,47 +421,57 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- err = mtk_flow_set_ipv4_addr(&foe, &data, true);
+ err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true);
if (err)
return err;
}
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ foe.bridge.vlan = data.vlan_in;
+
if (data.vlan.num == 1) {
if (data.vlan.proto != htons(ETH_P_8021Q))
return -EOPNOTSUPP;
- mtk_foe_entry_set_vlan(&foe, data.vlan.id);
+ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
}
if (data.pppoe.num == 1)
- mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
+ mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
- err = mtk_flow_set_output_device(eth, &foe, odev);
+ err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+ &wed_index);
if (err)
return err;
+ if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
+ return err;
+
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->cookie = f->cookie;
- timestamp = mtk_eth_timestamp(eth);
- hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
- if (hash < 0) {
- err = hash;
+ memcpy(&entry->data, &foe, sizeof(entry->data));
+ entry->wed_index = wed_index;
+ entry->ppe_index = ppe_index;
+
+ err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
+ if (err < 0)
goto free;
- }
- entry->hash = hash;
err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
mtk_flow_ht_params);
if (err < 0)
- goto clear_flow;
+ goto clear;
return 0;
-clear_flow:
- mtk_foe_entry_clear(&eth->ppe, hash);
+
+clear:
+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
free:
kfree(entry);
+ if (wed_index >= 0)
+ mtk_wed_flow_remove(wed_index);
return err;
}
@@ -366,9 +485,11 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
if (!entry)
return -ENOENT;
- mtk_foe_entry_clear(&eth->ppe, entry->hash);
+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
rhashtable_remove_fast(&eth->flow_table, &entry->node,
mtk_flow_ht_params);
+ if (entry->wed_index >= 0)
+ mtk_wed_flow_remove(entry->wed_index);
kfree(entry);
return 0;
@@ -378,7 +499,7 @@ static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
struct mtk_flow_entry *entry;
- int timestamp;
+ struct mtk_foe_accounting diff;
u32 idle;
entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
@@ -386,37 +507,30 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
if (!entry)
return -ENOENT;
- timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
- if (timestamp < 0)
- return -ETIMEDOUT;
-
- idle = mtk_eth_timestamp(eth) - timestamp;
+ idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
f->stats.lastused = jiffies - idle * HZ;
+ if (entry->hash != 0xFFFF &&
+ mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
+ &diff)) {
+ f->stats.pkts += diff.packets;
+ f->stats.bytes += diff.bytes;
+ }
+
return 0;
}
static DEFINE_MUTEX(mtk_flow_offload_mutex);
-static int
-mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
+ int ppe_index)
{
- struct flow_cls_offload *cls = type_data;
- struct net_device *dev = cb_priv;
- struct mtk_mac *mac = netdev_priv(dev);
- struct mtk_eth *eth = mac->hw;
int err;
- if (!tc_can_offload(dev))
- return -EOPNOTSUPP;
-
- if (type != TC_SETUP_CLSFLOWER)
- return -EOPNOTSUPP;
-
mutex_lock(&mtk_flow_offload_mutex);
switch (cls->command) {
case FLOW_CLS_REPLACE:
- err = mtk_flow_offload_replace(eth, cls);
+ err = mtk_flow_offload_replace(eth, cls, ppe_index);
break;
case FLOW_CLS_DESTROY:
err = mtk_flow_offload_destroy(eth, cls);
@@ -434,6 +548,26 @@ mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_pri
}
static int
+mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct flow_cls_offload *cls = type_data;
+ struct net_device *dev = cb_priv;
+ struct mtk_mac *mac;
+ struct mtk_eth *eth;
+
+ mac = netdev_priv(dev);
+ eth = mac->hw;
+
+ if (!tc_can_offload(dev))
+ return -EOPNOTSUPP;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ return mtk_flow_offload_cmd(eth, cls, 0);
+}
+
+static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
struct mtk_mac *mac = netdev_priv(dev);
@@ -442,7 +576,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
- if (!eth->ppe.foe_table)
+ if (!eth->soc->offload_version)
return -EOPNOTSUPP;
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -462,6 +596,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
if (IS_ERR(block_cb))
return PTR_ERR(block_cb);
+ flow_block_cb_incref(block_cb);
flow_block_cb_add(block_cb, f);
list_add_tail(&block_cb->driver_list, &block_cb_list);
return 0;
@@ -470,7 +605,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
if (!block_cb)
return -ENOENT;
- if (flow_block_cb_decref(block_cb)) {
+ if (!flow_block_cb_decref(block_cb)) {
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
}
@@ -483,16 +618,16 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
- if (type == TC_SETUP_FT)
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
return mtk_eth_setup_tc_block(dev, type_data);
-
- return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
}
int mtk_eth_offload_init(struct mtk_eth *eth)
{
- if (!eth->ppe.foe_table)
- return 0;
-
return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
index 0c45ea0900f1..a2e61b3eb006 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
@@ -21,6 +21,9 @@
#define MTK_PPE_GLO_CFG_BUSY BIT(31)
#define MTK_PPE_FLOW_CFG 0x204
+#define MTK_PPE_MD_TOAP_BYP_CRSN0 BIT(1)
+#define MTK_PPE_MD_TOAP_BYP_CRSN1 BIT(2)
+#define MTK_PPE_MD_TOAP_BYP_CRSN2 BIT(3)
#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6)
#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7)
#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8)
@@ -54,6 +57,13 @@
#define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14)
#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
+#define MTK_PPE_TB_CFG_INFO_SEL BIT(20)
+#define MTK_PPE_TB_TICK_SEL BIT(24)
+
+#define MTK_PPE_BIND_LMT1 0x230
+#define MTK_PPE_NTU_KEEPALIVE GENMASK(23, 16)
+
+#define MTK_PPE_KEEPALIVE 0x234
enum {
MTK_PPE_SCAN_MODE_DISABLED,
@@ -112,6 +122,8 @@ enum {
#define MTK_PPE_DEFAULT_CPU_PORT 0x248
#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4))
+#define MTK_PPE_DEFAULT_CPU_PORT1 0x24c
+
#define MTK_PPE_MTU_DROP 0x308
#define MTK_PPE_VLAN_MTU0 0x30c
@@ -137,8 +149,24 @@ enum {
#define MTK_PPE_MIB_TB_BASE 0x338
+#define MTK_PPE_MIB_SER_CR 0x33C
+#define MTK_PPE_MIB_SER_CR_ST BIT(16)
+#define MTK_PPE_MIB_SER_CR_ADDR GENMASK(13, 0)
+
+#define MTK_PPE_MIB_SER_R0 0x340
+#define MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW GENMASK(31, 0)
+
+#define MTK_PPE_MIB_SER_R1 0x344
+#define MTK_PPE_MIB_SER_R1_PKT_CNT_LOW GENMASK(31, 16)
+#define MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH GENMASK(15, 0)
+
+#define MTK_PPE_MIB_SER_R2 0x348
+#define MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH GENMASK(23, 0)
+
#define MTK_PPE_MIB_CACHE_CTL 0x350
#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0)
#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2)
+#define MTK_PPE_SBW_CTRL 0x374
+
#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
deleted file mode 100644
index 32d83421226a..000000000000
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ /dev/null
@@ -1,126 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2018-2019 MediaTek Inc.
-
-/* A library for MediaTek SGMII circuit
- *
- * Author: Sean Wang <sean.wang@mediatek.com>
- *
- */
-
-#include <linux/mfd/syscon.h>
-#include <linux/of.h>
-#include <linux/regmap.h>
-
-#include "mtk_eth_soc.h"
-
-int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
-{
- struct device_node *np;
- int i;
-
- ss->ana_rgc3 = ana_rgc3;
-
- for (i = 0; i < MTK_MAX_DEVS; i++) {
- np = of_parse_phandle(r, "mediatek,sgmiisys", i);
- if (!np)
- break;
-
- ss->regmap[i] = syscon_node_to_regmap(np);
- if (IS_ERR(ss->regmap[i]))
- return PTR_ERR(ss->regmap[i]);
- }
-
- return 0;
-}
-
-int mtk_sgmii_setup_mode_an(struct mtk_sgmii *ss, int id)
-{
- unsigned int val;
-
- if (!ss->regmap[id])
- return -EINVAL;
-
- /* Setup the link timer and QPHY power up inside SGMIISYS */
- regmap_write(ss->regmap[id], SGMSYS_PCS_LINK_TIMER,
- SGMII_LINK_TIMER_DEFAULT);
-
- regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
- val |= SGMII_REMOTE_FAULT_DIS;
- regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
-
- regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
- val |= SGMII_AN_RESTART;
- regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
-
- regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
- val &= ~SGMII_PHYA_PWD;
- regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
-
- return 0;
-}
-
-int mtk_sgmii_setup_mode_force(struct mtk_sgmii *ss, int id,
- const struct phylink_link_state *state)
-{
- unsigned int val;
-
- if (!ss->regmap[id])
- return -EINVAL;
-
- regmap_read(ss->regmap[id], ss->ana_rgc3, &val);
- val &= ~RG_PHY_SPEED_MASK;
- if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
- val |= RG_PHY_SPEED_3_125G;
- regmap_write(ss->regmap[id], ss->ana_rgc3, val);
-
- /* Disable SGMII AN */
- regmap_read(ss->regmap[id], SGMSYS_PCS_CONTROL_1, &val);
- val &= ~SGMII_AN_ENABLE;
- regmap_write(ss->regmap[id], SGMSYS_PCS_CONTROL_1, val);
-
- /* SGMII force mode setting */
- regmap_read(ss->regmap[id], SGMSYS_SGMII_MODE, &val);
- val &= ~SGMII_IF_MODE_MASK;
-
- switch (state->speed) {
- case SPEED_10:
- val |= SGMII_SPEED_10;
- break;
- case SPEED_100:
- val |= SGMII_SPEED_100;
- break;
- case SPEED_2500:
- case SPEED_1000:
- val |= SGMII_SPEED_1000;
- break;
- }
-
- if (state->duplex == DUPLEX_FULL)
- val |= SGMII_DUPLEX_FULL;
-
- regmap_write(ss->regmap[id], SGMSYS_SGMII_MODE, val);
-
- /* Release PHYA power down state */
- regmap_read(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, &val);
- val &= ~SGMII_PHYA_PWD;
- regmap_write(ss->regmap[id], SGMSYS_QPHY_PWR_STATE_CTRL, val);
-
- return 0;
-}
-
-void mtk_sgmii_restart_an(struct mtk_eth *eth, int mac_id)
-{
- struct mtk_sgmii *ss = eth->sgmii;
- unsigned int val, sid;
-
- /* Decide how GMAC and SGMIISYS be mapped */
- sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
- 0 : mac_id;
-
- if (!ss->regmap[sid])
- return;
-
- regmap_read(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, &val);
- val |= SGMII_AN_RESTART;
- regmap_write(ss->regmap[sid], SGMSYS_PCS_CONTROL_1, val);
-}
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 89ca7960b225..02c03325911f 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
@@ -30,9 +31,9 @@
#define MTK_STAR_WAIT_TIMEOUT 300
#define MTK_STAR_MAX_FRAME_SIZE 1514
#define MTK_STAR_SKB_ALIGNMENT 16
-#define MTK_STAR_NAPI_WEIGHT 64
#define MTK_STAR_HASHTABLE_MC_LIMIT 256
#define MTK_STAR_HASHTABLE_SIZE_MAX 512
+#define MTK_STAR_DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
* work for this controller.
@@ -130,6 +131,11 @@ static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_REG_INT_MASK 0x0054
#define MTK_STAR_BIT_INT_MASK_FNRC BIT(6)
+/* Delay-Macro Register */
+#define MTK_STAR_REG_TEST0 0x0058
+#define MTK_STAR_BIT_INV_RX_CLK BIT(30)
+#define MTK_STAR_BIT_INV_TX_CLK BIT(31)
+
/* Misc. Config Register */
#define MTK_STAR_REG_TEST1 0x005c
#define MTK_STAR_BIT_TEST1_RST_HASH_MBIST BIT(31)
@@ -150,6 +156,7 @@ static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_REG_MAC_CLK_CONF 0x00ac
#define MTK_STAR_MSK_MAC_CLK_CONF GENMASK(7, 0)
#define MTK_STAR_BIT_CLK_DIV_10 0x0a
+#define MTK_STAR_BIT_CLK_DIV_50 0x32
/* Counter registers. */
#define MTK_STAR_REG_C_RXOKPKT 0x0100
@@ -182,9 +189,14 @@ static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
#define MTK_STAR_REG_C_RX_TWIST 0x0218
/* Ethernet CFG Control */
-#define MTK_PERICFG_REG_NIC_CFG_CON 0x03c4
-#define MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII GENMASK(3, 0)
-#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII BIT(0)
+#define MTK_PERICFG_REG_NIC_CFG0_CON 0x03c4
+#define MTK_PERICFG_REG_NIC_CFG1_CON 0x03c8
+#define MTK_PERICFG_REG_NIC_CFG_CON_V2 0x0c10
+#define MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF GENMASK(3, 0)
+#define MTK_PERICFG_BIT_NIC_CFG_CON_MII 0
+#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII 1
+#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK BIT(0)
+#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2 BIT(8)
/* Represents the actual structure of descriptors used by the MAC. We can
* reuse the same structure for both TX and RX - the layout is the same, only
@@ -217,7 +229,8 @@ struct mtk_star_ring_desc_data {
struct sk_buff *skb;
};
-#define MTK_STAR_RING_NUM_DESCS 128
+#define MTK_STAR_RING_NUM_DESCS 512
+#define MTK_STAR_TX_THRESH (MTK_STAR_RING_NUM_DESCS / 4)
#define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS
#define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2)
@@ -232,6 +245,11 @@ struct mtk_star_ring {
unsigned int tail;
};
+struct mtk_star_compat {
+ int (*set_interface_mode)(struct net_device *ndev);
+ unsigned char bit_clk_div;
+};
+
struct mtk_star_priv {
struct net_device *ndev;
@@ -247,7 +265,8 @@ struct mtk_star_priv {
struct mtk_star_ring rx_ring;
struct mii_bus *mii;
- struct napi_struct napi;
+ struct napi_struct tx_napi;
+ struct napi_struct rx_napi;
struct device_node *phy_node;
phy_interface_t phy_intf;
@@ -256,6 +275,11 @@ struct mtk_star_priv {
int speed;
int duplex;
int pause;
+ bool rmii_rxc;
+ bool rx_inv;
+ bool tx_inv;
+
+ const struct mtk_star_compat *compat_data;
/* Protects against concurrent descriptor access. */
spinlock_t lock;
@@ -358,19 +382,16 @@ mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
mtk_star_ring_push_head(ring, desc_data, flags);
}
-static unsigned int mtk_star_ring_num_used_descs(struct mtk_star_ring *ring)
+static unsigned int mtk_star_tx_ring_avail(struct mtk_star_ring *ring)
{
- return abs(ring->head - ring->tail);
-}
+ u32 avail;
-static bool mtk_star_ring_full(struct mtk_star_ring *ring)
-{
- return mtk_star_ring_num_used_descs(ring) == MTK_STAR_RING_NUM_DESCS;
-}
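+ /* one descriptor is always kept unused so head == tail means empty */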
+ if (ring->tail > ring->head)
+ avail = ring->tail - ring->head - 1;
+ else
+ avail = MTK_STAR_RING_NUM_DESCS - ring->head + ring->tail - 1;
-static bool mtk_star_ring_descs_available(struct mtk_star_ring *ring)
-{
- return mtk_star_ring_num_used_descs(ring) > 0;
+ return avail;
}
static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
@@ -415,6 +436,36 @@ static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
MTK_STAR_BIT_MAC_CFG_NIC_PD);
}
+static void mtk_star_enable_dma_irq(struct mtk_star_priv *priv,
+ bool rx, bool tx)
+{
+ u32 value;
+
+ regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
+
+ if (tx)
+ value &= ~MTK_STAR_BIT_INT_STS_TNTC;
+ if (rx)
+ value &= ~MTK_STAR_BIT_INT_STS_FNRC;
+
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
+}
+
+static void mtk_star_disable_dma_irq(struct mtk_star_priv *priv,
+ bool rx, bool tx)
+{
+ u32 value;
+
+ regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
+
+ if (tx)
+ value |= MTK_STAR_BIT_INT_STS_TNTC;
+ if (rx)
+ value |= MTK_STAR_BIT_INT_STS_FNRC;
+
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
+}
+
/* Unmask the three interrupts we care about, mask all others. */
static void mtk_star_intr_enable(struct mtk_star_priv *priv)
{
@@ -430,20 +481,11 @@ static void mtk_star_intr_disable(struct mtk_star_priv *priv)
regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
}
-static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
-{
- unsigned int val;
-
- regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
-
- return val;
-}
-
static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
{
unsigned int val;
- val = mtk_star_intr_read(priv);
+ regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
return val;
@@ -715,25 +757,44 @@ static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
}
-/* All processing for TX and RX happens in the napi poll callback.
- *
- * FIXME: The interrupt handling should be more fine-grained with each
- * interrupt enabled/disabled independently when needed. Unfortunatly this
- * turned out to impact the driver's stability and until we have something
- * working properly, we're disabling all interrupts during TX & RX processing
- * or when resetting the counter registers.
- */
+/**
+ * mtk_star_handle_irq - Interrupt handler.
+ * @irq: interrupt number.
+ * @data: pointer to a network interface device structure.
+ *
+ * Description: this is the driver interrupt service routine. It mainly handles:
+ * 1. TX complete interrupts for frame transmission.
+ * 2. RX complete interrupts for frame reception.
+ * 3. the MAC management counter interrupt, to avoid counter overflow.
+ */
static irqreturn_t mtk_star_handle_irq(int irq, void *data)
{
- struct mtk_star_priv *priv;
- struct net_device *ndev;
-
- ndev = data;
- priv = netdev_priv(ndev);
+ struct net_device *ndev = data;
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ unsigned int intr_status = mtk_star_intr_ack_all(priv);
+ bool rx, tx;
+
+ rx = (intr_status & MTK_STAR_BIT_INT_STS_FNRC) &&
+ napi_schedule_prep(&priv->rx_napi);
+ tx = (intr_status & MTK_STAR_BIT_INT_STS_TNTC) &&
+ napi_schedule_prep(&priv->tx_napi);
+
+ if (rx || tx) {
+ spin_lock(&priv->lock);
+ /* mask the RX and TX complete interrupts */
+ mtk_star_disable_dma_irq(priv, rx, tx);
+ spin_unlock(&priv->lock);
+
+ if (rx)
+ __napi_schedule(&priv->rx_napi);
+ if (tx)
+ __napi_schedule(&priv->tx_napi);
+ }
- if (netif_running(ndev)) {
- mtk_star_intr_disable(priv);
- napi_schedule(&priv->napi);
+ /* interrupt is triggered once any counters reach 0x8000000 */
+ if (intr_status & MTK_STAR_REG_INT_STS_MIB_CNT_TH) {
+ mtk_star_update_stats(priv);
+ mtk_star_reset_counters(priv);
}
return IRQ_HANDLED;
@@ -822,32 +883,26 @@ static void mtk_star_phy_config(struct mtk_star_priv *priv)
val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;
val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
- val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
- val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
- /* Only full-duplex supported for now. */
- val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
-
- regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
-
if (priv->pause) {
- val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
- val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
- val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
} else {
- val = 0;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
}
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
+ val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
+ val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
+ val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);
- if (priv->pause) {
- val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
- val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
- } else {
- val = 0;
- }
-
+ val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
+ val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
}
@@ -899,14 +954,7 @@ static void mtk_star_init_config(struct mtk_star_priv *priv)
regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
MTK_STAR_MSK_MAC_CLK_CONF,
- MTK_STAR_BIT_CLK_DIV_10);
-}
-
-static void mtk_star_set_mode_rmii(struct mtk_star_priv *priv)
-{
- regmap_update_bits(priv->pericfg, MTK_PERICFG_REG_NIC_CFG_CON,
- MTK_PERICFG_MSK_NIC_CFG_CON_CFG_MII,
- MTK_PERICFG_BIT_NIC_CFG_CON_RMII);
+ priv->compat_data->bit_clk_div);
}
static int mtk_star_enable(struct net_device *ndev)
@@ -952,11 +1000,12 @@ static int mtk_star_enable(struct net_device *ndev)
/* Request the interrupt */
ret = request_irq(ndev->irq, mtk_star_handle_irq,
- IRQF_TRIGGER_FALLING, ndev->name, ndev);
+ IRQF_TRIGGER_NONE, ndev->name, ndev);
if (ret)
goto err_free_skbs;
- napi_enable(&priv->napi);
+ napi_enable(&priv->tx_napi);
+ napi_enable(&priv->rx_napi);
mtk_star_intr_ack_all(priv);
mtk_star_intr_enable(priv);
@@ -977,6 +1026,8 @@ static int mtk_star_enable(struct net_device *ndev)
return 0;
err_free_irq:
+ napi_disable(&priv->rx_napi);
+ napi_disable(&priv->tx_napi);
free_irq(ndev->irq, ndev);
err_free_skbs:
mtk_star_free_rx_skbs(priv);
@@ -989,7 +1040,8 @@ static void mtk_star_disable(struct net_device *ndev)
struct mtk_star_priv *priv = netdev_priv(ndev);
netif_stop_queue(ndev);
- napi_disable(&priv->napi);
+ napi_disable(&priv->tx_napi);
+ napi_disable(&priv->rx_napi);
mtk_star_intr_disable(priv);
mtk_star_dma_disable(priv);
mtk_star_intr_ack_all(priv);
@@ -1021,13 +1073,45 @@ static int mtk_star_netdev_ioctl(struct net_device *ndev,
return phy_mii_ioctl(ndev->phydev, req, cmd);
}
-static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
+static int __mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
+{
+ netif_stop_queue(priv->ndev);
+
+ /* Might race with mtk_star_tx_poll, check again */
+ smp_mb();
+ if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) < size))
+ return -EBUSY;
+
+ netif_start_queue(priv->ndev);
+
+ return 0;
+}
+
+static inline int mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
+{
+ if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) >= size))
+ return 0;
+
+ return __mtk_star_maybe_stop_tx(priv, size);
+}
+
+static netdev_tx_t mtk_star_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
struct mtk_star_ring *ring = &priv->tx_ring;
struct device *dev = mtk_star_get_dev(priv);
struct mtk_star_ring_desc_data desc_data;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+
+ if (unlikely(mtk_star_tx_ring_avail(ring) < nfrags + 1)) {
+ if (!netif_queue_stopped(ndev)) {
+ netif_stop_queue(ndev);
+ /* This is a hard error, log it. */
+ pr_err_ratelimited("Tx ring full when queue awake\n");
+ }
+ return NETDEV_TX_BUSY;
+ }
desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
if (dma_mapping_error(dev, desc_data.dma_addr))
@@ -1035,17 +1119,11 @@ static int mtk_star_netdev_start_xmit(struct sk_buff *skb,
desc_data.skb = skb;
desc_data.len = skb->len;
-
- spin_lock_bh(&priv->lock);
-
mtk_star_ring_push_head_tx(ring, &desc_data);
netdev_sent_queue(ndev, skb->len);
- if (mtk_star_ring_full(ring))
- netif_stop_queue(ndev);
-
- spin_unlock_bh(&priv->lock);
+ mtk_star_maybe_stop_tx(priv, MTK_STAR_DESC_NEEDED);
mtk_star_dma_resume_tx(priv);
@@ -1077,31 +1155,40 @@ static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
return ret;
}
-static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
+static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
{
+ struct mtk_star_priv *priv = container_of(napi, struct mtk_star_priv,
+ tx_napi);
+ int ret = 0, pkts_compl = 0, bytes_compl = 0, count = 0;
struct mtk_star_ring *ring = &priv->tx_ring;
struct net_device *ndev = priv->ndev;
- int ret, pkts_compl, bytes_compl;
- bool wake = false;
-
- spin_lock(&priv->lock);
-
- for (pkts_compl = 0, bytes_compl = 0;;
- pkts_compl++, bytes_compl += ret, wake = true) {
- if (!mtk_star_ring_descs_available(ring))
- break;
+ unsigned int head = ring->head;
+ unsigned int entry = ring->tail;
+ while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
ret = mtk_star_tx_complete_one(priv);
if (ret < 0)
break;
+
+ count++;
+ pkts_compl++;
+ bytes_compl += ret;
+ entry = ring->tail;
}
netdev_completed_queue(ndev, pkts_compl, bytes_compl);
- if (wake && netif_queue_stopped(ndev))
+ if (unlikely(netif_queue_stopped(ndev)) &&
+ (mtk_star_tx_ring_avail(ring) > MTK_STAR_TX_THRESH))
netif_wake_queue(ndev);
- spin_unlock(&priv->lock);
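+ /* TX completions drained for this poll: re-arm only the TX interrupt */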
+ if (napi_complete(napi)) {
+ spin_lock(&priv->lock);
+ mtk_star_enable_dma_irq(priv, false, true);
+ spin_unlock(&priv->lock);
+ }
+
+ return 0;
}
static void mtk_star_netdev_get_stats64(struct net_device *ndev,
@@ -1170,7 +1257,7 @@ static const struct net_device_ops mtk_star_netdev_ops = {
static void mtk_star_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
+ strscpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
}
/* TODO Add ethtool stats. */
@@ -1181,7 +1268,7 @@ static const struct ethtool_ops mtk_star_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static int mtk_star_receive_packet(struct mtk_star_priv *priv)
+static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
{
struct mtk_star_ring *ring = &priv->rx_ring;
struct device *dev = mtk_star_get_dev(priv);
@@ -1189,107 +1276,85 @@ static int mtk_star_receive_packet(struct mtk_star_priv *priv)
struct net_device *ndev = priv->ndev;
struct sk_buff *curr_skb, *new_skb;
dma_addr_t new_dma_addr;
- int ret;
+ int ret, count = 0;
- spin_lock(&priv->lock);
- ret = mtk_star_ring_pop_tail(ring, &desc_data);
- spin_unlock(&priv->lock);
- if (ret)
- return -1;
+ while (count < budget) {
+ ret = mtk_star_ring_pop_tail(ring, &desc_data);
+ if (ret)
+ return -1;
- curr_skb = desc_data.skb;
+ curr_skb = desc_data.skb;
- if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
- (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
- /* Error packet -> drop and reuse skb. */
- new_skb = curr_skb;
- goto push_new_skb;
- }
+ if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
+ (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
+ /* Error packet -> drop and reuse skb. */
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
- /* Prepare new skb before receiving the current one. Reuse the current
- * skb if we fail at any point.
- */
- new_skb = mtk_star_alloc_skb(ndev);
- if (!new_skb) {
- ndev->stats.rx_dropped++;
- new_skb = curr_skb;
- goto push_new_skb;
- }
+ /* Prepare new skb before receiving the current one.
+ * Reuse the current skb if we fail at any point.
+ */
+ new_skb = mtk_star_alloc_skb(ndev);
+ if (!new_skb) {
+ ndev->stats.rx_dropped++;
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
- new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
- if (dma_mapping_error(dev, new_dma_addr)) {
- ndev->stats.rx_dropped++;
- dev_kfree_skb(new_skb);
- new_skb = curr_skb;
- netdev_err(ndev, "DMA mapping error of RX descriptor\n");
- goto push_new_skb;
- }
+ new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
+ if (dma_mapping_error(dev, new_dma_addr)) {
+ ndev->stats.rx_dropped++;
+ dev_kfree_skb(new_skb);
+ new_skb = curr_skb;
+ netdev_err(ndev, "DMA mapping error of RX descriptor\n");
+ goto push_new_skb;
+ }
- /* We can't fail anymore at this point: it's safe to unmap the skb. */
- mtk_star_dma_unmap_rx(priv, &desc_data);
+ /* We can't fail anymore at this point:
+ * it's safe to unmap the skb.
+ */
+ mtk_star_dma_unmap_rx(priv, &desc_data);
- skb_put(desc_data.skb, desc_data.len);
- desc_data.skb->ip_summed = CHECKSUM_NONE;
- desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
- desc_data.skb->dev = ndev;
- netif_receive_skb(desc_data.skb);
+ skb_put(desc_data.skb, desc_data.len);
+ desc_data.skb->ip_summed = CHECKSUM_NONE;
+ desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
+ desc_data.skb->dev = ndev;
+ netif_receive_skb(desc_data.skb);
- /* update dma_addr for new skb */
- desc_data.dma_addr = new_dma_addr;
+ /* update dma_addr for new skb */
+ desc_data.dma_addr = new_dma_addr;
push_new_skb:
- desc_data.len = skb_tailroom(new_skb);
- desc_data.skb = new_skb;
-
- spin_lock(&priv->lock);
- mtk_star_ring_push_head_rx(ring, &desc_data);
- spin_unlock(&priv->lock);
-
- return 0;
-}
-static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
-{
- int received, ret;
+ count++;
- for (received = 0, ret = 0; received < budget && ret == 0; received++)
- ret = mtk_star_receive_packet(priv);
+ desc_data.len = skb_tailroom(new_skb);
+ desc_data.skb = new_skb;
+ mtk_star_ring_push_head_rx(ring, &desc_data);
+ }
mtk_star_dma_resume_rx(priv);
- return received;
+ return count;
}
-static int mtk_star_poll(struct napi_struct *napi, int budget)
+static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
{
struct mtk_star_priv *priv;
- unsigned int status;
- int received = 0;
-
- priv = container_of(napi, struct mtk_star_priv, napi);
-
- status = mtk_star_intr_read(priv);
- mtk_star_intr_ack_all(priv);
-
- if (status & MTK_STAR_BIT_INT_STS_TNTC)
- /* Clean-up all TX descriptors. */
- mtk_star_tx_complete_all(priv);
+ int work_done = 0;
- if (status & MTK_STAR_BIT_INT_STS_FNRC)
- /* Receive up to $budget packets. */
- received = mtk_star_process_rx(priv, budget);
+ priv = container_of(napi, struct mtk_star_priv, rx_napi);
- if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
- mtk_star_update_stats(priv);
- mtk_star_reset_counters(priv);
+ work_done = mtk_star_rx(priv, budget);
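+ /* budget not exhausted: complete NAPI and re-enable the RX interrupt */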
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ spin_lock(&priv->lock);
+ mtk_star_enable_dma_irq(priv, true, false);
+ spin_unlock(&priv->lock);
}
- if (received < budget)
- napi_complete_done(napi, received);
-
- mtk_star_intr_enable(priv);
-
- return received;
+ return work_done;
}
static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
@@ -1313,9 +1378,6 @@ static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
unsigned int val, data;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
mtk_star_mdio_rwok_clear(priv);
val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
@@ -1342,9 +1404,6 @@ static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
struct mtk_star_priv *priv = mii->priv;
unsigned int val;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
mtk_star_mdio_rwok_clear(priv);
val = data;
@@ -1443,6 +1502,25 @@ static void mtk_star_clk_disable_unprepare(void *data)
clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
}
+static int mtk_star_set_timing(struct mtk_star_priv *priv)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int delay_val = 0;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RMII:
+ delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_RX_CLK, priv->rx_inv);
+ delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_TX_CLK, priv->tx_inv);
+ break;
+ default:
+ dev_err(dev, "This interface is not supported\n");
+ return -EINVAL;
+ }
+
+ return regmap_write(priv->regs, MTK_STAR_REG_TEST0, delay_val);
+}
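
The timing setup above packs the DT-provided clock-inversion flags into a single register write with FIELD_PREP(). A minimal standalone sketch of that packing, using illustrative mask names rather than the driver's real MTK_STAR_BIT_INV_*_CLK definitions:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Placeholder masks for illustration only. */
#define EXAMPLE_BIT_INV_RX_CLK	BIT(30)
#define EXAMPLE_BIT_INV_TX_CLK	BIT(31)

static u32 example_pack_timing(bool rx_inv, bool tx_inv)
{
	u32 val = 0;

	/* FIELD_PREP() shifts each value into the bit position of its mask. */
	val |= FIELD_PREP(EXAMPLE_BIT_INV_RX_CLK, rx_inv);
	val |= FIELD_PREP(EXAMPLE_BIT_INV_TX_CLK, tx_inv);

	return val;
}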
+
static int mtk_star_probe(struct platform_device *pdev)
{
struct device_node *of_node;
@@ -1461,6 +1539,7 @@ static int mtk_star_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->ndev = ndev;
+ priv->compat_data = of_device_get_match_data(&pdev->dev);
SET_NETDEV_DEV(ndev, dev);
platform_set_drvdata(pdev, ndev);
@@ -1511,7 +1590,8 @@ static int mtk_star_probe(struct platform_device *pdev)
ret = of_get_phy_mode(of_node, &priv->phy_intf);
if (ret) {
return ret;
- } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII) {
+ } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII &&
+ priv->phy_intf != PHY_INTERFACE_MODE_MII) {
dev_err(dev, "unsupported phy mode: %s\n",
phy_modes(priv->phy_intf));
return -EINVAL;
@@ -1523,7 +1603,23 @@ static int mtk_star_probe(struct platform_device *pdev)
return -ENODEV;
}
- mtk_star_set_mode_rmii(priv);
+ priv->rmii_rxc = of_property_read_bool(of_node, "mediatek,rmii-rxc");
+ priv->rx_inv = of_property_read_bool(of_node, "mediatek,rxc-inverse");
+ priv->tx_inv = of_property_read_bool(of_node, "mediatek,txc-inverse");
+
+ if (priv->compat_data->set_interface_mode) {
+ ret = priv->compat_data->set_interface_mode(ndev);
+ if (ret) {
+ dev_err(dev, "Failed to set phy interface, err = %d\n", ret);
+ return -EINVAL;
+ }
+ }
+
+ ret = mtk_star_set_timing(priv);
+ if (ret) {
+ dev_err(dev, "Failed to set timing, err = %d\n", ret);
+ return -EINVAL;
+ }
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
@@ -1551,18 +1647,95 @@ static int mtk_star_probe(struct platform_device *pdev)
ndev->netdev_ops = &mtk_star_netdev_ops;
ndev->ethtool_ops = &mtk_star_ethtool_ops;
- netif_napi_add(ndev, &priv->napi, mtk_star_poll, MTK_STAR_NAPI_WEIGHT);
+ netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
+ netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);
return devm_register_netdev(dev, ndev);
}
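
The probe path now registers separate RX and TX NAPI contexts via netif_napi_add() and netif_napi_add_tx(). A minimal sketch of that split, with a hypothetical private structure and stub poll handlers; the actual work done per poll is elided:

#include <linux/netdevice.h>

/* Hypothetical private data holding the two NAPI contexts. */
struct example_priv {
	struct napi_struct rx_napi;
	struct napi_struct tx_napi;
};

static int example_rx_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... receive up to 'budget' packets, counting them in work_done ... */

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}

static int example_tx_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... reclaim completed TX descriptors ... */

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}

static void example_register_napi(struct net_device *ndev,
				  struct example_priv *priv)
{
	netif_napi_add(ndev, &priv->rx_napi, example_rx_poll);
	netif_napi_add_tx(ndev, &priv->tx_napi, example_tx_poll);
}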
+#ifdef CONFIG_OF
+static int mt8516_set_interface_mode(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int intf_val, ret, rmii_rxc;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
+ rmii_rxc = 0;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
+ rmii_rxc = priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK;
+ break;
+ default:
+ dev_err(dev, "This interface is not supported\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG1_CON,
+ MTK_PERICFG_BIT_NIC_CFG_CON_CLK,
+ rmii_rxc);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG0_CON,
+ MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF,
+ intf_val);
+}
+
+static int mt8365_set_interface_mode(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int intf_val;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
+ intf_val |= priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2;
+ break;
+ default:
+ dev_err(dev, "This interface is not supported\n");
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG_CON_V2,
+ MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF |
+ MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2,
+ intf_val);
+}
+
+static const struct mtk_star_compat mtk_star_mt8516_compat = {
+ .set_interface_mode = mt8516_set_interface_mode,
+ .bit_clk_div = MTK_STAR_BIT_CLK_DIV_10,
+};
+
+static const struct mtk_star_compat mtk_star_mt8365_compat = {
+ .set_interface_mode = mt8365_set_interface_mode,
+ .bit_clk_div = MTK_STAR_BIT_CLK_DIV_50,
+};
+
static const struct of_device_id mtk_star_of_match[] = {
- { .compatible = "mediatek,mt8516-eth", },
- { .compatible = "mediatek,mt8518-eth", },
- { .compatible = "mediatek,mt8175-eth", },
+ { .compatible = "mediatek,mt8516-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8518-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8175-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8365-eth",
+ .data = &mtk_star_mt8365_compat },
{ }
};
MODULE_DEVICE_TABLE(of, mtk_star_of_match);
+#endif
static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
mtk_star_suspend, mtk_star_resume);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
new file mode 100644
index 000000000000..985cff910f30
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -0,0 +1,1965 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/mfd/syscon.h>
+#include <linux/debugfs.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+#include <net/flow_offload.h>
+#include <net/pkt_cls.h>
+#include "mtk_eth_soc.h"
+#include "mtk_wed_regs.h"
+#include "mtk_wed.h"
+#include "mtk_ppe.h"
+#include "mtk_wed_wo.h"
+
+#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
+
+#define MTK_WED_PKT_SIZE 1900
+#define MTK_WED_BUF_SIZE 2048
+#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
+#define MTK_WED_RX_RING_SIZE 1536
+
+#define MTK_WED_TX_RING_SIZE 2048
+#define MTK_WED_WDMA_RING_SIZE 1024
+#define MTK_WED_MAX_GROUP_SIZE 0x100
+#define MTK_WED_VLD_GROUP_SIZE 0x40
+#define MTK_WED_PER_GROUP_PKT 128
+
+#define MTK_WED_FBUF_SIZE 128
+#define MTK_WED_MIOD_CNT 16
+#define MTK_WED_FB_CMD_CNT 1024
+#define MTK_WED_RRO_QUE_CNT 8192
+#define MTK_WED_MIOD_ENTRY_CNT 128
+
+static struct mtk_wed_hw *hw_list[2];
+static DEFINE_MUTEX(hw_lock);
+
+struct mtk_wed_flow_block_priv {
+ struct mtk_wed_hw *hw;
+ struct net_device *dev;
+};
+
+static void
+wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+{
+ regmap_update_bits(dev->hw->regs, reg, mask | val, val);
+}
+
+static void
+wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ return wed_m32(dev, reg, 0, mask);
+}
+
+static void
+wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ return wed_m32(dev, reg, mask, 0);
+}
+
+static void
+wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+{
+ wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
+}
+
+static void
+wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ wdma_m32(dev, reg, 0, mask);
+}
+
+static void
+wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ wdma_m32(dev, reg, mask, 0);
+}
+
+static u32
+wifi_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ return readl(dev->wlan.base + reg);
+}
+
+static void
+wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ writel(val, dev->wlan.base + reg);
+}
+
+static u32
+mtk_wed_read_reset(struct mtk_wed_device *dev)
+{
+ return wed_r32(dev, MTK_WED_RESET);
+}
+
+static u32
+mtk_wdma_read_reset(struct mtk_wed_device *dev)
+{
+ return wdma_r32(dev, MTK_WDMA_GLO_CFG);
+}
+
+static int
+mtk_wdma_rx_reset(struct mtk_wed_device *dev)
+{
+ u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
+ int i, ret;
+
+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
+ ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
+ !(status & mask), 0, 10000);
+ if (ret)
+ dev_err(dev->hw->dev, "rx reset failed\n");
+
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) {
+ if (dev->rx_wdma[i].desc)
+ continue;
+
+ wdma_w32(dev,
+ MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
+ }
+
+ return ret;
+}
+
+static void
+mtk_wdma_tx_reset(struct mtk_wed_device *dev)
+{
+ u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
+ int i;
+
+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
+ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
+ !(status & mask), 0, 10000))
+ dev_err(dev->hw->dev, "tx reset failed\n");
+
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+ wdma_w32(dev,
+ MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
+}
+
+static void
+mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
+{
+ u32 status;
+
+ wed_w32(dev, MTK_WED_RESET, mask);
+ if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
+ !(status & mask), 0, 1000))
+ WARN_ON_ONCE(1);
+}
+
+static u32
+mtk_wed_wo_read_status(struct mtk_wed_device *dev)
+{
+ return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS);
+}
+
+static void
+mtk_wed_wo_reset(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+ u8 state = MTK_WED_WO_STATE_DISABLE;
+ void __iomem *reg;
+ u32 val;
+
+ mtk_wdma_tx_reset(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+
+ if (mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+ MTK_WED_WO_CMD_CHANGE_STATE, &state,
+ sizeof(state), false))
+ return;
+
+ if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val,
+ val == MTK_WED_WOIF_DISABLE_DONE,
+ 100, MTK_WOCPU_TIMEOUT))
+ dev_err(dev->hw->dev, "failed to disable wed-wo\n");
+
+ reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4);
+
+ val = readl(reg);
+ switch (dev->hw->index) {
+ case 0:
+ val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
+ writel(val, reg);
+ val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
+ writel(val, reg);
+ break;
+ case 1:
+ val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
+ writel(val, reg);
+ val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
+ writel(val, reg);
+ break;
+ default:
+ break;
+ }
+ iounmap(reg);
+}
+
+void mtk_wed_fe_reset(void)
+{
+ int i;
+
+ mutex_lock(&hw_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw = hw_list[i];
+ struct mtk_wed_device *dev = hw->wed_dev;
+ int err;
+
+ if (!dev || !dev->wlan.reset)
+ continue;
+
+ /* reset callback blocks until WLAN reset is completed */
+ err = dev->wlan.reset(dev);
+ if (err)
+ dev_err(dev->dev, "wlan reset failed: %d\n", err);
+ }
+
+ mutex_unlock(&hw_lock);
+}
+
+void mtk_wed_fe_reset_complete(void)
+{
+ int i;
+
+ mutex_lock(&hw_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw = hw_list[i];
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (!dev || !dev->wlan.reset_complete)
+ continue;
+
+ dev->wlan.reset_complete(dev);
+ }
+
+ mutex_unlock(&hw_lock);
+}
+
+static struct mtk_wed_hw *
+mtk_wed_assign(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_hw *hw;
+ int i;
+
+ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
+ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
+ if (!hw)
+ return NULL;
+
+ if (!hw->wed_dev)
+ goto out;
+
+ if (hw->version == 1)
+ return NULL;
+
+ /* MT7986 WED devices do not have any pcie slot restrictions */
+ }
+ /* MT7986 PCIE or AXI */
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ hw = hw_list[i];
+ if (hw && !hw->wed_dev)
+ goto out;
+ }
+
+ return NULL;
+
+out:
+ hw->wed_dev = dev;
+ return hw;
+}
+
+static int
+mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
+{
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ void **page_list;
+ int token = dev->wlan.token_start;
+ int ring_size;
+ int n_pages;
+ int i, page_idx;
+
+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+ n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
+
+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ dev->tx_buf_ring.size = ring_size;
+ dev->tx_buf_ring.pages = page_list;
+
+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
+ &desc_phys, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ dev->tx_buf_ring.desc = desc;
+ dev->tx_buf_ring.desc_phys = desc_phys;
+
+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
+ dma_addr_t page_phys, buf_phys;
+ struct page *page;
+ void *buf;
+ int s;
+
+ page = __dev_alloc_pages(GFP_KERNEL, 0);
+ if (!page)
+ return -ENOMEM;
+
+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
+ __free_page(page);
+ return -ENOMEM;
+ }
+
+ page_list[page_idx++] = page;
+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ buf = page_to_virt(page);
+ buf_phys = page_phys;
+
+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
+ u32 txd_size;
+ u32 ctrl;
+
+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
+
+ desc->buf0 = cpu_to_le32(buf_phys);
+ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
+
+ if (dev->hw->version == 1)
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+ MTK_WED_BUF_SIZE - txd_size) |
+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
+ else
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
+ MTK_WED_BUF_SIZE - txd_size) |
+ MTK_WDMA_DESC_CTRL_LAST_SEG0;
+ desc->ctrl = cpu_to_le32(ctrl);
+ desc->info = 0;
+ desc++;
+
+ buf += MTK_WED_BUF_SIZE;
+ buf_phys += MTK_WED_BUF_SIZE;
+ }
+
+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ }
+
+ return 0;
+}
+
+static void
+mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
+{
+ struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
+ void **page_list = dev->tx_buf_ring.pages;
+ int page_idx;
+ int i;
+
+ if (!page_list)
+ return;
+
+ if (!desc)
+ goto free_pagelist;
+
+ for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
+ i += MTK_WED_BUF_PER_PAGE) {
+ void *page = page_list[page_idx++];
+ dma_addr_t buf_addr;
+
+ if (!page)
+ break;
+
+ buf_addr = le32_to_cpu(desc[i].buf0);
+ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(page);
+ }
+
+ dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
+ desc, dev->tx_buf_ring.desc_phys);
+
+free_pagelist:
+ kfree(page_list);
+}
+
+static int
+mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
+{
+ struct mtk_rxbm_desc *desc;
+ dma_addr_t desc_phys;
+
+ dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
+ desc = dma_alloc_coherent(dev->hw->dev,
+ dev->wlan.rx_nbuf * sizeof(*desc),
+ &desc_phys, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ dev->rx_buf_ring.desc = desc;
+ dev->rx_buf_ring.desc_phys = desc_phys;
+ dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
+
+ return 0;
+}
+
+static void
+mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
+{
+ struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
+
+ if (!desc)
+ return;
+
+ dev->wlan.release_rx_buf(dev);
+ dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
+ desc, dev->rx_buf_ring.desc_phys);
+}
+
+static void
+mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
+{
+ wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
+ FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size));
+ wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
+ wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
+ FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt));
+ wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
+ FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+}
+
+static void
+mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
+{
+ if (!ring->desc)
+ return;
+
+ dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
+ ring->desc, ring->desc_phys);
+}
+
+static void
+mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
+{
+ mtk_wed_free_rx_buffer(dev);
+ mtk_wed_free_ring(dev, &dev->rro.ring);
+}
+
+static void
+mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
+ mtk_wed_free_ring(dev, &dev->tx_ring[i]);
+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
+ mtk_wed_free_ring(dev, &dev->rx_wdma[i]);
+}
+
+static void
+mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
+{
+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+ if (dev->hw->version == 1)
+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
+
+ if (!dev->hw->num_flows)
+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+
+ wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
+ wed_r32(dev, MTK_WED_EXT_INT_MASK);
+}
+
+static void
+mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
+{
+ if (enable) {
+ wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ wed_w32(dev, MTK_WED_TXP_DW1,
+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
+ } else {
+ wed_w32(dev, MTK_WED_TXP_DW1,
+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
+ wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ }
+}
+
+#define MTK_WFMDA_RX_DMA_EN BIT(2)
+static void
+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
+{
+ u32 val;
+ int i;
+
+ if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
+ return; /* queue is not configured by mt76 */
+
+ for (i = 0; i < 3; i++) {
+ u32 cur_idx;
+
+ cur_idx = wed_r32(dev,
+ MTK_WED_WPDMA_RING_RX_DATA(idx) +
+ MTK_WED_RING_OFS_CPU_IDX);
+ if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
+ break;
+
+ usleep_range(100000, 200000);
+ }
+
+ if (i == 3) {
+ dev_err(dev->hw->dev, "rx dma enable failed\n");
+ return;
+ }
+
+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
+ MTK_WFMDA_RX_DMA_EN;
+ wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
+}
+
+static void
+mtk_wed_dma_disable(struct mtk_wed_device *dev)
+{
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
+ wed_clr(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+
+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_TX_DMA_EN |
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+
+ if (dev->hw->version == 1) {
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+ } else {
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+ }
+
+ mtk_wed_set_512_support(dev, false);
+}
+
+static void
+mtk_wed_stop(struct mtk_wed_device *dev)
+{
+ mtk_wed_set_ext_int(dev, false);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+
+ if (dev->hw->version == 1)
+ return;
+
+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
+ wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
+}
+
+static void
+mtk_wed_deinit(struct mtk_wed_device *dev)
+{
+ mtk_wed_stop(dev);
+ mtk_wed_dma_disable(dev);
+
+ wed_clr(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+ if (dev->hw->version == 1)
+ return;
+
+ wed_clr(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_ROUTE_QM_EN |
+ MTK_WED_CTRL_WED_RX_BM_EN |
+ MTK_WED_CTRL_RX_RRO_QM_EN);
+}
+
+static void
+__mtk_wed_detach(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_hw *hw = dev->hw;
+
+ mtk_wed_deinit(dev);
+
+ mtk_wdma_rx_reset(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+ mtk_wed_free_tx_buffer(dev);
+ mtk_wed_free_tx_rings(dev);
+
+ if (mtk_wed_get_rx_capa(dev)) {
+ if (hw->wed_wo)
+ mtk_wed_wo_reset(dev);
+ mtk_wed_free_rx_rings(dev);
+ if (hw->wed_wo)
+ mtk_wed_wo_deinit(hw);
+ }
+
+ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
+ struct device_node *wlan_node;
+
+ wlan_node = dev->wlan.pci_dev->dev.of_node;
+ if (of_dma_is_coherent(wlan_node) && hw->hifsys)
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), BIT(hw->index));
+ }
+
+ if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
+ hw->eth->dma_dev != hw->eth->dev)
+ mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
+
+ memset(dev, 0, sizeof(*dev));
+ module_put(THIS_MODULE);
+
+ hw->wed_dev = NULL;
+}
+
+static void
+mtk_wed_detach(struct mtk_wed_device *dev)
+{
+ mutex_lock(&hw_lock);
+ __mtk_wed_detach(dev);
+ mutex_unlock(&hw_lock);
+}
+
+#define PCIE_BASE_ADDR0 0x11280000
+static void
+mtk_wed_bus_init(struct mtk_wed_device *dev)
+{
+ switch (dev->wlan.bus_type) {
+ case MTK_WED_BUS_PCIE: {
+ struct device_node *np = dev->hw->eth->dev->of_node;
+ struct regmap *regs;
+
+ regs = syscon_regmap_lookup_by_phandle(np,
+ "mediatek,wed-pcie");
+ if (IS_ERR(regs))
+ break;
+
+ regmap_update_bits(regs, 0, BIT(0), BIT(0));
+
+ wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
+
+ /* pcie interrupt control: pola/source selection */
+ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
+ wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+
+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
+
+ /* pcie interrupt status trigger register */
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+ wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
+
+ /* pola setting */
+ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
+ break;
+ }
+ case MTK_WED_BUS_AXI:
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+ MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mtk_wed_set_wpdma(struct mtk_wed_device *dev)
+{
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ } else {
+ mtk_wed_bus_init(dev);
+
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
+ wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
+ }
+}
+
+static void
+mtk_wed_hw_init_early(struct mtk_wed_device *dev)
+{
+ u32 mask, set;
+
+ mtk_wed_deinit(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+ mtk_wed_set_wpdma(dev);
+
+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
+
+ if (dev->hw->version == 1) {
+ u32 offset = dev->hw->index ? 0x04000400 : 0;
+
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+
+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
+ MTK_PCIE_BASE(dev->hw->index));
+ } else {
+ wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
+ wed_w32(dev, MTK_WED_WDMA_OFFSET0,
+ FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
+ MTK_WDMA_INT_STATUS) |
+ FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
+ MTK_WDMA_GLO_CFG));
+
+ wed_w32(dev, MTK_WED_WDMA_OFFSET1,
+ FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
+ MTK_WDMA_RING_TX(0)) |
+ FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
+ MTK_WDMA_RING_RX(0)));
+ }
+}
+
+static int
+mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
+ int size)
+{
+ ring->desc = dma_alloc_coherent(dev->hw->dev,
+ size * sizeof(*ring->desc),
+ &ring->desc_phys, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_size = sizeof(*ring->desc);
+ ring->size = size;
+
+ return 0;
+}
+
+#define MTK_WED_MIOD_COUNT (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT)
+static int
+mtk_wed_rro_alloc(struct mtk_wed_device *dev)
+{
+ struct reserved_mem *rmem;
+ struct device_node *np;
+ int index;
+
+ index = of_property_match_string(dev->hw->node, "memory-region-names",
+ "wo-dlm");
+ if (index < 0)
+ return index;
+
+ np = of_parse_phandle(dev->hw->node, "memory-region", index);
+ if (!np)
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+
+ if (!rmem)
+ return -ENODEV;
+
+ dev->rro.miod_phys = rmem->base;
+ dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys;
+
+ return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring,
+ MTK_WED_RRO_QUE_CNT);
+}
+
+static int
+mtk_wed_rro_cfg(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+ struct {
+ struct {
+ __le32 base;
+ __le32 cnt;
+ __le32 unit;
+ } ring[2];
+ __le32 wed;
+ u8 version;
+ } req = {
+ .ring[0] = {
+ .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE),
+ .cnt = cpu_to_le32(MTK_WED_MIOD_CNT),
+ .unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT),
+ },
+ .ring[1] = {
+ .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE +
+ MTK_WED_MIOD_COUNT),
+ .cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT),
+ .unit = cpu_to_le32(4),
+ },
+ };
+
+ return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+ MTK_WED_WO_CMD_WED_CFG,
+ &req, sizeof(req), true);
+}
+
+static void
+mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
+{
+ wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
+ FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
+ FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
+ FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
+ MTK_WED_MIOD_ENTRY_CNT >> 2));
+
+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys);
+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
+ FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys);
+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
+ FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
+ wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys);
+
+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
+ MTK_WED_RROQM_RST_IDX_MIOD |
+ MTK_WED_RROQM_RST_IDX_FDBK);
+
+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1);
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
+}
+
+static void
+mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
+{
+ wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);
+
+ for (;;) {
+ usleep_range(100, 200);
+ if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
+ break;
+ }
+
+ /* configure RX_ROUTE_QM */
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ /* enable RX_ROUTE_QM */
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+}
+
+static void
+mtk_wed_hw_init(struct mtk_wed_device *dev)
+{
+ if (dev->init_done)
+ return;
+
+ dev->init_done = true;
+ mtk_wed_set_ext_int(dev, false);
+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
+ MTK_WED_TX_BM_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
+ dev->tx_buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
+ MTK_WED_TX_RING_SIZE / 256));
+
+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
+
+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_TX_BM_TKID,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+ dev->wlan.token_start +
+ dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+ MTK_WED_TX_BM_DYN_THR_HI);
+ } else {
+ wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+ dev->wlan.token_start +
+ dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
+ MTK_WED_TX_BM_DYN_THR_HI_V2);
+ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
+ MTK_WED_TX_TKID_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
+ dev->tx_buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
+ dev->tx_buf_ring.size / 128));
+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
+ MTK_WED_TX_TKID_DYN_THR_HI);
+ }
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+ if (dev->hw->version == 1) {
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ } else {
+ wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+ /* rx hw init */
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+
+ mtk_wed_rx_buffer_hw_init(dev);
+ mtk_wed_rro_hw_init(dev);
+ mtk_wed_route_qm_hw_init(dev);
+ }
+
+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
+}
+
+static void
+mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx)
+{
+ void *head = (void *)ring->desc;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ struct mtk_wdma_desc *desc;
+
+ desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
+ desc->buf0 = 0;
+ if (tx)
+ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+ else
+ desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
+ desc->buf1 = 0;
+ desc->info = 0;
+ }
+}
+
+static u32
+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ return !!(wed_r32(dev, reg) & mask);
+}
+
+static int
+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ int sleep = 15000;
+ int timeout = 100 * sleep;
+ u32 val;
+
+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
+ timeout, false, dev, reg, mask);
+}
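
mtk_wed_poll_busy() builds on the generic read_poll_timeout() helper from <linux/iopoll.h>. A minimal sketch of the same idiom against a plain MMIO register, with placeholder parameters:

#include <linux/io.h>
#include <linux/iopoll.h>

static int example_wait_not_busy(void __iomem *reg, u32 busy_mask)
{
	u32 val;

	/* Re-read the register every 15 ms, give up after 100 iterations;
	 * 'false' means no sleep before the first read.
	 */
	return read_poll_timeout(readl, val, !(val & busy_mask),
				 15000, 100 * 15000, false, reg);
}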
+
+static int
+mtk_wed_rx_reset(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+ u8 val = MTK_WED_WO_STATE_SER_RESET;
+ int i, ret;
+
+ ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+ MTK_WED_WO_CMD_CHANGE_STATE, &val,
+ sizeof(val), true);
+ if (ret)
+ return ret;
+
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
+ } else {
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+
+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
+ MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
+ MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
+
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+ }
+
+ /* reset rro qm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_RRO_QM_BUSY);
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
+ } else {
+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
+ MTK_WED_RROQM_RST_IDX_MIOD |
+ MTK_WED_RROQM_RST_IDX_FDBK);
+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
+ }
+
+ /* reset route qm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
+ if (ret)
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
+ else
+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+ MTK_WED_RTQM_Q_RST);
+
+ /* reset tx wdma */
+ mtk_wdma_tx_reset(dev);
+
+ /* reset tx wdma drv */
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
+
+ /* reset wed rx dma */
+ ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_RX_DMA_BUSY);
+ wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
+ } else {
+ struct mtk_eth *eth = dev->hw->eth;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ wed_set(dev, MTK_WED_RESET_IDX,
+ MTK_WED_RESET_IDX_RX_V2);
+ else
+ wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+ /* reset rx bm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_RX_BM_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
+
+ /* wo change to enable state */
+ val = MTK_WED_WO_STATE_ENABLE;
+ ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+ MTK_WED_WO_CMD_CHANGE_STATE, &val,
+ sizeof(val), true);
+ if (ret)
+ return ret;
+
+ /* wed_rx_ring_reset */
+ for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
+ if (!dev->rx_ring[i].desc)
+ continue;
+
+ mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE,
+ false);
+ }
+ mtk_wed_free_rx_buffer(dev);
+
+ return 0;
+}
+
+static void
+mtk_wed_reset_dma(struct mtk_wed_device *dev)
+{
+ bool busy = false;
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
+ if (!dev->tx_ring[i].desc)
+ continue;
+
+ mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE,
+ true);
+ }
+
+ /* 1. reset WED tx DMA */
+ wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
+ busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_BUSY);
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
+ } else {
+ wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+ /* 2. reset WDMA rx DMA */
+ busy = !!mtk_wdma_rx_reset(dev);
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+ if (!busy)
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
+ } else {
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
+
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
+
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
+ }
+
+ /* 3. reset WED WPDMA tx */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+ for (i = 0; i < 100; i++) {
+ val = wed_r32(dev, MTK_WED_TX_BM_INTF);
+ if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
+ break;
+ }
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+ /* 4. reset WED WPDMA tx */
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+ if (!busy)
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
+ } else {
+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
+ MTK_WED_WPDMA_RESET_IDX_TX |
+ MTK_WED_WPDMA_RESET_IDX_RX);
+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
+ }
+
+ dev->init_done = false;
+ if (dev->hw->version == 1)
+ return;
+
+ if (!busy) {
+ wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+ mtk_wed_rx_reset(dev);
+}
+
+static int
+mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
+ int size, u32 desc_size, bool tx)
+{
+ ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
+ &ring->desc_phys, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_size = desc_size;
+ ring->size = size;
+ mtk_wed_ring_reset(ring, size, tx);
+
+ return 0;
+}
+
+static int
+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
+ bool reset)
+{
+ u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
+ struct mtk_wed_ring *wdma;
+
+ if (idx >= ARRAY_SIZE(dev->rx_wdma))
+ return -EINVAL;
+
+ wdma = &dev->rx_wdma[idx];
+ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+ desc_size, true))
+ return -ENOMEM;
+
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
+ size);
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
+
+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
+ size);
+
+ return 0;
+}
+
+static int
+mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
+ bool reset)
+{
+ u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
+ struct mtk_wed_ring *wdma;
+
+ if (idx >= ARRAY_SIZE(dev->tx_wdma))
+ return -EINVAL;
+
+ wdma = &dev->tx_wdma[idx];
+ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+ desc_size, true))
+ return -ENOMEM;
+
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
+ size);
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
+
+ if (reset)
+ mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true);
+
+ if (!idx) {
+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT,
+ size);
+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX,
+ 0);
+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX,
+ 0);
+ }
+
+ return 0;
+}
+
+static void
+mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
+ u32 reason, u32 hash)
+{
+ struct mtk_eth *eth = dev->hw->eth;
+ struct ethhdr *eh;
+
+ if (!skb)
+ return;
+
+ if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ return;
+
+ skb_set_mac_header(skb, 0);
+ eh = eth_hdr(skb);
+ skb->protocol = eh->h_proto;
+ mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash);
+}
+
+static void
+mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
+{
+ u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
+
+ /* wed control cr set */
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
+ MTK_WED_PCIE_INT_TRIGGER_STATUS);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
+
+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+ } else {
+ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
+ GENMASK(1, 0));
+ /* initial tx interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
+ MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
+ MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
+ dev->wlan.tx_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
+ dev->wlan.tx_tbit[1]));
+
+ /* initial txfree interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
+ MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
+ MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
+ dev->wlan.txfree_tbit));
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
+ dev->wlan.rx_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
+ dev->wlan.rx_tbit[1]));
+
+ wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
+ wed_set(dev, MTK_WED_WDMA_INT_CTRL,
+ FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
+ dev->wdma_idx));
+ }
+
+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
+
+ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+}
+
+static void
+mtk_wed_dma_enable(struct mtk_wed_device *dev)
+{
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+
+ wed_set(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_TX_DMA_EN |
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+
+ if (dev->hw->version == 1) {
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+ } else {
+ int i;
+
+ wed_set(dev, MTK_WED_WPDMA_CTRL,
+ MTK_WED_WPDMA_CTRL_SDL1_FIXED);
+
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
+ MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+
+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
+ 0x2));
+
+ for (i = 0; i < MTK_WED_RX_QUEUES; i++)
+ mtk_wed_check_wfdma_rx_fill(dev, i);
+ }
+}
+
+static void
+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+{
+ int i;
+
+ if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
+ if (!dev->rx_wdma[i].desc)
+ mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);
+
+ mtk_wed_hw_init(dev);
+ mtk_wed_configure_irq(dev, irq_mask);
+
+ mtk_wed_set_ext_int(dev, true);
+
+ if (dev->hw->version == 1) {
+ u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
+ dev->hw->index);
+
+ val |= BIT(0) | (BIT(1) * !!dev->hw->index);
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+ } else {
+ /* driver sets MID ready, and does so only once */
+ wed_w32(dev, MTK_WED_EXT_INT_MASK1,
+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
+ wed_w32(dev, MTK_WED_EXT_INT_MASK2,
+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
+
+ wed_r32(dev, MTK_WED_EXT_INT_MASK1);
+ wed_r32(dev, MTK_WED_EXT_INT_MASK2);
+
+ if (mtk_wed_rro_cfg(dev))
+ return;
+
+ }
+
+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
+
+ mtk_wed_dma_enable(dev);
+ dev->running = true;
+}
+
+static int
+mtk_wed_attach(struct mtk_wed_device *dev)
+ __releases(RCU)
+{
+ struct mtk_wed_hw *hw;
+ struct device *device;
+ int ret = 0;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "mtk_wed_attach without holding the RCU read lock");
+
+ if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
+ pci_domain_nr(dev->wlan.pci_dev->bus) > 1) ||
+ !try_module_get(THIS_MODULE))
+ ret = -ENODEV;
+
+ rcu_read_unlock();
+
+ if (ret)
+ return ret;
+
+ mutex_lock(&hw_lock);
+
+ hw = mtk_wed_assign(dev);
+ if (!hw) {
+ module_put(THIS_MODULE);
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
+ ? &dev->wlan.pci_dev->dev
+ : &dev->wlan.platform_dev->dev;
+ dev_info(device, "attaching wed device %d version %d\n",
+ hw->index, hw->version);
+
+ dev->hw = hw;
+ dev->dev = hw->dev;
+ dev->irq = hw->irq;
+ dev->wdma_idx = hw->index;
+ dev->version = hw->version;
+
+ if (hw->eth->dma_dev == hw->eth->dev &&
+ of_dma_is_coherent(hw->eth->dev->of_node))
+ mtk_eth_set_dma_device(hw->eth, hw->dev);
+
+ ret = mtk_wed_tx_buffer_alloc(dev);
+ if (ret)
+ goto out;
+
+ if (mtk_wed_get_rx_capa(dev)) {
+ ret = mtk_wed_rro_alloc(dev);
+ if (ret)
+ goto out;
+ }
+
+ mtk_wed_hw_init_early(dev);
+ if (hw->version == 1) {
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), 0);
+ } else {
+ dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
+ ret = mtk_wed_wo_init(hw);
+ }
+out:
+ if (ret) {
+ dev_err(dev->hw->dev, "failed to attach wed device\n");
+ __mtk_wed_detach(dev);
+ }
+unlock:
+ mutex_unlock(&hw_lock);
+
+ return ret;
+}
+
+static int
+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
+ bool reset)
+{
+ struct mtk_wed_ring *ring = &dev->tx_ring[idx];
+
+ /*
+ * Tx ring redirection:
+ * Instead of configuring the WLAN PDMA TX ring directly, the WLAN
+ * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n)
+ * registers.
+ *
+ * WED driver posts its own DMA ring as WLAN PDMA TX and configures it
+ * into MTK_WED_WPDMA_RING_TX(n) registers.
+ * It gets filled with packets picked up from WED TX ring and from
+ * WDMA RX.
+ */
+
+ if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
+ return -EINVAL;
+
+ if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
+ sizeof(*ring->desc), true))
+ return -ENOMEM;
+
+ if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
+ reset))
+ return -ENOMEM;
+
+ ring->reg_base = MTK_WED_RING_TX(idx);
+ ring->wpdma = regs;
+
+ /* WED -> WPDMA */
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
+
+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+ ring->desc_phys);
+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
+ MTK_WED_TX_RING_SIZE);
+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
+
+ return 0;
+}
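
The redirection described in the comment at the top of mtk_wed_tx_ring_setup() boils down to advertising one descriptor ring at two register windows. A minimal sketch of that step, using placeholder offsets and iomem bases rather than the WED/WPDMA register map:

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative ring register layout. */
#define EX_RING_OFS_BASE	0x0
#define EX_RING_OFS_COUNT	0x4
#define EX_RING_OFS_CPU_IDX	0x8

static void example_post_ring(void __iomem *own_view, void __iomem *peer_view,
			      dma_addr_t desc_phys, u32 count)
{
	/* The same descriptor ring is made visible to both sides. */
	writel(lower_32_bits(desc_phys), own_view + EX_RING_OFS_BASE);
	writel(count, own_view + EX_RING_OFS_COUNT);
	writel(0, own_view + EX_RING_OFS_CPU_IDX);

	writel(lower_32_bits(desc_phys), peer_view + EX_RING_OFS_BASE);
	writel(count, peer_view + EX_RING_OFS_COUNT);
	writel(0, peer_view + EX_RING_OFS_CPU_IDX);
}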
+
+static int
+mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->txfree_ring;
+ int i, index = dev->hw->version == 1;
+
+ /*
+ * For txfree event handling, the same DMA ring is shared between WED
+ * and WLAN. The WLAN driver accesses the ring index registers through
+ * WED.
+ */
+ ring->reg_base = MTK_WED_RING_RX(index);
+ ring->wpdma = regs;
+
+ for (i = 0; i < 12; i += 4) {
+ u32 val = readl(regs + i);
+
+ wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
+ }
+
+ return 0;
+}
+
+static int
+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
+ bool reset)
+{
+ struct mtk_wed_ring *ring = &dev->rx_ring[idx];
+
+ if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring)))
+ return -EINVAL;
+
+ if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
+ sizeof(*ring->desc), false))
+ return -ENOMEM;
+
+ if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
+ reset))
+ return -ENOMEM;
+
+ ring->reg_base = MTK_WED_RING_RX_DATA(idx);
+ ring->wpdma = regs;
+ ring->flags |= MTK_WED_RING_CONFIGURED;
+
+ /* WPDMA -> WED */
+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);
+
+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
+ ring->desc_phys);
+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
+ MTK_WED_RX_RING_SIZE);
+
+ return 0;
+}
+
+static u32
+mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
+{
+ u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+ if (dev->hw->version == 1)
+ ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
+
+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
+ val &= ext_mask;
+ if (!dev->hw->num_flows)
+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+ if (val && net_ratelimit())
+ pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
+
+ val = wed_r32(dev, MTK_WED_INT_STATUS);
+ val &= mask;
+ wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
+
+ return val;
+}
+
+static void
+mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
+{
+ if (!dev->running)
+ return;
+
+ mtk_wed_set_ext_int(dev, !!mask);
+ wed_w32(dev, MTK_WED_INT_MASK, mask);
+}
+
+int mtk_wed_flow_add(int index)
+{
+ struct mtk_wed_hw *hw = hw_list[index];
+ int ret;
+
+ if (!hw || !hw->wed_dev)
+ return -ENODEV;
+
+ if (hw->num_flows) {
+ hw->num_flows++;
+ return 0;
+ }
+
+ mutex_lock(&hw_lock);
+ if (!hw->wed_dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
+ if (!ret)
+ hw->num_flows++;
+ mtk_wed_set_ext_int(hw->wed_dev, true);
+
+out:
+ mutex_unlock(&hw_lock);
+
+ return ret;
+}
+
+void mtk_wed_flow_remove(int index)
+{
+ struct mtk_wed_hw *hw = hw_list[index];
+
+ if (!hw)
+ return;
+
+ if (--hw->num_flows)
+ return;
+
+ mutex_lock(&hw_lock);
+ if (!hw->wed_dev)
+ goto out;
+
+ hw->wed_dev->wlan.offload_disable(hw->wed_dev);
+ mtk_wed_set_ext_int(hw->wed_dev, true);
+
+out:
+ mutex_unlock(&hw_lock);
+}
+
+static int
+mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct mtk_wed_flow_block_priv *priv = cb_priv;
+ struct flow_cls_offload *cls = type_data;
+ struct mtk_wed_hw *hw = priv->hw;
+
+ if (!tc_can_offload(priv->dev))
+ return -EOPNOTSUPP;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
+}
+
+static int
+mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
+ struct flow_block_offload *f)
+{
+ struct mtk_wed_flow_block_priv *priv;
+ static LIST_HEAD(block_cb_list);
+ struct flow_block_cb *block_cb;
+ struct mtk_eth *eth = hw->eth;
+ flow_setup_cb_t *cb;
+
+ if (!eth->soc->offload_version)
+ return -EOPNOTSUPP;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ cb = mtk_wed_setup_tc_block_cb;
+ f->driver_block_list = &block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
+ if (block_cb) {
+ flow_block_cb_incref(block_cb);
+ return 0;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->hw = hw;
+ priv->dev = dev;
+ block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
+ if (IS_ERR(block_cb)) {
+ kfree(priv);
+ return PTR_ERR(block_cb);
+ }
+
+ flow_block_cb_incref(block_cb);
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
+ if (!block_cb)
+ return -ENOENT;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ kfree(block_cb->cb_priv);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
+ enum tc_setup_type type, void *type_data)
+{
+ struct mtk_wed_hw *hw = wed->hw;
+
+ if (hw->version < 2)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
+ return mtk_wed_setup_tc_block(hw, dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index)
+{
+ static const struct mtk_wed_ops wed_ops = {
+ .attach = mtk_wed_attach,
+ .tx_ring_setup = mtk_wed_tx_ring_setup,
+ .rx_ring_setup = mtk_wed_rx_ring_setup,
+ .txfree_ring_setup = mtk_wed_txfree_ring_setup,
+ .msg_update = mtk_wed_mcu_msg_update,
+ .start = mtk_wed_start,
+ .stop = mtk_wed_stop,
+ .reset_dma = mtk_wed_reset_dma,
+ .reg_read = wed_r32,
+ .reg_write = wed_w32,
+ .irq_get = mtk_wed_irq_get,
+ .irq_set_mask = mtk_wed_irq_set_mask,
+ .detach = mtk_wed_detach,
+ .ppe_check = mtk_wed_ppe_check,
+ .setup_tc = mtk_wed_setup_tc,
+ };
+ struct device_node *eth_np = eth->dev->of_node;
+ struct platform_device *pdev;
+ struct mtk_wed_hw *hw;
+ struct regmap *regs;
+ int irq;
+
+ if (!np)
+ return;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ goto err_of_node_put;
+
+ get_device(&pdev->dev);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ goto err_put_device;
+
+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
+ if (IS_ERR(regs))
+ goto err_put_device;
+
+ rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
+
+ mutex_lock(&hw_lock);
+
+ if (WARN_ON(hw_list[index]))
+ goto unlock;
+
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ goto unlock;
+
+ hw->node = np;
+ hw->regs = regs;
+ hw->eth = eth;
+ hw->dev = &pdev->dev;
+ hw->wdma_phy = wdma_phy;
+ hw->wdma = wdma;
+ hw->index = index;
+ hw->irq = irq;
+ hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+
+ if (hw->version == 1) {
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,pcie-mirror");
+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,hifsys");
+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
+ kfree(hw);
+ goto unlock;
+ }
+
+ if (!index) {
+ regmap_write(hw->mirror, 0, 0);
+ regmap_write(hw->mirror, 4, 0);
+ }
+ }
+
+ mtk_wed_hw_add_debugfs(hw);
+
+ hw_list[index] = hw;
+
+ mutex_unlock(&hw_lock);
+
+ return;
+
+unlock:
+ mutex_unlock(&hw_lock);
+err_put_device:
+ put_device(&pdev->dev);
+err_of_node_put:
+ of_node_put(np);
+}
+
+void mtk_wed_exit(void)
+{
+ int i;
+
+ rcu_assign_pointer(mtk_soc_wed_ops, NULL);
+
+ synchronize_rcu();
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw;
+
+ hw = hw_list[i];
+ if (!hw)
+ continue;
+
+ hw_list[i] = NULL;
+ debugfs_remove(hw->debugfs_dir);
+ put_device(hw->dev);
+ of_node_put(hw->node);
+ kfree(hw);
+ }
+}
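
mtk_wed_add_hw() publishes the ops table with rcu_assign_pointer(), and mtk_wed_exit() retracts it and waits with synchronize_rcu() before tearing the hardware state down. A minimal sketch of that publish/retract pattern with a hypothetical ops pointer:

#include <linux/rcupdate.h>

struct example_ops {
	void (*hello)(void);
};

static struct example_ops __rcu *example_ops_ptr;

static void example_publish(struct example_ops *ops)
{
	rcu_assign_pointer(example_ops_ptr, ops);
}

static void example_retract(void)
{
	RCU_INIT_POINTER(example_ops_ptr, NULL);
	/* Wait until no reader can still hold the old pointer before
	 * freeing anything it referenced.
	 */
	synchronize_rcu();
}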
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
new file mode 100644
index 000000000000..43ab77eaf683
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_WED_PRIV_H
+#define __MTK_WED_PRIV_H
+
+#include <linux/soc/mediatek/mtk_wed.h>
+#include <linux/debugfs.h>
+#include <linux/regmap.h>
+#include <linux/netdevice.h>
+
+struct mtk_eth;
+struct mtk_wed_wo;
+
+struct mtk_wed_hw {
+ struct device_node *node;
+ struct mtk_eth *eth;
+ struct regmap *regs;
+ struct regmap *hifsys;
+ struct device *dev;
+ void __iomem *wdma;
+ phys_addr_t wdma_phy;
+ struct regmap *mirror;
+ struct dentry *debugfs_dir;
+ struct mtk_wed_device *wed_dev;
+ struct mtk_wed_wo *wed_wo;
+ u32 debugfs_reg;
+ u32 num_flows;
+ u8 version;
+ char dirname[5];
+ int irq;
+ int index;
+};
+
+struct mtk_wdma_info {
+ u8 wdma_idx;
+ u8 queue;
+ u16 wcid;
+ u8 bss;
+};
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static inline void
+wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ regmap_write(dev->hw->regs, reg, val);
+}
+
+static inline u32
+wed_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ unsigned int val;
+
+ regmap_read(dev->hw->regs, reg, &val);
+
+ return val;
+}
+
+static inline void
+wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ writel(val, dev->hw->wdma + reg);
+}
+
+static inline u32
+wdma_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ return readl(dev->hw->wdma + reg);
+}
+
+static inline u32
+wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
+{
+ if (!dev->tx_ring[ring].wpdma)
+ return 0;
+
+ return readl(dev->tx_ring[ring].wpdma + reg);
+}
+
+static inline void
+wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
+{
+ if (!dev->tx_ring[ring].wpdma)
+ return;
+
+ writel(val, dev->tx_ring[ring].wpdma + reg);
+}
+
+static inline u32
+wpdma_rx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
+{
+ if (!dev->rx_ring[ring].wpdma)
+ return 0;
+
+ return readl(dev->rx_ring[ring].wpdma + reg);
+}
+
+static inline void
+wpdma_rx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
+{
+ if (!dev->rx_ring[ring].wpdma)
+ return;
+
+ writel(val, dev->rx_ring[ring].wpdma + reg);
+}
+
+static inline u32
+wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ if (!dev->txfree_ring.wpdma)
+ return 0;
+
+ return readl(dev->txfree_ring.wpdma + reg);
+}
+
+static inline void
+wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ if (!dev->txfree_ring.wpdma)
+ return;
+
+ writel(val, dev->txfree_ring.wpdma + reg);
+}
+
+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index);
+void mtk_wed_exit(void);
+int mtk_wed_flow_add(int index);
+void mtk_wed_flow_remove(int index);
+void mtk_wed_fe_reset(void);
+void mtk_wed_fe_reset_complete(void);
+#else
+static inline void
+mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index)
+{
+}
+static inline void
+mtk_wed_exit(void)
+{
+}
+static inline int mtk_wed_flow_add(int index)
+{
+ return -EINVAL;
+}
+static inline void mtk_wed_flow_remove(int index)
+{
+}
+
+static inline void mtk_wed_fe_reset(void)
+{
+}
+
+static inline void mtk_wed_fe_reset_complete(void)
+{
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw);
+#else
+static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
+{
+}
+#endif
+
+#endif
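[Editor's illustration, not part of the patch: the wed_r32()/wed_w32() and wdma_*/wpdma_* accessors declared above are the building blocks for register access in the WED core. A minimal sketch of a hypothetical read-modify-write helper built on them; the name and its use are illustrative only.]

/* Illustrative sketch, not part of this patch: a hypothetical
 * read-modify-write helper composed from wed_r32()/wed_w32() in mtk_wed.h.
 */
static inline u32 wed_rmw_example(struct mtk_wed_device *dev, u32 reg,
				  u32 mask, u32 val)
{
	u32 cur = wed_r32(dev, reg);		/* read current register value */

	wed_w32(dev, reg, (cur & ~mask) | val);	/* clear masked bits, set val */

	return cur;				/* return the previous value */
}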
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
new file mode 100644
index 000000000000..b244c02c5b51
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/seq_file.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+#include "mtk_wed.h"
+#include "mtk_wed_regs.h"
+
+struct reg_dump {
+ const char *name;
+ u16 offset;
+ u8 type;
+ u8 base;
+};
+
+enum {
+ DUMP_TYPE_STRING,
+ DUMP_TYPE_WED,
+ DUMP_TYPE_WDMA,
+ DUMP_TYPE_WPDMA_TX,
+ DUMP_TYPE_WPDMA_TXFREE,
+ DUMP_TYPE_WPDMA_RX,
+ DUMP_TYPE_WED_RRO,
+};
+
+#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
+#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
+#define DUMP_RING(_prefix, _base, ...) \
+ { _prefix " BASE", _base, __VA_ARGS__ }, \
+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
+ { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \
+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
+
+#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
+#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
+
+#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
+#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA)
+
+#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
+#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
+#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n)
+#define DUMP_WED_RRO_RING(_base) DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO)
+#define DUMP_WED_RRO_FDBK(_base) DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO)
+
+static void
+print_reg_val(struct seq_file *s, const char *name, u32 val)
+{
+ seq_printf(s, "%-32s %08x\n", name, val);
+}
+
+static void
+dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
+ const struct reg_dump *regs, int n_regs)
+{
+ const struct reg_dump *cur;
+ u32 val;
+
+ for (cur = regs; cur < &regs[n_regs]; cur++) {
+ switch (cur->type) {
+ case DUMP_TYPE_STRING:
+ seq_printf(s, "%s======== %s:\n",
+ cur > regs ? "\n" : "",
+ cur->name);
+ continue;
+ case DUMP_TYPE_WED_RRO:
+ case DUMP_TYPE_WED:
+ val = wed_r32(dev, cur->offset);
+ break;
+ case DUMP_TYPE_WDMA:
+ val = wdma_r32(dev, cur->offset);
+ break;
+ case DUMP_TYPE_WPDMA_TX:
+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
+ break;
+ case DUMP_TYPE_WPDMA_TXFREE:
+ val = wpdma_txfree_r32(dev, cur->offset);
+ break;
+ case DUMP_TYPE_WPDMA_RX:
+ val = wpdma_rx_r32(dev, cur->base, cur->offset);
+ break;
+ }
+ print_reg_val(s, cur->name, val);
+ }
+}
+
+
+static int
+wed_txinfo_show(struct seq_file *s, void *data)
+{
+ static const struct reg_dump regs[] = {
+ DUMP_STR("WED TX"),
+ DUMP_WED(WED_TX_MIB(0)),
+ DUMP_WED_RING(WED_RING_TX(0)),
+
+ DUMP_WED(WED_TX_MIB(1)),
+ DUMP_WED_RING(WED_RING_TX(1)),
+
+ DUMP_STR("WPDMA TX"),
+ DUMP_WED(WED_WPDMA_TX_MIB(0)),
+ DUMP_WED_RING(WED_WPDMA_RING_TX(0)),
+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)),
+
+ DUMP_WED(WED_WPDMA_TX_MIB(1)),
+ DUMP_WED_RING(WED_WPDMA_RING_TX(1)),
+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)),
+
+ DUMP_STR("WPDMA TX"),
+ DUMP_WPDMA_TX_RING(0),
+ DUMP_WPDMA_TX_RING(1),
+
+ DUMP_STR("WED WDMA RX"),
+ DUMP_WED(WED_WDMA_RX_MIB(0)),
+ DUMP_WED_RING(WED_WDMA_RING_RX(0)),
+ DUMP_WED(WED_WDMA_RX_THRES(0)),
+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)),
+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)),
+
+ DUMP_WED(WED_WDMA_RX_MIB(1)),
+ DUMP_WED_RING(WED_WDMA_RING_RX(1)),
+ DUMP_WED(WED_WDMA_RX_THRES(1)),
+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)),
+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)),
+
+ DUMP_STR("WDMA RX"),
+ DUMP_WDMA(WDMA_GLO_CFG),
+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
+
+ DUMP_STR("TX FREE"),
+ DUMP_WED(WED_RX_MIB(0)),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (!dev)
+ return 0;
+
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
+
+static int
+wed_rxinfo_show(struct seq_file *s, void *data)
+{
+ static const struct reg_dump regs[] = {
+ DUMP_STR("WPDMA RX"),
+ DUMP_WPDMA_RX_RING(0),
+ DUMP_WPDMA_RX_RING(1),
+
+ DUMP_STR("WPDMA RX"),
+ DUMP_WED(WED_WPDMA_RX_D_MIB(0)),
+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)),
+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)),
+ DUMP_WED(WED_WPDMA_RX_D_MIB(1)),
+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)),
+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)),
+ DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB),
+
+ DUMP_STR("WED RX"),
+ DUMP_WED_RING(WED_RING_RX_DATA(0)),
+ DUMP_WED_RING(WED_RING_RX_DATA(1)),
+
+ DUMP_STR("WED RRO"),
+ DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
+ DUMP_WED(WED_RROQM_MID_MIB),
+ DUMP_WED(WED_RROQM_MOD_MIB),
+ DUMP_WED(WED_RROQM_MOD_COHERENT_MIB),
+ DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0),
+ DUMP_WED(WED_RROQM_FDBK_IND_MIB),
+ DUMP_WED(WED_RROQM_FDBK_ENQ_MIB),
+ DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
+ DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
+
+ DUMP_STR("WED Route QM"),
+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
+ DUMP_WED(WED_RTQM_Q2N_MIB),
+ DUMP_WED(WED_RTQM_Q2B_MIB),
+ DUMP_WED(WED_RTQM_PFDBK_MIB),
+
+ DUMP_STR("WED WDMA TX"),
+ DUMP_WED(WED_WDMA_TX_MIB),
+ DUMP_WED_RING(WED_WDMA_RING_TX),
+
+ DUMP_STR("WDMA TX"),
+ DUMP_WDMA(WDMA_GLO_CFG),
+ DUMP_WDMA_RING(WDMA_RING_TX(0)),
+ DUMP_WDMA_RING(WDMA_RING_TX(1)),
+
+ DUMP_STR("WED RX BM"),
+ DUMP_WED(WED_RX_BM_BASE),
+ DUMP_WED(WED_RX_BM_RX_DMAD),
+ DUMP_WED(WED_RX_BM_PTR),
+ DUMP_WED(WED_RX_BM_TKID_MIB),
+ DUMP_WED(WED_RX_BM_BLEN),
+ DUMP_WED(WED_RX_BM_STS),
+ DUMP_WED(WED_RX_BM_INTF2),
+ DUMP_WED(WED_RX_BM_INTF),
+ DUMP_WED(WED_RX_BM_ERR_STS),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (!dev)
+ return 0;
+
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
+
+static int
+mtk_wed_reg_set(void *data, u64 val)
+{
+ struct mtk_wed_hw *hw = data;
+
+ regmap_write(hw->regs, hw->debugfs_reg, val);
+
+ return 0;
+}
+
+static int
+mtk_wed_reg_get(void *data, u64 *val)
+{
+ struct mtk_wed_hw *hw = data;
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(hw->regs, hw->debugfs_reg, &regval);
+ if (ret)
+ return ret;
+
+ *val = regval;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
+ "0x%08llx\n");
+
+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
+{
+ struct dentry *dir;
+
+ snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index);
+ dir = debugfs_create_dir(hw->dirname, NULL);
+
+ hw->debugfs_dir = dir;
+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
+ if (hw->version != 1)
+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw,
+ &wed_rxinfo_fops);
+}
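[Editor's illustration, not part of the patch: further dump views follow the same table-driven pattern as wed_txinfo/wed_rxinfo above — declare a reg_dump[] array with the DUMP_* macros, render it through dump_wed_regs() from a *_show() callback, and register the generated fops in mtk_wed_hw_add_debugfs(). A minimal sketch reusing a register already present in the tables above:]

/* Illustrative sketch, not part of this patch. */
static int
wed_example_show(struct seq_file *s, void *data)
{
	static const struct reg_dump regs[] = {
		DUMP_STR("EXAMPLE"),
		DUMP_WED(WED_TX_MIB(0)),	/* any MTK_WED_* offset works here */
	};
	struct mtk_wed_hw *hw = s->private;

	if (!hw->wed_dev)
		return 0;

	dump_wed_regs(s, hw->wed_dev, regs, ARRAY_SIZE(regs));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(wed_example);

/* and registered alongside the existing entries, e.g.:
 *	debugfs_create_file_unsafe("example", 0400, dir, hw, &wed_example_fops);
 */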
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
new file mode 100644
index 000000000000..071ed3dea860
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sujuan Chen <sujuan.chen@mediatek.com>
+ */
+
+#include <linux/firmware.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/mfd/syscon.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+#include <asm/unaligned.h>
+
+#include "mtk_wed_regs.h"
+#include "mtk_wed_wo.h"
+#include "mtk_wed.h"
+
+static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
+{
+ return readl(wo->boot.addr + reg);
+}
+
+static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+{
+ writel(val, wo->boot.addr + reg);
+}
+
+static struct sk_buff *
+mtk_wed_mcu_msg_alloc(const void *data, int data_len)
+{
+ int length = sizeof(struct mtk_wed_mcu_hdr) + data_len;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(length, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ memset(skb->head, 0, length);
+ skb_reserve(skb, sizeof(struct mtk_wed_mcu_hdr));
+ if (data && data_len)
+ skb_put_data(skb, data, data_len);
+
+ return skb;
+}
+
+static struct sk_buff *
+mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires)
+{
+ if (!time_is_after_jiffies(expires))
+ return NULL;
+
+ wait_event_timeout(wo->mcu.wait, !skb_queue_empty(&wo->mcu.res_q),
+ expires - jiffies);
+ return skb_dequeue(&wo->mcu.res_q);
+}
+
+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb)
+{
+ skb_queue_tail(&wo->mcu.res_q, skb);
+ wake_up(&wo->mcu.wait);
+}
+
+static void
+mtk_wed_update_rx_stats(struct mtk_wed_device *wed, struct sk_buff *skb)
+{
+ u32 count = get_unaligned_le32(skb->data);
+ struct mtk_wed_wo_rx_stats *stats;
+ int i;
+
+ if (count * sizeof(*stats) > skb->len - sizeof(u32))
+ return;
+
+ stats = (struct mtk_wed_wo_rx_stats *)(skb->data + sizeof(u32));
+ for (i = 0 ; i < count ; i++)
+ wed->wlan.update_wo_rx_stats(wed, &stats[i]);
+}
+
+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo,
+ struct sk_buff *skb)
+{
+ struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+
+ skb_pull(skb, sizeof(*hdr));
+
+ switch (hdr->cmd) {
+ case MTK_WED_WO_EVT_LOG_DUMP:
+ dev_notice(wo->hw->dev, "%s\n", skb->data);
+ break;
+ case MTK_WED_WO_EVT_PROFILING: {
+ struct mtk_wed_wo_log_info *info = (void *)skb->data;
+ u32 count = skb->len / sizeof(*info);
+ int i;
+
+ for (i = 0 ; i < count ; i++)
+ dev_notice(wo->hw->dev,
+ "SN:%u latency: total=%u, rro:%u, mod:%u\n",
+ le32_to_cpu(info[i].sn),
+ le32_to_cpu(info[i].total),
+ le32_to_cpu(info[i].rro),
+ le32_to_cpu(info[i].mod));
+ break;
+ }
+ case MTK_WED_WO_EVT_RXCNT_INFO:
+ mtk_wed_update_rx_stats(wo->hw->wed_dev, skb);
+ break;
+ default:
+ break;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+static int
+mtk_wed_mcu_skb_send_msg(struct mtk_wed_wo *wo, struct sk_buff *skb,
+ int id, int cmd, u16 *wait_seq, bool wait_resp)
+{
+ struct mtk_wed_mcu_hdr *hdr;
+
+ /* TODO: make it dynamic based on cmd */
+ wo->mcu.timeout = 20 * HZ;
+
+ hdr = (struct mtk_wed_mcu_hdr *)skb_push(skb, sizeof(*hdr));
+ hdr->cmd = cmd;
+ hdr->length = cpu_to_le16(skb->len);
+
+ if (wait_resp && wait_seq) {
+ u16 seq = ++wo->mcu.seq;
+
+ if (!seq)
+ seq = ++wo->mcu.seq;
+ *wait_seq = seq;
+
+ hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_NEED_RSP);
+ hdr->seq = cpu_to_le16(seq);
+ }
+ if (id == MTK_WED_MODULE_ID_WO)
+ hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO);
+
+ return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb);
+}
+
+static int
+mtk_wed_mcu_parse_response(struct mtk_wed_wo *wo, struct sk_buff *skb,
+ int cmd, int seq)
+{
+ struct mtk_wed_mcu_hdr *hdr;
+
+ if (!skb) {
+ dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n",
+ cmd, seq);
+ return -ETIMEDOUT;
+ }
+
+ hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+ if (le16_to_cpu(hdr->seq) != seq)
+ return -EAGAIN;
+
+ skb_pull(skb, sizeof(*hdr));
+ switch (cmd) {
+ case MTK_WED_WO_CMD_RXCNT_INFO:
+ mtk_wed_update_rx_stats(wo->hw->wed_dev, skb);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
+ const void *data, int len, bool wait_resp)
+{
+ unsigned long expires;
+ struct sk_buff *skb;
+ u16 seq;
+ int ret;
+
+ skb = mtk_wed_mcu_msg_alloc(data, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&wo->mcu.mutex);
+
+ ret = mtk_wed_mcu_skb_send_msg(wo, skb, id, cmd, &seq, wait_resp);
+ if (ret || !wait_resp)
+ goto unlock;
+
+ expires = jiffies + wo->mcu.timeout;
+ do {
+ skb = mtk_wed_mcu_get_response(wo, expires);
+ ret = mtk_wed_mcu_parse_response(wo, skb, cmd, seq);
+ dev_kfree_skb(skb);
+ } while (ret == -EAGAIN);
+
+unlock:
+ mutex_unlock(&wo->mcu.mutex);
+
+ return ret;
+}
+
+int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
+ int len)
+{
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+
+ if (dev->hw->version == 1)
+ return 0;
+
+ if (WARN_ON(!wo))
+ return -ENODEV;
+
+ return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, id, data, len,
+ true);
+}
+
+static int
+mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
+ struct mtk_wed_wo_memory_region *region)
+{
+ struct reserved_mem *rmem;
+ struct device_node *np;
+ int index;
+
+ index = of_property_match_string(wo->hw->node, "memory-region-names",
+ region->name);
+ if (index < 0)
+ return index;
+
+ np = of_parse_phandle(wo->hw->node, "memory-region", index);
+ if (!np)
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+
+ if (!rmem)
+ return -ENODEV;
+
+ region->phy_addr = rmem->base;
+ region->size = rmem->size;
+ region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size);
+
+ return !region->addr ? -EINVAL : 0;
+}
+
+static int
+mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw,
+ struct mtk_wed_wo_memory_region *region)
+{
+ const u8 *first_region_ptr, *region_ptr, *trailer_ptr, *ptr = fw->data;
+ const struct mtk_wed_fw_trailer *trailer;
+ const struct mtk_wed_fw_region *fw_region;
+
+ trailer_ptr = fw->data + fw->size - sizeof(*trailer);
+ trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr;
+ region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region);
+ first_region_ptr = region_ptr;
+
+ while (region_ptr < trailer_ptr) {
+ u32 length;
+
+ fw_region = (const struct mtk_wed_fw_region *)region_ptr;
+ length = le32_to_cpu(fw_region->len);
+
+ if (region->phy_addr != le32_to_cpu(fw_region->addr))
+ goto next;
+
+ if (region->size < length)
+ goto next;
+
+ if (first_region_ptr < ptr + length)
+ goto next;
+
+ if (region->shared && region->consumed)
+ return 0;
+
+ if (!region->shared || !region->consumed) {
+ memcpy_toio(region->addr, ptr, length);
+ region->consumed = true;
+ return 0;
+ }
+next:
+ region_ptr += sizeof(*fw_region);
+ ptr += length;
+ }
+
+ return -EINVAL;
+}
+
+static int
+mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
+{
+ static struct mtk_wed_wo_memory_region mem_region[] = {
+ [MTK_WED_WO_REGION_EMI] = {
+ .name = "wo-emi",
+ },
+ [MTK_WED_WO_REGION_ILM] = {
+ .name = "wo-ilm",
+ },
+ [MTK_WED_WO_REGION_DATA] = {
+ .name = "wo-data",
+ .shared = true,
+ },
+ };
+ const struct mtk_wed_fw_trailer *trailer;
+ const struct firmware *fw;
+ const char *fw_name;
+ u32 val, boot_cr;
+ int ret, i;
+
+ /* load firmware region metadata */
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+ ret = mtk_wed_get_memory_region(wo, &mem_region[i]);
+ if (ret)
+ return ret;
+ }
+
+ wo->boot.name = "wo-boot";
+ ret = mtk_wed_get_memory_region(wo, &wo->boot);
+ if (ret)
+ return ret;
+
+ /* set dummy cr */
+ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
+ wo->hw->index + 1);
+
+ /* load firmware */
+ if (of_device_is_compatible(wo->hw->node, "mediatek,mt7981-wed"))
+ fw_name = MT7981_FIRMWARE_WO;
+ else
+ fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0;
+
+ ret = request_firmware(&fw, fw_name, wo->hw->dev);
+ if (ret)
+ return ret;
+
+ trailer = (void *)(fw->data + fw->size -
+ sizeof(struct mtk_wed_fw_trailer));
+ dev_info(wo->hw->dev,
+ "MTK WED WO Firmware Version: %.10s, Build Time: %.15s\n",
+ trailer->fw_ver, trailer->build_date);
+ dev_info(wo->hw->dev, "MTK WED WO Chip ID %02x Region %d\n",
+ trailer->chip_id, trailer->num_region);
+
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+ ret = mtk_wed_mcu_run_firmware(wo, fw, &mem_region[i]);
+ if (ret)
+ goto out;
+ }
+
+ /* set the start address */
+ boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR
+ : MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
+ wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
+ /* wo firmware reset */
+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
+
+ val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
+ val |= wo->hw->index ? MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK
+ : MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static u32
+mtk_wed_mcu_read_fw_dl(struct mtk_wed_wo *wo)
+{
+ return wed_r32(wo->hw->wed_dev,
+ MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL);
+}
+
+int mtk_wed_mcu_init(struct mtk_wed_wo *wo)
+{
+ u32 val;
+ int ret;
+
+ skb_queue_head_init(&wo->mcu.res_q);
+ init_waitqueue_head(&wo->mcu.wait);
+ mutex_init(&wo->mcu.mutex);
+
+ ret = mtk_wed_mcu_load_firmware(wo);
+ if (ret)
+ return ret;
+
+ return readx_poll_timeout(mtk_wed_mcu_read_fw_dl, wo, val, !val,
+ 100, MTK_FW_DL_TIMEOUT);
+}
+
+MODULE_FIRMWARE(MT7981_FIRMWARE_WO);
+MODULE_FIRMWARE(MT7986_FIRMWARE_WO0);
+MODULE_FIRMWARE(MT7986_FIRMWARE_WO1);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
new file mode 100644
index 000000000000..a5d9d8a5bce2
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+
+const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+EXPORT_SYMBOL_GPL(mtk_soc_wed_ops);
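[Editor's illustration, not part of the patch: mtk_soc_wed_ops is an RCU-protected ops pointer — the core publishes it with rcu_assign_pointer() and clears it in mtk_wed_exit() above; consumers are expected to dereference it under rcu_read_lock(). A hedged sketch of the consumer side; the ops member called here is hypothetical.]

/* Illustrative sketch, not part of this patch: generic pattern for calling
 * through an RCU-protected ops table such as mtk_soc_wed_ops.
 */
static int wed_ops_call_example(struct mtk_wed_device *dev)
{
	const struct mtk_wed_ops *ops;
	int ret = -ENODEV;

	rcu_read_lock();
	ops = rcu_dereference(mtk_soc_wed_ops);
	if (ops)
		ret = ops->attach(dev);	/* hypothetical callback for illustration */
	rcu_read_unlock();

	return ret;
}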
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
new file mode 100644
index 000000000000..0a50bb98c5ea
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_WED_REGS_H
+#define __MTK_WED_REGS_H
+
+#define MTK_WFDMA_DESC_CTRL_TO_HOST BIT(8)
+#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
+#define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0)
+#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
+#define MTK_WDMA_DESC_CTRL_BURST BIT(16)
+#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
+#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
+#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
+
+struct mtk_wdma_desc {
+ __le32 buf0;
+ __le32 ctrl;
+ __le32 buf1;
+ __le32 info;
+} __packed __aligned(4);
+
+#define MTK_WED_REV_ID 0x004
+
+#define MTK_WED_RESET 0x008
+#define MTK_WED_RESET_TX_BM BIT(0)
+#define MTK_WED_RESET_RX_BM BIT(1)
+#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
+#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
+#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
+#define MTK_WED_RESET_WPDMA_RX_D_DRV BIT(10)
+#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
+#define MTK_WED_RESET_WED_TX_DMA BIT(12)
+#define MTK_WED_RESET_WED_RX_DMA BIT(13)
+#define MTK_WED_RESET_WDMA_TX_DRV BIT(16)
+#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
+#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
+#define MTK_WED_RESET_RX_RRO_QM BIT(20)
+#define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
+#define MTK_WED_RESET_WED BIT(31)
+
+#define MTK_WED_CTRL 0x00c
+#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0)
+#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
+#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
+#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
+#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
+#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
+#define MTK_WED_CTRL_WED_RX_BM_EN BIT(12)
+#define MTK_WED_CTRL_WED_RX_BM_BUSY BIT(13)
+#define MTK_WED_CTRL_RX_RRO_QM_EN BIT(14)
+#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
+#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
+#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
+#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
+#define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
+#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
+
+#define MTK_WED_EXT_INT_STATUS 0x020
+#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
+#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1)
+#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
+#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
+#define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR BIT(22)
+#define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR BIT(23)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_GET_BM_DMAD_SKIP BIT(25)
+#define MTK_WED_EXT_INT_STATUS_WPDMA_RX_D_DRV_ERR BIT(26)
+#define MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY BIT(27)
+#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
+ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
+ MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR)
+
+#define MTK_WED_EXT_INT_MASK 0x028
+#define MTK_WED_EXT_INT_MASK1 0x02c
+#define MTK_WED_EXT_INT_MASK2 0x030
+
+#define MTK_WED_STATUS 0x060
+#define MTK_WED_STATUS_TX GENMASK(15, 8)
+
+#define MTK_WED_TX_BM_CTRL 0x080
+#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
+
+#define MTK_WED_TX_BM_BASE 0x084
+
+#define MTK_WED_TX_BM_TKID 0x088
+#define MTK_WED_TX_BM_TKID_V2 0x0c8
+#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
+#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
+
+#define MTK_WED_TX_BM_BUF_LEN 0x08c
+
+#define MTK_WED_TX_BM_INTF 0x09c
+#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0)
+#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16)
+#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28)
+#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29)
+
+#define MTK_WED_TX_BM_DYN_THR 0x0a0
+#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
+#define MTK_WED_TX_BM_DYN_THR_LO_V2 GENMASK(8, 0)
+#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
+#define MTK_WED_TX_BM_DYN_THR_HI_V2 GENMASK(24, 16)
+
+#define MTK_WED_TX_TKID_CTRL 0x0c0
+#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+#define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
+
+#define MTK_WED_TX_TKID_DYN_THR 0x0e0
+#define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0)
+#define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16)
+
+#define MTK_WED_TXP_DW0 0x120
+#define MTK_WED_TXP_DW1 0x124
+#define MTK_WED_WPDMA_WRITE_TXP GENMASK(31, 16)
+#define MTK_WED_TXDP_CTRL 0x130
+#define MTK_WED_TXDP_DW9_OVERWR BIT(9)
+#define MTK_WED_RX_BM_TKID_MIB 0x1cc
+
+#define MTK_WED_INT_STATUS 0x200
+#define MTK_WED_INT_MASK 0x204
+
+#define MTK_WED_GLO_CFG 0x208
+#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0)
+#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2)
+#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
+#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
+#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9)
+#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
+#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
+#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
+#define MTK_WED_GLO_CFG_SW_RESET BIT(24)
+#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
+#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29)
+#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MTK_WED_RESET_IDX 0x20c
+#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
+#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6)
+#define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30)
+
+#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
+#define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4)
+
+#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
+
+#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
+#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
+
+#define MTK_WED_SCR0 0x3c0
+#define MTK_WED_WPDMA_INT_TRIGGER 0x504
+#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
+#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
+
+#define MTK_WED_WPDMA_GLO_CFG 0x508
+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
+#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
+#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9)
+#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
+#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
+#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24)
+#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
+#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
+#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC BIT(4)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
+#define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
+
+#define MTK_WED_WPDMA_RESET_IDX 0x50c
+#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
+#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
+
+#define MTK_WED_WPDMA_CTRL 0x518
+#define MTK_WED_WPDMA_CTRL_SDL1_FIXED BIT(31)
+
+#define MTK_WED_WPDMA_INT_CTRL 0x520
+#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
+#define MTK_WED_WPDMA_INT_CTRL_SIG_SRC BIT(22)
+#define MTK_WED_WPDMA_INT_CTRL_SRC_SEL GENMASK(17, 16)
+
+#define MTK_WED_WPDMA_INT_MASK 0x524
+
+#define MTK_WED_WPDMA_INT_CTRL_TX 0x530
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
+
+#define MTK_WED_WPDMA_INT_CTRL_RX 0x534
+#define MTK_WED_WPDMA_INT_CTRL_RX0_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_RX0_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG GENMASK(6, 2)
+#define MTK_WED_WPDMA_INT_CTRL_RX1_EN BIT(8)
+#define MTK_WED_WPDMA_INT_CTRL_RX1_CLR BIT(9)
+#define MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG GENMASK(14, 10)
+
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG GENMASK(6, 2)
+
+#define MTK_WED_PCIE_CFG_BASE 0x560
+
+#define MTK_WED_PCIE_CFG_BASE 0x560
+#define MTK_WED_PCIE_CFG_INTM 0x564
+#define MTK_WED_PCIE_CFG_MSIS 0x568
+#define MTK_WED_PCIE_INT_TRIGGER 0x570
+#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
+
+#define MTK_WED_PCIE_INT_CTRL 0x57c
+#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
+#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
+#define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
+
+#define MTK_WED_WPDMA_CFG_BASE 0x580
+#define MTK_WED_WPDMA_CFG_INT_MASK 0x584
+#define MTK_WED_WPDMA_CFG_TX 0x588
+#define MTK_WED_WPDMA_CFG_TX_FREE 0x58c
+
+#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
+#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
+
+#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
+#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
+#define MTK_WED_WPDMA_RING_RX_DATA(_n) (0x730 + (_n) * 0x10)
+
+#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
+#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
+#define MTK_WED_WPDMA_RX_D_RX_DRV_BUSY BIT(1)
+#define MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE BIT(3)
+#define MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE BIT(4)
+#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
+#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)
+
+#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
+#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16)
+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
+
+#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
+#define MTK_WED_WPDMA_RX_RING 0x770
+
+#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
+#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
+#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
+
+#define MTK_WED_WDMA_RING_TX 0x800
+
+#define MTK_WED_WDMA_TX_MIB 0x810
+
+#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
+#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
+
+#define MTK_WED_WDMA_GLO_CFG 0xa04
+#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
+#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
+#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
+#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13)
+#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16)
+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17)
+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18)
+#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19)
+#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20)
+#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21)
+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22)
+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23)
+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24)
+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25)
+#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26)
+#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30)
+
+#define MTK_WED_WDMA_RESET_IDX 0xa08
+#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
+
+#define MTK_WED_WDMA_INT_CLR 0xa24
+#define MTK_WED_WDMA_INT_CLR_RX_DONE GENMASK(17, 16)
+
+#define MTK_WED_WDMA_INT_TRIGGER 0xa28
+#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
+
+#define MTK_WED_WDMA_INT_CTRL 0xa2c
+#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
+
+#define MTK_WED_WDMA_CFG_BASE 0xaa0
+#define MTK_WED_WDMA_OFFSET0 0xaa4
+#define MTK_WED_WDMA_OFFSET1 0xaa8
+
+#define MTK_WED_WDMA_OFST0_GLO_INTS GENMASK(15, 0)
+#define MTK_WED_WDMA_OFST0_GLO_CFG GENMASK(31, 16)
+#define MTK_WED_WDMA_OFST1_TX_CTRL GENMASK(15, 0)
+#define MTK_WED_WDMA_OFST1_RX_CTRL GENMASK(31, 16)
+
+#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
+#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
+#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
+
+#define MTK_WED_RX_BM_RX_DMAD 0xd80
+#define MTK_WED_RX_BM_RX_DMAD_SDL0 GENMASK(13, 0)
+
+#define MTK_WED_RX_BM_BASE 0xd84
+#define MTK_WED_RX_BM_INIT_PTR 0xd88
+#define MTK_WED_RX_BM_SW_TAIL GENMASK(15, 0)
+#define MTK_WED_RX_BM_INIT_SW_TAIL BIT(16)
+
+#define MTK_WED_RX_PTR 0xd8c
+
+#define MTK_WED_RX_BM_DYN_ALLOC_TH 0xdb4
+#define MTK_WED_RX_BM_DYN_ALLOC_TH_H GENMASK(31, 16)
+#define MTK_WED_RX_BM_DYN_ALLOC_TH_L GENMASK(15, 0)
+
+#define MTK_WED_RING_OFS_BASE 0x00
+#define MTK_WED_RING_OFS_COUNT 0x04
+#define MTK_WED_RING_OFS_CPU_IDX 0x08
+#define MTK_WED_RING_OFS_DMA_IDX 0x0c
+
+#define MTK_WDMA_RING_TX(_n) (0x000 + (_n) * 0x10)
+#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
+
+#define MTK_WDMA_GLO_CFG 0x204
+#define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MTK_WDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MTK_WDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26)
+#define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27)
+#define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28)
+
+#define MTK_WDMA_RESET_IDX 0x208
+#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
+#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
+
+#define MTK_WDMA_INT_STATUS 0x220
+
+#define MTK_WDMA_INT_MASK 0x228
+#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
+#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
+#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28)
+#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29)
+#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
+#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
+
+#define MTK_WDMA_INT_GRP1 0x250
+#define MTK_WDMA_INT_GRP2 0x254
+
+#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
+#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
+#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
+
+/* DMA channel mapping */
+#define HIFSYS_DMA_AG_MAP 0x008
+
+#define MTK_WED_RTQM_GLO_CFG 0xb00
+#define MTK_WED_RTQM_BUSY BIT(1)
+#define MTK_WED_RTQM_Q_RST BIT(2)
+#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
+#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
+
+#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
+#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
+#define MTK_WED_RTQM_Q2N_MIB 0xb80
+#define MTK_WED_RTQM_Q2H_MIB(_n) (0xb84 + (_n) * 0x4)
+
+#define MTK_WED_RTQM_Q2B_MIB 0xb8c
+#define MTK_WED_RTQM_PFDBK_MIB 0xb90
+
+#define MTK_WED_RROQM_GLO_CFG 0xc04
+#define MTK_WED_RROQM_RST_IDX 0xc08
+#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
+#define MTK_WED_RROQM_RST_IDX_FDBK BIT(4)
+
+#define MTK_WED_RROQM_MIOD_CTRL0 0xc40
+#define MTK_WED_RROQM_MIOD_CTRL1 0xc44
+#define MTK_WED_RROQM_MIOD_CNT GENMASK(11, 0)
+
+#define MTK_WED_RROQM_MIOD_CTRL2 0xc48
+#define MTK_WED_RROQM_MIOD_CTRL3 0xc4c
+
+#define MTK_WED_RROQM_FDBK_CTRL0 0xc50
+#define MTK_WED_RROQM_FDBK_CTRL1 0xc54
+#define MTK_WED_RROQM_FDBK_CNT GENMASK(11, 0)
+
+#define MTK_WED_RROQM_FDBK_CTRL2 0xc58
+
+#define MTK_WED_RROQ_BASE_L 0xc80
+#define MTK_WED_RROQ_BASE_H 0xc84
+
+#define MTK_WED_RROQM_MIOD_CFG 0xc8c
+#define MTK_WED_RROQM_MIOD_MID_DW GENMASK(5, 0)
+#define MTK_WED_RROQM_MIOD_MOD_DW GENMASK(13, 8)
+#define MTK_WED_RROQM_MIOD_ENTRY_DW GENMASK(22, 16)
+
+#define MTK_WED_RROQM_MID_MIB 0xcc0
+#define MTK_WED_RROQM_MOD_MIB 0xcc4
+#define MTK_WED_RROQM_MOD_COHERENT_MIB 0xcc8
+#define MTK_WED_RROQM_FDBK_MIB 0xcd0
+#define MTK_WED_RROQM_FDBK_COHERENT_MIB 0xcd4
+#define MTK_WED_RROQM_FDBK_IND_MIB 0xce0
+#define MTK_WED_RROQM_FDBK_ENQ_MIB 0xce4
+#define MTK_WED_RROQM_FDBK_ANC_MIB 0xce8
+#define MTK_WED_RROQM_FDBK_ANC2H_MIB 0xcec
+
+#define MTK_WED_RX_BM_RX_DMAD 0xd80
+#define MTK_WED_RX_BM_BASE 0xd84
+#define MTK_WED_RX_BM_INIT_PTR 0xd88
+#define MTK_WED_RX_BM_PTR 0xd8c
+#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16)
+#define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0)
+
+#define MTK_WED_RX_BM_BLEN 0xd90
+#define MTK_WED_RX_BM_STS 0xd94
+#define MTK_WED_RX_BM_INTF2 0xd98
+#define MTK_WED_RX_BM_INTF 0xd9c
+#define MTK_WED_RX_BM_ERR_STS 0xda8
+
+#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
+#define MTK_WED_PCIE_INT_MASK 0x0
+
+#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
new file mode 100644
index 000000000000..69fba29055e9
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sujuan Chen <sujuan.chen@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_irq.h>
+#include <linux/bitfield.h>
+
+#include "mtk_wed.h"
+#include "mtk_wed_regs.h"
+#include "mtk_wed_wo.h"
+
+static u32
+mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
+{
+ u32 val;
+
+ if (regmap_read(wo->mmio.regs, reg, &val))
+ val = ~0;
+
+ return val;
+}
+
+static void
+mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+{
+ regmap_write(wo->mmio.regs, reg, val);
+}
+
+static u32
+mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
+{
+ u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
+
+ return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
+}
+
+static void
+mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
+{
+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
+}
+
+static void
+mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
+{
+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
+}
+
+static void
+mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wo->mmio.lock, flags);
+ wo->mmio.irq_mask &= ~mask;
+ wo->mmio.irq_mask |= val;
+ if (set)
+ mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
+ spin_unlock_irqrestore(&wo->mmio.lock, flags);
+}
+
+static void
+mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
+{
+ mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
+ tasklet_schedule(&wo->mmio.irq_tasklet);
+}
+
+static void
+mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
+{
+ mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
+}
+
+static void
+mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
+{
+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
+}
+
+static void
+mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+ u32 val)
+{
+ wmb();
+ mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
+}
+
+static void *
+mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
+ bool flush)
+{
+ int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
+ int index = (q->tail + 1) % q->n_desc;
+ struct mtk_wed_wo_queue_entry *entry;
+ struct mtk_wed_wo_queue_desc *desc;
+ void *buf;
+
+ if (!q->queued)
+ return NULL;
+
+ if (flush)
+ q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
+ else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
+ return NULL;
+
+ q->tail = index;
+ q->queued--;
+
+ desc = &q->desc[index];
+ entry = &q->entry[index];
+ buf = entry->buf;
+ if (len)
+ *len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
+ le32_to_cpu(READ_ONCE(desc->ctrl)));
+ if (buf)
+ dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
+ DMA_FROM_DEVICE);
+ entry->buf = NULL;
+
+ return buf;
+}
+
+static int
+mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+ bool rx)
+{
+ enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ int n_buf = 0;
+
+ while (q->queued < q->n_desc) {
+ struct mtk_wed_wo_queue_entry *entry;
+ dma_addr_t addr;
+ void *buf;
+
+ buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
+ if (!buf)
+ break;
+
+ addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
+ if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
+ skb_free_frag(buf);
+ break;
+ }
+
+ q->head = (q->head + 1) % q->n_desc;
+ entry = &q->entry[q->head];
+ entry->addr = addr;
+ entry->len = q->buf_size;
+ q->entry[q->head].buf = buf;
+
+ if (rx) {
+ struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
+ u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
+ FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
+ entry->len);
+
+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+ }
+ q->queued++;
+ n_buf++;
+ }
+
+ return n_buf;
+}
+
+static void
+mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
+{
+ mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
+ mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
+}
+
+static void
+mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+ for (;;) {
+ struct mtk_wed_mcu_hdr *hdr;
+ struct sk_buff *skb;
+ void *data;
+ u32 len;
+
+ data = mtk_wed_wo_dequeue(wo, q, &len, false);
+ if (!data)
+ break;
+
+ skb = build_skb(data, q->buf_size);
+ if (!skb) {
+ skb_free_frag(data);
+ continue;
+ }
+
+ __skb_put(skb, len);
+ if (mtk_wed_mcu_check_msg(wo, skb)) {
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+ hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+ if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
+ mtk_wed_mcu_rx_event(wo, skb);
+ else
+ mtk_wed_mcu_rx_unsolicited_event(wo, skb);
+ }
+
+ if (mtk_wed_wo_queue_refill(wo, q, true)) {
+ u32 index = (q->head - 1) % q->n_desc;
+
+ mtk_wed_wo_queue_kick(wo, q, index);
+ }
+}
+
+static irqreturn_t
+mtk_wed_wo_irq_handler(int irq, void *data)
+{
+ struct mtk_wed_wo *wo = data;
+
+ mtk_wed_wo_set_isr(wo, 0);
+ tasklet_schedule(&wo->mmio.irq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
+{
+ struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
+ u32 intr, mask;
+
+ /* disable interrupts */
+ mtk_wed_wo_set_isr(wo, 0);
+
+ intr = mtk_wed_wo_get_isr(wo);
+ intr &= wo->mmio.irq_mask;
+ mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
+ mtk_wed_wo_irq_disable(wo, mask);
+
+ if (intr & MTK_WED_WO_RXCH_INT_MASK) {
+ mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
+ mtk_wed_wo_rx_complete(wo);
+ }
+}
+
+/* mtk wed wo hw queues */
+
+static int
+mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+ int n_desc, int buf_size, int index,
+ struct mtk_wed_wo_queue_regs *regs)
+{
+ q->regs = *regs;
+ q->n_desc = n_desc;
+ q->buf_size = buf_size;
+
+ q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
+ &q->desc_dma, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void
+mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+ mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+ dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
+ q->desc_dma);
+}
+
+static void
+mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+ struct page *page;
+ int i;
+
+ for (i = 0; i < q->n_desc; i++) {
+ struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
+
+ dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
+ DMA_TO_DEVICE);
+ skb_free_frag(entry->buf);
+ entry->buf = NULL;
+ }
+
+ if (!q->cache.va)
+ return;
+
+ page = virt_to_page(q->cache.va);
+ __page_frag_cache_drain(page, q->cache.pagecnt_bias);
+ memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+ struct page *page;
+
+ for (;;) {
+ void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
+
+ if (!buf)
+ break;
+
+ skb_free_frag(buf);
+ }
+
+ if (!q->cache.va)
+ return;
+
+ page = virt_to_page(q->cache.va);
+ __page_frag_cache_drain(page, q->cache.pagecnt_bias);
+ memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+ mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+ mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
+ mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
+}
+
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+ struct sk_buff *skb)
+{
+ struct mtk_wed_wo_queue_entry *entry;
+ struct mtk_wed_wo_queue_desc *desc;
+ int ret = 0, index;
+ u32 ctrl;
+
+ q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
+ index = (q->head + 1) % q->n_desc;
+ if (q->tail == index) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ entry = &q->entry[index];
+ if (skb->len > entry->len) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ desc = &q->desc[index];
+ q->head = index;
+
+ dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
+ DMA_TO_DEVICE);
+ memcpy(entry->buf, skb->data, skb->len);
+ dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
+ DMA_TO_DEVICE);
+
+ ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
+ MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
+ WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+
+ mtk_wed_wo_queue_kick(wo, q, q->head);
+ mtk_wed_wo_kickout(wo);
+out:
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+static int
+mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
+{
+ return 0;
+}
+
+static int
+mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
+{
+ struct mtk_wed_wo_queue_regs regs;
+ struct device_node *np;
+ int ret;
+
+ np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
+ if (!np)
+ return -ENODEV;
+
+ wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
+ if (IS_ERR(wo->mmio.regs)) {
+ ret = PTR_ERR(wo->mmio.regs);
+ goto error_put;
+ }
+
+ wo->mmio.irq = irq_of_parse_and_map(np, 0);
+ wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
+ spin_lock_init(&wo->mmio.lock);
+ tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
+
+ ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
+ mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
+ KBUILD_MODNAME, wo);
+ if (ret)
+ goto error;
+
+ regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
+ regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
+ regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
+ regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
+
+ ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
+ &regs);
+ if (ret)
+ goto error;
+
+ mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
+ mtk_wed_wo_queue_reset(wo, &wo->q_tx);
+
+ regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
+ regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
+ regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
+ regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
+
+ ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
+ &regs);
+ if (ret)
+ goto error;
+
+ mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
+ mtk_wed_wo_queue_reset(wo, &wo->q_rx);
+
+ /* rx queue irqmask */
+ mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
+
+ return 0;
+
+error:
+ devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+error_put:
+ of_node_put(np);
+ return ret;
+}
+
+static void
+mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
+{
+ /* disable interrupts */
+ mtk_wed_wo_set_isr(wo, 0);
+
+ tasklet_disable(&wo->mmio.irq_tasklet);
+
+ disable_irq(wo->mmio.irq);
+ devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+
+ mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
+ mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
+ mtk_wed_wo_queue_free(wo, &wo->q_tx);
+ mtk_wed_wo_queue_free(wo, &wo->q_rx);
+}
+
+int mtk_wed_wo_init(struct mtk_wed_hw *hw)
+{
+ struct mtk_wed_wo *wo;
+ int ret;
+
+ wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
+ if (!wo)
+ return -ENOMEM;
+
+ hw->wed_wo = wo;
+ wo->hw = hw;
+
+ ret = mtk_wed_wo_hardware_init(wo);
+ if (ret)
+ return ret;
+
+ ret = mtk_wed_mcu_init(wo);
+ if (ret)
+ return ret;
+
+ return mtk_wed_wo_exception_init(wo);
+}
+
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
+{
+ struct mtk_wed_wo *wo = hw->wed_wo;
+
+ mtk_wed_wo_hw_deinit(wo);
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
new file mode 100644
index 000000000000..7a1a2a28f1ac
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2022 Lorenzo Bianconi <lorenzo@kernel.org> */
+
+#ifndef __MTK_WED_WO_H
+#define __MTK_WED_WO_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+struct mtk_wed_hw;
+
+struct mtk_wed_mcu_hdr {
+ /* DW0 */
+ u8 version;
+ u8 cmd;
+ __le16 length;
+
+ /* DW1 */
+ __le16 seq;
+ __le16 flag;
+
+ /* DW2 */
+ __le32 status;
+
+ /* DW3 */
+ u8 rsv[20];
+};
+
+struct mtk_wed_wo_log_info {
+ __le32 sn;
+ __le32 total;
+ __le32 rro;
+ __le32 mod;
+};
+
+enum mtk_wed_wo_event {
+ MTK_WED_WO_EVT_LOG_DUMP = 0x1,
+ MTK_WED_WO_EVT_PROFILING = 0x2,
+ MTK_WED_WO_EVT_RXCNT_INFO = 0x3,
+};
+
+#define MTK_WED_MODULE_ID_WO 1
+#define MTK_FW_DL_TIMEOUT 4000000 /* us */
+#define MTK_WOCPU_TIMEOUT 2000000 /* us */
+
+enum {
+ MTK_WED_WARP_CMD_FLAG_RSP = BIT(0),
+ MTK_WED_WARP_CMD_FLAG_NEED_RSP = BIT(1),
+ MTK_WED_WARP_CMD_FLAG_FROM_TO_WO = BIT(2),
+};
+
+#define MTK_WED_WO_CPU_MCUSYS_RESET_ADDR 0x15194050
+#define MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK 0x20
+#define MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK 0x1
+
+enum {
+ MTK_WED_WO_REGION_EMI,
+ MTK_WED_WO_REGION_ILM,
+ MTK_WED_WO_REGION_DATA,
+ MTK_WED_WO_REGION_BOOT,
+ __MTK_WED_WO_REGION_MAX,
+};
+
+enum mtk_wed_wo_state {
+ MTK_WED_WO_STATE_UNDEFINED,
+ MTK_WED_WO_STATE_INIT,
+ MTK_WED_WO_STATE_ENABLE,
+ MTK_WED_WO_STATE_DISABLE,
+ MTK_WED_WO_STATE_HALT,
+ MTK_WED_WO_STATE_GATING,
+ MTK_WED_WO_STATE_SER_RESET,
+ MTK_WED_WO_STATE_WF_RESET,
+};
+
+enum mtk_wed_wo_done_state {
+ MTK_WED_WOIF_UNDEFINED,
+ MTK_WED_WOIF_DISABLE_DONE,
+ MTK_WED_WOIF_TRIGGER_ENABLE,
+ MTK_WED_WOIF_ENABLE_DONE,
+ MTK_WED_WOIF_TRIGGER_GATING,
+ MTK_WED_WOIF_GATING_DONE,
+ MTK_WED_WOIF_TRIGGER_HALT,
+ MTK_WED_WOIF_HALT_DONE,
+};
+
+enum mtk_wed_dummy_cr_idx {
+ MTK_WED_DUMMY_CR_FWDL,
+ MTK_WED_DUMMY_CR_WO_STATUS,
+};
+
+#define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin"
+#define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin"
+#define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin"
+
+#define MTK_WO_MCU_CFG_LS_BASE 0
+#define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000)
+#define MTK_WO_MCU_CFG_LS_FW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x004)
+#define MTK_WO_MCU_CFG_LS_CFG_DBG1_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x00c)
+#define MTK_WO_MCU_CFG_LS_CFG_DBG2_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x010)
+#define MTK_WO_MCU_CFG_LS_WF_MCCR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x014)
+#define MTK_WO_MCU_CFG_LS_WF_MCCR_SET_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x018)
+#define MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x01c)
+#define MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x050)
+#define MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x060)
+#define MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x064)
+
+#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK BIT(5)
+#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK BIT(0)
+
+#define MTK_WED_WO_RING_SIZE 256
+#define MTK_WED_WO_CMD_LEN 1504
+
+#define MTK_WED_WO_TXCH_NUM 0
+#define MTK_WED_WO_RXCH_NUM 1
+#define MTK_WED_WO_RXCH_WO_EXCEPTION 7
+
+#define MTK_WED_WO_TXCH_INT_MASK BIT(0)
+#define MTK_WED_WO_RXCH_INT_MASK BIT(1)
+#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7)
+#define MTK_WED_WO_ALL_INT_MASK (MTK_WED_WO_RXCH_INT_MASK | \
+ MTK_WED_WO_EXCEPTION_INT_MASK)
+
+#define MTK_WED_WO_CCIF_BUSY 0x004
+#define MTK_WED_WO_CCIF_START 0x008
+#define MTK_WED_WO_CCIF_TCHNUM 0x00c
+#define MTK_WED_WO_CCIF_RCHNUM 0x010
+#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0)
+
+#define MTK_WED_WO_CCIF_ACK 0x014
+#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018
+#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c
+#define MTK_WED_WO_CCIF_DUMMY1 0x020
+#define MTK_WED_WO_CCIF_DUMMY2 0x024
+#define MTK_WED_WO_CCIF_DUMMY3 0x028
+#define MTK_WED_WO_CCIF_DUMMY4 0x02c
+#define MTK_WED_WO_CCIF_SHADOW1 0x030
+#define MTK_WED_WO_CCIF_SHADOW2 0x034
+#define MTK_WED_WO_CCIF_SHADOW3 0x038
+#define MTK_WED_WO_CCIF_SHADOW4 0x03c
+#define MTK_WED_WO_CCIF_DUMMY5 0x050
+#define MTK_WED_WO_CCIF_DUMMY6 0x054
+#define MTK_WED_WO_CCIF_DUMMY7 0x058
+#define MTK_WED_WO_CCIF_DUMMY8 0x05c
+#define MTK_WED_WO_CCIF_SHADOW5 0x060
+#define MTK_WED_WO_CCIF_SHADOW6 0x064
+#define MTK_WED_WO_CCIF_SHADOW7 0x068
+#define MTK_WED_WO_CCIF_SHADOW8 0x06c
+
+#define MTK_WED_WO_CTL_SD_LEN1 GENMASK(13, 0)
+#define MTK_WED_WO_CTL_LAST_SEC1 BIT(14)
+#define MTK_WED_WO_CTL_BURST BIT(15)
+#define MTK_WED_WO_CTL_SD_LEN0_SHIFT 16
+#define MTK_WED_WO_CTL_SD_LEN0 GENMASK(29, 16)
+#define MTK_WED_WO_CTL_LAST_SEC0 BIT(30)
+#define MTK_WED_WO_CTL_DMA_DONE BIT(31)
+#define MTK_WED_WO_INFO_WINFO GENMASK(15, 0)
+
+struct mtk_wed_wo_memory_region {
+ const char *name;
+ void __iomem *addr;
+ phys_addr_t phy_addr;
+ u32 size;
+ bool shared:1;
+ bool consumed:1;
+};
+
+struct mtk_wed_fw_region {
+ __le32 decomp_crc;
+ __le32 decomp_len;
+ __le32 decomp_blk_sz;
+ u8 rsv0[4];
+ __le32 addr;
+ __le32 len;
+ u8 feature_set;
+ u8 rsv1[15];
+} __packed;
+
+struct mtk_wed_fw_trailer {
+ u8 chip_id;
+ u8 eco_code;
+ u8 num_region;
+ u8 format_ver;
+ u8 format_flag;
+ u8 rsv[2];
+ char fw_ver[10];
+ char build_date[15];
+ u32 crc;
+};
+
+struct mtk_wed_wo_queue_regs {
+ u32 desc_base;
+ u32 ring_size;
+ u32 cpu_idx;
+ u32 dma_idx;
+};
+
+struct mtk_wed_wo_queue_desc {
+ __le32 buf0;
+ __le32 ctrl;
+ __le32 buf1;
+ __le32 info;
+ __le32 reserved[4];
+} __packed __aligned(32);
+
+struct mtk_wed_wo_queue_entry {
+ dma_addr_t addr;
+ void *buf;
+ u32 len;
+};
+
+struct mtk_wed_wo_queue {
+ struct mtk_wed_wo_queue_regs regs;
+
+ struct page_frag_cache cache;
+
+ struct mtk_wed_wo_queue_desc *desc;
+ dma_addr_t desc_dma;
+
+ struct mtk_wed_wo_queue_entry *entry;
+
+ u16 head;
+ u16 tail;
+ int n_desc;
+ int queued;
+ int buf_size;
+
+};
+
+struct mtk_wed_wo {
+ struct mtk_wed_hw *hw;
+ struct mtk_wed_wo_memory_region boot;
+
+ struct mtk_wed_wo_queue q_tx;
+ struct mtk_wed_wo_queue q_rx;
+
+ struct {
+ struct mutex mutex;
+ int timeout;
+ u16 seq;
+
+ struct sk_buff_head res_q;
+ wait_queue_head_t wait;
+ } mcu;
+
+ struct {
+ struct regmap *regs;
+
+ spinlock_t lock;
+ struct tasklet_struct irq_tasklet;
+ int irq;
+ u32 irq_mask;
+ } mmio;
+};
+
+static inline int
+mtk_wed_mcu_check_msg(struct mtk_wed_wo *wo, struct sk_buff *skb)
+{
+ struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+
+ if (hdr->version)
+ return -EINVAL;
+
+ if (skb->len < sizeof(*hdr) || skb->len != le16_to_cpu(hdr->length))
+ return -EINVAL;
+
+ return 0;
+}
+
+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo,
+ struct sk_buff *skb);
+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
+ const void *data, int len, bool wait_resp);
+int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
+ int len);
+int mtk_wed_mcu_init(struct mtk_wed_wo *wo);
+int mtk_wed_wo_init(struct mtk_wed_hw *hw);
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw);
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *dev, struct mtk_wed_wo_queue *q,
+ struct sk_buff *skb);
+
+#endif /* __MTK_WED_WO_H */
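[Editor's illustration, not part of the patch: the WO queue descriptor control word is packed and unpacked with the MTK_WED_WO_CTL_* masks via FIELD_PREP()/FIELD_GET(), as done in mtk_wed_wo_queue_tx_skb() and mtk_wed_wo_dequeue() above. A minimal sketch of that packing, assuming <linux/bitfield.h> is included:]

/* Illustrative sketch, not part of this patch. */
static inline __le32 wo_ctl_pack_example(u32 len)
{
	u32 ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, len) |	/* segment 0 length */
		   MTK_WED_WO_CTL_LAST_SEC0 |			/* single-segment buffer */
		   MTK_WED_WO_CTL_DMA_DONE;			/* completion flag, as in tx_skb */

	return cpu_to_le32(ctrl);
}

static inline u32 wo_ctl_len_example(__le32 ctrl)
{
	return FIELD_GET(MTK_WED_WO_CTL_SD_LEN0, le32_to_cpu(ctrl));
}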
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 5b11557f1ae4..0eb7b83637d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -204,9 +204,13 @@ out:
static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
{
+ struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
int err = 0;
mlx4_enter_error_state(persist);
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
!(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
@@ -215,6 +219,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
err);
}
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
}
static void dump_err_buf(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e10b7b04b894..c56d2194cbfc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1994,21 +1994,16 @@ static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
- int port, err;
+ int p, port, err;
struct mlx4_vport_state *vp_admin;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_slave_state *slave_state =
&priv->mfunc.master.slave_state[slave];
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
&priv->dev, slave);
- int min_port = find_first_bit(actv_ports.ports,
- priv->dev.caps.num_ports) + 1;
- int max_port = min_port - 1 +
- bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
- for (port = min_port; port <= max_port; port++) {
- if (!test_bit(port - 1, actv_ports.ports))
- continue;
+ for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
+ port = p + 1;
priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
priv->mfunc.master.vf_admin[slave].enable_smi[port];
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
@@ -2063,19 +2058,13 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
{
- int port;
+ int p, port;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
&priv->dev, slave);
- int min_port = find_first_bit(actv_ports.ports,
- priv->dev.caps.num_ports) + 1;
- int max_port = min_port - 1 +
- bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
-
- for (port = min_port; port <= max_port; port++) {
- if (!test_bit(port - 1, actv_ports.ports))
- continue;
+ for_each_set_bit(p, actv_ports.ports, priv->dev.caps.num_ports) {
+ port = p + 1;
priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
MLX4_VF_SMI_DISABLED;
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
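The cmd.c change replaces the min_port/max_port bookkeeping with a direct walk over the set bits of the active-port bitmap; both forms visit exactly the active ports, the new one simply drops the manual skip of cleared bits. A small standalone sketch of the equivalent loop, using a plain unsigned long instead of the kernel's bitmap helpers:

/* Standalone sketch: iterate the active ports directly by their set bits,
 * as for_each_set_bit() does, instead of scanning min_port..max_port and
 * skipping cleared bits.  Port numbers are 1-based, bit positions 0-based.
 */
#include <stdio.h>

int main(void)
{
	unsigned long actv_ports = 0x0b;	/* bits 0, 1, 3 -> ports 1, 2, 4 */
	int num_ports = 4;
	int p;

	for (p = 0; p < num_ports; p++) {
		if (!(actv_ports & (1UL << p)))	/* for_each_set_bit() skips these */
			continue;
		printf("configuring port %d\n", p + 1);
	}
	return 0;
}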
diff --git a/drivers/net/ethernet/mellanox/mlx4/crdump.c b/drivers/net/ethernet/mellanox/mlx4/crdump.c
index ac5468b77488..82a07a31cde7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/crdump.c
+++ b/drivers/net/ethernet/mellanox/mlx4/crdump.c
@@ -226,10 +226,10 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create cr-space region */
crdump->region_crspace =
- devlink_region_create(devlink,
- &region_cr_space_ops,
- MAX_NUM_OF_DUMPS_TO_STORE,
- pci_resource_len(pdev, 0));
+ devl_region_create(devlink,
+ &region_cr_space_ops,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ pci_resource_len(pdev, 0));
if (IS_ERR(crdump->region_crspace))
mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
region_cr_space_str,
@@ -237,10 +237,10 @@ int mlx4_crdump_init(struct mlx4_dev *dev)
/* Create fw-health region */
crdump->region_fw_health =
- devlink_region_create(devlink,
- &region_fw_health_ops,
- MAX_NUM_OF_DUMPS_TO_STORE,
- HEALTH_BUFFER_SIZE);
+ devl_region_create(devlink,
+ &region_fw_health_ops,
+ MAX_NUM_OF_DUMPS_TO_STORE,
+ HEALTH_BUFFER_SIZE);
if (IS_ERR(crdump->region_fw_health))
mlx4_warn(dev, "crdump: create devlink region %s err %ld\n",
region_fw_health_str,
@@ -253,6 +253,6 @@ void mlx4_crdump_end(struct mlx4_dev *dev)
{
struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
- devlink_region_destroy(crdump->region_fw_health);
- devlink_region_destroy(crdump->region_crspace);
+ devl_region_destroy(crdump->region_fw_health);
+ devl_region_destroy(crdump->region_crspace);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 024788549c25..9e3b76182088 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -58,9 +58,7 @@ u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)
return hi | lo;
}
-void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
- struct skb_shared_hwtstamps *hwts,
- u64 timestamp)
+u64 mlx4_en_get_hwtstamp(struct mlx4_en_dev *mdev, u64 timestamp)
{
unsigned int seq;
u64 nsec;
@@ -70,8 +68,15 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
nsec = timecounter_cyc2time(&mdev->clock, timestamp);
} while (read_seqretry(&mdev->clock_lock, seq));
+ return ns_to_ktime(nsec);
+}
+
+void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
+ struct skb_shared_hwtstamps *hwts,
+ u64 timestamp)
+{
memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
- hwts->hwtstamp = ns_to_ktime(nsec);
+ hwts->hwtstamp = mlx4_en_get_hwtstamp(mdev, timestamp);
}
/**
@@ -111,34 +116,27 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
}
/**
- * mlx4_en_phc_adjfreq - adjust the frequency of the hardware clock
+ * mlx4_en_phc_adjfine - adjust the frequency of the hardware clock
* @ptp: ptp clock structure
- * @delta: Desired frequency change in parts per billion
+ * @scaled_ppm: Desired frequency change in scaled parts per million
*
- * Adjust the frequency of the PHC cycle counter by the indicated delta from
- * the base frequency.
+ * Adjust the frequency of the PHC cycle counter by the indicated scaled_ppm
+ * from the base frequency.
+ *
+ * Scaled parts per million is ppm with a 16-bit binary fractional field.
**/
-static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+static int mlx4_en_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
- u64 adj;
- u32 diff, mult;
- int neg_adj = 0;
+ u32 mult;
unsigned long flags;
struct mlx4_en_dev *mdev = container_of(ptp, struct mlx4_en_dev,
ptp_clock_info);
- if (delta < 0) {
- neg_adj = 1;
- delta = -delta;
- }
- mult = mdev->nominal_c_mult;
- adj = mult;
- adj *= delta;
- diff = div_u64(adj, 1000000000ULL);
+ mult = (u32)adjust_by_scaled_ppm(mdev->nominal_c_mult, scaled_ppm);
write_seqlock_irqsave(&mdev->clock_lock, flags);
timecounter_read(&mdev->clock);
- mdev->cycles.mult = neg_adj ? mult - diff : mult + diff;
+ mdev->cycles.mult = mult;
write_sequnlock_irqrestore(&mdev->clock_lock, flags);
return 0;
@@ -237,7 +235,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
.n_per_out = 0,
.n_pins = 0,
.pps = 0,
- .adjfreq = mlx4_en_phc_adjfreq,
+ .adjfine = mlx4_en_phc_adjfine,
.adjtime = mlx4_en_phc_adjtime,
.gettime64 = mlx4_en_phc_gettime,
.settime64 = mlx4_en_phc_settime,
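The adjfreq to adjfine conversion swaps a hand-rolled parts-per-billion computation for adjust_by_scaled_ppm(), where scaled_ppm is ppm with a 16-bit binary fraction (so +1.5 ppm is 1.5 * 65536 = 98304). Below is a standalone sketch of roughly what that helper computes for the cycle-counter multiplier; the nominal multiplier value is made up for illustration.

/* Standalone sketch of the scaled_ppm arithmetic (an approximation of what
 * adjust_by_scaled_ppm() does, not the kernel helper itself).  scaled_ppm is
 * parts per million with a 16-bit binary fraction, so the encoded frequency
 * ratio is scaled_ppm / (1000000 * 2^16).
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static uint32_t adjust_mult(uint32_t nominal_mult, long scaled_ppm)
{
	unsigned long long delta;
	int neg = scaled_ppm < 0;

	delta = (unsigned long long)nominal_mult *
		(unsigned long long)labs(scaled_ppm);
	delta /= 1000000ULL << 16;	/* ppm with 16 fractional bits */

	return neg ? nominal_mult - (uint32_t)delta : nominal_mult + (uint32_t)delta;
}

int main(void)
{
	/* +1.5 ppm expressed as scaled_ppm: 1.5 * 65536 = 98304 */
	uint32_t nominal = 0x10000000;	/* made-up nominal multiplier */

	printf("adjusted mult: 0x%x\n", adjust_mult(nominal, 98304));
	return 0;
}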
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index d5fc72b1a36f..1184ac5751e1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -147,13 +147,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
switch (cq->type) {
case TX:
cq->mcq.comp = mlx4_en_tx_irq;
- netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(cq->dev, &cq->napi, mlx4_en_poll_tx_cq);
napi_enable(&cq->napi);
break;
case RX:
cq->mcq.comp = mlx4_en_rx_irq;
- netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
+ netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq);
napi_enable(&cq->napi);
break;
case TX_XDP:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 10238bedd694..7d45f1d55f79 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -89,15 +89,15 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION,
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, DRV_VERSION,
sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%d",
(u16) (mdev->dev->caps.fw_ver >> 32),
(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
(u16) (mdev->dev->caps.fw_ver & 0xffff));
- strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
+ strscpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
sizeof(drvinfo->bus_info));
}
@@ -1141,7 +1141,9 @@ static void mlx4_en_get_pauseparam(struct net_device *dev,
}
static int mlx4_en_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -1208,7 +1210,9 @@ out:
}
static void mlx4_en_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *param)
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param,
+ struct netlink_ext_ack *extack)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -2106,7 +2110,7 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev,
en_err(priv,
"mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
i, offset, ee->len - i, ret);
- return 0;
+ return ret;
}
i += ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f1c10f2bda78..e11bc0ac880e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -33,6 +33,7 @@
#include <linux/bpf.h>
#include <linux/etherdevice.h>
+#include <linux/filter.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
@@ -2336,11 +2337,8 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
/* Unregister device - this will close the port if it was up */
- if (priv->registered) {
- devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
- priv->port));
+ if (priv->registered)
unregister_netdev(dev);
- }
if (priv->allocated)
mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
@@ -2427,10 +2425,6 @@ static int mlx4_en_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
/* device doesn't support time stamping */
if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
return -EINVAL;
@@ -2895,6 +2889,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_bpf = mlx4_xdp,
};
+static const struct xdp_metadata_ops mlx4_xdp_metadata_ops = {
+ .xmo_rx_timestamp = mlx4_en_xdp_rx_timestamp,
+ .xmo_rx_hash = mlx4_en_xdp_rx_hash,
+};
+
struct mlx4_en_bond {
struct work_struct work;
struct mlx4_en_priv *priv;
@@ -3316,6 +3315,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->netdev_ops = &mlx4_netdev_ops_master;
else
dev->netdev_ops = &mlx4_netdev_ops;
+ dev->xdp_metadata_ops = &mlx4_xdp_metadata_ops;
dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
@@ -3416,10 +3416,15 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->rss_hash_fn = ETH_RSS_HASH_TOP;
}
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+
/* MTU range: 68 - hw-specific max */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = priv->max_mtu;
+ /* supports LSOv2 packets. */
+ netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+
mdev->pndev[port] = dev;
mdev->upper[port] = NULL;
@@ -3474,6 +3479,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
mdev->profile.prof[priv->port].tx_ppp,
mdev->profile.prof[priv->port].tx_pause);
+ SET_NETDEV_DEVLINK_PORT(dev,
+ mlx4_get_devlink_port(mdev->dev, priv->port));
err = register_netdev(dev);
if (err) {
en_err(priv, "Netdev registration failed for port %d\n", port);
@@ -3481,8 +3488,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
priv->registered = 1;
- devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port),
- dev);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 650e6a1844ae..332472fe4990 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -661,9 +661,59 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4)
#endif
+struct mlx4_en_xdp_buff {
+ struct xdp_buff xdp;
+ struct mlx4_cqe *cqe;
+ struct mlx4_en_dev *mdev;
+ struct mlx4_en_rx_ring *ring;
+ struct net_device *dev;
+};
+
+int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+{
+ struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
+
+ if (unlikely(_ctx->ring->hwtstamp_rx_filter != HWTSTAMP_FILTER_ALL))
+ return -ENODATA;
+
+ *timestamp = mlx4_en_get_hwtstamp(_ctx->mdev,
+ mlx4_en_get_cqe_ts(_ctx->cqe));
+ return 0;
+}
+
+int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
+{
+ struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
+ struct mlx4_cqe *cqe = _ctx->cqe;
+ enum xdp_rss_hash_type xht = 0;
+ __be16 status;
+
+ if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
+ return -ENODATA;
+
+ *hash = be32_to_cpu(cqe->immed_rss_invalid);
+ status = cqe->status;
+ if (status & cpu_to_be16(MLX4_CQE_STATUS_TCP))
+ xht = XDP_RSS_L4_TCP;
+ if (status & cpu_to_be16(MLX4_CQE_STATUS_UDP))
+ xht = XDP_RSS_L4_UDP;
+ if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV4F))
+ xht |= XDP_RSS_L3_IPV4;
+ if (status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) {
+ xht |= XDP_RSS_L3_IPV6;
+ if (cqe->ipv6_ext_mask)
+ xht |= XDP_RSS_L3_DYNHDR;
+ }
+ *rss_type = xht;
+
+ return 0;
+}
+
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_xdp_buff mxbuf = {};
int factor = priv->cqe_factor;
struct mlx4_en_rx_ring *ring;
struct bpf_prog *xdp_prog;
@@ -671,7 +721,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
bool doorbell_pending;
bool xdp_redir_flush;
struct mlx4_cqe *cqe;
- struct xdp_buff xdp;
int polled = 0;
int index;
@@ -681,7 +730,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
ring = priv->rx_ring[cq_ring];
xdp_prog = rcu_dereference_bh(ring->xdp_prog);
- xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
+ xdp_init_buff(&mxbuf.xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
doorbell_pending = false;
xdp_redir_flush = false;
@@ -776,24 +825,28 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
- xdp_prepare_buff(&xdp, va - frags[0].page_offset,
- frags[0].page_offset, length, false);
- orig_data = xdp.data;
-
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
-
- length = xdp.data_end - xdp.data;
- if (xdp.data != orig_data) {
- frags[0].page_offset = xdp.data -
- xdp.data_hard_start;
- va = xdp.data;
+ xdp_prepare_buff(&mxbuf.xdp, va - frags[0].page_offset,
+ frags[0].page_offset, length, true);
+ orig_data = mxbuf.xdp.data;
+ mxbuf.cqe = cqe;
+ mxbuf.mdev = priv->mdev;
+ mxbuf.ring = ring;
+ mxbuf.dev = dev;
+
+ act = bpf_prog_run_xdp(xdp_prog, &mxbuf.xdp);
+
+ length = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ if (mxbuf.xdp.data != orig_data) {
+ frags[0].page_offset = mxbuf.xdp.data -
+ mxbuf.xdp.data_hard_start;
+ va = mxbuf.xdp.data;
}
switch (act) {
case XDP_PASS:
break;
case XDP_REDIRECT:
- if (likely(!xdp_do_redirect(dev, &xdp, xdp_prog))) {
+ if (likely(!xdp_do_redirect(dev, &mxbuf.xdp, xdp_prog))) {
ring->xdp_redirect++;
xdp_redir_flush = true;
frags[0].page = NULL;
@@ -812,7 +865,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
trace_xdp_exception(dev, xdp_prog, act);
goto xdp_drop_no_cnt; /* Drop on xmit failure */
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(dev, xdp_prog, act);
@@ -1067,7 +1120,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
struct mlx4_qp_context *context;
int err = 0;
- context = kmalloc(sizeof(*context), GFP_KERNEL);
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
@@ -1078,7 +1131,6 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
}
qp->event = mlx4_en_sqp_event;
- memset(context, 0, sizeof(*context));
mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
qpn, ring->cqn, -1, context);
context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
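With mlx4_en_xdp_rx_timestamp() and mlx4_en_xdp_rx_hash() wired up through xdp_metadata_ops, an XDP program can ask the driver for these values via the generic rx-metadata kfuncs. The BPF-side sketch below is illustrative rather than part of this patch; it assumes a BTF-enabled toolchain with vmlinux.h and a kernel that exposes these kfuncs.

/* BPF-side sketch: reading the values exposed by the new
 * xmo_rx_timestamp/xmo_rx_hash hooks through the XDP rx-metadata kfuncs.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
					 __u64 *timestamp) __ksym;
extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
				    enum xdp_rss_hash_type *rss_type) __ksym;

SEC("xdp")
int rx_meta(struct xdp_md *ctx)
{
	enum xdp_rss_hash_type rss_type = 0;
	__u64 ts = 0;
	__u32 hash = 0;

	/* Both kfuncs return -ENODATA when the driver has nothing to report,
	 * e.g. when HWTSTAMP_FILTER_ALL or NETIF_F_RXHASH is not enabled.
	 */
	if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
		bpf_printk("hw rx timestamp: %llu", ts);
	if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
		bpf_printk("rx hash: 0x%x type %d", hash, rss_type);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";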
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 817f4154b86d..65cb63f6c465 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -42,8 +42,8 @@
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/moduleparam.h>
#include <linux/indirect_call_wrapper.h>
+#include <net/ipv6.h>
#include "mlx4_en.h"
@@ -65,7 +65,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->size = size;
ring->size_mask = size - 1;
ring->sp_stride = stride;
- ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
+ ring->full_size = ring->size - HEADROOM - MLX4_MAX_DESC_TXBBS;
tmp = size * sizeof(struct mlx4_en_tx_info);
ring->tx_info = kvmalloc_node(tmp, GFP_KERNEL, node);
@@ -77,9 +77,11 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
ring->tx_info, tmp);
- ring->bounce_buf = kmalloc_node(MAX_DESC_SIZE, GFP_KERNEL, node);
+ ring->bounce_buf = kmalloc_node(MLX4_TX_BOUNCE_BUFFER_SIZE,
+ GFP_KERNEL, node);
if (!ring->bounce_buf) {
- ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+ ring->bounce_buf = kmalloc(MLX4_TX_BOUNCE_BUFFER_SIZE,
+ GFP_KERNEL);
if (!ring->bounce_buf) {
err = -ENOMEM;
goto err_info;
@@ -226,7 +228,9 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
{
- return ring->prod - ring->cons > ring->full_size;
+ u32 used = READ_ONCE(ring->prod) - READ_ONCE(ring->cons);
+
+ return used > ring->full_size;
}
static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
@@ -635,19 +639,28 @@ static int get_real_size(const struct sk_buff *skb,
struct net_device *dev,
int *lso_header_size,
bool *inline_ok,
- void **pfrag)
+ void **pfrag,
+ int *hopbyhop)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int real_size;
if (shinfo->gso_size) {
*inline_ok = false;
- if (skb->encapsulation)
- *lso_header_size = (skb_inner_transport_header(skb) - skb->data) + inner_tcp_hdrlen(skb);
- else
- *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ *hopbyhop = 0;
+ if (skb->encapsulation) {
+ *lso_header_size = skb_inner_tcp_all_headers(skb);
+ } else {
+ /* Detects large IPV6 TCP packets and prepares for removal of
+ * HBH header that has been pushed by ip6_xmit(),
+ * mainly so that tcpdump can dissect them.
+ */
+ if (ipv6_has_hopopt_jumbo(skb))
+ *hopbyhop = sizeof(struct hop_jumbo_hdr);
+ *lso_header_size = skb_tcp_all_headers(skb);
+ }
real_size = CTRL_SIZE + shinfo->nr_frags * DS_SIZE +
- ALIGN(*lso_header_size + 4, DS_SIZE);
+ ALIGN(*lso_header_size - *hopbyhop + 4, DS_SIZE);
if (unlikely(*lso_header_size != skb_headlen(skb))) {
/* We add a segment for the skb linear buffer only if
* it contains data */
@@ -688,32 +701,32 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
} else {
inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
- memset(((void *)(inl + 1)) + skb->len, 0,
+ memset(inl->data + skb->len, 0,
MIN_PKT_LEN - skb->len);
}
- skb_copy_from_linear_data(skb, inl + 1, hlen);
+ skb_copy_from_linear_data(skb, inl->data, hlen);
if (shinfo->nr_frags)
- memcpy(((void *)(inl + 1)) + hlen, fragptr,
+ memcpy(inl->data + hlen, fragptr,
skb_frag_size(&shinfo->frags[0]));
} else {
inl->byte_count = cpu_to_be32(1 << 31 | spc);
if (hlen <= spc) {
- skb_copy_from_linear_data(skb, inl + 1, hlen);
+ skb_copy_from_linear_data(skb, inl->data, hlen);
if (hlen < spc) {
- memcpy(((void *)(inl + 1)) + hlen,
+ memcpy(inl->data + hlen,
fragptr, spc - hlen);
fragptr += spc - hlen;
}
- inl = (void *) (inl + 1) + spc;
- memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
+ inl = (void *)inl->data + spc;
+ memcpy(inl->data, fragptr, skb->len - spc);
} else {
- skb_copy_from_linear_data(skb, inl + 1, spc);
- inl = (void *) (inl + 1) + spc;
- skb_copy_from_linear_data_offset(skb, spc, inl + 1,
+ skb_copy_from_linear_data(skb, inl->data, spc);
+ inl = (void *)inl->data + spc;
+ skb_copy_from_linear_data_offset(skb, spc, inl->data,
hlen - spc);
if (shinfo->nr_frags)
- memcpy(((void *)(inl + 1)) + hlen - spc,
+ memcpy(inl->data + hlen - spc,
fragptr,
skb_frag_size(&shinfo->frags[0]));
}
@@ -874,6 +887,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
int desc_size;
int real_size;
u32 index, bf_index;
+ struct ipv6hdr *h6;
__be32 op_own;
int lso_header_size;
void *fragptr = NULL;
@@ -882,6 +896,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool stop_queue;
bool inline_ok;
u8 data_offset;
+ int hopbyhop;
bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
@@ -891,18 +906,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_drop;
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
- &inline_ok, &fragptr);
+ &inline_ok, &fragptr, &hopbyhop);
if (unlikely(!real_size))
goto tx_drop_count;
/* Align descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
nr_txbb = desc_size >> LOG_TXBB_SIZE;
- if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
- if (netif_msg_tx_err(priv))
- en_warn(priv, "Oversized header or SG list\n");
- goto tx_drop_count;
- }
bf_ok = ring->bf_enabled;
if (skb_vlan_tag_present(skb)) {
@@ -930,6 +940,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(index + nr_txbb <= ring->size))
tx_desc = ring->buf + (index << LOG_TXBB_SIZE);
else {
+ if (unlikely(nr_txbb > MLX4_MAX_DESC_TXBBS)) {
+ if (netif_msg_tx_err(priv))
+ en_warn(priv, "Oversized header or SG list\n");
+ goto tx_drop_count;
+ }
tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
bounce = true;
bf_ok = false;
@@ -944,7 +959,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
data = &tx_desc->data;
data_offset = offsetof(struct mlx4_en_tx_desc, data);
} else {
- int lso_align = ALIGN(lso_header_size + 4, DS_SIZE);
+ int lso_align = ALIGN(lso_header_size - hopbyhop + 4, DS_SIZE);
data = (void *)&tx_desc->lso + lso_align;
data_offset = offsetof(struct mlx4_en_tx_desc, lso) + lso_align;
@@ -1009,14 +1024,31 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+ lso_header_size -= hopbyhop;
/* Fill in the LSO prefix */
tx_desc->lso.mss_hdr_size = cpu_to_be32(
shinfo->gso_size << 16 | lso_header_size);
- /* Copy headers;
- * note that we already verified that it is linear */
- memcpy(tx_desc->lso.header, skb->data, lso_header_size);
+ if (unlikely(hopbyhop)) {
+ /* remove the HBH header.
+ * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
+ */
+ memcpy(tx_desc->lso.header, skb->data, ETH_HLEN + sizeof(*h6));
+ h6 = (struct ipv6hdr *)((char *)tx_desc->lso.header + ETH_HLEN);
+ h6->nexthdr = IPPROTO_TCP;
+ /* Copy the TCP header after the IPv6 one */
+ memcpy(h6 + 1,
+ skb->data + ETH_HLEN + sizeof(*h6) +
+ sizeof(struct hop_jumbo_hdr),
+ tcp_hdrlen(skb));
+ /* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
+ } else {
+ /* Copy headers;
+ * note that we already verified that it is linear
+ */
+ memcpy(tx_desc->lso.header, skb->data, lso_header_size);
+ }
ring->tso_packets++;
i = shinfo->gso_segs;
@@ -1053,7 +1085,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
}
- ring->prod += nr_txbb;
+ WRITE_ONCE(ring->prod, ring->prod + nr_txbb);
/* If we used a bounce buffer then copy descriptor back into place */
if (unlikely(bounce))
@@ -1184,7 +1216,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
rx_ring->xdp_tx++;
- ring->prod += MLX4_EN_XDP_TX_NRTXBB;
+ WRITE_ONCE(ring->prod, ring->prod + MLX4_EN_XDP_TX_NRTXBB);
/* Ensure new descriptor hits memory
* before setting ownership of this descriptor to HW
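The LSO path above strips the 8-byte Hop-by-Hop jumbo option that ip6_xmit() inserts for IPv6 packets larger than 64 KB (BIG TCP / LSO v2): it copies the Ethernet and IPv6 headers, rewrites nexthdr to TCP, then copies the TCP header from just past the option. A standalone sketch of that header surgery, with sizes hard-coded as assumptions:

/* Standalone sketch (plain C).  Assumed sizes: 14-byte Ethernet header,
 * 40-byte IPv6 header, 8-byte Hop-by-Hop jumbo option, 20-byte TCP header.
 */
#include <stdio.h>
#include <string.h>

#define ETH_HLEN	14
#define IPV6_HLEN	40
#define HBH_LEN		8	/* hop-by-hop jumbo option added by ip6_xmit() */
#define TCP_HLEN	20

int main(void)
{
	unsigned char skb[ETH_HLEN + IPV6_HLEN + HBH_LEN + TCP_HLEN] = { 0 };
	unsigned char lso_header[ETH_HLEN + IPV6_HLEN + TCP_HLEN];

	skb[ETH_HLEN + 6] = 0;			/* ipv6hdr.nexthdr = HOPOPTS */

	/* 1. copy the Ethernet and IPv6 headers as they are */
	memcpy(lso_header, skb, ETH_HLEN + IPV6_HLEN);
	/* 2. pretend the option was never there */
	lso_header[ETH_HLEN + 6] = 6;		/* ipv6hdr.nexthdr = IPPROTO_TCP */
	/* 3. copy the TCP header from just past the option */
	memcpy(lso_header + ETH_HLEN + IPV6_HLEN,
	       skb + ETH_HLEN + IPV6_HLEN + HBH_LEN, TCP_HLEN);

	printf("LSO header sent to HW: %zu bytes (option stripped)\n",
	       sizeof(lso_header));
	return 0;
}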
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 9e48509ed3b2..414e390e6b48 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -244,9 +244,9 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
cpumask_empty(eq->affinity_mask))
return;
- hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+ hint_err = irq_update_affinity_hint(eq->irq, eq->affinity_mask);
if (hint_err)
- mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+ mlx4_warn(dev, "irq_update_affinity_hint failed, err %d\n", hint_err);
}
#endif
@@ -1123,9 +1123,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq) {
free_cpumask_var(eq_table->eq[i].affinity_mask);
-#if defined(CONFIG_SMP)
- irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
-#endif
+ irq_update_affinity_hint(eq_table->eq[i].irq, NULL);
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
eq_table->eq[i].have_irq = 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 42c96c9d7fb1..fe48d20d6118 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -463,7 +463,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
field = min(
bitmap_weight(actv_ports.ports, dev->caps.num_ports),
- dev->caps.num_ports);
+ (unsigned int) dev->caps.num_ports);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
size = dev->caps.function_caps; /* set PF behaviours */
@@ -1779,7 +1779,7 @@ static void get_board_id(void *vsd, char *board_id)
if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
- strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
+ strscpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
} else {
/*
* The board ID is a string but the firmware byte
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index d89a3da89e5a..59b8b3c73582 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -208,7 +208,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
chunk->sg, chunk->npages,
DMA_BIDIRECTIONAL);
- if (chunk->nsg <= 0)
+ if (!chunk->nsg)
goto fail;
}
@@ -222,7 +222,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
chunk->nsg = dma_map_sg(&dev->persist->pdev->dev, chunk->sg,
chunk->npages, DMA_BIDIRECTIONAL);
- if (chunk->nsg <= 0)
+ if (!chunk->nsg)
goto fail;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index b187c210d4d6..277738c50c56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -265,29 +265,29 @@ static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
union devlink_param_value value;
value.vbool = !!mlx4_internal_err_reset;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ value);
value.vu32 = 1UL << log_num_mac;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
value.vbool = enable_64b_cqe_eqe;
- devlink_param_driverinit_value_set(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
- value);
+ devl_param_driverinit_value_set(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ value);
value.vbool = enable_4k_uar;
- devlink_param_driverinit_value_set(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
- value);
+ devl_param_driverinit_value_set(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ value);
value.vbool = false;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ value);
}
static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
@@ -3033,7 +3033,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
int err;
- err = devlink_port_register(devlink, &info->devlink_port, port);
+ err = devl_port_register(devlink, &info->devlink_port, port);
if (err)
return err;
@@ -3043,7 +3043,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
*/
if (!IS_ENABLED(CONFIG_MLX4_EN) &&
dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
- devlink_port_type_eth_set(&info->devlink_port, NULL);
+ devlink_port_type_eth_set(&info->devlink_port);
else if (!IS_ENABLED(CONFIG_MLX4_INFINIBAND) &&
dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
devlink_port_type_ib_set(&info->devlink_port, NULL);
@@ -3071,7 +3071,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
if (err) {
mlx4_err(dev, "Failed to create file for port %d\n", port);
- devlink_port_unregister(&info->devlink_port);
+ devlink_port_type_clear(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
}
@@ -3093,7 +3094,8 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_attr);
- devlink_port_unregister(&info->devlink_port);
+ devlink_port_type_clear(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
info->port = -1;
return err;
}
@@ -3109,7 +3111,8 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
- devlink_port_unregister(&info->devlink_port);
+ devlink_port_type_clear(&info->devlink_port);
+ devl_port_unregister(&info->devlink_port);
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(info->rmap);
@@ -3333,6 +3336,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
int total_vfs, int *nvfs, struct mlx4_priv *priv,
int reset_flow)
{
+ struct devlink *devlink = priv_to_devlink(priv);
struct mlx4_dev *dev;
unsigned sum = 0;
int err;
@@ -3341,6 +3345,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
struct mlx4_dev_cap *dev_cap = NULL;
int existing_vfs = 0;
+ devl_assert_locked(devlink);
dev = &priv->dev;
INIT_LIST_HEAD(&priv->ctx_list);
@@ -3905,37 +3910,37 @@ static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink)
union devlink_param_value saved_value;
int err;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ &saved_value);
if (!err && mlx4_internal_err_reset != saved_value.vbool) {
mlx4_internal_err_reset = saved_value.vbool;
/* Notify on value changed on runtime configuration mode */
- devlink_param_value_changed(devlink,
- DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
+ devl_param_value_changed(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
}
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &saved_value);
if (!err)
log_num_mac = order_base_2(saved_value.vu32);
- err = devlink_param_driverinit_value_get(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ &saved_value);
if (!err)
enable_64b_cqe_eqe = saved_value.vbool;
- err = devlink_param_driverinit_value_get(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ &saved_value);
if (!err)
enable_4k_uar = saved_value.vbool;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ &saved_value);
if (!err && crdump->snapshot_enable != saved_value.vbool) {
crdump->snapshot_enable = saved_value.vbool;
- devlink_param_value_changed(devlink,
- DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
+ devl_param_value_changed(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
}
}
@@ -3999,6 +4004,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev);
if (!devlink)
return -ENOMEM;
+ devl_lock(devlink);
priv = devlink_priv(devlink);
dev = &priv->dev;
@@ -4015,8 +4021,8 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&dev->persist->interface_state_mutex);
mutex_init(&dev->persist->pci_status_mutex);
- ret = devlink_params_register(devlink, mlx4_devlink_params,
- ARRAY_SIZE(mlx4_devlink_params));
+ ret = devl_params_register(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
if (ret)
goto err_devlink_unregister;
mlx4_devlink_set_params_init_values(devlink);
@@ -4025,16 +4031,17 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_params_unregister;
pci_save_state(pdev);
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devl_unlock(devlink);
devlink_register(devlink);
return 0;
err_params_unregister:
- devlink_params_unregister(devlink, mlx4_devlink_params,
- ARRAY_SIZE(mlx4_devlink_params));
+ devl_params_unregister(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
err_devlink_unregister:
kfree(dev->persist);
err_devlink_free:
+ devl_unlock(devlink);
devlink_free(devlink);
return ret;
}
@@ -4056,8 +4063,11 @@ static void mlx4_unload_one(struct pci_dev *pdev)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int pci_dev_data;
+ struct devlink *devlink;
int p, i;
+ devlink = priv_to_devlink(priv);
+ devl_assert_locked(devlink);
if (priv->removed)
return;
@@ -4137,6 +4147,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
devlink_unregister(devlink);
+ devl_lock(devlink);
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
@@ -4169,9 +4180,10 @@ static void mlx4_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
mlx4_pci_disable_device(dev);
- devlink_params_unregister(devlink, mlx4_devlink_params,
- ARRAY_SIZE(mlx4_devlink_params));
+ devl_params_unregister(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
kfree(dev->persist);
+ devl_unlock(devlink);
devlink_free(devlink);
}
@@ -4292,15 +4304,20 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
mlx4_enter_error_state(persist);
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
@@ -4333,6 +4350,7 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ struct devlink *devlink;
int total_vfs;
int err;
@@ -4340,6 +4358,8 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+ devlink = priv_to_devlink(priv);
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
@@ -4358,19 +4378,23 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
}
end:
mutex_unlock(&persist->interface_state_mutex);
-
+ devl_unlock(devlink);
}
static void mlx4_shutdown(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
mlx4_pci_disable_device(dev);
}
@@ -4385,12 +4409,16 @@ static int __maybe_unused mlx4_suspend(struct device *dev_d)
struct pci_dev *pdev = to_pci_dev(dev_d);
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
+ struct devlink *devlink;
mlx4_err(dev, "suspend was called\n");
+ devlink = priv_to_devlink(mlx4_priv(dev));
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
return 0;
}
@@ -4402,6 +4430,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
struct mlx4_dev *dev = persist->dev;
struct mlx4_priv *priv = mlx4_priv(dev);
int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
+ struct devlink *devlink;
int total_vfs;
int ret = 0;
@@ -4409,6 +4438,8 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
+ devlink = priv_to_devlink(priv);
+ devl_lock(devlink);
mutex_lock(&persist->interface_state_mutex);
if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs,
@@ -4422,6 +4453,7 @@ static int __maybe_unused mlx4_resume(struct device *dev_d)
}
}
mutex_unlock(&persist->interface_state_mutex);
+ devl_unlock(devlink);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index e132ff4c82f2..321f801c1d7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -89,9 +89,19 @@
#define MLX4_EN_FILTER_HASH_SHIFT 4
#define MLX4_EN_FILTER_EXPIRY_QUOTA 60
-/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
-#define MAX_DESC_SIZE 512
-#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE)
+#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
+#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
+
+/* Maximal size of the bounce buffer:
+ * 256 bytes for LSO headers.
+ * CTRL_SIZE for control desc.
+ * DS_SIZE if skb->head contains some payload.
+ * MAX_SKB_FRAGS frags.
+ */
+#define MLX4_TX_BOUNCE_BUFFER_SIZE \
+ ALIGN(256 + CTRL_SIZE + DS_SIZE + MAX_SKB_FRAGS * DS_SIZE, TXBB_SIZE)
+
+#define MLX4_MAX_DESC_TXBBS (MLX4_TX_BOUNCE_BUFFER_SIZE / TXBB_SIZE)
/*
* OS related constants and tunables
@@ -217,9 +227,7 @@ struct mlx4_en_tx_info {
#define MLX4_EN_BIT_DESC_OWN 0x80000000
-#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
#define MLX4_EN_MEMTYPE_PAD 0x100
-#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
struct mlx4_en_tx_desc {
@@ -315,7 +323,7 @@ struct mlx4_en_tx_ring {
struct mlx4_en_rx_desc {
/* actual number of entries depends on rx ring stride */
- struct mlx4_wqe_data_seg data[0];
+ DECLARE_FLEX_ARRAY(struct mlx4_wqe_data_seg, data);
};
struct mlx4_en_rx_ring {
@@ -788,10 +796,16 @@ void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
int mlx4_en_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr);
+struct xdp_md;
+int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp);
+int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type);
+
/*
* Functions for time stamping
*/
u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
+u64 mlx4_en_get_hwtstamp(struct mlx4_en_dev *mdev, u64 timestamp);
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
struct skb_shared_hwtstamps *hwts,
u64 timestamp);
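The new MLX4_TX_BOUNCE_BUFFER_SIZE replaces the old fixed 512-byte MAX_DESC_SIZE with a value derived from the worst-case descriptor. With typical values, CTRL_SIZE and DS_SIZE of 16 bytes, TXBB_SIZE of 64 and MAX_SKB_FRAGS of 17 (assumptions, since they depend on kernel configuration), the arithmetic works out as in this standalone sketch:

/* Standalone sketch of the sizing above, with assumed constants:
 * CTRL_SIZE = DS_SIZE = 16, TXBB_SIZE = 64, MAX_SKB_FRAGS = 17.
 */
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	int ctrl_size = 16, ds_size = 16, txbb_size = 64, max_skb_frags = 17;
	int bounce = ALIGN(256 + ctrl_size + ds_size + max_skb_frags * ds_size,
			   txbb_size);

	/* 256 + 16 + 16 + 17 * 16 = 560, aligned to 64 -> 576 bytes, 9 TXBBs */
	printf("bounce buffer: %d bytes, max TXBBs: %d\n",
	       bounce, bounce / txbb_size);
	return 0;
}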
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index b149e601f673..913ed255990f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -46,6 +46,13 @@
#define MLX4_BF_QP_SKIP_MASK 0xc0
#define MLX4_MAX_BF_QP_RANGE 0x40
+void mlx4_put_qp(struct mlx4_qp *qp)
+{
+ if (refcount_dec_and_test(&qp->refcount))
+ complete(&qp->free);
+}
+EXPORT_SYMBOL_GPL(mlx4_put_qp);
+
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
@@ -64,10 +71,8 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
return;
}
+ /* Need to call mlx4_put_qp() in event handler */
qp->event(qp, event_type);
-
- if (refcount_dec_and_test(&qp->refcount))
- complete(&qp->free);
}
/* used for INIT/CLOSE port logic */
@@ -523,8 +528,7 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
- if (refcount_dec_and_test(&qp->refcount))
- complete(&qp->free);
+ mlx4_put_qp(qp);
wait_for_completion(&qp->free);
mlx4_qp_free_icm(dev, qp->qpn);
@@ -697,7 +701,8 @@ static int mlx4_create_zones(struct mlx4_dev *dev,
err = mlx4_bitmap_init(*bitmap + k, 1,
MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
0);
- mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
+ if (!err)
+ mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
}
if (err)
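The qp.c change moves the final reference drop out of mlx4_qp_event() and into the event handlers themselves via mlx4_put_qp(); the last put is what completes qp->free and unblocks mlx4_qp_free(). A standalone C11 sketch of that hand-off, using an atomic flag in place of the kernel completion:

/* Standalone C11 sketch of the put/free hand-off (atomics stand in for the
 * kernel refcount and completion).  The event handler must now drop the
 * reference it was handed, otherwise the free path would wait forever.
 */
#include <stdatomic.h>
#include <stdio.h>

struct qp {
	atomic_int  refcount;
	atomic_bool free_done;		/* stands in for complete(&qp->free) */
};

static void put_qp(struct qp *qp)
{
	if (atomic_fetch_sub(&qp->refcount, 1) == 1)
		atomic_store(&qp->free_done, 1);
}

static void event_handler(struct qp *qp)
{
	/* ... handle the asynchronous event ... */
	put_qp(qp);			/* done by the caller before this patch */
}

int main(void)
{
	struct qp qp = { .refcount = 2 };	/* one ref for the event, one base ref */

	event_handler(&qp);		/* event path drops its reference */
	put_qp(&qp);			/* free path drops the base reference */
	printf("free may proceed: %d\n", (int)atomic_load(&qp.free_done));
	return 0;
}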
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 92056452a9e3..bb1d7b039a7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -16,13 +16,9 @@ config MLX5_CORE
Core driver for low level functionality of the ConnectX-4 and
Connect-IB cards by Mellanox Technologies.
-config MLX5_ACCEL
- bool
-
config MLX5_FPGA
bool "Mellanox Technologies Innova support"
depends on MLX5_CORE
- select MLX5_ACCEL
help
Build support for the Innova family of network cards by Mellanox
Technologies. Innova network cards are comprised of a ConnectX chip
@@ -89,7 +85,7 @@ config MLX5_BRIDGE
config MLX5_CLS_ACT
bool "MLX5 TC classifier action support"
- depends on MLX5_ESWITCH && NET_CLS_ACT
+ depends on MLX5_ESWITCH && NET_CLS_ACT && NET_TC_SKB_EXT
default y
help
mlx5 ConnectX offloads support for TC classifier action (NET_CLS_ACT),
@@ -104,7 +100,7 @@ config MLX5_CLS_ACT
config MLX5_TC_CT
bool "MLX5 TC connection tracking offload support"
- depends on MLX5_CLS_ACT && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
+ depends on MLX5_CLS_ACT && NF_FLOW_TABLE && NET_ACT_CT
default y
help
Say Y here if you want to support offloading connection tracking rules
@@ -115,6 +111,7 @@ config MLX5_TC_CT
config MLX5_TC_SAMPLE
bool "MLX5 TC sample offload support"
depends on MLX5_CLS_ACT
+ depends on PSAMPLE=y || PSAMPLE=n || MLX5_CORE=m
default y
help
Say Y here if you want to support offloading sample rules via tc
@@ -142,71 +139,29 @@ config MLX5_CORE_IPOIB
help
MLX5 IPoIB offloads & acceleration support.
-config MLX5_FPGA_IPSEC
- bool "Mellanox Technologies IPsec Innova support"
- depends on MLX5_CORE
- depends on MLX5_FPGA
- help
- Build IPsec support for the Innova family of network cards by Mellanox
- Technologies. Innova network cards are comprised of a ConnectX chip
- and an FPGA chip on one board. If you select this option, the
- mlx5_core driver will include the Innova FPGA core and allow building
- sandbox-specific client drivers.
-
-config MLX5_IPSEC
- bool "Mellanox Technologies IPsec Connect-X support"
+config MLX5_EN_MACSEC
+ bool "Connect-X support for MACSec offload"
depends on MLX5_CORE_EN
- depends on XFRM_OFFLOAD
- depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- select MLX5_ACCEL
+ depends on MACSEC
+ default n
help
- Build IPsec support for the Connect-X family of network cards by Mellanox
- Technologies.
- Note: If you select this option, the mlx5_core driver will include
- IPsec support for the Connect-X family.
+ Build support for MACsec cryptography-offload acceleration in the NIC.
config MLX5_EN_IPSEC
- bool "IPSec XFRM cryptography-offload acceleration"
+ bool "Mellanox Technologies IPsec Connect-X support"
depends on MLX5_CORE_EN
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
help
Build support for IPsec cryptography-offload acceleration in the NIC.
- Note: Support for hardware with this capability needs to be selected
- for this option to become available.
-config MLX5_FPGA_TLS
- bool "Mellanox Technologies TLS Innova support"
- depends on TLS_DEVICE
- depends on TLS=y || MLX5_CORE=m
- depends on MLX5_CORE_EN
- depends on MLX5_FPGA
- select MLX5_EN_TLS
- help
- Build TLS support for the Innova family of network cards by Mellanox
- Technologies. Innova network cards are comprised of a ConnectX chip
- and an FPGA chip on one board. If you select this option, the
- mlx5_core driver will include the Innova FPGA core and allow building
- sandbox-specific client drivers.
-
-config MLX5_TLS
+config MLX5_EN_TLS
bool "Mellanox Technologies TLS Connect-X support"
depends on TLS_DEVICE
depends on TLS=y || MLX5_CORE=m
depends on MLX5_CORE_EN
- select MLX5_ACCEL
- select MLX5_EN_TLS
- help
- Build TLS support for the Connect-X family of network cards by Mellanox
- Technologies.
-
-config MLX5_EN_TLS
- bool
help
Build support for TLS cryptography-offload acceleration in the NIC.
- Note: Support for hardware with this capability needs to be selected
- for this option to become available.
config MLX5_SW_STEERING
bool "Mellanox Technologies software-managed steering"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index e63bb9ceb9c0..ddf1e352f51d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,10 +14,10 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
- fs_counters.o fs_ft_pool.o rl.o lag/lag.o dev.o events.o wq.o lib/gid.o \
+ fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
- diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
- fw_reset.o qos.o lib/tout.o
+ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \
+ fw_reset.o qos.o lib/tout.o lib/aso.o
#
# Netdev basic
@@ -28,7 +28,8 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
- en/qos.o en/trap.o en/fs_tt_redirect.o
+ en/qos.o en/htb.o en/trap.o en/fs_tt_redirect.o en/selq.o \
+ lib/crypto.o
#
# Netdev extra
@@ -39,14 +40,28 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag/mp.o lag/port_sel.o lib/geneve.o lib/port_tun.o \
en_rep.o en/rep/bond.o en/mod_hdr.o \
- en/mapping.o
+ en/mapping.o lag/mpesw.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
lib/fs_chains.o en/tc_tun.o \
esw/indir_table.o en/tc_tun_encap.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
- en/tc/post_act.o en/tc/int_port.o
-mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
+ en/tc/post_act.o en/tc/int_port.o en/tc/meter.o \
+ en/tc/post_meter.o en/tc/act_stats.o
+
+mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \
+ en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \
+ en/tc/act/tun.o en/tc/act/csum.o en/tc/act/pedit.o \
+ en/tc/act/vlan.o en/tc/act/vlan_mangle.o en/tc/act/mpls.o \
+ en/tc/act/mirred.o en/tc/act/mirred_nic.o \
+ en/tc/act/ct.o en/tc/act/sample.o en/tc/act/ptype.o \
+ en/tc/act/redirect_ingress.o en/tc/act/police.o
+
+ifneq ($(CONFIG_MLX5_TC_CT),)
+ mlx5_core-y += en/tc_ct.o en/tc/ct_fs_dmfs.o
+ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += en/tc/ct_fs_smfs.o
+endif
+
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
#
@@ -60,8 +75,9 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o
-mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o en/rep/bridge.o
+mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o esw/bridge_mcast.o en/rep/bridge.o
+mlx5_core-$(CONFIG_THERMAL) += thermal.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
@@ -75,17 +91,16 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
#
# Accelerations & FPGA
#
-mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o
-mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o
-mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o
-mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o
-
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
+mlx5_core-$(CONFIG_MLX5_EN_MACSEC) += en_accel/macsec.o en_accel/macsec_fs.o \
+ en_accel/macsec_stats.o
+
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
- en_accel/ipsec_stats.o en_accel/ipsec_fs.o
+ en_accel/ipsec_stats.o en_accel/ipsec_fs.o \
+ en_accel/ipsec_offload.o lib/ipsec_fs_roce.o
-mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
+mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
en_accel/ktls_tx.o en_accel/ktls_rx.o
@@ -94,12 +109,15 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_icm_pool.o steering/dr_buddy.o \
steering/dr_ste.o steering/dr_send.o \
steering/dr_ste_v0.o steering/dr_ste_v1.o \
+ steering/dr_ste_v2.o \
steering/dr_cmd.o steering/dr_fw.o \
- steering/dr_action.o steering/fs_dr.o
+ steering/dr_action.o steering/fs_dr.o \
+ steering/dr_definer.o steering/dr_ptrn.o \
+ steering/dr_arg.o steering/dr_dbg.o lib/smfs.o
#
# SF device
#
-mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o
+mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o irq_affinity.o
#
# SF manager
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
deleted file mode 100644
index 82b185121edb..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/accel.h
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef __MLX5E_ACCEL_H__
-#define __MLX5E_ACCEL_H__
-
-#ifdef CONFIG_MLX5_ACCEL
-
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-
-static inline bool is_metadata_hdr_valid(struct sk_buff *skb)
-{
- __be16 *ethtype;
-
- if (unlikely(skb->len < ETH_HLEN + MLX5E_METADATA_ETHER_LEN))
- return false;
- ethtype = (__be16 *)(skb->data + ETH_ALEN * 2);
- if (*ethtype != cpu_to_be16(MLX5E_METADATA_ETHER_TYPE))
- return false;
- return true;
-}
-
-static inline void remove_metadata_hdr(struct sk_buff *skb)
-{
- struct ethhdr *old_eth;
- struct ethhdr *new_eth;
-
- /* Remove the metadata from the buffer */
- old_eth = (struct ethhdr *)skb->data;
- new_eth = (struct ethhdr *)(skb->data + MLX5E_METADATA_ETHER_LEN);
- memmove(new_eth, old_eth, 2 * ETH_ALEN);
- /* Ethertype is already in its new place */
- skb_pull_inline(skb, MLX5E_METADATA_ETHER_LEN);
-}
-
-#endif /* CONFIG_MLX5_ACCEL */
-
-#endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
deleted file mode 100644
index 09f5ce97af46..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx5/device.h>
-
-#include "accel/ipsec.h"
-#include "mlx5_core.h"
-#include "fpga/ipsec.h"
-#include "accel/ipsec_offload.h"
-
-void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops;
- int err = 0;
-
- ipsec_ops = (mlx5_ipsec_offload_ops(mdev)) ?
- mlx5_ipsec_offload_ops(mdev) :
- mlx5_fpga_ipsec_ops(mdev);
-
- if (!ipsec_ops || !ipsec_ops->init) {
- mlx5_core_dbg(mdev, "IPsec ops is not supported\n");
- return;
- }
-
- err = ipsec_ops->init(mdev);
- if (err) {
- mlx5_core_warn_once(mdev, "Failed to start IPsec device, err = %d\n", err);
- return;
- }
-
- mdev->ipsec_ops = ipsec_ops;
-}
-
-void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->cleanup)
- return;
-
- ipsec_ops->cleanup(mdev);
-}
-
-u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->device_caps)
- return 0;
-
- return ipsec_ops->device_caps(mdev);
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps);
-
-unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->counters_count)
- return -EOPNOTSUPP;
-
- return ipsec_ops->counters_count(mdev);
-}
-
-int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
- unsigned int count)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->counters_read)
- return -EOPNOTSUPP;
-
- return ipsec_ops->counters_read(mdev, counters, count);
-}
-
-void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- u32 *sa_handle)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
- __be32 saddr[4] = {}, daddr[4] = {};
-
- if (!ipsec_ops || !ipsec_ops->create_hw_context)
- return ERR_PTR(-EOPNOTSUPP);
-
- if (!xfrm->attrs.is_ipv6) {
- saddr[3] = xfrm->attrs.saddr.a4;
- daddr[3] = xfrm->attrs.daddr.a4;
- } else {
- memcpy(saddr, xfrm->attrs.saddr.a6, sizeof(saddr));
- memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
- }
-
- return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, xfrm->attrs.spi,
- xfrm->attrs.is_ipv6, sa_handle);
-}
-
-void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->free_hw_context)
- return;
-
- ipsec_ops->free_hw_context(context);
-}
-
-struct mlx5_accel_esp_xfrm *
-mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
- struct mlx5_accel_esp_xfrm *xfrm;
-
- if (!ipsec_ops || !ipsec_ops->esp_create_xfrm)
- return ERR_PTR(-EOPNOTSUPP);
-
- xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, flags);
- if (IS_ERR(xfrm))
- return xfrm;
-
- xfrm->mdev = mdev;
- return xfrm;
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm);
-
-void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm)
- return;
-
- ipsec_ops->esp_destroy_xfrm(xfrm);
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm);
-
-int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
-
- if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm)
- return -EOPNOTSUPP;
-
- return ipsec_ops->esp_modify_xfrm(xfrm, attrs);
-}
-EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
deleted file mode 100644
index fbb9c5415d53..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_ACCEL_IPSEC_H__
-#define __MLX5_ACCEL_IPSEC_H__
-
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/accel.h>
-
-#ifdef CONFIG_MLX5_ACCEL
-
-#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
- MLX5_ACCEL_IPSEC_CAP_DEVICE)
-
-unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
-int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
- unsigned int count);
-
-void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- u32 *sa_handle);
-void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context);
-
-void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
-void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
-
-struct mlx5_accel_ipsec_ops {
- u32 (*device_caps)(struct mlx5_core_dev *mdev);
- unsigned int (*counters_count)(struct mlx5_core_dev *mdev);
- int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count);
- void* (*create_hw_context)(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- const __be32 saddr[4], const __be32 daddr[4],
- const __be32 spi, bool is_ipv6, u32 *sa_handle);
- void (*free_hw_context)(void *context);
- int (*init)(struct mlx5_core_dev *mdev);
- void (*cleanup)(struct mlx5_core_dev *mdev);
- struct mlx5_accel_esp_xfrm* (*esp_create_xfrm)(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags);
- int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs);
- void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm);
-};
-
-#else
-
-#define MLX5_IPSEC_DEV(mdev) false
-
-static inline void *
-mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *xfrm,
- u32 *sa_handle)
-{
- return NULL;
-}
-
-static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) {}
-
-static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {}
-
-static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {}
-
-#endif /* CONFIG_MLX5_ACCEL */
-
-#endif /* __MLX5_ACCEL_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c
deleted file mode 100644
index d6667d38e1de..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c
+++ /dev/null
@@ -1,385 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-
-#include "mlx5_core.h"
-#include "ipsec_offload.h"
-#include "lib/mlx5.h"
-#include "en_accel/ipsec_fs.h"
-
-#define MLX5_IPSEC_DEV_BASIC_CAPS (MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | \
- MLX5_ACCEL_IPSEC_CAP_LSO)
-
-struct mlx5_ipsec_sa_ctx {
- struct rhash_head hash;
- u32 enc_key_id;
- u32 ipsec_obj_id;
- /* hw ctx */
- struct mlx5_core_dev *dev;
- struct mlx5_ipsec_esp_xfrm *mxfrm;
-};
-
-struct mlx5_ipsec_esp_xfrm {
- /* reference counter of SA ctx */
- struct mlx5_ipsec_sa_ctx *sa_ctx;
- struct mutex lock; /* protects mlx5_ipsec_esp_xfrm */
- struct mlx5_accel_esp_xfrm accel_xfrm;
-};
-
-static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev)
-{
- u32 caps = MLX5_IPSEC_DEV_BASIC_CAPS;
-
- if (!mlx5_is_ipsec_device(mdev))
- return 0;
-
- if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
- !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
- return 0;
-
- if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) &&
- MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
- caps |= MLX5_ACCEL_IPSEC_CAP_ESP;
-
- if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) {
- caps |= MLX5_ACCEL_IPSEC_CAP_ESN;
- caps |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
- }
-
- /* We can accommodate up to 2^24 different IPsec objects
- * because we use up to 24 bit in flow table metadata
- * to hold the IPsec Object unique handle.
- */
- WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
- return caps;
-}
-
-static int
-mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay (replay_type = %d)\n",
- attrs->replay_type);
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
- mlx5_core_err(mdev, "Only aes gcm keymat is supported (keymat_type = %d)\n",
- attrs->keymat_type);
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.iv_algo !=
- MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
- mlx5_core_err(mdev, "Only iv sequence algo is supported (iv_algo = %d)\n",
- attrs->keymat.aes_gcm.iv_algo);
- return -EOPNOTSUPP;
- }
-
- if (attrs->keymat.aes_gcm.key_len != 128 &&
- attrs->keymat.aes_gcm.key_len != 256) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with key length other than 128/256 bit (key length = %d)\n",
- attrs->keymat.aes_gcm.key_len);
- return -EOPNOTSUPP;
- }
-
- if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
- !MLX5_CAP_IPSEC(mdev, ipsec_esn)) {
- mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered\n");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static struct mlx5_accel_esp_xfrm *
-mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev,
- const struct mlx5_accel_esp_xfrm_attrs *attrs,
- u32 flags)
-{
- struct mlx5_ipsec_esp_xfrm *mxfrm;
- int err = 0;
-
- err = mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs);
- if (err)
- return ERR_PTR(err);
-
- mxfrm = kzalloc(sizeof(*mxfrm), GFP_KERNEL);
- if (!mxfrm)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&mxfrm->lock);
- memcpy(&mxfrm->accel_xfrm.attrs, attrs,
- sizeof(mxfrm->accel_xfrm.attrs));
-
- return &mxfrm->accel_xfrm;
-}
-
-static void mlx5_ipsec_offload_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
-{
- struct mlx5_ipsec_esp_xfrm *mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm,
- accel_xfrm);
-
- /* assuming no sa_ctx are connected to this xfrm_ctx */
- WARN_ON(mxfrm->sa_ctx);
- kfree(mxfrm);
-}
-
-struct mlx5_ipsec_obj_attrs {
- const struct aes_gcm_keymat *aes_gcm;
- u32 accel_flags;
- u32 esn_msb;
- u32 enc_key_id;
-};
-
-static int mlx5_create_ipsec_obj(struct mlx5_core_dev *mdev,
- struct mlx5_ipsec_obj_attrs *attrs,
- u32 *ipsec_id)
-{
- const struct aes_gcm_keymat *aes_gcm = attrs->aes_gcm;
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
- u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
- void *obj, *salt_p, *salt_iv_p;
- int err;
-
- obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);
-
- /* salt and seq_iv */
- salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
- memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));
-
- switch (aes_gcm->icv_len) {
- case 64:
- MLX5_SET(ipsec_obj, obj, icv_length,
- MLX5_IPSEC_OBJECT_ICV_LEN_8B);
- break;
- case 96:
- MLX5_SET(ipsec_obj, obj, icv_length,
- MLX5_IPSEC_OBJECT_ICV_LEN_12B);
- break;
- case 128:
- MLX5_SET(ipsec_obj, obj, icv_length,
- MLX5_IPSEC_OBJECT_ICV_LEN_16B);
- break;
- default:
- return -EINVAL;
- }
- salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
- memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
- /* esn */
- if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
- MLX5_SET(ipsec_obj, obj, esn_en, 1);
- MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb);
- if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
- MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
- }
-
- MLX5_SET(ipsec_obj, obj, dekn, attrs->enc_key_id);
-
- /* general object fields set */
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
- MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
- MLX5_GENERAL_OBJECT_TYPES_IPSEC);
-
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (!err)
- *ipsec_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-
- return err;
-}
-
-static void mlx5_destroy_ipsec_obj(struct mlx5_core_dev *mdev, u32 ipsec_id)
-{
- u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
-
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
- MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
- MLX5_GENERAL_OBJECT_TYPES_IPSEC);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id);
-
- mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-static void *mlx5_ipsec_offload_create_sa_ctx(struct mlx5_core_dev *mdev,
- struct mlx5_accel_esp_xfrm *accel_xfrm,
- const __be32 saddr[4], const __be32 daddr[4],
- const __be32 spi, bool is_ipv6, u32 *hw_handle)
-{
- struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs = &accel_xfrm->attrs;
- struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
- struct mlx5_ipsec_obj_attrs ipsec_attrs = {};
- struct mlx5_ipsec_esp_xfrm *mxfrm;
- struct mlx5_ipsec_sa_ctx *sa_ctx;
- int err;
-
- /* alloc SA context */
- sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
- if (!sa_ctx)
- return ERR_PTR(-ENOMEM);
-
- sa_ctx->dev = mdev;
-
- mxfrm = container_of(accel_xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm);
- mutex_lock(&mxfrm->lock);
- sa_ctx->mxfrm = mxfrm;
-
- /* key */
- err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
- aes_gcm->key_len / BITS_PER_BYTE,
- MLX5_ACCEL_OBJ_IPSEC_KEY,
- &sa_ctx->enc_key_id);
- if (err) {
- mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
- goto err_sa_ctx;
- }
-
- ipsec_attrs.aes_gcm = aes_gcm;
- ipsec_attrs.accel_flags = accel_xfrm->attrs.flags;
- ipsec_attrs.esn_msb = accel_xfrm->attrs.esn;
- ipsec_attrs.enc_key_id = sa_ctx->enc_key_id;
- err = mlx5_create_ipsec_obj(mdev, &ipsec_attrs,
- &sa_ctx->ipsec_obj_id);
- if (err) {
- mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
- goto err_enc_key;
- }
-
- *hw_handle = sa_ctx->ipsec_obj_id;
- mxfrm->sa_ctx = sa_ctx;
- mutex_unlock(&mxfrm->lock);
-
- return sa_ctx;
-
-err_enc_key:
- mlx5_destroy_encryption_key(mdev, sa_ctx->enc_key_id);
-err_sa_ctx:
- mutex_unlock(&mxfrm->lock);
- kfree(sa_ctx);
- return ERR_PTR(err);
-}
-
-static void mlx5_ipsec_offload_delete_sa_ctx(void *context)
-{
- struct mlx5_ipsec_sa_ctx *sa_ctx = (struct mlx5_ipsec_sa_ctx *)context;
- struct mlx5_ipsec_esp_xfrm *mxfrm = sa_ctx->mxfrm;
-
- mutex_lock(&mxfrm->lock);
- mlx5_destroy_ipsec_obj(sa_ctx->dev, sa_ctx->ipsec_obj_id);
- mlx5_destroy_encryption_key(sa_ctx->dev, sa_ctx->enc_key_id);
- kfree(sa_ctx);
- mxfrm->sa_ctx = NULL;
- mutex_unlock(&mxfrm->lock);
-}
-
-static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev)
-{
- return 0;
-}
-
-static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev,
- struct mlx5_ipsec_obj_attrs *attrs,
- u32 ipsec_id)
-{
- u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
- u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
- u64 modify_field_select = 0;
- u64 general_obj_types;
- void *obj;
- int err;
-
- if (!(attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED))
- return 0;
-
- general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
- if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
- return -EINVAL;
-
- /* general object fields set */
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id);
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (err) {
- mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
- ipsec_id, err);
- return err;
- }
-
- obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
- modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);
-
- /* esn */
- if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
- !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
- return -EOPNOTSUPP;
-
- obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
- MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb);
- if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
- MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
-
- /* general object fields set */
- MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
-
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5_ipsec_offload_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
- const struct mlx5_accel_esp_xfrm_attrs *attrs)
-{
- struct mlx5_ipsec_obj_attrs ipsec_attrs = {};
- struct mlx5_core_dev *mdev = xfrm->mdev;
- struct mlx5_ipsec_esp_xfrm *mxfrm;
-
- int err = 0;
-
- if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
- return 0;
-
- if (mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs))
- return -EOPNOTSUPP;
-
- mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm);
-
- mutex_lock(&mxfrm->lock);
-
- if (!mxfrm->sa_ctx)
- /* Not bound xfrm, change only sw attrs */
- goto change_sw_xfrm_attrs;
-
- /* need to add find and replace in ipsec_rhash_sa the sa_ctx */
- /* modify device with new hw_sa */
- ipsec_attrs.accel_flags = attrs->flags;
- ipsec_attrs.esn_msb = attrs->esn;
- err = mlx5_modify_ipsec_obj(mdev,
- &ipsec_attrs,
- mxfrm->sa_ctx->ipsec_obj_id);
-
-change_sw_xfrm_attrs:
- if (!err)
- memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
-
- mutex_unlock(&mxfrm->lock);
- return err;
-}
-
-static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = {
- .device_caps = mlx5_ipsec_offload_device_caps,
- .create_hw_context = mlx5_ipsec_offload_create_sa_ctx,
- .free_hw_context = mlx5_ipsec_offload_delete_sa_ctx,
- .init = mlx5_ipsec_offload_init,
- .esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm,
- .esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm,
- .esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm,
-};
-
-const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev)
-{
- if (!mlx5_ipsec_offload_device_caps(mdev))
- return NULL;
-
- return &ipsec_offload_ops;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h
deleted file mode 100644
index 970c66d19c1d..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
-
-#ifndef __MLX5_IPSEC_OFFLOAD_H__
-#define __MLX5_IPSEC_OFFLOAD_H__
-
-#include <linux/mlx5/driver.h>
-#include "accel/ipsec.h"
-
-#ifdef CONFIG_MLX5_IPSEC
-
-const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev);
-static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
-{
- if (!MLX5_CAP_GEN(mdev, ipsec_offload))
- return false;
-
- if (!MLX5_CAP_GEN(mdev, log_max_dek))
- return false;
-
- if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
- return false;
-
- return MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
- MLX5_CAP_ETH(mdev, insert_trailer);
-}
-
-#else
-static inline const struct mlx5_accel_ipsec_ops *
-mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { return NULL; }
-static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
-{
- return false;
-}
-
-#endif /* CONFIG_MLX5_IPSEC */
-#endif /* __MLX5_IPSEC_OFFLOAD_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
deleted file mode 100644
index 6c2b86a26863..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/mlx5/device.h>
-
-#include "accel/tls.h"
-#include "mlx5_core.h"
-#include "lib/mlx5.h"
-
-#ifdef CONFIG_MLX5_FPGA_TLS
-#include "fpga/tls.h"
-
-int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx)
-{
- return mlx5_fpga_tls_add_flow(mdev, flow, crypto_info,
- start_offload_tcp_sn, p_swid,
- direction_sx);
-}
-
-void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- bool direction_sx)
-{
- mlx5_fpga_tls_del_flow(mdev, swid, GFP_KERNEL, direction_sx);
-}
-
-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn)
-{
- return mlx5_fpga_tls_resync_rx(mdev, handle, seq, rcd_sn);
-}
-
-bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
-{
- return mlx5_fpga_is_tls_device(mdev) ||
- mlx5_accel_is_ktls_device(mdev);
-}
-
-u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev)
-{
- return mlx5_fpga_tls_device_caps(mdev);
-}
-
-int mlx5_accel_tls_init(struct mlx5_core_dev *mdev)
-{
- return mlx5_fpga_tls_init(mdev);
-}
-
-void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev)
-{
- mlx5_fpga_tls_cleanup(mdev);
-}
-#endif
-
-#ifdef CONFIG_MLX5_TLS
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id)
-{
- u32 sz_bytes;
- void *key;
-
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128: {
- struct tls12_crypto_info_aes_gcm_128 *info =
- (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
-
- key = info->key;
- sz_bytes = sizeof(info->key);
- break;
- }
- case TLS_CIPHER_AES_GCM_256: {
- struct tls12_crypto_info_aes_gcm_256 *info =
- (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
-
- key = info->key;
- sz_bytes = sizeof(info->key);
- break;
- }
- default:
- return -EINVAL;
- }
-
- return mlx5_create_encryption_key(mdev, key, sz_bytes,
- MLX5_ACCEL_OBJ_TLS_KEY,
- p_key_id);
-}
-
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
-{
- mlx5_destroy_encryption_key(mdev, key_id);
-}
-#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
deleted file mode 100644
index fd874f0c380a..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef __MLX5_ACCEL_TLS_H__
-#define __MLX5_ACCEL_TLS_H__
-
-#include <linux/mlx5/driver.h>
-#include <linux/tls.h>
-
-#ifdef CONFIG_MLX5_TLS
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id);
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
-
-static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, tls_tx);
-}
-
-static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_GEN(mdev, tls_rx);
-}
-
-static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
-{
- if (!mlx5_accel_is_ktls_tx(mdev) &&
- !mlx5_accel_is_ktls_rx(mdev))
- return false;
-
- if (!MLX5_CAP_GEN(mdev, log_max_dek))
- return false;
-
- return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
-}
-
-static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info)
-{
- switch (crypto_info->cipher_type) {
- case TLS_CIPHER_AES_GCM_128:
- if (crypto_info->version == TLS_1_2_VERSION)
- return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
- break;
- }
-
- return false;
-}
-#else
-static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
-{ return false; }
-
-static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
-{ return false; }
-
-static inline int
-mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id) { return -ENOTSUPP; }
-static inline void
-mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) {}
-
-static inline bool
-mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
-static inline bool
-mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info) { return false; }
-#endif
-
-enum {
- MLX5_ACCEL_TLS_TX = BIT(0),
- MLX5_ACCEL_TLS_RX = BIT(1),
- MLX5_ACCEL_TLS_V12 = BIT(2),
- MLX5_ACCEL_TLS_V13 = BIT(3),
- MLX5_ACCEL_TLS_LRO = BIT(4),
- MLX5_ACCEL_TLS_IPV6 = BIT(5),
- MLX5_ACCEL_TLS_AES_GCM128 = BIT(30),
- MLX5_ACCEL_TLS_AES_GCM256 = BIT(31),
-};
-
-struct mlx5_ifc_tls_flow_bits {
- u8 src_port[0x10];
- u8 dst_port[0x10];
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
- union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
- u8 ipv6[0x1];
- u8 direction_sx[0x1];
- u8 reserved_at_2[0x1e];
-};
-
-#ifdef CONFIG_MLX5_FPGA_TLS
-int mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx);
-void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- bool direction_sx);
-int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn);
-bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev);
-u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev);
-int mlx5_accel_tls_init(struct mlx5_core_dev *mdev);
-void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev);
-
-#else
-
-static inline int
-mlx5_accel_tls_add_flow(struct mlx5_core_dev *mdev, void *flow,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn, u32 *p_swid,
- bool direction_sx) { return -ENOTSUPP; }
-static inline void mlx5_accel_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
- bool direction_sx) { }
-static inline int mlx5_accel_tls_resync_rx(struct mlx5_core_dev *mdev, __be32 handle,
- u32 seq, __be64 rcd_sn) { return 0; }
-static inline bool mlx5_accel_is_tls_device(struct mlx5_core_dev *mdev)
-{
- return mlx5_accel_is_ktls_device(mdev);
-}
-static inline u32 mlx5_accel_tls_device_caps(struct mlx5_core_dev *mdev) { return 0; }
-static inline int mlx5_accel_tls_init(struct mlx5_core_dev *mdev) { return 0; }
-static inline void mlx5_accel_tls_cleanup(struct mlx5_core_dev *mdev) { }
-#endif
-
-#endif /* __MLX5_ACCEL_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 291e427e9e4f..6aca004e88cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -71,53 +71,6 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
return cpu_handle;
}
-static int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_frag_buf *buf, int node)
-{
- dma_addr_t t;
-
- buf->size = size;
- buf->npages = 1;
- buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
-
- buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL);
- if (!buf->frags)
- return -ENOMEM;
-
- buf->frags->buf = mlx5_dma_zalloc_coherent_node(dev, size,
- &t, node);
- if (!buf->frags->buf)
- goto err_out;
-
- buf->frags->map = t;
-
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
-
- return 0;
-err_out:
- kfree(buf->frags);
- return -ENOMEM;
-}
-
-int mlx5_buf_alloc(struct mlx5_core_dev *dev,
- int size, struct mlx5_frag_buf *buf)
-{
- return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
-}
-EXPORT_SYMBOL(mlx5_buf_alloc);
-
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
-{
- dma_free_coherent(mlx5_core_dma_dev(dev), buf->size, buf->frags->buf,
- buf->frags->map);
-
- kfree(buf->frags);
-}
-EXPORT_SYMBOL_GPL(mlx5_buf_free);
-
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node)
{
@@ -183,11 +136,11 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
u32 db_per_page = PAGE_SIZE / cache_line_size();
struct mlx5_db_pgdir *pgdir;
- pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
+ pgdir = kzalloc_node(sizeof(*pgdir), GFP_KERNEL, node);
if (!pgdir)
return NULL;
- pgdir->bitmap = bitmap_zalloc(db_per_page, GFP_KERNEL);
+ pgdir->bitmap = bitmap_zalloc_node(db_per_page, GFP_KERNEL, node);
if (!pgdir->bitmap) {
kfree(pgdir);
return NULL;
@@ -260,12 +213,6 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
-{
- return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
-}
-EXPORT_SYMBOL_GPL(mlx5_db_alloc);
-
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
u32 db_per_page = PAGE_SIZE / cache_line_size();
@@ -286,19 +233,6 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
-void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
-{
- u64 addr;
- int i;
-
- for (i = 0; i < buf->npages; i++) {
- addr = buf->frags->map + (i << buf->page_shift);
-
- pas[i] = cpu_to_be64(addr);
- }
-}
-EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
-
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm)
{
int i;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index a46284ca5172..d53de39539a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -31,14 +31,12 @@
*/
#include <linux/highmem.h>
-#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
-#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>
@@ -46,6 +44,27 @@
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"
+#define CREATE_TRACE_POINTS
+#include "diag/cmd_tracepoint.h"
+
+struct mlx5_ifc_mbox_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_mbox_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
enum {
CMD_IF_REV = 5,
@@ -70,6 +89,27 @@ enum {
MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};
+static u16 in_to_opcode(void *in)
+{
+ return MLX5_GET(mbox_in, in, opcode);
+}
+
+/* Returns true for opcodes that might be triggered very frequently and throttle
+ * the command interface. Limit their command slot usage.
+ */
+static bool mlx5_cmd_is_throttle_opcode(u16 op)
+{
+ switch (op) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_SYNC_CRYPTO:
+ return true;
+ }
+ return false;
+}
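The helper above only classifies opcodes; the enforcement appears further down in this diff, where cmd_exec() takes cmd.throttle_sem (initialized to half of max_reg_cmds) before submitting a throttled opcode and releases it on the way out. A minimal sketch of that pairing, assuming cmd.c scope for the static helpers and using a hypothetical wrapper name:

/* Illustrative sketch, not part of the patch: mirrors the throttle_sem
 * down()/up() pairing that cmd_exec() gains later in this diff.
 */
static int example_submit_throttled(struct mlx5_core_dev *dev, void *in,
				    int in_size, void *out, int out_size)
{
	bool throttle = mlx5_cmd_is_throttle_opcode(in_to_opcode(in));
	int err;

	if (throttle)
		down(&dev->cmd.throttle_sem);	/* may sleep, so no async callbacks */

	err = mlx5_cmd_do(dev, in, in_size, out, out_size);

	if (throttle)
		up(&dev->cmd.throttle_sem);
	return err;
}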
+
static struct mlx5_cmd_work_ent *
cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out, void *uout, int uout_size,
@@ -91,6 +131,7 @@ cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
ent->context = context;
ent->cmd = cmd;
ent->page_queue = page_queue;
+ ent->op = in_to_opcode(in->first.data);
refcount_set(&ent->refcnt, 1);
return ent;
@@ -131,11 +172,8 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd)
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cmd->alloc_lock, flags);
+ lockdep_assert_held(&cmd->alloc_lock);
set_bit(idx, &cmd->bitmask);
- spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
@@ -145,13 +183,21 @@ static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
{
+ struct mlx5_cmd *cmd = ent->cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
if (!refcount_dec_and_test(&ent->refcnt))
- return;
+ goto out;
- if (ent->idx >= 0)
- cmd_free_index(ent->cmd, ent->idx);
+ if (ent->idx >= 0) {
+ cmd_free_index(cmd, ent->idx);
+ up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
+ }
cmd_free_ent(ent);
+out:
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
@@ -186,10 +232,10 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
int xor_len = sizeof(*block) - sizeof(block->data) - 1;
if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
- return -EINVAL;
+ return -EHWPOISON;
if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
- return -EINVAL;
+ return -EHWPOISON;
return 0;
}
@@ -255,12 +301,12 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
if (sig != 0xff)
- return -EINVAL;
+ return -EHWPOISON;
for (i = 0; i < n && next; i++) {
err = verify_block_sig(next->buf);
if (err)
- return err;
+ return -EHWPOISON;
next = next->next;
}
@@ -473,9 +519,15 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_VHCA_STATE:
case MLX5_CMD_OP_MODIFY_VHCA_STATE:
case MLX5_CMD_OP_ALLOC_SF:
+ case MLX5_CMD_OP_SUSPEND_VHCA:
+ case MLX5_CMD_OP_RESUME_VHCA:
+ case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
+ case MLX5_CMD_OP_SAVE_VHCA_STATE:
+ case MLX5_CMD_OP_LOAD_VHCA_STATE:
+ case MLX5_CMD_OP_SYNC_CRYPTO:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
- return -EIO;
+ return -ENOLINK;
default:
mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
return -EINVAL;
@@ -670,6 +722,12 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE);
MLX5_COMMAND_STR_CASE(ALLOC_SF);
MLX5_COMMAND_STR_CASE(DEALLOC_SF);
+ MLX5_COMMAND_STR_CASE(SUSPEND_VHCA);
+ MLX5_COMMAND_STR_CASE(RESUME_VHCA);
+ MLX5_COMMAND_STR_CASE(QUERY_VHCA_MIGRATION_STATE);
+ MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE);
+ MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE);
+ MLX5_COMMAND_STR_CASE(SYNC_CRYPTO);
default: return "unknown command opcode";
}
}
@@ -737,71 +795,68 @@ static int cmd_status_to_err(u8 status)
}
}
-struct mlx5_ifc_mbox_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 reserved_at_40[0x40];
-};
+void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
+{
+ u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
+ u8 status = MLX5_GET(mbox_out, out, status);
-struct mlx5_ifc_mbox_in_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
+ mlx5_core_err_rl(dev,
+ "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n",
+ mlx5_command_str(opcode), opcode, op_mod,
+ cmd_status_str(status), status, syndrome, cmd_status_to_err(status));
+}
+EXPORT_SYMBOL(mlx5_cmd_out_err);
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
+static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
+{
+ u16 opcode, op_mod;
+ u16 uid;
- u8 reserved_at_40[0x40];
-};
+ opcode = in_to_opcode(in);
+ op_mod = MLX5_GET(mbox_in, in, op_mod);
+ uid = MLX5_GET(mbox_in, in, uid);
-void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
-{
- *status = MLX5_GET(mbox_out, out, status);
- *syndrome = MLX5_GET(mbox_out, out, syndrome);
+ if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
+ opcode != MLX5_CMD_OP_CREATE_UCTX)
+ mlx5_cmd_out_err(dev, opcode, op_mod, out);
}
-static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
+int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
{
- u32 syndrome;
- u8 status;
- u16 opcode;
- u16 op_mod;
- u16 uid;
+ /* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */
+ if (err == -ENXIO) {
+ u16 opcode = in_to_opcode(in);
+ u32 syndrome;
+ u8 status;
- mlx5_cmd_mbox_status(out, &status, &syndrome);
- if (!status)
- return 0;
+ /* PCI Error, emulate command return status, for smooth reset */
+ err = mlx5_internal_err_ret_value(dev, opcode, &syndrome, &status);
+ MLX5_SET(mbox_out, out, status, status);
+ MLX5_SET(mbox_out, out, syndrome, syndrome);
+ if (!err)
+ return 0;
+ }
- opcode = MLX5_GET(mbox_in, in, opcode);
- op_mod = MLX5_GET(mbox_in, in, op_mod);
- uid = MLX5_GET(mbox_in, in, uid);
+ /* driver or FW delivery error */
+ if (err != -EREMOTEIO && err)
+ return err;
- if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
- mlx5_core_err_rl(dev,
- "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
- mlx5_command_str(opcode), opcode, op_mod,
- cmd_status_str(status), status, syndrome);
- else
- mlx5_core_dbg(dev,
- "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
- mlx5_command_str(opcode),
- opcode, op_mod,
- cmd_status_str(status),
- status,
- syndrome);
+ /* check outbox status */
+ err = cmd_status_to_err(MLX5_GET(mbox_out, out, status));
+ if (err)
+ cmd_status_print(dev, in, out);
- return cmd_status_to_err(status);
+ return err;
}
+EXPORT_SYMBOL(mlx5_cmd_check);
static void dump_command(struct mlx5_core_dev *dev,
struct mlx5_cmd_work_ent *ent, int input)
{
struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
- u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
struct mlx5_cmd_mailbox *next = msg->next;
int n = mlx5_calc_cmd_blocks(msg);
+ u16 op = ent->op;
int data_only;
u32 offset = 0;
int dump_len;
@@ -853,11 +908,6 @@ static void dump_command(struct mlx5_core_dev *dev,
mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
}
-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
-{
- return MLX5_GET(mbox_in, in->first.data, opcode);
-}
-
static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
static void cb_timeout_handler(struct work_struct *work)
@@ -875,13 +925,13 @@ static void cb_timeout_handler(struct work_struct *work)
/* Maybe got handled by eq recover ? */
if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
- mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
+ mlx5_command_str(ent->op), ent->op);
goto out; /* phew, already handled */
}
ent->ret = -ETIMEDOUT;
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
- ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
+ ent->idx, mlx5_command_str(ent->op), ent->op);
mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
out:
@@ -900,25 +950,6 @@ static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
return cmd->allowed_opcode == opcode;
}
-static int cmd_alloc_index_retry(struct mlx5_cmd *cmd)
-{
- unsigned long alloc_end = jiffies + msecs_to_jiffies(1000);
- int idx;
-
-retry:
- idx = cmd_alloc_index(cmd);
- if (idx < 0 && time_before(jiffies, alloc_end)) {
- /* Index allocation can fail on heavy load of commands. This is a temporary
- * situation as the current command already holds the semaphore, meaning that
- * another command completion is being handled and it is expected to release
- * the entry index soon.
- */
- cpu_relax();
- goto retry;
- }
- return idx;
-}
-
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev)
{
return pci_channel_offline(dev->pdev) ||
@@ -946,7 +977,7 @@ static void cmd_work_handler(struct work_struct *work)
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
if (!ent->page_queue) {
- alloc_ret = cmd_alloc_index_retry(cmd);
+ alloc_ret = cmd_alloc_index(cmd);
if (alloc_ret < 0) {
mlx5_core_err_rl(dev, "failed to allocate command entry\n");
if (ent->callback) {
@@ -974,7 +1005,6 @@ static void cmd_work_handler(struct work_struct *work)
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
- ent->op = be32_to_cpu(lay->in[0]) >> 16;
if (ent->in->next)
lay->in_ptr = cpu_to_be64(ent->in->next->dma);
lay->inlen = cpu_to_be32(ent->in->len);
@@ -993,20 +1023,14 @@ static void cmd_work_handler(struct work_struct *work)
cmd_ent_get(ent);
set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
+ cmd_ent_get(ent); /* for the _real_ FW event on completion */
/* Skip sending command to fw if internal error */
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) {
- u8 status = 0;
- u32 drv_synd;
-
- ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
- MLX5_SET(mbox_out, ent->out, status, status);
- MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
-
+ ent->ret = -ENXIO;
mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
return;
}
- cmd_ent_get(ent); /* for the _real_ FW event on completion */
/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
@@ -1020,6 +1044,31 @@ static void cmd_work_handler(struct work_struct *work)
}
}
+static int deliv_status_to_err(u8 status)
+{
+ switch (status) {
+ case MLX5_CMD_DELIVERY_STAT_OK:
+ case MLX5_DRIVER_STATUS_ABORTED:
+ return 0;
+ case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
+ case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
+ return -EBADR;
+ case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
+ case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
+ case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
+ return -EFAULT; /* Bad address */
+ case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
+ case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
+ case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
+ case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
+ return -ENOMSG;
+ case MLX5_CMD_DELIVERY_STAT_FW_ERR:
+ return -EIO;
+ default:
+ return -EINVAL;
+ }
+}
+
static const char *deliv_status_to_str(u8 status)
{
switch (status) {
@@ -1068,12 +1117,12 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
*/
if (wait_for_completion_timeout(&ent->done, timeout)) {
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
- mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
+ mlx5_command_str(ent->op), ent->op);
return;
}
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
- mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
+ mlx5_command_str(ent->op), ent->op);
ent->ret = -ETIMEDOUT;
mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
@@ -1100,12 +1149,10 @@ out_err:
if (err == -ETIMEDOUT) {
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
- mlx5_command_str(msg_to_opcode(ent->in)),
- msg_to_opcode(ent->in));
+ mlx5_command_str(ent->op), ent->op);
} else if (err == -ECANCELED) {
mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
- mlx5_command_str(msg_to_opcode(ent->in)),
- msg_to_opcode(ent->in));
+ mlx5_command_str(ent->op), ent->op);
}
mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
err, deliv_status_to_str(ent->status), ent->status);
@@ -1116,19 +1163,29 @@ out_err:
/* Notes:
* 1. Callback functions may not sleep
 * 2. page queue commands do not support asynchronous completion
+ *
+ * return value in case (!callback):
+ * ret < 0 : Command execution couldn't be submitted by driver
+ * ret > 0 : Command execution couldn't be performed by firmware
+ * ret == 0: Command was executed by FW, Caller must check FW outbox status.
+ *
+ * return value in case (callback):
+ * ret < 0 : Command execution couldn't be submitted by driver
+ * ret == 0: Command will be submitted to FW for execution
+ * and the callback will be called for further status updates
*/
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out, void *uout, int uout_size,
mlx5_cmd_cbk_t callback,
- void *context, int page_queue, u8 *status,
+ void *context, int page_queue,
u8 token, bool force_polling)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
struct mlx5_cmd_stats *stats;
+ u8 status = 0;
int err = 0;
s64 ds;
- u16 op;
if (callback && page_queue)
return -EINVAL;
@@ -1156,21 +1213,20 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
cmd_work_handler(&ent->work);
} else if (!queue_work(cmd->wq, &ent->work)) {
mlx5_core_warn(dev, "failed to queue work\n");
- err = -ENOMEM;
+ err = -EALREADY;
goto out_free;
}
if (callback)
- goto out; /* mlx5_cmd_comp_handler() will put(ent) */
+ return 0; /* mlx5_cmd_comp_handler() will put(ent) */
err = wait_func(dev, ent);
if (err == -ETIMEDOUT || err == -ECANCELED)
goto out_free;
ds = ent->ts2 - ent->ts1;
- op = MLX5_GET(mbox_in, in->first.data, opcode);
- if (op < MLX5_CMD_OP_MAX) {
- stats = &cmd->stats[op];
+ if (ent->op < MLX5_CMD_OP_MAX) {
+ stats = &cmd->stats[ent->op];
spin_lock_irq(&stats->lock);
stats->sum += ds;
++stats->n;
@@ -1178,13 +1234,12 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
}
mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
"fw exec time for %s is %lld nsec\n",
- mlx5_command_str(op), ds);
- *status = ent->status;
+ mlx5_command_str(ent->op), ds);
out_free:
+ status = ent->status;
cmd_ent_put(ent);
-out:
- return err;
+ return err ? : status;
}
static ssize_t dbg_write(struct file *filp, const char __user *buf,
@@ -1456,8 +1511,8 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
return -EFAULT;
err = sscanf(outlen_str, "%d", &outlen);
- if (err < 0)
- return err;
+ if (err != 1)
+ return -EINVAL;
ptr = kzalloc(outlen, GFP_KERNEL);
if (!ptr)
@@ -1501,7 +1556,7 @@ static void create_debugfs_files(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
- dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
+ dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev));
debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
@@ -1602,8 +1657,6 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
vector = vec & 0xffffffff;
for (i = 0; i < (1 << cmd->log_sz); i++) {
if (test_bit(i, &vector)) {
- struct semaphore *sem;
-
ent = cmd->ent_arr[i];
/* if we already completed the command, ignore it */
@@ -1622,26 +1675,22 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
cmd_ent_put(ent); /* timeout work was canceled */
if (!forced || /* Real FW completion */
- pci_channel_offline(dev->pdev) || /* FW is inaccessible */
- dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ mlx5_cmd_is_down(dev) || /* No real FW completion is expected */
+ !opcode_allowed(cmd, ent->op))
cmd_ent_put(ent);
- if (ent->page_queue)
- sem = &cmd->pages_sem;
- else
- sem = &cmd->sem;
ent->ts2 = ktime_get_ns();
memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
dump_command(dev, ent, 0);
- if (!ent->ret) {
+
+ if (vec & MLX5_TRIGGERED_CMD_COMP)
+ ent->ret = -ENXIO;
+
+ if (!ent->ret) { /* Command completed by FW */
if (!cmd->checksum_disabled)
ent->ret = verify_signature(ent);
- else
- ent->ret = 0;
- if (vec & MLX5_TRIGGERED_CMD_COMP)
- ent->status = MLX5_DRIVER_STATUS_ABORTED;
- else
- ent->status = ent->lay->status_own >> 1;
+
+ ent->status = ent->lay->status_own >> 1;
mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
ent->ret, deliv_status_to_str(ent->status), ent->status);
@@ -1659,21 +1708,18 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
callback = ent->callback;
context = ent->context;
- err = ent->ret;
- if (!err) {
+ err = ent->ret ? : ent->status;
+ if (err > 0) /* Failed in FW, command didn't execute */
+ err = deliv_status_to_err(err);
+
+ if (!err)
err = mlx5_copy_from_msg(ent->uout,
ent->out,
ent->uout_size);
- err = err ? err : mlx5_cmd_check(dev,
- ent->in->first.data,
- ent->uout);
- }
-
mlx5_free_cmd_msg(dev, ent->out);
free_msg(dev, ent->in);
- err = err ? err : ent->status;
/* final consumer is done, release ent */
cmd_ent_put(ent);
callback(err, context);
@@ -1683,12 +1729,11 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
*/
complete(&ent->done);
}
- up(sem);
}
}
}
-void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
struct mlx5_cmd *cmd = &dev->cmd;
unsigned long bitmask;
@@ -1728,12 +1773,17 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
struct mlx5_cmd *cmd = &dev->cmd;
int i;
- for (i = 0; i < cmd->max_reg_cmds; i++)
- while (down_trylock(&cmd->sem))
+ for (i = 0; i < cmd->max_reg_cmds; i++) {
+ while (down_trylock(&cmd->sem)) {
mlx5_cmd_trigger_completions(dev);
+ cond_resched();
+ }
+ }
- while (down_trylock(&cmd->pages_sem))
+ while (down_trylock(&cmd->pages_sem)) {
mlx5_cmd_trigger_completions(dev);
+ cond_resched();
+ }
/* Unlock cmdif */
up(&cmd->pages_sem);
@@ -1741,31 +1791,6 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
up(&cmd->sem);
}
-static int status_to_err(u8 status)
-{
- switch (status) {
- case MLX5_CMD_DELIVERY_STAT_OK:
- case MLX5_DRIVER_STATUS_ABORTED:
- return 0;
- case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
- case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
- return -EBADR;
- case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
- case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
- case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
- return -EFAULT; /* Bad address */
- case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
- case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
- case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
- case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
- return -ENOMSG;
- case MLX5_CMD_DELIVERY_STAT_FW_ERR:
- return -EIO;
- default:
- return -EINVAL;
- }
-}
-
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
gfp_t gfp)
{
@@ -1777,7 +1802,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
if (in_size <= 16)
goto cache_miss;
- for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
+ for (i = 0; i < dev->profile.num_cmd_caches; i++) {
ch = &cmd->cache[i];
if (in_size > ch->max_inbox_size)
continue;
@@ -1806,29 +1831,34 @@ cache_miss:
static int is_manage_pages(void *in)
{
- return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
+ return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES;
}
+/* Notes:
+ * 1. Callback functions may not sleep
+ * 2. Page queue commands do not support asynchronous completion
+ */
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size, mlx5_cmd_cbk_t callback, void *context,
bool force_polling)
{
- struct mlx5_cmd_msg *inb;
- struct mlx5_cmd_msg *outb;
+ struct mlx5_cmd_msg *inb, *outb;
+ u16 opcode = in_to_opcode(in);
+ bool throttle_op;
int pages_queue;
gfp_t gfp;
- int err;
- u8 status = 0;
- u32 drv_synd;
- u16 opcode;
u8 token;
+ int err;
- opcode = MLX5_GET(mbox_in, in, opcode);
- if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) {
- err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
- MLX5_SET(mbox_out, out, status, status);
- MLX5_SET(mbox_out, out, syndrome, drv_synd);
- return err;
+ if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
+ return -ENXIO;
+
+ throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
+ if (throttle_op) {
+ /* atomic context may not sleep */
+ if (callback)
+ return -EINVAL;
+ down(&dev->cmd.throttle_sem);
}
pages_queue = is_manage_pages(in);
@@ -1837,7 +1867,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
inb = alloc_msg(dev, in_size, gfp);
if (IS_ERR(inb)) {
err = PTR_ERR(inb);
- return err;
+ goto out_up;
}
token = alloc_token(&dev->cmd);
@@ -1855,46 +1885,159 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
}
err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
- pages_queue, &status, token, force_polling);
+ pages_queue, token, force_polling);
+ if (callback)
+ return err;
+
+ if (err > 0) /* Failed in FW, command didn't execute */
+ err = deliv_status_to_err(err);
+
if (err)
goto out_out;
- mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
- if (status) {
- err = status_to_err(status);
- goto out_out;
+ /* command completed by FW */
+ err = mlx5_copy_from_msg(out, outb, out_size);
+out_out:
+ mlx5_free_cmd_msg(dev, outb);
+out_in:
+ free_msg(dev, inb);
+out_up:
+ if (throttle_op)
+ up(&dev->cmd.throttle_sem);
+ return err;
+}
+
+static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
+{
+ u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
+ u8 status = MLX5_GET(mbox_out, out, status);
+
+ trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod,
+ cmd_status_str(status), status, syndrome,
+ cmd_status_to_err(status));
+}
+
+static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
+ u32 syndrome, int err)
+{
+ struct mlx5_cmd_stats *stats;
+
+ if (!err)
+ return;
+
+ stats = &dev->cmd.stats[opcode];
+ spin_lock_irq(&stats->lock);
+ stats->failed++;
+ if (err < 0)
+ stats->last_failed_errno = -err;
+ if (err == -EREMOTEIO) {
+ stats->failed_mbox_status++;
+ stats->last_failed_mbox_status = status;
+ stats->last_failed_syndrome = syndrome;
}
+ spin_unlock_irq(&stats->lock);
+}
- if (!callback)
- err = mlx5_copy_from_msg(out, outb, out_size);
+/* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
+static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op_mod, void *out)
+{
+ u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
+ u8 status = MLX5_GET(mbox_out, out, status);
-out_out:
- if (!callback)
- mlx5_free_cmd_msg(dev, outb);
+ if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */
+ err = -EIO;
-out_in:
- if (!callback)
- free_msg(dev, inb);
+ if (!err && status != MLX5_CMD_STAT_OK) {
+ err = -EREMOTEIO;
+ mlx5_cmd_err_trace(dev, opcode, op_mod, out);
+ }
+
+ cmd_status_log(dev, opcode, status, syndrome, err);
return err;
}
+/**
+ * mlx5_cmd_do - Executes a fw command, wait for completion.
+ * Unlike mlx5_cmd_exec, this function will not translate or intercept
+ * outbox.status and will return -EREMOTEIO when
+ * outbox.status != MLX5_CMD_STAT_OK
+ *
+ * @dev: mlx5 core device
+ * @in: inbox mlx5_ifc command buffer
+ * @in_size: inbox buffer size
+ * @out: outbox mlx5_ifc buffer
+ * @out_size: outbox size
+ *
+ * @return:
+ * -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK.
+ * Caller must check FW outbox status.
+ * 0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK.
+ * < 0 : Command execution couldn't be performed by firmware or driver
+ */
+int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
+{
+ int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
+ u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
+ u16 opcode = in_to_opcode(in);
+
+ return cmd_status_err(dev, err, opcode, op_mod, out);
+}
+EXPORT_SYMBOL(mlx5_cmd_do);
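A minimal caller sketch for the contract documented above, assuming cmd.c scope (in_to_opcode() and the mbox_in/mbox_out layouts are file-local) and a hypothetical function name:

/* Illustrative sketch, not part of the patch. */
static int example_do_and_inspect(struct mlx5_core_dev *dev, void *in,
				  int in_size, void *out, int out_size)
{
	int err = mlx5_cmd_do(dev, in, in_size, out, out_size);

	if (err == -EREMOTEIO) {
		/* FW executed the command but outbox.status != MLX5_CMD_STAT_OK;
		 * the outbox is valid and carries status/syndrome for inspection.
		 */
		mlx5_cmd_out_err(dev, in_to_opcode(in),
				 MLX5_GET(mbox_in, in, op_mod), out);
		return err;
	}
	if (err)	/* < 0: driver or FW delivery failure */
		return err;

	return 0;	/* outbox.status == MLX5_CMD_STAT_OK, parse the outbox */
}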
+
+/**
+ * mlx5_cmd_exec - Executes a fw command, wait for completion
+ *
+ * @dev: mlx5 core device
+ * @in: inbox mlx5_ifc command buffer
+ * @in_size: inbox buffer size
+ * @out: outbox mlx5_ifc buffer
+ * @out_size: outbox size
+ *
+ * @return: 0 if no error, FW command execution was successful
+ * and outbox status is ok.
+ */
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size)
{
- int err;
+ int err = mlx5_cmd_do(dev, in, in_size, out, out_size);
- err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
- return err ? : mlx5_cmd_check(dev, in, out);
+ return mlx5_cmd_check(dev, err, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
+/**
+ * mlx5_cmd_exec_polling - Executes a fw command, poll for completion
+ * Needed for driver force teardown, when command completion EQ
+ * will not be available to complete the command
+ *
+ * @dev: mlx5 core device
+ * @in: inbox mlx5_ifc command buffer
+ * @in_size: inbox buffer size
+ * @out: outbox mlx5_ifc buffer
+ * @out_size: outbox size
+ *
+ * @return: 0 if no error, FW command execution was successful
+ * and outbox status is ok.
+ */
+int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
+ void *out, int out_size)
+{
+ int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
+ u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
+ u16 opcode = in_to_opcode(in);
+
+ err = cmd_status_err(dev, err, opcode, op_mod, out);
+ return mlx5_cmd_check(dev, err, in, out);
+}
+EXPORT_SYMBOL(mlx5_cmd_exec_polling);
+
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
struct mlx5_async_ctx *ctx)
{
ctx->dev = dev;
/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
atomic_set(&ctx->num_inflight, 1);
- init_waitqueue_head(&ctx->wait);
+ init_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
@@ -1908,19 +2051,21 @@ EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);
*/
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
- atomic_dec(&ctx->num_inflight);
- wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
+ if (!atomic_dec_and_test(&ctx->num_inflight))
+ wait_for_completion(&ctx->inflight_done);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
struct mlx5_async_work *work = _work;
- struct mlx5_async_ctx *ctx = work->ctx;
+ struct mlx5_async_ctx *ctx;
+ ctx = work->ctx;
+ status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out);
work->user_callback(status, work);
if (atomic_dec_and_test(&ctx->num_inflight))
- wake_up(&ctx->wait);
+ complete(&ctx->inflight_done);
}
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
@@ -1931,28 +2076,20 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
work->ctx = ctx;
work->user_callback = callback;
+ work->opcode = in_to_opcode(in);
+ work->op_mod = MLX5_GET(mbox_in, in, op_mod);
+ work->out = out;
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
return -EIO;
ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
mlx5_cmd_exec_cb_handler, work, false);
if (ret && atomic_dec_and_test(&ctx->num_inflight))
- wake_up(&ctx->wait);
+ complete(&ctx->inflight_done);
return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
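
A hypothetical async caller, assuming an embedding struct of its own, could pair mlx5_cmd_exec_cb() with the completion-based cleanup roughly as follows (sketch only; names and the out-buffer size are illustrative):

/* Sketch: async command whose work item embeds mlx5_async_work. */
struct example_async_cmd {
	struct mlx5_async_work cb_work;	/* must outlive the FW command */
	u32 out[MLX5_ST_SZ_DW(mbox_out)];
};

static void example_async_done(int status, struct mlx5_async_work *work)
{
	struct example_async_cmd *cmd =
		container_of(work, struct example_async_cmd, cb_work);

	/* status was already normalized by cmd_status_err() in the handler above */
	kfree(cmd);
}

static int example_async_issue(struct mlx5_async_ctx *ctx, void *in, int in_size)
{
	struct example_async_cmd *cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	int err;

	if (!cmd)
		return -ENOMEM;
	err = mlx5_cmd_exec_cb(ctx, in, in_size, cmd->out, sizeof(cmd->out),
			       example_async_done, &cmd->cb_work);
	if (err)
		kfree(cmd);	/* callback will not run on submission failure */
	return err;
}

mlx5_cmd_cleanup_async_ctx() above then blocks on inflight_done until every such callback has run.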
-int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
- void *out, int out_size)
-{
- int err;
-
- err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
-
- return err ? : mlx5_cmd_check(dev, in, out);
-}
-EXPORT_SYMBOL(mlx5_cmd_exec_polling);
-
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
struct cmd_msg_cache *ch;
@@ -1960,7 +2097,7 @@ static void destroy_msg_cache(struct mlx5_core_dev *dev)
struct mlx5_cmd_msg *n;
int i;
- for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
+ for (i = 0; i < dev->profile.num_cmd_caches; i++) {
ch = &dev->cmd.cache[i];
list_for_each_entry_safe(msg, n, &ch->head, list) {
list_del(&msg->list);
@@ -1990,7 +2127,7 @@ static void create_msg_cache(struct mlx5_core_dev *dev)
int k;
/* Initialize and fill the caches with initial entries */
- for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
+ for (k = 0; k < dev->profile.num_cmd_caches; k++) {
ch = &cmd->cache[k];
spin_lock_init(&ch->lock);
INIT_LIST_HEAD(&ch->head);
@@ -2066,15 +2203,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
return -EINVAL;
}
- cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
- if (!cmd->stats)
- return -ENOMEM;
-
cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
- if (!cmd->pool) {
- err = -ENOMEM;
- goto dma_pool_err;
- }
+ if (!cmd->pool)
+ return -ENOMEM;
err = alloc_cmd_page(dev, cmd);
if (err)
@@ -2116,6 +2247,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
sema_init(&cmd->sem, cmd->max_reg_cmds);
sema_init(&cmd->pages_sem, 1);
+ sema_init(&cmd->throttle_sem, DIV_ROUND_UP(cmd->max_reg_cmds, 2));
cmd_h = (u32)((u64)(cmd->dma) >> 32);
cmd_l = (u32)(cmd->dma);
@@ -2158,8 +2290,6 @@ err_free_page:
err_free_pool:
dma_pool_destroy(cmd->pool);
-dma_pool_err:
- kvfree(cmd->stats);
return err;
}
@@ -2172,7 +2302,6 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
destroy_msg_cache(dev);
free_cmd_page(dev, cmd);
dma_pool_destroy(cmd->pool);
- kvfree(cmd->stats);
}
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 5371ad0a12eb..4caa1b6f40ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <rdma/ib_verbs.h>
@@ -86,8 +85,9 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq,
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}
-int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
- u32 *in, int inlen, u32 *out, int outlen)
+/* Callers must verify the outbox status in case of error */
+int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u32 *in, int inlen, u32 *out, int outlen)
{
int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
c_eqn_or_apu_element);
@@ -101,7 +101,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
memset(out, 0, outlen);
MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
- err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+ err = mlx5_cmd_do(dev, in, inlen, out, outlen);
if (err)
return err;
@@ -148,6 +148,16 @@ err_cmd:
mlx5_cmd_exec_in(dev, destroy_cq, din);
return err;
}
+EXPORT_SYMBOL(mlx5_create_cq);
+
+/* outbox is checked and the err value is normalized */
+int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u32 *in, int inlen, u32 *out, int outlen)
+{
+ int err = mlx5_create_cq(dev, cq, in, inlen, out, outlen);
+
+ return mlx5_cmd_check(dev, err, in, out);
+}
EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 10d195042ab5..bb95b40d25eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
@@ -99,26 +98,32 @@ void mlx5_unregister_debugfs(void)
debugfs_remove(mlx5_debugfs_root);
}
+struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev)
+{
+ return dev->priv.dbg.dbg_root;
+}
+EXPORT_SYMBOL(mlx5_debugfs_get_dev_root);
+
void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
- dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
+ dev->priv.dbg.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg.dbg_root);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_init);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.qp_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.qp_debugfs);
}
EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);
void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
- dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root);
+ dev->priv.dbg.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg.dbg_root);
}
void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.eq_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.eq_debugfs);
}
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
@@ -161,6 +166,28 @@ static const struct file_operations stats_fops = {
.write = average_write,
};
+static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct mlx5_cmd *cmd;
+ char tbuf[6];
+ int weight;
+ int field;
+ int ret;
+
+ cmd = filp->private_data;
+ weight = bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds);
+ field = cmd->max_reg_cmds - weight;
+ ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
+ return simple_read_from_buffer(buf, count, pos, tbuf, ret);
+}
+
+static const struct file_operations slots_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = slots_read,
+};
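
For reference, the command bitmask has one bit set per free command entry, so bitmap_weight() counts the free slots and slots_inuse reports max_reg_cmds - weight; e.g. with max_reg_cmds = 32 and 29 bits set, the file would read 3 (numbers purely illustrative).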
+
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
struct mlx5_cmd_stats *stats;
@@ -168,8 +195,10 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
const char *namep;
int i;
- cmd = &dev->priv.cmdif_debugfs;
- *cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
+ cmd = &dev->priv.dbg.cmdif_debugfs;
+ *cmd = debugfs_create_dir("commands", dev->priv.dbg.dbg_root);
+
+ debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops);
for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
stats = &dev->cmd.stats[i];
@@ -180,23 +209,54 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
debugfs_create_file("average", 0400, stats->root, stats,
&stats_fops);
debugfs_create_u64("n", 0400, stats->root, &stats->n);
+ debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
+ debugfs_create_u64("failed_mbox_status", 0400, stats->root,
+ &stats->failed_mbox_status);
+ debugfs_create_u32("last_failed_errno", 0400, stats->root,
+ &stats->last_failed_errno);
+ debugfs_create_u8("last_failed_mbox_status", 0400, stats->root,
+ &stats->last_failed_mbox_status);
+ debugfs_create_x32("last_failed_syndrome", 0400, stats->root,
+ &stats->last_failed_syndrome);
}
}
}
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.cmdif_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs);
}
void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
- dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root);
+ dev->priv.dbg.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg.dbg_root);
}
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
- debugfs_remove_recursive(dev->priv.cq_debugfs);
+ debugfs_remove_recursive(dev->priv.dbg.cq_debugfs);
+}
+
+void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
+{
+ struct dentry *pages;
+
+ dev->priv.dbg.pages_debugfs = debugfs_create_dir("pages", dev->priv.dbg.dbg_root);
+ pages = dev->priv.dbg.pages_debugfs;
+
+ debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
+ debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
+ debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]);
+ debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
+ debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
+ debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
+ debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
+ &dev->priv.reclaim_pages_discard);
+}
+
+void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev)
+{
+ debugfs_remove_recursive(dev->priv.dbg.pages_debugfs);
}
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
@@ -441,7 +501,7 @@ int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
if (!mlx5_debugfs_root)
return 0;
- err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
+ err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.dbg.qp_debugfs,
&qp->dbg, qp->qpn, qp_fields,
ARRAY_SIZE(qp_fields), qp);
if (err)
@@ -468,7 +528,7 @@ int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
if (!mlx5_debugfs_root)
return 0;
- err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
+ err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.dbg.eq_debugfs,
&eq->dbg, eq->eqn, eq_fields,
ARRAY_SIZE(eq_fields), eq);
if (err)
@@ -493,7 +553,7 @@ int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
if (!mlx5_debugfs_root)
return 0;
- err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
+ err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.dbg.cq_debugfs,
&cq->dbg, cq->cqn, cq_fields,
ARRAY_SIZE(cq_fields), cq);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index a8b84d53dfb0..1b33533b15de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/mlx5_ifc_vdpa.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
+#include "devlink.h"
/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);
@@ -106,17 +107,6 @@ bool mlx5_eth_supported(struct mlx5_core_dev *dev)
return true;
}
-static bool is_eth_enabled(struct mlx5_core_dev *dev)
-{
- union devlink_param_value val;
- int err;
-
- err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
- &val);
- return err ? false : val.vbool;
-}
-
bool mlx5_vnet_supported(struct mlx5_core_dev *dev)
{
if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET))
@@ -144,9 +134,9 @@ static bool is_vnet_enabled(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
- DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
- &val);
+ err = devl_param_driverinit_value_get(priv_to_devlink(dev),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
+ &val);
return err ? false : val.vbool;
}
@@ -215,9 +205,9 @@ static bool is_ib_enabled(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
- DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
- &val);
+ err = devl_param_driverinit_value_get(priv_to_devlink(dev),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ &val);
return err ? false : val.vbool;
}
@@ -245,7 +235,7 @@ static const struct mlx5_adev_device {
.is_enabled = &is_ib_enabled },
[MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth",
.is_supported = &mlx5_eth_supported,
- .is_enabled = &is_eth_enabled },
+ .is_enabled = &mlx5_core_is_eth_enabled },
[MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep",
.is_supported = &is_eth_rep_supported },
[MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep",
@@ -340,6 +330,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
struct auxiliary_driver *adrv;
int ret = 0, i;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
@@ -370,10 +361,6 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
 		/* Note that the driver attached here is the auxiliary driver,
 		 * not the PCI driver to which mlx5_core_dev is connected.
- *
- * Here we can race of module unload with devlink
- * reload, but we don't need to take extra lock because
- * we are holding global mlx5_intf_mutex.
*/
if (!adev->dev.driver)
continue;
@@ -393,7 +380,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
return ret;
}
-void mlx5_detach_device(struct mlx5_core_dev *dev)
+void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
{
struct mlx5_priv *priv = &dev->priv;
struct auxiliary_device *adev;
@@ -401,6 +388,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
pm_message_t pm = {};
int i;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
if (!priv->adev[i])
@@ -421,7 +409,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
adrv = to_auxiliary_drv(adev->dev.driver);
- if (adrv->suspend) {
+ if (adrv->suspend && suspend) {
adrv->suspend(adev, pm);
continue;
}
@@ -438,6 +426,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
{
int ret;
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
ret = mlx5_rescan_drivers_locked(dev);
@@ -450,6 +439,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
+ devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
mlx5_rescan_drivers_locked(dev);
@@ -538,7 +528,7 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
return add_drivers(dev);
}
-static bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
+bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
{
u64 fsystem_guid, psystem_guid;
@@ -555,12 +545,9 @@ static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
PCI_SLOT(dev->pdev->devfn));
}
-static int next_phys_dev(struct device *dev, const void *data)
+static int _next_phys_dev(struct mlx5_core_dev *mdev,
+ const struct mlx5_core_dev *curr)
{
- struct mlx5_adev *madev = container_of(dev, struct mlx5_adev, adev.dev);
- struct mlx5_core_dev *mdev = madev->mdev;
- const struct mlx5_core_dev *curr = data;
-
if (!mlx5_core_is_pf(mdev))
return 0;
@@ -574,22 +561,52 @@ static int next_phys_dev(struct device *dev, const void *data)
return 1;
}
-/* Must be called with intf_mutex held */
-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
+static void *pci_get_other_drvdata(struct device *this, struct device *other)
{
- struct auxiliary_device *adev;
- struct mlx5_adev *madev;
+ if (this->driver != other->driver)
+ return NULL;
+
+ return pci_get_drvdata(to_pci_dev(other));
+}
+
+static int next_phys_dev_lag(struct device *dev, const void *data)
+{
+ struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
+
+ mdev = pci_get_other_drvdata(this->device, dev);
+ if (!mdev)
+ return 0;
+
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
+ !MLX5_CAP_GEN(mdev, lag_master) ||
+ (MLX5_CAP_GEN(mdev, num_lag_ports) > MLX5_MAX_PORTS ||
+ MLX5_CAP_GEN(mdev, num_lag_ports) <= 1))
+ return 0;
+
+ return _next_phys_dev(mdev, data);
+}
+
+static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
+ int (*match)(struct device *dev, const void *data))
+{
+ struct device *next;
if (!mlx5_core_is_pf(dev))
return NULL;
- adev = auxiliary_find_device(NULL, dev, &next_phys_dev);
- if (!adev)
+ next = bus_find_device(&pci_bus_type, NULL, dev, match);
+ if (!next)
return NULL;
- madev = container_of(adev, struct mlx5_adev, adev);
- put_device(&adev->dev);
- return madev->mdev;
+ put_device(next);
+ return pci_get_drvdata(to_pci_dev(next));
+}
+
+/* Must be called with intf_mutex held */
+struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
+{
+ lockdep_assert_held(&mlx5_intf_mutex);
+ return mlx5_get_next_dev(dev, &next_phys_dev_lag);
}
void mlx5_dev_list_lock(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 1c98652b244a..4b607785d694 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -7,6 +7,7 @@
#include "fw_reset.h"
#include "fs_core.h"
#include "eswitch.h"
+#include "lag/lag.h"
#include "esw/qos.h"
#include "sf/dev/dev.h"
#include "sf/sf.h"
@@ -46,10 +47,6 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
u32 running_fw, stored_fw;
int err;
- err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
- if (err)
- return err;
-
err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id);
if (err)
return err;
@@ -100,14 +97,19 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
}
net_port_alive = !!(reset_type & MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE);
- err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive);
+ err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive, extack);
if (err)
- goto out;
+ return err;
err = mlx5_fw_reset_wait_reset_done(dev);
-out:
if (err)
- NL_SET_ERR_MSG_MOD(extack, "FW activate command failed");
+ return err;
+
+ mlx5_unload_one_devl_locked(dev, true);
+ err = mlx5_health_wait_pci_up(dev);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
+
return err;
}
@@ -138,6 +140,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct pci_dev *pdev = dev->pdev;
bool sf_dev_allocated;
+ int ret = 0;
sf_dev_allocated = mlx5_sf_dev_allocated(dev);
if (sf_dev_allocated) {
@@ -154,23 +157,32 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}
+ if (mlx5_core_is_mp_slave(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported for multi port slave");
+ return -EOPNOTSUPP;
+ }
+
if (pci_num_vf(pdev)) {
NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
}
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- mlx5_unload_one(dev);
- return 0;
+ mlx5_unload_one_devl_locked(dev, false);
+ break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
- return mlx5_devlink_trigger_fw_live_patch(devlink, extack);
- return mlx5_devlink_reload_fw_activate(devlink, extack);
+ ret = mlx5_devlink_trigger_fw_live_patch(devlink, extack);
+ else
+ ret = mlx5_devlink_reload_fw_activate(devlink, extack);
+ break;
default:
/* Unsupported action should not get to this function */
WARN_ON(1);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
}
+
+ return ret;
}
static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
@@ -178,24 +190,27 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ int ret = 0;
*actions_performed = BIT(action);
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- return mlx5_load_one(dev);
+ ret = mlx5_load_one_devl_locked(dev, false);
+ break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
break;
/* On fw_activate action, also driver is reloaded and reinit performed */
*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
- return mlx5_load_one(dev);
+ ret = mlx5_load_one_devl_locked(dev, true);
+ break;
default:
/* Unsupported action should not get to this function */
WARN_ON(1);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
}
- return 0;
+ return ret;
}
static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id)
@@ -254,9 +269,10 @@ static int mlx5_devlink_trap_action_set(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_devlink_trap_event_ctx trap_event_ctx;
enum devlink_trap_action action_orig;
struct mlx5_devlink_trap *dl_trap;
- int err = 0;
+ int err;
if (is_mdev_switchdev_mode(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Devlink traps can't be set in switchdev mode");
@@ -266,26 +282,25 @@ static int mlx5_devlink_trap_action_set(struct devlink *devlink,
dl_trap = mlx5_find_trap_by_id(dev, trap->id);
if (!dl_trap) {
mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
- if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP) {
- err = -EOPNOTSUPP;
- goto out;
- }
+ if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP)
+ return -EOPNOTSUPP;
if (action == dl_trap->trap.action)
- goto out;
+ return 0;
action_orig = dl_trap->trap.action;
dl_trap->trap.action = action;
+ trap_event_ctx.trap = &dl_trap->trap;
+ trap_event_ctx.err = 0;
err = mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_TYPE_TRAP,
- &dl_trap->trap);
- if (err)
+ &trap_event_ctx);
+ if (err == NOTIFY_BAD)
dl_trap->trap.action = action_orig;
-out:
- return err;
+
+ return trap_event_ctx.err;
}
static const struct devlink_ops mlx5_devlink_ops = {
@@ -305,6 +320,10 @@ static const struct devlink_ops mlx5_devlink_ops = {
.rate_node_new = mlx5_esw_devlink_rate_node_new,
.rate_node_del = mlx5_esw_devlink_rate_node_del,
.rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set,
+ .port_fn_roce_get = mlx5_devlink_port_fn_roce_get,
+ .port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
+ .port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
+ .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
.port_new = mlx5_devlink_sf_port_new,
@@ -383,70 +402,6 @@ void mlx5_devlink_free(struct devlink *devlink)
devlink_free(devlink);
}
-static int mlx5_devlink_fs_mode_validate(struct devlink *devlink, u32 id,
- union devlink_param_value val,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
- char *value = val.vstr;
- int err = 0;
-
- if (!strcmp(value, "dmfs")) {
- return 0;
- } else if (!strcmp(value, "smfs")) {
- u8 eswitch_mode;
- bool smfs_cap;
-
- eswitch_mode = mlx5_eswitch_mode(dev);
- smfs_cap = mlx5_fs_dr_is_supported(dev);
-
- if (!smfs_cap) {
- err = -EOPNOTSUPP;
- NL_SET_ERR_MSG_MOD(extack,
- "Software managed steering is not supported by current device");
- }
-
- else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
- NL_SET_ERR_MSG_MOD(extack,
- "Software managed steering is not supported when eswitch offloads enabled.");
- err = -EOPNOTSUPP;
- }
- } else {
- NL_SET_ERR_MSG_MOD(extack,
- "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
- err = -EINVAL;
- }
-
- return err;
-}
-
-static int mlx5_devlink_fs_mode_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
- enum mlx5_flow_steering_mode mode;
-
- if (!strcmp(ctx->val.vstr, "smfs"))
- mode = MLX5_FLOW_STEERING_MODE_SMFS;
- else
- mode = MLX5_FLOW_STEERING_MODE_DMFS;
- dev->priv.steering->mode = mode;
-
- return 0;
-}
-
-static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
-
- if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
- strcpy(ctx->val.vstr, "smfs");
- else
- strcpy(ctx->val.vstr, "dmfs");
- return 0;
-}
-
static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -455,7 +410,7 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
bool new_state = val.vbool;
if (new_state && !MLX5_CAP_GEN(dev, roce) &&
- !MLX5_CAP_GEN(dev, roce_rw_supported)) {
+ !(MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce))) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
return -EOPNOTSUPP;
}
@@ -483,75 +438,118 @@ static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id
return 0;
}
-static int mlx5_devlink_esw_port_metadata_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!MLX5_ESWITCH_MANAGER(dev))
return -EOPNOTSUPP;
- return mlx5_esw_offloads_vport_metadata_set(dev->priv.eswitch, ctx->val.vbool);
+ if (ctx->val.vbool)
+ return mlx5_lag_mpesw_enable(dev);
+
+ mlx5_lag_mpesw_disable(dev);
+ return 0;
}
-static int mlx5_devlink_esw_port_metadata_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!MLX5_ESWITCH_MANAGER(dev))
return -EOPNOTSUPP;
- ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
+ ctx->val.vbool = mlx5_lag_is_mpesw(dev);
return 0;
}
-static int mlx5_devlink_esw_port_metadata_validate(struct devlink *devlink, u32 id,
- union devlink_param_value val,
- struct netlink_ext_ack *extack)
+static int mlx5_devlink_esw_multiport_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- u8 esw_mode;
if (!MLX5_ESWITCH_MANAGER(dev)) {
NL_SET_ERR_MSG_MOD(extack, "E-Switch is unsupported");
return -EOPNOTSUPP;
}
- esw_mode = mlx5_eswitch_mode(dev);
- if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
+
+ if (mlx5_eswitch_mode(dev) != MLX5_ESWITCH_OFFLOADS) {
NL_SET_ERR_MSG_MOD(extack,
- "E-Switch must either disabled or non switchdev mode");
+ "E-Switch must be in switchdev mode");
return -EBUSY;
}
+
return 0;
}
#endif
-static int mlx5_devlink_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ return (val.vu32 >= 64 && val.vu32 <= 4096) ? 0 : -EINVAL;
+}
+
+static int
+mlx5_devlink_hairpin_num_queues_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ return val.vu32 ? 0 : -EINVAL;
+}
+
+static int
+mlx5_devlink_hairpin_queue_size_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u32 val32 = val.vu32;
+
+ if (!is_power_of_2(val32)) {
+ NL_SET_ERR_MSG_MOD(extack, "Value is not power of two");
+ return -EINVAL;
+ }
+
+ if (val32 > BIT(MLX5_CAP_GEN(dev, log_max_hairpin_num_packets))) {
+ NL_SET_ERR_MSG_FMT_MOD(
+ extack, "Maximum hairpin queue size is %lu",
+ BIT(MLX5_CAP_GEN(dev, log_max_hairpin_num_packets)));
+ return -EINVAL;
+ }
- mlx5_fw_reset_enable_remote_dev_reset_set(dev, ctx->val.vbool);
return 0;
}
-static int mlx5_devlink_enable_remote_dev_reset_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static void mlx5_devlink_hairpin_params_init_values(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ union devlink_param_value value;
+ u32 link_speed = 0;
+ u64 link_speed64;
- ctx->val.vbool = mlx5_fw_reset_enable_remote_dev_reset_get(dev);
- return 0;
+ /* set one hairpin queue pair per 50Gbps share of the link speed */
+ mlx5_port_max_linkspeed(dev, &link_speed);
+ link_speed = max_t(u32, link_speed, 50000);
+ link_speed64 = link_speed;
+ do_div(link_speed64, 50000);
+
+ value.vu32 = link_speed64;
+ devl_param_driverinit_value_set(
+ devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES, value);
+
+ value.vu32 =
+ BIT(min_t(u32, 16 - MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(dev),
+ MLX5_CAP_GEN(dev, log_max_hairpin_num_packets)));
+ devl_param_driverinit_value_set(
+ devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE, value);
}
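
As an illustrative calculation of the defaults seeded above (assuming mlx5_port_max_linkspeed() reports the speed in Mb/s, as the 50000 divisor implies): a 200 Gb/s port gives hairpin_num_queues = 200000 / 50000 = 4, and with log_max_hairpin_num_packets = 10 and MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(dev) = 6 the default hairpin_queue_size becomes BIT(min(16 - 6, 10)) = 1024 packets.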
static const struct devlink_param mlx5_devlink_params[] = {
- DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
- "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
- mlx5_devlink_fs_mode_validate),
DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_enable_roce_validate),
#ifdef CONFIG_MLX5_ESWITCH
@@ -560,16 +558,17 @@ static const struct devlink_param mlx5_devlink_params[] = {
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL,
mlx5_devlink_large_group_num_validate),
- DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
- "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_MULTIPORT,
+ "esw_multiport", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- mlx5_devlink_esw_port_metadata_get,
- mlx5_devlink_esw_port_metadata_set,
- mlx5_devlink_esw_port_metadata_validate),
+ mlx5_devlink_esw_multiport_get,
+ mlx5_devlink_esw_multiport_set,
+ mlx5_devlink_esw_multiport_validate),
#endif
- DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- mlx5_devlink_enable_remote_dev_reset_get,
- mlx5_devlink_enable_remote_dev_reset_set, NULL),
+ DEVLINK_PARAM_GENERIC(IO_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_eq_depth_validate),
+ DEVLINK_PARAM_GENERIC(EVENT_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_eq_depth_validate),
};
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
@@ -577,44 +576,43 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
- if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS)
- strcpy(value.vstr, "dmfs");
- else
- strcpy(value.vstr, "smfs");
- devlink_param_driverinit_value_set(devlink,
- MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
- value);
-
value.vbool = MLX5_CAP_GEN(dev, roce);
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ value);
#ifdef CONFIG_MLX5_ESWITCH
value.vu32 = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
- devlink_param_driverinit_value_set(devlink,
- MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
- value);
-
- if (MLX5_ESWITCH_MANAGER(dev)) {
- if (mlx5_esw_vport_match_metadata_supported(dev->priv.eswitch)) {
- dev->priv.eswitch->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
- value.vbool = true;
- } else {
- value.vbool = false;
- }
- devlink_param_driverinit_value_set(devlink,
- MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
- value);
- }
+ devl_param_driverinit_value_set(devlink,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ value);
#endif
+
+ value.vu32 = MLX5_COMP_EQ_SIZE;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ value);
+
+ value.vu32 = MLX5_NUM_ASYNC_EQE;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ value);
}
-static const struct devlink_param enable_eth_param =
+static const struct devlink_param mlx5_devlink_eth_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_ETH, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
- NULL, NULL, NULL);
+ NULL, NULL, NULL),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES,
+ "hairpin_num_queues", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_hairpin_num_queues_validate),
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE,
+ "hairpin_queue_size", DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlx5_devlink_hairpin_queue_size_validate),
+};
-static int mlx5_devlink_eth_param_register(struct devlink *devlink)
+static int mlx5_devlink_eth_params_register(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
@@ -623,25 +621,30 @@ static int mlx5_devlink_eth_param_register(struct devlink *devlink)
if (!mlx5_eth_supported(dev))
return 0;
- err = devlink_param_register(devlink, &enable_eth_param);
+ err = devl_params_register(devlink, mlx5_devlink_eth_params,
+ ARRAY_SIZE(mlx5_devlink_eth_params));
if (err)
return err;
value.vbool = true;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
+ value);
+
+ mlx5_devlink_hairpin_params_init_values(devlink);
+
return 0;
}
-static void mlx5_devlink_eth_param_unregister(struct devlink *devlink)
+static void mlx5_devlink_eth_params_unregister(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!mlx5_eth_supported(dev))
return;
- devlink_param_unregister(devlink, &enable_eth_param);
+ devl_params_unregister(devlink, mlx5_devlink_eth_params,
+ ARRAY_SIZE(mlx5_devlink_eth_params));
}
static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
@@ -656,11 +659,12 @@ static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
return 0;
}
-static const struct devlink_param enable_rdma_param =
+static const struct devlink_param mlx5_devlink_rdma_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
- NULL, NULL, mlx5_devlink_enable_rdma_validate);
+ NULL, NULL, mlx5_devlink_enable_rdma_validate),
+};
-static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
+static int mlx5_devlink_rdma_params_register(struct devlink *devlink)
{
union devlink_param_value value;
int err;
@@ -668,30 +672,33 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return 0;
- err = devlink_param_register(devlink, &enable_rdma_param);
+ err = devl_params_register(devlink, mlx5_devlink_rdma_params,
+ ARRAY_SIZE(mlx5_devlink_rdma_params));
if (err)
return err;
value.vbool = true;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ value);
return 0;
}
-static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
+static void mlx5_devlink_rdma_params_unregister(struct devlink *devlink)
{
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return;
- devlink_param_unregister(devlink, &enable_rdma_param);
+ devl_params_unregister(devlink, mlx5_devlink_rdma_params,
+ ARRAY_SIZE(mlx5_devlink_rdma_params));
}
-static const struct devlink_param enable_vnet_param =
+static const struct devlink_param mlx5_devlink_vnet_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_VNET, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
- NULL, NULL, NULL);
+ NULL, NULL, NULL),
+};
-static int mlx5_devlink_vnet_param_register(struct devlink *devlink)
+static int mlx5_devlink_vnet_params_register(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
@@ -700,56 +707,121 @@ static int mlx5_devlink_vnet_param_register(struct devlink *devlink)
if (!mlx5_vnet_supported(dev))
return 0;
- err = devlink_param_register(devlink, &enable_vnet_param);
+ err = devl_params_register(devlink, mlx5_devlink_vnet_params,
+ ARRAY_SIZE(mlx5_devlink_vnet_params));
if (err)
return err;
value.vbool = true;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
+ value);
return 0;
}
-static void mlx5_devlink_vnet_param_unregister(struct devlink *devlink)
+static void mlx5_devlink_vnet_params_unregister(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!mlx5_vnet_supported(dev))
return;
- devlink_param_unregister(devlink, &enable_vnet_param);
+ devl_params_unregister(devlink, mlx5_devlink_vnet_params,
+ ARRAY_SIZE(mlx5_devlink_vnet_params));
}
static int mlx5_devlink_auxdev_params_register(struct devlink *devlink)
{
int err;
- err = mlx5_devlink_eth_param_register(devlink);
+ err = mlx5_devlink_eth_params_register(devlink);
if (err)
return err;
- err = mlx5_devlink_rdma_param_register(devlink);
+ err = mlx5_devlink_rdma_params_register(devlink);
if (err)
goto rdma_err;
- err = mlx5_devlink_vnet_param_register(devlink);
+ err = mlx5_devlink_vnet_params_register(devlink);
if (err)
goto vnet_err;
return 0;
vnet_err:
- mlx5_devlink_rdma_param_unregister(devlink);
+ mlx5_devlink_rdma_params_unregister(devlink);
rdma_err:
- mlx5_devlink_eth_param_unregister(devlink);
+ mlx5_devlink_eth_params_unregister(devlink);
return err;
}
static void mlx5_devlink_auxdev_params_unregister(struct devlink *devlink)
{
- mlx5_devlink_vnet_param_unregister(devlink);
- mlx5_devlink_rdma_param_unregister(devlink);
- mlx5_devlink_eth_param_unregister(devlink);
+ mlx5_devlink_vnet_params_unregister(devlink);
+ mlx5_devlink_rdma_params_unregister(devlink);
+ mlx5_devlink_eth_params_unregister(devlink);
+}
+
+static int mlx5_devlink_max_uc_list_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (val.vu32 == 0) {
+ NL_SET_ERR_MSG_MOD(extack, "max_macs value must be greater than 0");
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(val.vu32)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only power of 2 values are supported for max_macs");
+ return -EINVAL;
+ }
+
+ if (ilog2(val.vu32) >
+ MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list)) {
+ NL_SET_ERR_MSG_MOD(extack, "max_macs value is out of the supported range");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param mlx5_devlink_max_uc_list_params[] = {
+ DEVLINK_PARAM_GENERIC(MAX_MACS, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, mlx5_devlink_max_uc_list_validate),
+};
+
+static int mlx5_devlink_max_uc_list_params_register(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ union devlink_param_value value;
+ int err;
+
+ if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
+ return 0;
+
+ err = devl_params_register(devlink, mlx5_devlink_max_uc_list_params,
+ ARRAY_SIZE(mlx5_devlink_max_uc_list_params));
+ if (err)
+ return err;
+
+ value.vu32 = 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
+ return 0;
+}
+
+static void
+mlx5_devlink_max_uc_list_params_unregister(struct devlink *devlink)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
+ return;
+
+ devl_params_unregister(devlink, mlx5_devlink_max_uc_list_params,
+ ARRAY_SIZE(mlx5_devlink_max_uc_list_params));
}
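
For example, a device reporting log_max_current_uc_list = 7 would get a max_macs default of 1 << 7 = 128, and the validator above then accepts only powers of two up to 1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) (figures illustrative).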
#define MLX5_TRAP_DROP(_id, _group_id) \
@@ -766,42 +838,46 @@ static const struct devlink_trap_group mlx5_trap_groups_arr[] = {
DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
};
-static int mlx5_devlink_traps_register(struct devlink *devlink)
+int mlx5_devlink_traps_register(struct devlink *devlink)
{
struct mlx5_core_dev *core_dev = devlink_priv(devlink);
int err;
- err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ err = devl_trap_groups_register(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
if (err)
return err;
- err = devlink_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
- &core_dev->priv);
+ err = devl_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
+ &core_dev->priv);
if (err)
goto err_trap_group;
return 0;
err_trap_group:
- devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
return err;
}
-static void mlx5_devlink_traps_unregister(struct devlink *devlink)
+void mlx5_devlink_traps_unregister(struct devlink *devlink)
{
- devlink_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
- devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
- ARRAY_SIZE(mlx5_trap_groups_arr));
+ devl_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
+ devl_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
}
-int mlx5_devlink_register(struct devlink *devlink)
+int mlx5_devlink_params_register(struct devlink *devlink)
{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
int err;
- err = devlink_params_register(devlink, mlx5_devlink_params,
- ARRAY_SIZE(mlx5_devlink_params));
+ /* Here only the driver init params should be registered.
+ * Runtime params should be registered by the code whose
+ * behaviour they configure.
+ */
+
+ err = devl_params_register(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
if (err)
return err;
@@ -811,27 +887,24 @@ int mlx5_devlink_register(struct devlink *devlink)
if (err)
goto auxdev_reg_err;
- err = mlx5_devlink_traps_register(devlink);
+ err = mlx5_devlink_max_uc_list_params_register(devlink);
if (err)
- goto traps_reg_err;
-
- if (!mlx5_core_is_mp_slave(dev))
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ goto max_uc_list_err;
return 0;
-traps_reg_err:
+max_uc_list_err:
mlx5_devlink_auxdev_params_unregister(devlink);
auxdev_reg_err:
- devlink_params_unregister(devlink, mlx5_devlink_params,
- ARRAY_SIZE(mlx5_devlink_params));
+ devl_params_unregister(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
return err;
}
-void mlx5_devlink_unregister(struct devlink *devlink)
+void mlx5_devlink_params_unregister(struct devlink *devlink)
{
- mlx5_devlink_traps_unregister(devlink);
+ mlx5_devlink_max_uc_list_params_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
- devlink_params_unregister(devlink, mlx5_devlink_params,
- ARRAY_SIZE(mlx5_devlink_params));
+ devl_params_unregister(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index 30bf4882779b..defba5bd91d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -11,6 +11,9 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
+ MLX5_DEVLINK_PARAM_ID_ESW_MULTIPORT,
+ MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES,
+ MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE,
};
struct mlx5_trap_ctx {
@@ -24,16 +27,34 @@ struct mlx5_devlink_trap {
struct list_head list;
};
+struct mlx5_devlink_trap_event_ctx {
+ struct mlx5_trap_ctx *trap;
+ int err;
+};
+
struct mlx5_core_dev;
void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
struct devlink_port *dl_port);
int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev);
int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
enum devlink_trap_action *action);
+int mlx5_devlink_traps_register(struct devlink *devlink);
+void mlx5_devlink_traps_unregister(struct devlink *devlink);
struct devlink *mlx5_devlink_alloc(struct device *dev);
void mlx5_devlink_free(struct devlink *devlink);
-int mlx5_devlink_register(struct devlink *devlink);
-void mlx5_devlink_unregister(struct devlink *devlink);
+int mlx5_devlink_params_register(struct devlink *devlink);
+void mlx5_devlink_params_unregister(struct devlink *devlink);
+
+static inline bool mlx5_core_is_eth_enabled(struct mlx5_core_dev *dev)
+{
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(priv_to_devlink(dev),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
+ &val);
+ return err ? false : val.vbool;
+}
#endif /* __MLX5_DEVLINK_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h
new file mode 100644
index 000000000000..406ebe17405f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_CMD_TP_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_CMD_TP_H_
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+TRACE_EVENT(mlx5_cmd,
+ TP_PROTO(const char *command_str, u16 opcode, u16 op_mod,
+ const char *status_str, u8 status, u32 syndrome, int err),
+ TP_ARGS(command_str, opcode, op_mod, status_str, status, syndrome, err),
+ TP_STRUCT__entry(__string(command_str, command_str)
+ __field(u16, opcode)
+ __field(u16, op_mod)
+ __string(status_str, status_str)
+ __field(u8, status)
+ __field(u32, syndrome)
+ __field(int, err)
+ ),
+ TP_fast_assign(__assign_str(command_str, command_str);
+ __entry->opcode = opcode;
+ __entry->op_mod = op_mod;
+ __assign_str(status_str, status_str);
+ __entry->status = status;
+ __entry->syndrome = syndrome;
+ __entry->err = err;
+ ),
+ TP_printk("%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)",
+ __get_str(command_str), __entry->opcode, __entry->op_mod,
+ __get_str(status_str), __entry->status, __entry->syndrome,
+ __entry->err)
+);
+
+#endif /* _MLX5_CMD_TP_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ./diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cmd_tracepoint
+#include <trace/define_trace.h>
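
For context, mlx5_cmd_err_trace() seen earlier in this patch is the natural producer for this event; a hedged sketch of such a helper follows (the body is an assumption, though mlx5_command_str() and cmd_status_str() are the driver's existing opcode/status stringifiers in cmd.c):

/* Sketch only: cmd.c would define the tracepoints before including the header:
 * #define CREATE_TRACE_POINTS
 * #include "diag/cmd_tracepoint.h"
 */
static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode,
			       u16 op_mod, void *out)
{
	u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
	u8 status = MLX5_GET(mbox_out, out, status);

	/* err is -EREMOTEIO at the call site in cmd_status_err() */
	trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod,
		       cmd_status_str(status), status, syndrome, -EREMOTEIO);
}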
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 7841ef6c193c..6d73127b7217 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -228,6 +228,17 @@ const char *parse_fs_hdrs(struct trace_seq *p,
return ret;
}
+static const char
+*fs_dest_range_field_to_str(enum mlx5_flow_dest_range_field field)
+{
+ switch (field) {
+ case MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN:
+ return "packet len";
+ default:
+ return "unknown dest range field";
+ }
+}
+
const char *parse_fs_dst(struct trace_seq *p,
const struct mlx5_flow_destination *dst,
u32 counter_id)
@@ -259,6 +270,18 @@ const char *parse_fs_dst(struct trace_seq *p,
case MLX5_FLOW_DESTINATION_TYPE_PORT:
trace_seq_printf(p, "port\n");
break;
+ case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+ trace_seq_printf(p, "field=%s min=%d max=%d\n",
+ fs_dest_range_field_to_str(dst->range.field),
+ dst->range.min, dst->range.max);
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
+ trace_seq_printf(p, "flow_table_type=%u id:%u\n", dst->ft->type,
+ dst->ft->id);
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_NONE:
+ trace_seq_printf(p, "none\n");
+ break;
}
trace_seq_putc(p, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index eae9aa9c0811..f40497823e65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -64,6 +64,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
MLX5_GET(mtrc_cap, out, num_string_trace);
tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db);
tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);
+ tracer->str_db.loaded = false;
for (i = 0; i < tracer->str_db.num_string_db; i++) {
mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]);
@@ -233,6 +234,8 @@ static int mlx5_fw_tracer_allocate_strings_db(struct mlx5_fw_tracer *tracer)
int i;
for (i = 0; i < num_string_db; i++) {
+ if (!string_db_size_out[i])
+ continue;
tracer->str_db.buffer[i] = kzalloc(string_db_size_out[i], GFP_KERNEL);
if (!tracer->str_db.buffer[i])
goto free_strings_db;
@@ -278,6 +281,8 @@ static void mlx5_tracer_read_strings_db(struct work_struct *work)
}
for (i = 0; i < num_string_db; i++) {
+ if (!tracer->str_db.size_out[i])
+ continue;
offset = 0;
MLX5_SET(mtrc_stdb, in, string_db_index, i);
num_of_reads = tracer->str_db.size_out[i] /
@@ -384,6 +389,8 @@ static struct tracer_string_format *mlx5_tracer_get_string(struct mlx5_fw_tracer
str_ptr = tracer_event->string_event.string_param;
for (i = 0; i < tracer->str_db.num_string_db; i++) {
+ if (!tracer->str_db.size_out[i])
+ continue;
if (str_ptr > tracer->str_db.base_address_out[i] &&
str_ptr < tracer->str_db.base_address_out[i] +
tracer->str_db.size_out[i]) {
@@ -459,6 +466,7 @@ static void poll_trace(struct mlx5_fw_tracer *tracer,
tracer_event->event_id = MLX5_GET(tracer_event, trace, event_id);
tracer_event->lost_event = MLX5_GET(tracer_event, trace, lost);
+ tracer_event->out = trace;
switch (tracer_event->event_id) {
case TRACER_EVENT_TYPE_TIMESTAMP:
@@ -581,6 +589,26 @@ void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt,
mlx5_tracer_clean_message(str_frmt);
}
+static int mlx5_tracer_handle_raw_string(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct tracer_string_format *cur_string;
+
+ cur_string = mlx5_tracer_message_insert(tracer, tracer_event);
+ if (!cur_string)
+ return -1;
+
+ cur_string->event_id = tracer_event->event_id;
+ cur_string->timestamp = tracer_event->string_event.timestamp;
+ cur_string->lost = tracer_event->lost_event;
+ cur_string->string = "0x%08x%08x";
+ cur_string->num_of_params = 2;
+ cur_string->params[0] = upper_32_bits(*tracer_event->out);
+ cur_string->params[1] = lower_32_bits(*tracer_event->out);
+ list_add_tail(&cur_string->list, &tracer->ready_strings_list);
+ return 0;
+}
+
static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
struct tracer_event *tracer_event)
{
@@ -589,7 +617,7 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
if (tracer_event->string_event.tdsn == 0) {
cur_string = mlx5_tracer_get_string(tracer, tracer_event);
if (!cur_string)
- return -1;
+ return mlx5_tracer_handle_raw_string(tracer, tracer_event);
cur_string->num_of_params = mlx5_tracer_get_num_of_params(cur_string->string);
cur_string->last_param_num = 0;
@@ -602,9 +630,9 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
} else {
cur_string = mlx5_tracer_message_get(tracer, tracer_event);
if (!cur_string) {
- pr_debug("%s Got string event for unknown string tdsm: %d\n",
+ pr_debug("%s Got string event for unknown string tmsn: %d\n",
__func__, tracer_event->string_event.tmsn);
- return -1;
+ return mlx5_tracer_handle_raw_string(tracer, tracer_event);
}
cur_string->last_param_num += 1;
if (cur_string->last_param_num > TRACER_MAX_PARAMS) {
@@ -638,7 +666,7 @@ static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer,
trace_timestamp = (timestamp_event.timestamp & MASK_52_7) |
(str_frmt->timestamp & MASK_6_0);
else
- trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) |
+ trace_timestamp = ((timestamp_event.timestamp - 1) & MASK_52_7) |
(str_frmt->timestamp & MASK_6_0);
mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp);
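
To see why the subtraction was moved before the masking: with MASK_52_7 spanning bits 52..7, a timestamp of 0x100 previously produced (0x100 & MASK_52_7) - 1 = 0xff, leaking bits 6..0 into the OR with the format's low bits, whereas (0x100 - 1) & MASK_52_7 = 0x80 rounds down to the previous window as intended (value chosen for illustration).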
@@ -675,6 +703,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
if (!tracer->owner)
return;
+ if (unlikely(!tracer->str_db.loaded))
+ goto arm;
+
block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
@@ -732,6 +763,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
&tmp_trace_block[TRACES_PER_BLOCK - 1]);
}
+arm:
mlx5_fw_tracer_arm(dev);
}
@@ -752,6 +784,7 @@ static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
if (err)
mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err);
+ tracer->buff.consumer_index = 0;
return err;
}
@@ -816,7 +849,6 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
if (tracer->owner) {
tracer->owner = false;
- tracer->buff.consumer_index = 0;
return;
}
@@ -926,6 +958,14 @@ unlock:
return err;
}
+static void mlx5_fw_tracer_update_db(struct work_struct *work)
+{
+ struct mlx5_fw_tracer *tracer =
+ container_of(work, struct mlx5_fw_tracer, update_db_work);
+
+ mlx5_fw_tracer_reload(tracer);
+}
+
/* Create software resources (Buffers, etc ..) */
struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
{
@@ -953,6 +993,8 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
INIT_WORK(&tracer->ownership_change_work, mlx5_fw_tracer_ownership_change);
INIT_WORK(&tracer->read_fw_strings_work, mlx5_tracer_read_strings_db);
INIT_WORK(&tracer->handle_traces_work, mlx5_fw_tracer_handle_traces);
+ INIT_WORK(&tracer->update_db_work, mlx5_fw_tracer_update_db);
+ mutex_init(&tracer->state_lock);
err = mlx5_query_mtrc_caps(tracer);
@@ -999,11 +1041,15 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
if (IS_ERR_OR_NULL(tracer))
return 0;
- dev = tracer->dev;
-
if (!tracer->str_db.loaded)
queue_work(tracer->work_queue, &tracer->read_fw_strings_work);
+ mutex_lock(&tracer->state_lock);
+ if (test_and_set_bit(MLX5_TRACER_STATE_UP, &tracer->state))
+ goto unlock;
+
+ dev = tracer->dev;
+
err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
if (err) {
mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
@@ -1024,6 +1070,8 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
mlx5_core_warn(dev, "FWTracer: Failed to start tracer %d\n", err);
goto err_notifier_unregister;
}
+unlock:
+ mutex_unlock(&tracer->state_lock);
return 0;
err_notifier_unregister:
@@ -1033,6 +1081,7 @@ err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
err_cancel_work:
cancel_work_sync(&tracer->read_fw_strings_work);
+ mutex_unlock(&tracer->state_lock);
return err;
}
@@ -1042,17 +1091,27 @@ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
if (IS_ERR_OR_NULL(tracer))
return;
+ mutex_lock(&tracer->state_lock);
+ if (!test_and_clear_bit(MLX5_TRACER_STATE_UP, &tracer->state))
+ goto unlock;
+
mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner ? (%d)\n",
tracer->owner);
mlx5_eq_notifier_unregister(tracer->dev, &tracer->nb);
cancel_work_sync(&tracer->ownership_change_work);
cancel_work_sync(&tracer->handle_traces_work);
+ /* It is valid to get here from update_db_work. Hence, don't wait for
+ * update_db_work to finish.
+ */
+ cancel_work(&tracer->update_db_work);
if (tracer->owner)
mlx5_fw_tracer_ownership_release(tracer);
mlx5_core_destroy_mkey(tracer->dev, tracer->buff.mkey);
mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn);
+unlock:
+ mutex_unlock(&tracer->state_lock);
}
/* Free software resources (Buffers, etc ..) */
@@ -1069,6 +1128,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
mlx5_fw_tracer_clean_saved_traces_array(tracer);
mlx5_fw_tracer_free_strings_db(tracer);
mlx5_fw_tracer_destroy_log_buf(tracer);
+ mutex_destroy(&tracer->state_lock);
destroy_workqueue(tracer->work_queue);
kvfree(tracer);
}
@@ -1078,6 +1138,8 @@ static int mlx5_fw_tracer_recreate_strings_db(struct mlx5_fw_tracer *tracer)
struct mlx5_core_dev *dev;
int err;
+ if (test_and_set_bit(MLX5_TRACER_RECREATE_DB, &tracer->state))
+ return 0;
cancel_work_sync(&tracer->read_fw_strings_work);
mlx5_fw_tracer_clean_ready_list(tracer);
mlx5_fw_tracer_clean_print_hash(tracer);
@@ -1088,17 +1150,18 @@ static int mlx5_fw_tracer_recreate_strings_db(struct mlx5_fw_tracer *tracer)
err = mlx5_query_mtrc_caps(tracer);
if (err) {
mlx5_core_dbg(dev, "FWTracer: Failed to query capabilities %d\n", err);
- return err;
+ goto out;
}
err = mlx5_fw_tracer_allocate_strings_db(tracer);
if (err) {
mlx5_core_warn(dev, "FWTracer: Allocate strings DB failed %d\n", err);
- return err;
+ goto out;
}
mlx5_fw_tracer_init_saved_traces_array(tracer);
-
- return 0;
+out:
+ clear_bit(MLX5_TRACER_RECREATE_DB, &tracer->state);
+ return err;
}
int mlx5_fw_tracer_reload(struct mlx5_fw_tracer *tracer)
@@ -1136,8 +1199,10 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
queue_work(tracer->work_queue, &tracer->ownership_change_work);
break;
case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
- if (likely(tracer->str_db.loaded))
- queue_work(tracer->work_queue, &tracer->handle_traces_work);
+ queue_work(tracer->work_queue, &tracer->handle_traces_work);
+ break;
+ case MLX5_TRACER_SUBTYPE_STRINGS_DB_UPDATE:
+ queue_work(tracer->work_queue, &tracer->update_db_work);
break;
default:
mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
index 4762b55b0b0e..5c548bb74f07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
@@ -63,6 +63,11 @@ struct mlx5_fw_trace_data {
char msg[TRACE_STR_MSG];
};
+enum mlx5_fw_tracer_state {
+ MLX5_TRACER_STATE_UP = BIT(0),
+ MLX5_TRACER_RECREATE_DB = BIT(1),
+};
+
struct mlx5_fw_tracer {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@@ -104,6 +109,9 @@ struct mlx5_fw_tracer {
struct work_struct handle_traces_work;
struct hlist_head hash[MESSAGE_HASH_SIZE];
struct list_head ready_strings_list;
+ struct work_struct update_db_work;
+ struct mutex state_lock; /* Synchronize update work with reload flows */
+ unsigned long state;
};
struct tracer_string_format {
@@ -158,6 +166,7 @@ struct tracer_event {
struct tracer_string_event string_event;
struct tracer_timestamp_event timestamp_event;
};
+ u64 *out;
};
struct mlx5_ifc_tracer_event_bits {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
new file mode 100644
index 000000000000..9114661cd967
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */
+
+#include "reporter_vnic.h"
+#include "devlink.h"
+
+#define VNIC_ENV_GET64(vnic_env_stats, c) \
+ MLX5_GET64(query_vnic_env_out, (vnic_env_stats)->query_vnic_env_out, \
+ vport_env.c)
+
+struct mlx5_vnic_diag_stats {
+ __be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
+};
+
+int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
+ struct devlink_fmsg *fmsg,
+ u16 vport_num, bool other_vport)
+{
+ u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
+ struct mlx5_vnic_diag_stats vnic;
+ int err;
+
+ MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
+ MLX5_SET(query_vnic_env_in, in, vport_number, vport_num);
+ MLX5_SET(query_vnic_env_in, in, other_vport, !!other_vport);
+
+ err = mlx5_cmd_exec_inout(dev, query_vnic_env, in, &vnic.query_vnic_env_out);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, "vNIC env counters");
+ if (err)
+ return err;
+
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "total_error_queues",
+ VNIC_ENV_GET64(&vnic, total_error_queues));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "send_queue_priority_update_flow",
+ VNIC_ENV_GET64(&vnic, send_queue_priority_update_flow));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "comp_eq_overrun",
+ VNIC_ENV_GET64(&vnic, comp_eq_overrun));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "async_eq_overrun",
+ VNIC_ENV_GET64(&vnic, async_eq_overrun));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "cq_overrun",
+ VNIC_ENV_GET64(&vnic, cq_overrun));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "invalid_command",
+ VNIC_ENV_GET64(&vnic, invalid_command));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "quota_exceeded_command",
+ VNIC_ENV_GET64(&vnic, quota_exceeded_command));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard",
+ VNIC_ENV_GET64(&vnic, nic_receive_steering_discard));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mlx5_reporter_vnic_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
+
+ return mlx5_reporter_vnic_diagnose_counters(dev, fmsg, 0, false);
+}
+
+static const struct devlink_health_reporter_ops mlx5_reporter_vnic_ops = {
+ .name = "vnic",
+ .diagnose = mlx5_reporter_vnic_diagnose,
+};
+
+void mlx5_reporter_vnic_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+ struct devlink *devlink = priv_to_devlink(dev);
+
+ health->vnic_reporter =
+ devlink_health_reporter_create(devlink,
+ &mlx5_reporter_vnic_ops,
+ 0, dev);
+ if (IS_ERR(health->vnic_reporter))
+ mlx5_core_warn(dev,
+ "Failed to create vnic reporter, err = %ld\n",
+ PTR_ERR(health->vnic_reporter));
+}
+
+void mlx5_reporter_vnic_destroy(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ if (!IS_ERR_OR_NULL(health->vnic_reporter))
+ devlink_health_reporter_destroy(health->vnic_reporter);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h
new file mode 100644
index 000000000000..eba87a39e9b1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+ * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
+ */
+#ifndef __MLX5_REPORTER_VNIC_H
+#define __MLX5_REPORTER_VNIC_H
+
+#include "mlx5_core.h"
+
+void mlx5_reporter_vnic_create(struct mlx5_core_dev *dev);
+void mlx5_reporter_vnic_destroy(struct mlx5_core_dev *dev);
+
+int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
+ struct devlink_fmsg *fmsg,
+ u16 vport_num, bool other_vport);
+
+#endif /* __MLX5_REPORTER_VNIC_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
index 538adab6878b..c5b560a8b026 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
@@ -31,6 +31,7 @@ static const char *const mlx5_rsc_sgmt_name[] = {
struct mlx5_rsc_dump {
u32 pdn;
u32 mkey;
+ u32 number_of_menu_items;
u16 fw_segment_type[MLX5_SGMT_TYPE_NUM];
};
@@ -50,21 +51,37 @@ static int mlx5_rsc_dump_sgmt_get_by_name(char *name)
return -EINVAL;
}
-static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page)
+#define MLX5_RSC_DUMP_MENU_HEADER_SIZE (MLX5_ST_SZ_BYTES(resource_dump_info_segment) + \
+ MLX5_ST_SZ_BYTES(resource_dump_command_segment) + \
+ MLX5_ST_SZ_BYTES(resource_dump_menu_segment))
+
+static int mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct page *page,
+ int read_size, int start_idx)
{
void *data = page_address(page);
enum mlx5_sgmt_type sgmt_idx;
int num_of_items;
char *sgmt_name;
void *member;
+ int size = 0;
void *menu;
int i;
- menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
- num_of_items = MLX5_GET(resource_dump_menu_segment, menu, num_of_records);
+ if (!start_idx) {
+ menu = MLX5_ADDR_OF(menu_resource_dump_response, data, menu);
+ rsc_dump->number_of_menu_items = MLX5_GET(resource_dump_menu_segment, menu,
+ num_of_records);
+ size = MLX5_RSC_DUMP_MENU_HEADER_SIZE;
+ data += size;
+ }
+ num_of_items = rsc_dump->number_of_menu_items;
+
+ for (i = 0; start_idx + i < num_of_items; i++) {
+ size += MLX5_ST_SZ_BYTES(resource_dump_menu_record);
+ if (size >= read_size)
+ return start_idx + i;
- for (i = 0; i < num_of_items; i++) {
- member = MLX5_ADDR_OF(resource_dump_menu_segment, menu, record[i]);
+ member = data + MLX5_ST_SZ_BYTES(resource_dump_menu_record) * i;
sgmt_name = MLX5_ADDR_OF(resource_dump_menu_record, member, segment_name);
sgmt_idx = mlx5_rsc_dump_sgmt_get_by_name(sgmt_name);
if (sgmt_idx == -EINVAL)
@@ -72,6 +89,7 @@ static void mlx5_rsc_dump_read_menu_sgmt(struct mlx5_rsc_dump *rsc_dump, struct
rsc_dump->fw_segment_type[sgmt_idx] = MLX5_GET(resource_dump_menu_record,
member, segment_type);
}
+ return 0;
}
static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
@@ -168,6 +186,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
struct mlx5_rsc_dump_cmd *cmd = NULL;
struct mlx5_rsc_key key = {};
struct page *page;
+ int start_idx = 0;
int size;
int err;
@@ -189,7 +208,7 @@ static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
if (err < 0)
goto destroy_cmd;
- mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page);
+ start_idx = mlx5_rsc_dump_read_menu_sgmt(dev->rsc_dump, page, size, start_idx);
} while (err > 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index 464eb3a18450..d000236ddbac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -87,7 +87,11 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
mlx5_host_pf_cleanup(dev);
- err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
+ err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
if (err)
mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
+
+ err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]);
+ if (err)
+ mlx5_core_warn(dev, "Timeout reclaiming external host VFs pages err(%d)\n", err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f0ac6b0d9653..b8987a404d75 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -59,6 +59,7 @@
#include "lib/hv_vhca.h"
#include "lib/clock.h"
#include "en/rx_res.h"
+#include "en/selq.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -92,29 +93,26 @@ struct page_pool;
#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \
MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD))
-#define MLX5_MPWRQ_LOG_WQE_SZ 18
-#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
- MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
-#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
-
-#define MLX5_ALIGN_MTTS(mtts) (ALIGN(mtts, 8))
-#define MLX5_ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2)
-#define MLX5_MTT_OCTW(mtts) (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
-/* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
- * WQEs, This page will absorb write overflow by the hardware, when
- * receiving packets larger than MTU. These oversize packets are
- * dropped by the driver at a later stage.
+#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18
+
+/* Keep in sync with mlx5e_mpwrq_log_wqe_sz.
+ * These are theoretical maximums, which can be further restricted by
+ * capabilities. These values are used for static resource allocations and
+ * sanity checks.
+ * MLX5_SEND_WQE_MAX_SIZE is a bit bigger than the maximum cacheline-aligned WQE
+ * size actually used at runtime, but it's not a problem when calculating static
+ * array sizes.
*/
-#define MLX5E_REQUIRED_WQE_MTTS (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
-#define MLX5E_REQUIRED_MTTS(wqes) (wqes * MLX5E_REQUIRED_WQE_MTTS)
+#define MLX5_UMR_MAX_FLEX_SPACE \
+ (ALIGN_DOWN(MLX5_SEND_WQE_MAX_SIZE - sizeof(struct mlx5e_umr_wqe), \
+ MLX5_UMR_FLEX_ALIGNMENT))
+#define MLX5_MPWRQ_MAX_PAGES_PER_WQE \
+ rounddown_pow_of_two(MLX5_UMR_MAX_FLEX_SPACE / sizeof(struct mlx5_mtt))
+
#define MLX5E_MAX_RQ_NUM_MTTS \
- ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
+ (ALIGN_DOWN(U16_MAX, 4) * 2) /* Fits into u16 and aligned by WQEBB. */
+#define MLX5E_MAX_RQ_NUM_KSMS (U16_MAX - 1) /* So that num_ksms fits into u16. */
#define MLX5E_ORDER2_MAX_PACKET_MTU (order_base_2(10 * 1024))
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW \
- (ilog2(MLX5E_MAX_RQ_NUM_MTTS / MLX5E_REQUIRED_WQE_MTTS))
-#define MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW \
- (MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW + \
- (MLX5_MPWRQ_LOG_WQE_SZ - MLX5E_ORDER2_MAX_PACKET_MTU))
#define MLX5E_MIN_SKB_FRAG_SZ (MLX5_SKB_FRAG_SZ(MLX5_RX_HEADROOM))
#define MLX5E_LOG_MAX_RX_WQE_BULK \
@@ -126,8 +124,7 @@ struct page_pool;
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE (1 + MLX5E_LOG_MAX_RX_WQE_BULK)
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE min_t(u8, 0xd, \
- MLX5E_LOG_MAX_RQ_NUM_PACKETS_MPW)
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
@@ -145,18 +142,10 @@ struct page_pool;
#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
-#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
-#define MLX5E_UMR_WQE_INLINE_SZ \
- (sizeof(struct mlx5e_umr_wqe) + \
- ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(struct mlx5_mtt), \
- MLX5_UMR_MTT_ALIGNMENT))
-#define MLX5E_UMR_WQEBBS \
- (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
-
#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\
(sizeof(struct mlx5e_umr_wqe) +\
(sizeof(struct mlx5_klm) * (sgl_len)))
@@ -171,10 +160,10 @@ struct page_pool;
(((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))
#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
- ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
+ ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)
#define MLX5E_MAX_KLM_PER_WQE(mdev) \
- MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE)
+ MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -188,12 +177,6 @@ do { \
#define mlx5e_state_dereference(priv, p) \
rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
-enum mlx5e_rq_group {
- MLX5E_RQ_GROUP_REGULAR,
- MLX5E_RQ_GROUP_XSK,
-#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
-};
-
static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
{
if (mlx5_lag_is_lacp_owner(mdev))
@@ -222,10 +205,40 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}
+/* The maximum WQE size is reported in bytes by the max_wqe_sz_sq
+ * capability. The driver caps it at 1 KB (16 WQEBBs), unless the
+ * firmware capability is stricter.
+ */
+static inline u8 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
+{
+ BUILD_BUG_ON(MLX5_SEND_WQE_MAX_WQEBBS > U8_MAX);
+
+ return (u8)min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS,
+ MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
+}
+
+static inline u8 mlx5e_get_max_sq_aligned_wqebbs(struct mlx5_core_dev *mdev)
+{
+/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS.
+ * Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16,
+ * see mlx5e_get_max_sq_wqebbs(), the multiplication (16 * 4 == 64)
+ * overflows the 6-bit DS field of Ctrl Segment. Use a bound lower
+ * than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be
+ * cache-aligned.
+ */
+ u8 wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+
+ wqebbs = min_t(u8, wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
+#if L1_CACHE_BYTES >= 128
+ wqebbs = ALIGN_DOWN(wqebbs, 2);
+#endif
+ return wqebbs;
+}
+
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
- struct mlx5_wqe_data_seg data[0];
+ struct mlx5_wqe_data_seg data[];
};
struct mlx5e_rx_wqe_ll {
@@ -234,7 +247,7 @@ struct mlx5e_rx_wqe_ll {
};
struct mlx5e_rx_wqe_cyc {
- struct mlx5_wqe_data_seg data[0];
+ DECLARE_FLEX_ARRAY(struct mlx5_wqe_data_seg, data);
};
struct mlx5e_umr_wqe {
@@ -242,8 +255,9 @@ struct mlx5e_umr_wqe {
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
union {
- struct mlx5_mtt inline_mtts[0];
- struct mlx5_klm inline_klms[0];
+ DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
+ DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
+ DECLARE_FLEX_ARRAY(struct mlx5_ksm, inline_ksms);
};
};
@@ -294,11 +308,11 @@ struct mlx5e_params {
u8 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
struct {
- struct mlx5e_mqprio_rl *rl;
+ u64 max_rate[TC_MAX_QUEUE];
+ u32 hw_id[TC_MAX_QUEUE];
} channel;
} mqprio;
bool rx_cqe_compress_def;
- bool tunneled_offload_en;
struct dim_cq_moder rx_cq_moderation;
struct dim_cq_moder tx_cq_moderation;
struct mlx5e_packet_merge_param packet_merge;
@@ -321,15 +335,20 @@ static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params)
params->mqprio.num_tc : 1;
}
+/* Keep this enum consistent with the corresponding strings array
+ * declared in en/reporter_rx.c
+ */
enum {
- MLX5E_RQ_STATE_ENABLED,
+ MLX5E_RQ_STATE_ENABLED = 0,
MLX5E_RQ_STATE_RECOVERING,
- MLX5E_RQ_STATE_AM,
+ MLX5E_RQ_STATE_DIM,
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
- MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
+ MLX5E_RQ_STATE_MINI_CQE_ENHANCED, /* set when enhanced mini_cqe_cap is used */
+ MLX5E_RQ_STATE_XSK, /* set to indicate an xsk rq */
+ MLX5E_NUM_RQ_STATES, /* Must be kept last */
};
struct mlx5e_cq {
@@ -356,6 +375,7 @@ struct mlx5e_cq_decomp {
u8 mini_arr_idx;
u16 left;
u16 wqe_counter;
+ bool last_cqe_title;
} ____cacheline_aligned_in_smp;
enum mlx5e_dma_map_type {
@@ -369,15 +389,20 @@ struct mlx5e_sq_dma {
enum mlx5e_dma_map_type type;
};
+/* Keep this enum consistent with the corresponding strings array
+ * declared in en/reporter_tx.c
+ */
enum {
- MLX5E_SQ_STATE_ENABLED,
+ MLX5E_SQ_STATE_ENABLED = 0,
MLX5E_SQ_STATE_MPWQE,
MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
- MLX5E_SQ_STATE_AM,
+ MLX5E_SQ_STATE_DIM,
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
MLX5E_SQ_STATE_PENDING_XSK_TX,
MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC,
+ MLX5E_SQ_STATE_XDP_MULTIBUF,
+ MLX5E_NUM_SQ_STATES, /* Must be kept last */
};
struct mlx5e_tx_mpwqe {
@@ -428,15 +453,16 @@ struct mlx5e_txqsq {
struct netdev_queue *txq;
u32 sqn;
u16 stop_room;
+ u8 max_sq_mpw_wqebbs;
u8 min_inline_mode;
struct device *pdev;
__be32 mkey_be;
unsigned long state;
unsigned int hw_mtu;
- struct hwtstamp_config *tstamp;
struct mlx5_clock *clock;
struct net_device *netdev;
struct mlx5_core_dev *mdev;
+ struct mlx5e_channel *channel;
struct mlx5e_priv *priv;
/* control path */
@@ -449,67 +475,18 @@ struct mlx5e_txqsq {
cqe_ts_to_ns ptp_cyc2time;
} ____cacheline_aligned_in_smp;
-struct mlx5e_dma_info {
- dma_addr_t addr;
- union {
- struct page *page;
- struct xdp_buff *xsk;
- };
-};
-
-/* XDP packets can be transmitted in different ways. On completion, we need to
- * distinguish between them to clean up things in a proper way.
- */
-enum mlx5e_xdp_xmit_mode {
- /* An xdp_frame was transmitted due to either XDP_REDIRECT from another
- * device or XDP_TX from an XSK RQ. The frame has to be unmapped and
- * returned.
- */
- MLX5E_XDP_XMIT_MODE_FRAME,
-
- /* The xdp_frame was created in place as a result of XDP_TX from a
- * regular RQ. No DMA remapping happened, and the page belongs to us.
- */
- MLX5E_XDP_XMIT_MODE_PAGE,
-
- /* No xdp_frame was created at all, the transmit happened from a UMEM
- * page. The UMEM Completion Ring producer pointer has to be increased.
- */
- MLX5E_XDP_XMIT_MODE_XSK,
-};
-
-struct mlx5e_xdp_info {
- enum mlx5e_xdp_xmit_mode mode;
- union {
- struct {
- struct xdp_frame *xdpf;
- dma_addr_t dma_addr;
- } frame;
- struct {
- struct mlx5e_rq *rq;
- struct mlx5e_dma_info di;
- } page;
- };
-};
-
-struct mlx5e_xmit_data {
- dma_addr_t dma_addr;
- void *data;
- u32 len;
-};
-
struct mlx5e_xdp_info_fifo {
- struct mlx5e_xdp_info *xi;
+ union mlx5e_xdp_info *xi;
u32 *cc;
u32 *pc;
u32 mask;
};
struct mlx5e_xdpsq;
+struct mlx5e_xmit_data;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
struct mlx5e_xmit_data *,
- struct mlx5e_xdp_info *,
int);
struct mlx5e_xdpsq {
@@ -541,6 +518,8 @@ struct mlx5e_xdpsq {
u32 sqn;
struct device *pdev;
__be32 mkey_be;
+ u16 stop_room;
+ u8 max_sq_mpw_wqebbs;
u8 min_inline_mode;
unsigned long state;
unsigned int hw_mtu;
@@ -580,20 +559,36 @@ struct mlx5e_icosq {
struct work_struct recover_work;
} ____cacheline_aligned_in_smp;
+struct mlx5e_frag_page {
+ struct page *page;
+ u16 frags;
+};
+
+enum mlx5e_wqe_frag_flag {
+ MLX5E_WQE_FRAG_LAST_IN_PAGE,
+ MLX5E_WQE_FRAG_SKIP_RELEASE,
+};
+
struct mlx5e_wqe_frag_info {
- struct mlx5e_dma_info *di;
+ union {
+ struct mlx5e_frag_page *frag_page;
+ struct xdp_buff **xskp;
+ };
u32 offset;
- bool last_in_page;
+ u8 flags;
};
-struct mlx5e_umr_dma_info {
- struct mlx5e_dma_info dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
+union mlx5e_alloc_units {
+ DECLARE_FLEX_ARRAY(struct mlx5e_frag_page, frag_pages);
+ DECLARE_FLEX_ARRAY(struct page *, pages);
+ DECLARE_FLEX_ARRAY(struct xdp_buff *, xsk_buffs);
};
struct mlx5e_mpw_info {
- struct mlx5e_umr_dma_info umr;
u16 consumed_strides;
- DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
+ DECLARE_BITMAP(skip_release_bitmap, MLX5_MPWRQ_MAX_PAGES_PER_WQE);
+ struct mlx5e_frag_page linear_page;
+ union mlx5e_alloc_units alloc_units;
};
#define MLX5E_MAX_RX_FRAGS 4
@@ -601,23 +596,19 @@ struct mlx5e_mpw_info {
/* a single cache unit is capable of serving one napi call (for non-striding rq)
* or an MPWQE (for striding rq).
*/
-#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
- MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
+#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_MAX_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
+ MLX5_MPWRQ_MAX_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
-struct mlx5e_page_cache {
- u32 head;
- u32 tail;
- struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
-};
struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt,
+ u32 head_offset, u32 page_idx);
typedef struct sk_buff *
-(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
- struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
+(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);
@@ -639,13 +630,24 @@ struct mlx5e_rq_frags_info {
struct mlx5e_rq_frag_info arr[MLX5E_MAX_RX_FRAGS];
u8 num_frags;
u8 log_num_frags;
- u8 wqe_bulk;
+ u16 wqe_bulk;
+ u16 refill_unit;
+ u8 wqe_index_mask;
+};
+
+struct mlx5e_dma_info {
+ dma_addr_t addr;
+ union {
+ struct mlx5e_frag_page *frag_page;
+ struct page *page;
+ };
};
struct mlx5e_shampo_hd {
u32 mkey;
struct mlx5e_dma_info *info;
- struct page *last_page;
+ struct mlx5e_frag_page *pages;
+ u16 curr_page_index;
u16 hd_per_wq;
u16 hd_per_wqe;
unsigned long *bitmap;
@@ -661,13 +663,20 @@ struct mlx5e_hw_gro_data {
int second_ip_id;
};
+enum mlx5e_mpwrq_umr_mode {
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED,
+ MLX5E_MPWRQ_UMR_MODE_UNALIGNED,
+ MLX5E_MPWRQ_UMR_MODE_OVERSIZED,
+ MLX5E_MPWRQ_UMR_MODE_TRIPLE,
+};
+
struct mlx5e_rq {
/* data path */
union {
struct {
struct mlx5_wq_cyc wq;
struct mlx5e_wqe_frag_info *frags;
- struct mlx5e_dma_info *di;
+ union mlx5e_alloc_units *alloc_units;
struct mlx5e_rq_frags_info info;
mlx5e_fp_skb_from_cqe skb_from_cqe;
} wqe;
@@ -676,12 +685,19 @@ struct mlx5e_rq {
struct mlx5e_umr_wqe umr_wqe;
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
+ __be32 umr_mkey_be;
u16 num_strides;
u16 actual_wq_head;
u8 log_stride_sz;
u8 umr_in_progress;
u8 umr_last_bulk;
u8 umr_completed;
+ u8 min_wqe_bulk;
+ u8 page_shift;
+ u8 pages_per_wqe;
+ u8 umr_wqebbs;
+ u8 mtts_per_wqe;
+ u8 umr_mode;
struct mlx5e_shampo_hd *shampo;
} mpwqe;
};
@@ -696,7 +712,6 @@ struct mlx5e_rq {
struct mlx5e_rq_stats *stats;
struct mlx5e_cq cq;
struct mlx5e_cq_decomp cqd;
- struct mlx5e_page_cache page_cache;
struct hwtstamp_config *tstamp;
struct mlx5_clock *clock;
struct mlx5e_icosq *icosq;
@@ -731,7 +746,7 @@ struct mlx5e_rq {
u8 wq_type;
u32 rqn;
struct mlx5_core_dev *mdev;
- u32 umr_mkey;
+ struct mlx5e_channel *channel;
struct mlx5e_dma_info wqe_overflow;
/* XDP read-mostly */
@@ -783,6 +798,8 @@ struct mlx5e_channel {
DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
int ix;
int cpu;
+ /* Sync between icosq recovery and XSK enable/disable. */
+ struct mutex icosq_recovery_lock;
};
struct mlx5e_ptp;
@@ -818,11 +835,6 @@ enum {
MLX5E_STATE_XDP_ACTIVE,
};
-enum {
- MLX5E_TC_PRIO = 0,
- MLX5E_NIC_PRIO
-};
-
struct mlx5e_modify_sq_param {
int curr_state;
int next_state;
@@ -862,24 +874,13 @@ struct mlx5e_scratchpad {
cpumask_var_t cpumask;
};
-struct mlx5e_htb {
- DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
- DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
- struct mlx5e_sq_stats **qos_sq_stats;
- u16 max_qos_sqs;
- u16 maj_id;
- u16 defcls;
-};
-
struct mlx5e_trap;
+struct mlx5e_htb;
struct mlx5e_priv {
/* priv data path fields - start */
- /* +1 for port ptp ts */
- struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC +
- MLX5E_QOS_MAX_LEAF_NODES];
- int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
- int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_selq selq;
+ struct mlx5e_txqsq **txq2sq;
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp;
#endif
@@ -893,9 +894,9 @@ struct mlx5e_priv {
struct mlx5e_channels channels;
u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC];
struct mlx5e_rx_res *rx_res;
- u32 tx_rates[MLX5E_MAX_NUM_SQS];
+ u32 *tx_rates;
- struct mlx5e_flow_steering fs;
+ struct mlx5e_flow_steering *fs;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
@@ -909,9 +910,11 @@ struct mlx5e_priv {
struct net_device *netdev;
struct mlx5e_trap *en_trap;
struct mlx5e_stats stats;
- struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+ struct mlx5e_channel_stats **channel_stats;
struct mlx5e_channel_stats trap_stats;
struct mlx5e_ptp_stats ptp_stats;
+ struct mlx5e_sq_stats **htb_qos_sq_stats;
+ u16 htb_max_qos_sqs;
u16 stats_nch;
u16 max_nch;
u8 max_opened_tc;
@@ -922,7 +925,6 @@ struct mlx5e_priv {
u16 drop_rq_q_counter;
struct notifier_block events_nb;
struct notifier_block blocking_events_nb;
- int num_tc_x_num_ch;
struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -931,6 +933,9 @@ struct mlx5e_priv {
const struct mlx5e_profile *profile;
void *ppriv;
+#ifdef CONFIG_MLX5_EN_MACSEC
+ struct mlx5e_macsec *macsec;
+#endif
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5e_ipsec *ipsec;
#endif
@@ -944,8 +949,14 @@ struct mlx5e_priv {
struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
struct mlx5e_scratchpad scratchpad;
- struct mlx5e_htb htb;
+ struct mlx5e_htb *htb;
struct mlx5e_mqprio_rl *mqprio_rl;
+ struct dentry *dfs_root;
+};
+
+struct mlx5e_dev {
+ struct mlx5e_priv *priv;
+ struct devlink_port dl_port;
};
struct mlx5e_rx_handlers {
@@ -956,6 +967,14 @@ struct mlx5e_rx_handlers {
extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
+enum mlx5e_profile_feature {
+ MLX5E_PROFILE_FEATURE_PTP_RX,
+ MLX5E_PROFILE_FEATURE_PTP_TX,
+ MLX5E_PROFILE_FEATURE_QOS_HTB,
+ MLX5E_PROFILE_FEATURE_FS_VLAN,
+ MLX5E_PROFILE_FEATURE_FS_TC,
+};
+
struct mlx5e_profile {
int (*init)(struct mlx5_core_dev *mdev,
struct net_device *netdev);
@@ -969,23 +988,26 @@ struct mlx5e_profile {
int (*update_rx)(struct mlx5e_priv *priv);
void (*update_stats)(struct mlx5e_priv *priv);
void (*update_carrier)(struct mlx5e_priv *priv);
+ int (*max_nch_limit)(struct mlx5_core_dev *mdev);
unsigned int (*stats_grps_num)(struct mlx5e_priv *priv);
mlx5e_stats_grp_t *stats_grps;
const struct mlx5e_rx_handlers *rx_handlers;
int max_tc;
- u8 rq_groups;
- bool rx_ptp_support;
+ u32 features;
};
+#define mlx5e_profile_feature_cap(profile, feature) \
+ ((profile)->features & BIT(MLX5E_PROFILE_FEATURE_##feature))
+
void mlx5e_build_ptys2ethtool_map(void);
-bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
+bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
+ enum mlx5e_mpwrq_umr_mode umr_mode);
void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
-void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
@@ -1008,15 +1030,13 @@ struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
struct mlx5e_xsk_param *xsk, int node,
struct mlx5e_rq *rq);
+#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
struct mlx5e_sq_param;
-int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
-void mlx5e_close_icosq(struct mlx5e_icosq *sq);
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
struct mlx5e_xdpsq *sq, bool is_redirect);
@@ -1038,6 +1058,9 @@ void mlx5e_close_cq(struct mlx5e_cq *cq);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
+void mlx5e_trigger_napi_icosq(struct mlx5e_channel *c);
+void mlx5e_trigger_napi_sched(struct napi_struct *napi);
+
int mlx5e_open_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *chs);
void mlx5e_close_channels(struct mlx5e_channels *chs);
@@ -1057,13 +1080,12 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
mlx5e_fp_preactivate preactivate,
void *context, bool reset);
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
-int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);
-int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
+int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state);
void mlx5e_activate_rq(struct mlx5e_rq *rq);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
@@ -1098,6 +1120,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
extern const struct ethtool_ops mlx5e_ethtool_ops;
+int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
@@ -1110,8 +1133,6 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
-int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
-void mlx5e_free_di_list(struct mlx5e_rq *rq);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
@@ -1140,7 +1161,8 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data);
void mlx5e_ethtool_get_ringparam(struct mlx5e_priv *priv,
- struct ethtool_ringparam *param);
+ struct ethtool_ringparam *param,
+ struct kernel_ethtool_ringparam *kernel_param);
int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv,
struct ethtool_ringparam *param);
void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
@@ -1148,9 +1170,12 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
struct ethtool_channels *ch);
int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
- struct ethtool_coalesce *coal);
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
- struct ethtool_coalesce *coal);
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
struct ethtool_link_ksettings *link_ksettings);
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
@@ -1180,14 +1205,14 @@ mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe);
}
+int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev);
int mlx5e_priv_init(struct mlx5e_priv *priv,
const struct mlx5e_profile *profile,
struct net_device *netdev,
struct mlx5_core_dev *mdev);
void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
struct net_device *
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
- unsigned int txqs, unsigned int rxqs);
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
@@ -1199,6 +1224,7 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
+void mlx5e_set_xdp_feature(struct net_device *netdev);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features);
@@ -1209,4 +1235,5 @@ int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_t
int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
#endif
+int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
index e7c14c0de0a7..48581ea3adcb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
@@ -10,28 +10,33 @@ unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs)
return chs->num;
}
-void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+static struct mlx5e_channel *mlx5e_channels_get(struct mlx5e_channels *chs, unsigned int ix)
{
- struct mlx5e_channel *c;
+ WARN_ON_ONCE(ix >= mlx5e_channels_get_num(chs));
+ return chs->c[ix];
+}
- WARN_ON(ix >= mlx5e_channels_get_num(chs));
- c = chs->c[ix];
+bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix)
+{
+ struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
- *rqn = c->rq.rqn;
+ return test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
}
-bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
{
- struct mlx5e_channel *c;
+ struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
- WARN_ON(ix >= mlx5e_channels_get_num(chs));
- c = chs->c[ix];
+ *rqn = c->rq.rqn;
+}
- if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
- return false;
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn)
+{
+ struct mlx5e_channel *c = mlx5e_channels_get(chs, ix);
+
+ WARN_ON_ONCE(!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state));
*rqn = c->xskrq.rqn;
- return true;
}
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
index ca00cbc827cb..637ca90daaa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
@@ -9,8 +9,9 @@
struct mlx5e_channels;
unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs);
+bool mlx5e_channels_is_xsk(struct mlx5e_channels *chs, unsigned int ix);
void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
-bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
+void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn);
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn);
#endif /* __MLX5_EN_CHANNELS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
index 9976de8b9047..b59aee75de94 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
@@ -40,13 +40,11 @@ struct mlx5e_dcbx_dp {
};
void mlx5e_dcbnl_build_netdev(struct net_device *netdev);
-void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#else
static inline void mlx5e_dcbnl_build_netdev(struct net_device *netdev) {}
-static inline void mlx5e_dcbnl_build_rep_netdev(struct net_device *netdev) {}
static inline void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) {}
static inline void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv) {}
static inline void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv) {}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index ae52e7f38306..c6b6e290fd79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -4,6 +4,31 @@
#include "en/devlink.h"
#include "eswitch.h"
+static const struct devlink_ops mlx5e_devlink_ops = {
+};
+
+struct mlx5e_dev *mlx5e_create_devlink(struct device *dev,
+ struct mlx5_core_dev *mdev)
+{
+ struct mlx5e_dev *mlx5e_dev;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc_ns(&mlx5e_devlink_ops, sizeof(*mlx5e_dev),
+ devlink_net(priv_to_devlink(mdev)), dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+ devlink_register(devlink);
+ return devlink_priv(devlink);
+}
+
+void mlx5e_destroy_devlink(struct mlx5e_dev *mlx5e_dev)
+{
+ struct devlink *devlink = priv_to_devlink(mlx5e_dev);
+
+ devlink_unregister(devlink);
+ devlink_free(devlink);
+}
+
static void
mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid)
{
@@ -14,56 +39,36 @@ mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_i
memcpy(ppid->id, &parent_id, sizeof(parent_id));
}
-int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
+int mlx5e_devlink_port_register(struct mlx5e_dev *mlx5e_dev,
+ struct mlx5_core_dev *mdev)
{
- struct devlink *devlink = priv_to_devlink(priv->mdev);
+ struct devlink *devlink = priv_to_devlink(mlx5e_dev);
struct devlink_port_attrs attrs = {};
struct netdev_phys_item_id ppid = {};
- struct devlink_port *dl_port;
unsigned int dl_port_index;
- if (mlx5_core_is_pf(priv->mdev)) {
+ if (mlx5_core_is_pf(mdev)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = mlx5_get_dev_index(priv->mdev);
- if (MLX5_ESWITCH_MANAGER(priv->mdev)) {
- mlx5e_devlink_get_port_parent_id(priv->mdev, &ppid);
+ attrs.phys.port_number = mlx5_get_dev_index(mdev);
+ if (MLX5_ESWITCH_MANAGER(mdev)) {
+ mlx5e_devlink_get_port_parent_id(mdev, &ppid);
memcpy(attrs.switch_id.id, ppid.id, ppid.id_len);
attrs.switch_id.id_len = ppid.id_len;
}
- dl_port_index = mlx5_esw_vport_to_devlink_port_index(priv->mdev,
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(mdev,
MLX5_VPORT_UPLINK);
} else {
attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
- dl_port_index = mlx5_esw_vport_to_devlink_port_index(priv->mdev, 0);
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(mdev, 0);
}
- dl_port = mlx5e_devlink_get_dl_port(priv);
- memset(dl_port, 0, sizeof(*dl_port));
- devlink_port_attrs_set(dl_port, &attrs);
+ devlink_port_attrs_set(&mlx5e_dev->dl_port, &attrs);
- return devlink_port_register(devlink, dl_port, dl_port_index);
+ return devlink_port_register(devlink, &mlx5e_dev->dl_port,
+ dl_port_index);
}
-void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv)
+void mlx5e_devlink_port_unregister(struct mlx5e_dev *mlx5e_dev)
{
- struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
-
- devlink_port_type_eth_set(dl_port, priv->netdev);
-}
-
-void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
-{
- struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
-
- devlink_port_unregister(dl_port);
-}
-
-struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (!netif_device_present(dev))
- return NULL;
-
- return mlx5e_devlink_get_dl_port(priv);
+ devlink_port_unregister(&mlx5e_dev->dl_port);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
index 10b50feb9883..d5ec4461f300 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
@@ -7,15 +7,11 @@
#include <net/devlink.h>
#include "en.h"
-int mlx5e_devlink_port_register(struct mlx5e_priv *priv);
-void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv);
-void mlx5e_devlink_port_type_eth_set(struct mlx5e_priv *priv);
-struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev);
-
-static inline struct devlink_port *
-mlx5e_devlink_get_dl_port(struct mlx5e_priv *priv)
-{
- return &priv->mdev->mlx5e_res.dl_port;
-}
+struct mlx5e_dev *mlx5e_create_devlink(struct device *dev,
+ struct mlx5_core_dev *mdev);
+void mlx5e_destroy_devlink(struct mlx5e_dev *mlx5e_dev);
+int mlx5e_devlink_port_register(struct mlx5e_dev *mlx5e_dev,
+ struct mlx5_core_dev *mdev);
+void mlx5e_devlink_port_unregister(struct mlx5e_dev *mlx5e_dev);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 678ffbb48a25..e5a44b0b9616 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -8,32 +8,17 @@
#include "lib/fs_ttc.h"
struct mlx5e_post_act;
+struct mlx5e_tc_table;
enum {
MLX5E_TC_FT_LEVEL = 0,
MLX5E_TC_TTC_FT_LEVEL,
+ MLX5E_TC_MISS_LEVEL,
};
-struct mlx5e_tc_table {
- /* Protects the dynamic assignment of the t parameter
- * which is the nic tc root table.
- */
- struct mutex t_lock;
- struct mlx5_flow_table *t;
- struct mlx5_fs_chains *chains;
- struct mlx5e_post_act *post_act;
-
- struct rhashtable ht;
-
- struct mod_hdr_tbl mod_hdr;
- struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
- DECLARE_HASHTABLE(hairpin_tbl, 8);
-
- struct notifier_block netdevice_nb;
- struct netdev_net_notifier netdevice_nn;
-
- struct mlx5_tc_ct_priv *ct;
- struct mapping_ctx *mapping;
+enum {
+ MLX5E_TC_PRIO = 0,
+ MLX5E_NIC_PRIO
};
struct mlx5e_flow_table {
@@ -99,113 +84,126 @@ enum {
MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
- MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
+ MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
+ MLX5E_ACCEL_FS_ESP_FT_LEVEL,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
+ MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
#endif
};
-struct mlx5e_priv;
-
-#ifdef CONFIG_MLX5_EN_RXNFC
-
-struct mlx5e_ethtool_table {
- struct mlx5_flow_table *ft;
- int num_rules;
-};
-
-#define ETHTOOL_NUM_L3_L4_FTS 7
-#define ETHTOOL_NUM_L2_FTS 4
-
-struct mlx5e_ethtool_steering {
- struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
- struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
- struct list_head rules;
- int tot_num_rules;
-};
-
-void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
-void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
-int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
-int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *info, u32 *rule_locs);
-#else
-static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
-static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
-static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
-{ return -EOPNOTSUPP; }
-static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
- struct ethtool_rxnfc *info, u32 *rule_locs)
-{ return -EOPNOTSUPP; }
-#endif /* CONFIG_MLX5_EN_RXNFC */
+struct mlx5e_flow_steering;
+struct mlx5e_rx_res;
#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables;
-int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
-void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
-int mlx5e_arfs_enable(struct mlx5e_priv *priv);
-int mlx5e_arfs_disable(struct mlx5e_priv *priv);
+int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res, bool ntuple);
+void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple);
+int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs);
+int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs);
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
#else
-static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) { return 0; }
-static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
-static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
-static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
+static inline int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res, bool ntuple)
+{ return 0; }
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple) {}
+static inline int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
+{ return -EOPNOTSUPP; }
+static inline int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
+{ return -EOPNOTSUPP; }
#endif
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_fs_tcp;
#endif
+struct mlx5e_profile;
struct mlx5e_fs_udp;
struct mlx5e_fs_any;
struct mlx5e_ptp_fs;
-struct mlx5e_flow_steering {
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_namespace *egress_ns;
+void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
+ struct ttc_params *ttc_params, bool tunnel);
+
+void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs);
+int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res);
+
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
+
+void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc);
+void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc);
+
+int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
+ struct mlx5e_rx_res *rx_res,
+ const struct mlx5e_profile *profile,
+ struct net_device *netdev);
+void mlx5e_destroy_flow_steering(struct mlx5e_flow_steering *fs, bool ntuple,
+ const struct mlx5e_profile *profile);
+
+struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
+ struct mlx5_core_dev *mdev,
+ bool state_destroy,
+ struct dentry *dfs_root);
+void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
+struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_tc(struct mlx5e_flow_steering *fs, struct mlx5e_tc_table *tc);
+struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs);
+struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs);
+struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress);
+void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress);
#ifdef CONFIG_MLX5_EN_RXNFC
- struct mlx5e_ethtool_steering ethtool;
+struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs);
#endif
- struct mlx5e_tc_table tc;
- struct mlx5e_promisc_table promisc;
- struct mlx5e_vlan_table *vlan;
- struct mlx5e_l2_table l2;
- struct mlx5_ttc_table *ttc;
- struct mlx5_ttc_table *inner_ttc;
+struct mlx5_ttc_table *mlx5e_fs_get_ttc(struct mlx5e_flow_steering *fs, bool inner);
+void mlx5e_fs_set_ttc(struct mlx5e_flow_steering *fs, struct mlx5_ttc_table *ttc, bool inner);
#ifdef CONFIG_MLX5_EN_ARFS
- struct mlx5e_arfs_tables *arfs;
+struct mlx5e_arfs_tables *mlx5e_fs_get_arfs(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_arfs(struct mlx5e_flow_steering *fs, struct mlx5e_arfs_tables *arfs);
#endif
+struct mlx5e_ptp_fs *mlx5e_fs_get_ptp(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_ptp(struct mlx5e_flow_steering *fs, struct mlx5e_ptp_fs *ptp_fs);
+struct mlx5e_fs_any *mlx5e_fs_get_any(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_any(struct mlx5e_flow_steering *fs, struct mlx5e_fs_any *any);
+struct mlx5e_fs_udp *mlx5e_fs_get_udp(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_udp(struct mlx5e_flow_steering *fs, struct mlx5e_fs_udp *udp);
#ifdef CONFIG_MLX5_EN_TLS
- struct mlx5e_accel_fs_tcp *accel_tcp;
+struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_accel_tcp(struct mlx5e_flow_steering *fs, struct mlx5e_accel_fs_tcp *accel_tcp);
#endif
- struct mlx5e_fs_udp *udp;
- struct mlx5e_fs_any *any;
- struct mlx5e_ptp_fs *ptp_fs;
-};
-
-void mlx5e_set_ttc_params(struct mlx5e_priv *priv,
- struct ttc_params *ttc_params, bool tunnel);
-
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
-int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
-
-void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
-
-void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
-void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
-
-int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
-
-int mlx5e_fs_init(struct mlx5e_priv *priv);
-void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
-
-int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
-void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
-int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
-void mlx5e_remove_mac_trap(struct mlx5e_priv *priv);
+void mlx5e_fs_set_state_destroy(struct mlx5e_flow_steering *fs, bool state_destroy);
+void mlx5e_fs_set_vlan_strip_disable(struct mlx5e_flow_steering *fs, bool vlan_strip_disable);
+
+struct mlx5_core_dev *mlx5e_fs_get_mdev(struct mlx5e_flow_steering *fs);
+int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num);
+void mlx5e_remove_vlan_trap(struct mlx5e_flow_steering *fs);
+int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num);
+void mlx5e_remove_mac_trap(struct mlx5e_flow_steering *fs);
+void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs, struct net_device *netdev);
+int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid);
+int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
+ struct net_device *netdev,
+ __be16 proto, u16 vid);
+void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev);
+
+struct dentry *mlx5e_fs_get_debugfs_root(struct mlx5e_flow_steering *fs);
+
+#define fs_err(fs, fmt, ...) \
+ mlx5_core_err(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
+
+#define fs_dbg(fs, fmt, ...) \
+ mlx5_core_dbg(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
+
+#define fs_warn(fs, fmt, ...) \
+ mlx5_core_warn(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
+
+#define fs_warn_once(fs, fmt, ...) \
+ mlx5_core_warn_once(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
#endif /* __MLX5E_FLOW_STEER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
new file mode 100644
index 000000000000..9e276fd3c0cf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_ethtool.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5E_FS_ETHTOOL_H__
+#define __MLX5E_FS_ETHTOOL_H__
+
+struct mlx5e_priv;
+struct mlx5e_ethtool_steering;
+#ifdef CONFIG_MLX5_EN_RXNFC
+int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool);
+void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool);
+void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs);
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs);
+int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
+int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, u32 *rule_locs);
+#else
+static inline int mlx5e_ethtool_alloc(struct mlx5e_ethtool_steering **ethtool)
+{ return 0; }
+static inline void mlx5e_ethtool_free(struct mlx5e_ethtool_steering *ethtool) { }
+static inline void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs) { }
+static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs) { }
+static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
+{ return -EOPNOTSUPP; }
+static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, u32 *rule_locs)
+{ return -EOPNOTSUPP; }
+#endif
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
index 7aa25a5e29d7..03cb79adf912 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c